diff --git a/.artifactignore b/.artifactignore index 1126a160d9..cbaad306e2 100644 --- a/.artifactignore +++ b/.artifactignore @@ -1,2 +1,5 @@ **/* !*.deb +!coverage.info +!coverage.xml +!build.info diff --git a/.azure-pipelines/build-docker-sonic-vs-template.yml b/.azure-pipelines/build-docker-sonic-vs-template.yml index ff0ff6c0cb..e276bd332d 100644 --- a/.azure-pipelines/build-docker-sonic-vs-template.yml +++ b/.azure-pipelines/build-docker-sonic-vs-template.yml @@ -14,15 +14,55 @@ parameters: - name: swss_artifact_name type: string -- name: sairedis_artifact_name +- name: swss_common_artifact_name type: string -- name: swss_common_artifact_name +- name: swss_common_branch type: string + default: '$(BUILD_BRANCH)' - name: artifact_name type: string +- name: buildimage_artifact_name + type: string + default: 'sonic-buildimage.vs' + +- name: buildimage_artifact_project + type: string + default: 'build' + +- name: buildimage_artifact_pipeline + type: string + default: 'Azure.sonic-buildimage.official.vs' + +- name: buildimage_artifact_branch + type: string + default: '$(BUILD_BRANCH)' + +- name: sairedis_artifact_name + type: string + +- name: sairedis_artifact_project + type: string + default: 'build' + +- name: sairedis_artifact_pipeline + type: string + default: 'Azure.sonic-sairedis' + +- name: sairedis_artifact_branch + type: string + default: '$(BUILD_BRANCH)' + +- name: sairedis_artifact_pattern + type: string + default: '**' + +- name: asan + type: boolean + default: false + jobs: - job: displayName: ${{ parameters.arch }} @@ -39,19 +79,42 @@ jobs: pipeline: Azure.sonic-swss-common artifact: ${{ parameters.swss_common_artifact_name }} runVersion: 'latestFromBranch' - runBranch: 'refs/heads/$(BUILD_BRANCH)' + runBranch: 'refs/heads/${{ parameters.swss_common_branch }}' path: $(Build.ArtifactStagingDirectory)/download displayName: "Download sonic swss common deb packages" - task: DownloadPipelineArtifact@2 inputs: source: specific - project: build - pipeline: 
Azure.sonic-sairedis + project: ${{ parameters.sairedis_artifact_project }} + pipeline: ${{ parameters.sairedis_artifact_pipeline }} artifact: ${{ parameters.sairedis_artifact_name }} runVersion: 'latestFromBranch' + runBranch: 'refs/heads/${{ parameters.sairedis_artifact_branch }}' + path: $(Build.ArtifactStagingDirectory)/download/sairedis + patterns: | + ${{ parameters.sairedis_artifact_pattern }}/libsaivs_*.deb + ${{ parameters.sairedis_artifact_pattern }}/libsaivs-dev_*.deb + ${{ parameters.sairedis_artifact_pattern }}/libsairedis_*.deb + ${{ parameters.sairedis_artifact_pattern }}/libsairedis-dev_*.deb + ${{ parameters.sairedis_artifact_pattern }}/libsaimetadata_*.deb + ${{ parameters.sairedis_artifact_pattern }}/libsaimetadata-dev_*.deb + ${{ parameters.sairedis_artifact_pattern }}/syncd-vs_*.deb + displayName: "Download sonic sairedis deb packages" + - task: DownloadPipelineArtifact@2 + inputs: + source: specific + project: build + pipeline: sonic-net.sonic-dash-api + ${{ if eq(parameters.arch, 'amd64') }}: + artifact: sonic-dash-api + ${{ else }}: + artifact: sonic-dash-api.${{ parameters.arch }} + runVersion: 'latestFromBranch' runBranch: 'refs/heads/$(BUILD_BRANCH)' path: $(Build.ArtifactStagingDirectory)/download - displayName: "Download sonic sairedis deb packages" + patterns: | + libdashapi*.deb + displayName: "Download dash api" - task: DownloadPipelineArtifact@2 inputs: artifact: ${{ parameters.swss_artifact_name }} @@ -60,33 +123,44 @@ jobs: - task: DownloadPipelineArtifact@2 inputs: source: specific - project: build - pipeline: Azure.sonic-buildimage.official.vs - artifact: sonic-buildimage.vs + project: ${{ parameters.buildimage_artifact_project }} + pipeline: ${{ parameters.buildimage_artifact_pipeline }} + artifact: ${{ parameters.buildimage_artifact_name }} runVersion: 'latestFromBranch' - runBranch: 'refs/heads/$(BUILD_BRANCH)' + runBranch: 'refs/heads/${{ parameters.buildimage_artifact_branch }}' path: 
$(Build.ArtifactStagingDirectory)/download - patterns: '**/target/docker-sonic-vs.gz' - displayName: "Download sonic-buildimage docker-sonic-vs" + patterns: '**/target/${{ parameters.artifact_name }}.gz' + displayName: "Download sonic-buildimage ${{ parameters.artifact_name }}" - script: | set -ex echo $(Build.DefinitionName).$(Build.BuildNumber) - docker load < $(Build.ArtifactStagingDirectory)/download/target/docker-sonic-vs.gz + docker load < $(Build.ArtifactStagingDirectory)/download/target/${{ parameters.artifact_name }}.gz mkdir -p .azure-pipelines/docker-sonic-vs/debs + find $(Build.ArtifactStagingDirectory)/download/sairedis -name '*.deb' -exec cp "{}" .azure-pipelines/docker-sonic-vs/debs \; cp -v $(Build.ArtifactStagingDirectory)/download/*.deb .azure-pipelines/docker-sonic-vs/debs + if [ -f $(Build.ArtifactStagingDirectory)/download/coverage.info ]; then + cp -v $(Build.ArtifactStagingDirectory)/download/coverage.info $(Build.ArtifactStagingDirectory)/ + cp -v $(Build.ArtifactStagingDirectory)/download/coverage.xml $(Build.ArtifactStagingDirectory)/ + fi pushd .azure-pipelines - docker build --no-cache -t docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber) docker-sonic-vs + build_dir=$(grep BUILD_DIR $(Build.ArtifactStagingDirectory)/download/build.info | cut -d= -f2) + build_args="--build-arg build_dir=$build_dir" + if [ '${{ parameters.asan }}' == True ]; then + build_args="$build_args --build-arg need_dbg=y" + fi + + docker build $build_args --no-cache -t docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber).asan-${{ parameters.asan }} docker-sonic-vs popd - docker save docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber) | gzip -c > $(Build.ArtifactStagingDirectory)/docker-sonic-vs.gz + docker save docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber).asan-${{ parameters.asan }} | gzip -c > $(Build.ArtifactStagingDirectory)/docker-sonic-vs.gz rm -rf $(Build.ArtifactStagingDirectory)/download - displayName: "Build 
docker-sonic-vs" + displayName: "Build ${{ parameters.artifact_name }}" - publish: $(Build.ArtifactStagingDirectory)/ artifact: ${{ parameters.artifact_name }} displayName: "Archive sonic docker vs image" diff --git a/.azure-pipelines/build-template.yml b/.azure-pipelines/build-template.yml index f8040bca56..9c7e84b208 100644 --- a/.azure-pipelines/build-template.yml +++ b/.azure-pipelines/build-template.yml @@ -26,9 +26,29 @@ parameters: - name: sairedis_artifact_name type: string +- name: sairedis_artifact_project + type: string + default: 'build' + +- name: sairedis_artifact_pipeline + type: string + default: 'Azure.sonic-sairedis' + +- name: sairedis_artifact_branch + type: string + default: '$(BUILD_BRANCH)' + +- name: sairedis_artifact_pattern + type: string + default: '**' + - name: swss_common_artifact_name type: string +- name: swss_common_branch + type: string + default: '$(BUILD_BRANCH)' + - name: artifact_name type: string @@ -43,6 +63,22 @@ parameters: - name: common_lib_artifact_name type: string +- name: common_lib_artifact_project + type: string + default: 'build' + +- name: common_lib_artifact_pipeline + type: string + default: 'Azure.sonic-buildimage.common_libs' + +- name: common_lib_artifact_branch + type: string + default: '$(BUILD_BRANCH)' + +- name: asan + type: boolean + default: false + jobs: - job: displayName: ${{ parameters.arch }} @@ -62,15 +98,14 @@ jobs: clean: true submodules: true - script: | - sudo apt-get install -y libhiredis0.14 libhiredis-dev - sudo apt-get install -y libzmq5 libzmq3-dev - sudo apt-get install -qq -y \ - libhiredis-dev \ - swig3.0 - sudo apt-get install -y libdbus-1-3 - sudo apt-get install -y libteam-dev \ - libteam5 \ - libteamdctl0 + sudo apt-get update + sudo apt-get install -y \ + libhiredis-dev \ + libzmq3-dev \ + swig4.0 \ + libdbus-1-dev \ + libteam-dev + sudo pip3 install lcov_cobertura displayName: "Install dependencies" - task: DownloadPipelineArtifact@2 inputs: @@ -79,8 +114,9 @@ jobs: pipeline: 
Azure.sonic-swss-common artifact: ${{ parameters.swss_common_artifact_name }} runVersion: 'latestFromBranch' - runBranch: 'refs/heads/$(BUILD_BRANCH)' - path: $(Build.ArtifactStagingDirectory)/download + runBranch: 'refs/heads/${{ parameters.swss_common_branch }}' + allowPartiallySucceededBuilds: true + path: $(Build.ArtifactStagingDirectory)/download/swsscommon patterns: | libswsscommon_1.0.0_${{ parameters.arch }}.deb libswsscommon-dev_1.0.0_${{ parameters.arch }}.deb @@ -88,45 +124,66 @@ jobs: - task: DownloadPipelineArtifact@2 inputs: source: specific - project: build - pipeline: Azure.sonic-sairedis + project: ${{ parameters.sairedis_artifact_project }} + pipeline: ${{ parameters.sairedis_artifact_pipeline }} artifact: ${{ parameters.sairedis_artifact_name }} runVersion: 'latestFromBranch' - runBranch: 'refs/heads/$(BUILD_BRANCH)' - path: $(Build.ArtifactStagingDirectory)/download + runBranch: 'refs/heads/${{ parameters.sairedis_artifact_branch }}' + allowPartiallySucceededBuilds: true + path: $(Build.ArtifactStagingDirectory)/download/sairedis patterns: | - libsaivs_*.deb - libsaivs-dev_*.deb - libsairedis_*.deb - libsairedis-dev_*.deb - libsaimetadata_*.deb - libsaimetadata-dev_*.deb - syncd-vs_*.deb + ${{ parameters.sairedis_artifact_pattern }}/libsaivs_*.deb + ${{ parameters.sairedis_artifact_pattern }}/libsaivs-dev_*.deb + ${{ parameters.sairedis_artifact_pattern }}/libsairedis_*.deb + ${{ parameters.sairedis_artifact_pattern }}/libsairedis-dev_*.deb + ${{ parameters.sairedis_artifact_pattern }}/libsaimetadata_*.deb + ${{ parameters.sairedis_artifact_pattern }}/libsaimetadata-dev_*.deb + ${{ parameters.sairedis_artifact_pattern }}/syncd-vs_*.deb displayName: "Download sonic sairedis deb packages" - task: DownloadPipelineArtifact@2 inputs: source: specific - project: build - pipeline: Azure.sonic-buildimage.common_libs + project: ${{ parameters.common_lib_artifact_project }} + pipeline: ${{ parameters.common_lib_artifact_pipeline }} artifact: ${{ 
parameters.common_lib_artifact_name }} runVersion: 'latestFromBranch' - runBranch: 'refs/heads/$(BUILD_BRANCH)' - path: $(Build.ArtifactStagingDirectory)/download + runBranch: 'refs/heads/${{ parameters.common_lib_artifact_branch }}' + path: $(Build.ArtifactStagingDirectory)/download/common patterns: | - target/debs/buster/libnl-3-200_*.deb - target/debs/buster/libnl-3-dev_*.deb - target/debs/buster/libnl-genl-3-200_*.deb - target/debs/buster/libnl-genl-3-dev_*.deb - target/debs/buster/libnl-route-3-200_*.deb - target/debs/buster/libnl-route-3-dev_*.deb - target/debs/buster/libnl-nf-3-200_*.deb - target/debs/buster/libnl-nf-3-dev_*.deb + target/debs/bullseye/libnl-3-200_*.deb + target/debs/bullseye/libnl-3-dev_*.deb + target/debs/bullseye/libnl-genl-3-200_*.deb + target/debs/bullseye/libnl-genl-3-dev_*.deb + target/debs/bullseye/libnl-route-3-200_*.deb + target/debs/bullseye/libnl-route-3-dev_*.deb + target/debs/bullseye/libnl-nf-3-200_*.deb + target/debs/bullseye/libnl-nf-3-dev_*.deb + target/debs/bullseye/libyang_*.deb + target/debs/bullseye/libprotobuf*.deb + target/debs/bullseye/libprotoc*.deb + target/debs/bullseye/protobuf-compiler*.deb displayName: "Download common libs" + - task: DownloadPipelineArtifact@2 + inputs: + source: specific + project: build + pipeline: sonic-net.sonic-dash-api + ${{ if eq(parameters.arch, 'amd64') }}: + artifact: sonic-dash-api + ${{ else }}: + artifact: sonic-dash-api.${{ parameters.arch }} + runVersion: 'latestFromBranch' + runBranch: 'refs/heads/$(BUILD_BRANCH)' + path: $(Build.ArtifactStagingDirectory)/download/common + patterns: | + libdashapi*.deb + displayName: "Download dash api" - script: | set -ex cd download - sudo dpkg -i $(find target/debs/buster -type f) - sudo dpkg -i $(ls *.deb) + sudo dpkg -i $(find common -type f -name '*.deb') + sudo dpkg -i $(find swsscommon -type f -name '*.deb') + sudo dpkg -i $(find sairedis -type f -name '*.deb') cd .. 
rm -rf download workingDirectory: $(Build.ArtifactStagingDirectory) @@ -137,6 +194,10 @@ jobs: cp -r pytest.tgz $(Build.ArtifactStagingDirectory)/ if [ '${{ parameters.archive_gcov }}' == True ]; then export ENABLE_GCOV=y + echo BUILD_DIR=$(pwd) > build.info + fi + if [ '${{ parameters.asan }}' == True ]; then + export ENABLE_ASAN=y fi ./autogen.sh dpkg-buildpackage -us -uc -b -j$(nproc) && cp ../*.deb . diff --git a/.azure-pipelines/docker-sonic-vs/Dockerfile b/.azure-pipelines/docker-sonic-vs/Dockerfile index 935dec1386..d3664cb1c0 100644 --- a/.azure-pipelines/docker-sonic-vs/Dockerfile +++ b/.azure-pipelines/docker-sonic-vs/Dockerfile @@ -1,24 +1,37 @@ FROM docker-sonic-vs ARG docker_container_name +ARG need_dbg +ARG build_dir +ENV BUILD_DIR=$build_dir -ADD ["debs", "/debs"] +COPY ["debs", "/debs"] -RUN dpkg --purge python-swsscommon python3-swsscommon swss libsairedis sonic-db-cli libswsscommon libsaimetadata libsaivs syncd-vs +# Remove existing packages first before installing the new/current packages. This is to overcome limitations with +# Docker's diff detection mechanism, where only the file size and the modification timestamp (which will remain the +# same, even though contents have changed) are checked between the previous and current layer. 
+RUN dpkg --purge libswsscommon python3-swsscommon sonic-db-cli libsaimetadata libsairedis libsaivs syncd-vs swss sonic-eventd libdashapi -RUN dpkg -i /debs/libswsscommon_1.0.0_amd64.deb -RUN dpkg -i /debs/python-swsscommon_1.0.0_amd64.deb -RUN dpkg -i /debs/python3-swsscommon_1.0.0_amd64.deb -RUN dpkg -i /debs/sonic-db-cli_1.0.0_amd64.deb +RUN dpkg -i /debs/libdashapi_1.0.0_amd64.deb \ + /debs/libswsscommon_1.0.0_amd64.deb \ + /debs/python3-swsscommon_1.0.0_amd64.deb \ + /debs/sonic-db-cli_1.0.0_amd64.deb \ + /debs/libsaimetadata_1.0.0_amd64.deb \ + /debs/libsairedis_1.0.0_amd64.deb \ + /debs/libsaivs_1.0.0_amd64.deb \ + /debs/syncd-vs_1.0.0_amd64.deb \ + /debs/swss_1.0.0_amd64.deb -RUN dpkg -i /debs/libsaimetadata_1.0.0_amd64.deb -RUN dpkg -i /debs/libsairedis_1.0.0_amd64.deb -RUN dpkg -i /debs/libsaivs_1.0.0_amd64.deb -RUN dpkg -i /debs/syncd-vs_1.0.0_amd64.deb - -RUN dpkg --purge swss -RUN dpkg -i /debs/swss_1.0.0_amd64.deb +RUN if [ "$need_dbg" = "y" ] ; then dpkg -i /debs/swss-dbg_1.0.0_amd64.deb ; fi RUN apt-get update -RUN apt-get -y install lcov +RUN apt-get -y install software-properties-common libdatetime-perl libcapture-tiny-perl build-essential libcpanel-json-xs-perl git + +RUN git clone -b v2.0 --single-branch --depth 1 https://github.com/linux-test-project/lcov && cd lcov && make install + +RUN lcov --version + +RUN pip3 install lcov_cobertura + +RUN if [ -n "$BUILD_DIR" ]; then mkdir -p $BUILD_DIR && tar -xf /tmp/gcov/gcov-source.tar -C $BUILD_DIR; fi diff --git a/.azure-pipelines/gcov.yml b/.azure-pipelines/gcov.yml index 0bd769222d..9bce6feccd 100644 --- a/.azure-pipelines/gcov.yml +++ b/.azure-pipelines/gcov.yml @@ -47,7 +47,7 @@ jobs: vmImage: 'ubuntu-20.04' variables: - DIFF_COVER_CHECK_THRESHOLD: 50 + DIFF_COVER_CHECK_THRESHOLD: 80 DIFF_COVER_ENABLE: 'true' container: @@ -59,9 +59,9 @@ jobs: set -ex # Install .NET CORE curl -sSL https://packages.microsoft.com/keys/microsoft.asc | sudo apt-key add - - sudo apt-add-repository 
https://packages.microsoft.com/debian/10/prod + sudo apt-add-repository https://packages.microsoft.com/debian/11/prod sudo apt-get update - sudo apt-get install -y dotnet-sdk-5.0 + sudo apt-get install -y dotnet-sdk-7.0 displayName: "Install .NET CORE" - script: | sudo apt-get install -y lcov @@ -122,6 +122,8 @@ jobs: codeCoverageTool: Cobertura summaryFileLocation: '$(Build.ArtifactStagingDirectory)/gcov_output/AllMergeReport/coverage.xml' reportDirectory: '$(Build.ArtifactStagingDirectory)/gcov_output/AllMergeReport/' + pathToSources: '$(Build.SourcesDirectory)' + failIfCoverageEmpty: true displayName: 'Publish c c++ test coverage' condition: eq('${{ parameters.archive_gcov }}', true) diff --git a/.azure-pipelines/test-docker-sonic-vs-template.yml b/.azure-pipelines/test-docker-sonic-vs-template.yml index 2dc8e3c567..dbddd64077 100644 --- a/.azure-pipelines/test-docker-sonic-vs-template.yml +++ b/.azure-pipelines/test-docker-sonic-vs-template.yml @@ -16,23 +16,53 @@ parameters: type: boolean default: false +- name: docker_sonic_vs_name + type: string + default: docker-sonic-vs + +- name: swss_common_branch + type: string + default: '$(BUILD_BRANCH)' + +- name: sonic_buildimage_ubuntu20_04 + type: string + default: '$(BUILD_BRANCH)' + +- name: asan + type: boolean + default: false + +- name: num_ports + type: number + default: 0 + +- name: run_tests_pattern + type: string + default: "" + jobs: - job: displayName: vstest timeoutInMinutes: ${{ parameters.timeout }} + ${{ if parameters.archive_gcov }}: + variables: + DIFF_COVER_CHECK_THRESHOLD: 80 + DIFF_COVER_ENABLE: 'true' + DIFF_COVER_COVERAGE_FILES: Cobertura.xml - pool: sonic-common + pool: sonic-common-test steps: - script: | + ip a show dev eth0 || true ls -A1 | xargs -I{} sudo rm -rf {} displayName: "Clean workspace" - checkout: self - task: DownloadPipelineArtifact@2 inputs: - artifact: docker-sonic-vs + artifact: ${{ parameters.docker_sonic_vs_name }} path: $(Build.ArtifactStagingDirectory)/download - 
displayName: "Download pre-stage built docker-sonic-vs" + displayName: "Download pre-stage built ${{ parameters.docker_sonic_vs_name }}" - task: DownloadPipelineArtifact@2 inputs: source: specific @@ -40,22 +70,45 @@ jobs: pipeline: Azure.sonic-swss-common artifact: sonic-swss-common.amd64.ubuntu20_04 runVersion: 'latestFromBranch' - runBranch: 'refs/heads/$(BUILD_BRANCH)' + runBranch: 'refs/heads/${{ parameters.swss_common_branch }}' path: $(Build.ArtifactStagingDirectory)/download displayName: "Download sonic swss common deb packages" + - task: DownloadPipelineArtifact@2 + inputs: + source: specific + project: build + pipeline: sonic-net.sonic-buildimage-ubuntu20.04 + artifact: sonic-buildimage.amd64.ubuntu20_04 + runVersion: 'latestFromBranch' + runBranch: 'refs/heads/${{ parameters.sonic_buildimage_ubuntu20_04 }}' + path: $(Build.ArtifactStagingDirectory)/download + displayName: "Download sonic buildimage ubuntu20.04 deb packages" + + - script: | + set -ex + # Install .NET CORE + curl -sSL https://packages.microsoft.com/keys/microsoft.asc | sudo apt-key add - + sudo apt-add-repository https://packages.microsoft.com/ubuntu/20.04/prod + sudo apt-get update + sudo apt-get install -y dotnet-sdk-7.0 + sudo dotnet tool install dotnet-reportgenerator-globaltool --tool-path /usr/bin + displayName: "Install .NET CORE" - script: | set -ex sudo .azure-pipelines/build_and_install_module.sh - sudo apt-get install -y libhiredis0.14 + sudo apt-get install -y libhiredis0.14 libyang0.16 + sudo dpkg -i $(Build.ArtifactStagingDirectory)/download/libprotobuf*_amd64.deb $(Build.ArtifactStagingDirectory)/download/libprotobuf-lite*_amd64.deb $(Build.ArtifactStagingDirectory)/download/python3-protobuf*_amd64.deb + sudo dpkg -i $(Build.ArtifactStagingDirectory)/download/libdashapi*.deb sudo dpkg -i --force-confask,confnew $(Build.ArtifactStagingDirectory)/download/libswsscommon_1.0.0_amd64.deb || apt-get install -f sudo dpkg -i 
$(Build.ArtifactStagingDirectory)/download/python3-swsscommon_1.0.0_amd64.deb # install packages for vs test sudo apt-get install -y net-tools bridge-utils vlan sudo apt-get install -y python3-pip - sudo pip3 install pytest==4.6.2 attrs==19.1.0 exabgp==4.0.10 distro==1.5.0 docker==4.4.1 redis==3.3.4 flaky==3.7.0 + sudo pip3 install pytest==4.6.2 attrs==19.1.0 exabgp==4.0.10 distro==1.5.0 docker>=4.4.1 redis==3.3.4 flaky==3.7.0 + sudo pip3 install lcov_cobertura displayName: "Install dependencies" - script: | @@ -68,65 +121,82 @@ jobs: sudo /sbin/ip link del Vrf1 type vrf table 1001 pushd tests + params="" if [ '${{ parameters.archive_gcov }}' == True ]; then - all_tests=$(ls test_*.py) - all_tests="${all_tests} p4rt" - test_set=() - # Run 20 tests as a set. - for test in ${all_tests}; do - test_set+=("${test}") - if [ ${#test_set[@]} -ge 20 ]; then - test_name=$(echo "${test_set[0]}" | cut -d "." -f 1) - echo "${test_set[*]}" | xargs sudo py.test -v --force-flaky --junitxml="${test_name}_tr.xml" --keeptb --imgname=docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber) - container_count=$(docker ps -q -a | wc -l) - if [ ${container_count} -gt 0 ]; then - ./gcov_support.sh set_environment $(Build.ArtifactStagingDirectory) - docker stop $(docker ps -q -a) - docker rm $(docker ps -q -a) - fi - test_set=() - fi - done - if [ ${#test_set[@]} -gt 0 ]; then - test_name=$(echo "${test_set[0]}" | cut -d "." 
-f 1) - echo "${test_set[*]}" | xargs sudo py.test -v --force-flaky --junitxml="${test_name}_tr.xml" --keeptb --imgname=docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber) - container_count=$(docker ps -q -a | wc -l) - if [ ${container_count} -gt 0 ]; then - ./gcov_support.sh set_environment $(Build.ArtifactStagingDirectory) - docker stop $(docker ps -q -a) - docker rm $(docker ps -q -a) - fi - fi - else - sudo py.test -v --force-flaky --junitxml=tests_tr.xml --imgname=docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber) + cp $(Build.ArtifactStagingDirectory)/download/coverage.info ./ + cp $(Build.ArtifactStagingDirectory)/download/coverage.xml ./ + params=" ${params} --enable-coverage --force-recreate-dvs " + fi + if [ '${{ parameters.asan }}' == True ]; then + params=" ${params} --graceful-stop " + fi + if [ ${{ parameters.num_ports }} -gt 0 ]; then + params=" ${params} --num-ports=${{ parameters.num_ports }} " + fi + + all_tests=$(ls test_*.py | xargs) + all_tests="${all_tests} p4rt" + + if [ -n '${{ parameters.run_tests_pattern }}' ]; then + all_tests=" $(ls ${{ parameters.run_tests_pattern }} | xargs) " fi + # Run the tests in parallel and retry + retry=3 + IMAGE_NAME=docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber).asan-${{ parameters.asan }} + echo $all_tests | xargs -n 1 | xargs -P 8 -I TEST_MODULE sudo ./run-tests.sh "$IMAGE_NAME" "$params" "TEST_MODULE" 3 + rm -rf $(Build.ArtifactStagingDirectory)/download displayName: "Run vs tests" + continueOnError: ${{ parameters.asan }} + + - script: | + set -ex + reportgenerator -reporttypes:Cobertura -reports:tests/*coverage.xml -targetdir:. 
+ mkdir $(Build.ArtifactStagingDirectory)/gcov + cp Cobertura.xml tests/*coverage.xml $(Build.ArtifactStagingDirectory)/gcov/ + cp tests/*coverage.info $(Build.ArtifactStagingDirectory)/gcov/ + condition: ${{ parameters.archive_gcov }} + displayName: "Generate coverage.xml" + + - task: PublishCodeCoverageResults@1 + condition: ${{ parameters.archive_gcov }} + inputs: + codeCoverageTool: Cobertura + summaryFileLocation: '$(System.DefaultWorkingDirectory)/Cobertura.xml' + displayName: 'Publish test coverage' - task: PublishTestResults@2 inputs: testResultsFiles: '**/*_tr.xml' testRunTitle: vstest - condition: always() + condition: succeeded() - script: | cp -r tests/log $(Build.ArtifactStagingDirectory)/ - if [ '${{ parameters.archive_gcov }}' == True ]; then - sudo apt-get install -y lcov - cd $(Build.ArtifactStagingDirectory)/gcov_tmp/ - tar -zcvf sonic-gcov.tar.gz sonic-gcov/ - rm -rf sonic-gcov + if [ '${{ parameters.asan }}' == True ]; then + cp -vr tests/log/*/log/asan $(Build.ArtifactStagingDirectory)/ fi displayName: "Collect logs" condition: always() - - publish: $(Build.ArtifactStagingDirectory)/gcov_tmp - artifact: ${{ parameters.gcov_artifact_name }} - displayName: "Publish gcov output" - condition: and(succeeded(), eq('${{ parameters.archive_gcov }}', true)) - - publish: $(Build.ArtifactStagingDirectory)/ artifact: ${{ parameters.log_artifact_name }}@$(System.JobAttempt) displayName: "Publish logs" condition: always() + + - publish: $(Build.ArtifactStagingDirectory)/asan + artifact: asan-reports + displayName: "Publish ASAN reports" + condition: eq('${{ parameters.asan }}', true) + + - script: | + if [ "$(ls -A $(Build.ArtifactStagingDirectory)/asan)" ]; then + echo "There are issues reported by ASAN" + exit 1 + else + echo "No issues reported by ASAN" + fi + displayName: "Check ASAN reports" + condition: eq('${{ parameters.asan }}', true) + continueOnError: true diff --git a/.github/codeql/codeql-config.yml b/.github/codeql/codeql-config.yml new file 
mode 100644 index 0000000000..2c8b0498f3 --- /dev/null +++ b/.github/codeql/codeql-config.yml @@ -0,0 +1,4 @@ +name: "CodeQL config" +queries: + - uses: security-and-quality + - uses: security-extended diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml new file mode 100644 index 0000000000..3c1596eef9 --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,157 @@ +# For more infomation, please visit: https://github.com/github/codeql-action + +name: "CodeQL" + +on: + push: + branches: + - 'master' + - '202[0-9][0-9][0-9]' + pull_request_target: + branches: + - 'master' + - '202[0-9][0-9][0-9]' + workflow_dispatch: + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-20.04 + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'cpp','python' ] + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v2.1.29 + with: + config-file: ./.github/codeql/codeql-config.yml + languages: ${{ matrix.language }} + + - if: matrix.language == 'cpp' + name: Install prerequisites + run: | + sudo apt-get update + sudo apt-get install -y libxml-simple-perl \ + aspell \ + aspell-en \ + libhiredis-dev \ + libnl-3-dev \ + libnl-genl-3-dev \ + libnl-route-3-dev \ + libnl-nf-3-dev \ + libyang-dev \ + libzmq3-dev \ + libzmq5 \ + swig3.0 \ + libpython2.7-dev \ + libgtest-dev \ + libgmock-dev \ + libboost1.71-dev \ + libboost-serialization1.71-dev \ + dh-exec \ + doxygen \ + cdbs \ + bison \ + flex \ + graphviz \ + autoconf-archive \ + uuid-dev \ + libjansson-dev \ + nlohmann-json3-dev \ + python \ + stgit + + - if: matrix.language == 'cpp' + name: Build sonic-swss-common + run: | + cd .. 
+ git clone https://github.com/sonic-net/sonic-swss-common + pushd sonic-swss-common + ./autogen.sh + dpkg-buildpackage -rfakeroot -us -uc -b -j$(nproc) + popd + dpkg-deb -x libswsscommon_${SWSSCOMMON_VER}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libswsscommon-dev_${SWSSCOMMON_VER}_amd64.deb $(dirname $GITHUB_WORKSPACE) + env: + SWSSCOMMON_VER: "1.0.0" + + - if: matrix.language == 'cpp' + name: Build sonic-sairedis + run: | + cd .. + git clone --recursive https://github.com/sonic-net/sonic-sairedis + pushd sonic-sairedis + ./autogen.sh + DEB_BUILD_OPTIONS=nocheck \ + SWSS_COMMON_INC="$(dirname $GITHUB_WORKSPACE)/usr/include" \ + SWSS_COMMON_LIB="$(dirname $GITHUB_WORKSPACE)/usr/lib/x86_64-linux-gnu" \ + DEB_CFLAGS_SET="-Wno-error" DEB_CXXFLAGS_SET="-Wno-error" \ + dpkg-buildpackage -rfakeroot -us -uc -b -Psyncd,vs,nopython2 -j$(nproc) + popd + dpkg-deb -x libsairedis_${SAIREDIS_VER}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libsairedis-dev_${SAIREDIS_VER}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libsaimetadata_${SAIREDIS_VER}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libsaimetadata-dev_${SAIREDIS_VER}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libsaivs_${SAIREDIS_VER}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libsaivs-dev_${SAIREDIS_VER}_amd64.deb $(dirname $GITHUB_WORKSPACE) + env: + SAIREDIS_VER: "1.0.0" + + # Inject libnl deb only after sonic-sairedis compilation is done. + - if: matrix.language == 'cpp' + name: Build libnl + run: | + cd .. 
+ git clone https://github.com/sonic-net/sonic-buildimage + pushd sonic-buildimage/src/libnl3 + git clone https://github.com/thom311/libnl libnl3-${LIBNL3_VER} + pushd libnl3-${LIBNL3_VER} + git checkout tags/libnl${LIBNL3_VER//./_} + git checkout -b sonic + git config --local user.name $USER + git config --local user.email $USER@microsoft.com + stg init + stg import -s ../patch/series + git config --local --unset user.name + git config --local --unset user.email + ln -s ../debian debian + dpkg-buildpackage -rfakeroot -us -uc -b -j$(nproc) + popd + dpkg-deb -x libnl-3-200_${LIBNL3_VER}-${LIBNL3_REV}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libnl-3-dev_${LIBNL3_VER}-${LIBNL3_REV}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libnl-genl-3-200_${LIBNL3_VER}-${LIBNL3_REV}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libnl-genl-3-dev_${LIBNL3_VER}-${LIBNL3_REV}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libnl-route-3-200_${LIBNL3_VER}-${LIBNL3_REV}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libnl-route-3-dev_${LIBNL3_VER}-${LIBNL3_REV}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libnl-nf-3-200_${LIBNL3_VER}-${LIBNL3_REV}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libnl-nf-3-dev_${LIBNL3_VER}-${LIBNL3_REV}_amd64.deb $(dirname $GITHUB_WORKSPACE) + popd + env: + LIBNL3_VER: "3.5.0" + LIBNL3_REV: "1" + + - if: matrix.language == 'cpp' + name: Build repository + run: | + ./autogen.sh + ./configure --prefix=/usr \ + --with-extra-inc=$(dirname $GITHUB_WORKSPACE)/usr/include \ + --with-extra-lib=$(dirname $GITHUB_WORKSPACE)/lib/x86_64-linux-gnu \ + --with-extra-usr-lib=$(dirname $GITHUB_WORKSPACE)/usr/lib/x86_64-linux-gnu \ + --with-libnl-3.0-inc=$(dirname $GITHUB_WORKSPACE)/usr/include/libnl3 + + - name: Perform CodeQL analysis + uses: github/codeql-action/analyze@v2.1.29 + with: + category: "/language:${{matrix.language}}" diff --git a/.github/workflows/semgrep.yml b/.github/workflows/semgrep.yml new file mode 
100644 index 0000000000..975769a505 --- /dev/null +++ b/.github/workflows/semgrep.yml @@ -0,0 +1,22 @@ +name: Semgrep + +on: + pull_request: {} + push: + branches: + - master + - '201[7-9][0-1][0-9]' + - '202[0-9][0-1][0-9]' + +jobs: + semgrep: + if: github.repository_owner == 'sonic-net' + name: Semgrep + runs-on: ubuntu-latest + container: + image: returntocorp/semgrep + steps: + - uses: actions/checkout@v3 + - run: semgrep ci + env: + SEMGREP_RULES: p/default diff --git a/.gitignore b/.gitignore index c2522ba711..001db00e4b 100644 --- a/.gitignore +++ b/.gitignore @@ -74,6 +74,11 @@ swssconfig/swssplayer tlm_teamd/tlm_teamd teamsyncd/teamsyncd tests/tests +tests/mock_tests/tests_response_publisher +tests/mock_tests/tests_fpmsyncd +tests/mock_tests/tests_intfmgrd +tests/mock_tests/tests_teammgrd +tests/mock_tests/tests_portsyncd # Test Files # @@ -85,5 +90,7 @@ tests/mock_tests/tests.trs tests/test-suite.log tests/tests.log tests/tests.trs +tests/mock_tests/**/*log +tests/mock_tests/**/*trs orchagent/p4orch/tests/**/*gcda orchagent/p4orch/tests/**/*gcno diff --git a/README.md b/README.md index 32492bc29b..e627f04317 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,18 @@ -[![Total alerts](https://img.shields.io/lgtm/alerts/g/Azure/sonic-swss.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/Azure/sonic-swss/alerts/) -[![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/Azure/sonic-swss.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/Azure/sonic-swss/context:python) -[![Language grade: C/C++](https://img.shields.io/lgtm/grade/cpp/g/Azure/sonic-swss.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/Azure/sonic-swss/context:cpp) +*static analysis:* + +[![Total alerts](https://img.shields.io/lgtm/alerts/g/sonic-net/sonic-swss.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/sonic-net/sonic-swss/alerts/) +[![Language grade: 
Python](https://img.shields.io/lgtm/grade/python/g/sonic-net/sonic-swss.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/sonic-net/sonic-swss/context:python) +[![Language grade: C/C++](https://img.shields.io/lgtm/grade/cpp/g/sonic-net/sonic-swss.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/sonic-net/sonic-swss/context:cpp) + +*sonic-swss builds:* + +[![master build](https://dev.azure.com/mssonic/build/_apis/build/status/Azure.sonic-swss?branchName=master&label=master)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=15&branchName=master) +[![202205 build](https://dev.azure.com/mssonic/build/_apis/build/status/Azure.sonic-swss?branchName=202205&label=202205)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=15&branchName=202205) +[![202111 build](https://dev.azure.com/mssonic/build/_apis/build/status/Azure.sonic-swss?branchName=202111&label=202111)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=15&branchName=202111) +[![202106 build](https://dev.azure.com/mssonic/build/_apis/build/status/Azure.sonic-swss?branchName=202106&label=202106)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=15&branchName=202106) +[![202012 build](https://dev.azure.com/mssonic/build/_apis/build/status/Azure.sonic-swss?branchName=202012&label=202012)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=15&branchName=202012) +[![201911 build](https://dev.azure.com/mssonic/build/_apis/build/status/Azure.sonic-swss?branchName=201911&label=201911)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=15&branchName=201911) -[![VS](https://sonic-jenkins.westus2.cloudapp.azure.com/job/vs/job/sonic-swss-build/badge/icon?subject=VS%20build)](https://sonic-jenkins.westus2.cloudapp.azure.com/job/vs/job/sonic-swss-build/) # SONiC - SWitch State Service - SWSS @@ -41,7 +51,7 @@ For your convenience, you can install prepared packages on Debian Jessie: #### Install from Source -Checkout the source: `git 
clone https://github.com/Azure/sonic-swss.git` and install it yourself. +Checkout the source: `git clone https://github.com/sonic-net/sonic-swss.git` and install it yourself. Get SAI header files into /usr/include/sai. Put the SAI header files that you use to compile libsairedis into /usr/include/sai @@ -64,13 +74,13 @@ You can also build a debian package using: ## Need Help? For general questions, setup help, or troubleshooting: -- [sonicproject on Google Groups](https://groups.google.com/d/forum/sonicproject) +- [sonicproject on Google Groups](https://groups.google.com/g/sonicproject) For bug reports or feature requests, please open an Issue. ## Contribution guide -See the [contributors guide](https://github.com/Azure/SONiC/blob/gh-pages/CONTRIBUTING.md) for information about how to contribute. +See the [contributors guide](https://github.com/sonic-net/SONiC/wiki/Becoming-a-contributor) for information about how to contribute. ### GitHub Workflow diff --git a/azure-pipelines-dash.yml b/azure-pipelines-dash.yml new file mode 100644 index 0000000000..3e77d0b26e --- /dev/null +++ b/azure-pipelines-dash.yml @@ -0,0 +1,75 @@ +# C/C++ with GCC +# Build your C/C++ project with GCC using make. 
+# Add steps that publish test results, save build artifacts, deploy, and more: +# https://docs.microsoft.com/azure/devops/pipelines/apps/c-cpp/gcc + +pr: +- dash + +trigger: + batch: true + branches: + include: + - dash + +# this part need to be set in UI +schedules: +- cron: "0 0 * * 6" + displayName: Weekly build + branches: + include: + - dash + always: true + +variables: + - name: BUILD_BRANCH + ${{ if eq(variables['Build.Reason'], 'PullRequest') }}: + value: $(System.PullRequest.TargetBranch) + ${{ else }}: + value: $(Build.SourceBranchName) + +stages: +- stage: Build + jobs: + - template: .azure-pipelines/build-template.yml + parameters: + arch: amd64 + sonic_slave: sonic-slave-bullseye + swss_common_artifact_name: sonic-swss-common + swss_common_branch: master + artifact_name: sonic-swss + archive_pytests: true + archive_gcov: true + sairedis_artifact_name: sonic-sairedis + sairedis_artifact_branch: master + common_lib_artifact_name: common-lib + common_lib_artifact_branch: master + + +- stage: BuildDocker + dependsOn: Build + condition: succeeded('Build') + jobs: + - template: .azure-pipelines/build-docker-sonic-vs-template.yml + parameters: + buildimage_artifact_branch: master + swss_common_artifact_name: sonic-swss-common + swss_common_branch: master + swss_artifact_name: sonic-swss + artifact_name: docker-sonic-vs + sairedis_artifact_name: sonic-sairedis + sairedis_artifact_branch: master + +- stage: Test + dependsOn: BuildDocker + condition: succeeded('BuildDocker') + jobs: + - template: .azure-pipelines/test-docker-sonic-vs-template.yml + parameters: + log_artifact_name: log + gcov_artifact_name: sonic-gcov + sonic_slave: sonic-slave-bullseye + swss_common_branch: master + archive_gcov: true + num_ports: 2 + run_tests_pattern: test_dash_*.py diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 166905654e..083fb1047c 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -41,7 +41,8 @@ stages: - template: 
.azure-pipelines/build-template.yml parameters: arch: amd64 - sonic_slave: sonic-slave-buster + pool: sonicbld + sonic_slave: sonic-slave-bullseye common_lib_artifact_name: common-lib swss_common_artifact_name: sonic-swss-common sairedis_artifact_name: sonic-sairedis @@ -49,6 +50,20 @@ stages: archive_pytests: true archive_gcov: true +- stage: BuildAsan + dependsOn: [] + jobs: + - template: .azure-pipelines/build-template.yml + parameters: + arch: amd64 + pool: sonicbld + sonic_slave: sonic-slave-bullseye + common_lib_artifact_name: common-lib + swss_common_artifact_name: sonic-swss-common + sairedis_artifact_name: sonic-sairedis + artifact_name: sonic-swss-asan + asan: true + - stage: BuildArm dependsOn: Build condition: succeeded('Build') @@ -58,7 +73,7 @@ stages: arch: armhf timeout: 240 pool: sonicbld-armhf - sonic_slave: sonic-slave-buster-armhf + sonic_slave: sonic-slave-bullseye-armhf common_lib_artifact_name: common-lib.armhf swss_common_artifact_name: sonic-swss-common.armhf sairedis_artifact_name: sonic-sairedis.armhf @@ -70,7 +85,7 @@ stages: arch: arm64 timeout: 240 pool: sonicbld-arm64 - sonic_slave: sonic-slave-buster-arm64 + sonic_slave: sonic-slave-bullseye-arm64 common_lib_artifact_name: common-lib.arm64 swss_common_artifact_name: sonic-swss-common.arm64 sairedis_artifact_name: sonic-sairedis.arm64 @@ -88,6 +103,18 @@ stages: swss_artifact_name: sonic-swss artifact_name: docker-sonic-vs +- stage: BuildDockerAsan + dependsOn: BuildAsan + condition: succeeded('BuildAsan') + jobs: + - template: .azure-pipelines/build-docker-sonic-vs-template.yml + parameters: + swss_common_artifact_name: sonic-swss-common + sairedis_artifact_name: sonic-sairedis + swss_artifact_name: sonic-swss-asan + artifact_name: docker-sonic-vs-asan + asan: true + - stage: Test dependsOn: BuildDocker condition: succeeded('BuildDocker') @@ -96,17 +123,29 @@ stages: parameters: log_artifact_name: log gcov_artifact_name: sonic-gcov - sonic_slave: sonic-slave-buster + sonic_slave: 
sonic-slave-bullseye archive_gcov: true +- stage: TestAsan + dependsOn: BuildDockerAsan + condition: succeeded('BuildDockerAsan') + jobs: + - template: .azure-pipelines/test-docker-sonic-vs-template.yml + parameters: + log_artifact_name: log-asan + gcov_artifact_name: sonic-gcov + sonic_slave: sonic-slave-bullseye + docker_sonic_vs_name: docker-sonic-vs-asan + asan: true + - stage: Gcov + condition: false dependsOn: Test - condition: always() jobs: - template: .azure-pipelines/gcov.yml parameters: arch: amd64 - sonic_slave: sonic-slave-buster + sonic_slave: sonic-slave-bullseye swss_common_artifact_name: sonic-swss-common sairedis_artifact_name: sonic-sairedis swss_artifact_name: sonic-swss diff --git a/cfgmgr/Makefile.am b/cfgmgr/Makefile.am index 64a57a6e58..a8cbddb4e7 100644 --- a/cfgmgr/Makefile.am +++ b/cfgmgr/Makefile.am @@ -5,7 +5,7 @@ LIBNL_LIBS = -lnl-genl-3 -lnl-route-3 -lnl-3 SAIMETA_LIBS = -lsaimeta -lsaimetadata -lzmq COMMON_LIBS = -lswsscommon -bin_PROGRAMS = vlanmgrd teammgrd portmgrd intfmgrd buffermgrd vrfmgrd nbrmgrd vxlanmgrd sflowmgrd natmgrd coppmgrd tunnelmgrd macsecmgrd +bin_PROGRAMS = vlanmgrd teammgrd portmgrd intfmgrd buffermgrd vrfmgrd nbrmgrd vxlanmgrd sflowmgrd natmgrd coppmgrd tunnelmgrd macsecmgrd fabricmgrd cfgmgrdir = $(datadir)/swss @@ -15,7 +15,10 @@ dist_cfgmgr_DATA = \ buffer_pool_mellanox.lua \ buffer_check_headroom_vs.lua \ buffer_headroom_vs.lua \ - buffer_pool_vs.lua + buffer_pool_vs.lua \ + buffer_check_headroom_barefoot.lua \ + buffer_headroom_barefoot.lua \ + buffer_pool_barefoot.lua if DEBUG DBGFLAGS = -ggdb -DDEBUG @@ -23,85 +26,96 @@ else DBGFLAGS = -g endif -vlanmgrd_SOURCES = vlanmgrd.cpp vlanmgr.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h +COMMON_ORCH_SOURCE = $(top_srcdir)/orchagent/orch.cpp \ + $(top_srcdir)/orchagent/request_parser.cpp \ + $(top_srcdir)/orchagent/response_publisher.cpp \ + $(top_srcdir)/lib/recorder.cpp 
+ +vlanmgrd_SOURCES = vlanmgrd.cpp vlanmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h vlanmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) vlanmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) vlanmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) -teammgrd_SOURCES = teammgrd.cpp teammgr.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h +teammgrd_SOURCES = teammgrd.cpp teammgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h teammgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) teammgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) teammgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) -portmgrd_SOURCES = portmgrd.cpp portmgr.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h +portmgrd_SOURCES = portmgrd.cpp portmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h portmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) portmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) portmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) -intfmgrd_SOURCES = intfmgrd.cpp intfmgr.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/lib/subintf.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h +fabricmgrd_SOURCES = fabricmgrd.cpp fabricmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h +fabricmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +fabricmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +fabricmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) + +intfmgrd_SOURCES = intfmgrd.cpp intfmgr.cpp $(top_srcdir)/lib/subintf.cpp $(COMMON_ORCH_SOURCE) shellcmd.h intfmgrd_CFLAGS = 
$(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) intfmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) intfmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) -buffermgrd_SOURCES = buffermgrd.cpp buffermgr.cpp buffermgrdyn.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h +buffermgrd_SOURCES = buffermgrd.cpp buffermgr.cpp buffermgrdyn.cpp $(COMMON_ORCH_SOURCE) shellcmd.h buffermgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) buffermgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) buffermgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) -vrfmgrd_SOURCES = vrfmgrd.cpp vrfmgr.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h +vrfmgrd_SOURCES = vrfmgrd.cpp vrfmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h vrfmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) vrfmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) vrfmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) -nbrmgrd_SOURCES = nbrmgrd.cpp nbrmgr.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h +nbrmgrd_SOURCES = nbrmgrd.cpp nbrmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h nbrmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(LIBNL_CFLAGS) $(CFLAGS_ASAN) nbrmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(LIBNL_CPPFLAGS) $(CFLAGS_ASAN) nbrmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) $(LIBNL_LIBS) -vxlanmgrd_SOURCES = vxlanmgrd.cpp vxlanmgr.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h 
+vxlanmgrd_SOURCES = vxlanmgrd.cpp vxlanmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h vxlanmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) vxlanmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) vxlanmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) -sflowmgrd_SOURCES = sflowmgrd.cpp sflowmgr.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h +sflowmgrd_SOURCES = sflowmgrd.cpp sflowmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h sflowmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) sflowmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) sflowmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) -natmgrd_SOURCES = natmgrd.cpp natmgr.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h +natmgrd_SOURCES = natmgrd.cpp natmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h natmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) natmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) natmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) -coppmgrd_SOURCES = coppmgrd.cpp coppmgr.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h +coppmgrd_SOURCES = coppmgrd.cpp coppmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h coppmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) coppmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) coppmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) -tunnelmgrd_SOURCES = tunnelmgrd.cpp tunnelmgr.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp 
$(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h +tunnelmgrd_SOURCES = tunnelmgrd.cpp tunnelmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h tunnelmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) tunnelmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) tunnelmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) -macsecmgrd_SOURCES = macsecmgrd.cpp macsecmgr.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h +macsecmgrd_SOURCES = macsecmgrd.cpp macsecmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h macsecmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) macsecmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) macsecmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) if GCOV_ENABLED -vlanmgrd_LDADD += -lgcovpreload -teammgrd_LDADD += -lgcovpreload -portmgrd_LDADD += -lgcovpreload -intfmgrd_LDADD+= -lgcovpreload -buffermgrd_LDADD += -lgcovpreload -vrfmgrd_LDADD += -lgcovpreload -nbrmgrd_LDADD += -lgcovpreload -vxlanmgrd_LDADD += -lgcovpreload -sflowmgrd_LDADD += -lgcovpreload -natmgrd_LDADD += -lgcovpreload -coppmgrd_LDADD += -lgcovpreload -tunnelmgrd_LDADD += -lgcovpreload -macsecmgrd_LDADD += -lgcovpreload +vlanmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +teammgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +portmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +fabricmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +intfmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +buffermgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +vrfmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +nbrmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +vxlanmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +sflowmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +natmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +coppmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp 
+tunnelmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +macsecmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp endif if ASAN_ENABLED @@ -118,5 +132,6 @@ natmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp coppmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp tunnelmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp macsecmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp +fabricmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp endif diff --git a/cfgmgr/buffer_check_headroom_barefoot.lua b/cfgmgr/buffer_check_headroom_barefoot.lua new file mode 100644 index 0000000000..74551b1a42 --- /dev/null +++ b/cfgmgr/buffer_check_headroom_barefoot.lua @@ -0,0 +1,6 @@ +local ret = {} + +table.insert(ret, "result:true") +table.insert(ret, "debug:No need to check port headroom limit as shared headroom pool model is supported.") + +return ret diff --git a/cfgmgr/buffer_check_headroom_mellanox.lua b/cfgmgr/buffer_check_headroom_mellanox.lua index 20b62d2938..1b6851f77d 100644 --- a/cfgmgr/buffer_check_headroom_mellanox.lua +++ b/cfgmgr/buffer_check_headroom_mellanox.lua @@ -5,7 +5,7 @@ local port = KEYS[1] local input_profile_name = ARGV[1] -local input_profile_size = ARGV[2] +local input_profile_size = tonumber(ARGV[2]) local new_pg = ARGV[3] local function is_port_with_8lanes(lanes) @@ -60,7 +60,8 @@ if is_port_with_8lanes(lanes) then pipeline_latency = pipeline_latency * 2 - 1 egress_mirror_size = egress_mirror_size * 2 end -accumulative_size = accumulative_size + 2 * pipeline_latency * 1024 + egress_mirror_size +local lossy_pg_size = pipeline_latency * 1024 +accumulative_size = accumulative_size + lossy_pg_size + egress_mirror_size -- Fetch all keys in BUFFER_PG according to the port redis.call('SELECT', appl_db) @@ -81,41 +82,61 @@ local function get_number_of_pgs(keyname) return size end -local no_input_pg = true -if new_pg ~= nil then - if get_number_of_pgs(new_pg) ~= 0 then - no_input_pg = false - new_pg = 'BUFFER_PG_TABLE:' .. 
new_pg +-- Fetch all the pending removing PGs +local pending_remove_pg_keys = redis.call('SMEMBERS', 'BUFFER_PG_TABLE_DEL_SET') +local pending_remove_pg_set = {} +for i = 1, #pending_remove_pg_keys do + pending_remove_pg_set['BUFFER_PG_TABLE:' .. pending_remove_pg_keys[i]] = true + table.insert(debuginfo, 'debug:pending remove entry found: ' .. 'BUFFER_PG_TABLE:' .. pending_remove_pg_keys[i]) +end + +-- Fetch all the PGs in APPL_DB, and store them into a hash table +-- But skip the items that are in pending_remove_pg_set +local pg_keys = redis.call('KEYS', 'BUFFER_PG_TABLE:' .. port .. ':*') +local all_pgs = {} +for i = 1, #pg_keys do + if not pending_remove_pg_set[pg_keys[i]] then + local profile = redis.call('HGET', pg_keys[i], 'profile') + all_pgs[pg_keys[i]] = profile + else + table.insert(debuginfo, 'debug:pending remove entry skipped: ' .. pg_keys[i]) end end --- Fetch all the PGs, accumulate the sizes +-- Fetch all the pending PGs, and store them into the hash table +-- Overwrite any existing entries +local pending_pg_keys = redis.call('KEYS', '_BUFFER_PG_TABLE:' .. port .. ':*') +for i = 1, #pending_pg_keys do + local profile = redis.call('HGET', pending_pg_keys[i], 'profile') + -- Remove the leading underscore when storing it into the hash table + all_pgs[string.sub(pending_pg_keys[i], 2, -1)] = profile + table.insert(debuginfo, 'debug:pending entry: ' .. pending_pg_keys[i] .. ':' .. profile) +end + +if new_pg ~= nil and get_number_of_pgs(new_pg) ~= 0 then + all_pgs['BUFFER_PG_TABLE:' .. new_pg] = input_profile_name +end + +-- Handle all the PGs, accumulate the sizes -- Assume there is only one lossless profile configured among all PGs on each port table.insert(debuginfo, 'debug:other overhead:' .. accumulative_size) -local pg_keys = redis.call('KEYS', 'BUFFER_PG_TABLE:' .. port .. 
':*') -for i = 1, #pg_keys do - local profile = redis.call('HGET', pg_keys[i], 'profile') +for pg_key, profile in pairs(all_pgs) do local current_profile_size - if profile ~= 'ingress_lossy_profile' and (no_input_pg or new_pg ~= pg_keys[i]) then - if profile ~= input_profile_name and not no_input_pg then - local referenced_profile = redis.call('HGETALL', 'BUFFER_PROFILE_TABLE:' .. profile) - for j = 1, #referenced_profile, 2 do - if referenced_profile[j] == 'size' then - current_profile_size = tonumber(referenced_profile[j+1]) - end - end - else - current_profile_size = input_profile_size - profile = input_profile_name + if profile ~= input_profile_name then + local referenced_profile_size = redis.call('HGET', 'BUFFER_PROFILE_TABLE:' .. profile, 'size') + if not referenced_profile_size then + referenced_profile_size = redis.call('HGET', '_BUFFER_PROFILE_TABLE:' .. profile, 'size') + table.insert(debuginfo, 'debug:pending profile: ' .. profile) end - accumulative_size = accumulative_size + current_profile_size * get_number_of_pgs(pg_keys[i]) - table.insert(debuginfo, 'debug:' .. pg_keys[i] .. ':' .. profile .. ':' .. current_profile_size .. ':' .. get_number_of_pgs(pg_keys[i]) .. ':accu:' .. accumulative_size) + current_profile_size = tonumber(referenced_profile_size) + else + current_profile_size = input_profile_size end -end - -if not no_input_pg then - accumulative_size = accumulative_size + input_profile_size * get_number_of_pgs(new_pg) - table.insert(debuginfo, 'debug:' .. new_pg .. '*:' .. input_profile_name .. ':' .. input_profile_size .. ':' .. get_number_of_pgs(new_pg) .. ':accu:' .. accumulative_size) + if current_profile_size == 0 then + current_profile_size = lossy_pg_size + end + accumulative_size = accumulative_size + current_profile_size * get_number_of_pgs(pg_key) + table.insert(debuginfo, 'debug:' .. pg_key .. ':' .. profile .. ':' .. current_profile_size .. ':' .. get_number_of_pgs(pg_key) .. ':accu:' .. 
accumulative_size) end if max_headroom_size > accumulative_size then diff --git a/cfgmgr/buffer_headroom_barefoot.lua b/cfgmgr/buffer_headroom_barefoot.lua new file mode 100644 index 0000000000..f5e61013b3 --- /dev/null +++ b/cfgmgr/buffer_headroom_barefoot.lua @@ -0,0 +1,147 @@ +-- KEYS - profile name +-- ARGV[1] - port speed +-- ARGV[2] - cable length +-- ARGV[3] - port mtu +-- ARGV[4] - gearbox delay + +-- Parameters retried from databases: +-- From CONFIG_DB.LOSSLESS_TRAFFIC_PATTERN +-- small packet percentage: the parameter which is used to control worst case regarding the cell utilization +-- mtu: the mtu of lossless packet +-- From STATE_DB.ASIC_TABLE: +-- cell size: cell_size of the ASIC +-- pipeline_latency: the latency (XON) +-- mac_phy_delay: the bytes held in the switch chip's egress pipeline and PHY when XOFF has been generated +-- peer_response_time: the bytes that are held in the peer switch's pipeline and will be send out when the XOFF packet is received + +local lossless_mtu +local small_packet_percentage +local cell_size +local pipeline_latency +local mac_phy_delay +local peer_response_time + +local port_speed = tonumber(ARGV[1]) +local cable_length = tonumber(string.sub(ARGV[2], 1, -2)) +local port_mtu = tonumber(ARGV[3]) +local gearbox_delay = tonumber(ARGV[4]) + +local config_db = "4" +local state_db = "6" + +local ret = {} + +-- Pause quanta should be taken for each operating speed is defined in IEEE 802.3 31B.3.7. +-- The key of table pause_quanta_per_speed is operating speed at Mb/s. +-- The value of table pause_quanta_per_speed is the number of pause_quanta. 
+local pause_quanta_per_speed = {} +pause_quanta_per_speed[400000] = 905 +pause_quanta_per_speed[200000] = 453 +pause_quanta_per_speed[100000] = 394 +pause_quanta_per_speed[50000] = 147 +pause_quanta_per_speed[40000] = 118 +pause_quanta_per_speed[25000] = 80 +pause_quanta_per_speed[10000] = 67 +pause_quanta_per_speed[1000] = 2 +pause_quanta_per_speed[100] = 1 + +-- Get pause_quanta from the pause_quanta_per_speed table +local pause_quanta = pause_quanta_per_speed[port_speed] + +if gearbox_delay == nil then + gearbox_delay = 0 +end + +-- Fetch ASIC info from ASIC table in STATE_DB +redis.call("SELECT", state_db) +local asic_keys = redis.call("KEYS", "ASIC_TABLE*") + +-- Only one key should exist +local asic_table_content = redis.call("HGETALL", asic_keys[1]) + +for i = 1, #asic_table_content, 2 do + if asic_table_content[i] == "cell_size" then + cell_size = tonumber(asic_table_content[i+1]) + end + if asic_table_content[i] == "pipeline_latency" then + pipeline_latency = tonumber(asic_table_content[i+1]) * 1024 + end + if asic_table_content[i] == "mac_phy_delay" then + mac_phy_delay = tonumber(asic_table_content[i+1]) * 1024 + end + -- If failed to get pause_quanta from the table, then use the default peer_response_time stored in state_db + if asic_table_content[i] == "peer_response_time" and pause_quanta == nil then + peer_response_time = tonumber(asic_table_content[i+1]) * 1024 + end +end + +-- Fetch lossless traffic info from CONFIG_DB +redis.call("SELECT", config_db) +local lossless_traffic_keys = redis.call("KEYS", "LOSSLESS_TRAFFIC_PATTERN*") + +-- Only one key should exist +local lossless_traffic_table_content = redis.call("HGETALL", lossless_traffic_keys[1]) +for i = 1, #lossless_traffic_table_content, 2 do + if lossless_traffic_table_content[i] == "mtu" then + lossless_mtu = tonumber(lossless_traffic_table_content[i+1]) + end + if lossless_traffic_table_content[i] == "small_packet_percentage" then + small_packet_percentage = 
tonumber(lossless_traffic_table_content[i+1]) + end +end + +-- Fetch the shared headroom pool size +local shp_size = tonumber(redis.call("HGET", "BUFFER_POOL|ingress_lossless_pool", "xoff")) + +-- Calculate the headroom information +local speed_of_light = 198000000 +local minimal_packet_size = 64 +local cell_occupancy +local worst_case_factor +local propagation_delay +local bytes_on_cable +local bytes_on_gearbox +local xoff_value +local xon_value +local headroom_size + +if cell_size > 2 * minimal_packet_size then + worst_case_factor = cell_size / minimal_packet_size +else + worst_case_factor = (2 * cell_size) / (1 + cell_size) +end + +cell_occupancy = (100 - small_packet_percentage + small_packet_percentage * worst_case_factor) / 100 + +if (gearbox_delay == 0) then + bytes_on_gearbox = 0 +else + bytes_on_gearbox = port_speed * gearbox_delay / (8 * 1024) +end + +-- If successfully get pause_quanta from the table, then calculate peer_response_time from it +if pause_quanta ~= nil then + peer_response_time = (pause_quanta) * 512 / 8 +end + +if port_speed == 400000 then + peer_response_time = 2 * peer_response_time +end + +bytes_on_cable = 2 * cable_length * port_speed * 1000000000 / speed_of_light / (8 * 1024) +propagation_delay = port_mtu + bytes_on_cable + 2 * bytes_on_gearbox + mac_phy_delay + peer_response_time + +-- Calculate the xoff and xon and then round up at 1024 bytes +xoff_value = lossless_mtu + propagation_delay * cell_occupancy +xoff_value = math.ceil(xoff_value / 1024) * 1024 +xon_value = pipeline_latency +xon_value = math.ceil(xon_value / 1024) * 1024 + +headroom_size = xon_value +headroom_size = math.ceil(headroom_size / 1024) * 1024 + +table.insert(ret, "xon" .. ":" .. math.ceil(xon_value)) +table.insert(ret, "xoff" .. ":" .. math.ceil(xoff_value)) +table.insert(ret, "size" .. ":" .. 
math.ceil(headroom_size)) + +return ret diff --git a/cfgmgr/buffer_headroom_mellanox.lua b/cfgmgr/buffer_headroom_mellanox.lua index ae36b5caf1..d99cd02816 100644 --- a/cfgmgr/buffer_headroom_mellanox.lua +++ b/cfgmgr/buffer_headroom_mellanox.lua @@ -39,6 +39,7 @@ local ret = {} -- the key of table pause_quanta_per_speed is operating speed at Mb/s -- the value of table pause_quanta_per_speed is the number of pause_quanta local pause_quanta_per_speed = {} +pause_quanta_per_speed[800000] = 905 pause_quanta_per_speed[400000] = 905 pause_quanta_per_speed[200000] = 453 pause_quanta_per_speed[100000] = 394 diff --git a/cfgmgr/buffer_pool_barefoot.lua b/cfgmgr/buffer_pool_barefoot.lua new file mode 100644 index 0000000000..49c3a961f7 --- /dev/null +++ b/cfgmgr/buffer_pool_barefoot.lua @@ -0,0 +1,30 @@ +-- KEYS - None +-- ARGV - None + +local result = {} +local config_db = "4" +local state_db = "6" + +redis.call("SELECT", state_db) +local asic_keys = redis.call("KEYS", "ASIC_TABLE*") +local cell_size = tonumber(redis.call("HGET", asic_keys[1], "cell_size")) + +-- Based on cell_size, calculate singular headroom +local ppg_headroom = 400 * cell_size + +redis.call("SELECT", config_db) +local ports = redis.call("KEYS", "PORT|*") +local ports_num = #ports + +-- 2 PPGs per port, 70% of possible maximum value. +local shp_size = math.ceil(ports_num * 2 * ppg_headroom * 0.7) + +local ingress_lossless_pool_size_fixed = tonumber(redis.call('HGET', 'BUFFER_POOL|ingress_lossless_pool', 'size')) +local ingress_lossy_pool_size_fixed = tonumber(redis.call('HGET', 'BUFFER_POOL|ingress_lossy_pool', 'size')) +local egress_lossy_pool_size_fixed = tonumber(redis.call('HGET', 'BUFFER_POOL|egress_lossy_pool', 'size')) + +table.insert(result, "ingress_lossless_pool" .. ":" .. ingress_lossless_pool_size_fixed .. ":" .. shp_size) +table.insert(result, "ingress_lossy_pool" .. ":" .. ingress_lossy_pool_size_fixed) +table.insert(result, "egress_lossy_pool" .. ":" .. 
egress_lossy_pool_size_fixed) + +return result diff --git a/cfgmgr/buffer_pool_mellanox.lua b/cfgmgr/buffer_pool_mellanox.lua index 8c51c28706..15bb81efb2 100644 --- a/cfgmgr/buffer_pool_mellanox.lua +++ b/cfgmgr/buffer_pool_mellanox.lua @@ -10,12 +10,13 @@ local port_count_8lanes = 0 -- Number of lossy PG on ports with 8 lanes local lossypg_8lanes = 0 +local ingress_profile_is_lossless = {} + -- Private headrom local private_headroom = 10 * 1024 local result = {} local profiles = {} -local lossless_profiles = {} local total_port = 0 @@ -52,11 +53,11 @@ local function iterate_all_items(all_items, check_lossless) port = string.match(all_items[i], "Ethernet%d+") if port ~= nil then local range = string.match(all_items[i], "Ethernet%d+:([^%s]+)$") - local profile_name = redis.call('HGET', all_items[i], 'profile') - if not profile_name then + local profile_name_without_table = redis.call('HGET', all_items[i], 'profile') + if not profile_name_without_table then return 1 end - profile_name = "BUFFER_PROFILE_TABLE:" .. profile_name + local profile_name = "BUFFER_PROFILE_TABLE:" .. 
profile_name_without_table local profile_ref_count = profiles[profile_name] if profile_ref_count == nil then -- Indicate an error in case the referenced profile hasn't been inserted or has been removed @@ -71,10 +72,11 @@ local function iterate_all_items(all_items, check_lossless) size = 1 + tonumber(string.sub(range, -1)) - tonumber(string.sub(range, 1, 1)) end profiles[profile_name] = profile_ref_count + size - if port_set_8lanes[port] and profile_name == 'BUFFER_PROFILE_TABLE:ingress_lossy_profile' then + if port_set_8lanes[port] and ingress_profile_is_lossless[profile_name] == false then + -- Handle additional buffer reserved for lossy PG on 8-lane ports lossypg_8lanes = lossypg_8lanes + size end - if check_lossless and lossless_profiles[profile_name] then + if check_lossless and ingress_profile_is_lossless[profile_name] then if lossless_ports[port] == nil then lossless_port_count = lossless_port_count + 1 lossless_ports[port] = true @@ -113,7 +115,8 @@ local function iterate_profile_list(all_items) -- To distinguish both cases, a new name "ingress_lossy_profile_list" is introduced to indicate -- the profile is used by the profile list where its size should be zero. profile_name = 'BUFFER_PROFILE_TABLE:' .. profile_name - if profile_name == 'BUFFER_PROFILE_TABLE:ingress_lossy_profile' then + -- TODO CHECK ALL LOSSY PROFILES + if ingress_profile_is_lossless[profile_name] == false then profile_name = profile_name .. '_list' if profiles[profile_name] == nil then profiles[profile_name] = 0 @@ -130,7 +133,7 @@ local function iterate_profile_list(all_items) return 0 end -local function fetch_buffer_pool_size_from_appldb() +local function fetch_buffer_pool_size_from_appldb(shp_enabled) local buffer_pools = {} redis.call('SELECT', config_db) local buffer_pool_keys = redis.call('KEYS', 'BUFFER_POOL|*') @@ -155,16 +158,43 @@ local function fetch_buffer_pool_size_from_appldb() end xoff = redis.call('HGET', 'BUFFER_POOL_TABLE:' .. 
buffer_pools[i], 'xoff') if not xoff then - table.insert(result, buffer_pools[i] .. ':' .. size) + if shp_enabled and size == "0" and buffer_pools[i] == "ingress_lossless_pool" then + -- During initialization, if SHP is enabled + -- 1. the buffer pool sizes, xoff have initialized to 0, which means the shared headroom pool is disabled + -- 2. but the buffer profiles already indicate the shared headroom pool is enabled + -- 3. later on the buffer pool sizes are updated with xoff being non-zero + -- In case the orchagent starts handling buffer configuration between 2 and 3, + -- It is inconsistent between buffer pools and profiles, which fails Mellanox SAI sanity check + -- To avoid it, it indicates the shared headroom pool is enabled by setting a very small buffer pool and shared headroom pool sizes + table.insert(result, buffer_pools[i] .. ':2048:1024') + else + table.insert(result, buffer_pools[i] .. ':' .. size) + end else table.insert(result, buffer_pools[i] .. ':' .. size .. ':' .. xoff) end end end +-- Main -- -- Connect to CONFIG_DB redis.call('SELECT', config_db) +-- Parse all the pools and seperate them according to the direction +local ipools = {} +local epools = {} +local pools = redis.call('KEYS', 'BUFFER_POOL|*') +for i = 1, #pools, 1 do + local type = redis.call('HGET', pools[i], 'type') + if type == 'ingress' then + table.insert(ipools, pools[i]) + else + if type == 'egress' then + table.insert(epools, pools[i]) + end + end +end + local ports_table = redis.call('KEYS', 'PORT|*') total_port = #ports_table @@ -250,9 +280,19 @@ redis.call('SELECT', appl_db) local all_profiles = redis.call('KEYS', 'BUFFER_PROFILE*') for i = 1, #all_profiles, 1 do if all_profiles[i] ~= "BUFFER_PROFILE_TABLE_KEY_SET" and all_profiles[i] ~= "BUFFER_PROFILE_TABLE_DEL_SET" then - local xoff = redis.call('HGET', all_profiles[i], 'xoff') - if xoff then - lossless_profiles[all_profiles[i]] = true + local pool = redis.call('HGET', all_profiles[i], 'pool') + for j = 1, #ipools, 1 do 
+ if "BUFFER_POOL|" .. pool == ipools[j] then + -- For ingress profiles, check whether it is lossless or lossy + -- For lossy profiles, there is buffer implicitly reserved when they are applied on PGs + local xoff = redis.call('HGET', all_profiles[i], 'xoff') + if xoff then + ingress_profile_is_lossless[all_profiles[i]] = true + else + ingress_profile_is_lossless[all_profiles[i]] = false + end + break + end end profiles[all_profiles[i]] = 0 end @@ -266,7 +306,7 @@ local fail_count = 0 fail_count = fail_count + iterate_all_items(all_pgs, true) fail_count = fail_count + iterate_all_items(all_tcs, false) if fail_count > 0 then - fetch_buffer_pool_size_from_appldb() + fetch_buffer_pool_size_from_appldb(shp_enabled) return result end @@ -276,7 +316,7 @@ local all_egress_profile_lists = redis.call('KEYS', 'BUFFER_PORT_EGRESS_PROFILE_ fail_count = fail_count + iterate_profile_list(all_ingress_profile_lists) fail_count = fail_count + iterate_profile_list(all_egress_profile_lists) if fail_count > 0 then - fetch_buffer_pool_size_from_appldb() + fetch_buffer_pool_size_from_appldb(shp_enabled) return result end @@ -289,12 +329,13 @@ local accumulative_xoff = 0 for name in pairs(profiles) do if name ~= "BUFFER_PROFILE_TABLE_KEY_SET" and name ~= "BUFFER_PROFILE_TABLE_DEL_SET" then local size = tonumber(redis.call('HGET', name, 'size')) - if size ~= nil then - if name == "BUFFER_PROFILE_TABLE:ingress_lossy_profile" then - size = size + lossypg_reserved + if size ~= nil then + -- Handle the implicitly reserved buffer for lossy profile applied on PG + if ingress_profile_is_lossless[name] == false then + size = size + lossypg_reserved end if size ~= 0 then - if shp_enabled and shp_size == 0 then + if shp_size == 0 then local xon = tonumber(redis.call('HGET', name, 'xon')) local xoff = tonumber(redis.call('HGET', name, 'xoff')) if xon ~= nil and xoff ~= nil and xon + xoff > size then @@ -304,6 +345,8 @@ for name in pairs(profiles) do accumulative_occupied_buffer = 
accumulative_occupied_buffer + size * profiles[name] end table.insert(statistics, {name, size, profiles[name]}) + else + table.insert(statistics, {name, "-", profiles[name]}) end end end @@ -314,6 +357,12 @@ accumulative_occupied_buffer = accumulative_occupied_buffer + lossypg_extra_for_ -- Accumulate sizes for private headrooms local accumulative_private_headroom = 0 +local force_enable_shp = false +if accumulative_xoff > 0 and shp_enabled ~= true then + force_enable_shp = true + shp_size = 655360 + shp_enabled = true +end if shp_enabled then accumulative_private_headroom = lossless_port_count * private_headroom accumulative_occupied_buffer = accumulative_occupied_buffer + accumulative_private_headroom @@ -336,7 +385,6 @@ redis.call('SELECT', config_db) -- Fetch all the pools that need update local pools_need_update = {} -local ipools = redis.call('KEYS', 'BUFFER_POOL|ingress*') local ingress_pool_count = 0 local ingress_lossless_pool_size = nil for i = 1, #ipools, 1 do @@ -351,7 +399,6 @@ for i = 1, #ipools, 1 do end end -local epools = redis.call('KEYS', 'BUFFER_POOL|egress*') for i = 1, #epools, 1 do local size = redis.call('HGET', epools[i], 'size') if not size then @@ -361,6 +408,9 @@ end if shp_enabled and shp_size == 0 then shp_size = math.ceil(accumulative_xoff / over_subscribe_ratio) + if shp_size == 0 then + shp_size = 655360 + end end local pool_size @@ -402,6 +452,7 @@ table.insert(result, "debug:mgmt_pool:" .. mgmt_pool_size) if shp_enabled then table.insert(result, "debug:accumulative_private_headroom:" .. accumulative_private_headroom) table.insert(result, "debug:accumulative xoff:" .. accumulative_xoff) + table.insert(result, "debug:force enabled shp:" .. tostring(force_enable_shp)) end table.insert(result, "debug:accumulative_mgmt_pg:" .. accumulative_management_pg) table.insert(result, "debug:egress_mirror:" .. 
accumulative_egress_mirror_overhead) diff --git a/cfgmgr/buffermgr.cpp b/cfgmgr/buffermgr.cpp index d8faa1033b..32f71d8280 100644 --- a/cfgmgr/buffermgr.cpp +++ b/cfgmgr/buffermgr.cpp @@ -196,7 +196,7 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port) // Although we have up to 8 PGs for now, the range to check is expanded to 32 support more PGs set lossless_pg_combinations = generateIdListFromMap(lossless_pg_id, sizeof(lossless_pg_id)); - if (m_portStatusLookup[port] == "down" && m_platform == "mellanox") + if (m_portStatusLookup[port] == "down" && (m_platform == "mellanox" || m_platform == "barefoot")) { for (auto lossless_pg : lossless_pg_combinations) { @@ -549,24 +549,23 @@ void BufferMgr::doTask(Consumer &consumer) task_status = doSpeedUpdateTask(port); } } - - switch (task_status) - { - case task_process_status::task_failed: - SWSS_LOG_ERROR("Failed to process table update"); - return; - case task_process_status::task_need_retry: - SWSS_LOG_INFO("Unable to process table update. Will retry..."); - ++it; - break; - case task_process_status::task_invalid_entry: - SWSS_LOG_ERROR("Failed to process invalid entry, drop it"); - it = consumer.m_toSync.erase(it); - break; - default: - it = consumer.m_toSync.erase(it); - break; - } + } + switch (task_status) + { + case task_process_status::task_failed: + SWSS_LOG_ERROR("Failed to process table update"); + return; + case task_process_status::task_need_retry: + SWSS_LOG_INFO("Unable to process table update. 
Will retry..."); + ++it; + break; + case task_process_status::task_invalid_entry: + SWSS_LOG_ERROR("Failed to process invalid entry, drop it"); + it = consumer.m_toSync.erase(it); + break; + default: + it = consumer.m_toSync.erase(it); + break; } } } diff --git a/cfgmgr/buffermgrd.cpp b/cfgmgr/buffermgrd.cpp index eb5de60b65..e88280eb56 100644 --- a/cfgmgr/buffermgrd.cpp +++ b/cfgmgr/buffermgrd.cpp @@ -11,7 +11,7 @@ #include #include #include "json.h" -#include "json.hpp" +#include #include "warm_restart.h" using namespace std; @@ -21,26 +21,6 @@ using json = nlohmann::json; /* SELECT() function timeout retry time, in millisecond */ #define SELECT_TIMEOUT 1000 -/* - * Following global variables are defined here for the purpose of - * using existing Orch class which is to be refactored soon to - * eliminate the direct exposure of the global variables. - * - * Once Orch class refactoring is done, these global variables - * should be removed from here. - */ -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; -/* Global database mutex */ -mutex gDbMutex; - void usage() { cout << "Usage: buffermgrd <-l pg_lookup.ini|-a asic_table.json [-p peripheral_table.json] [-z zero_profiles.json]>" << endl; @@ -189,6 +169,8 @@ int main(int argc, char **argv) WarmStart::initialize("buffermgrd", "swss"); WarmStart::checkWarmStart("buffermgrd", "swss"); + DBConnector applStateDb("APPL_STATE_DB", 0); + vector buffer_table_connectors = { TableConnector(&cfgDb, CFG_PORT_TABLE_NAME), TableConnector(&cfgDb, CFG_PORT_CABLE_LEN_TABLE_NAME), @@ -202,7 +184,7 @@ int main(int argc, char **argv) TableConnector(&stateDb, STATE_BUFFER_MAXIMUM_VALUE_TABLE), TableConnector(&stateDb, STATE_PORT_TABLE_NAME) }; - cfgOrchList.emplace_back(new BufferMgrDynamic(&cfgDb, &stateDb, 
&applDb, buffer_table_connectors, peripherial_table_ptr, zero_profiles_ptr)); + cfgOrchList.emplace_back(new BufferMgrDynamic(&cfgDb, &stateDb, &applDb, &applStateDb, buffer_table_connectors, peripherial_table_ptr, zero_profiles_ptr)); } else if (!pg_lookup_file.empty()) { diff --git a/cfgmgr/buffermgrdyn.cpp b/cfgmgr/buffermgrdyn.cpp index 5017ad9d1b..6c9a1e831e 100644 --- a/cfgmgr/buffermgrdyn.cpp +++ b/cfgmgr/buffermgrdyn.cpp @@ -26,7 +26,7 @@ using namespace std; using namespace swss; -BufferMgrDynamic::BufferMgrDynamic(DBConnector *cfgDb, DBConnector *stateDb, DBConnector *applDb, const vector &tables, shared_ptr> gearboxInfo, shared_ptr> zeroProfilesInfo) : +BufferMgrDynamic::BufferMgrDynamic(DBConnector *cfgDb, DBConnector *stateDb, DBConnector *applDb, DBConnector *applStateDb, const vector &tables, shared_ptr> gearboxInfo, shared_ptr> zeroProfilesInfo) : Orch(tables), m_platform(), m_bufferDirections{BUFFER_INGRESS, BUFFER_EGRESS}, @@ -38,6 +38,7 @@ BufferMgrDynamic::BufferMgrDynamic(DBConnector *cfgDb, DBConnector *stateDb, DBC m_cfgDefaultLosslessBufferParam(cfgDb, CFG_DEFAULT_LOSSLESS_BUFFER_PARAMETER), m_cfgDeviceMetaDataTable(cfgDb, CFG_DEVICE_METADATA_TABLE_NAME), m_applBufferPoolTable(applDb, APP_BUFFER_POOL_TABLE_NAME), + m_applStateBufferPoolTable(applStateDb, APP_BUFFER_POOL_TABLE_NAME), m_applBufferProfileTable(applDb, APP_BUFFER_PROFILE_TABLE_NAME), m_applBufferObjectTables{ProducerStateTable(applDb, APP_BUFFER_PG_TABLE_NAME), ProducerStateTable(applDb, APP_BUFFER_QUEUE_TABLE_NAME)}, m_applBufferProfileListTables{ProducerStateTable(applDb, APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME), ProducerStateTable(applDb, APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME)}, @@ -1050,10 +1051,10 @@ bool BufferMgrDynamic::isHeadroomResourceValid(const string &port, const buffer_ // profile: the profile referenced by the new_pg (if provided) or all PGs // new_pg: which pg is newly added? 
- if (!profile.lossless) + if (!profile.lossless && new_pg.empty()) { - SWSS_LOG_INFO("No need to check headroom for lossy PG port %s profile %s size %s pg %s", - port.c_str(), profile.name.c_str(), profile.size.c_str(), new_pg.c_str()); + SWSS_LOG_INFO("No need to check headroom for lossy PG port %s profile %s size %s without a PG specified", + port.c_str(), profile.name.c_str(), profile.size.c_str()); return true; } @@ -1496,7 +1497,7 @@ task_process_status BufferMgrDynamic::refreshPgsForPort(const string &port, cons // Calculate whether accumulative headroom size exceeds the maximum value // Abort if it does - if (!isHeadroomResourceValid(port, m_bufferProfileLookup[newProfile], exactly_matched_key)) + if (!isHeadroomResourceValid(port, m_bufferProfileLookup[newProfile], key)) { SWSS_LOG_ERROR("Update speed (%s) and cable length (%s) for port %s failed, accumulative headroom size exceeds the limit", speed.c_str(), cable_length.c_str(), port.c_str()); @@ -1960,6 +1961,13 @@ task_process_status BufferMgrDynamic::handleDefaultLossLessBufferParam(KeyOpFiel { bool isSHPEnabled = isNonZero(m_overSubscribeRatio); bool willSHPBeEnabled = isNonZero(newRatio); + if (m_portInitDone && (!isSHPEnabled) && willSHPBeEnabled) + { + if (!isSharedHeadroomPoolEnabledInSai()) + { + return task_process_status::task_need_retry; + } + } SWSS_LOG_INFO("Recalculate shared buffer pool size due to over subscribe ratio has been updated from %s to %s", m_overSubscribeRatio.c_str(), newRatio.c_str()); m_overSubscribeRatio = newRatio; @@ -1968,6 +1976,24 @@ task_process_status BufferMgrDynamic::handleDefaultLossLessBufferParam(KeyOpFiel return task_process_status::task_success; } +bool BufferMgrDynamic::isSharedHeadroomPoolEnabledInSai() +{ + string xoff; + recalculateSharedBufferPool(); + if (!isNonZero(m_bufferPoolLookup[INGRESS_LOSSLESS_PG_POOL_NAME].xoff)) + { + return true; + } + m_applBufferPoolTable.flush(); + m_applStateBufferPoolTable.hget(INGRESS_LOSSLESS_PG_POOL_NAME, "xoff", 
xoff); + if (!isNonZero(xoff)) + { + SWSS_LOG_INFO("Shared headroom pool is enabled but has not been applied to SAI, retrying"); + return false; + } + + return true; +} task_process_status BufferMgrDynamic::handleCableLenTable(KeyOpFieldsValuesTuple &tuple) { @@ -2416,6 +2442,14 @@ task_process_status BufferMgrDynamic::handleBufferPoolTable(KeyOpFieldsValuesTup { bool isSHPEnabledBySize = isNonZero(m_configuredSharedHeadroomPoolSize); + if (m_portInitDone && (!isSHPEnabledBySize) && willSHPBeEnabledBySize) + { + if (!isSharedHeadroomPoolEnabledInSai()) + { + return task_process_status::task_need_retry; + } + } + m_configuredSharedHeadroomPoolSize = newSHPSize; refreshSharedHeadroomPool(false, isSHPEnabledBySize != willSHPBeEnabledBySize); } @@ -2971,6 +3005,11 @@ task_process_status BufferMgrDynamic::handleSingleBufferPgEntry(const string &ke bufferPg.dynamic_calculated = profileRef.dynamic_calculated; bufferPg.configured_profile_name = profileName; bufferPg.lossless = profileRef.lossless; + if (!profileRef.lossless && !isHeadroomResourceValid(port, profileRef, key)) + { + SWSS_LOG_ERROR("Unable to configure lossy PG %s, accumulative headroom size exceeds the limit", key.c_str()); + return task_process_status::task_failed; + } } bufferPg.static_configured = true; bufferPg.configured_profile_name = profileName; @@ -3110,8 +3149,7 @@ task_process_status BufferMgrDynamic::handleSingleBufferQueueEntry(const string if (op == SET_COMMAND) { - auto &portQueue = m_portQueueLookup[port][queues]; - + bool successful = false; SWSS_LOG_INFO("Inserting entry BUFFER_QUEUE_TABLE:%s to APPL_DB", key.c_str()); for (auto i : kfvFieldsValues(tuple)) @@ -3122,8 +3160,10 @@ task_process_status BufferMgrDynamic::handleSingleBufferQueueEntry(const string auto rc = checkBufferProfileDirection(fvValue(i), BUFFER_EGRESS); if (rc != task_process_status::task_success) return rc; - portQueue.running_profile_name = fvValue(i); + + m_portQueueLookup[port][queues].running_profile_name = 
fvValue(i); SWSS_LOG_NOTICE("Queue %s has been configured on the system, referencing profile %s", key.c_str(), fvValue(i).c_str()); + successful = true; } else { @@ -3134,8 +3174,13 @@ task_process_status BufferMgrDynamic::handleSingleBufferQueueEntry(const string SWSS_LOG_INFO("Inserting field %s value %s", fvField(i).c_str(), fvValue(i).c_str()); } - // TODO: check overlap. Currently, assume there is no overlap + if (!successful) + { + SWSS_LOG_ERROR("Invalid BUFFER_QUEUE configuration on %s: no profile configured", key.c_str()); + return task_process_status::task_failed; + } + auto &portQueue = m_portQueueLookup[port][queues]; if (PORT_ADMIN_DOWN == portInfo.state) { handleSetSingleBufferObjectOnAdminDownPort(BUFFER_QUEUE, port, key, portQueue.running_profile_name); diff --git a/cfgmgr/buffermgrdyn.h b/cfgmgr/buffermgrdyn.h index 11b55d7667..b50b0ced69 100644 --- a/cfgmgr/buffermgrdyn.h +++ b/cfgmgr/buffermgrdyn.h @@ -147,7 +147,7 @@ typedef std::map gearbox_delay_t; class BufferMgrDynamic : public Orch { public: - BufferMgrDynamic(DBConnector *cfgDb, DBConnector *stateDb, DBConnector *applDb, const std::vector &tables, std::shared_ptr> gearboxInfo, std::shared_ptr> zeroProfilesInfo); + BufferMgrDynamic(DBConnector *cfgDb, DBConnector *stateDb, DBConnector *applDb, DBConnector *applStateDb, const std::vector &tables, std::shared_ptr> gearboxInfo, std::shared_ptr> zeroProfilesInfo); using Orch::doTask; private: @@ -204,6 +204,7 @@ class BufferMgrDynamic : public Orch // BUFFER_POOL table and cache ProducerStateTable m_applBufferPoolTable; + Table m_applStateBufferPoolTable; Table m_stateBufferPoolTable; buffer_pool_lookup_t m_bufferPoolLookup; @@ -294,6 +295,7 @@ class BufferMgrDynamic : public Orch task_process_status allocateProfile(const std::string &speed, const std::string &cable, const std::string &mtu, const std::string &threshold, const std::string &gearbox_model, long lane_count, std::string &profile_name); void releaseProfile(const std::string 
&profile_name); bool isHeadroomResourceValid(const std::string &port, const buffer_profile_t &profile, const std::string &new_pg); + bool isSharedHeadroomPoolEnabledInSai(); void refreshSharedHeadroomPool(bool enable_state_updated_by_ratio, bool enable_state_updated_by_size); task_process_status checkBufferProfileDirection(const std::string &profiles, buffer_direction_t dir); std::string constructZeroProfileListFromNormalProfileList(const std::string &normalProfileList, const std::string &port); diff --git a/cfgmgr/coppmgr.cpp b/cfgmgr/coppmgr.cpp index 1721cc8593..9b2c3ee4d7 100644 --- a/cfgmgr/coppmgr.cpp +++ b/cfgmgr/coppmgr.cpp @@ -8,7 +8,9 @@ #include "exec.h" #include "shellcmd.h" #include "warm_restart.h" -#include "json.hpp" +#include +#include +#include using json = nlohmann::json; @@ -19,10 +21,11 @@ static set g_copp_init_set; void CoppMgr::parseInitFile(void) { - std::ifstream ifs(COPP_INIT_FILE); + std::ifstream ifs(m_coppCfgfile); + if (ifs.fail()) { - SWSS_LOG_ERROR("COPP init file %s not found", COPP_INIT_FILE); + SWSS_LOG_ERROR("COPP init file %s not found", m_coppCfgfile.c_str()); return; } json j = json::parse(ifs); @@ -255,7 +258,43 @@ void CoppMgr::mergeConfig(CoppCfg &init_cfg, CoppCfg &m_cfg, std::vector &tableNames) : +bool CoppMgr::isDupEntry(const std::string &key, std::vector &fvs) +{ + /* Compare with the existing contents of copp tables, in case for a key K preserved fvs are the same + * as the fvs in trap_group_fvs it will be ignored as a duplicate continue to next key. + * In case one of the fvs differs the preserved entry will be deleted and new entry will be set instead. 
+ */ + std::vector preserved_fvs; + bool key_found = m_coppTable.get(key, preserved_fvs); + if (!key_found) + { + return false; + } + else + { + unordered_map preserved_copp_entry; + for (auto prev_fv : preserved_fvs) + { + preserved_copp_entry[fvField(prev_fv)] = fvValue(prev_fv); + } + for (auto fv: fvs) + { + string field = fvField(fv); + string value = fvValue(fv); + auto preserved_copp_it = preserved_copp_entry.find(field); + bool field_found = (preserved_copp_it != preserved_copp_entry.end()); + if ((!field_found) || (field_found && preserved_copp_it->second.compare(value))) + { + // overwrite -> delete preserved entry from copp table and set a new entry instead + m_appCoppTable.del(key); + return false; + } + } + } + return true; +} + +CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, const vector &tableNames, const string copp_init_file) : Orch(cfgDb, tableNames), m_cfgCoppTrapTable(cfgDb, CFG_COPP_TRAP_TABLE_NAME), m_cfgCoppGroupTable(cfgDb, CFG_COPP_GROUP_TABLE_NAME), @@ -263,16 +302,19 @@ CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c m_appCoppTable(appDb, APP_COPP_TABLE_NAME), m_stateCoppTrapTable(stateDb, STATE_COPP_TRAP_TABLE_NAME), m_stateCoppGroupTable(stateDb, STATE_COPP_GROUP_TABLE_NAME), - m_coppTable(appDb, APP_COPP_TABLE_NAME) + m_coppTable(appDb, APP_COPP_TABLE_NAME), + m_coppCfgfile(copp_init_file) { SWSS_LOG_ENTER(); parseInitFile(); std::vector group_keys; std::vector trap_keys; std::vector feature_keys; + std::vector preserved_copp_keys; std::vector group_cfg_keys; std::vector trap_cfg_keys; + unordered_set supported_copp_keys; CoppCfg group_cfg; CoppCfg trap_cfg; @@ -280,6 +322,7 @@ CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c m_cfgCoppGroupTable.getKeys(group_cfg_keys); m_cfgCoppTrapTable.getKeys(trap_cfg_keys); m_cfgFeatureTable.getKeys(feature_keys); + m_coppTable.getKeys(preserved_copp_keys); for (auto i: feature_keys) @@ -352,8 +395,14 
@@ CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c if (!trap_group_fvs.empty()) { + supported_copp_keys.emplace(i.first); + if (isDupEntry(i.first, trap_group_fvs)) + { + continue; + } m_appCoppTable.set(i.first, trap_group_fvs); } + setCoppGroupStateOk(i.first); auto g_cfg = std::find(group_cfg_keys.begin(), group_cfg_keys.end(), i.first); if (g_cfg != group_cfg_keys.end()) @@ -361,6 +410,16 @@ CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c g_copp_init_set.insert(i.first); } } + + // Delete unsupported keys from preserved copp tables + for (auto it : preserved_copp_keys) + { + auto copp_it = supported_copp_keys.find(it); + if (copp_it == supported_copp_keys.end()) + { + m_appCoppTable.del(it); + } + } } void CoppMgr::setCoppGroupStateOk(string alias) diff --git a/cfgmgr/coppmgr.h b/cfgmgr/coppmgr.h index 1d53756fce..86f1b0e4e2 100644 --- a/cfgmgr/coppmgr.h +++ b/cfgmgr/coppmgr.h @@ -62,7 +62,7 @@ class CoppMgr : public Orch { public: CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, - const std::vector &tableNames); + const std::vector &tableNames, const std::string copp_init_file = COPP_INIT_FILE); using Orch::doTask; private: @@ -75,6 +75,7 @@ class CoppMgr : public Orch CoppCfg m_coppGroupInitCfg; CoppCfg m_coppTrapInitCfg; CoppCfg m_featuresCfgTable; + std::string m_coppCfgfile; void doTask(Consumer &consumer); @@ -100,6 +101,7 @@ class CoppMgr : public Orch bool isTrapGroupInstalled(std::string key); bool isFeatureEnabled(std::string feature); void mergeConfig(CoppCfg &init_cfg, CoppCfg &m_cfg, std::vector &cfg_keys, Table &cfgTable); + bool isDupEntry(const std::string &key, std::vector &fvs); void removeTrap(std::string key); void addTrap(std::string trap_ids, std::string trap_group); diff --git a/cfgmgr/coppmgrd.cpp b/cfgmgr/coppmgrd.cpp index 60b0a2442a..16c15c1238 100644 --- a/cfgmgr/coppmgrd.cpp +++ b/cfgmgr/coppmgrd.cpp @@ -16,26 +16,6 @@ using namespace swss; /* 
select() function timeout retry time, in millisecond */ #define SELECT_TIMEOUT 1000 -/* - * Following global variables are defined here for the purpose of - * using existing Orch class which is to be refactored soon to - * eliminate the direct exposure of the global variables. - * - * Once Orch class refactoring is done, these global variables - * should be removed from here. - */ -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; -/* Global database mutex */ -mutex gDbMutex; - int main(int argc, char **argv) { Logger::linkToDbNative("coppmgrd"); diff --git a/cfgmgr/fabricmgr.cpp b/cfgmgr/fabricmgr.cpp new file mode 100644 index 0000000000..16a8111199 --- /dev/null +++ b/cfgmgr/fabricmgr.cpp @@ -0,0 +1,119 @@ +#include "logger.h" +#include "dbconnector.h" +#include "producerstatetable.h" +#include "tokenize.h" +#include "ipprefix.h" +#include "fabricmgr.h" +#include "exec.h" +#include "shellcmd.h" +#include + +using namespace std; +using namespace swss; + +FabricMgr::FabricMgr(DBConnector *cfgDb, DBConnector *appDb, const vector &tableNames) : + Orch(cfgDb, tableNames), + m_cfgFabricMonitorTable(cfgDb, CFG_FABRIC_MONITOR_DATA_TABLE_NAME), + m_cfgFabricPortTable(cfgDb, CFG_FABRIC_MONITOR_PORT_TABLE_NAME), + m_appFabricMonitorTable(appDb, APP_FABRIC_MONITOR_DATA_TABLE_NAME), + m_appFabricPortTable(appDb, APP_FABRIC_MONITOR_PORT_TABLE_NAME) +{ +} + +void FabricMgr::doTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + auto table = consumer.getTableName(); + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + + string key = kfvKey(t); + string op = kfvOp(t); + + if (op == SET_COMMAND) + { + + string monErrThreshCrcCells, monErrThreshRxCells; + string monPollThreshRecovery, 
monPollThreshIsolation; + string isolateStatus; + string alias, lanes; + std::vector field_values; + string value; + + for (auto i : kfvFieldsValues(t)) + { + if (fvField(i) == "monErrThreshCrcCells") + { + monErrThreshCrcCells = fvValue(i); + writeConfigToAppDb(key, "monErrThreshCrcCells", monErrThreshCrcCells); + } + else if (fvField(i) == "monErrThreshRxCells") + { + monErrThreshRxCells = fvValue(i); + writeConfigToAppDb(key, "monErrThreshRxCells", monErrThreshRxCells); + } + else if (fvField(i) == "monPollThreshRecovery") + { + monPollThreshRecovery = fvValue(i); + writeConfigToAppDb(key, "monPollThreshRecovery", monPollThreshRecovery); + } + else if (fvField(i) == "monPollThreshIsolation") + { + monPollThreshIsolation = fvValue(i); + writeConfigToAppDb(key, "monPollThreshIsolation", monPollThreshIsolation); + } + else if (fvField(i) == "alias") + { + alias = fvValue(i); + writeConfigToAppDb(key, "alias", alias); + } + else if (fvField(i) == "lanes") + { + lanes = fvValue(i); + writeConfigToAppDb(key, "lanes", lanes); + } + else if (fvField(i) == "isolateStatus") + { + isolateStatus = fvValue(i); + writeConfigToAppDb(key, "isolateStatus", isolateStatus); + } + else + { + field_values.emplace_back(i); + } + } + + for (auto &entry : field_values) + { + writeConfigToAppDb(key, fvField(entry), fvValue(entry)); + } + + } + it = consumer.m_toSync.erase(it); + } +} + +bool FabricMgr::writeConfigToAppDb(const std::string &key, const std::string &field, const std::string &value) +{ + vector fvs; + FieldValueTuple fv(field, value); + fvs.push_back(fv); + if (key == "FABRIC_MONITOR_DATA") + { + m_appFabricMonitorTable.set(key, fvs); + SWSS_LOG_INFO("Write FABRIC_MONITOR:%s %s to %s", key.c_str(), field.c_str(), value.c_str()); + } + else + { + m_appFabricPortTable.set(key, fvs); + SWSS_LOG_INFO("Write FABRIC_PORT:%s %s to %s", key.c_str(), field.c_str(), value.c_str()); + } + + return true; +} + + diff --git a/cfgmgr/fabricmgr.h b/cfgmgr/fabricmgr.h new file mode 100644 
index 0000000000..dbe2fd0d89 --- /dev/null +++ b/cfgmgr/fabricmgr.h @@ -0,0 +1,30 @@ +#pragma once + +#include "dbconnector.h" +#include "orch.h" +#include "producerstatetable.h" + +#include +#include +#include + +namespace swss { + + +class FabricMgr : public Orch +{ +public: + FabricMgr(DBConnector *cfgDb, DBConnector *appDb, const std::vector &tableNames); + + using Orch::doTask; +private: + Table m_cfgFabricMonitorTable; + Table m_cfgFabricPortTable; + Table m_appFabricMonitorTable; + Table m_appFabricPortTable; + + void doTask(Consumer &consumer); + bool writeConfigToAppDb(const std::string &alias, const std::string &field, const std::string &value); +}; + +} diff --git a/cfgmgr/fabricmgrd.cpp b/cfgmgr/fabricmgrd.cpp new file mode 100644 index 0000000000..3d0777e17c --- /dev/null +++ b/cfgmgr/fabricmgrd.cpp @@ -0,0 +1,73 @@ +#include +#include +#include +#include +#include + +#include "exec.h" +#include "fabricmgr.h" +#include "schema.h" +#include "select.h" + +using namespace std; +using namespace swss; + +/* select() function timeout retry time, in millisecond */ +#define SELECT_TIMEOUT 1000 + +int main(int argc, char **argv) +{ + Logger::linkToDbNative("fabricmgrd"); + SWSS_LOG_ENTER(); + + SWSS_LOG_NOTICE("--- Starting fabricmgrd ---"); + + try + { + vector cfg_fabric_tables = { + CFG_FABRIC_MONITOR_DATA_TABLE_NAME, + CFG_FABRIC_MONITOR_PORT_TABLE_NAME, + }; + + DBConnector cfgDb("CONFIG_DB", 0); + DBConnector appDb("APPL_DB", 0); + + FabricMgr fabricmgr(&cfgDb, &appDb, cfg_fabric_tables); + + // TODO: add tables in stateDB which interface depends on to monitor list + vector cfgOrchList = {&fabricmgr}; + + swss::Select s; + for (Orch *o : cfgOrchList) + { + s.addSelectables(o->getSelectables()); + } + + while (true) + { + Selectable *sel; + int ret; + + ret = s.select(&sel, SELECT_TIMEOUT); + if (ret == Select::ERROR) + { + SWSS_LOG_NOTICE("Error: %s!", strerror(errno)); + continue; + } + if (ret == Select::TIMEOUT) + { + fabricmgr.doTask(); + continue; + 
} + + auto *c = (Executor *)sel; + c->execute(); + } + } + catch (const exception &e) + { + SWSS_LOG_ERROR("Runtime error: %s", e.what()); + } + return -1; +} + diff --git a/cfgmgr/intfmgr.cpp b/cfgmgr/intfmgr.cpp index 3651a55150..78c9030807 100644 --- a/cfgmgr/intfmgr.cpp +++ b/cfgmgr/intfmgr.cpp @@ -40,8 +40,7 @@ IntfMgr::IntfMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c m_stateVrfTable(stateDb, STATE_VRF_TABLE_NAME), m_stateIntfTable(stateDb, STATE_INTERFACE_TABLE_NAME), m_appIntfTableProducer(appDb, APP_INTF_TABLE_NAME), - m_neighTable(appDb, APP_NEIGH_TABLE_NAME), - m_appLagTable(appDb, APP_LAG_TABLE_NAME) + m_neighTable(appDb, APP_NEIGH_TABLE_NAME) { auto subscriberStateTable = new swss::SubscriberStateTable(stateDb, STATE_PORT_TABLE_NAME, TableConsumable::DEFAULT_POP_BATCH_SIZE, 100); @@ -350,7 +349,7 @@ std::string IntfMgr::getIntfAdminStatus(const string &alias) } else if (!alias.compare(0, strlen("Po"), "Po")) { - portTable = &m_appLagTable; + portTable = &m_stateLagTable; } else { @@ -382,7 +381,7 @@ std::string IntfMgr::getIntfMtu(const string &alias) } else if (!alias.compare(0, strlen("Po"), "Po")) { - portTable = &m_appLagTable; + portTable = &m_stateLagTable; } else { @@ -447,8 +446,19 @@ std::string IntfMgr::setHostSubIntfMtu(const string &alias, const string &mtu, c } SWSS_LOG_INFO("subintf %s active mtu: %s", alias.c_str(), subifMtu.c_str()); cmd << IP_CMD " link set " << shellquote(alias) << " mtu " << shellquote(subifMtu); - EXEC_WITH_ERROR_THROW(cmd.str(), res); + std::string cmd_str = cmd.str(); + int ret = swss::exec(cmd_str, res); + if (ret && !isIntfStateOk(alias)) + { + // Can happen when a SET notification on the PORT_TABLE in the State DB + // followed by a new DEL notification that send by portmgrd + SWSS_LOG_WARN("Setting mtu to %s netdev failed with cmd:%s, rc:%d, error:%s", alias.c_str(), cmd_str.c_str(), ret, res.c_str()); + } + else if (ret) + { + throw runtime_error(cmd_str + " : " + res); + } return 
subifMtu; } @@ -468,7 +478,7 @@ void IntfMgr::updateSubIntfAdminStatus(const string &alias, const string &admin) continue; } std::vector fvVector; - string subintf_admin = setHostSubIntfAdminStatus(intf, m_subIntfList[intf].adminStatus, admin); + string subintf_admin = setHostSubIntfAdminStatus(intf, m_subIntfList[intf].adminStatus, admin); m_subIntfList[intf].currAdminStatus = subintf_admin; FieldValueTuple fvTuple("admin_status", subintf_admin); fvVector.push_back(fvTuple); @@ -480,13 +490,24 @@ void IntfMgr::updateSubIntfAdminStatus(const string &alias, const string &admin) std::string IntfMgr::setHostSubIntfAdminStatus(const string &alias, const string &admin_status, const string &parent_admin_status) { stringstream cmd; - string res; + string res, cmd_str; if (parent_admin_status == "up" || admin_status == "down") { SWSS_LOG_INFO("subintf %s admin_status: %s", alias.c_str(), admin_status.c_str()); cmd << IP_CMD " link set " << shellquote(alias) << " " << shellquote(admin_status); - EXEC_WITH_ERROR_THROW(cmd.str(), res); + cmd_str = cmd.str(); + int ret = swss::exec(cmd_str, res); + if (ret && !isIntfStateOk(alias)) + { + // Can happen when a DEL notification is sent by portmgrd immediately followed by a new SET notification + SWSS_LOG_WARN("Setting admin_status to %s netdev failed with cmd:%s, rc:%d, error:%s", + alias.c_str(), cmd_str.c_str(), ret, res.c_str()); + } + else if (ret) + { + throw runtime_error(cmd_str + " : " + res); + } return admin_status; } else @@ -535,11 +556,12 @@ void IntfMgr::removeSubIntfState(const string &alias) bool IntfMgr::setIntfGratArp(const string &alias, const string &grat_arp) { /* - * Enable gratuitous ARP by accepting unsolicited ARP replies + * Enable gratuitous ARP by accepting unsolicited ARP replies and untracked neighbor advertisements */ stringstream cmd; string res; string garp_enabled; + int rc; if (grat_arp == "enabled") { @@ -557,8 +579,23 @@ bool IntfMgr::setIntfGratArp(const string &alias, const string &grat_arp) 
cmd << ECHO_CMD << " " << garp_enabled << " > /proc/sys/net/ipv4/conf/" << alias << "/arp_accept"; EXEC_WITH_ERROR_THROW(cmd.str(), res); - SWSS_LOG_INFO("ARP accept set to \"%s\" on interface \"%s\"", grat_arp.c_str(), alias.c_str()); + + cmd.clear(); + cmd.str(std::string()); + + // `accept_untracked_na` is not available in all kernels, so check for it before trying to set it + cmd << "test -f /proc/sys/net/ipv6/conf/" << alias << "/accept_untracked_na"; + rc = swss::exec(cmd.str(), res); + + if (rc == 0) { + cmd.clear(); + cmd.str(std::string()); + cmd << ECHO_CMD << " " << garp_enabled << " > /proc/sys/net/ipv6/conf/" << alias << "/accept_untracked_na"; + EXEC_WITH_ERROR_THROW(cmd.str(), res); + SWSS_LOG_INFO("`accept_untracked_na` set to \"%s\" on interface \"%s\"", grat_arp.c_str(), alias.c_str()); + } + return true; } diff --git a/cfgmgr/intfmgr.h b/cfgmgr/intfmgr.h index 65fd051200..4eca2402ce 100644 --- a/cfgmgr/intfmgr.h +++ b/cfgmgr/intfmgr.h @@ -30,7 +30,7 @@ class IntfMgr : public Orch private: ProducerStateTable m_appIntfTableProducer; Table m_cfgIntfTable, m_cfgVlanIntfTable, m_cfgLagIntfTable, m_cfgLoopbackIntfTable; - Table m_statePortTable, m_stateLagTable, m_stateVlanTable, m_stateVrfTable, m_stateIntfTable, m_appLagTable; + Table m_statePortTable, m_stateLagTable, m_stateVlanTable, m_stateVrfTable, m_stateIntfTable; Table m_neighTable; SubIntfMap m_subIntfList; diff --git a/cfgmgr/intfmgrd.cpp b/cfgmgr/intfmgrd.cpp index 9ed3653333..e414590920 100644 --- a/cfgmgr/intfmgrd.cpp +++ b/cfgmgr/intfmgrd.cpp @@ -16,26 +16,6 @@ using namespace swss; /* select() function timeout retry time, in millisecond */ #define SELECT_TIMEOUT 1000 -/* - * Following global variables are defined here for the purpose of - * using existing Orch class which is to be refactored soon to - * eliminate the direct exposure of the global variables. - * - * Once Orch class refactoring is done, these global variables - * should be removed from here. 
- */ -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; -/* Global database mutex */ -mutex gDbMutex; - int main(int argc, char **argv) { Logger::linkToDbNative("intfmgrd"); @@ -62,8 +42,6 @@ int main(int argc, char **argv) WarmStart::checkWarmStart("intfmgrd", "swss"); IntfMgr intfmgr(&cfgDb, &appDb, &stateDb, cfg_intf_tables); - - // TODO: add tables in stateDB which interface depends on to monitor list std::vector cfgOrchList = {&intfmgr}; swss::Select s; diff --git a/cfgmgr/macsecmgr.cpp b/cfgmgr/macsecmgr.cpp index 0edb86a5af..42e06731cc 100644 --- a/cfgmgr/macsecmgr.cpp +++ b/cfgmgr/macsecmgr.cpp @@ -34,6 +34,20 @@ constexpr std::uint64_t RETRY_TIME = 30; /* retry interval, in millisecond */ constexpr std::uint64_t RETRY_INTERVAL = 100; +/* + * The input cipher_str is the encoded string which can be either of length 66 bytes or 130 bytes. + * + * 66 bytes of length, for 128-byte cipher suite + * - first 2 bytes of the string will be the index from the magic salt string. + * - remaining 64 bytes will be encoded string from the 32-byte plain text CAK input string. + * + * 130 bytes of length, for 256-byte cipher suite + * - first 2 bytes of the string will be the index from the magic salt string. + * - remaining 128 bytes will be encoded string from the 32 byte plain text CAK input string. +*/ +constexpr std::size_t AES_LEN_128_BYTE = 66; +constexpr std::size_t AES_LEN_256_BYTE = 130; + static void lexical_convert(const std::string &policy_str, MACsecMgr::MACsecProfile::Policy & policy) { SWSS_LOG_ENTER(); @@ -78,6 +92,60 @@ static void lexical_convert(const std::string &cipher_str, MACsecMgr::MACsecProf } } + + +/* Decodes a Type 7 encoded input. 
+ * + * The Type 7 encoding consists of two decimal digits(encoding the salt), followed a series of hexadecimal characters, + * two for every byte in the encoded password. An example encoding(of "password") is 044B0A151C36435C0D. + * This has a salt/offset of 4 (04 in the example), and encodes password via 4B0A151C36435C0D. + * + * The algorithm is a straightforward XOR Cipher that relies on the following ascii-encoded 53-byte constant: + * "dsfd;kfoA,.iyewrkldJKDHSUBsgvca69834ncxv9873254k;fg87" + * + * Decode() + * Get the salt index from the first 2 chars + * For each byte in the provided text after the encoded salt: + * j = (salt index + 1) % 53 + * XOR the i'th byte of the password with the j'th byte of the magic constant. + * append to the decoded string. + */ +static std::string decodeKey(const std::string &cipher_str, const MACsecMgr::MACsecProfile::CipherSuite & cipher_suite) +{ + int salts[] = { 0x64, 0x73, 0x66, 0x64, 0x3B, 0x6B, 0x66, 0x6F, 0x41, 0x2C, 0x2E, 0x69, 0x79, 0x65, 0x77, 0x72, 0x6B, 0x6C, 0x64, 0x4A, 0x4B, 0x44, 0x48, 0x53, 0x55, 0x42, 0x73, 0x67, 0x76, 0x63, 0x61, 0x36, 0x39, 0x38, 0x33, 0x34, 0x6E, 0x63, 0x78, 0x76, 0x39, 0x38, 0x37, 0x33, 0x32, 0x35, 0x34, 0x6B, 0x3B, 0x66, 0x67, 0x38, 0x37 }; + + std::string decodedPassword = std::string(""); + std::string cipher_hex_str = std::string(""); + unsigned int hex_int, saltIdx; + + if ((cipher_suite == MACsecMgr::MACsecProfile::CipherSuite::GCM_AES_128) || + (cipher_suite == MACsecMgr::MACsecProfile::CipherSuite::GCM_AES_XPN_128)) + { + if (cipher_str.length() != AES_LEN_128_BYTE) + throw std::invalid_argument("Invalid length for cipher_string : " + cipher_str); + } + else if ((cipher_suite == MACsecMgr::MACsecProfile::CipherSuite::GCM_AES_256) || + (cipher_suite == MACsecMgr::MACsecProfile::CipherSuite::GCM_AES_XPN_256)) + { + if (cipher_str.length() != AES_LEN_256_BYTE) + throw std::invalid_argument("Invalid length for cipher_string : " + cipher_str); + } + + // Get the salt index from the 
cipher_str + saltIdx = (unsigned int) stoi(cipher_str.substr(0,2)); + + // Convert the hex string (eg: "aabbcc") to hex integers (eg: 0xaa, 0xbb, 0xcc) taking a substring of 2 chars at a time + // and do xor with the magic salt string + for (size_t i = 2; i < cipher_str.length(); i += 2) { + std::stringstream ss; + ss << std::hex << cipher_str.substr(i,2); + ss >> hex_int; + decodedPassword += (char)(hex_int ^ salts[saltIdx++ % (sizeof(salts)/sizeof(salts[0]))]); + } + + return decodedPassword; +} + template static bool get_value( const MACsecMgr::TaskArgs & ta, @@ -699,7 +767,7 @@ bool MACsecMgr::configureMACsec( port_name, network_id, "mka_cak", - profile.primary_cak); + decodeKey(profile.primary_cak, profile.cipher_suite)); wpa_cli_exec_and_check( session.sock, diff --git a/cfgmgr/macsecmgrd.cpp b/cfgmgr/macsecmgrd.cpp index ff7bda9087..263c5b4395 100644 --- a/cfgmgr/macsecmgrd.cpp +++ b/cfgmgr/macsecmgrd.cpp @@ -26,26 +26,6 @@ using namespace swss; MacAddress gMacAddress; -/* - * Following global variables are defined here for the purpose of - * using existing Orch class which is to be refactored soon to - * eliminate the direct exposure of the global variables. - * - * Once Orch class refactoring is done, these global variables - * should be removed from here. 
- */ -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; -/* Global database mutex */ -mutex gDbMutex; - static bool received_sigterm = false; static struct sigaction old_sigaction; diff --git a/cfgmgr/natmgr.cpp b/cfgmgr/natmgr.cpp index 43077fbe32..d903544d9b 100644 --- a/cfgmgr/natmgr.cpp +++ b/cfgmgr/natmgr.cpp @@ -6129,7 +6129,7 @@ void NatMgr::doStaticNatTask(Consumer &consumer) else { SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); - SWSS_LOG_DEBUG("%s", (dumpTuple(consumer, t)).c_str()); + SWSS_LOG_DEBUG("%s", (consumer.dumpTuple(t)).c_str()); it = consumer.m_toSync.erase(it); } } @@ -6472,7 +6472,7 @@ void NatMgr::doStaticNaptTask(Consumer &consumer) else { SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); - SWSS_LOG_DEBUG("%s", (dumpTuple(consumer, t)).c_str()); + SWSS_LOG_DEBUG("%s", (consumer.dumpTuple(t)).c_str()); it = consumer.m_toSync.erase(it); } } @@ -6859,7 +6859,7 @@ void NatMgr::doNatPoolTask(Consumer &consumer) else { SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); - SWSS_LOG_DEBUG("%s", (dumpTuple(consumer, t)).c_str()); + SWSS_LOG_DEBUG("%s", (consumer.dumpTuple(t)).c_str()); it = consumer.m_toSync.erase(it); } } @@ -7095,7 +7095,7 @@ void NatMgr::doNatBindingTask(Consumer &consumer) else { SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); - SWSS_LOG_DEBUG("%s", (dumpTuple(consumer, t)).c_str()); + SWSS_LOG_DEBUG("%s", (consumer.dumpTuple(t)).c_str()); it = consumer.m_toSync.erase(it); } } @@ -7873,7 +7873,7 @@ void NatMgr::doNatAclTableTask(Consumer &consumer) else { SWSS_LOG_INFO("Unknown operation type %s", op.c_str()); - SWSS_LOG_DEBUG("%s", (dumpTuple(consumer, t)).c_str()); + SWSS_LOG_DEBUG("%s", (consumer.dumpTuple(t)).c_str()); it = consumer.m_toSync.erase(it); } } @@ -8137,7 
+8137,7 @@ void NatMgr::doNatAclRuleTask(Consumer &consumer) else { SWSS_LOG_INFO("Unknown operation type %s", op.c_str()); - SWSS_LOG_DEBUG("%s", (dumpTuple(consumer, t)).c_str()); + SWSS_LOG_DEBUG("%s", (consumer.dumpTuple(t)).c_str()); it = consumer.m_toSync.erase(it); } } diff --git a/cfgmgr/natmgrd.cpp b/cfgmgr/natmgrd.cpp index db5a77f9a6..0e3a52fadc 100644 --- a/cfgmgr/natmgrd.cpp +++ b/cfgmgr/natmgrd.cpp @@ -39,24 +39,6 @@ using namespace swss; /* select() function timeout retry time, in millisecond */ #define SELECT_TIMEOUT 1000 -/* - * Following global variables are defined here for the purpose of - * using existing Orch class which is to be refactored soon to - * eliminate the direct exposure of the global variables. - * - * Once Orch class refactoring is done, these global variables - * should be removed from here. - */ -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; -mutex gDbMutex; NatMgr *natmgr = NULL; NotificationConsumer *timeoutNotificationsConsumer = NULL; diff --git a/cfgmgr/nbrmgrd.cpp b/cfgmgr/nbrmgrd.cpp index 338d8d9d0d..2d325551a2 100644 --- a/cfgmgr/nbrmgrd.cpp +++ b/cfgmgr/nbrmgrd.cpp @@ -20,26 +20,6 @@ using namespace swss; /* select() function timeout retry time, in millisecond */ #define SELECT_TIMEOUT 1000 -/* - * Following global variables are defined here for the purpose of - * using existing Orch class which is to be refactored soon to - * eliminate the direct exposure of the global variables. - * - * Once Orch class refactoring is done, these global variables - * should be removed from here. 
- */ -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; -/* Global database mutex */ -mutex gDbMutex; - int main(int argc, char **argv) { Logger::linkToDbNative("nbrmgrd"); diff --git a/cfgmgr/portmgr.cpp b/cfgmgr/portmgr.cpp index 38c0418a7a..19ba41dc90 100644 --- a/cfgmgr/portmgr.cpp +++ b/cfgmgr/portmgr.cpp @@ -14,8 +14,10 @@ using namespace swss; PortMgr::PortMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, const vector &tableNames) : Orch(cfgDb, tableNames), m_cfgPortTable(cfgDb, CFG_PORT_TABLE_NAME), + m_cfgSendToIngressPortTable(cfgDb, CFG_SEND_TO_INGRESS_PORT_TABLE_NAME), m_cfgLagMemberTable(cfgDb, CFG_LAG_MEMBER_TABLE_NAME), m_statePortTable(stateDb, STATE_PORT_TABLE_NAME), + m_appSendToIngressPortTable(appDb, APP_SEND_TO_INGRESS_PORT_TABLE_NAME), m_appPortTable(appDb, APP_PORT_TABLE_NAME) { } @@ -23,27 +25,55 @@ PortMgr::PortMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c bool PortMgr::setPortMtu(const string &alias, const string &mtu) { stringstream cmd; - string res; + string res, cmd_str; // ip link set dev mtu cmd << IP_CMD << " link set dev " << shellquote(alias) << " mtu " << shellquote(mtu); - EXEC_WITH_ERROR_THROW(cmd.str(), res); - - // Set the port MTU in application database to update both - // the port MTU and possibly the port based router interface MTU - return writeConfigToAppDb(alias, "mtu", mtu); + cmd_str = cmd.str(); + int ret = swss::exec(cmd_str, res); + if (!ret) + { + // Set the port MTU in application database to update both + // the port MTU and possibly the port based router interface MTU + return writeConfigToAppDb(alias, "mtu", mtu); + } + else if (!isPortStateOk(alias)) + { + // Can happen when a DEL notification is sent by portmgrd immediately followed by a new SET 
notif + SWSS_LOG_WARN("Setting mtu to alias:%s netdev failed with cmd:%s, rc:%d, error:%s", alias.c_str(), cmd_str.c_str(), ret, res.c_str()); + return false; + } + else + { + throw runtime_error(cmd_str + " : " + res); + } + return true; } bool PortMgr::setPortAdminStatus(const string &alias, const bool up) { stringstream cmd; - string res; + string res, cmd_str; // ip link set dev [up|down] cmd << IP_CMD << " link set dev " << shellquote(alias) << (up ? " up" : " down"); - EXEC_WITH_ERROR_THROW(cmd.str(), res); - - return writeConfigToAppDb(alias, "admin_status", (up ? "up" : "down")); + cmd_str = cmd.str(); + int ret = swss::exec(cmd_str, res); + if (!ret) + { + return writeConfigToAppDb(alias, "admin_status", (up ? "up" : "down")); + } + else if (!isPortStateOk(alias)) + { + // Can happen when a DEL notification is sent by portmgrd immediately followed by a new SET notification + SWSS_LOG_WARN("Setting admin_status to alias:%s netdev failed with cmd%s, rc:%d, error:%s", alias.c_str(), cmd_str.c_str(), ret, res.c_str()); + return false; + } + else + { + throw runtime_error(cmd_str + " : " + res); + } + return true; } bool PortMgr::isPortStateOk(const string &alias) @@ -65,11 +95,49 @@ bool PortMgr::isPortStateOk(const string &alias) return false; } +void PortMgr::doSendToIngressPortTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + + string alias = kfvKey(t); + string op = kfvOp(t); + auto fvs = kfvFieldsValues(t); + + if (op == SET_COMMAND) + { + SWSS_LOG_NOTICE("Add SendToIngress Port: %s", + alias.c_str()); + m_appSendToIngressPortTable.set(alias, fvs); + } + else if (op == DEL_COMMAND) + { + SWSS_LOG_NOTICE("Removing SendToIngress Port: %s", + alias.c_str()); + m_appSendToIngressPortTable.del(alias); + } + else + { + SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); + } + it = consumer.m_toSync.erase(it); + } + +} + void 
PortMgr::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); auto table = consumer.getTableName(); + if (table == CFG_SEND_TO_INGRESS_PORT_TABLE_NAME) + { + doSendToIngressPortTask(consumer); + return; + } auto it = consumer.m_toSync.begin(); while (it != consumer.m_toSync.end()) @@ -118,16 +186,15 @@ void PortMgr::doTask(Consumer &consumer) { admin_status = fvValue(i); } - else + else { field_values.emplace_back(i); } } - for (auto &entry : field_values) + if (field_values.size()) { - writeConfigToAppDb(alias, fvField(entry), fvValue(entry)); - SWSS_LOG_NOTICE("Configure %s %s to %s", alias.c_str(), fvField(entry).c_str(), fvValue(entry).c_str()); + writeConfigToAppDb(alias, field_values); } if (!portOk) @@ -136,6 +203,7 @@ void PortMgr::doTask(Consumer &consumer) writeConfigToAppDb(alias, "mtu", mtu); writeConfigToAppDb(alias, "admin_status", admin_status); + /* Retry setting these params after the netdev is created */ field_values.clear(); field_values.emplace_back("mtu", mtu); field_values.emplace_back("admin_status", admin_status); @@ -176,3 +244,9 @@ bool PortMgr::writeConfigToAppDb(const std::string &alias, const std::string &fi return true; } + +bool PortMgr::writeConfigToAppDb(const std::string &alias, std::vector &field_values) +{ + m_appPortTable.set(alias, field_values); + return true; +} diff --git a/cfgmgr/portmgr.h b/cfgmgr/portmgr.h index dde346bfe1..3d6f0365bf 100644 --- a/cfgmgr/portmgr.h +++ b/cfgmgr/portmgr.h @@ -22,14 +22,18 @@ class PortMgr : public Orch using Orch::doTask; private: Table m_cfgPortTable; + Table m_cfgSendToIngressPortTable; Table m_cfgLagMemberTable; Table m_statePortTable; ProducerStateTable m_appPortTable; + ProducerStateTable m_appSendToIngressPortTable; std::set m_portList; void doTask(Consumer &consumer); + void doSendToIngressPortTask(Consumer &consumer); bool writeConfigToAppDb(const std::string &alias, const std::string &field, const std::string &value); + bool writeConfigToAppDb(const std::string &alias, std::vector 
&field_values); bool setPortMtu(const std::string &alias, const std::string &mtu); bool setPortAdminStatus(const std::string &alias, const bool up); bool isPortStateOk(const std::string &alias); diff --git a/cfgmgr/portmgrd.cpp b/cfgmgr/portmgrd.cpp index 180bbc1d63..4d04b42d38 100644 --- a/cfgmgr/portmgrd.cpp +++ b/cfgmgr/portmgrd.cpp @@ -15,26 +15,6 @@ using namespace swss; /* select() function timeout retry time, in millisecond */ #define SELECT_TIMEOUT 1000 -/* - * Following global variables are defined here for the purpose of - * using existing Orch class which is to be refactored soon to - * eliminate the direct exposure of the global variables. - * - * Once Orch class refactoring is done, these global variables - * should be removed from here. - */ -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; -/* Global database mutex */ -mutex gDbMutex; - int main(int argc, char **argv) { Logger::linkToDbNative("portmgrd"); @@ -46,6 +26,7 @@ int main(int argc, char **argv) { vector cfg_port_tables = { CFG_PORT_TABLE_NAME, + CFG_SEND_TO_INGRESS_PORT_TABLE_NAME, }; DBConnector cfgDb("CONFIG_DB", 0); @@ -53,8 +34,6 @@ int main(int argc, char **argv) DBConnector stateDb("STATE_DB", 0); PortMgr portmgr(&cfgDb, &appDb, &stateDb, cfg_port_tables); - - // TODO: add tables in stateDB which interface depends on to monitor list vector cfgOrchList = {&portmgr}; swss::Select s; diff --git a/cfgmgr/sflowmgr.cpp b/cfgmgr/sflowmgr.cpp index bb732e83d5..122ffc0780 100644 --- a/cfgmgr/sflowmgr.cpp +++ b/cfgmgr/sflowmgr.cpp @@ -10,27 +10,42 @@ using namespace std; using namespace swss; -map sflowSpeedRateInitMap = -{ - {SFLOW_SAMPLE_RATE_KEY_400G, SFLOW_SAMPLE_RATE_VALUE_400G}, - {SFLOW_SAMPLE_RATE_KEY_200G, SFLOW_SAMPLE_RATE_VALUE_200G}, - 
{SFLOW_SAMPLE_RATE_KEY_100G, SFLOW_SAMPLE_RATE_VALUE_100G}, - {SFLOW_SAMPLE_RATE_KEY_50G, SFLOW_SAMPLE_RATE_VALUE_50G}, - {SFLOW_SAMPLE_RATE_KEY_40G, SFLOW_SAMPLE_RATE_VALUE_40G}, - {SFLOW_SAMPLE_RATE_KEY_25G, SFLOW_SAMPLE_RATE_VALUE_25G}, - {SFLOW_SAMPLE_RATE_KEY_10G, SFLOW_SAMPLE_RATE_VALUE_10G}, - {SFLOW_SAMPLE_RATE_KEY_1G, SFLOW_SAMPLE_RATE_VALUE_1G} -}; - -SflowMgr::SflowMgr(DBConnector *cfgDb, DBConnector *appDb, const vector &tableNames) : - Orch(cfgDb, tableNames), - m_cfgSflowTable(cfgDb, CFG_SFLOW_TABLE_NAME), - m_cfgSflowSessionTable(cfgDb, CFG_SFLOW_SESSION_TABLE_NAME), +SflowMgr::SflowMgr(DBConnector *appDb, const std::vector& tableNames) : + Orch(tableNames), m_appSflowTable(appDb, APP_SFLOW_TABLE_NAME), m_appSflowSessionTable(appDb, APP_SFLOW_SESSION_TABLE_NAME) { m_intfAllConf = true; m_gEnable = false; + m_gDirection = "rx"; + m_intfAllDir = "rx"; +} + +void SflowMgr::readPortConfig() +{ + auto consumer_it = m_consumerMap.find(CFG_PORT_TABLE_NAME); + if (consumer_it != m_consumerMap.end()) + { + consumer_it->second->drain(); + SWSS_LOG_NOTICE("Port Configuration Read.."); + } + else + { + SWSS_LOG_ERROR("Consumer object for PORT_TABLE not found"); + } +} + +bool SflowMgr::isPortEnabled(const std::string& alias) +{ + /* Checks if the sflow is enabled on the port */ + auto it = m_sflowPortConfMap.find(alias); + if (it == m_sflowPortConfMap.end()) + { + return false; + } + bool local_admin = it->second.local_admin_cfg; + bool status = it->second.admin == "up" ? 
true : false; + return m_gEnable && (m_intfAllConf || (local_admin && status)); } void SflowMgr::sflowHandleService(bool enable) @@ -69,7 +84,6 @@ void SflowMgr::sflowUpdatePortInfo(Consumer &consumer) while (it != consumer.m_toSync.end()) { KeyOpFieldsValuesTuple t = it->second; - string key = kfvKey(t); string op = kfvOp(t); auto values = kfvFieldsValues(t); @@ -85,14 +99,17 @@ void SflowMgr::sflowUpdatePortInfo(Consumer &consumer) new_port = true; port_info.local_rate_cfg = false; port_info.local_admin_cfg = false; - port_info.speed = SFLOW_ERROR_SPEED_STR; + port_info.speed = ERROR_SPEED; + port_info.oper_speed = NA_SPEED; + port_info.local_dir_cfg = false; port_info.rate = ""; port_info.admin = ""; + port_info.dir = ""; m_sflowPortConfMap[key] = port_info; } - bool speed_change = false; - string new_speed = SFLOW_ERROR_SPEED_STR; + bool rate_update = false; + string new_speed = ERROR_SPEED; for (auto i : values) { if (fvField(i) == "speed") @@ -103,16 +120,26 @@ void SflowMgr::sflowUpdatePortInfo(Consumer &consumer) if (m_sflowPortConfMap[key].speed != new_speed) { m_sflowPortConfMap[key].speed = new_speed; - speed_change = true; + /* if oper_speed is set, no need to write to APP_DB */ + if (m_sflowPortConfMap[key].oper_speed == NA_SPEED) + { + rate_update = true; + } + } + + string def_dir = "rx"; + if (m_sflowPortConfMap[key].dir != def_dir && !m_sflowPortConfMap[key].local_dir_cfg) + { + m_sflowPortConfMap[key].dir = def_dir; } - if (m_gEnable && m_intfAllConf) + if (isPortEnabled(key)) { - // If the Local rate Conf is already present, dont't override it even though the speed is changed - if (new_port || (speed_change && !m_sflowPortConfMap[key].local_rate_cfg)) + // If the Local rate conf is already present, dont't override it even though the speed is changed + if (new_port || (rate_update && !m_sflowPortConfMap[key].local_rate_cfg)) { vector fvs; - sflowGetGlobalInfo(fvs, m_sflowPortConfMap[key].speed); + sflowGetGlobalInfo(fvs, key, 
m_sflowPortConfMap[key].dir); m_appSflowSessionTable.set(key, fvs); } } @@ -123,7 +150,8 @@ void SflowMgr::sflowUpdatePortInfo(Consumer &consumer) if (sflowPortConf != m_sflowPortConfMap.end()) { bool local_cfg = m_sflowPortConfMap[key].local_rate_cfg || - m_sflowPortConfMap[key].local_admin_cfg; + m_sflowPortConfMap[key].local_admin_cfg || + m_sflowPortConfMap[key].local_dir_cfg; m_sflowPortConfMap.erase(key); if ((m_intfAllConf && m_gEnable) || local_cfg) @@ -136,14 +164,67 @@ void SflowMgr::sflowUpdatePortInfo(Consumer &consumer) } } -void SflowMgr::sflowHandleSessionAll(bool enable) +void SflowMgr::sflowProcessOperSpeed(Consumer &consumer) +{ + auto it = consumer.m_toSync.begin(); + + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + string alias = kfvKey(t); + string op = kfvOp(t); + auto values = kfvFieldsValues(t); + string oper_speed = ""; + bool rate_update = false; + + for (auto i : values) + { + if (fvField(i) == "speed") + { + oper_speed = fvValue(i); + } + } + + if (m_sflowPortConfMap.find(alias) != m_sflowPortConfMap.end() && op == SET_COMMAND) + { + SWSS_LOG_DEBUG("STATE_DB update: iface: %s, oper_speed: %s, cfg_speed: %s, new_speed: %s", + alias.c_str(), m_sflowPortConfMap[alias].oper_speed.c_str(), + m_sflowPortConfMap[alias].speed.c_str(), + oper_speed.c_str()); + /* oper_speed is updated by orchagent if the vendor supports and oper status is up */ + if (m_sflowPortConfMap[alias].oper_speed != oper_speed && !oper_speed.empty()) + { + rate_update = true; + if (oper_speed == m_sflowPortConfMap[alias].speed && m_sflowPortConfMap[alias].oper_speed == NA_SPEED) + { + /* if oper_speed is equal to cfg_speed, avoid the write to APP_DB + Can happen if auto-neg is not set */ + rate_update = false; + } + m_sflowPortConfMap[alias].oper_speed = oper_speed; + } + + if (isPortEnabled(alias) && rate_update && !m_sflowPortConfMap[alias].local_rate_cfg) + { + vector fvs; + sflowGetGlobalInfo(fvs, alias, 
m_sflowPortConfMap[alias].dir); + m_appSflowSessionTable.set(alias, fvs); + SWSS_LOG_NOTICE("Default sampling rate for %s updated to %s", alias.c_str(), findSamplingRate(alias).c_str()); + } + } + /* Do nothing for DEL as the SflowPortConfMap will already be cleared by the DEL from CONFIG_DB */ + it = consumer.m_toSync.erase(it); + } +} + +void SflowMgr::sflowHandleSessionAll(bool enable, string direction) { for (auto it: m_sflowPortConfMap) { if (enable) { vector fvs; - if (it.second.local_rate_cfg || it.second.local_admin_cfg) + if (it.second.local_rate_cfg || it.second.local_admin_cfg || it.second.local_dir_cfg) { sflowGetPortInfo(fvs, it.second); /* Use global admin state if there is not a local one */ @@ -151,10 +232,16 @@ void SflowMgr::sflowHandleSessionAll(bool enable) FieldValueTuple fv1("admin_state", "up"); fvs.push_back(fv1); } + + /* Use global sample direction state if there is not a local one */ + if (!it.second.local_dir_cfg) { + FieldValueTuple fv2("sample_direction", direction); + fvs.push_back(fv2); + } } else { - sflowGetGlobalInfo(fvs, it.second.speed); + sflowGetGlobalInfo(fvs, it.first, direction); } m_appSflowSessionTable.set(it.first, fvs); } @@ -169,7 +256,7 @@ void SflowMgr::sflowHandleSessionLocal(bool enable) { for (auto it: m_sflowPortConfMap) { - if (it.second.local_admin_cfg || it.second.local_rate_cfg) + if (it.second.local_admin_cfg || it.second.local_rate_cfg || it.second.local_dir_cfg) { vector fvs; sflowGetPortInfo(fvs, it.second); @@ -185,22 +272,16 @@ void SflowMgr::sflowHandleSessionLocal(bool enable) } } -void SflowMgr::sflowGetGlobalInfo(vector &fvs, string speed) +void SflowMgr::sflowGetGlobalInfo(vector &fvs, const string& alias, const string& dir) { - string rate; FieldValueTuple fv1("admin_state", "up"); fvs.push_back(fv1); - if (speed != SFLOW_ERROR_SPEED_STR && sflowSpeedRateInitMap.find(speed) != sflowSpeedRateInitMap.end()) - { - rate = sflowSpeedRateInitMap[speed]; - } - else - { - rate = SFLOW_ERROR_SPEED_STR; - } 
- FieldValueTuple fv2("sample_rate",rate); + FieldValueTuple fv2("sample_rate", findSamplingRate(alias)); fvs.push_back(fv2); + + FieldValueTuple fv3("sample_direction",dir); + fvs.push_back(fv3); } void SflowMgr::sflowGetPortInfo(vector &fvs, SflowPortInfo &local_info) @@ -213,6 +294,12 @@ void SflowMgr::sflowGetPortInfo(vector &fvs, SflowPortInfo &loc FieldValueTuple fv2("sample_rate", local_info.rate); fvs.push_back(fv2); + + if (local_info.local_dir_cfg) + { + FieldValueTuple fv3("sample_direction", local_info.dir); + fvs.push_back(fv3); + } } void SflowMgr::sflowCheckAndFillValues(string alias, vector &values, @@ -221,6 +308,7 @@ void SflowMgr::sflowCheckAndFillValues(string alias, vector &va string rate; bool admin_present = false; bool rate_present = false; + bool dir_present = false; for (auto i : values) { @@ -240,6 +328,14 @@ void SflowMgr::sflowCheckAndFillValues(string alias, vector &va FieldValueTuple fv(fvField(i), fvValue(i)); fvs.push_back(fv); } + if (fvField(i) == "sample_direction") + { + dir_present = true; + m_sflowPortConfMap[alias].dir = fvValue(i); + m_sflowPortConfMap[alias].local_dir_cfg = true; + FieldValueTuple fv(fvField(i), fvValue(i)); + fvs.push_back(fv); + } if (fvField(i) == "NULL") { continue; @@ -254,17 +350,7 @@ void SflowMgr::sflowCheckAndFillValues(string alias, vector &va if (m_sflowPortConfMap[alias].rate == "" || m_sflowPortConfMap[alias].local_rate_cfg) { - string speed = m_sflowPortConfMap[alias].speed; - - if (speed != SFLOW_ERROR_SPEED_STR && sflowSpeedRateInitMap.find(speed) != sflowSpeedRateInitMap.end()) - { - rate = sflowSpeedRateInitMap[speed]; - } - else - { - rate = SFLOW_ERROR_SPEED_STR; - } - m_sflowPortConfMap[alias].rate = rate; + m_sflowPortConfMap[alias].rate = findSamplingRate(alias); } m_sflowPortConfMap[alias].local_rate_cfg = false; FieldValueTuple fv("sample_rate", m_sflowPortConfMap[alias].rate); @@ -282,6 +368,36 @@ void SflowMgr::sflowCheckAndFillValues(string alias, vector &va FieldValueTuple 
fv("admin_state", m_sflowPortConfMap[alias].admin); fvs.push_back(fv); } + + if (!dir_present) + { + if (m_sflowPortConfMap[alias].dir == "") + { + /* By default direction is set to global, if not set explicitly */ + m_sflowPortConfMap[alias].dir = m_gDirection; + } + m_sflowPortConfMap[alias].local_dir_cfg = false; + FieldValueTuple fv("sample_direction", m_sflowPortConfMap[alias].dir); + fvs.push_back(fv); + } +} + +string SflowMgr::findSamplingRate(const string& alias) +{ + /* Default sampling rate is equal to the oper_speed, if present + if oper_speed is not found, use the configured speed */ + if (m_sflowPortConfMap.find(alias) == m_sflowPortConfMap.end()) + { + SWSS_LOG_ERROR("%s not found in port configuration map", alias.c_str()); + return ERROR_SPEED; + } + string oper_speed = m_sflowPortConfMap[alias].oper_speed; + string cfg_speed = m_sflowPortConfMap[alias].speed; + if (!oper_speed.empty() && oper_speed != NA_SPEED) + { + return oper_speed; + } + return cfg_speed; } void SflowMgr::doTask(Consumer &consumer) @@ -295,6 +411,11 @@ void SflowMgr::doTask(Consumer &consumer) sflowUpdatePortInfo(consumer); return; } + else if (table == STATE_PORT_TABLE_NAME) + { + sflowProcessOperSpeed(consumer); + return; + } auto it = consumer.m_toSync.begin(); while (it != consumer.m_toSync.end()) @@ -309,51 +430,92 @@ void SflowMgr::doTask(Consumer &consumer) { if (table == CFG_SFLOW_TABLE_NAME) { + SWSS_LOG_DEBUG("Current Cfg admin %d dir %s ", (unsigned int)m_gEnable, m_gDirection.c_str()); + bool enable = false; + string direction = "rx"; for (auto i : values) { if (fvField(i) == "admin_state") { - bool enable = false; if (fvValue(i) == "up") { enable = true; } - if (enable == m_gEnable) - { - break; - } - m_gEnable = enable; - sflowHandleService(enable); - if (m_intfAllConf) - { - sflowHandleSessionAll(enable); - } - sflowHandleSessionLocal(enable); } + else if (fvField(i) == "sample_direction") + { + direction = fvValue(i); + } + } + + if (direction != m_gDirection) + 
{ + m_gDirection = direction; } + + if (m_gEnable != enable) + { + m_gEnable = enable; + sflowHandleService(enable); + } + + if (m_intfAllConf) + { + sflowHandleSessionAll(m_gEnable, m_gDirection); + } + + sflowHandleSessionLocal(m_gEnable); m_appSflowTable.set(key, values); + + SWSS_LOG_DEBUG("New config admin %d dir %s ", (unsigned int)m_gEnable, m_gDirection.c_str()); } else if (table == CFG_SFLOW_SESSION_TABLE_NAME) { if (key == "all") { + SWSS_LOG_DEBUG("current config gAdmin %d dir %s intfAllEna %d intfAllDir %s", + (unsigned int)m_gEnable, m_gDirection.c_str(), + (unsigned int)m_intfAllConf, m_intfAllDir.c_str()); + + string direction = m_intfAllDir; + bool enable = m_intfAllConf; for (auto i : values) { if (fvField(i) == "admin_state") { - bool enable = false; - if (fvValue(i) == "up") { enable = true; } - if ((enable != m_intfAllConf) && (m_gEnable)) + else if (fvValue(i) == "down") { - sflowHandleSessionAll(enable); + enable = false; } - m_intfAllConf = enable; } + else if (fvField(i) == "sample_direction") + { + direction = fvValue(i); + } + } + + if (m_intfAllDir != direction) + { + m_intfAllDir = direction; + } + + if (enable != m_intfAllConf) + { + m_intfAllConf = enable; } + + if (m_gEnable) + { + sflowHandleSessionAll(m_intfAllConf, m_intfAllDir); + } + + SWSS_LOG_DEBUG("New config gAdmin %d dir %s intfAllEna %d intfAllDir %s", + (unsigned int)m_gEnable, m_gDirection.c_str(), + (unsigned int)m_intfAllConf, m_intfAllDir.c_str()); } else { @@ -380,10 +542,11 @@ void SflowMgr::doTask(Consumer &consumer) if (m_gEnable) { sflowHandleService(false); - sflowHandleSessionAll(false); + sflowHandleSessionAll(false, ""); sflowHandleSessionLocal(false); } m_gEnable = false; + m_gDirection = "rx"; m_appSflowTable.del(key); } else if (table == CFG_SFLOW_SESSION_TABLE_NAME) @@ -394,7 +557,7 @@ void SflowMgr::doTask(Consumer &consumer) { if (m_gEnable) { - sflowHandleSessionAll(true); + sflowHandleSessionAll(true, m_gDirection); } } m_intfAllConf = true; @@ -404,14 
+567,16 @@ void SflowMgr::doTask(Consumer &consumer) m_appSflowSessionTable.del(key); m_sflowPortConfMap[key].local_rate_cfg = false; m_sflowPortConfMap[key].local_admin_cfg = false; + m_sflowPortConfMap[key].local_dir_cfg = false; m_sflowPortConfMap[key].rate = ""; m_sflowPortConfMap[key].admin = ""; + m_sflowPortConfMap[key].dir = ""; /* If Global configured, set global session on port after local config is deleted */ if (m_intfAllConf) { vector fvs; - sflowGetGlobalInfo(fvs, m_sflowPortConfMap[key].speed); + sflowGetGlobalInfo(fvs, key, m_intfAllDir); m_appSflowSessionTable.set(key,fvs); } } diff --git a/cfgmgr/sflowmgr.h b/cfgmgr/sflowmgr.h index eb35ec2125..5cdc231d79 100644 --- a/cfgmgr/sflowmgr.h +++ b/cfgmgr/sflowmgr.h @@ -10,33 +10,19 @@ namespace swss { -#define SFLOW_SAMPLE_RATE_KEY_400G "400000" -#define SFLOW_SAMPLE_RATE_KEY_200G "200000" -#define SFLOW_SAMPLE_RATE_KEY_100G "100000" -#define SFLOW_SAMPLE_RATE_KEY_50G "50000" -#define SFLOW_SAMPLE_RATE_KEY_40G "40000" -#define SFLOW_SAMPLE_RATE_KEY_25G "25000" -#define SFLOW_SAMPLE_RATE_KEY_10G "10000" -#define SFLOW_SAMPLE_RATE_KEY_1G "1000" - -#define SFLOW_SAMPLE_RATE_VALUE_400G "400000" -#define SFLOW_SAMPLE_RATE_VALUE_200G "200000" -#define SFLOW_SAMPLE_RATE_VALUE_100G "100000" -#define SFLOW_SAMPLE_RATE_VALUE_50G "50000" -#define SFLOW_SAMPLE_RATE_VALUE_40G "40000" -#define SFLOW_SAMPLE_RATE_VALUE_25G "25000" -#define SFLOW_SAMPLE_RATE_VALUE_10G "10000" -#define SFLOW_SAMPLE_RATE_VALUE_1G "1000" - -#define SFLOW_ERROR_SPEED_STR "error" +#define ERROR_SPEED "error" +#define NA_SPEED "N/A" struct SflowPortInfo { bool local_rate_cfg; bool local_admin_cfg; + bool local_dir_cfg; std::string speed; + std::string oper_speed; std::string rate; std::string admin; + std::string dir; }; /* Port to Local config map */ @@ -45,26 +31,30 @@ typedef std::map SflowPortConfMap; class SflowMgr : public Orch { public: - SflowMgr(DBConnector *cfgDb, DBConnector *appDb, const std::vector &tableNames); + 
SflowMgr(DBConnector *appDb, const std::vector& tableNames); + void readPortConfig(); using Orch::doTask; private: - Table m_cfgSflowTable; - Table m_cfgSflowSessionTable; ProducerStateTable m_appSflowTable; ProducerStateTable m_appSflowSessionTable; - SflowPortConfMap m_sflowPortConfMap; + SflowPortConfMap m_sflowPortConfMap; bool m_intfAllConf; bool m_gEnable; + std::string m_intfAllDir; + std::string m_gDirection; void doTask(Consumer &consumer); void sflowHandleService(bool enable); void sflowUpdatePortInfo(Consumer &consumer); - void sflowHandleSessionAll(bool enable); + void sflowProcessOperSpeed(Consumer &consumer); + void sflowHandleSessionAll(bool enable, std::string direction); void sflowHandleSessionLocal(bool enable); void sflowCheckAndFillValues(std::string alias, std::vector &values, std::vector &fvs); void sflowGetPortInfo(std::vector &fvs, SflowPortInfo &local_info); - void sflowGetGlobalInfo(std::vector &fvs, std::string speed); + void sflowGetGlobalInfo(std::vector &fvs, const std::string& alias, const std::string& direction); + bool isPortEnabled(const std::string& alias); + std::string findSamplingRate(const std::string& speed); }; } diff --git a/cfgmgr/sflowmgrd.cpp b/cfgmgr/sflowmgrd.cpp index 7de5f15a2d..2eef82bac7 100644 --- a/cfgmgr/sflowmgrd.cpp +++ b/cfgmgr/sflowmgrd.cpp @@ -15,26 +15,6 @@ using namespace swss; /* select() function timeout retry time, in millisecond */ #define SELECT_TIMEOUT 1000 -/* - * Following global variables are defined here for the purpose of - * using existing Orch class which is to be refactored soon to - * eliminate the direct exposure of the global variables. - * - * Once Orch class refactoring is done, these global variables - * should be removed from here. 
- */ -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; -/* Global database mutex */ -mutex gDbMutex; - int main(int argc, char **argv) { Logger::linkToDbNative("sflowmgrd"); @@ -44,21 +24,31 @@ int main(int argc, char **argv) try { - vector cfg_sflow_tables = { - CFG_SFLOW_TABLE_NAME, - CFG_SFLOW_SESSION_TABLE_NAME, - CFG_PORT_TABLE_NAME - }; - DBConnector cfgDb("CONFIG_DB", 0); DBConnector appDb("APPL_DB", 0); + DBConnector stateDb("STATE_DB", 0); + + TableConnector conf_port_table(&cfgDb, CFG_PORT_TABLE_NAME); + TableConnector state_port_table(&stateDb, STATE_PORT_TABLE_NAME); + TableConnector conf_sflow_table(&cfgDb, CFG_SFLOW_TABLE_NAME); + TableConnector conf_sflow_session_table(&cfgDb, CFG_SFLOW_SESSION_TABLE_NAME); + + vector sflow_tables = { + conf_port_table, + state_port_table, + conf_sflow_table, + conf_sflow_session_table + }; - SflowMgr sflowmgr(&cfgDb, &appDb, cfg_sflow_tables); + SflowMgr sflowmgr(&appDb, sflow_tables); + /* During process startup, the ordering of config_db followed by state_db notifications cannot be guaranteed + and so handle the config events manually */ + sflowmgr.readPortConfig(); - vector cfgOrchList = {&sflowmgr}; + vector orchList = {&sflowmgr}; swss::Select s; - for (Orch *o : cfgOrchList) + for (Orch *o : orchList) { s.addSelectables(o->getSelectables()); } diff --git a/cfgmgr/teammgr.cpp b/cfgmgr/teammgr.cpp index 273674fbee..36c9d134e1 100644 --- a/cfgmgr/teammgr.cpp +++ b/cfgmgr/teammgr.cpp @@ -307,6 +307,8 @@ void TeamMgr::doLagTask(Consumer &consumer) { if (addLag(alias, min_links, fallback, fast_rate) == task_need_retry) { + // If LAG creation fails, we need to clean up any potentially orphaned teamd processes + removeLag(alias); it++; continue; } @@ -627,7 +629,7 @@ task_process_status 
TeamMgr::addLag(const string &alias, int min_links, bool fal SWSS_LOG_INFO("Port channel %s teamd configuration: %s", alias.c_str(), conf.str().c_str()); - string warmstart_flag = WarmStart::isWarmStart() ? " -w -o " : " -r "; + string warmstart_flag = WarmStart::isWarmStart() ? " -w -o" : " -r"; cmd << TEAMD_CMD << warmstart_flag @@ -654,9 +656,42 @@ bool TeamMgr::removeLag(const string &alias) stringstream cmd; string res; + pid_t pid; - cmd << TEAMD_CMD << " -k -t " << shellquote(alias); - EXEC_WITH_ERROR_THROW(cmd.str(), res); + try + { + std::stringstream cmd; + cmd << "cat " << shellquote("/var/run/teamd/" + alias + ".pid"); + EXEC_WITH_ERROR_THROW(cmd.str(), res); + } + catch (const std::exception &e) + { + SWSS_LOG_NOTICE("Failed to remove non-existent port channel %s pid...", alias.c_str()); + return false; + } + + try + { + pid = static_cast(std::stoul(res, nullptr, 10)); + SWSS_LOG_INFO("Read port channel %s pid %d", alias.c_str(), pid); + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to read port channel %s pid: %s", alias.c_str(), e.what()); + return false; + } + + try + { + std::stringstream cmd; + cmd << "kill -TERM " << pid; + EXEC_WITH_ERROR_THROW(cmd.str(), res); + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to send SIGTERM to port channel %s pid %d: %s", alias.c_str(), pid, e.what()); + return false; + } SWSS_LOG_NOTICE("Stop port channel %s", alias.c_str()); @@ -716,6 +751,17 @@ task_process_status TeamMgr::addLagMember(const string &lag, const string &membe { SWSS_LOG_ENTER(); + stringstream cmd; + string res; + + // If port was already deleted, ignore this operation + cmd << IP_CMD << " link show " << shellquote(member); + if (exec(cmd.str(), res) != 0) + { + SWSS_LOG_WARN("Unable to find port %s", member.c_str()); + return task_ignore; + } + // If port is already enslaved, ignore this operation // TODO: check the current master if it is the same as to be configured if (isPortEnslaved(member)) @@ -723,9 
+769,9 @@ task_process_status TeamMgr::addLagMember(const string &lag, const string &membe return task_ignore; } - stringstream cmd; - string res; uint16_t keyId = generateLacpKey(lag); + cmd.str(""); + cmd.clear(); // Set admin down LAG member (required by teamd) and enslave it // ip link set dev down; diff --git a/cfgmgr/teammgrd.cpp b/cfgmgr/teammgrd.cpp index ff4151c921..a18838c959 100644 --- a/cfgmgr/teammgrd.cpp +++ b/cfgmgr/teammgrd.cpp @@ -12,16 +12,6 @@ using namespace swss; #define SELECT_TIMEOUT 1000 -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; - bool received_sigterm = false; static struct sigaction old_sigaction; diff --git a/cfgmgr/tunnelmgrd.cpp b/cfgmgr/tunnelmgrd.cpp index 0a6a84eaeb..69157ba051 100644 --- a/cfgmgr/tunnelmgrd.cpp +++ b/cfgmgr/tunnelmgrd.cpp @@ -19,26 +19,6 @@ using namespace swss; /* select() function timeout retry time, in millisecond */ #define SELECT_TIMEOUT 1000 -/* - * Following global variables are defined here for the purpose of - * using existing Orch class which is to be refactored soon to - * eliminate the direct exposure of the global variables. - * - * Once Orch class refactoring is done, these global variables - * should be removed from here. 
- */ -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; -/* Global database mutex */ -mutex gDbMutex; - int main(int argc, char **argv) { Logger::linkToDbNative("tunnelmgrd"); diff --git a/cfgmgr/vlanmgr.cpp b/cfgmgr/vlanmgr.cpp index 1536349cb0..ee5b7a7067 100644 --- a/cfgmgr/vlanmgr.cpp +++ b/cfgmgr/vlanmgr.cpp @@ -134,6 +134,11 @@ bool VlanMgr::addHostVlan(int vlan_id) std::string res; EXEC_WITH_ERROR_THROW(cmds, res); + res.clear(); + const std::string echo_cmd = std::string("") + + ECHO_CMD + " 0 > /proc/sys/net/ipv4/conf/" + VLAN_PREFIX + std::to_string(vlan_id) + "/arp_evict_nocarrier"; + swss::exec(echo_cmd, res); + return true; } @@ -426,13 +431,13 @@ void VlanMgr::doVlanTask(Consumer &consumer) { SWSS_LOG_ERROR("%s doesn't exist", key.c_str()); } - SWSS_LOG_DEBUG("%s", (dumpTuple(consumer, t)).c_str()); + SWSS_LOG_DEBUG("%s", (consumer.dumpTuple(t)).c_str()); it = consumer.m_toSync.erase(it); } else { SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); - SWSS_LOG_DEBUG("%s", (dumpTuple(consumer, t)).c_str()); + SWSS_LOG_DEBUG("%s", (consumer.dumpTuple(t)).c_str()); it = consumer.m_toSync.erase(it); } } @@ -534,7 +539,7 @@ void VlanMgr::processUntaggedVlanMembers(string vlan, const string &members) fvVector.push_back(t); KeyOpFieldsValuesTuple tuple = make_tuple(member_key, SET_COMMAND, fvVector); consumer.addToSync(tuple); - SWSS_LOG_DEBUG("%s", (dumpTuple(consumer, tuple)).c_str()); + SWSS_LOG_DEBUG("%s", (consumer.dumpTuple(tuple)).c_str()); } /* * There is pending task from consumer pipe, in this case just skip it. 
@@ -654,7 +659,7 @@ void VlanMgr::doVlanMemberTask(Consumer &consumer) { SWSS_LOG_DEBUG("%s doesn't exist", kfvKey(t).c_str()); } - SWSS_LOG_DEBUG("%s", (dumpTuple(consumer, t)).c_str()); + SWSS_LOG_DEBUG("%s", (consumer.dumpTuple(t)).c_str()); } else { diff --git a/cfgmgr/vlanmgrd.cpp b/cfgmgr/vlanmgrd.cpp index b69dc78122..84bc19cf08 100644 --- a/cfgmgr/vlanmgrd.cpp +++ b/cfgmgr/vlanmgrd.cpp @@ -23,26 +23,6 @@ using namespace swss; MacAddress gMacAddress; -/* - * Following global variables are defined here for the purpose of - * using existing Orch class which is to be refactored soon to - * eliminate the direct exposure of the global variables. - * - * Once Orch class refactoring is done, these global variables - * should be removed from here. - */ -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; -/* Global database mutex */ -mutex gDbMutex; - int main(int argc, char **argv) { Logger::linkToDbNative("vlanmgrd"); diff --git a/cfgmgr/vrfmgrd.cpp b/cfgmgr/vrfmgrd.cpp index 735e59191d..3dbc7e447e 100644 --- a/cfgmgr/vrfmgrd.cpp +++ b/cfgmgr/vrfmgrd.cpp @@ -16,26 +16,6 @@ using namespace swss; /* select() function timeout retry time, in millisecond */ #define SELECT_TIMEOUT 1000 -/* - * Following global variables are defined here for the purpose of - * using existing Orch class which is to be refactored soon to - * eliminate the direct exposure of the global variables. - * - * Once Orch class refactoring is done, these global variables - * should be removed from here. 
- */ -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; -/* Global database mutex */ -mutex gDbMutex; - int main(int argc, char **argv) { Logger::linkToDbNative("vrfmgrd"); @@ -64,7 +44,6 @@ int main(int argc, char **argv) isWarmStart = WarmStart::isWarmStart(); - // TODO: add tables in stateDB which interface depends on to monitor list std::vector cfgOrchList = {&vrfmgr}; swss::Select s; diff --git a/cfgmgr/vxlanmgrd.cpp b/cfgmgr/vxlanmgrd.cpp index d47893a614..c992233c86 100644 --- a/cfgmgr/vxlanmgrd.cpp +++ b/cfgmgr/vxlanmgrd.cpp @@ -21,25 +21,6 @@ using namespace swss; /* select() function timeout retry time, in millisecond */ #define SELECT_TIMEOUT 1000 -/* - * Following global variables are defined here for the purpose of - * using existing Orch class which is to be refactored soon to - * eliminate the direct exposure of the global variables. - * - * Once Orch class refactoring is done, these global variables - * should be removed from here. 
- */ -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; -/* Global database mutex */ -mutex gDbMutex; MacAddress gMacAddress; int main(int argc, char **argv) diff --git a/configure.ac b/configure.ac index 5efe0a67bd..231f1e1c58 100644 --- a/configure.ac +++ b/configure.ac @@ -133,6 +133,7 @@ if test "x$asan_enabled" = "xtrue"; then CFLAGS_ASAN+=" -fsanitize=address" CFLAGS_ASAN+=" -DASAN_ENABLED" CFLAGS_ASAN+=" -ggdb -fno-omit-frame-pointer -U_FORTIFY_SOURCE" + CFLAGS_ASAN+=" -Wno-maybe-uninitialized" AC_SUBST(CFLAGS_ASAN) LDFLAGS_ASAN+=" -lasan" diff --git a/debian/rules b/debian/rules index 42e82b2f30..2291b00ba7 100755 --- a/debian/rules +++ b/debian/rules @@ -33,7 +33,7 @@ ifeq ($(ENABLE_ASAN), y) endif ifeq ($(ENABLE_GCOV), y) - configure_opts += --enable-gcov CFLAGS="-g -O0" CXXFLAGS="-g -O0" + configure_opts += --enable-gcov --enable-code-coverage CFLAGS="-g -O0" CXXFLAGS="-g -O0" endif override_dh_auto_configure: @@ -43,9 +43,10 @@ override_dh_auto_install: dh_auto_install --destdir=debian/swss ifeq ($(ENABLE_GCOV), y) mkdir -p debian/swss/tmp/gcov - sh ./tests/gcov_support.sh collect swss + lcov -c --directory . 
--no-external --exclude "$(shell pwd)/tests/*" --exclude "$(shell pwd)/**/tests/*" --output-file coverage.info + lcov_cobertura coverage.info -o coverage.xml + find ./ -type f -regex '.*\.\(h\|cpp\|gcno\|info\)' | tar -cf debian/swss/tmp/gcov/gcov-source.tar -T - endif override_dh_strip: dh_strip --dbg-package=swss-dbg - diff --git a/doc/swss-schema.md b/doc/swss-schema.md index ec28eb6c0f..74bfd687b8 100644 --- a/doc/swss-schema.md +++ b/doc/swss-schema.md @@ -233,6 +233,7 @@ and reflects the LAG ports into the redis under: `LAG_TABLE::port` key = ROUTE_TABLE:segment ; SRV6 segment name ; field = value path = STRING ; Comma-separated list of IPV6 prefixes for a SRV6 segment + type = STRING ; SRV6 segment list type like "insert", "encaps.red"; If not provided, default type will be "encaps.red" --------------------------------------------- ### SRV6_MY_SID_TABLE diff --git a/fdbsyncd/Makefile.am b/fdbsyncd/Makefile.am index b35ee5f309..93271f4e78 100644 --- a/fdbsyncd/Makefile.am +++ b/fdbsyncd/Makefile.am @@ -15,7 +15,7 @@ fdbsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(COV_CFLAGS) $(CF fdbsyncd_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lswsscommon $(COV_LDFLAGS) if GCOV_ENABLED -fdbsyncd_LDADD += -lgcovpreload +fdbsyncd_SOURCES += ../gcovpreload/gcovpreload.cpp endif if ASAN_ENABLED diff --git a/fdbsyncd/fdbsync.cpp b/fdbsyncd/fdbsync.cpp index 0cdcc63214..3c1fae145a 100644 --- a/fdbsyncd/fdbsync.cpp +++ b/fdbsyncd/fdbsync.cpp @@ -307,13 +307,6 @@ void FdbSync::updateLocalMac (struct m_fdb_info *info) op = "replace"; port_name = info->port_name; fdb_type = info->type; - /* Check if this vlan+key is also learned by vxlan neighbor then delete learned on */ - if (m_mac.find(key) != m_mac.end()) - { - macDelVxlanEntry(key, info); - SWSS_LOG_INFO("Local learn event deleting from VXLAN table DEL_KEY %s", key.c_str()); - macDelVxlan(key); - } } else { @@ -331,11 +324,11 @@ void FdbSync::updateLocalMac (struct m_fdb_info *info) if (fdb_type == 
FDB_TYPE_DYNAMIC) { - type = "dynamic"; + type = "dynamic extern_learn"; } else { - type = "static"; + type = "sticky static"; } const std::string cmds = std::string("") @@ -347,6 +340,17 @@ void FdbSync::updateLocalMac (struct m_fdb_info *info) SWSS_LOG_INFO("cmd:%s, res=%s, ret=%d", cmds.c_str(), res.c_str(), ret); + if (info->op_type == FDB_OPER_ADD) + { + /* Check if this vlan+key is also learned by vxlan neighbor then delete the dest entry */ + if (m_mac.find(key) != m_mac.end()) + { + macDelVxlanEntry(key, info); + SWSS_LOG_INFO("Local learn event deleting from VXLAN table DEL_KEY %s", key.c_str()); + macDelVxlan(key); + } + } + return; } @@ -380,7 +384,7 @@ void FdbSync::addLocalMac(string key, string op) if (m_fdb_mac[key].type == FDB_TYPE_DYNAMIC) { - type = "dynamic"; + type = "dynamic extern_learn"; } else { @@ -428,7 +432,7 @@ void FdbSync::updateMclagRemoteMac (struct m_fdb_info *info) if (fdb_type == FDB_TYPE_DYNAMIC) { - type = "dynamic"; + type = "dynamic extern_learn"; } else { @@ -507,7 +511,7 @@ void FdbSync::macRefreshStateDB(int vlan, string kmac) if (m_fdb_mac[key].type == FDB_TYPE_DYNAMIC) { - type = "dynamic"; + type = "dynamic extern_learn"; } else { diff --git a/fdbsyncd/fdbsyncd.cpp b/fdbsyncd/fdbsyncd.cpp index a83b2693e1..4f9405cbfd 100644 --- a/fdbsyncd/fdbsyncd.cpp +++ b/fdbsyncd/fdbsyncd.cpp @@ -19,7 +19,6 @@ int main(int argc, char **argv) DBConnector appDb(APPL_DB, DBConnector::DEFAULT_UNIXSOCKET, 0); RedisPipeline pipelineAppDB(&appDb); DBConnector stateDb(STATE_DB, DBConnector::DEFAULT_UNIXSOCKET, 0); - DBConnector log_db(LOGLEVEL_DB, DBConnector::DEFAULT_UNIXSOCKET, 0); DBConnector config_db(CONFIG_DB, DBConnector::DEFAULT_UNIXSOCKET, 0); FdbSync sync(&pipelineAppDB, &stateDb, &config_db); diff --git a/fpmsyncd/Makefile.am b/fpmsyncd/Makefile.am index 29b81d7381..74d36b36c7 100644 --- a/fpmsyncd/Makefile.am +++ b/fpmsyncd/Makefile.am @@ -15,7 +15,7 @@ fpmsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) 
fpmsyncd_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lswsscommon if GCOV_ENABLED -fpmsyncd_LDADD += -lgcovpreload +fpmsyncd_SOURCES += ../gcovpreload/gcovpreload.cpp endif if ASAN_ENABLED diff --git a/fpmsyncd/fpminterface.h b/fpmsyncd/fpminterface.h new file mode 100644 index 0000000000..7d78b81808 --- /dev/null +++ b/fpmsyncd/fpminterface.h @@ -0,0 +1,27 @@ +#pragma once + +#include +#include + +#include "fpm/fpm.h" + +namespace swss +{ + +/** + * @brief FPM zebra communication interface + */ +class FpmInterface : public Selectable +{ +public: + virtual ~FpmInterface() = default; + + /** + * @brief Send netlink message through FPM socket + * @param msg Netlink message + * @return True on success, otherwise false is returned + */ + virtual bool send(nlmsghdr* nl_hdr) = 0; +}; + +} diff --git a/fpmsyncd/fpmlink.cpp b/fpmsyncd/fpmlink.cpp index d51b3b482a..13d170a805 100644 --- a/fpmsyncd/fpmlink.cpp +++ b/fpmsyncd/fpmlink.cpp @@ -39,7 +39,7 @@ bool FpmLink::isRawProcessing(struct nlmsghdr *h) int len; short encap_type = 0; struct rtmsg *rtm; - struct rtattr *tb[RTA_MAX + 1]; + struct rtattr *tb[RTA_MAX + 1] = {0}; rtm = (struct rtmsg *)NLMSG_DATA(h); @@ -54,7 +54,6 @@ bool FpmLink::isRawProcessing(struct nlmsghdr *h) return false; } - memset(tb, 0, sizeof(tb)); netlink_parse_rtattr(tb, RTA_MAX, RTM_RTA(rtm), len); if (!tb[RTA_MULTIPATH]) @@ -120,7 +119,7 @@ FpmLink::FpmLink(RouteSync *rsync, unsigned short port) : m_server_up(false), m_routesync(rsync) { - struct sockaddr_in addr; + struct sockaddr_in addr = {}; int true_val = 1; m_server_socket = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP); @@ -141,7 +140,6 @@ FpmLink::FpmLink(RouteSync *rsync, unsigned short port) : throw system_error(errno, system_category()); } - memset (&addr, 0, sizeof (addr)); addr.sin_family = AF_INET; addr.sin_port = htons(port); addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK); @@ -160,11 +158,17 @@ FpmLink::FpmLink(RouteSync *rsync, unsigned short port) : m_server_up = true; m_messageBuffer 
= new char[m_bufSize]; + m_sendBuffer = new char[m_bufSize]; + + m_routesync->onFpmConnected(*this); } FpmLink::~FpmLink() { + m_routesync->onFpmDisconnected(); + delete[] m_messageBuffer; + delete[] m_sendBuffer; if (m_connected) close(m_connection_socket); if (m_server_up) @@ -212,52 +216,103 @@ uint64_t FpmLink::readData() hdr = reinterpret_cast(static_cast(m_messageBuffer + start)); left = m_pos - start; if (left < FPM_MSG_HDR_LEN) + { break; + } + /* fpm_msg_len includes header size */ msg_len = fpm_msg_len(hdr); if (left < msg_len) + { break; + } if (!fpm_msg_ok(hdr, left)) + { throw system_error(make_error_code(errc::bad_message), "Malformed FPM message received"); + } + + processFpmMessage(hdr); + + start += msg_len; + } + + memmove(m_messageBuffer, m_messageBuffer + start, m_pos - start); + m_pos = m_pos - (uint32_t)start; + return 0; +} + +void FpmLink::processFpmMessage(fpm_msg_hdr_t* hdr) +{ + size_t msg_len = fpm_msg_len(hdr); + + if (hdr->msg_type != FPM_MSG_TYPE_NETLINK) + { + return; + } + nlmsghdr *nl_hdr = (nlmsghdr *)fpm_msg_data(hdr); + + /* Read all netlink messages inside FPM message */ + for (; NLMSG_OK (nl_hdr, msg_len); nl_hdr = NLMSG_NEXT(nl_hdr, msg_len)) + { + /* + * EVPN Type5 Add Routes need to be process in Raw mode as they contain + * RMAC, VLAN and L3VNI information. + * Where as all other route will be using rtnl api to extract information + * from the netlink msg. 
+ */ + bool isRaw = isRawProcessing(nl_hdr); + + nl_msg *msg = nlmsg_convert(nl_hdr); + if (msg == NULL) + { + throw system_error(make_error_code(errc::bad_message), "Unable to convert nlmsg"); + } - if (hdr->msg_type == FPM_MSG_TYPE_NETLINK) + nlmsg_set_proto(msg, NETLINK_ROUTE); + + if (isRaw) { - bool isRaw = false; + /* EVPN Type5 Add route processing */ + processRawMsg(nl_hdr); + } + else + { + NetDispatcher::getInstance().onNetlinkMessage(msg); + } + nlmsg_free(msg); + } +} - nlmsghdr *nl_hdr = (nlmsghdr *)fpm_msg_data(hdr); +bool FpmLink::send(nlmsghdr* nl_hdr) +{ + fpm_msg_hdr_t hdr{}; - /* - * EVPN Type5 Add Routes need to be process in Raw mode as they contain - * RMAC, VLAN and L3VNI information. - * Where as all other route will be using rtnl api to extract information - * from the netlink msg. - * */ - isRaw = isRawProcessing(nl_hdr); + size_t len = fpm_msg_align(sizeof(hdr) + nl_hdr->nlmsg_len); - nl_msg *msg = nlmsg_convert(nl_hdr); - if (msg == NULL) - { - throw system_error(make_error_code(errc::bad_message), "Unable to convert nlmsg"); - } + if (len > m_bufSize) + { + SWSS_LOG_THROW("Message length %zu is greater than the send buffer size %d", len, m_bufSize); + } - nlmsg_set_proto(msg, NETLINK_ROUTE); + hdr.version = FPM_PROTO_VERSION; + hdr.msg_type = FPM_MSG_TYPE_NETLINK; + hdr.msg_len = htons(static_cast(len)); - if (isRaw) - { - /* EVPN Type5 Add route processing */ - processRawMsg(nl_hdr); - } - else - { - NetDispatcher::getInstance().onNetlinkMessage(msg); - } - nlmsg_free(msg); + memcpy(m_sendBuffer, &hdr, sizeof(hdr)); + memcpy(m_sendBuffer + sizeof(hdr), nl_hdr, nl_hdr->nlmsg_len); + + size_t sent = 0; + while (sent != len) + { + auto rc = ::send(m_connection_socket, m_sendBuffer + sent, len - sent, 0); + if (rc == -1) + { + SWSS_LOG_ERROR("Failed to send FPM message: %s", strerror(errno)); + return false; } - start += msg_len; + sent += rc; } - memmove(m_messageBuffer, m_messageBuffer + start, m_pos - start); - m_pos = m_pos - 
(uint32_t)start; - return 0; + return true; } diff --git a/fpmsyncd/fpmlink.h b/fpmsyncd/fpmlink.h index 6cceef34ea..c025750edf 100644 --- a/fpmsyncd/fpmlink.h +++ b/fpmsyncd/fpmlink.h @@ -11,13 +11,13 @@ #include #include -#include "selectable.h" #include "fpm/fpm.h" +#include "fpmsyncd/fpminterface.h" #include "fpmsyncd/routesync.h" namespace swss { -class FpmLink : public Selectable { +class FpmLink : public FpmInterface { public: const int MSG_BATCH_SIZE; FpmLink(RouteSync *rsync, unsigned short port = FPM_DEFAULT_PORT); @@ -39,10 +39,15 @@ class FpmLink : public Selectable { m_routesync->onMsgRaw(h); }; + void processFpmMessage(fpm_msg_hdr_t* hdr); + + bool send(nlmsghdr* nl_hdr) override; + private: RouteSync *m_routesync; unsigned int m_bufSize; char *m_messageBuffer; + char *m_sendBuffer; unsigned int m_pos; bool m_connected; diff --git a/fpmsyncd/fpmsyncd.cpp b/fpmsyncd/fpmsyncd.cpp index 8f797e178c..5e16a6a6ca 100644 --- a/fpmsyncd/fpmsyncd.cpp +++ b/fpmsyncd/fpmsyncd.cpp @@ -4,10 +4,14 @@ #include "select.h" #include "selectabletimer.h" #include "netdispatcher.h" +#include "netlink.h" +#include "notificationconsumer.h" +#include "subscriberstatetable.h" #include "warmRestartHelper.h" #include "fpmsyncd/fpmlink.h" #include "fpmsyncd/routesync.h" +#include using namespace std; using namespace swss; @@ -47,21 +51,47 @@ static bool eoiuFlagsSet(Table &bgpStateTable) int main(int argc, char **argv) { swss::Logger::linkToDbNative("fpmsyncd"); + + const auto routeResponseChannelName = std::string("APPL_DB_") + APP_ROUTE_TABLE_NAME + "_RESPONSE_CHANNEL"; + DBConnector db("APPL_DB", 0); + DBConnector cfgDb("CONFIG_DB", 0); + SubscriberStateTable deviceMetadataTableSubscriber(&cfgDb, CFG_DEVICE_METADATA_TABLE_NAME); + Table deviceMetadataTable(&cfgDb, CFG_DEVICE_METADATA_TABLE_NAME); + DBConnector applStateDb("APPL_STATE_DB", 0); + std::unique_ptr routeResponseChannel; + RedisPipeline pipeline(&db); RouteSync sync(&pipeline); DBConnector stateDb("STATE_DB", 0); 
Table bgpStateTable(&stateDb, STATE_BGP_TABLE_NAME); + NetLink netlink; + + netlink.registerGroup(RTNLGRP_LINK); + NetDispatcher::getInstance().registerMessageHandler(RTM_NEWROUTE, &sync); NetDispatcher::getInstance().registerMessageHandler(RTM_DELROUTE, &sync); + NetDispatcher::getInstance().registerMessageHandler(RTM_NEWLINK, &sync); + NetDispatcher::getInstance().registerMessageHandler(RTM_DELLINK, &sync); + + rtnl_route_read_protocol_names(DefaultRtProtoPath); + + std::string suppressionEnabledStr; + deviceMetadataTable.hget("localhost", "suppress-fib-pending", suppressionEnabledStr); + if (suppressionEnabledStr == "enabled") + { + routeResponseChannel = std::make_unique(&applStateDb, routeResponseChannelName); + sync.setSuppressionEnabled(true); + } while (true) { try { FpmLink fpm(&sync); + Select s; SelectableTimer warmStartTimer(timespec{0, 0}); // Before eoiu flags detected, check them periodically. It also stop upon detection of reconciliation done. @@ -80,6 +110,13 @@ int main(int argc, char **argv) cout << "Connected!" << endl; s.addSelectable(&fpm); + s.addSelectable(&netlink); + s.addSelectable(&deviceMetadataTableSubscriber); + + if (sync.isSuppressionEnabled()) + { + s.addSelectable(routeResponseChannel.get()); + } /* If warm-restart feature is enabled, execute 'restoration' logic */ bool warmStartEnabled = sync.m_warmStartHelper.checkAndStart(); @@ -139,11 +176,8 @@ int main(int argc, char **argv) SWSS_LOG_NOTICE("Warm-Restart EOIU hold timer expired."); } - if (sync.m_warmStartHelper.inProgress()) - { - sync.m_warmStartHelper.reconcile(); - SWSS_LOG_NOTICE("Warm-Restart reconciliation processed."); - } + sync.onWarmStartEnd(applStateDb); + // remove the one-shot timer. 
s.removeSelectable(temps); pipeline.flush(); @@ -182,6 +216,74 @@ int main(int argc, char **argv) s.removeSelectable(&eoiuCheckTimer); } } + else if (temps == &deviceMetadataTableSubscriber) + { + std::deque keyOpFvsQueue; + deviceMetadataTableSubscriber.pops(keyOpFvsQueue); + + for (const auto& keyOpFvs: keyOpFvsQueue) + { + const auto& key = kfvKey(keyOpFvs); + const auto& op = kfvOp(keyOpFvs); + const auto& fvs = kfvFieldsValues(keyOpFvs); + + if (op != SET_COMMAND) + { + continue; + } + + if (key != "localhost") + { + continue; + } + + for (const auto& fv: fvs) + { + const auto& field = fvField(fv); + const auto& value = fvValue(fv); + + if (field != "suppress-fib-pending") + { + continue; + } + + bool shouldEnable = (value == "enabled"); + + if (shouldEnable && !sync.isSuppressionEnabled()) + { + routeResponseChannel = std::make_unique(&applStateDb, routeResponseChannelName); + sync.setSuppressionEnabled(true); + s.addSelectable(routeResponseChannel.get()); + } + else if (!shouldEnable && sync.isSuppressionEnabled()) + { + /* When disabling suppression we mark all existing routes offloaded in zebra + * as there could be some transient routes which are pending response from + * orchagent, thus such updates might be missing. Since we are disabling suppression + * we no longer care about real HW offload status and can mark all routes as offloaded + * to avoid routes stuck in suppressed state after transition. 
*/ + sync.markRoutesOffloaded(db); + + sync.setSuppressionEnabled(false); + s.removeSelectable(routeResponseChannel.get()); + routeResponseChannel.reset(); + } + } // end for fvs + } // end for keyOpFvsQueue + } + else if (routeResponseChannel && (temps == routeResponseChannel.get())) + { + std::deque notifications; + routeResponseChannel->pops(notifications); + + for (const auto& notification: notifications) + { + const auto& key = kfvKey(notification); + const auto& fieldValues = kfvFieldsValues(notification); + + sync.onRouteResponse(key, fieldValues); + } + } else if (!warmStartEnabled || sync.m_warmStartHelper.isReconciled()) { pipeline.flush(); diff --git a/fpmsyncd/routesync.cpp b/fpmsyncd/routesync.cpp index ab5868cdcf..0f6ee41188 100644 --- a/fpmsyncd/routesync.cpp +++ b/fpmsyncd/routesync.cpp @@ -10,6 +10,7 @@ #include "fpmsyncd/fpmlink.h" #include "fpmsyncd/routesync.h" #include "macaddress.h" +#include "converter.h" #include #include @@ -44,6 +45,36 @@ using namespace swss; #define ETHER_ADDR_STRLEN (3*ETH_ALEN) +/* Returns name of the protocol passed number represents */ +static string getProtocolString(int proto) +{ + static constexpr size_t protocolNameBufferSize = 128; + char buffer[protocolNameBufferSize] = {}; + + if (!rtnl_route_proto2str(proto, buffer, sizeof(buffer))) + { + return std::to_string(proto); + } + + return buffer; +} + +/* Helper to create unique pointer with custom destructor */ +template +static decltype(auto) makeUniqueWithDestructor(T* ptr, F func) +{ + return std::unique_ptr(ptr, func); +} + +template +static decltype(auto) makeNlAddr(const T& ip) +{ + nl_addr* addr; + nl_addr_parse(ip.to_string().c_str(), AF_UNSPEC, &addr); + return makeUniqueWithDestructor(addr, nl_addr_put); +} + + RouteSync::RouteSync(RedisPipeline *pipeline) : m_routeTable(pipeline, APP_ROUTE_TABLE_NAME, true), m_label_routeTable(pipeline, APP_LABEL_ROUTE_TABLE_NAME, true), @@ -347,7 +378,7 @@ bool RouteSync::getEvpnNextHop(struct nlmsghdr *h, int 
received_bytes, void RouteSync::onEvpnRouteMsg(struct nlmsghdr *h, int len) { struct rtmsg *rtm; - struct rtattr *tb[RTA_MAX + 1]; + struct rtattr *tb[RTA_MAX + 1] = {0}; void *dest = NULL; char anyaddr[16] = {0}; char dstaddr[16] = {0}; @@ -360,7 +391,6 @@ void RouteSync::onEvpnRouteMsg(struct nlmsghdr *h, int len) rtm = (struct rtmsg *)NLMSG_DATA(h); /* Parse attributes and extract fields of interest. */ - memset(tb, 0, sizeof(tb)); netlink_parse_rtattr(tb, RTA_MAX, RTM_RTA(rtm), len); if (tb[RTA_DST]) @@ -435,6 +465,7 @@ void RouteSync::onEvpnRouteMsg(struct nlmsghdr *h, int len) inet_ntop(rtm->rtm_family, dstaddr, buf, MAX_ADDR_SIZE), dst_len); } + auto proto_str = getProtocolString(rtm->rtm_protocol); SWSS_LOG_INFO("Receive route message dest ip prefix: %s Op:%s", destipprefix, nlmsg_type == RTM_NEWROUTE ? "add":"del"); @@ -470,6 +501,8 @@ void RouteSync::onEvpnRouteMsg(struct nlmsghdr *h, int len) return; } + sendOffloadReply(h); + switch (rtm->rtm_type) { case RTN_BLACKHOLE: @@ -518,17 +551,20 @@ void RouteSync::onEvpnRouteMsg(struct nlmsghdr *h, int len) FieldValueTuple intf("ifname", intf_list); FieldValueTuple vni("vni_label", vni_list); FieldValueTuple mac("router_mac", mac_list); + FieldValueTuple proto("protocol", proto_str); fvVector.push_back(nh); fvVector.push_back(intf); fvVector.push_back(vni); fvVector.push_back(mac); + fvVector.push_back(proto); if (!warmRestartInProgress) { m_routeTable.set(destipprefix, fvVector); - SWSS_LOG_DEBUG("RouteTable set msg: %s vtep:%s vni:%s mac:%s intf:%s", - destipprefix, nexthops.c_str(), vni_list.c_str(), mac_list.c_str(), intf_list.c_str()); + SWSS_LOG_DEBUG("RouteTable set msg: %s vtep:%s vni:%s mac:%s intf:%s protocol:%s", + destipprefix, nexthops.c_str(), vni_list.c_str(), mac_list.c_str(), intf_list.c_str(), + proto_str.c_str()); } /* @@ -570,6 +606,12 @@ void RouteSync::onMsgRaw(struct nlmsghdr *h) void RouteSync::onMsg(int nlmsg_type, struct nl_object *obj) { + if (nlmsg_type == RTM_NEWLINK || nlmsg_type 
== RTM_DELLINK) + { + nl_cache_refill(m_nl_sock, m_link_cache); + return; + } + struct rtnl_route *route_obj = (struct rtnl_route *)obj; /* Supports IPv4 or IPv6 address, otherwise return immediately */ @@ -686,6 +728,11 @@ void RouteSync::onRouteMsg(int nlmsg_type, struct nl_object *obj, char *vrf) return; } + if (!isSuppressionEnabled()) + { + sendOffloadReply(route_obj); + } + switch (rtnl_route_get_type(route_obj)) { case RTN_BLACKHOLE: @@ -734,14 +781,45 @@ void RouteSync::onRouteMsg(int nlmsg_type, struct nl_object *obj, char *vrf) { SWSS_LOG_DEBUG("Skip routes to eth0 or docker0: %s %s %s", destipprefix, gw_list.c_str(), intf_list.c_str()); + // If intf_list has only this interface, that means all of the next hops of this route + // have been removed and the next hop on the eth0/docker0 has become the only next hop. + // In this case since we do not want the route with next hop on eth0/docker0, we return. + // But still we need to clear the route from the APPL_DB. Otherwise the APPL_DB and data + // path will be left with stale route entry + if(alsv.size() == 1) + { + if (!warmRestartInProgress) + { + SWSS_LOG_NOTICE("RouteTable del msg for route with only one nh on eth0/docker0: %s %s %s %s", + destipprefix, gw_list.c_str(), intf_list.c_str(), mpls_list.c_str()); + + m_routeTable.del(destipprefix); + } + else + { + SWSS_LOG_NOTICE("Warm-Restart mode: Receiving delete msg for route with only nh on eth0/docker0: %s %s %s %s", + destipprefix, gw_list.c_str(), intf_list.c_str(), mpls_list.c_str()); + + vector fvVector; + const KeyOpFieldsValuesTuple kfv = std::make_tuple(destipprefix, + DEL_COMMAND, + fvVector); + m_warmStartHelper.insertRefreshMap(kfv); + } + } return; } } + auto proto_num = rtnl_route_get_protocol(route_obj); + auto proto_str = getProtocolString(proto_num); + vector fvVector; + FieldValueTuple proto("protocol", proto_str); FieldValueTuple gw("nexthop", gw_list); FieldValueTuple intf("ifname", intf_list); + fvVector.push_back(proto); 
fvVector.push_back(gw); fvVector.push_back(intf); if (!mpls_list.empty()) @@ -805,6 +883,8 @@ void RouteSync::onLabelRouteMsg(int nlmsg_type, struct nl_object *obj) return; } + sendOffloadReply(route_obj); + /* Get the index of the master device */ uint32_t master_index = rtnl_route_get_table(route_obj); /* if the table_id is not set in the route obj then route is for default vrf. */ @@ -910,6 +990,8 @@ void RouteSync::onVnetRouteMsg(int nlmsg_type, struct nl_object *obj, string vne return; } + sendOffloadReply(route_obj); + switch (rtnl_route_get_type(route_obj)) { case RTN_UNICAST: @@ -1010,6 +1092,18 @@ bool RouteSync::getIfName(int if_index, char *if_name, size_t name_len) return true; } +rtnl_link* RouteSync::getLinkByName(const char *name) +{ + auto link = rtnl_link_get_by_name(m_link_cache, name); + if (link == nullptr) + { + /* Trying to refill cache */ + nl_cache_refill(m_nl_sock ,m_link_cache); + link = rtnl_link_get_by_name(m_link_cache, name); + } + return link; +} + /* * getNextHopList() - parses next hop list attached to route_obj * @arg route_obj (input) Netlink route object @@ -1223,3 +1317,198 @@ string RouteSync::getNextHopWt(struct rtnl_route *route_obj) return result; } + +bool RouteSync::sendOffloadReply(struct nlmsghdr* hdr) +{ + SWSS_LOG_ENTER(); + + if (hdr->nlmsg_type != RTM_NEWROUTE) + { + return false; + } + + // Add request flag (required by zebra) + hdr->nlmsg_flags |= NLM_F_REQUEST; + + rtmsg *rtm = static_cast(NLMSG_DATA(hdr)); + + // Add offload flag + rtm->rtm_flags |= RTM_F_OFFLOAD; + + if (!m_fpmInterface) + { + SWSS_LOG_ERROR("Cannot send offload reply to zebra: FPM is disconnected"); + return false; + } + + // Send to zebra + if (!m_fpmInterface->send(hdr)) + { + SWSS_LOG_ERROR("Failed to send reply to zebra"); + return false; + } + + return true; +} + +bool RouteSync::sendOffloadReply(struct rtnl_route* route_obj) +{ + SWSS_LOG_ENTER(); + + nl_msg* msg{}; + rtnl_route_build_add_request(route_obj, NLM_F_CREATE, &msg); + + auto 
nlMsg = makeUniqueWithDestructor(msg, nlmsg_free); + + return sendOffloadReply(nlmsg_hdr(nlMsg.get())); +} + +void RouteSync::setSuppressionEnabled(bool enabled) +{ + SWSS_LOG_ENTER(); + + m_isSuppressionEnabled = enabled; + + SWSS_LOG_NOTICE("Pending routes suppression is %s", (m_isSuppressionEnabled ? "enabled": "disabled")); +} + +void RouteSync::onRouteResponse(const std::string& key, const std::vector& fieldValues) +{ + IpPrefix prefix; + std::string vrfName; + std::string protocol; + + bool isSetOperation{false}; + bool isSuccessReply{false}; + + if (!isSuppressionEnabled()) + { + return; + } + + auto colon = key.find(':'); + if (colon != std::string::npos && key.substr(0, colon).find(VRF_PREFIX) != std::string::npos) + { + vrfName = key.substr(0, colon); + prefix = IpPrefix{key.substr(colon + 1)}; + } + else + { + prefix = IpPrefix{key}; + } + + for (const auto& fieldValue: fieldValues) + { + std::string field = fvField(fieldValue); + std::string value = fvValue(fieldValue); + + if (field == "err_str") + { + isSuccessReply = (value == "SWSS_RC_SUCCESS"); + } + else if (field == "protocol") + { + // If field "protocol" is present in the field values then + // it is a SET operation. This field is absent only if we are + // processing DEL operation. 
+ isSetOperation = true; + protocol = value; + } + } + + if (!isSetOperation) + { + SWSS_LOG_DEBUG("Received response for prefix %s(%s) deletion, ignoring ", + prefix.to_string().c_str(), vrfName.c_str()); + return; + } + + if (!isSuccessReply) + { + SWSS_LOG_INFO("Received failure response for prefix %s(%s)", + prefix.to_string().c_str(), vrfName.c_str()); + return; + } + + auto routeObject = makeUniqueWithDestructor(rtnl_route_alloc(), rtnl_route_put); + auto dstAddr = makeNlAddr(prefix); + + rtnl_route_set_dst(routeObject.get(), dstAddr.get()); + + auto proto = rtnl_route_str2proto(protocol.c_str()); + if (proto < 0) + { + proto = swss::to_uint(protocol); + } + + rtnl_route_set_protocol(routeObject.get(), static_cast(proto)); + rtnl_route_set_family(routeObject.get(), prefix.isV4() ? AF_INET : AF_INET6); + + unsigned int vrfIfIndex = 0; + if (!vrfName.empty()) + { + auto* link = getLinkByName(vrfName.c_str()); + if (!link) + { + SWSS_LOG_DEBUG("Failed to find VRF when constructing response message for prefix %s(%s). 
" + "This message is probably outdated", prefix.to_string().c_str(), + vrfName.c_str()); + return; + } + vrfIfIndex = rtnl_link_get_ifindex(link); + } + + rtnl_route_set_table(routeObject.get(), vrfIfIndex); + + if (!sendOffloadReply(routeObject.get())) + { + SWSS_LOG_ERROR("Failed to send RTM_NEWROUTE message to zebra on prefix %s(%s)", + prefix.to_string().c_str(), vrfName.c_str()); + return; + } + + SWSS_LOG_INFO("Sent response to zebra for prefix %s(%s)", + prefix.to_string().c_str(), vrfName.c_str()); +} + +void RouteSync::sendOffloadReply(DBConnector& db, const std::string& tableName) +{ + SWSS_LOG_ENTER(); + + Table routeTable{&db, tableName}; + + std::vector keys; + routeTable.getKeys(keys); + + for (const auto& key: keys) + { + std::vector fieldValues; + routeTable.get(key, fieldValues); + fieldValues.emplace_back("err_str", "SWSS_RC_SUCCESS"); + + onRouteResponse(key, fieldValues); + } +} + +void RouteSync::markRoutesOffloaded(swss::DBConnector& db) +{ + SWSS_LOG_ENTER(); + + sendOffloadReply(db, APP_ROUTE_TABLE_NAME); +} + +void RouteSync::onWarmStartEnd(DBConnector& applStateDb) +{ + SWSS_LOG_ENTER(); + + if (isSuppressionEnabled()) + { + markRoutesOffloaded(applStateDb); + } + + if (m_warmStartHelper.inProgress()) + { + m_warmStartHelper.reconcile(); + SWSS_LOG_NOTICE("Warm-Restart reconciliation processed."); + } +} diff --git a/fpmsyncd/routesync.h b/fpmsyncd/routesync.h index 2e53bb8d17..eb07eb8f15 100644 --- a/fpmsyncd/routesync.h +++ b/fpmsyncd/routesync.h @@ -4,10 +4,20 @@ #include "dbconnector.h" #include "producerstatetable.h" #include "netmsg.h" +#include "linkcache.h" +#include "fpminterface.h" #include "warmRestartHelper.h" #include #include +#include + +// Add RTM_F_OFFLOAD define if it is not there. +// Debian buster does not provide one but it is neccessary for compilation. 
+#ifndef RTM_F_OFFLOAD +#define RTM_F_OFFLOAD 0x4000 /* route is offloaded */ +#endif + using namespace std; /* Parse the Raw netlink msg */ @@ -16,6 +26,9 @@ extern void netlink_parse_rtattr(struct rtattr **tb, int max, struct rtattr *rta namespace swss { +/* Path to protocol name database provided by iproute2 */ +constexpr auto DefaultRtProtoPath = "/etc/iproute2/rt_protos"; + class RouteSync : public NetMsg { public: @@ -26,6 +39,31 @@ class RouteSync : public NetMsg virtual void onMsg(int nlmsg_type, struct nl_object *obj); virtual void onMsgRaw(struct nlmsghdr *obj); + + void setSuppressionEnabled(bool enabled); + + bool isSuppressionEnabled() const + { + return m_isSuppressionEnabled; + } + + void onRouteResponse(const std::string& key, const std::vector& fieldValues); + + void onWarmStartEnd(swss::DBConnector& applStateDb); + + /* Mark all routes from DB with offloaded flag */ + void markRoutesOffloaded(swss::DBConnector& db); + + void onFpmConnected(FpmInterface& fpm) + { + m_fpmInterface = &fpm; + } + + void onFpmDisconnected() + { + m_fpmInterface = nullptr; + } + WarmStartHelper m_warmStartHelper; private: @@ -40,6 +78,9 @@ class RouteSync : public NetMsg struct nl_cache *m_link_cache; struct nl_sock *m_nl_sock; + bool m_isSuppressionEnabled{false}; + FpmInterface* m_fpmInterface {nullptr}; + /* Handle regular route (include VRF route) */ void onRouteMsg(int nlmsg_type, struct nl_object *obj, char *vrf); @@ -63,6 +104,9 @@ class RouteSync : public NetMsg /* Get interface name based on interface index */ bool getIfName(int if_index, char *if_name, size_t name_len); + /* Get interface if_index based on interface name */ + rtnl_link* getLinkByName(const char *name); + void getEvpnNextHopSep(string& nexthops, string& vni_list, string& mac_list, string& intf_list); @@ -71,7 +115,7 @@ class RouteSync : public NetMsg string& mac_list, string& intf_list, string rmac, string vlan_id); - bool getEvpnNextHop(struct nlmsghdr *h, int received_bytes, struct rtattr 
*tb[], + virtual bool getEvpnNextHop(struct nlmsghdr *h, int received_bytes, struct rtattr *tb[], string& nexthops, string& vni_list, string& mac_list, string& intf_list); @@ -87,6 +131,15 @@ class RouteSync : public NetMsg /* Get next hop weights*/ string getNextHopWt(struct rtnl_route *route_obj); + + /* Sends FPM message with RTM_F_OFFLOAD flag set to zebra */ + bool sendOffloadReply(struct nlmsghdr* hdr); + + /* Sends FPM message with RTM_F_OFFLOAD flag set to zebra */ + bool sendOffloadReply(struct rtnl_route* route_obj); + + /* Sends FPM message with RTM_F_OFFLOAD flag set for all routes in the table */ + void sendOffloadReply(swss::DBConnector& db, const std::string& table); }; } diff --git a/gcovpreload/Makefile b/gcovpreload/Makefile index c4328c72b9..5039fe5056 100644 --- a/gcovpreload/Makefile +++ b/gcovpreload/Makefile @@ -6,7 +6,7 @@ CXX:=$(shell sh -c 'type $${CXX%% *} >/dev/null 2>/dev/null && echo $(CXX) || ec DYLIBSUFFIX=so DYLIBNAME=$(LIBNAME).$(DYLIBSUFFIX) -DYLIB_MAKE_CMD=$(CC) -shared -fpic gcovpreload.c -o ${DYLIBNAME} +DYLIB_MAKE_CMD=$(CC) -shared -fpic gcovpreload.cpp -o ${DYLIBNAME} all: $(DYLIB_MAKE_CMD) diff --git a/gcovpreload/gcovpreload.c b/gcovpreload/gcovpreload.cpp similarity index 83% rename from gcovpreload/gcovpreload.c rename to gcovpreload/gcovpreload.cpp index 2141e9ef39..a545c217ce 100644 --- a/gcovpreload/gcovpreload.c +++ b/gcovpreload/gcovpreload.cpp @@ -2,15 +2,15 @@ #include #include #include -#define SIMPLE_WAY + +extern "C" void __gcov_dump(); void sighandler(int signo) { #ifdef SIMPLE_WAY exit(signo); #else - extern void __gcov_flush(); - __gcov_flush(); /* flush out gcov stats data */ + __gcov_dump(); raise(signo); /* raise the signal again to crash process */ #endif } @@ -33,9 +33,9 @@ void ctor() struct sigaction sa; sa.sa_handler = sighandler; sigemptyset(&sa.sa_mask); - sa.sa_flags = SA_RESETHAND; + sa.sa_flags = (int)SA_RESETHAND; - for(i = 0; i < sizeof(sigs)/sizeof(sigs[0]); ++i) { + for(i = 0; i < 
(int)(sizeof(sigs)/sizeof(sigs[0])); ++i) { if (sigaction(sigs[i], &sa, NULL) == -1) { perror("Could not set signal handler"); } diff --git a/gearsyncd/gearboxparser.cpp b/gearsyncd/gearboxparser.cpp index dfd68be2ec..879624fd25 100644 --- a/gearsyncd/gearboxparser.cpp +++ b/gearsyncd/gearboxparser.cpp @@ -15,6 +15,7 @@ */ #include "gearboxparser.h" +#include "gearboxutils.h" #include "phyparser.h" #include @@ -42,7 +43,7 @@ bool GearboxParser::parse() return false; } - json phys, phy, interfaces, interface, val, lanes; + json phys, phy, interfaces, interface, val, lanes, txFir; std::vector attrs; @@ -285,6 +286,27 @@ bool GearboxParser::parse() SWSS_LOG_ERROR("missing 'line_lanes' field in 'interfaces' item %d in gearbox configuration", iter); return false; } + + for (std::string txFirKey: swss::tx_fir_strings) + { + if (interface.find(txFirKey) != interface.end()) + { + txFir = interface[txFirKey]; // vec + std::string txFirValuesStr(""); + for (uint32_t iter2 = 0; iter2 < txFir.size(); iter2++) + { + val = txFir[iter2]; + if (txFirValuesStr.length() > 0) + { + txFirValuesStr += ","; + } + txFirValuesStr += std::to_string(val.get()); + } + attr = std::make_pair(txFirKey, txFirValuesStr); + attrs.push_back(attr); + } + } + std::string key; key = "interface:" + std::to_string(index); if (getWriteToDb() == true) diff --git a/gearsyncd/gearparserbase.h b/gearsyncd/gearparserbase.h index d9db7556d9..51807c308a 100644 --- a/gearsyncd/gearparserbase.h +++ b/gearsyncd/gearparserbase.h @@ -25,7 +25,7 @@ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wshadow" -#include "swss/json.hpp" +#include #pragma GCC diagnostic pop using json = nlohmann::json; diff --git a/lgtm.yml b/lgtm.yml index 981fcd589b..b3da3bba3f 100644 --- a/lgtm.yml +++ b/lgtm.yml @@ -16,6 +16,7 @@ extraction: - libnl-genl-3-dev - libnl-route-3-dev - libnl-nf-3-dev + - libyang-dev - libzmq3-dev - libzmq5 - swig3.0 diff --git a/lib/gearboxutils.cpp b/lib/gearboxutils.cpp index 
f9b3228621..bc35ed3456 100644 --- a/lib/gearboxutils.cpp +++ b/lib/gearboxutils.cpp @@ -266,6 +266,11 @@ std::map GearboxUtils::loadInterfaceMap(Table *gearbox } } } + else if (tx_fir_strings.find(val.first) != tx_fir_strings.end()) + { + SWSS_LOG_DEBUG("Parsed key:%s, val:%s", val.first.c_str(), val.second.c_str()); + interface.tx_firs[val.first] = val.second; + } } gearboxInterfaceMap[interface.index] = interface; } diff --git a/lib/gearboxutils.h b/lib/gearboxutils.h index 28ab48761e..a239aa3a10 100644 --- a/lib/gearboxutils.h +++ b/lib/gearboxutils.h @@ -30,6 +30,24 @@ namespace swss { +static const std::set tx_fir_strings = +{ + "system_tx_fir_pre1", + "system_tx_fir_pre2", + "system_tx_fir_pre3", + "system_tx_fir_post1", + "system_tx_fir_post2", + "system_tx_fir_post3", + "system_tx_fir_main", + "line_tx_fir_pre1", + "line_tx_fir_pre2", + "line_tx_fir_pre3", + "line_tx_fir_post1", + "line_tx_fir_post2", + "line_tx_fir_post3", + "line_tx_fir_main" +}; + typedef struct { int phy_id; @@ -54,6 +72,7 @@ typedef struct int phy_id; std::set line_lanes; std::set system_lanes; + std::map tx_firs; } gearbox_interface_t; typedef struct diff --git a/lib/recorder.cpp b/lib/recorder.cpp new file mode 100644 index 0000000000..449039adff --- /dev/null +++ b/lib/recorder.cpp @@ -0,0 +1,121 @@ +#include "recorder.h" +#include "timestamp.h" +#include "logger.h" +#include + +using namespace swss; + +const std::string Recorder::DEFAULT_DIR = "."; +const std::string Recorder::REC_START = "|recording started"; +const std::string Recorder::SWSS_FNAME = "swss.rec"; +const std::string Recorder::SAIREDIS_FNAME = "sairedis.rec"; +const std::string Recorder::RESPPUB_FNAME = "responsepublisher.rec"; + + +Recorder& Recorder::Instance() +{ + static Recorder m_recorder; + return m_recorder; +} + + +SwSSRec::SwSSRec() +{ + /* Set Default values */ + setRecord(true); + setRotate(false); + setLocation(Recorder::DEFAULT_DIR); + setFileName(Recorder::SWSS_FNAME); + setName("SwSS"); +} + + 
+ResPubRec::ResPubRec() +{ + /* Set Default values */ + setRecord(false); + setRotate(false); + setLocation(Recorder::DEFAULT_DIR); + setFileName(Recorder::RESPPUB_FNAME); + setName("Response Publisher"); +} + + +SaiRedisRec::SaiRedisRec() +{ + /* Set Default values */ + setRecord(true); + setRotate(false); + setLocation(Recorder::DEFAULT_DIR); + setFileName(Recorder::SAIREDIS_FNAME); + setName("SaiRedis"); +} + + +void RecWriter::startRec(bool exit_if_failure) +{ + if (!isRecord()) + { + return ; + } + + fname = getLoc() + "/" + getFile(); + record_ofs.open(fname, std::ofstream::out | std::ofstream::app); + if (!record_ofs.is_open()) + { + SWSS_LOG_ERROR("%s Recorder: Failed to open recording file %s: error %s", getName().c_str(), fname.c_str(), strerror(errno)); + if (exit_if_failure) + { + exit(EXIT_FAILURE); + } + else + { + setRecord(false); + } + } + record_ofs << swss::getTimestamp() << Recorder::REC_START << std::endl; + SWSS_LOG_NOTICE("%s Recorder: Recording started at %s", getName().c_str(), fname.c_str()); +} + + +RecWriter::~RecWriter() +{ + if (record_ofs.is_open()) + { + record_ofs.close(); + } +} + + +void RecWriter::record(const std::string& val) +{ + if (!isRecord()) + { + return ; + } + record_ofs << swss::getTimestamp() << "|" << val << std::endl; + if (isRotate()) + { + setRotate(false); + logfileReopen(); + } +} + + +void RecWriter::logfileReopen() +{ + /* + * On log rotate we will use the same file name, we are assuming that + * logrotate daemon move filename to filename.1 and we will create new + * empty file here. 
+ */ + record_ofs.close(); + record_ofs.open(fname, std::ofstream::out | std::ofstream::app); + + if (!record_ofs.is_open()) + { + SWSS_LOG_ERROR("%s Recorder: Failed to open file %s: %s", getName().c_str(), fname.c_str(), strerror(errno)); + return; + } + SWSS_LOG_INFO("%s Recorder: LogRotate request handled", getName().c_str()); +} diff --git a/lib/recorder.h b/lib/recorder.h new file mode 100644 index 0000000000..971c3a2bb7 --- /dev/null +++ b/lib/recorder.h @@ -0,0 +1,84 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace swss { + +class RecBase { +public: + RecBase() = default; + /* Setters */ + void setRecord(bool record) { m_recording = record; } + void setRotate(bool rotate) { m_rotate = rotate; } + void setLocation(const std::string& loc) { m_location = loc; } + void setFileName(const std::string& name) { m_filename = name; } + void setName(const std::string& name) { m_name = name; } + + /* getters */ + bool isRecord() { return m_recording; } + bool isRotate() { return m_rotate; } + std::string getLoc() { return m_location; } + std::string getFile() { return m_filename; } + std::string getName() { return m_name; } + +private: + bool m_recording; + bool m_rotate; + std::string m_location; + std::string m_filename; + std::string m_name; +}; + +class RecWriter : public RecBase { +public: + RecWriter() = default; + virtual ~RecWriter(); + void startRec(bool exit_if_failure); + void record(const std::string& val); + +protected: + void logfileReopen(); + +private: + std::ofstream record_ofs; + std::string fname; +}; + +class SwSSRec : public RecWriter { +public: + SwSSRec(); +}; + +/* Record Handler for Response Publisher Class */ +class ResPubRec : public RecWriter { +public: + ResPubRec(); +}; + +class SaiRedisRec : public RecBase { +public: + SaiRedisRec(); +}; + +/* Interface to access recorder classes */ +class Recorder { +public: + static Recorder& Instance(); + static const std::string DEFAULT_DIR; + static const std::string 
REC_START; + static const std::string SWSS_FNAME; + static const std::string SAIREDIS_FNAME; + static const std::string RESPPUB_FNAME; + + Recorder() = default; + /* Individual Handlers */ + SwSSRec swss; + SaiRedisRec sairedis; + ResPubRec respub; +}; + +} diff --git a/mclagsyncd/Makefile.am b/mclagsyncd/Makefile.am index d4b4b03c40..eb4fc20d0c 100644 --- a/mclagsyncd/Makefile.am +++ b/mclagsyncd/Makefile.am @@ -15,7 +15,7 @@ mclagsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) mclagsyncd_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lswsscommon if GCOV_ENABLED -mclagsyncd_LDADD += -lgcovpreload +mclagsyncd_SOURCES += ../gcovpreload/gcovpreload.cpp endif if ASAN_ENABLED diff --git a/mclagsyncd/mclaglink.cpp b/mclagsyncd/mclaglink.cpp index b09660ee56..b8040c1646 100644 --- a/mclagsyncd/mclaglink.cpp +++ b/mclagsyncd/mclaglink.cpp @@ -191,7 +191,8 @@ void MclagLink::setPortIsolate(char *msg) { static const unordered_set supported { BRCM_PLATFORM_SUBSTRING, - BFN_PLATFORM_SUBSTRING + BFN_PLATFORM_SUBSTRING, + CTC_PLATFORM_SUBSTRING }; const char *platform = getenv("platform"); @@ -1744,7 +1745,7 @@ MclagLink::MclagLink(Select *select, int port) : m_server_up(false), m_select(select) { - struct sockaddr_in addr; + struct sockaddr_in addr = {}; int true_val = 1; m_server_socket = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP); @@ -1765,7 +1766,6 @@ MclagLink::MclagLink(Select *select, int port) : throw system_error(errno, system_category()); } - memset(&addr, 0, sizeof(addr)); addr.sin_family = AF_INET; addr.sin_port = htons((unsigned short int)port); addr.sin_addr.s_addr = htonl(MCLAG_DEFAULT_IP); diff --git a/mclagsyncd/mclaglink.h b/mclagsyncd/mclaglink.h index a811f8cb2e..09129fd88f 100644 --- a/mclagsyncd/mclaglink.h +++ b/mclagsyncd/mclaglink.h @@ -53,6 +53,7 @@ #define BRCM_PLATFORM_SUBSTRING "broadcom" #define BFN_PLATFORM_SUBSTRING "barefoot" +#define CTC_PLATFORM_SUBSTRING "centec" using namespace std; diff --git a/natsyncd/Makefile.am 
b/natsyncd/Makefile.am index cdee9d52ae..562d452c41 100644 --- a/natsyncd/Makefile.am +++ b/natsyncd/Makefile.am @@ -15,7 +15,7 @@ natsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) natsyncd_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lnl-nf-3 -lswsscommon if GCOV_ENABLED -natsyncd_LDADD += -lgcovpreload +natsyncd_SOURCES += ../gcovpreload/gcovpreload.cpp endif if ASAN_ENABLED diff --git a/neighsyncd/Makefile.am b/neighsyncd/Makefile.am index cb61a83bbc..1f34e9e92f 100644 --- a/neighsyncd/Makefile.am +++ b/neighsyncd/Makefile.am @@ -15,7 +15,7 @@ neighsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) neighsyncd_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lswsscommon if GCOV_ENABLED -neighsyncd_LDADD += -lgcovpreload +neighsyncd_SOURCES += ../gcovpreload/gcovpreload.cpp endif if ASAN_ENABLED diff --git a/neighsyncd/neighsync.cpp b/neighsyncd/neighsync.cpp index 8f208e73fe..46f51b9266 100644 --- a/neighsyncd/neighsync.cpp +++ b/neighsyncd/neighsync.cpp @@ -143,6 +143,12 @@ void NeighSync::onMsg(int nlmsg_type, struct nl_object *obj) nl_addr2str(rtnl_neigh_get_lladdr(neigh), macStr, MAX_ADDR_SIZE); } + if (!delete_key && !strncmp(macStr, "none", MAX_ADDR_SIZE)) + { + SWSS_LOG_NOTICE("Mac address is 'none' for ADD op, ignoring for %s", ipStr); + return; + } + /* Ignore neighbor entries with Broadcast Mac - Trigger for directed broadcast */ if (!delete_key && (MacAddress(macStr) == MacAddress("ff:ff:ff:ff:ff:ff"))) { diff --git a/neighsyncd/restore_neighbors.py b/neighsyncd/restore_neighbors.py index a02e5434fc..19be323b7e 100755 --- a/neighsyncd/restore_neighbors.py +++ b/neighsyncd/restore_neighbors.py @@ -80,21 +80,29 @@ def is_intf_oper_state_up(intf): return True return False -def is_intf_up(intf, db): - if not is_intf_oper_state_up(intf): - return False +def check_state_db(intf, db): + table_name = '' if 'Vlan' in intf: table_name = 'VLAN_MEMBER_TABLE|{}|*'.format(intf) - key = db.keys(db.STATE_DB, table_name) - 
if key is None: - log_info ("Vlan member is not yet created") - return False - if is_intf_up.counter == 0: - time.sleep(3*CHECK_INTERVAL) - is_intf_up.counter = 1 - log_info ("intf {} is up".format(intf)) + elif 'PortChannel' in intf: + table_name = 'LAG_MEMBER_TABLE|{}|*'.format(intf) + else: + return True + key = db.keys(db.STATE_DB, table_name) + if key is None: + log_info ("members for {} are not yet created".format(intf)) + return False + if is_intf_up.counter == 0: + time.sleep(3*CHECK_INTERVAL) + is_intf_up.counter = 1 + log_info ("intf {} is up".format(intf)) return True +def is_intf_up(intf, db): + if not is_intf_oper_state_up(intf): + return False + return check_state_db(intf, db) + # read the neigh table from AppDB to memory, format as below # build map as below, this can efficiently access intf and family groups later # { intf1 -> { { family1 -> [[ip1, mac1], [ip2, mac2] ...] } diff --git a/orchagent/Makefile.am b/orchagent/Makefile.am index d39e73d737..e7743ab44d 100644 --- a/orchagent/Makefile.am +++ b/orchagent/Makefile.am @@ -1,8 +1,10 @@ INCLUDES = -I $(top_srcdir)/lib \ -I $(top_srcdir) \ -I $(top_srcdir)/warmrestart \ + -I switch \ -I flex_counter \ -I debug_counter \ + -I port \ -I pbh \ -I nhg @@ -45,6 +47,7 @@ orchagent_SOURCES = \ main.cpp \ $(top_srcdir)/lib/gearboxutils.cpp \ $(top_srcdir)/lib/subintf.cpp \ + $(top_srcdir)/lib/recorder.cpp \ orchdaemon.cpp \ orch.cpp \ notifications.cpp \ @@ -56,6 +59,8 @@ orchagent_SOURCES = \ mplsrouteorch.cpp \ neighorch.cpp \ intfsorch.cpp \ + port/port_capabilities.cpp \ + port/porthlpr.cpp \ portsorch.cpp \ fabricportsorch.cpp \ fgnhgorch.cpp \ @@ -73,6 +78,8 @@ orchagent_SOURCES = \ pbhorch.cpp \ saihelper.cpp \ saiattr.cpp \ + switch/switch_capabilities.cpp \ + switch/switch_helper.cpp \ switchorch.cpp \ pfcwdorch.cpp \ pfcactionhandler.cpp \ @@ -98,13 +105,23 @@ orchagent_SOURCES = \ bfdorch.cpp \ srv6orch.cpp \ response_publisher.cpp \ - nvgreorch.cpp + nvgreorch.cpp \ + zmqorch.cpp \ + 
dash/dashorch.cpp \ + dash/dashrouteorch.cpp \ + dash/dashvnetorch.cpp \ + dash/dashaclorch.cpp \ + dash/dashaclgroupmgr.cpp \ + dash/dashtagmgr.cpp \ + dash/pbutils.cpp \ + twamporch.cpp orchagent_SOURCES += flex_counter/flex_counter_manager.cpp flex_counter/flex_counter_stat_manager.cpp flex_counter/flow_counter_handler.cpp flex_counter/flowcounterrouteorch.cpp orchagent_SOURCES += debug_counter/debug_counter.cpp debug_counter/drop_counter.cpp orchagent_SOURCES += p4orch/p4orch.cpp \ p4orch/p4orch_util.cpp \ p4orch/p4oidmapper.cpp \ + p4orch/tables_definition_manager.cpp \ p4orch/router_interface_manager.cpp \ p4orch/gre_tunnel_manager.cpp \ p4orch/neighbor_manager.cpp \ @@ -115,11 +132,12 @@ orchagent_SOURCES += p4orch/p4orch.cpp \ p4orch/acl_rule_manager.cpp \ p4orch/wcmp_manager.cpp \ p4orch/mirror_session_manager.cpp \ - p4orch/l3_admit_manager.cpp + p4orch/l3_admit_manager.cpp \ + p4orch/ext_tables_manager.cpp orchagent_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) orchagent_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) -orchagent_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lpthread -lsairedis -lsaimeta -lsaimetadata -lswsscommon -lzmq +orchagent_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lpthread -lsairedis -lsaimeta -lsaimetadata -lswsscommon -lzmq -lprotobuf -ldashapi routeresync_SOURCES = routeresync.cpp routeresync_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) @@ -131,9 +149,9 @@ orchagent_restart_check_CPPFLAGS = $(DBGFLAGS) $(AM_CPPFLAGS) $(CFLAGS_COMMON) $ orchagent_restart_check_LDADD = $(LDFLAGS_ASAN) -lhiredis -lswsscommon -lpthread if GCOV_ENABLED -orchagent_LDADD += -lgcovpreload -routeresync_LDADD += -lgcovpreload -orchagent_restart_check_LDADD += -lgcovpreload +orchagent_SOURCES += ../gcovpreload/gcovpreload.cpp +routeresync_SOURCES += ../gcovpreload/gcovpreload.cpp +orchagent_restart_check_SOURCES += ../gcovpreload/gcovpreload.cpp endif if 
ASAN_ENABLED diff --git a/orchagent/aclorch.cpp b/orchagent/aclorch.cpp index ff7d911063..6906744cc2 100644 --- a/orchagent/aclorch.cpp +++ b/orchagent/aclorch.cpp @@ -28,12 +28,14 @@ extern sai_switch_api_t* sai_switch_api; extern sai_object_id_t gSwitchId; extern PortsOrch* gPortsOrch; extern CrmOrch *gCrmOrch; +extern string gMySwitchType; #define MIN_VLAN_ID 1 // 0 is a reserved VLAN ID #define MAX_VLAN_ID 4095 // 4096 is a reserved VLAN ID #define STATE_DB_ACL_ACTION_FIELD_IS_ACTION_LIST_MANDATORY "is_action_list_mandatory" #define STATE_DB_ACL_ACTION_FIELD_ACTION_LIST "action_list" +#define STATE_DB_ACL_L3V4V6_SUPPORTED "supported_L3V4V6" #define COUNTERS_ACL_COUNTER_RULE_MAP "ACL_COUNTER_RULE_MAP" #define ACL_COUNTER_DEFAULT_POLLING_INTERVAL_MS 10000 // ms @@ -69,7 +71,9 @@ acl_rule_attr_lookup_t aclMatchLookup = { MATCH_INNER_ETHER_TYPE, SAI_ACL_ENTRY_ATTR_FIELD_INNER_ETHER_TYPE }, { MATCH_INNER_IP_PROTOCOL, SAI_ACL_ENTRY_ATTR_FIELD_INNER_IP_PROTOCOL }, { MATCH_INNER_L4_SRC_PORT, SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_SRC_PORT }, - { MATCH_INNER_L4_DST_PORT, SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_DST_PORT } + { MATCH_INNER_L4_DST_PORT, SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_DST_PORT }, + { MATCH_BTH_OPCODE, SAI_ACL_ENTRY_ATTR_FIELD_BTH_OPCODE}, + { MATCH_AETH_SYNDROME, SAI_ACL_ENTRY_ATTR_FIELD_AETH_SYNDROME} }; static acl_range_type_lookup_t aclRangeTypeLookup = @@ -107,6 +111,11 @@ static acl_rule_attr_lookup_t aclDTelActionLookup = { ACTION_DTEL_REPORT_ALL_PACKETS, SAI_ACL_ENTRY_ATTR_ACTION_DTEL_REPORT_ALL_PACKETS } }; +static acl_rule_attr_lookup_t aclOtherActionLookup = +{ + { ACTION_COUNTER, SAI_ACL_ENTRY_ATTR_ACTION_COUNTER} +}; + static acl_packet_action_lookup_t aclPacketActionLookup = { { PACKET_ACTION_FORWARD, SAI_PACKET_ACTION_FORWARD }, @@ -195,6 +204,26 @@ static acl_table_action_list_lookup_t defaultAclActionList = } } }, + { + // L3V4V6 + TABLE_TYPE_L3V4V6, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION, + SAI_ACL_ACTION_TYPE_REDIRECT 
+ } + }, + { + ACL_STAGE_EGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION, + SAI_ACL_ACTION_TYPE_REDIRECT + } + } + } + }, { // MIRROR TABLE_TYPE_MIRROR, @@ -351,8 +380,41 @@ static acl_table_match_field_lookup_t stageMandatoryMatchFields = } } } + }, + { + TABLE_TYPE_L3, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_TABLE_ATTR_FIELD_ACL_RANGE_TYPE + } + }, + { + ACL_STAGE_EGRESS, + { + SAI_ACL_TABLE_ATTR_FIELD_ACL_RANGE_TYPE + } + } + } + }, + { + TABLE_TYPE_L3V6, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_TABLE_ATTR_FIELD_ACL_RANGE_TYPE + } + }, + { + ACL_STAGE_EGRESS, + { + SAI_ACL_TABLE_ATTR_FIELD_ACL_RANGE_TYPE + } + } + } } - }; static acl_ip_type_lookup_t aclIpTypeLookup = @@ -375,6 +437,14 @@ static map aclCounterLookup = {SAI_ACL_COUNTER_ATTR_ENABLE_PACKET_COUNT, SAI_ACL_COUNTER_ATTR_PACKETS}, }; +static map aclObjectStatusLookup = +{ + {AclObjectStatus::ACTIVE, "Active"}, + {AclObjectStatus::INACTIVE, "Inactive"}, + {AclObjectStatus::PENDING_CREATION, "Pending creation"}, + {AclObjectStatus::PENDING_REMOVAL, "Pending removal"} +}; + static sai_acl_table_attr_t AclEntryFieldToAclTableField(sai_acl_entry_attr_t attr) { if (!IS_ATTR_ID_IN_RANGE(attr, ACL_ENTRY, FIELD)) @@ -635,6 +705,7 @@ bool AclTableTypeParser::parseAclTableTypeActions(const std::string& value, AclT auto l3Action = aclL3ActionLookup.find(action); auto mirrorAction = aclMirrorStageLookup.find(action); auto dtelAction = aclDTelActionLookup.find(action); + auto otherAction = aclOtherActionLookup.find(action); if (l3Action != aclL3ActionLookup.end()) { @@ -648,11 +719,16 @@ bool AclTableTypeParser::parseAclTableTypeActions(const std::string& value, AclT { saiActionAttr = dtelAction->second; } + else if (otherAction != aclOtherActionLookup.end()) + { + saiActionAttr = otherAction->second; + } else { SWSS_LOG_ERROR("Unknown action %s", action.c_str()); return false; } + SWSS_LOG_INFO("Added action %s", action.c_str()); builder.withAction(AclEntryActionToAclAction(saiActionAttr)); } @@ -926,6 
+1002,36 @@ bool AclRule::validateAddMatch(string attr_name, string attr_value) matchData.data.u8 = to_uint(attr_value); matchData.mask.u8 = 0xFF; } + else if (attr_name == MATCH_BTH_OPCODE) + { + auto opcode_data = tokenize(attr_value, '/'); + + if (opcode_data.size() == 2) + { + matchData.data.u8 = to_uint(opcode_data[0]); + matchData.mask.u8 = to_uint(opcode_data[1]); + } + else + { + SWSS_LOG_ERROR("Invalid BTH_OPCODE configuration: %s, expected format /", attr_value.c_str()); + return false; + } + } + else if (attr_name == MATCH_AETH_SYNDROME) + { + auto syndrome_data = tokenize(attr_value, '/'); + + if (syndrome_data.size() == 2) + { + matchData.data.u8 = to_uint(syndrome_data[0]); + matchData.mask.u8 = to_uint(syndrome_data[1]); + } + else + { + SWSS_LOG_ERROR("Invalid AETH_SYNDROME configuration: %s, expected format /", attr_value.c_str()); + return false; + } + } } catch (exception &e) { @@ -1056,6 +1162,11 @@ bool AclRule::createRule() status = sai_acl_api->create_acl_entry(&m_ruleOid, gSwitchId, (uint32_t)rule_attrs.size(), rule_attrs.data()); if (status != SAI_STATUS_SUCCESS) { + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) + { + SWSS_LOG_NOTICE("ACL rule %s already exists", m_id.c_str()); + return true; + } SWSS_LOG_ERROR("Failed to create ACL rule %s, rv:%d", m_id.c_str(), status); AclRange::remove(range_objects, range_object_list.count); @@ -1117,6 +1228,12 @@ bool AclRule::removeRule() auto status = sai_acl_api->remove_acl_entry(m_ruleOid); if (status != SAI_STATUS_SUCCESS) { + if (status == SAI_STATUS_ITEM_NOT_FOUND) + { + SWSS_LOG_NOTICE("ACL rule already deleted"); + m_ruleOid = SAI_NULL_OBJECT_ID; + return true; + } SWSS_LOG_ERROR("Failed to delete ACL rule, status %s", sai_serialize_status(status).c_str()); return false; } @@ -1477,6 +1594,11 @@ const vector& AclRule::getRangeConfig() const return m_rangeConfig; } +bool AclRule::getCreateCounter() const +{ + return m_createCounter; +} + shared_ptr AclRule::makeShared(AclOrch *acl, MirrorOrch 
*mirror, DTelOrch *dtel, const string& rule, const string& table, const KeyOpFieldsValuesTuple& data) { shared_ptr aclRule; @@ -1624,6 +1746,13 @@ bool AclRule::createCounter() bool AclRule::removeRanges() { SWSS_LOG_ENTER(); + if (!m_ranges.size()) + { + //The Acl Rules which have mirror action will not have ranges created till the mirror becomes active + SWSS_LOG_INFO("No Acl Range created for ACL Rule %s in table %s", m_id.c_str(), m_pTable->getId().c_str()); + return true; + } + for (const auto& rangeConfig: m_rangeConfig) { if (!AclRange::remove(rangeConfig.rangeType, rangeConfig.min, rangeConfig.max)) @@ -1924,6 +2053,16 @@ bool AclRuleMirror::activate() setAction(it.first, attr.value.aclaction); } + // If the rule with mirror action is removed and then mirror is activated, create the counter before rule is created + if (!hasCounter()) + { + if (getCreateCounter() && !createCounter()) + { + SWSS_LOG_ERROR("createCounter failed for Rule %s session %s", m_id.c_str(), m_sessionName.c_str()); + return false; + } + } + if (!AclRule::createRule()) { return false; @@ -2058,6 +2197,30 @@ bool AclTable::addMandatoryActions() return true; } +bool AclTable::addStageMandatoryRangeFields() +{ + SWSS_LOG_ENTER(); + + string platform = getenv("platform") ? getenv("platform") : ""; + string sub_platform = getenv("sub_platform") ? 
getenv("sub_platform") : ""; + auto match = SAI_ACL_TABLE_ATTR_FIELD_ACL_RANGE_TYPE; + + if ((platform == BRCM_PLATFORM_SUBSTRING) && (sub_platform != BRCM_DNX_PLATFORM_SUBSTRING) && + (stage == ACL_STAGE_EGRESS)) + { + return false; + } + + type.addMatch(make_shared(set{ + {SAI_ACL_RANGE_TYPE_L4_SRC_PORT_RANGE, SAI_ACL_RANGE_TYPE_L4_DST_PORT_RANGE}})); + SWSS_LOG_INFO("Added mandatory match field %s for table type %s stage %d", + sai_serialize_enum(match, &sai_metadata_enum_sai_acl_table_attr_t).c_str(), + type.getName().c_str(), stage); + + return true; +} + + bool AclTable::addStageMandatoryMatchFields() { SWSS_LOG_ENTER(); @@ -2075,10 +2238,17 @@ bool AclTable::addStageMandatoryMatchFields() // Add the stage particular matching fields for (auto match : fields_for_stage[stage]) { - type.addMatch(make_shared(match)); - SWSS_LOG_INFO("Added mandatory match field %s for table type %s stage %d", - sai_serialize_enum(match, &sai_metadata_enum_sai_acl_table_attr_t).c_str(), - type.getName().c_str(), stage); + if (match != SAI_ACL_TABLE_ATTR_FIELD_ACL_RANGE_TYPE) + { + type.addMatch(make_shared(match)); + SWSS_LOG_INFO("Added mandatory match field %s for table type %s stage %d", + sai_serialize_enum(match, &sai_metadata_enum_sai_acl_table_attr_t).c_str(), + type.getName().c_str(), stage); + } + else + { + addStageMandatoryRangeFields(); + } } } } @@ -2156,6 +2326,19 @@ bool AclTable::validate() return false; } + if (type.getName() == TABLE_TYPE_L3V4V6) + { + if (!m_pAclOrch->isAclL3V4V6TableSupported(stage)) + { + + SWSS_LOG_ERROR("Table %s: table type %s in stage %d not supported on this platform.", + id.c_str(), type.getName().c_str(), stage); + return false; + } + } + + + if (m_pAclOrch->isAclActionListMandatoryOnTableCreation(stage)) { if (type.getActions().empty()) @@ -2492,6 +2675,12 @@ bool AclTable::clear() for (auto& rulepair: rules) { auto& rule = *rulepair.second; + + if (rule.hasCounter()) + { + m_pAclOrch->deregisterFlexCounter(rule); + } + bool suc = 
rule.remove(); if (!suc) { @@ -2872,6 +3061,10 @@ void AclOrch::init(vector& connectors, PortsOrch *portOrch, Mirr { SWSS_LOG_ENTER(); + // Clear ACL_TABLE and ACL_RULE status from STATE_DB + removeAllAclTableStatus(); + removeAllAclRuleStatus(); + // TODO: Query SAI to get mirror table capabilities // Right now, verified platforms that support mirroring IPv6 packets are // Broadcom and Mellanox. Virtual switch is also supported for testing @@ -2903,11 +3096,36 @@ void AclOrch::init(vector& connectors, PortsOrch *portOrch, Mirr }; } + if ( platform == MRVL_PLATFORM_SUBSTRING || + platform == INVM_PLATFORM_SUBSTRING || + platform == VS_PLATFORM_SUBSTRING) + { + m_L3V4V6Capability = + { + {ACL_STAGE_INGRESS, true}, + {ACL_STAGE_EGRESS, true}, + }; + } + else + { + m_L3V4V6Capability = + { + {ACL_STAGE_INGRESS, false}, + {ACL_STAGE_EGRESS, false}, + }; + + } + + SWSS_LOG_NOTICE("%s switch capability:", platform.c_str()); SWSS_LOG_NOTICE(" TABLE_TYPE_MIRROR: %s", m_mirrorTableCapabilities[TABLE_TYPE_MIRROR] ? "yes" : "no"); SWSS_LOG_NOTICE(" TABLE_TYPE_MIRRORV6: %s", m_mirrorTableCapabilities[TABLE_TYPE_MIRRORV6] ? "yes" : "no"); + SWSS_LOG_NOTICE(" TABLE_TYPE_L3V4V6: Ingress [%s], Egress [%s]", + m_L3V4V6Capability[ACL_STAGE_INGRESS] ? "yes" : "no", + m_L3V4V6Capability[ACL_STAGE_EGRESS] ? 
"yes" : "no"); + // In Mellanox platform, V4 and V6 rules are stored in different tables // In Broadcom DNX platform also, V4 and V6 rules are stored in different tables @@ -2946,27 +3164,30 @@ void AclOrch::init(vector& connectors, PortsOrch *portOrch, Mirr } m_switchOrch->set_switch_capability(fvVector); - sai_attribute_t attrs[2]; - attrs[0].id = SAI_SWITCH_ATTR_ACL_ENTRY_MINIMUM_PRIORITY; - attrs[1].id = SAI_SWITCH_ATTR_ACL_ENTRY_MAXIMUM_PRIORITY; - - sai_status_t status = sai_switch_api->get_switch_attribute(gSwitchId, 2, attrs); - if (status == SAI_STATUS_SUCCESS) + if (gMySwitchType != "dpu") { - SWSS_LOG_NOTICE("Get ACL entry priority values, min: %u, max: %u", attrs[0].value.u32, attrs[1].value.u32); - AclRule::setRulePriorities(attrs[0].value.u32, attrs[1].value.u32); - } - else - { - SWSS_LOG_ERROR("Failed to get ACL entry priority min/max values, rv:%d", status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_SWITCH, status); - if (handle_status != task_process_status::task_success) + sai_attribute_t attrs[2]; + attrs[0].id = SAI_SWITCH_ATTR_ACL_ENTRY_MINIMUM_PRIORITY; + attrs[1].id = SAI_SWITCH_ATTR_ACL_ENTRY_MAXIMUM_PRIORITY; + + sai_status_t status = sai_switch_api->get_switch_attribute(gSwitchId, 2, attrs); + if (status == SAI_STATUS_SUCCESS) { - throw "AclOrch initialization failure"; + SWSS_LOG_NOTICE("Get ACL entry priority values, min: %u, max: %u", attrs[0].value.u32, attrs[1].value.u32); + AclRule::setRulePriorities(attrs[0].value.u32, attrs[1].value.u32); + } + else + { + SWSS_LOG_ERROR("Failed to get ACL entry priority min/max values, rv:%d", status); + task_process_status handle_status = handleSaiGetStatus(SAI_API_SWITCH, status); + if (handle_status != task_process_status::task_success) + { + throw "AclOrch initialization failure"; + } } - } - queryAclActionCapability(); + queryAclActionCapability(); + } for (auto stage: {ACL_STAGE_INGRESS, ACL_STAGE_EGRESS}) { @@ -3002,8 +3223,6 @@ void AclOrch::initDefaultTableTypes() 
.withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_SRC_PORT)) .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_DST_PORT)) .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_TCP_FLAGS)) - .withMatch(make_shared(set{ - {SAI_ACL_RANGE_TYPE_L4_SRC_PORT_RANGE, SAI_ACL_RANGE_TYPE_L4_DST_PORT_RANGE}})) .build() ); @@ -3021,8 +3240,31 @@ void AclOrch::initDefaultTableTypes() .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_SRC_PORT)) .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_DST_PORT)) .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_TCP_FLAGS)) - .withMatch(make_shared(set{ - {SAI_ACL_RANGE_TYPE_L4_SRC_PORT_RANGE, SAI_ACL_RANGE_TYPE_L4_DST_PORT_RANGE}})) + .build() + ); + + + addAclTableType( + builder.withName(TABLE_TYPE_L3V4V6) + .withBindPointType(SAI_ACL_BIND_POINT_TYPE_PORT) + .withBindPointType(SAI_ACL_BIND_POINT_TYPE_LAG) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ETHER_TYPE)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_OUTER_VLAN_ID)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ACL_IP_TYPE)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_SRC_IP)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_DST_IP)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ICMP_TYPE)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ICMP_CODE)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_IP_PROTOCOL)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_SRC_IPV6)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_DST_IPV6)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ICMPV6_CODE)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ICMPV6_TYPE)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_IPV6_NEXT_HEADER)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_SRC_PORT)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_DST_PORT)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_TCP_FLAGS)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_IN_PORTS)) .build() ); @@ -3057,7 +3299,9 @@ void AclOrch::initDefaultTableTypes() 
addAclTableType( builder.withName(TABLE_TYPE_DROP) .withBindPointType(SAI_ACL_BIND_POINT_TYPE_PORT) + .withBindPointType(SAI_ACL_BIND_POINT_TYPE_LAG) .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_TC)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_IN_PORTS)) .build() ); @@ -3265,10 +3509,21 @@ void AclOrch::putAclActionCapabilityInDB(acl_stage_type_t stage) } } + is_action_list_mandatory_stream << boolalpha << capabilities.isActionListMandatoryOnTableCreation; fvVector.emplace_back(STATE_DB_ACL_ACTION_FIELD_IS_ACTION_LIST_MANDATORY, is_action_list_mandatory_stream.str()); fvVector.emplace_back(STATE_DB_ACL_ACTION_FIELD_ACTION_LIST, acl_action_value_stream.str()); + + for (auto const& it : m_L3V4V6Capability) + { + string value = it.second ? "true" : "false"; + if (it.first == stage) + { + fvVector.emplace_back(STATE_DB_ACL_L3V4V6_SUPPORTED, value); + } + } + m_aclStageCapabilityTable.set(stage_str, fvVector); } @@ -3319,8 +3574,6 @@ void AclOrch::queryAclActionAttrEnumValues(const string &action_name, SWSS_LOG_THROW("%s is not an enum", action_name.c_str()); } - // TODO: once sai object api is available make this code compile -#ifdef SAIREDIS_SUPPORT_OBJECT_API vector values_list(meta->enummetadata->valuescount); sai_s32_list_t values; values.count = static_cast(values_list.size()); @@ -3339,7 +3592,7 @@ void AclOrch::queryAclActionAttrEnumValues(const string &action_name, } else { - SWSS_LOG_WARN("Failed to query enum values supported for ACL action %s - ", + SWSS_LOG_WARN("Failed to query enum values supported for ACL action %s - " "API is not implemented, assuming all values are supported for this action", action_name.c_str()); /* assume all enum values are supported */ @@ -3348,13 +3601,6 @@ void AclOrch::queryAclActionAttrEnumValues(const string &action_name, m_aclEnumActionCapabilities[acl_action].insert(meta->enummetadata->values[i]); } } -#else - /* assume all enum values are supported until sai object api is available */ - for (size_t i = 0; i < 
meta->enummetadata->valuescount; i++) - { - m_aclEnumActionCapabilities[acl_action].insert(meta->enummetadata->values[i]); - } -#endif // put supported values in DB for (const auto& it: lookupMap) @@ -3378,6 +3624,8 @@ AclOrch::AclOrch(vector& connectors, DBConnector* stateDb, Switc PortsOrch *portOrch, MirrorOrch *mirrorOrch, NeighOrch *neighOrch, RouteOrch *routeOrch, DTelOrch *dtelOrch) : Orch(connectors), m_aclStageCapabilityTable(stateDb, STATE_ACL_STAGE_CAPABILITY_TABLE_NAME), + m_aclTableStateTable(stateDb, STATE_ACL_TABLE_TABLE_NAME), + m_aclRuleStateTable(stateDb, STATE_ACL_RULE_TABLE_NAME), m_switchOrch(switchOrch), m_mirrorOrch(mirrorOrch), m_neighOrch(neighOrch), @@ -3698,7 +3946,7 @@ bool AclOrch::addAclTable(AclTable &newTable) } // Update matching field according to ACL stage newTable.addStageMandatoryMatchFields(); - + // Add mandatory ACL action if not present // We need to call addMandatoryActions here because addAclTable is directly called in other orchs. // The action_list is already added if the ACL table creation is triggered by CONFIGDD, but calling addMandatoryActions @@ -4074,6 +4322,16 @@ bool AclOrch::isAclActionListMandatoryOnTableCreation(acl_stage_type_t stage) co return it->second.isActionListMandatoryOnTableCreation; } +bool AclOrch::isAclL3V4V6TableSupported(acl_stage_type_t stage) const +{ + const auto& it = m_L3V4V6Capability.find(stage); + if (it == m_L3V4V6Capability.cend()) + { + return false; + } + return it->second; +} + bool AclOrch::isAclActionSupported(acl_stage_type_t stage, sai_acl_action_type_t action) const { const auto& it = m_aclCapabilities.find(stage); @@ -4201,6 +4459,8 @@ void AclOrch::doAclTableTask(Consumer &consumer) { SWSS_LOG_NOTICE("Successfully updated existing ACL table %s", table_id.c_str()); + // Mark ACL table as ACTIVE + setAclTableStatus(table_id, AclObjectStatus::ACTIVE); it = consumer.m_toSync.erase(it); } else @@ -4213,14 +4473,23 @@ void AclOrch::doAclTableTask(Consumer &consumer) else { if 
(addAclTable(newTable)) + { + // Mark ACL table as ACTIVE + setAclTableStatus(table_id, AclObjectStatus::ACTIVE); it = consumer.m_toSync.erase(it); + } else + { + setAclTableStatus(table_id, AclObjectStatus::PENDING_CREATION); it++; + } } } else { it = consumer.m_toSync.erase(it); + // Mark the ACL table as inactive if the configuration is invalid + setAclTableStatus(table_id, AclObjectStatus::INACTIVE); SWSS_LOG_ERROR("Failed to create ACL table %s, invalid configuration", table_id.c_str()); } @@ -4228,9 +4497,17 @@ void AclOrch::doAclTableTask(Consumer &consumer) else if (op == DEL_COMMAND) { if (removeAclTable(table_id)) + { + // Remove ACL table status from STATE_DB + removeAclTableStatus(table_id); it = consumer.m_toSync.erase(it); + } else + { + // Set the status of ACL_TABLE to pending removal if removeAclTable returns error + setAclTableStatus(table_id, AclObjectStatus::PENDING_REMOVAL); it++; + } } else { @@ -4308,6 +4585,8 @@ void AclOrch::doAclRuleTask(Consumer &consumer) } bool bHasTCPFlag = false; bool bHasIPProtocol = false; + bool bHasIPV4 = false; + bool bHasIPV6 = false; for (const auto& itr : kfvFieldsValues(t)) { string attr_name = to_upper(fvField(itr)); @@ -4318,6 +4597,14 @@ void AclOrch::doAclRuleTask(Consumer &consumer) { bHasTCPFlag = true; } + if (attr_name == MATCH_SRC_IP || attr_name == MATCH_DST_IP) + { + bHasIPV4 = true; + } + if (attr_name == MATCH_SRC_IPV6 || attr_name == MATCH_DST_IPV6) + { + bHasIPV6 = true; + } if (attr_name == MATCH_IP_PROTOCOL || attr_name == MATCH_NEXT_HEADER) { bHasIPProtocol = true; @@ -4366,26 +4653,50 @@ void AclOrch::doAclRuleTask(Consumer &consumer) } } + if (bHasIPV4 && bHasIPV6) + { + if (type == TABLE_TYPE_L3V4V6) + { + SWSS_LOG_ERROR("Rule '%s' is invalid since it has both v4 and v6 matchfields.", rule_id.c_str()); + bAllAttributesOk = false; + } + } + // validate and create ACL rule if (bAllAttributesOk && newRule->validate()) { if (addAclRule(newRule, table_id)) + { + setAclRuleStatus(table_id, 
rule_id, AclObjectStatus::ACTIVE); it = consumer.m_toSync.erase(it); + } else + { + setAclRuleStatus(table_id, rule_id, AclObjectStatus::PENDING_CREATION); it++; + } } else { it = consumer.m_toSync.erase(it); + // Mark the rule inactive if the configuration is invalid + setAclRuleStatus(table_id, rule_id, AclObjectStatus::INACTIVE); SWSS_LOG_ERROR("Failed to create ACL rule. Rule configuration is invalid"); } } else if (op == DEL_COMMAND) { if (removeAclRule(table_id, rule_id)) + { + removeAclRuleStatus(table_id, rule_id); it = consumer.m_toSync.erase(it); + } else + { + // Mark pending removal status if removeAclRule returns error + setAclRuleStatus(table_id, rule_id, AclObjectStatus::PENDING_REMOVAL); it++; + } } else { @@ -4417,10 +4728,12 @@ void AclOrch::doAclTableTypeTask(Consumer &consumer) } addAclTableType(builder.build()); + SWSS_LOG_NOTICE("Created ACL table type %s", key.c_str()); } else if (op == DEL_COMMAND) { removeAclTableType(key); + SWSS_LOG_NOTICE("Removed ACL table type %s", key.c_str()); } else { @@ -4741,3 +5054,55 @@ bool AclOrch::getAclBindPortId(Port &port, sai_object_id_t &port_id) return true; } + +// Set the status of ACL table in STATE_DB +void AclOrch::setAclTableStatus(string table_name, AclObjectStatus status) +{ + vector fvVector; + fvVector.emplace_back("status", aclObjectStatusLookup[status]); + m_aclTableStateTable.set(table_name, fvVector); +} + +// Remove the status record of given ACL table from STATE_DB +void AclOrch::removeAclTableStatus(string table_name) +{ + m_aclTableStateTable.del(table_name); +} + +// Set the status of ACL rule in STATE_DB +void AclOrch::setAclRuleStatus(string table_name, string rule_name, AclObjectStatus status) +{ + vector fvVector; + fvVector.emplace_back("status", aclObjectStatusLookup[status]); + m_aclRuleStateTable.set(table_name + string("|") + rule_name, fvVector); +} + +// Remove the status record of given ACL rule from STATE_DB +void AclOrch::removeAclRuleStatus(string table_name, string 
rule_name) +{ + m_aclRuleStateTable.del(table_name + string("|") + rule_name); +} + +// Remove all ACL table status from STATE_DB +void AclOrch::removeAllAclTableStatus() +{ + vector keys; + m_aclTableStateTable.getKeys(keys); + + for (auto key : keys) + { + m_aclTableStateTable.del(key); + } +} + +// Remove all ACL rule status from STATE_DB +void AclOrch::removeAllAclRuleStatus() +{ + vector keys; + m_aclRuleStateTable.getKeys(keys); + for (auto key : keys) + { + m_aclRuleStateTable.del(key); + } +} + diff --git a/orchagent/aclorch.h b/orchagent/aclorch.h index ce3e9e5d63..abeaf519e2 100644 --- a/orchagent/aclorch.h +++ b/orchagent/aclorch.h @@ -49,6 +49,8 @@ #define MATCH_INNER_IP_PROTOCOL "INNER_IP_PROTOCOL" #define MATCH_INNER_L4_SRC_PORT "INNER_L4_SRC_PORT" #define MATCH_INNER_L4_DST_PORT "INNER_L4_DST_PORT" +#define MATCH_BTH_OPCODE "BTH_OPCODE" +#define MATCH_AETH_SYNDROME "AETH_SYNDROME" #define BIND_POINT_TYPE_PORT "PORT" #define BIND_POINT_TYPE_PORTCHANNEL "PORTCHANNEL" @@ -65,6 +67,7 @@ #define ACTION_DTEL_TAIL_DROP_REPORT_ENABLE "TAIL_DROP_REPORT_ENABLE" #define ACTION_DTEL_FLOW_SAMPLE_PERCENT "FLOW_SAMPLE_PERCENT" #define ACTION_DTEL_REPORT_ALL_PACKETS "REPORT_ALL_PACKETS" +#define ACTION_COUNTER "COUNTER" #define PACKET_ACTION_FORWARD "FORWARD" #define PACKET_ACTION_DROP "DROP" @@ -83,20 +86,29 @@ #define IP_TYPE_IP "IP" #define IP_TYPE_NON_IP "NON_IP" #define IP_TYPE_IPv4ANY "IPV4ANY" -#define IP_TYPE_NON_IPv4 "NON_IPv4" +#define IP_TYPE_NON_IPv4 "NON_IPV4" #define IP_TYPE_IPv6ANY "IPV6ANY" -#define IP_TYPE_NON_IPv6 "NON_IPv6" +#define IP_TYPE_NON_IPv6 "NON_IPV6" #define IP_TYPE_ARP "ARP" #define IP_TYPE_ARP_REQUEST "ARP_REQUEST" #define IP_TYPE_ARP_REPLY "ARP_REPLY" #define MLNX_MAX_RANGES_COUNT 16 #define INGRESS_TABLE_DROP "IngressTableDrop" +#define EGRESS_TABLE_DROP "EgressTableDrop" #define RULE_OPER_ADD 0 #define RULE_OPER_DELETE 1 #define ACL_COUNTER_FLEX_COUNTER_GROUP "ACL_STAT_COUNTER" +enum AclObjectStatus +{ + ACTIVE = 0, + INACTIVE, + 
PENDING_CREATION, + PENDING_REMOVAL +}; + struct AclActionCapabilities { set actionList; @@ -263,6 +275,7 @@ class AclRule sai_object_id_t getCounterOid() const; bool hasCounter() const; vector getInPorts() const; + bool getCreateCounter() const; const vector& getRangeConfig() const; static shared_ptr makeShared(AclOrch *acl, MirrorOrch *mirror, DTelOrch *dtel, const string& rule, const string& table, const KeyOpFieldsValuesTuple&); @@ -391,6 +404,9 @@ class AclTable // Add stage mandatory matching fields to ACL table bool addStageMandatoryMatchFields(); + // Add stage mandatory range fields to ACL table + bool addStageMandatoryRangeFields(); + // validate AclRule match attribute against rule and table configuration bool validateAclRuleMatch(sai_acl_entry_attr_t matchId, const AclRule& rule) const; // validate AclRule action attribute against rule and table configuration @@ -486,12 +502,14 @@ class AclOrch : public Orch, public Observer bool isAclMirrorV6Supported() const; bool isAclMirrorV4Supported() const; bool isAclMirrorTableSupported(string type) const; + bool isAclL3V4V6TableSupported(acl_stage_type_t stage) const; bool isAclActionListMandatoryOnTableCreation(acl_stage_type_t stage) const; bool isAclActionSupported(acl_stage_type_t stage, sai_acl_action_type_t action) const; bool isAclActionEnumValueSupported(sai_acl_action_type_t action, sai_acl_action_parameter_t param) const; bool m_isCombinedMirrorV6Table = true; map m_mirrorTableCapabilities; + map m_L3V4V6Capability; void registerFlexCounter(const AclRule& rule); void deregisterFlexCounter(const AclRule& rule); @@ -546,6 +564,15 @@ class AclOrch : public Orch, public Observer string generateAclRuleIdentifierInCountersDb(const AclRule& rule) const; + void setAclTableStatus(string table_name, AclObjectStatus status); + void setAclRuleStatus(string table_name, string rule_name, AclObjectStatus status); + + void removeAclTableStatus(string table_name); + void removeAclRuleStatus(string table_name, string 
rule_name); + + void removeAllAclTableStatus(); + void removeAllAclRuleStatus(); + map m_AclTables; // TODO: Move all ACL tables into one map: name -> instance map m_ctrlAclTables; @@ -556,6 +583,9 @@ class AclOrch : public Orch, public Observer Table m_aclStageCapabilityTable; + Table m_aclTableStateTable; + Table m_aclRuleStateTable; + map m_mirrorTableId; map m_mirrorV6TableId; diff --git a/orchagent/acltable.h b/orchagent/acltable.h index 2d91a84b98..1b1cdeb29a 100644 --- a/orchagent/acltable.h +++ b/orchagent/acltable.h @@ -25,6 +25,7 @@ extern "C" { #define TABLE_TYPE_L3 "L3" #define TABLE_TYPE_L3V6 "L3V6" +#define TABLE_TYPE_L3V4V6 "L3V4V6" #define TABLE_TYPE_MIRROR "MIRROR" #define TABLE_TYPE_MIRRORV6 "MIRRORV6" #define TABLE_TYPE_MIRROR_DSCP "MIRROR_DSCP" diff --git a/orchagent/bfdorch.cpp b/orchagent/bfdorch.cpp index e3cab2581a..6c435cdddb 100644 --- a/orchagent/bfdorch.cpp +++ b/orchagent/bfdorch.cpp @@ -13,7 +13,9 @@ using namespace swss; #define BFD_SESSION_DEFAULT_TX_INTERVAL 1000 #define BFD_SESSION_DEFAULT_RX_INTERVAL 1000 -#define BFD_SESSION_DEFAULT_DETECT_MULTIPLIER 3 +#define BFD_SESSION_DEFAULT_DETECT_MULTIPLIER 10 +// TOS: default 6-bit DSCP value 48, default 2-bit ecn value 0. 
48<<2 = 192 +#define BFD_SESSION_DEFAULT_TOS 192 #define BFD_SESSION_MILLISECOND_TO_MICROSECOND 1000 #define BFD_SRCPORTINIT 49152 #define BFD_SRCPORTMAX 65536 @@ -82,7 +84,12 @@ BfdOrch::~BfdOrch(void) void BfdOrch::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); - + BgpGlobalStateOrch* bgp_global_state_orch = gDirectory.get(); + bool tsa_enabled = false; + if (bgp_global_state_orch) + { + tsa_enabled = bgp_global_state_orch->getTsaState(); + } auto it = consumer.m_toSync.begin(); while (it != consumer.m_toSync.end()) { @@ -94,18 +101,66 @@ void BfdOrch::doTask(Consumer &consumer) if (op == SET_COMMAND) { - if (!create_bfd_session(key, data)) + bool tsa_shutdown_enabled = false; + for (auto i : data) { - it++; - continue; + auto value = fvValue(i); + //shutdown_bfd_during_tsa parameter is used by the BFD session creator to ensure that the the + //specified session gets removed when the device goes into TSA state. + //if this parameter is not specified or set to false for a session, the + // corrosponding BFD session would be maintained even in TSA state. 
+ if (fvField(i) == "shutdown_bfd_during_tsa" && value == "true" ) + { + tsa_shutdown_enabled = true; + break; + } + } + if (tsa_shutdown_enabled) + { + bfd_session_cache[key] = data; + if (!tsa_enabled) + { + if (!create_bfd_session(key, data)) + { + it++; + continue; + } + } + else + { + notify_session_state_down(key); + } + } + else + { + if (!create_bfd_session(key, data)) + { + it++; + continue; + } } } else if (op == DEL_COMMAND) { - if (!remove_bfd_session(key)) + if (bfd_session_cache.find(key) != bfd_session_cache.end() ) { - it++; - continue; + bfd_session_cache.erase(key); + if (!tsa_enabled) + { + if (!remove_bfd_session(key)) + { + it++; + continue; + } + } + } + else + { + if (!remove_bfd_session(key)) + { + it++; + continue; + } } } else @@ -243,6 +298,7 @@ bool BfdOrch::create_bfd_session(const string& key, const vector(value); + } + else if (fvField(i) == "shutdown_bfd_during_tsa") + { + //since we are handling shutdown_bfd_during_tsa in the caller function, we need to ignore it here. + //failure to ignore this parameter would cause error log. 
+ continue; + } else SWSS_LOG_ERROR("Unsupported BFD attribute %s\n", fvField(i).c_str()); } @@ -306,9 +372,11 @@ bool BfdOrch::create_bfd_session(const string& key, const vector(&update)); +} + +void BfdOrch::handleTsaStateChange(bool tsaState) +{ + SWSS_LOG_ENTER(); + for (auto it : bfd_session_cache) + { + if (tsaState == true) + { + if (bfd_session_map.find(it.first) != bfd_session_map.end()) + { + notify_session_state_down(it.first); + remove_bfd_session(it.first); + } + } + else + { + if (bfd_session_map.find(it.first) == bfd_session_map.end()) + { + create_bfd_session(it.first, it.second); + } + } + } +} + +BgpGlobalStateOrch::BgpGlobalStateOrch(DBConnector *db, string tableName): + Orch(db, tableName) +{ + SWSS_LOG_ENTER(); + tsa_enabled = false; +} + +BgpGlobalStateOrch::~BgpGlobalStateOrch(void) +{ + SWSS_LOG_ENTER(); +} + +bool BgpGlobalStateOrch::getTsaState() +{ + SWSS_LOG_ENTER(); + return tsa_enabled; +} +void BgpGlobalStateOrch::doTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + + string key = kfvKey(t); + string op = kfvOp(t); + auto data = kfvFieldsValues(t); + + if (op == SET_COMMAND) + { + for (auto i : data) + { + auto value = fvValue(i); + auto type = fvField(i); + SWSS_LOG_INFO("SET on key %s, data T %s, V %s\n", key.c_str(), type.c_str(), value.c_str()); + if (type == "tsa_enabled") + { + bool state = true ? 
value == "true" : false; + if (tsa_enabled != state) + { + SWSS_LOG_NOTICE("BgpGlobalStateOrch TSA state Changed to %d from %d.\n", int(state), int(tsa_enabled)); + tsa_enabled = state; + + BfdOrch* bfd_orch = gDirectory.get(); + if (bfd_orch) + { + bfd_orch->handleTsaStateChange(state); + } + } + } + } + } + else if (op == DEL_COMMAND) + { + SWSS_LOG_ERROR("DEL on key %s is not expected.\n", key.c_str()); + } + else + { + SWSS_LOG_ERROR("Unknown operation type %s\n", op.c_str()); + } + it = consumer.m_toSync.erase(it); + } +} + diff --git a/orchagent/bfdorch.h b/orchagent/bfdorch.h index 4a0cb9edfb..31e0e4c930 100644 --- a/orchagent/bfdorch.h +++ b/orchagent/bfdorch.h @@ -17,6 +17,7 @@ class BfdOrch: public Orch, public Subject void doTask(swss::NotificationConsumer &consumer); BfdOrch(swss::DBConnector *db, std::string tableName, TableConnector stateDbBfdSessionTable); virtual ~BfdOrch(void); + void handleTsaStateChange(bool tsaState); private: bool create_bfd_session(const std::string& key, const std::vector& data); @@ -26,6 +27,7 @@ class BfdOrch: public Orch, public Subject uint32_t bfd_gen_id(void); uint32_t bfd_src_port(void); + void notify_session_state_down(const std::string& key); bool register_bfd_state_change_notification(void); void update_port_number(std::vector &attrs); sai_status_t retry_create_bfd_session(sai_object_id_t &bfd_session_id, vector attrs); @@ -37,6 +39,20 @@ class BfdOrch: public Orch, public Subject swss::NotificationConsumer* m_bfdStateNotificationConsumer; bool register_state_change_notif; + std::map> bfd_session_cache; + }; +class BgpGlobalStateOrch : public Orch +{ +public: + void doTask(Consumer &consumer); + BgpGlobalStateOrch(swss::DBConnector *db, std::string tableName); + virtual ~BgpGlobalStateOrch(void); + bool getTsaState(); + +private: + bool tsa_enabled; + +}; #endif /* SWSS_BFDORCH_H */ diff --git a/orchagent/bufferorch.cpp b/orchagent/bufferorch.cpp index 3519ba432f..c3a63c5ec3 100644 --- a/orchagent/bufferorch.cpp +++ 
b/orchagent/bufferorch.cpp @@ -19,6 +19,9 @@ extern sai_buffer_api_t *sai_buffer_api; extern PortsOrch *gPortsOrch; extern Directory gDirectory; extern sai_object_id_t gSwitchId; +extern string gMySwitchType; +extern string gMyHostName; +extern string gMyAsicName; #define BUFFER_POOL_WATERMARK_FLEX_STAT_COUNTER_POLL_MSECS "60000" @@ -30,12 +33,12 @@ static const vector bufferPoolWatermarkStatIds = }; type_map BufferOrch::m_buffer_type_maps = { - {APP_BUFFER_POOL_TABLE_NAME, new object_reference_map()}, - {APP_BUFFER_PROFILE_TABLE_NAME, new object_reference_map()}, - {APP_BUFFER_QUEUE_TABLE_NAME, new object_reference_map()}, - {APP_BUFFER_PG_TABLE_NAME, new object_reference_map()}, - {APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, new object_reference_map()}, - {APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, new object_reference_map()} + {APP_BUFFER_POOL_TABLE_NAME, make_shared()}, + {APP_BUFFER_PROFILE_TABLE_NAME, make_shared()}, + {APP_BUFFER_QUEUE_TABLE_NAME, make_shared()}, + {APP_BUFFER_PG_TABLE_NAME, make_shared()}, + {APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, make_shared()}, + {APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, make_shared()} }; map buffer_to_ref_table_map = { @@ -59,7 +62,11 @@ BufferOrch::BufferOrch(DBConnector *applDb, DBConnector *confDb, DBConnector *st initTableHandlers(); initBufferReadyLists(applDb, confDb); initFlexCounterGroupTable(); - initBufferConstants(); + + if (gMySwitchType != "dpu") + { + initBufferConstants(); + } }; void BufferOrch::initTableHandlers() @@ -103,16 +110,32 @@ void BufferOrch::initBufferReadyLists(DBConnector *applDb, DBConnector *confDb) Table pg_table(applDb, APP_BUFFER_PG_TABLE_NAME); initBufferReadyList(pg_table, false); - Table queue_table(applDb, APP_BUFFER_QUEUE_TABLE_NAME); - initBufferReadyList(queue_table, false); + if(gMySwitchType == "voq") + { + Table queue_table(applDb, APP_BUFFER_QUEUE_TABLE_NAME); + initVoqBufferReadyList(queue_table, false); + } + else + { + Table queue_table(applDb, 
APP_BUFFER_QUEUE_TABLE_NAME); + initBufferReadyList(queue_table, false); + } } else { Table pg_table(confDb, CFG_BUFFER_PG_TABLE_NAME); initBufferReadyList(pg_table, true); - Table queue_table(confDb, CFG_BUFFER_QUEUE_TABLE_NAME); - initBufferReadyList(queue_table, true); + if(gMySwitchType == "voq") + { + Table queue_table(confDb, CFG_BUFFER_QUEUE_TABLE_NAME); + initVoqBufferReadyList(queue_table, true); + } + else + { + Table queue_table(confDb, CFG_BUFFER_QUEUE_TABLE_NAME); + initBufferReadyList(queue_table, true); + } } } @@ -149,6 +172,38 @@ void BufferOrch::initBufferReadyList(Table& table, bool isConfigDb) } } +void BufferOrch::initVoqBufferReadyList(Table& table, bool isConfigDb) +{ + SWSS_LOG_ENTER(); + + std::vector keys; + table.getKeys(keys); + + const char dbKeyDelimiter = (isConfigDb ? config_db_key_delimiter : delimiter); + + // populate the lists with buffer configuration information + for (const auto& key: keys) + { + auto &&tokens = tokenize(key, dbKeyDelimiter); + if (tokens.size() != 4) + { + SWSS_LOG_ERROR("Wrong format of a table '%s' key '%s'. 
Skip it", table.getTableName().c_str(), key.c_str()); + continue; + } + + // We need transform the key from config db format to appl db format + auto appldb_key = tokens[0] + config_db_key_delimiter + tokens[1] + config_db_key_delimiter + tokens[2] + delimiter + tokens[3]; + m_ready_list[appldb_key] = false; + + auto &&port_names = tokenize(tokens[0] + config_db_key_delimiter + tokens[1] + config_db_key_delimiter + tokens[2], list_item_delimiter); + for(const auto& port_name: port_names) + { + SWSS_LOG_INFO("Item %s has been inserted into ready list", appldb_key.c_str()); + m_port_ready_list_ref[port_name].push_back(appldb_key); + } + } +} + void BufferOrch::initBufferConstants() { sai_status_t status; @@ -323,6 +378,7 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) string map_type_name = APP_BUFFER_POOL_TABLE_NAME; string object_name = kfvKey(tuple); string op = kfvOp(tuple); + string xoff; SWSS_LOG_DEBUG("object name:%s", object_name.c_str()); if (m_buffer_type_maps[map_type_name]->find(object_name) != m_buffer_type_maps[map_type_name]->end()) @@ -417,6 +473,7 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) attr.value.u64 = (uint64_t)stoul(value); attr.id = SAI_BUFFER_POOL_ATTR_XOFF_SIZE; attribs.push_back(attr); + xoff = value; } else { @@ -469,6 +526,15 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) // "FLEX_COUNTER_STATUS" m_countersDb->hset(COUNTERS_BUFFER_POOL_NAME_MAP, object_name, sai_serialize_object_id(sai_object)); } + + // Only publish the result when shared headroom pool is enabled and it has been successfully applied to SAI + if (!xoff.empty()) + { + vector fvs; + fvs.emplace_back("xoff", xoff); + SWSS_LOG_INFO("Publishing the result after applying the shared headroom pool size %s to SAI", xoff.c_str()); + m_publisher.publish(APP_BUFFER_POOL_TABLE_NAME, object_name, fvs, ReturnCode(SAI_STATUS_SUCCESS), true); + } } else if (op == DEL_COMMAND) 
{ @@ -499,6 +565,9 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) auto it_to_delete = (m_buffer_type_maps[map_type_name])->find(object_name); (m_buffer_type_maps[map_type_name])->erase(it_to_delete); m_countersDb->hdel(COUNTERS_BUFFER_POOL_NAME_MAP, object_name); + + vector fvs; + m_publisher.publish(APP_BUFFER_POOL_TABLE_NAME, object_name, fvs, ReturnCode(SAI_STATUS_SUCCESS), true); } else { @@ -635,6 +704,7 @@ task_process_status BufferOrch::processBufferProfile(KeyOpFieldsValuesTuple &tup } if (SAI_NULL_OBJECT_ID != sai_object) { + vector attribs_to_retry; SWSS_LOG_DEBUG("Modifying existing sai object:%" PRIx64, sai_object); for (auto &attribute : attribs) { @@ -646,7 +716,18 @@ task_process_status BufferOrch::processBufferProfile(KeyOpFieldsValuesTuple &tup } else if (SAI_STATUS_SUCCESS != sai_status) { - SWSS_LOG_ERROR("Failed to modify buffer profile, name:%s, sai object:%" PRIx64 ", status:%d", object_name.c_str(), sai_object, sai_status); + SWSS_LOG_NOTICE("Unable to modify buffer profile, name:%s, sai object:%" PRIx64 ", status:%d, will retry one more time", object_name.c_str(), sai_object, sai_status); + attribs_to_retry.push_back(attribute); + } + } + + for (auto &attribute : attribs) + { + sai_status = sai_buffer_api->set_buffer_profile_attribute(sai_object, &attribute); + if (SAI_STATUS_SUCCESS != sai_status) + { + // A retried attribute can not be "not implemented" + SWSS_LOG_ERROR("Failed to modify buffer profile, name:%s, sai object:%" PRIx64 ", status:%d, will retry once", object_name.c_str(), sai_object, sai_status); task_process_status handle_status = handleSaiSetStatus(SAI_API_BUFFER, sai_status); if (handle_status != task_process_status::task_success) { @@ -712,7 +793,8 @@ task_process_status BufferOrch::processBufferProfile(KeyOpFieldsValuesTuple &tup } /* -Input sample "BUFFER_QUEUE|Ethernet4,Ethernet45|10-15" + Input sample "BUFFER_QUEUE|Ethernet4,Ethernet45|10-15" or + 
"BUFFER_QUEUE|STG01-0101-0400-01T2-LC6|ASIC0|Ethernet4|10-15" */ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) { @@ -724,18 +806,47 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) vector tokens; sai_uint32_t range_low, range_high; bool need_update_sai = true; + bool local_port = false; + string local_port_name; SWSS_LOG_DEBUG("Processing:%s", key.c_str()); tokens = tokenize(key, delimiter); - if (tokens.size() != 2) + + vector port_names; + if (gMySwitchType == "voq") { - SWSS_LOG_ERROR("malformed key:%s. Must contain 2 tokens", key.c_str()); - return task_process_status::task_invalid_entry; + if (tokens.size() != 4) + { + SWSS_LOG_ERROR("malformed key:%s. Must contain 4 tokens", key.c_str()); + return task_process_status::task_invalid_entry; + } + + port_names = tokenize(tokens[0] + config_db_key_delimiter + tokens[1] + config_db_key_delimiter + tokens[2], list_item_delimiter); + if (!parseIndexRange(tokens[3], range_low, range_high)) + { + return task_process_status::task_invalid_entry; + } + + if((tokens[0] == gMyHostName) && (tokens[1] == gMyAsicName)) + { + local_port = true; + local_port_name = tokens[2]; + SWSS_LOG_INFO("System port %s is local port %d local port name %s", port_names[0].c_str(), local_port, local_port_name.c_str()); + } } - vector port_names = tokenize(tokens[0], list_item_delimiter); - if (!parseIndexRange(tokens[1], range_low, range_high)) + else { - return task_process_status::task_invalid_entry; + if (tokens.size() != 2) + { + SWSS_LOG_ERROR("malformed key:%s. 
Must contain 2 tokens", key.c_str()); + return task_process_status::task_invalid_entry; + } + + port_names = tokenize(tokens[0], list_item_delimiter); + if (!parseIndexRange(tokens[1], range_low, range_high)) + { + return task_process_status::task_invalid_entry; + } } if (op == SET_COMMAND) @@ -755,6 +866,21 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) return task_process_status::task_failed; } + string old_buffer_profile_name; + if (doesObjectExist(m_buffer_type_maps, APP_BUFFER_QUEUE_TABLE_NAME, key, buffer_profile_field_name, old_buffer_profile_name) + && (old_buffer_profile_name == buffer_profile_name)) + { + if (m_partiallyAppliedQueues.find(key) == m_partiallyAppliedQueues.end()) + { + SWSS_LOG_INFO("Skip setting buffer queue %s to %s since it is not changed", key.c_str(), buffer_profile_name.c_str()); + return task_process_status::task_success; + } + else + { + m_partiallyAppliedQueues.erase(key); + } + } + SWSS_LOG_NOTICE("Set buffer queue %s to %s", key.c_str(), buffer_profile_name.c_str()); setObjectReference(m_buffer_type_maps, APP_BUFFER_QUEUE_TABLE_NAME, key, buffer_profile_field_name, buffer_profile_name); @@ -770,6 +896,7 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) sai_buffer_profile = SAI_NULL_OBJECT_ID; SWSS_LOG_NOTICE("Remove buffer queue %s", key.c_str()); removeObject(m_buffer_type_maps, APP_BUFFER_QUEUE_TABLE_NAME, key); + m_partiallyAppliedQueues.erase(key); } else { @@ -784,6 +911,12 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) { Port port; SWSS_LOG_DEBUG("processing port:%s", port_name.c_str()); + + if(local_port == true) + { + port_name = local_port_name; + } + if (!gPortsOrch->getPort(port_name, port)) { SWSS_LOG_ERROR("Port with alias:%s not found", port_name.c_str()); @@ -792,20 +925,36 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) for (size_t ind = range_low; ind <= range_high; ind++) { 
SWSS_LOG_DEBUG("processing queue:%zd", ind); - if (port.m_queue_ids.size() <= ind) + sai_object_id_t queue_id; + + if (gMySwitchType == "voq") { - SWSS_LOG_ERROR("Invalid queue index specified:%zd", ind); - return task_process_status::task_invalid_entry; - } - if (port.m_queue_lock[ind]) + std :: vector queue_ids = gPortsOrch->getPortVoQIds(port); + if (queue_ids.size() <= ind) + { + SWSS_LOG_ERROR("Invalid voq index specified:%zd", ind); + return task_process_status::task_invalid_entry; + } + queue_id = queue_ids[ind]; + } + else { - SWSS_LOG_WARN("Queue %zd on port %s is locked, will retry", ind, port_name.c_str()); - return task_process_status::task_need_retry; + if (port.m_queue_ids.size() <= ind) + { + SWSS_LOG_ERROR("Invalid queue index specified:%zd", ind); + return task_process_status::task_invalid_entry; + } + if (port.m_queue_lock[ind]) + { + SWSS_LOG_WARN("Queue %zd on port %s is locked, will retry", ind, port_name.c_str()); + m_partiallyAppliedQueues.insert(key); + return task_process_status::task_need_retry; + } + queue_id = port.m_queue_ids[ind]; } + if (need_update_sai) { - sai_object_id_t queue_id; - queue_id = port.m_queue_ids[ind]; SWSS_LOG_DEBUG("Applying buffer profile:0x%" PRIx64 " to queue index:%zd, queue sai_id:0x%" PRIx64, sai_buffer_profile, ind, queue_id); sai_status_t sai_status = sai_queue_api->set_queue_attribute(queue_id, &attr); if (sai_status != SAI_STATUS_SUCCESS) @@ -817,16 +966,20 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) return handle_status; } } - // create/remove a port queue counter for the queue buffer - else + // create/remove a port queue counter for the queue buffer. + // For VOQ chassis, flexcounterorch adds the Queue Counters for all egress and VOQ queues of all front panel and system ports + // to the FLEX_COUNTER_DB irrespective of BUFFER_QUEUE configuration. So Port Queue counter needs to be updated only for non VOQ switch. 
+ else if (gMySwitchType != "voq") { auto flexCounterOrch = gDirectory.get(); auto queues = tokens[1]; - if (op == SET_COMMAND && flexCounterOrch->getQueueCountersState()) + if (op == SET_COMMAND && + (flexCounterOrch->getQueueCountersState() || flexCounterOrch->getQueueWatermarkCountersState())) { gPortsOrch->createPortBufferQueueCounters(port, queues); } - else if (op == DEL_COMMAND && flexCounterOrch->getQueueCountersState()) + else if (op == DEL_COMMAND && + (flexCounterOrch->getQueueCountersState() || flexCounterOrch->getQueueWatermarkCountersState())) { gPortsOrch->removePortBufferQueueCounters(port, queues); } @@ -841,23 +994,23 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) * so we added a map that will help us to know what was the last command for this port and priority - * if the last command was set command then it is a modify command and we dont need to increase the buffer counter * all other cases (no last command exist or del command was the last command) it means that we need to increase the ref counter */ - if (op == SET_COMMAND) + if (op == SET_COMMAND) { - if (queue_port_flags[port_name][ind] != SET_COMMAND) + if (queue_port_flags[port_name][ind] != SET_COMMAND) { /* if the last operation was not "set" then it's create and not modify - need to increase ref counter */ gPortsOrch->increasePortRefCount(port_name); } - } + } else if (op == DEL_COMMAND) { - if (queue_port_flags[port_name][ind] == SET_COMMAND) + if (queue_port_flags[port_name][ind] == SET_COMMAND) { /* we need to decrease ref counter only if the last operation was "SET_COMMAND" */ gPortsOrch->decreasePortRefCount(port_name); } - } - else + } + else { SWSS_LOG_ERROR("operation value is not SET or DEL (op = %s)", op.c_str()); return task_process_status::task_invalid_entry; @@ -883,8 +1036,17 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) // set order is detected. 
for (const auto &port_name : port_names) { - if (gPortsOrch->isPortAdminUp(port_name)) { - SWSS_LOG_WARN("Queue profile '%s' applied after port %s is up", key.c_str(), port_name.c_str()); + if(local_port == true) + { + if (gPortsOrch->isPortAdminUp(local_port_name)) { + SWSS_LOG_WARN("Queue profile '%s' applied after port %s is up", key.c_str(), port_name.c_str()); + } + } + else + { + if (gPortsOrch->isPortAdminUp(port_name)) { + SWSS_LOG_WARN("Queue profile '%s' applied after port %s is up", key.c_str(), port_name.c_str()); + } } } } @@ -937,6 +1099,14 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup return task_process_status::task_failed; } + string old_buffer_profile_name; + if (doesObjectExist(m_buffer_type_maps, APP_BUFFER_PG_TABLE_NAME, key, buffer_profile_field_name, old_buffer_profile_name) + && (old_buffer_profile_name == buffer_profile_name)) + { + SWSS_LOG_INFO("Skip setting buffer priority group %s to %s since it is not changed", key.c_str(), buffer_profile_name.c_str()); + return task_process_status::task_success; + } + SWSS_LOG_NOTICE("Set buffer PG %s to %s", key.c_str(), buffer_profile_name.c_str()); setObjectReference(m_buffer_type_maps, APP_BUFFER_PG_TABLE_NAME, key, buffer_profile_field_name, buffer_profile_name); @@ -1001,11 +1171,13 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup { auto flexCounterOrch = gDirectory.get(); auto pgs = tokens[1]; - if (op == SET_COMMAND && flexCounterOrch->getPgWatermarkCountersState()) + if (op == SET_COMMAND && + (flexCounterOrch->getPgCountersState() || flexCounterOrch->getPgWatermarkCountersState())) { gPortsOrch->createPortBufferPgCounters(port, pgs); } - else if (op == DEL_COMMAND && flexCounterOrch->getPgWatermarkCountersState()) + else if (op == DEL_COMMAND && + (flexCounterOrch->getPgCountersState() || flexCounterOrch->getPgWatermarkCountersState())) { gPortsOrch->removePortBufferPgCounters(port, pgs); } @@ -1021,23 +1193,23 @@ 
task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup * so we added a map that will help us to know what was the last command for this port and priority - * if the last command was set command then it is a modify command and we dont need to increase the buffer counter * all other cases (no last command exist or del command was the last command) it means that we need to increase the ref counter */ - if (op == SET_COMMAND) + if (op == SET_COMMAND) { - if (pg_port_flags[port_name][ind] != SET_COMMAND) + if (pg_port_flags[port_name][ind] != SET_COMMAND) { /* if the last operation was not "set" then it's create and not modify - need to increase ref counter */ gPortsOrch->increasePortRefCount(port_name); } - } + } else if (op == DEL_COMMAND) { - if (pg_port_flags[port_name][ind] == SET_COMMAND) + if (pg_port_flags[port_name][ind] == SET_COMMAND) { /* we need to decrease ref counter only if the last operation was "SET_COMMAND" */ gPortsOrch->decreasePortRefCount(port_name); } - } - else + } + else { SWSS_LOG_ERROR("operation value is not SET or DEL (op = %s)", op.c_str()); return task_process_status::task_invalid_entry; @@ -1106,6 +1278,14 @@ task_process_status BufferOrch::processIngressBufferProfileList(KeyOpFieldsValue return task_process_status::task_failed; } + string old_profile_name_list; + if (doesObjectExist(m_buffer_type_maps, APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, key, buffer_profile_list_field_name, old_profile_name_list) + && (old_profile_name_list == profile_name_list)) + { + SWSS_LOG_INFO("Skip setting buffer ingress profile list %s to %s since it is not changed", key.c_str(), profile_name_list.c_str()); + return task_process_status::task_success; + } + setObjectReference(m_buffer_type_maps, APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, key, buffer_profile_list_field_name, profile_name_list); attr.value.objlist.count = (uint32_t)profile_list.size(); @@ -1177,6 +1357,14 @@ task_process_status 
BufferOrch::processEgressBufferProfileList(KeyOpFieldsValues return task_process_status::task_failed; } + string old_profile_name_list; + if (doesObjectExist(m_buffer_type_maps, APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, key, buffer_profile_list_field_name, old_profile_name_list) + && (old_profile_name_list == profile_name_list)) + { + SWSS_LOG_INFO("Skip setting buffer egress profile list %s to %s since it is not changed", key.c_str(), profile_name_list.c_str()); + return task_process_status::task_success; + } + setObjectReference(m_buffer_type_maps, APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, key, buffer_profile_list_field_name, profile_name_list); attr.value.objlist.count = (uint32_t)profile_list.size(); @@ -1254,7 +1442,15 @@ void BufferOrch::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); - if (!gPortsOrch->isConfigDone()) + if (gMySwitchType == "voq") + { + if(!gPortsOrch->isInitDone()) + { + SWSS_LOG_INFO("Buffer task for %s can't be executed ahead of port config done", consumer.getTableName().c_str()); + return; + } + } + else if (!gPortsOrch->isConfigDone()) { SWSS_LOG_INFO("Buffer task for %s can't be executed ahead of port config done", consumer.getTableName().c_str()); return; diff --git a/orchagent/bufferorch.h b/orchagent/bufferorch.h index 59428509b5..de1e75c0a6 100644 --- a/orchagent/bufferorch.h +++ b/orchagent/bufferorch.h @@ -49,6 +49,7 @@ class BufferOrch : public Orch void initTableHandlers(); void initBufferReadyLists(DBConnector *confDb, DBConnector *applDb); void initBufferReadyList(Table& table, bool isConfigDb); + void initVoqBufferReadyList(Table& table, bool isConfigDb); void initFlexCounterGroupTable(void); void initBufferConstants(); task_process_status processBufferPool(KeyOpFieldsValuesTuple &tuple); @@ -71,7 +72,7 @@ class BufferOrch : public Orch unique_ptr m_countersDb; bool m_isBufferPoolWatermarkCounterIdListGenerated = false; - + set m_partiallyAppliedQueues; }; #endif /* SWSS_BUFFORCH_H */ diff --git a/orchagent/bulker.h 
b/orchagent/bulker.h index bb5ca496c9..86308329b9 100644 --- a/orchagent/bulker.h +++ b/orchagent/bulker.h @@ -11,6 +11,34 @@ #include "logger.h" #include "sai_serialize.h" +typedef sai_status_t (*sai_bulk_set_outbound_ca_to_pa_entry_attribute_fn) ( + _In_ uint32_t object_count, + _In_ const sai_outbound_ca_to_pa_entry_t *entry, + _In_ const sai_attribute_t *attr_list, + _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_status_t *object_statuses); + +typedef sai_status_t (*sai_bulk_set_pa_validation_entry_attribute_fn) ( + _In_ uint32_t object_count, + _In_ const sai_pa_validation_entry_t *entry, + _In_ const sai_attribute_t *attr_list, + _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_status_t *object_statuses); + +typedef sai_status_t (*sai_bulk_set_outbound_routing_entry_attribute_fn) ( + _In_ uint32_t object_count, + _In_ const sai_outbound_routing_entry_t *entry, + _In_ const sai_attribute_t *attr_list, + _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_status_t *object_statuses); + +typedef sai_status_t (*sai_bulk_set_inbound_routing_entry_attribute_fn) ( + _In_ uint32_t object_count, + _In_ const sai_inbound_routing_entry_t *entry, + _In_ const sai_attribute_t *attr_list, + _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_status_t *object_statuses); + static inline bool operator==(const sai_ip_prefix_t& a, const sai_ip_prefix_t& b) { if (a.addr_family != b.addr_family) return false; @@ -33,6 +61,26 @@ static inline bool operator==(const sai_ip_prefix_t& a, const sai_ip_prefix_t& b } } +static inline bool operator==(const sai_ip_address_t& a, const sai_ip_address_t& b) +{ + if (a.addr_family != b.addr_family) return false; + + if (a.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + return a.addr.ip4 == b.addr.ip4 + ; + } + else if (a.addr_family == SAI_IP_ADDR_FAMILY_IPV6) + { + return memcmp(a.addr.ip6, b.addr.ip6, sizeof(a.addr.ip6)) == 0 + ; + } + else + { + throw std::invalid_argument("a has invalid addr_family"); + } +} + static inline bool operator==(const 
sai_route_entry_t& a, const sai_route_entry_t& b) { return a.switch_id == b.switch_id @@ -48,6 +96,49 @@ static inline bool operator==(const sai_inseg_entry_t& a, const sai_inseg_entry_ ; } +static inline bool operator==(const sai_neighbor_entry_t& a, const sai_neighbor_entry_t& b) +{ + return a.switch_id == b.switch_id + && a.rif_id == b.rif_id + && a.ip_address == b.ip_address + ; +} + +static inline bool operator==(const sai_inbound_routing_entry_t& a, const sai_inbound_routing_entry_t& b) +{ + return a.switch_id == b.switch_id + && a.eni_id == b.eni_id + && a.vni == b.vni + && a.sip == b.sip + && a.sip_mask == b.sip_mask + && a.priority == b.priority + ; +} + +static inline bool operator==(const sai_outbound_ca_to_pa_entry_t& a, const sai_outbound_ca_to_pa_entry_t& b) +{ + return a.switch_id == b.switch_id + && a.dst_vnet_id == b.dst_vnet_id + && a.dip == b.dip + ; +} + +static inline bool operator==(const sai_pa_validation_entry_t& a, const sai_pa_validation_entry_t& b) +{ + return a.switch_id == b.switch_id + && a.vnet_id == b.vnet_id + && a.sip == b.sip + ; +} + +static inline bool operator==(const sai_outbound_routing_entry_t& a, const sai_outbound_routing_entry_t& b) +{ + return a.switch_id == b.switch_id + && a.eni_id == b.eni_id + && a.destination == b.destination + ; +} + static inline std::size_t hash_value(const sai_ip_prefix_t& a) { size_t seed = 0; @@ -65,6 +156,21 @@ static inline std::size_t hash_value(const sai_ip_prefix_t& a) return seed; } +static inline std::size_t hash_value(const sai_ip_address_t& a) +{ + size_t seed = 0; + boost::hash_combine(seed, a.addr_family); + if (a.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + boost::hash_combine(seed, a.addr.ip4); + } + else if (a.addr_family == SAI_IP_ADDR_FAMILY_IPV6) + { + boost::hash_combine(seed, a.addr.ip6); + } + return seed; +} + namespace std { template <> @@ -104,6 +210,72 @@ namespace std return seed; } }; + + template <> + struct hash + { + size_t operator()(const sai_neighbor_entry_t& 
a) const noexcept + { + size_t seed = 0; + boost::hash_combine(seed, a.switch_id); + boost::hash_combine(seed, a.rif_id); + boost::hash_combine(seed, a.ip_address); + return seed; + } + }; + + template <> + struct hash + { + size_t operator()(const sai_outbound_ca_to_pa_entry_t& a) const noexcept + { + size_t seed = 0; + boost::hash_combine(seed, a.switch_id); + boost::hash_combine(seed, a.dst_vnet_id); + boost::hash_combine(seed, a.dip); + return seed; + } + }; + + template <> + struct hash + { + size_t operator()(const sai_pa_validation_entry_t& a) const noexcept + { + size_t seed = 0; + boost::hash_combine(seed, a.switch_id); + boost::hash_combine(seed, a.vnet_id); + boost::hash_combine(seed, a.sip); + return seed; + } + }; + + template <> + struct hash + { + size_t operator()(const sai_outbound_routing_entry_t& a) const noexcept + { + size_t seed = 0; + boost::hash_combine(seed, a.switch_id); + boost::hash_combine(seed, a.eni_id); + boost::hash_combine(seed, a.destination); + return seed; + } + }; + + template <> + struct hash + { + size_t operator()(const sai_inbound_routing_entry_t& a) const noexcept + { + size_t seed = 0; + boost::hash_combine(seed, a.switch_id); + boost::hash_combine(seed, a.eni_id); + boost::hash_combine(seed, a.vni); + boost::hash_combine(seed, a.sip); + return seed; + } + }; } // SAI typedef which is not available in SAI 1.5 @@ -183,6 +355,83 @@ struct SaiBulkerTraits using bulk_set_entry_attribute_fn = sai_bulk_set_inseg_entry_attribute_fn; }; +template<> +struct SaiBulkerTraits +{ + using entry_t = sai_neighbor_entry_t; + using api_t = sai_neighbor_api_t; + using create_entry_fn = sai_create_neighbor_entry_fn; + using remove_entry_fn = sai_remove_neighbor_entry_fn; + using set_entry_attribute_fn = sai_set_neighbor_entry_attribute_fn; + using bulk_create_entry_fn = sai_bulk_create_neighbor_entry_fn; + using bulk_remove_entry_fn = sai_bulk_remove_neighbor_entry_fn; + using bulk_set_entry_attribute_fn = 
sai_bulk_set_neighbor_entry_attribute_fn; +}; + +template<> +struct SaiBulkerTraits +{ + using entry_t = sai_object_id_t; + using api_t = sai_dash_vnet_api_t; + using create_entry_fn = sai_create_vnet_fn; + using remove_entry_fn = sai_remove_vnet_fn; + using set_entry_attribute_fn = sai_set_vnet_attribute_fn; + using bulk_create_entry_fn = sai_bulk_object_create_fn; + using bulk_remove_entry_fn = sai_bulk_object_remove_fn; +}; + +template<> +struct SaiBulkerTraits +{ + using entry_t = sai_inbound_routing_entry_t; + using api_t = sai_dash_inbound_routing_api_t; + using create_entry_fn = sai_create_inbound_routing_entry_fn; + using remove_entry_fn = sai_remove_inbound_routing_entry_fn; + using set_entry_attribute_fn = sai_set_inbound_routing_entry_attribute_fn; + using bulk_create_entry_fn = sai_bulk_create_inbound_routing_entry_fn; + using bulk_remove_entry_fn = sai_bulk_remove_inbound_routing_entry_fn; + using bulk_set_entry_attribute_fn = sai_bulk_set_inbound_routing_entry_attribute_fn; +}; + +template<> +struct SaiBulkerTraits +{ + using entry_t = sai_outbound_ca_to_pa_entry_t; + using api_t = sai_dash_outbound_ca_to_pa_api_t; + using create_entry_fn = sai_create_outbound_ca_to_pa_entry_fn; + using remove_entry_fn = sai_remove_outbound_ca_to_pa_entry_fn; + using set_entry_attribute_fn = sai_set_outbound_ca_to_pa_entry_attribute_fn; + using bulk_create_entry_fn = sai_bulk_create_outbound_ca_to_pa_entry_fn; + using bulk_remove_entry_fn = sai_bulk_remove_outbound_ca_to_pa_entry_fn; + using bulk_set_entry_attribute_fn = sai_bulk_set_outbound_ca_to_pa_entry_attribute_fn; +}; + +template<> +struct SaiBulkerTraits +{ + using entry_t = sai_pa_validation_entry_t; + using api_t = sai_dash_pa_validation_api_t; + using create_entry_fn = sai_create_pa_validation_entry_fn; + using remove_entry_fn = sai_remove_pa_validation_entry_fn; + using set_entry_attribute_fn = sai_set_pa_validation_entry_attribute_fn; + using bulk_create_entry_fn = sai_bulk_create_pa_validation_entry_fn; 
+ using bulk_remove_entry_fn = sai_bulk_remove_pa_validation_entry_fn; + using bulk_set_entry_attribute_fn = sai_bulk_set_pa_validation_entry_attribute_fn; +}; + +template<> +struct SaiBulkerTraits +{ + using entry_t = sai_outbound_routing_entry_t; + using api_t = sai_dash_outbound_routing_api_t; + using create_entry_fn = sai_create_outbound_routing_entry_fn; + using remove_entry_fn = sai_remove_outbound_routing_entry_fn; + using set_entry_attribute_fn = sai_set_outbound_routing_entry_attribute_fn; + using bulk_create_entry_fn = sai_bulk_create_outbound_routing_entry_fn; + using bulk_remove_entry_fn = sai_bulk_remove_outbound_routing_entry_fn; + using bulk_set_entry_attribute_fn = sai_bulk_set_outbound_routing_entry_attribute_fn; +}; + template class EntityBulker { @@ -596,6 +845,47 @@ inline EntityBulker::EntityBulker(sai_mpls_api_t *api, size_t ma set_entries_attribute = api->set_inseg_entries_attribute; } +template <> +inline EntityBulker::EntityBulker(sai_neighbor_api_t *api, size_t max_bulk_size) : + max_bulk_size(max_bulk_size) +{ + create_entries = api->create_neighbor_entries; + remove_entries = api->remove_neighbor_entries; + set_entries_attribute = api->set_neighbor_entries_attribute; +} + +template <> +inline EntityBulker::EntityBulker(sai_dash_inbound_routing_api_t *api, size_t max_bulk_size) : max_bulk_size(max_bulk_size) +{ + create_entries = api->create_inbound_routing_entries; + remove_entries = api->remove_inbound_routing_entries; + set_entries_attribute = nullptr; +} + +template <> +inline EntityBulker::EntityBulker(sai_dash_outbound_ca_to_pa_api_t *api, size_t max_bulk_size) : max_bulk_size(max_bulk_size) +{ + create_entries = api->create_outbound_ca_to_pa_entries; + remove_entries = api->remove_outbound_ca_to_pa_entries; + set_entries_attribute = nullptr; +} + +template <> +inline EntityBulker::EntityBulker(sai_dash_pa_validation_api_t *api, size_t max_bulk_size) : max_bulk_size(max_bulk_size) +{ + create_entries = 
api->create_pa_validation_entries; + remove_entries = api->remove_pa_validation_entries; + set_entries_attribute = nullptr; +} + +template <> +inline EntityBulker::EntityBulker(sai_dash_outbound_routing_api_t *api, size_t max_bulk_size) : max_bulk_size(max_bulk_size) +{ + create_entries = api->create_outbound_routing_entries; + remove_entries = api->remove_outbound_routing_entries; + set_entries_attribute = nullptr; +} + template class ObjectBulker { @@ -926,3 +1216,12 @@ inline ObjectBulker::ObjectBulker(SaiBulkerTraits +inline ObjectBulker::ObjectBulker(SaiBulkerTraits::api_t *api, sai_object_id_t switch_id, size_t max_bulk_size) : + switch_id(switch_id), + max_bulk_size(max_bulk_size) +{ + create_entries = api->create_vnets; + remove_entries = api->remove_vnets; +} diff --git a/orchagent/cbf/cbfnhgorch.cpp b/orchagent/cbf/cbfnhgorch.cpp index 76435ad12d..fe396b207c 100644 --- a/orchagent/cbf/cbfnhgorch.cpp +++ b/orchagent/cbf/cbfnhgorch.cpp @@ -343,10 +343,10 @@ bool CbfNhg::sync() SWSS_LOG_ERROR("Failed to create CBF next hop group %s, rv %d", m_key.c_str(), status); - task_process_status handle_status = gCbfNhgOrch->handleSaiCreateStatus(SAI_API_NEXT_HOP_GROUP, status); + task_process_status handle_status = handleSaiCreateStatus(SAI_API_NEXT_HOP_GROUP, status); if (handle_status != task_success) { - return gCbfNhgOrch->parseHandleSaiStatusFailure(handle_status); + return parseHandleSaiStatusFailure(handle_status); } } diff --git a/orchagent/copporch.cpp b/orchagent/copporch.cpp index 8a58ae73a0..9d597c601f 100644 --- a/orchagent/copporch.cpp +++ b/orchagent/copporch.cpp @@ -369,7 +369,7 @@ bool CoppOrch::removePolicer(string trap_group_name) sai_attribute_t attr; sai_status_t sai_status; - sai_object_id_t policer_id = getPolicer(trap_group_name); + sai_object_id_t policer_id = getPolicer(trap_group_name).policer_id; if (SAI_NULL_OBJECT_ID == policer_id) { @@ -407,21 +407,21 @@ bool CoppOrch::removePolicer(string trap_group_name) return true; } -sai_object_id_t 
CoppOrch::getPolicer(string trap_group_name) +policer_object CoppOrch::getPolicer(string trap_group_name) { SWSS_LOG_ENTER(); SWSS_LOG_DEBUG("trap group name:%s:", trap_group_name.c_str()); if (m_trap_group_map.find(trap_group_name) == m_trap_group_map.end()) { - return SAI_NULL_OBJECT_ID; + return policer_object(); } SWSS_LOG_DEBUG("trap group id:%" PRIx64, m_trap_group_map[trap_group_name]); if (m_trap_group_policer_map.find(m_trap_group_map[trap_group_name]) == m_trap_group_policer_map.end()) { - return SAI_NULL_OBJECT_ID; + return policer_object(); } - SWSS_LOG_DEBUG("trap group policer id:%" PRIx64, m_trap_group_policer_map[m_trap_group_map[trap_group_name]]); + SWSS_LOG_DEBUG("trap group policer id:%" PRIx64, m_trap_group_policer_map[m_trap_group_map[trap_group_name]].policer_id); return m_trap_group_policer_map[m_trap_group_map[trap_group_name]]; } @@ -460,8 +460,28 @@ bool CoppOrch::createPolicer(string trap_group_name, vector &po } } + policer_object obj; + obj.policer_id = policer_id; + /* Save the CREATE_ONLY attributes for future use */ + for (sai_uint32_t ind = 0; ind < policer_attribs.size(); ind++) + { + auto attr = policer_attribs[ind]; + if(attr.id == SAI_POLICER_ATTR_METER_TYPE) + { + obj.meter = (sai_meter_type_t)attr.value.s32; + } + else if(attr.id == SAI_POLICER_ATTR_MODE) + { + obj.mode = (sai_policer_mode_t)attr.value.s32; + } + else if(attr.id == SAI_POLICER_ATTR_COLOR_SOURCE) + { + obj.color = (sai_policer_color_source_t)attr.value.s32; + } + } + SWSS_LOG_NOTICE("Bind policer to trap group %s:", trap_group_name.c_str()); - m_trap_group_policer_map[m_trap_group_map[trap_group_name]] = policer_id; + m_trap_group_policer_map[m_trap_group_map[trap_group_name]] = obj; return true; } @@ -1107,12 +1127,14 @@ bool CoppOrch::getAttribsFromTrapGroup (vector &fv_tuple, bool CoppOrch::trapGroupUpdatePolicer (string trap_group_name, vector &policer_attribs) { - sai_object_id_t policer_id = getPolicer(trap_group_name); - if 
(m_trap_group_map.find(trap_group_name) == m_trap_group_map.end()) { return false; } + + auto policer_object = getPolicer(trap_group_name); + auto policer_id = policer_object.policer_id; + if (SAI_NULL_OBJECT_ID == policer_id) { SWSS_LOG_WARN("Creating policer for existing Trap group: %" PRIx64 " (name:%s).", @@ -1128,6 +1150,35 @@ bool CoppOrch::trapGroupUpdatePolicer (string trap_group_name, for (sai_uint32_t ind = 0; ind < policer_attribs.size(); ind++) { auto policer_attr = policer_attribs[ind]; + /* + Updating the CREATE_ONLY attributes of the policer will cause a crash + If modified, throw an error log and proceed with changeable attributes + */ + if(policer_attr.id == SAI_POLICER_ATTR_METER_TYPE) + { + if (policer_object.meter != (sai_meter_type_t)policer_attr.value.s32) + { + SWSS_LOG_ERROR("Trying to modify policer attribute: (meter), trap group: (%s)", trap_group_name.c_str()); + } + continue; + } + else if(policer_attr.id == SAI_POLICER_ATTR_MODE) + { + if (policer_object.mode != (sai_policer_mode_t)policer_attr.value.s32) + { + SWSS_LOG_ERROR("Trying to modify policer attribute: (mode), trap group: (%s)", trap_group_name.c_str()); + } + continue; + } + else if(policer_attr.id == SAI_POLICER_ATTR_COLOR_SOURCE) + { + if (policer_object.color != (sai_policer_color_source_t)policer_attr.value.s32) + { + SWSS_LOG_ERROR("Trying to modify policer attribute: (color), trap group: (%s)", trap_group_name.c_str()); + } + continue; + } + sai_status_t sai_status = sai_policer_api->set_policer_attribute(policer_id, &policer_attr); if (sai_status != SAI_STATUS_SUCCESS) diff --git a/orchagent/copporch.h b/orchagent/copporch.h index d774db64ba..c8f956b6d7 100644 --- a/orchagent/copporch.h +++ b/orchagent/copporch.h @@ -46,8 +46,18 @@ struct copp_trap_objects sai_hostif_trap_type_t trap_type; }; +struct policer_object +{ + sai_object_id_t policer_id; + sai_meter_type_t meter; + sai_policer_mode_t mode; + sai_policer_color_source_t color; + + policer_object() : 
policer_id(SAI_NULL_OBJECT_ID) {} +}; + /* TrapGroupPolicerTable: trap group ID, policer ID */ -typedef std::map TrapGroupPolicerTable; +typedef std::map TrapGroupPolicerTable; /* TrapIdTrapObjectsTable: trap ID, copp trap objects */ typedef std::map TrapIdTrapObjectsTable; /* TrapGroupHostIfMap: trap group ID, host interface ID */ @@ -113,7 +123,7 @@ class CoppOrch : public Orch bool createPolicer(std::string trap_group, std::vector &policer_attribs); bool removePolicer(std::string trap_group_name); - sai_object_id_t getPolicer(std::string trap_group_name); + policer_object getPolicer(std::string trap_group_name); bool createGenetlinkHostIf(std::string trap_group_name, std::vector &hostif_attribs); bool removeGenetlinkHostIf(std::string trap_group_name); diff --git a/orchagent/crmorch.cpp b/orchagent/crmorch.cpp index f5e864a357..b5844bbea3 100644 --- a/orchagent/crmorch.cpp +++ b/orchagent/crmorch.cpp @@ -4,6 +4,7 @@ #include "crmorch.h" #include "converter.h" #include "timer.h" +#include "saihelper.h" #define CRM_POLLING_INTERVAL "polling_interval" #define CRM_COUNTERS_TABLE_KEY "STATS" @@ -18,6 +19,7 @@ extern sai_object_id_t gSwitchId; extern sai_switch_api_t *sai_switch_api; extern sai_acl_api_t *sai_acl_api; +extern event_handle_t g_events_handle; using namespace std; using namespace swss; @@ -46,6 +48,23 @@ const map crmResTypeNameMap = { CrmResourceType::CRM_SRV6_MY_SID_ENTRY, "SRV6_MY_SID_ENTRY" }, { CrmResourceType::CRM_SRV6_NEXTHOP, "SRV6_NEXTHOP" }, { CrmResourceType::CRM_NEXTHOP_GROUP_MAP, "NEXTHOP_GROUP_MAP" }, + { CrmResourceType::CRM_EXT_TABLE, "EXTENSION_TABLE" }, + { CrmResourceType::CRM_DASH_VNET, "DASH_VNET" }, + { CrmResourceType::CRM_DASH_ENI, "DASH_ENI" }, + { CrmResourceType::CRM_DASH_ENI_ETHER_ADDRESS_MAP, "DASH_ENI_ETHER_ADDRESS_MAP" }, + { CrmResourceType::CRM_DASH_IPV4_INBOUND_ROUTING, "DASH_IPV4_INBOUND_ROUTING" }, + { CrmResourceType::CRM_DASH_IPV6_INBOUND_ROUTING, "DASH_IPV6_INBOUND_ROUTING" }, + { 
CrmResourceType::CRM_DASH_IPV4_OUTBOUND_ROUTING, "DASH_IPV4_OUTBOUND_ROUTING" }, + { CrmResourceType::CRM_DASH_IPV6_OUTBOUND_ROUTING, "DASH_IPV6_OUTBOUND_ROUTING" }, + { CrmResourceType::CRM_DASH_IPV4_PA_VALIDATION, "DASH_IPV4_PA_VALIDATION" }, + { CrmResourceType::CRM_DASH_IPV6_PA_VALIDATION, "DASH_IPV6_PA_VALIDATION" }, + { CrmResourceType::CRM_DASH_IPV4_OUTBOUND_CA_TO_PA, "DASH_IPV4_OUTBOUND_CA_TO_PA" }, + { CrmResourceType::CRM_DASH_IPV6_OUTBOUND_CA_TO_PA, "DASH_IPV6_OUTBOUND_CA_TO_PA" }, + { CrmResourceType::CRM_DASH_IPV4_ACL_GROUP, "DASH_IPV4_ACL_GROUP" }, + { CrmResourceType::CRM_DASH_IPV6_ACL_GROUP, "DASH_IPV6_ACL_GROUP" }, + { CrmResourceType::CRM_DASH_IPV4_ACL_RULE, "DASH_IPV4_ACL_RULE" }, + { CrmResourceType::CRM_DASH_IPV6_ACL_RULE, "DASH_IPV6_ACL_RULE" }, + { CrmResourceType::CRM_TWAMP_ENTRY, "TWAMP_ENTRY" } }; const map crmResSaiAvailAttrMap = @@ -66,6 +85,7 @@ const map crmResSaiAvailAttrMap = { CrmResourceType::CRM_IPMC_ENTRY, SAI_SWITCH_ATTR_AVAILABLE_IPMC_ENTRY}, { CrmResourceType::CRM_SNAT_ENTRY, SAI_SWITCH_ATTR_AVAILABLE_SNAT_ENTRY }, { CrmResourceType::CRM_DNAT_ENTRY, SAI_SWITCH_ATTR_AVAILABLE_DNAT_ENTRY }, + { CrmResourceType::CRM_TWAMP_ENTRY, SAI_SWITCH_ATTR_AVAILABLE_TWAMP_SESSION } }; const map crmResSaiObjAttrMap = @@ -91,6 +111,23 @@ const map crmResSaiObjAttrMap = { CrmResourceType::CRM_SRV6_MY_SID_ENTRY, SAI_OBJECT_TYPE_MY_SID_ENTRY }, { CrmResourceType::CRM_SRV6_NEXTHOP, SAI_OBJECT_TYPE_NEXT_HOP }, { CrmResourceType::CRM_NEXTHOP_GROUP_MAP, SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MAP }, + { CrmResourceType::CRM_EXT_TABLE, SAI_OBJECT_TYPE_GENERIC_PROGRAMMABLE }, + { CrmResourceType::CRM_DASH_VNET, (sai_object_type_t)SAI_OBJECT_TYPE_VNET }, + { CrmResourceType::CRM_DASH_ENI, (sai_object_type_t)SAI_OBJECT_TYPE_ENI }, + { CrmResourceType::CRM_DASH_ENI_ETHER_ADDRESS_MAP, (sai_object_type_t)SAI_OBJECT_TYPE_ENI_ETHER_ADDRESS_MAP_ENTRY }, + { CrmResourceType::CRM_DASH_IPV4_INBOUND_ROUTING, (sai_object_type_t)SAI_OBJECT_TYPE_INBOUND_ROUTING_ENTRY }, + { 
CrmResourceType::CRM_DASH_IPV6_INBOUND_ROUTING, (sai_object_type_t)SAI_OBJECT_TYPE_INBOUND_ROUTING_ENTRY }, + { CrmResourceType::CRM_DASH_IPV4_OUTBOUND_ROUTING, (sai_object_type_t)SAI_OBJECT_TYPE_OUTBOUND_ROUTING_ENTRY }, + { CrmResourceType::CRM_DASH_IPV6_OUTBOUND_ROUTING, (sai_object_type_t)SAI_OBJECT_TYPE_OUTBOUND_ROUTING_ENTRY }, + { CrmResourceType::CRM_DASH_IPV4_PA_VALIDATION, (sai_object_type_t)SAI_OBJECT_TYPE_PA_VALIDATION_ENTRY }, + { CrmResourceType::CRM_DASH_IPV6_PA_VALIDATION, (sai_object_type_t)SAI_OBJECT_TYPE_PA_VALIDATION_ENTRY }, + { CrmResourceType::CRM_DASH_IPV4_OUTBOUND_CA_TO_PA, (sai_object_type_t)SAI_OBJECT_TYPE_OUTBOUND_CA_TO_PA_ENTRY }, + { CrmResourceType::CRM_DASH_IPV6_OUTBOUND_CA_TO_PA, (sai_object_type_t)SAI_OBJECT_TYPE_OUTBOUND_CA_TO_PA_ENTRY }, + { CrmResourceType::CRM_DASH_IPV4_ACL_GROUP, (sai_object_type_t)SAI_OBJECT_TYPE_DASH_ACL_GROUP }, + { CrmResourceType::CRM_DASH_IPV6_ACL_GROUP, (sai_object_type_t)SAI_OBJECT_TYPE_DASH_ACL_GROUP }, + { CrmResourceType::CRM_DASH_IPV4_ACL_RULE, (sai_object_type_t)SAI_OBJECT_TYPE_DASH_ACL_RULE }, + { CrmResourceType::CRM_DASH_IPV6_ACL_RULE, (sai_object_type_t)SAI_OBJECT_TYPE_DASH_ACL_RULE }, + { CrmResourceType::CRM_TWAMP_ENTRY, SAI_OBJECT_TYPE_NULL } }; const map crmResAddrFamilyAttrMap = @@ -99,6 +136,8 @@ const map crmResAddrFamilyAttrMap = { CrmResourceType::CRM_IPV6_ROUTE, SAI_ROUTE_ENTRY_ATTR_IP_ADDR_FAMILY }, { CrmResourceType::CRM_IPV4_NEIGHBOR, SAI_NEIGHBOR_ENTRY_ATTR_IP_ADDR_FAMILY }, { CrmResourceType::CRM_IPV6_NEIGHBOR, SAI_NEIGHBOR_ENTRY_ATTR_IP_ADDR_FAMILY }, + { CrmResourceType::CRM_DASH_IPV4_ACL_GROUP, SAI_DASH_ACL_GROUP_ATTR_IP_ADDR_FAMILY }, + { CrmResourceType::CRM_DASH_IPV6_ACL_GROUP, SAI_DASH_ACL_GROUP_ATTR_IP_ADDR_FAMILY }, }; const map crmResAddrFamilyValMap = @@ -107,6 +146,8 @@ const map crmResAddrFamilyValMap = { CrmResourceType::CRM_IPV6_ROUTE, SAI_IP_ADDR_FAMILY_IPV6 }, { CrmResourceType::CRM_IPV4_NEIGHBOR, SAI_IP_ADDR_FAMILY_IPV4 }, { CrmResourceType::CRM_IPV6_NEIGHBOR, 
SAI_IP_ADDR_FAMILY_IPV6 }, + { CrmResourceType::CRM_DASH_IPV4_ACL_GROUP, SAI_IP_ADDR_FAMILY_IPV4 }, + { CrmResourceType::CRM_DASH_IPV6_ACL_GROUP, SAI_IP_ADDR_FAMILY_IPV6 }, }; const map crmThreshTypeResMap = @@ -132,6 +173,23 @@ const map crmThreshTypeResMap = { "srv6_my_sid_entry_threshold_type", CrmResourceType::CRM_SRV6_MY_SID_ENTRY }, { "srv6_nexthop_threshold_type", CrmResourceType::CRM_SRV6_NEXTHOP }, { "nexthop_group_map_threshold_type", CrmResourceType::CRM_NEXTHOP_GROUP_MAP }, + { "extension_table_threshold_type", CrmResourceType::CRM_EXT_TABLE }, + { "dash_vnet_threshold_type", CrmResourceType::CRM_DASH_VNET }, + { "dash_eni_threshold_type", CrmResourceType:: CRM_DASH_ENI }, + { "dash_eni_ether_address_map_threshold_type", CrmResourceType::CRM_DASH_ENI_ETHER_ADDRESS_MAP }, + { "dash_ipv4_inbound_routing_threshold_type", CrmResourceType::CRM_DASH_IPV4_INBOUND_ROUTING }, + { "dash_ipv6_inbound_routing_threshold_type", CrmResourceType::CRM_DASH_IPV6_INBOUND_ROUTING }, + { "dash_ipv4_outbound_routing_threshold_type", CrmResourceType::CRM_DASH_IPV4_OUTBOUND_ROUTING }, + { "dash_ipv6_outbound_routing_threshold_type", CrmResourceType::CRM_DASH_IPV6_OUTBOUND_ROUTING }, + { "dash_ipv4_pa_validation_threshold_type", CrmResourceType::CRM_DASH_IPV4_PA_VALIDATION }, + { "dash_ipv6_pa_validation_threshold_type", CrmResourceType::CRM_DASH_IPV6_PA_VALIDATION }, + { "dash_ipv4_outbound_ca_to_pa_threshold_type", CrmResourceType::CRM_DASH_IPV4_OUTBOUND_CA_TO_PA }, + { "dash_ipv6_outbound_ca_to_pa_threshold_type", CrmResourceType::CRM_DASH_IPV6_OUTBOUND_CA_TO_PA }, + { "dash_ipv4_acl_group_threshold_type", CrmResourceType::CRM_DASH_IPV4_ACL_GROUP }, + { "dash_ipv6_acl_group_threshold_type", CrmResourceType::CRM_DASH_IPV6_ACL_GROUP }, + { "dash_ipv4_acl_rule_threshold_type", CrmResourceType::CRM_DASH_IPV4_ACL_RULE }, + { "dash_ipv6_acl_rule_threshold_type", CrmResourceType::CRM_DASH_IPV6_ACL_RULE }, + { "twamp_entry_threshold_type", CrmResourceType::CRM_TWAMP_ENTRY } }; const 
map crmThreshLowResMap = @@ -157,6 +215,23 @@ const map crmThreshLowResMap = {"srv6_my_sid_entry_low_threshold", CrmResourceType::CRM_SRV6_MY_SID_ENTRY }, {"srv6_nexthop_low_threshold", CrmResourceType::CRM_SRV6_NEXTHOP }, {"nexthop_group_map_low_threshold", CrmResourceType::CRM_NEXTHOP_GROUP_MAP }, + {"extension_table_low_threshold", CrmResourceType::CRM_EXT_TABLE }, + { "dash_vnet_low_threshold", CrmResourceType::CRM_DASH_VNET }, + { "dash_eni_low_threshold", CrmResourceType:: CRM_DASH_ENI }, + { "dash_eni_ether_address_map_low_threshold", CrmResourceType::CRM_DASH_ENI_ETHER_ADDRESS_MAP }, + { "dash_ipv4_inbound_routing_low_threshold", CrmResourceType::CRM_DASH_IPV4_INBOUND_ROUTING }, + { "dash_ipv6_inbound_routing_low_threshold", CrmResourceType::CRM_DASH_IPV6_INBOUND_ROUTING }, + { "dash_ipv4_outbound_routing_low_threshold", CrmResourceType::CRM_DASH_IPV4_OUTBOUND_ROUTING }, + { "dash_ipv6_outbound_routing_low_threshold", CrmResourceType::CRM_DASH_IPV6_OUTBOUND_ROUTING }, + { "dash_ipv4_pa_validation_low_threshold", CrmResourceType::CRM_DASH_IPV4_PA_VALIDATION }, + { "dash_ipv6_pa_validation_low_threshold", CrmResourceType::CRM_DASH_IPV6_PA_VALIDATION }, + { "dash_ipv4_outbound_ca_to_pa_low_threshold", CrmResourceType::CRM_DASH_IPV4_OUTBOUND_CA_TO_PA }, + { "dash_ipv6_outbound_ca_to_pa_low_threshold", CrmResourceType::CRM_DASH_IPV6_OUTBOUND_CA_TO_PA }, + { "dash_ipv4_acl_group_low_threshold", CrmResourceType::CRM_DASH_IPV4_ACL_GROUP }, + { "dash_ipv6_acl_group_low_threshold", CrmResourceType::CRM_DASH_IPV6_ACL_GROUP }, + { "dash_ipv4_acl_rule_low_threshold", CrmResourceType::CRM_DASH_IPV4_ACL_RULE }, + { "dash_ipv6_acl_rule_low_threshold", CrmResourceType::CRM_DASH_IPV6_ACL_RULE }, + { "twamp_entry_low_threshold", CrmResourceType::CRM_TWAMP_ENTRY } }; const map crmThreshHighResMap = @@ -182,6 +257,23 @@ const map crmThreshHighResMap = {"srv6_my_sid_entry_high_threshold", CrmResourceType::CRM_SRV6_MY_SID_ENTRY }, {"srv6_nexthop_high_threshold", 
CrmResourceType::CRM_SRV6_NEXTHOP }, {"nexthop_group_map_high_threshold", CrmResourceType::CRM_NEXTHOP_GROUP_MAP }, + {"extension_table_high_threshold", CrmResourceType::CRM_EXT_TABLE }, + { "dash_vnet_high_threshold", CrmResourceType::CRM_DASH_VNET }, + { "dash_eni_high_threshold", CrmResourceType:: CRM_DASH_ENI }, + { "dash_eni_ether_address_map_high_threshold", CrmResourceType::CRM_DASH_ENI_ETHER_ADDRESS_MAP }, + { "dash_ipv4_inbound_routing_high_threshold", CrmResourceType::CRM_DASH_IPV4_INBOUND_ROUTING }, + { "dash_ipv6_inbound_routing_high_threshold", CrmResourceType::CRM_DASH_IPV6_INBOUND_ROUTING }, + { "dash_ipv4_outbound_routing_high_threshold", CrmResourceType::CRM_DASH_IPV4_OUTBOUND_ROUTING }, + { "dash_ipv6_outbound_routing_high_threshold", CrmResourceType::CRM_DASH_IPV6_OUTBOUND_ROUTING }, + { "dash_ipv4_pa_validation_high_threshold", CrmResourceType::CRM_DASH_IPV4_PA_VALIDATION }, + { "dash_ipv6_pa_validation_high_threshold", CrmResourceType::CRM_DASH_IPV6_PA_VALIDATION }, + { "dash_ipv4_outbound_ca_to_pa_high_threshold", CrmResourceType::CRM_DASH_IPV4_OUTBOUND_CA_TO_PA }, + { "dash_ipv6_outbound_ca_to_pa_high_threshold", CrmResourceType::CRM_DASH_IPV6_OUTBOUND_CA_TO_PA }, + { "dash_ipv4_acl_group_high_threshold", CrmResourceType::CRM_DASH_IPV4_ACL_GROUP }, + { "dash_ipv6_acl_group_high_threshold", CrmResourceType::CRM_DASH_IPV6_ACL_GROUP }, + { "dash_ipv4_acl_rule_high_threshold", CrmResourceType::CRM_DASH_IPV4_ACL_RULE }, + { "dash_ipv6_acl_rule_high_threshold", CrmResourceType::CRM_DASH_IPV6_ACL_RULE }, + { "twamp_entry_high_threshold", CrmResourceType::CRM_TWAMP_ENTRY } }; const map crmThreshTypeMap = @@ -214,6 +306,23 @@ const map crmAvailCntsTableMap = { "crm_stats_srv6_my_sid_entry_available", CrmResourceType::CRM_SRV6_MY_SID_ENTRY }, { "crm_stats_srv6_nexthop_available", CrmResourceType::CRM_SRV6_NEXTHOP }, { "crm_stats_nexthop_group_map_available", CrmResourceType::CRM_NEXTHOP_GROUP_MAP }, + { "crm_stats_extension_table_available", 
CrmResourceType::CRM_EXT_TABLE }, + { "crm_stats_dash_vnet_available", CrmResourceType::CRM_DASH_VNET }, + { "crm_stats_dash_eni_available", CrmResourceType:: CRM_DASH_ENI }, + { "crm_stats_dash_eni_ether_address_map_available", CrmResourceType::CRM_DASH_ENI_ETHER_ADDRESS_MAP }, + { "crm_stats_dash_ipv4_inbound_routing_available", CrmResourceType::CRM_DASH_IPV4_INBOUND_ROUTING }, + { "crm_stats_dash_ipv6_inbound_routing_available", CrmResourceType::CRM_DASH_IPV6_INBOUND_ROUTING }, + { "crm_stats_dash_ipv4_outbound_routing_available", CrmResourceType::CRM_DASH_IPV4_OUTBOUND_ROUTING }, + { "crm_stats_dash_ipv6_outbound_routing_available", CrmResourceType::CRM_DASH_IPV6_OUTBOUND_ROUTING }, + { "crm_stats_dash_ipv4_pa_validation_available", CrmResourceType::CRM_DASH_IPV4_PA_VALIDATION }, + { "crm_stats_dash_ipv6_pa_validation_available", CrmResourceType::CRM_DASH_IPV6_PA_VALIDATION }, + { "crm_stats_dash_ipv4_outbound_ca_to_pa_available", CrmResourceType::CRM_DASH_IPV4_OUTBOUND_CA_TO_PA }, + { "crm_stats_dash_ipv6_outbound_ca_to_pa_available", CrmResourceType::CRM_DASH_IPV6_OUTBOUND_CA_TO_PA }, + { "crm_stats_dash_ipv4_acl_group_available", CrmResourceType::CRM_DASH_IPV4_ACL_GROUP }, + { "crm_stats_dash_ipv6_acl_group_available", CrmResourceType::CRM_DASH_IPV6_ACL_GROUP }, + { "crm_stats_dash_ipv4_acl_rule_available", CrmResourceType::CRM_DASH_IPV4_ACL_RULE }, + { "crm_stats_dash_ipv6_acl_rule_available", CrmResourceType::CRM_DASH_IPV6_ACL_RULE }, + { "crm_stats_twamp_entry_available", CrmResourceType::CRM_TWAMP_ENTRY } }; const map crmUsedCntsTableMap = @@ -239,6 +348,23 @@ const map crmUsedCntsTableMap = { "crm_stats_srv6_my_sid_entry_used", CrmResourceType::CRM_SRV6_MY_SID_ENTRY }, { "crm_stats_srv6_nexthop_used", CrmResourceType::CRM_SRV6_NEXTHOP }, { "crm_stats_nexthop_group_map_used", CrmResourceType::CRM_NEXTHOP_GROUP_MAP }, + { "crm_stats_extension_table_used", CrmResourceType::CRM_EXT_TABLE }, + { "crm_stats_dash_vnet_used", CrmResourceType::CRM_DASH_VNET }, + 
{ "crm_stats_dash_eni_used", CrmResourceType:: CRM_DASH_ENI }, + { "crm_stats_dash_eni_ether_address_map_used", CrmResourceType::CRM_DASH_ENI_ETHER_ADDRESS_MAP }, + { "crm_stats_dash_ipv4_inbound_routing_used", CrmResourceType::CRM_DASH_IPV4_INBOUND_ROUTING }, + { "crm_stats_dash_ipv6_inbound_routing_used", CrmResourceType::CRM_DASH_IPV6_INBOUND_ROUTING }, + { "crm_stats_dash_ipv4_outbound_routing_used", CrmResourceType::CRM_DASH_IPV4_OUTBOUND_ROUTING }, + { "crm_stats_dash_ipv6_outbound_routing_used", CrmResourceType::CRM_DASH_IPV6_OUTBOUND_ROUTING }, + { "crm_stats_dash_ipv4_pa_validation_used", CrmResourceType::CRM_DASH_IPV4_PA_VALIDATION }, + { "crm_stats_dash_ipv6_pa_validation_used", CrmResourceType::CRM_DASH_IPV6_PA_VALIDATION }, + { "crm_stats_dash_ipv4_outbound_ca_to_pa_used", CrmResourceType::CRM_DASH_IPV4_OUTBOUND_CA_TO_PA }, + { "crm_stats_dash_ipv6_outbound_ca_to_pa_used", CrmResourceType::CRM_DASH_IPV6_OUTBOUND_CA_TO_PA }, + { "crm_stats_dash_ipv4_acl_group_used", CrmResourceType::CRM_DASH_IPV4_ACL_GROUP }, + { "crm_stats_dash_ipv6_acl_group_used", CrmResourceType::CRM_DASH_IPV6_ACL_GROUP }, + { "crm_stats_dash_ipv4_acl_rule_used", CrmResourceType::CRM_DASH_IPV4_ACL_RULE }, + { "crm_stats_dash_ipv6_acl_rule_used", CrmResourceType::CRM_DASH_IPV6_ACL_RULE }, + { "crm_stats_twamp_entry_used", CrmResourceType::CRM_TWAMP_ENTRY }, }; CrmOrch::CrmOrch(DBConnector *db, string tableName): @@ -339,13 +465,18 @@ void CrmOrch::handleSetCommand(const string& key, const vector& } else if (crmThreshTypeResMap.find(field) != crmThreshTypeResMap.end()) { - auto resourceType = crmThreshTypeResMap.at(field); auto thresholdType = crmThreshTypeMap.at(value); + auto resourceType = crmThreshTypeResMap.at(field); + auto &resource = m_resourcesMap.at(resourceType); - if (m_resourcesMap.at(resourceType).thresholdType != thresholdType) + if (resource.thresholdType != thresholdType) { - m_resourcesMap.at(resourceType).thresholdType = thresholdType; - 
m_resourcesMap.at(resourceType).exceededLogCounter = 0; + resource.thresholdType = thresholdType; + + for (auto &cnt : resource.countersMap) + { + cnt.second.exceededLogCounter = 0; + } } } else if (crmThreshLowResMap.find(field) != crmThreshLowResMap.end()) @@ -495,6 +626,100 @@ void CrmOrch::decCrmAclTableUsedCounter(CrmResourceType resource, sai_object_id_ } } +void CrmOrch::incCrmExtTableUsedCounter(CrmResourceType resource, std::string table_name) +{ + SWSS_LOG_ENTER(); + + try + { + m_resourcesMap.at(resource).countersMap[getCrmP4rtTableKey(table_name)].usedCounter++; + } + catch (...) + { + SWSS_LOG_ERROR("Failed to increment \"used\" counter for the EXT %s CRM resource.", table_name.c_str()); + return; + } +} + +void CrmOrch::decCrmExtTableUsedCounter(CrmResourceType resource, std::string table_name) +{ + SWSS_LOG_ENTER(); + + try + { + m_resourcesMap.at(resource).countersMap[getCrmP4rtTableKey(table_name)].usedCounter--; + } + catch (...) + { + SWSS_LOG_ERROR("Failed to decrement \"used\" counter for the EXT %s CRM resource.", table_name.c_str()); + return; + } +} + +void CrmOrch::incCrmDashAclUsedCounter(CrmResourceType resource, sai_object_id_t tableId) +{ + SWSS_LOG_ENTER(); + + try + { + if (resource == CrmResourceType::CRM_DASH_IPV4_ACL_GROUP) + { + incCrmResUsedCounter(resource); + auto &rule_cnt = m_resourcesMap.at(CrmResourceType::CRM_DASH_IPV4_ACL_RULE).countersMap[getCrmDashAclGroupKey(tableId)]; + rule_cnt.usedCounter = 0; + rule_cnt.id = tableId; + } + else if (resource == CrmResourceType::CRM_DASH_IPV6_ACL_GROUP) + { + incCrmResUsedCounter(resource); + auto &rule_cnt = m_resourcesMap.at(CrmResourceType::CRM_DASH_IPV6_ACL_RULE).countersMap[getCrmDashAclGroupKey(tableId)]; + rule_cnt.usedCounter = 0; + rule_cnt.id = tableId; + } + else + { + auto &rule_cnt = m_resourcesMap.at(resource).countersMap[getCrmDashAclGroupKey(tableId)]; + ++rule_cnt.usedCounter; + } + } + catch (...) 
+ { + SWSS_LOG_ERROR("Failed to increment \"used\" counter for the %s CRM resource (tableId:%" PRIx64 ").", crmResTypeNameMap.at(resource).c_str(), tableId); + return; + } +} + +void CrmOrch::decCrmDashAclUsedCounter(CrmResourceType resource, sai_object_id_t tableId) +{ + SWSS_LOG_ENTER(); + + try + { + if (resource == CrmResourceType::CRM_DASH_IPV4_ACL_GROUP) + { + decCrmResUsedCounter(resource); + m_resourcesMap.at(CrmResourceType::CRM_DASH_IPV4_ACL_RULE).countersMap.erase(getCrmDashAclGroupKey(tableId)); + m_countersCrmTable->del(getCrmDashAclGroupKey(tableId)); + } + else if (resource == CrmResourceType::CRM_DASH_IPV6_ACL_GROUP) + { + decCrmResUsedCounter(resource); + m_resourcesMap.at(CrmResourceType::CRM_DASH_IPV6_ACL_RULE).countersMap.erase(getCrmDashAclGroupKey(tableId)); + m_countersCrmTable->del(getCrmDashAclGroupKey(tableId)); + } + else + { + auto &rule_cnt = m_resourcesMap.at(resource).countersMap[getCrmDashAclGroupKey(tableId)]; + --rule_cnt.usedCounter; + } + } + catch (...) 
+ { + SWSS_LOG_ERROR("Failed to decrement \"used\" counter for the %s CRM resource (tableId:%" PRIx64 ").", crmResTypeNameMap.at(resource).c_str(), tableId); + return; + } +} + void CrmOrch::doTask(SelectableTimer &timer) { SWSS_LOG_ENTER(); @@ -516,24 +741,33 @@ bool CrmOrch::getResAvailability(CrmResourceType type, CrmResourceEntry &res) { uint32_t attrCount = 0; - if ((type == CrmResourceType::CRM_IPV4_ROUTE) || (type == CrmResourceType::CRM_IPV6_ROUTE) || - (type == CrmResourceType::CRM_IPV4_NEIGHBOR) || (type == CrmResourceType::CRM_IPV6_NEIGHBOR)) + switch (type) { - attr.id = crmResAddrFamilyAttrMap.at(type); - attr.value.s32 = crmResAddrFamilyValMap.at(type); - attrCount = 1; - } - else if (type == CrmResourceType::CRM_MPLS_NEXTHOP) - { - attr.id = SAI_NEXT_HOP_ATTR_TYPE; - attr.value.s32 = SAI_NEXT_HOP_TYPE_MPLS; - attrCount = 1; - } - else if (type == CrmResourceType::CRM_SRV6_NEXTHOP) - { - attr.id = SAI_NEXT_HOP_ATTR_TYPE; - attr.value.s32 = SAI_NEXT_HOP_TYPE_SRV6_SIDLIST; - attrCount = 1; + case CrmResourceType::CRM_IPV4_ROUTE: + case CrmResourceType::CRM_IPV6_ROUTE: + case CrmResourceType::CRM_IPV4_NEIGHBOR: + case CrmResourceType::CRM_IPV6_NEIGHBOR: + case CrmResourceType::CRM_DASH_IPV4_ACL_GROUP: + case CrmResourceType::CRM_DASH_IPV6_ACL_GROUP: + attr.id = crmResAddrFamilyAttrMap.at(type); + attr.value.s32 = crmResAddrFamilyValMap.at(type); + attrCount = 1; + break; + + case CrmResourceType::CRM_MPLS_NEXTHOP: + attr.id = SAI_NEXT_HOP_ATTR_TYPE; + attr.value.s32 = SAI_NEXT_HOP_TYPE_MPLS; + attrCount = 1; + break; + + case CrmResourceType::CRM_SRV6_NEXTHOP: + attr.id = SAI_NEXT_HOP_ATTR_TYPE; + attr.value.s32 = SAI_NEXT_HOP_TYPE_SRV6_SIDLIST; + attrCount = 1; + break; + + default: + break; } status = sai_object_type_get_availability(gSwitchId, objType, attrCount, &attr, &availCount); @@ -560,7 +794,7 @@ bool CrmOrch::getResAvailability(CrmResourceType type, CrmResourceEntry &res) if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to get 
availability counter for %s CRM resourse", crmResTypeNameMap.at(type).c_str()); + SWSS_LOG_ERROR("Failed to get availability counter for %s CRM resource", crmResTypeNameMap.at(type).c_str()); return false; } @@ -572,6 +806,41 @@ bool CrmOrch::getResAvailability(CrmResourceType type, CrmResourceEntry &res) return true; } +bool CrmOrch::getDashAclGroupResAvailability(CrmResourceType type, CrmResourceEntry &res) +{ + sai_object_type_t objType = crmResSaiObjAttrMap.at(type); + + for (auto &cnt : res.countersMap) + { + sai_attribute_t attr; + attr.id = SAI_DASH_ACL_RULE_ATTR_DASH_ACL_GROUP_ID; + attr.value.oid = cnt.second.id; + + uint64_t availCount = 0; + sai_status_t status = sai_object_type_get_availability(gSwitchId, objType, 1, &attr, &availCount); + if ((status == SAI_STATUS_NOT_SUPPORTED) || + (status == SAI_STATUS_NOT_IMPLEMENTED) || + SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || + SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) + { + // mark unsupported resources + res.resStatus = CrmResourceStatus::CRM_RES_NOT_SUPPORTED; + SWSS_LOG_NOTICE("CRM resource %s not supported", crmResTypeNameMap.at(type).c_str()); + return false; + } + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get ACL table attribute %u , rv:%d", attr.id, status); + break; + } + + cnt.second.availableCounter = static_cast(availCount); + } + + return true; +} + void CrmOrch::getResAvailableCounters() { SWSS_LOG_ENTER(); @@ -603,6 +872,20 @@ void CrmOrch::getResAvailableCounters() case CrmResourceType::CRM_SRV6_MY_SID_ENTRY: case CrmResourceType::CRM_MPLS_NEXTHOP: case CrmResourceType::CRM_SRV6_NEXTHOP: + case CrmResourceType::CRM_DASH_VNET: + case CrmResourceType::CRM_DASH_ENI: + case CrmResourceType::CRM_DASH_ENI_ETHER_ADDRESS_MAP: + case CrmResourceType::CRM_DASH_IPV4_INBOUND_ROUTING: + case CrmResourceType::CRM_DASH_IPV6_INBOUND_ROUTING: + case CrmResourceType::CRM_DASH_IPV4_OUTBOUND_ROUTING: + case CrmResourceType::CRM_DASH_IPV6_OUTBOUND_ROUTING: + case 
CrmResourceType::CRM_DASH_IPV4_PA_VALIDATION: + case CrmResourceType::CRM_DASH_IPV6_PA_VALIDATION: + case CrmResourceType::CRM_DASH_IPV4_OUTBOUND_CA_TO_PA: + case CrmResourceType::CRM_DASH_IPV6_OUTBOUND_CA_TO_PA: + case CrmResourceType::CRM_DASH_IPV4_ACL_GROUP: + case CrmResourceType::CRM_DASH_IPV6_ACL_GROUP: + case CrmResourceType::CRM_TWAMP_ENTRY: { getResAvailability(res.first, res.second); break; @@ -619,6 +902,17 @@ void CrmOrch::getResAvailableCounters() attr.value.aclresource.count = CRM_ACL_RESOURCE_COUNT; attr.value.aclresource.list = resources.data(); sai_status_t status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + if ((status == SAI_STATUS_NOT_SUPPORTED) || + (status == SAI_STATUS_NOT_IMPLEMENTED) || + SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || + SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) + { + // mark unsupported resources + res.second.resStatus = CrmResourceStatus::CRM_RES_NOT_SUPPORTED; + SWSS_LOG_NOTICE("CRM resource %s not supported", crmResTypeNameMap.at(res.first).c_str()); + break; + } + if (status == SAI_STATUS_BUFFER_OVERFLOW) { resources.resize(attr.value.aclresource.count); @@ -654,6 +948,16 @@ void CrmOrch::getResAvailableCounters() for (auto &cnt : res.second.countersMap) { sai_status_t status = sai_acl_api->get_acl_table_attribute(cnt.second.id, 1, &attr); + if ((status == SAI_STATUS_NOT_SUPPORTED) || + (status == SAI_STATUS_NOT_IMPLEMENTED) || + SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || + SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) + { + // mark unsupported resources + res.second.resStatus = CrmResourceStatus::CRM_RES_NOT_SUPPORTED; + SWSS_LOG_NOTICE("CRM resource %s not supported", crmResTypeNameMap.at(res.first).c_str()); + break; + } if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to get ACL table attribute %u , rv:%d", attr.id, status); @@ -666,6 +970,40 @@ void CrmOrch::getResAvailableCounters() break; } + case CrmResourceType::CRM_EXT_TABLE: + { + for (auto &cnt : res.second.countersMap) + { + 
std::string table_name = cnt.first; + sai_object_type_t objType = crmResSaiObjAttrMap.at(res.first); + sai_attribute_t attr; + uint64_t availCount = 0; + + attr.id = SAI_GENERIC_PROGRAMMABLE_ATTR_OBJECT_NAME; + attr.value.s8list.count = (uint32_t)table_name.size(); + attr.value.s8list.list = (int8_t *)const_cast(table_name.c_str()); + + sai_status_t status = sai_object_type_get_availability( + gSwitchId, objType, 1, &attr, &availCount); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get EXT table resource count %s , rv:%d", + table_name.c_str(), status); + break; + } + + cnt.second.availableCounter = static_cast(availCount); + } + break; + } + + case CrmResourceType::CRM_DASH_IPV4_ACL_RULE: + case CrmResourceType::CRM_DASH_IPV6_ACL_RULE: + { + getDashAclGroupResAvailability(res.first, res.second); + break; + } + default: SWSS_LOG_ERROR("Failed to get CRM resource type %u. Unknown resource type.\n", static_cast(res.first)); return; @@ -682,7 +1020,13 @@ void CrmOrch::updateCrmCountersTable() { try { - for (const auto &cnt : m_resourcesMap.at(i.second).countersMap) + const auto &res = m_resourcesMap.at(i.second); + if (res.resStatus == CrmResourceStatus::CRM_RES_NOT_SUPPORTED) + { + continue; + } + + for (const auto &cnt : res.countersMap) { FieldValueTuple attr(i.first, to_string(cnt.second.usedCounter)); vector attrs = { attr }; @@ -700,7 +1044,13 @@ void CrmOrch::updateCrmCountersTable() { try { - for (const auto &cnt : m_resourcesMap.at(i.second).countersMap) + const auto &res = m_resourcesMap.at(i.second); + if (res.resStatus == CrmResourceStatus::CRM_RES_NOT_SUPPORTED) + { + continue; + } + + for (const auto &cnt : res.countersMap) { FieldValueTuple attr(i.first, to_string(cnt.second.availableCounter)); vector attrs = { attr }; @@ -722,7 +1072,12 @@ void CrmOrch::checkCrmThresholds() { auto &res = i.second; - for (const auto &j : i.second.countersMap) + if (res.resStatus == CrmResourceStatus::CRM_RES_NOT_SUPPORTED) + { + continue; + } + + 
for (auto &j : i.second.countersMap) { auto &cnt = j.second; uint64_t utilization = 0; @@ -761,19 +1116,25 @@ void CrmOrch::checkCrmThresholds() throw runtime_error("Unknown threshold type for CRM resource"); } - if ((utilization >= res.highThreshold) && (res.exceededLogCounter < CRM_EXCEEDED_MSG_MAX)) + if ((utilization >= res.highThreshold) && (cnt.exceededLogCounter < CRM_EXCEEDED_MSG_MAX)) { + event_params_t params = { + { "percent", to_string(percentageUtil) }, + { "used_cnt", to_string(cnt.usedCounter) }, + { "free_cnt", to_string(cnt.availableCounter) }}; + SWSS_LOG_WARN("%s THRESHOLD_EXCEEDED for %s %u%% Used count %u free count %u", res.name.c_str(), threshType.c_str(), percentageUtil, cnt.usedCounter, cnt.availableCounter); - res.exceededLogCounter++; + event_publish(g_events_handle, "chk_crm_threshold", ¶ms); + cnt.exceededLogCounter++; } - else if ((utilization <= res.lowThreshold) && (res.exceededLogCounter > 0) && (res.highThreshold != res.lowThreshold)) + else if ((utilization <= res.lowThreshold) && (cnt.exceededLogCounter > 0) && (res.highThreshold != res.lowThreshold)) { SWSS_LOG_WARN("%s THRESHOLD_CLEAR for %s %u%% Used count %u free count %u", res.name.c_str(), threshType.c_str(), percentageUtil, cnt.usedCounter, cnt.availableCounter); - res.exceededLogCounter = 0; + cnt.exceededLogCounter = 0; } } // end of counters loop } // end of resources loop @@ -826,3 +1187,19 @@ string CrmOrch::getCrmAclTableKey(sai_object_id_t id) ss << "ACL_TABLE_STATS:" << "0x" << std::hex << id; return ss.str(); } + +string CrmOrch::getCrmP4rtTableKey(std::string table_name) +{ + std::stringstream ss; + ss << "EXT_TABLE_STATS:" << table_name; + return ss.str(); +} + +string CrmOrch::getCrmDashAclGroupKey(sai_object_id_t id) +{ + std::stringstream ss; + // Prepare the DASH_ACL_GROUP_STATS table key that will be used to store and access DASH ACL group counters + // in the Counters DB. 
+ ss << "DASH_ACL_GROUP_STATS:" << "0x" << std::hex << id; + return ss.str(); +} diff --git a/orchagent/crmorch.h b/orchagent/crmorch.h index f63e2a31c2..961bfaebe4 100644 --- a/orchagent/crmorch.h +++ b/orchagent/crmorch.h @@ -5,6 +5,7 @@ #include #include "orch.h" #include "port.h" +#include "events.h" extern "C" { #include "sai.h" @@ -33,6 +34,23 @@ enum class CrmResourceType CRM_SRV6_MY_SID_ENTRY, CRM_SRV6_NEXTHOP, CRM_NEXTHOP_GROUP_MAP, + CRM_EXT_TABLE, + CRM_DASH_VNET, + CRM_DASH_ENI, + CRM_DASH_ENI_ETHER_ADDRESS_MAP, + CRM_DASH_IPV4_INBOUND_ROUTING, + CRM_DASH_IPV6_INBOUND_ROUTING, + CRM_DASH_IPV4_OUTBOUND_ROUTING, + CRM_DASH_IPV6_OUTBOUND_ROUTING, + CRM_DASH_IPV4_PA_VALIDATION, + CRM_DASH_IPV6_PA_VALIDATION, + CRM_DASH_IPV4_OUTBOUND_CA_TO_PA, + CRM_DASH_IPV6_OUTBOUND_CA_TO_PA, + CRM_DASH_IPV4_ACL_GROUP, + CRM_DASH_IPV6_ACL_GROUP, + CRM_DASH_IPV4_ACL_RULE, + CRM_DASH_IPV6_ACL_RULE, + CRM_TWAMP_ENTRY }; enum class CrmThresholdType @@ -62,6 +80,14 @@ class CrmOrch : public Orch void incCrmAclTableUsedCounter(CrmResourceType resource, sai_object_id_t tableId); // Decrement "used" counter for the per ACL table CRM resources (ACL entry/counter) void decCrmAclTableUsedCounter(CrmResourceType resource, sai_object_id_t tableId); + // Increment "used" counter for the EXT table CRM resources + void incCrmExtTableUsedCounter(CrmResourceType resource, std::string table_name); + // Decrement "used" counter for the EXT table CRM resources + void decCrmExtTableUsedCounter(CrmResourceType resource, std::string table_name); + // Increment "used" counter for the per DASH ACL CRM resources (ACL group/rule) + void incCrmDashAclUsedCounter(CrmResourceType resource, sai_object_id_t groupId); + // Decrement "used" counter for the per DASH ACL CRM resources (ACL group/rule) + void decCrmDashAclUsedCounter(CrmResourceType resource, sai_object_id_t groupId); private: std::shared_ptr m_countersDb = nullptr; @@ -73,6 +99,7 @@ class CrmOrch : public Orch sai_object_id_t id = 0; uint32_t 
availableCounter = 0; uint32_t usedCounter = 0; + uint32_t exceededLogCounter = 0; }; struct CrmResourceEntry @@ -87,7 +114,6 @@ class CrmOrch : public Orch std::map countersMap; - uint32_t exceededLogCounter = 0; CrmResourceStatus resStatus = CrmResourceStatus::CRM_RES_SUPPORTED; }; @@ -99,9 +125,12 @@ class CrmOrch : public Orch void handleSetCommand(const std::string& key, const std::vector& data); void doTask(swss::SelectableTimer &timer); bool getResAvailability(CrmResourceType type, CrmResourceEntry &res); + bool getDashAclGroupResAvailability(CrmResourceType type, CrmResourceEntry &res); void getResAvailableCounters(); void updateCrmCountersTable(); void checkCrmThresholds(); std::string getCrmAclKey(sai_acl_stage_t stage, sai_acl_bind_point_type_t bindPoint); std::string getCrmAclTableKey(sai_object_id_t id); + std::string getCrmP4rtTableKey(std::string table_name); + std::string getCrmDashAclGroupKey(sai_object_id_t id); }; diff --git a/orchagent/dash/dashaclgroupmgr.cpp b/orchagent/dash/dashaclgroupmgr.cpp new file mode 100644 index 0000000000..79a2230763 --- /dev/null +++ b/orchagent/dash/dashaclgroupmgr.cpp @@ -0,0 +1,812 @@ +#include + +#include + +#include "dashaclgroupmgr.h" + +#include "crmorch.h" +#include "dashorch.h" +#include "dashaclorch.h" +#include "saihelper.h" +#include "pbutils.h" +#include "taskworker.h" + +extern sai_dash_acl_api_t* sai_dash_acl_api; +extern sai_dash_eni_api_t* sai_dash_eni_api; +extern sai_object_id_t gSwitchId; +extern CrmOrch *gCrmOrch; + +using namespace std; +using namespace swss; +using namespace dash::acl_in; +using namespace dash::acl_out; +using namespace dash::acl_rule; +using namespace dash::acl_group; +using namespace dash::tag; +using namespace dash::types; + +const static vector all_protocols(boost::counting_iterator(0), boost::counting_iterator(UINT8_MAX + 1)); +const static vector all_ports = {{numeric_limits::min(), numeric_limits::max()}}; + +bool from_pb(const AclRule& data, DashAclRule& rule) +{ + 
rule.m_priority = data.priority(); + rule.m_action = (data.action() == ACTION_PERMIT) ? DashAclRule::Action::ALLOW : DashAclRule::Action::DENY; + rule.m_terminating = data.terminating(); + + if (data.protocol_size()) + { + rule.m_protocols.reserve(data.protocol_size()); + rule.m_protocols.assign(data.protocol().begin(), data.protocol().end()); + } + + if (!to_sai(data.src_addr(), rule.m_src_prefixes)) + { + return false; + } + + if (!to_sai(data.dst_addr(), rule.m_dst_prefixes)) + { + return false; + } + + if (data.src_tag_size()) + { + rule.m_src_tags.insert(data.src_tag().begin(), data.src_tag().end()); + } + + if (data.dst_tag_size()) + { + rule.m_dst_tags.insert(data.dst_tag().begin(), data.dst_tag().end()); + } + + if (!data.src_port_size()) + { + rule.m_src_ports = all_ports; + } + else if (!to_sai(data.src_port(), rule.m_src_ports)) + { + return false; + } + + if (!data.dst_port_size()) + { + rule.m_dst_ports = all_ports; + } + else if (!to_sai(data.dst_port(), rule.m_dst_ports)) + { + return false; + } + + return true; +} + +bool from_pb(const dash::acl_group::AclGroup &data, DashAclGroup& group) +{ + if (!to_sai(data.ip_version(), group.m_ip_version)) + { + return false; + } + + return true; +} + +sai_attr_id_t getSaiStage(DashAclDirection d, sai_ip_addr_family_t f, DashAclStage s) +{ + const static map, sai_attr_id_t> StageMaps = + { + {{DashAclDirection::IN, SAI_IP_ADDR_FAMILY_IPV4, DashAclStage::STAGE1}, SAI_ENI_ATTR_INBOUND_V4_STAGE1_DASH_ACL_GROUP_ID}, + {{DashAclDirection::IN, SAI_IP_ADDR_FAMILY_IPV4, DashAclStage::STAGE2}, SAI_ENI_ATTR_INBOUND_V4_STAGE2_DASH_ACL_GROUP_ID}, + {{DashAclDirection::IN, SAI_IP_ADDR_FAMILY_IPV4, DashAclStage::STAGE3}, SAI_ENI_ATTR_INBOUND_V4_STAGE3_DASH_ACL_GROUP_ID}, + {{DashAclDirection::IN, SAI_IP_ADDR_FAMILY_IPV4, DashAclStage::STAGE4}, SAI_ENI_ATTR_INBOUND_V4_STAGE4_DASH_ACL_GROUP_ID}, + {{DashAclDirection::IN, SAI_IP_ADDR_FAMILY_IPV4, DashAclStage::STAGE5}, SAI_ENI_ATTR_INBOUND_V4_STAGE5_DASH_ACL_GROUP_ID}, + 
{{DashAclDirection::IN, SAI_IP_ADDR_FAMILY_IPV6, DashAclStage::STAGE1}, SAI_ENI_ATTR_INBOUND_V6_STAGE1_DASH_ACL_GROUP_ID}, + {{DashAclDirection::IN, SAI_IP_ADDR_FAMILY_IPV6, DashAclStage::STAGE2}, SAI_ENI_ATTR_INBOUND_V6_STAGE2_DASH_ACL_GROUP_ID}, + {{DashAclDirection::IN, SAI_IP_ADDR_FAMILY_IPV6, DashAclStage::STAGE3}, SAI_ENI_ATTR_INBOUND_V6_STAGE3_DASH_ACL_GROUP_ID}, + {{DashAclDirection::IN, SAI_IP_ADDR_FAMILY_IPV6, DashAclStage::STAGE4}, SAI_ENI_ATTR_INBOUND_V6_STAGE4_DASH_ACL_GROUP_ID}, + {{DashAclDirection::IN, SAI_IP_ADDR_FAMILY_IPV6, DashAclStage::STAGE5}, SAI_ENI_ATTR_INBOUND_V6_STAGE5_DASH_ACL_GROUP_ID}, + {{DashAclDirection::OUT, SAI_IP_ADDR_FAMILY_IPV4, DashAclStage::STAGE1}, SAI_ENI_ATTR_OUTBOUND_V4_STAGE1_DASH_ACL_GROUP_ID}, + {{DashAclDirection::OUT, SAI_IP_ADDR_FAMILY_IPV4, DashAclStage::STAGE2}, SAI_ENI_ATTR_OUTBOUND_V4_STAGE2_DASH_ACL_GROUP_ID}, + {{DashAclDirection::OUT, SAI_IP_ADDR_FAMILY_IPV4, DashAclStage::STAGE3}, SAI_ENI_ATTR_OUTBOUND_V4_STAGE3_DASH_ACL_GROUP_ID}, + {{DashAclDirection::OUT, SAI_IP_ADDR_FAMILY_IPV4, DashAclStage::STAGE4}, SAI_ENI_ATTR_OUTBOUND_V4_STAGE4_DASH_ACL_GROUP_ID}, + {{DashAclDirection::OUT, SAI_IP_ADDR_FAMILY_IPV4, DashAclStage::STAGE5}, SAI_ENI_ATTR_OUTBOUND_V4_STAGE5_DASH_ACL_GROUP_ID}, + {{DashAclDirection::OUT, SAI_IP_ADDR_FAMILY_IPV6, DashAclStage::STAGE1}, SAI_ENI_ATTR_OUTBOUND_V6_STAGE1_DASH_ACL_GROUP_ID}, + {{DashAclDirection::OUT, SAI_IP_ADDR_FAMILY_IPV6, DashAclStage::STAGE2}, SAI_ENI_ATTR_OUTBOUND_V6_STAGE2_DASH_ACL_GROUP_ID}, + {{DashAclDirection::OUT, SAI_IP_ADDR_FAMILY_IPV6, DashAclStage::STAGE3}, SAI_ENI_ATTR_OUTBOUND_V6_STAGE3_DASH_ACL_GROUP_ID}, + {{DashAclDirection::OUT, SAI_IP_ADDR_FAMILY_IPV6, DashAclStage::STAGE4}, SAI_ENI_ATTR_OUTBOUND_V6_STAGE4_DASH_ACL_GROUP_ID}, + {{DashAclDirection::OUT, SAI_IP_ADDR_FAMILY_IPV6, DashAclStage::STAGE5}, SAI_ENI_ATTR_OUTBOUND_V6_STAGE5_DASH_ACL_GROUP_ID}, + }; + + auto stage = StageMaps.find({d, f, s}); + if (stage == StageMaps.end()) + { + 
SWSS_LOG_ERROR("Invalid stage %d %d %d", static_cast(d), f, static_cast(s)); + throw runtime_error("Invalid stage"); + } + + return stage->second; +} + +DashAclRuleInfo::DashAclRuleInfo(const DashAclRule &rule) : + m_src_tags(rule.m_src_tags), + m_dst_tags(rule.m_dst_tags) +{ + SWSS_LOG_ENTER(); +} + +bool DashAclRuleInfo::isTagUsed(const std::string &tag_id) const +{ + return (m_src_tags.find(tag_id) != end(m_src_tags)) || (m_dst_tags.find(tag_id) != end(m_dst_tags)); +} + +DashAclGroupMgr::DashAclGroupMgr(DBConnector *db, DashOrch *dashorch, DashAclOrch *aclorch) : + m_dash_orch(dashorch), + m_dash_acl_orch(aclorch), + m_dash_acl_rules_table(new Table(db, APP_DASH_ACL_RULE_TABLE_NAME)) +{ + SWSS_LOG_ENTER(); +} + +void DashAclGroupMgr::init(DashAclGroup& group) +{ + SWSS_LOG_ENTER(); + group.m_dash_acl_group_id = SAI_NULL_OBJECT_ID; + + for (auto& rule: group.m_dash_acl_rule_table) + { + rule.second.m_dash_acl_rule_id = SAI_NULL_OBJECT_ID; + } +} + +void DashAclGroupMgr::create(DashAclGroup& group) +{ + SWSS_LOG_ENTER(); + + vector attrs; + + attrs.emplace_back(); + attrs.back().id = SAI_DASH_ACL_GROUP_ATTR_IP_ADDR_FAMILY; + attrs.back().value.s32 = group.m_ip_version; + + auto status = sai_dash_acl_api->create_dash_acl_group(&group.m_dash_acl_group_id, gSwitchId, static_cast(attrs.size()), attrs.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create ACL group: %d, %s", status, sai_serialize_status(status).c_str()); + handleSaiCreateStatus((sai_api_t)SAI_API_DASH_ACL, status); + } + + CrmResourceType crm_rtype = (group.m_ip_version == SAI_IP_ADDR_FAMILY_IPV4) ? 
+ CrmResourceType::CRM_DASH_IPV4_ACL_GROUP : CrmResourceType::CRM_DASH_IPV6_ACL_GROUP; + gCrmOrch->incCrmDashAclUsedCounter(crm_rtype, group.m_dash_acl_group_id); +} + +task_process_status DashAclGroupMgr::create(const string& group_id, DashAclGroup& group) +{ + SWSS_LOG_ENTER(); + + if (exists(group_id)) + { + return task_failed; + } + + create(group); + + m_groups_table.emplace(group_id, group); + + SWSS_LOG_INFO("Created ACL group %s", group_id.c_str()); + + return task_success; +} + +void DashAclGroupMgr::remove(DashAclGroup& group) +{ + SWSS_LOG_ENTER(); + + if (group.m_dash_acl_group_id == SAI_NULL_OBJECT_ID) + { + return; + } + + sai_status_t status = sai_dash_acl_api->remove_dash_acl_group(group.m_dash_acl_group_id); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove ACL group: %d, %s", status, sai_serialize_status(status).c_str()); + handleSaiRemoveStatus((sai_api_t)SAI_API_DASH_ACL, status); + } + + CrmResourceType crm_rtype = (group.m_ip_version == SAI_IP_ADDR_FAMILY_IPV4) ? 
+ CrmResourceType::CRM_DASH_IPV4_ACL_GROUP : CrmResourceType::CRM_DASH_IPV6_ACL_GROUP; + gCrmOrch->decCrmDashAclUsedCounter(crm_rtype, group.m_dash_acl_group_id); + + group.m_dash_acl_group_id = SAI_NULL_OBJECT_ID; +} + +task_process_status DashAclGroupMgr::remove(const string& group_id) +{ + SWSS_LOG_ENTER(); + + auto group_it = m_groups_table.find(group_id); + if (group_it == m_groups_table.end()) + { + SWSS_LOG_INFO("ACL group %s doesn't exist", group_id.c_str()); + return task_success; + } + + auto& group = group_it->second; + + if (!group.m_dash_acl_rule_table.empty()) + { + SWSS_LOG_ERROR("ACL group %s still has %zu rules", group_id.c_str(), group.m_dash_acl_rule_table.size()); + return task_need_retry; + } + + if (isBound(group)) + { + SWSS_LOG_ERROR("ACL group %s still has %zu references", group_id.c_str(), group.m_in_tables.size() + group.m_out_tables.size()); + return task_need_retry; + } + + remove(group); + + m_groups_table.erase(group_id); + SWSS_LOG_INFO("Removed ACL group %s", group_id.c_str()); + + return task_success; +} + +bool DashAclGroupMgr::exists(const string& group_id) const +{ + SWSS_LOG_ENTER(); + + return m_groups_table.find(group_id) != m_groups_table.end(); +} + +task_process_status DashAclGroupMgr::onUpdate(const string& group_id, const string& tag_id, const DashTag& tag) +{ + SWSS_LOG_ENTER(); + + auto group_it = m_groups_table.find(group_id); + if (group_it == m_groups_table.end()) + { + return task_success; + } + + auto& group = group_it->second; + if (isBound(group)) + { + // If the group is bound to at least one ENI refresh the full group to update the affected rules. + // When the group is bound to the ENI we need to make sure that the update of the affected rules will be atomic. + SWSS_LOG_INFO("Update full ACL group %s", group_id.c_str()); + + return refreshAclGroupFull(group_id); + } + + // If the group is not bound to ENI update the rule immediately. 
+ SWSS_LOG_INFO("Update ACL group %s", group_id.c_str()); + for (auto& rule_it: group.m_dash_acl_rule_table) + { + auto& rule_id = rule_it.first; + auto& rule_info = rule_it.second; + if (rule_info.isTagUsed(tag_id)) + { + DashAclRule rule; + bool found = fetchRule(group_id, rule_id, rule); + if (!found) + { + SWSS_LOG_ERROR("Failed to fetch group %s rule %s", group_id.c_str(), rule_id.c_str()); + return task_failed; + } + removeRule(group, rule_info); + rule_info = createRule(group, rule); + } + } + + return task_success; +} + +task_process_status DashAclGroupMgr::refreshAclGroupFull(const string &group_id) +{ + SWSS_LOG_ENTER(); + + auto& group = m_groups_table[group_id]; + + DashAclGroup new_group = group; + init(new_group); + create(new_group); + + for (auto& rule_it: new_group.m_dash_acl_rule_table) + { + auto& rule_id = rule_it.first; + auto& rule_info = rule_it.second; + DashAclRule rule; + bool found = fetchRule(group_id, rule_id, rule); + if (!found) + { + SWSS_LOG_ERROR("Failed to fetch group %s rule %s", group_id.c_str(), rule_id.c_str()); + return task_failed; + } + + rule_info = createRule(new_group, rule); + } + + for (const auto& table: new_group.m_in_tables) + { + const auto& eni_id = table.first; + const auto& stages = table.second; + + const auto eni = m_dash_orch->getEni(eni_id); + ABORT_IF_NOT(eni != nullptr, "Failed to get ENI %s", eni_id.c_str()); + + for (const auto& stage: stages) + { + bind(new_group, *eni, DashAclDirection::IN, stage); + } + } + + for (const auto& table: new_group.m_out_tables) + { + const auto& eni_id = table.first; + const auto& stages = table.second; + + const auto eni = m_dash_orch->getEni(eni_id); + ABORT_IF_NOT(eni != nullptr, "Failed to get ENI %s", eni_id.c_str()); + + for (const auto& stage: stages) + { + bind(new_group, *eni, DashAclDirection::OUT, stage); + } + } + + removeAclGroupFull(group); + + group = new_group; + + return task_success; +} + +void DashAclGroupMgr::removeAclGroupFull(DashAclGroup& group) +{ + 
SWSS_LOG_ENTER(); + + for (auto& rule: group.m_dash_acl_rule_table) + { + removeRule(group, rule.second); + } + + remove(group); +} + +DashAclRuleInfo DashAclGroupMgr::createRule(DashAclGroup& group, DashAclRule& rule) +{ + SWSS_LOG_ENTER(); + + vector attrs; + vector src_prefixes = {}; + vector dst_prefixes = {}; + + DashAclRuleInfo rule_info = rule; + + auto any_ip = [] (const auto& g) + { + sai_ip_prefix_t ip_prefix = {}; + ip_prefix.addr_family = g.isIpV4() ? SAI_IP_ADDR_FAMILY_IPV4 : SAI_IP_ADDR_FAMILY_IPV6; + return ip_prefix; + }; + + attrs.emplace_back(); + attrs.back().id = SAI_DASH_ACL_RULE_ATTR_PRIORITY; + attrs.back().value.u32 = rule.m_priority; + + attrs.emplace_back(); + attrs.back().id = SAI_DASH_ACL_RULE_ATTR_ACTION; + + if (rule.m_action == DashAclRule::Action::ALLOW) + { + attrs.back().value.s32 = rule.m_terminating ? + SAI_DASH_ACL_RULE_ACTION_PERMIT : SAI_DASH_ACL_RULE_ACTION_PERMIT_AND_CONTINUE; + } + else + { + attrs.back().value.s32 = rule.m_terminating ? + SAI_DASH_ACL_RULE_ACTION_DENY : SAI_DASH_ACL_RULE_ACTION_DENY_AND_CONTINUE; + } + + attrs.emplace_back(); + attrs.back().id = SAI_DASH_ACL_RULE_ATTR_PROTOCOL; + + vector protocols; + if (rule.m_protocols.size()) { + protocols = rule.m_protocols; + } else { + protocols = all_protocols; + } + + attrs.back().value.u8list.count = static_cast(protocols.size()); + attrs.back().value.u8list.list = protocols.data(); + + if (!rule.m_src_prefixes.empty()) + { + src_prefixes.insert(src_prefixes.end(), + rule.m_src_prefixes.begin(), rule.m_src_prefixes.end()); + } + + if (!rule.m_dst_prefixes.empty()) + { + dst_prefixes.insert(dst_prefixes.end(), + rule.m_dst_prefixes.begin(), rule.m_dst_prefixes.end()); + } + + for (const auto &tag : rule.m_src_tags) + { + const auto& prefixes = m_dash_acl_orch->getDashAclTagMgr().getPrefixes(tag); + + src_prefixes.insert(src_prefixes.end(), + prefixes.begin(), prefixes.end()); + } + + for (const auto &tag : rule.m_dst_tags) + { + const auto& prefixes = 
m_dash_acl_orch->getDashAclTagMgr().getPrefixes(tag); + + dst_prefixes.insert(dst_prefixes.end(), + prefixes.begin(), prefixes.end()); + } + + if (src_prefixes.empty()) + { + src_prefixes.push_back(any_ip(group)); + } + + if (dst_prefixes.empty()) + { + dst_prefixes.push_back(any_ip(group)); + } + + attrs.emplace_back(); + attrs.back().id = SAI_DASH_ACL_RULE_ATTR_SIP; + attrs.back().value.ipprefixlist.count = static_cast(src_prefixes.size()); + attrs.back().value.ipprefixlist.list = src_prefixes.data(); + + attrs.emplace_back(); + attrs.back().id = SAI_DASH_ACL_RULE_ATTR_DIP; + attrs.back().value.ipprefixlist.count = static_cast(dst_prefixes.size()); + attrs.back().value.ipprefixlist.list = dst_prefixes.data(); + + attrs.emplace_back(); + attrs.back().id = SAI_DASH_ACL_RULE_ATTR_SRC_PORT; + attrs.back().value.u16rangelist.count = static_cast(rule.m_src_ports.size()); + attrs.back().value.u16rangelist.list = rule.m_src_ports.data(); + + attrs.emplace_back(); + attrs.back().id = SAI_DASH_ACL_RULE_ATTR_DST_PORT; + attrs.back().value.u16rangelist.count = static_cast(rule.m_dst_ports.size()); + attrs.back().value.u16rangelist.list = rule.m_dst_ports.data(); + + attrs.emplace_back(); + attrs.back().id = SAI_DASH_ACL_RULE_ATTR_DASH_ACL_GROUP_ID; + attrs.back().value.oid = group.m_dash_acl_group_id; + + auto status = sai_dash_acl_api->create_dash_acl_rule(&rule_info.m_dash_acl_rule_id, gSwitchId, static_cast(attrs.size()), attrs.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create ACL rule: %d, %s", status, sai_serialize_status(status).c_str()); + handleSaiCreateStatus((sai_api_t)SAI_API_DASH_ACL, status); + } + + CrmResourceType crm_rtype = (group.m_ip_version == SAI_IP_ADDR_FAMILY_IPV4) ? 
+ CrmResourceType::CRM_DASH_IPV4_ACL_RULE : CrmResourceType::CRM_DASH_IPV6_ACL_RULE; + gCrmOrch->incCrmDashAclUsedCounter(crm_rtype, group.m_dash_acl_group_id); + + return rule_info; +} + +task_process_status DashAclGroupMgr::createRule(const string& group_id, const string& rule_id, DashAclRule& rule) +{ + SWSS_LOG_ENTER(); + + auto group_it = m_groups_table.find(group_id); + if (group_it == m_groups_table.end()) + { + SWSS_LOG_INFO("ACL group %s doesn't exist, waiting for group creating before creating rule %s", group_id.c_str(), rule_id.c_str()); + return task_need_retry; + } + auto& group = group_it->second; + + auto acl_rule_it = group.m_dash_acl_rule_table.find(rule_id); + ABORT_IF_NOT(acl_rule_it == group.m_dash_acl_rule_table.end(), "Failed to create ACL rule %s. Rule already exist in ACL group %s", rule_id.c_str(), group_id.c_str()); + + for (const auto& tag_id : rule.m_src_tags) + { + if (!m_dash_acl_orch->getDashAclTagMgr().exists(tag_id)) + { + SWSS_LOG_INFO("ACL tag %s doesn't exist, waiting for tag creating before creating rule %s", tag_id.c_str(), rule_id.c_str()); + return task_need_retry; + } + } + + for (const auto& tag_id : rule.m_dst_tags) + { + if (!m_dash_acl_orch->getDashAclTagMgr().exists(tag_id)) + { + SWSS_LOG_INFO("ACL tag %s doesn't exist, waiting for tag creating before creating rule %s", tag_id.c_str(), rule_id.c_str()); + return task_need_retry; + } + } + + auto rule_info = createRule(group, rule); + + group.m_dash_acl_rule_table.emplace(rule_id, rule_info); + attachTags(group_id, rule.m_src_tags); + attachTags(group_id, rule.m_dst_tags); + + SWSS_LOG_INFO("Created ACL rule %s:%s", group_id.c_str(), rule_id.c_str()); + + return task_success; +} + +task_process_status DashAclGroupMgr::updateRule(const string& group_id, const string& rule_id, DashAclRule& rule) +{ + SWSS_LOG_ENTER(); + + if (ruleExists(group_id, rule_id)) + { + removeRule(group_id, rule_id); + } + + createRule(group_id, rule_id, rule); + + return task_success; +} + +void 
DashAclGroupMgr::removeRule(DashAclGroup& group, DashAclRuleInfo& rule) +{ + SWSS_LOG_ENTER(); + + if (rule.m_dash_acl_rule_id == SAI_NULL_OBJECT_ID) + { + return; + } + + // Remove the ACL group + auto status = sai_dash_acl_api->remove_dash_acl_rule(rule.m_dash_acl_rule_id); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove ACL rule: %d, %s", status, sai_serialize_status(status).c_str()); + handleSaiRemoveStatus((sai_api_t)SAI_API_DASH_ACL, status); + } + + CrmResourceType crm_resource = (group.m_ip_version == SAI_IP_ADDR_FAMILY_IPV4) ? + CrmResourceType::CRM_DASH_IPV4_ACL_RULE : CrmResourceType::CRM_DASH_IPV6_ACL_RULE; + gCrmOrch->decCrmDashAclUsedCounter(crm_resource, group.m_dash_acl_group_id); + + rule.m_dash_acl_rule_id = SAI_NULL_OBJECT_ID; +} + +task_process_status DashAclGroupMgr::removeRule(const string& group_id, const string& rule_id) +{ + SWSS_LOG_ENTER(); + + if (!ruleExists(group_id, rule_id)) + { + SWSS_LOG_INFO("ACL rule %s:%s does not exists", group_id.c_str(), rule_id.c_str()); + return task_success; + } + + auto& group = m_groups_table[group_id]; + if (isBound(group)) + { + SWSS_LOG_INFO("Failed to remove dash ACL rule %s:%s, ACL group is bound to the ENI", group_id.c_str(), rule_id.c_str()); + return task_need_retry; + } + + auto& rule = group.m_dash_acl_rule_table[rule_id]; + + removeRule(group, rule); + + detachTags(group_id, rule.m_src_tags); + detachTags(group_id, rule.m_dst_tags); + + group.m_dash_acl_rule_table.erase(rule_id); + + SWSS_LOG_INFO("Removed ACL rule %s:%s", group_id.c_str(), rule_id.c_str()); + + return task_success; +} + +bool DashAclGroupMgr::fetchRule(const std::string &group_id, const std::string &rule_id, DashAclRule &rule) +{ + auto key = group_id + ":" + rule_id; + vector tuples; + + bool exists = m_dash_acl_rules_table->get(key, tuples); + if (!exists) + { + SWSS_LOG_ERROR("Failed to fetch DASH ACL Rule %s", key.c_str()); + return false; + } + + AclRule pb_rule; + if (!parsePbMessage(tuples, 
pb_rule)) + { + SWSS_LOG_ERROR("Failed to parse PB message for DASH ACL rule"); + return false; + } + + if (!from_pb(pb_rule, rule)) + { + SWSS_LOG_ERROR("Failed to convert PB DASH ACL Rule"); + return false; + } + + return true; +} + +void DashAclGroupMgr::bind(const DashAclGroup& group, const EniEntry& eni, DashAclDirection direction, DashAclStage stage) +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + + attr.id = getSaiStage(direction, group.m_ip_version, stage); + attr.value.oid = group.m_dash_acl_group_id; + + auto status = sai_dash_eni_api->set_eni_attribute(eni.eni_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to bind ACL group to ENI: %d", status); + handleSaiSetStatus((sai_api_t)SAI_API_DASH_ENI, status); + } +} + +bool DashAclGroupMgr::ruleExists(const string& group_id, const string& rule_id) const +{ + SWSS_LOG_ENTER(); + + auto group_it = m_groups_table.find(group_id); + if (group_it == m_groups_table.end()) + { + return false; + } + + return group_it->second.m_dash_acl_rule_table.find(rule_id) != group_it->second.m_dash_acl_rule_table.end(); +} + +task_process_status DashAclGroupMgr::bind(const string& group_id, const string& eni_id, DashAclDirection direction, DashAclStage stage) +{ + SWSS_LOG_ENTER(); + + auto group_it = m_groups_table.find(group_id); + if (group_it == m_groups_table.end()) + { + SWSS_LOG_INFO("Failed to bind ACL group %s to ENI %s. ACL group does not exist", group_id.c_str(), eni_id.c_str()); + return task_failed; + } + + auto& group = group_it->second; + + if (group.m_dash_acl_rule_table.empty()) + { + SWSS_LOG_INFO("Failed to bind ACL group %s to ENI %s. ACL group has no rules attached.", group_id.c_str(), eni_id.c_str()); + return task_failed; + } + + auto eni = m_dash_orch->getEni(eni_id); + if (!eni) + { + SWSS_LOG_INFO("eni %s cannot be found", eni_id.c_str()); + return task_need_retry; + } + + bind(group, *eni, direction, stage); + + auto& table = (direction == DashAclDirection::IN) ? 
group.m_in_tables : group.m_out_tables; + auto& eni_stages = table[eni_id]; + + eni_stages.insert(stage); + + SWSS_LOG_INFO("Bound ACL group %s to ENI %s", group_id.c_str(), eni_id.c_str()); + + return task_success; +} + +void DashAclGroupMgr::unbind(const DashAclGroup& group, const EniEntry& eni, DashAclDirection direction, DashAclStage stage) +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + + attr.id = getSaiStage(direction, group.m_ip_version, stage); + attr.value.oid = SAI_NULL_OBJECT_ID; + + auto status = sai_dash_eni_api->set_eni_attribute(eni.eni_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to unbind ACL group from ENI: %d", status); + handleSaiSetStatus((sai_api_t)SAI_API_DASH_ENI, status); + } +} + +task_process_status DashAclGroupMgr::unbind(const string& group_id, const string& eni_id, DashAclDirection direction, DashAclStage stage) +{ + SWSS_LOG_ENTER(); + + auto group_it = m_groups_table.find(group_id); + if (group_it == m_groups_table.end()) + { + SWSS_LOG_INFO("ACL group %s does not exist", group_id.c_str()); + return task_success; + } + + auto& group = group_it->second; + + auto eni_entry = m_dash_orch->getEni(eni_id); + if (!eni_entry) + { + SWSS_LOG_INFO("eni %s cannot be found", eni_id.c_str()); + return task_success; + } + + auto& table = (direction == DashAclDirection::IN) ? 
group.m_in_tables : group.m_out_tables; + auto eni_it = table.find(eni_id); + if (eni_it == table.end()) + { + SWSS_LOG_INFO("ACL group %s is not bound to ENI %s", group_id.c_str(), eni_id.c_str()); + return task_success; + } + + auto& eni_stages = eni_it->second; + if (eni_stages.find(stage) == eni_stages.end()) + { + SWSS_LOG_INFO("ACL group %s is not bound to ENI %s stage %d", group_id.c_str(), eni_id.c_str(), static_cast(stage)); + return task_success; + } + + unbind(group, *eni_entry, direction, stage); + + eni_stages.erase(stage); + if (eni_stages.empty()) + { + table.erase(eni_it); + } + + return task_success; +} + +bool DashAclGroupMgr::isBound(const string &group_id) +{ + SWSS_LOG_ENTER(); + + if (!exists(group_id)) + { + return false; + } + + return isBound(m_groups_table[group_id]); +} + +bool DashAclGroupMgr::isBound(const DashAclGroup& group) +{ + SWSS_LOG_ENTER(); + + return !group.m_in_tables.empty() || !group.m_out_tables.empty(); +} + +void DashAclGroupMgr::attachTags(const string &group_id, const unordered_set& tags) +{ + SWSS_LOG_ENTER(); + + for (const auto& tag_id : tags) + { + m_dash_acl_orch->getDashAclTagMgr().attach(tag_id, group_id); + } +} + +void DashAclGroupMgr::detachTags(const string &group_id, const unordered_set& tags) +{ + SWSS_LOG_ENTER(); + + for (const auto& tag_id : tags) + { + m_dash_acl_orch->getDashAclTagMgr().detach(tag_id, group_id); + } +} diff --git a/orchagent/dash/dashaclgroupmgr.h b/orchagent/dash/dashaclgroupmgr.h new file mode 100644 index 0000000000..229b38f1d4 --- /dev/null +++ b/orchagent/dash/dashaclgroupmgr.h @@ -0,0 +1,139 @@ +#pragma once + +#include +#include + +#include +#include +#include + +#include "dashorch.h" +#include "dashtagmgr.h" +#include "table.h" + +#include "dash_api/acl_group.pb.h" +#include "dash_api/acl_rule.pb.h" +#include "dash_api/acl_in.pb.h" +#include "dash_api/acl_out.pb.h" + +enum class DashAclStage +{ + STAGE1, + STAGE2, + STAGE3, + STAGE4, + STAGE5, +}; + +enum class 
DashAclDirection +{ + IN, + OUT, +}; + +struct DashAclRule +{ + enum class Action + { + ALLOW, + DENY, + }; + + sai_uint32_t m_priority; + Action m_action; + bool m_terminating; + std::vector m_protocols; + std::vector m_src_prefixes; + std::vector m_dst_prefixes; + std::unordered_set m_src_tags; + std::unordered_set m_dst_tags; + std::vector m_src_ports; + std::vector m_dst_ports; +}; + +struct DashAclRuleInfo +{ + sai_object_id_t m_dash_acl_rule_id = SAI_NULL_OBJECT_ID; + + std::unordered_set m_src_tags; + std::unordered_set m_dst_tags; + + DashAclRuleInfo() = default; + DashAclRuleInfo(const DashAclRule &rule); + + bool isTagUsed(const std::string &tag_id) const; +}; + +struct DashAclGroup +{ + using EniTable = std::unordered_map>; + using RuleTable = std::unordered_map; + using RuleKeys = std::unordered_set; + sai_object_id_t m_dash_acl_group_id = SAI_NULL_OBJECT_ID; + + std::string m_guid; + sai_ip_addr_family_t m_ip_version; + RuleTable m_dash_acl_rule_table; + + EniTable m_in_tables; + EniTable m_out_tables; + + bool isIpV4() const + { + return m_ip_version == SAI_IP_ADDR_FAMILY_IPV4; + } + + bool isIpV6() const + { + return m_ip_version == SAI_IP_ADDR_FAMILY_IPV6; + } +}; + +bool from_pb(const dash::acl_rule::AclRule& data, DashAclRule& rule); +bool from_pb(const dash::acl_group::AclGroup &data, DashAclGroup& group); + +class DashAclOrch; + +class DashAclGroupMgr +{ + DashOrch *m_dash_orch; + DashAclOrch *m_dash_acl_orch; + std::unordered_map m_groups_table; + std::unique_ptr m_dash_acl_rules_table; + +public: + DashAclGroupMgr(swss::DBConnector *db, DashOrch *dashorch, DashAclOrch *aclorch); + + task_process_status create(const std::string& group_id, DashAclGroup& group); + task_process_status remove(const std::string& group_id); + bool exists(const std::string& group_id) const; + bool isBound(const std::string& group_id); + + task_process_status onUpdate(const std::string& group_id, const std::string& tag_id,const DashTag& tag); + + task_process_status 
createRule(const std::string& group_id, const std::string& rule_id, DashAclRule& rule); + task_process_status updateRule(const std::string& group_id, const std::string& rule_id, DashAclRule& rule); + task_process_status removeRule(const std::string& group_id, const std::string& rule_id); + bool ruleExists(const std::string& group_id, const std::string& rule_id) const; + + task_process_status bind(const std::string& group_id, const std::string& eni_id, DashAclDirection direction, DashAclStage stage); + task_process_status unbind(const std::string& group_id, const std::string& eni_id, DashAclDirection direction, DashAclStage stage); + +private: + void init(DashAclGroup& group); + void create(DashAclGroup& group); + void remove(DashAclGroup& group); + + DashAclRuleInfo createRule(DashAclGroup& group, DashAclRule& rule); + void removeRule(DashAclGroup& group, DashAclRuleInfo& rule); + bool fetchRule(const std::string &group_id, const std::string &rule_id, DashAclRule &rule); + + void bind(const DashAclGroup& group, const EniEntry& eni, DashAclDirection direction, DashAclStage stage); + void unbind(const DashAclGroup& group, const EniEntry& eni, DashAclDirection direction, DashAclStage stage); + bool isBound(const DashAclGroup& group); + void attachTags(const std::string &group_id, const std::unordered_set& tags); + void detachTags(const std::string &group_id, const std::unordered_set& tags); + + task_process_status refreshAclGroupFull(const std::string &group_id); + void removeAclGroupFull(DashAclGroup& group); +}; diff --git a/orchagent/dash/dashaclorch.cpp b/orchagent/dash/dashaclorch.cpp new file mode 100644 index 0000000000..98bce18826 --- /dev/null +++ b/orchagent/dash/dashaclorch.cpp @@ -0,0 +1,395 @@ +#include +#include +#include +#include + +#include + +#include "dashaclorch.h" +#include "taskworker.h" +#include "pbutils.h" +#include "crmorch.h" +#include "dashaclgroupmgr.h" +#include "dashtagmgr.h" +#include "saihelper.h" + +using namespace std; +using 
namespace swss; +using namespace dash::acl_in; +using namespace dash::acl_out; +using namespace dash::acl_rule; +using namespace dash::acl_group; +using namespace dash::tag; +using namespace dash::types; + +template +static bool extractVariables(const string &input, char delimiter, T &output, Args &... args) +{ + const auto tokens = swss::tokenize(input, delimiter); + try + { + swss::lexical_convert(tokens, output, args...); + return true; + } + catch(const exception& e) + { + return false; + } +} + +namespace swss { + +template<> +inline void lexical_convert(const string &buffer, DashAclStage &stage) +{ + SWSS_LOG_ENTER(); + + if (buffer == "1") + { + stage = DashAclStage::STAGE1; + } + else if (buffer == "2") + { + stage = DashAclStage::STAGE2; + } + else if (buffer == "3") + { + stage = DashAclStage::STAGE3; + } + else if (buffer == "4") + { + stage = DashAclStage::STAGE4; + } + else if (buffer == "5") + { + stage = DashAclStage::STAGE5; + } + else + { + SWSS_LOG_ERROR("Invalid stage : %s", buffer.c_str()); + throw invalid_argument("Invalid stage"); + } + +} + +} + +DashAclOrch::DashAclOrch(DBConnector *db, const vector &tables, DashOrch *dash_orch, ZmqServer *zmqServer) : + ZmqOrch(db, tables, zmqServer), + m_dash_orch(dash_orch), + m_group_mgr(db, dash_orch, this), + m_tag_mgr(this) + +{ + SWSS_LOG_ENTER(); +} + +DashAclGroupMgr& DashAclOrch::getDashAclGroupMgr() +{ + SWSS_LOG_ENTER(); + return m_group_mgr; +} + +DashTagMgr& DashAclOrch::getDashAclTagMgr() +{ + SWSS_LOG_ENTER(); + return m_tag_mgr; +} + +void DashAclOrch::doTask(ConsumerBase &consumer) +{ + SWSS_LOG_ENTER(); + + const static TaskMap TaskMap = { + PbWorker::makeMemberTask(APP_DASH_ACL_IN_TABLE_NAME, SET_COMMAND, &DashAclOrch::taskUpdateDashAclIn, this), + KeyOnlyWorker::makeMemberTask(APP_DASH_ACL_IN_TABLE_NAME, DEL_COMMAND, &DashAclOrch::taskRemoveDashAclIn, this), + PbWorker::makeMemberTask(APP_DASH_ACL_OUT_TABLE_NAME, SET_COMMAND, &DashAclOrch::taskUpdateDashAclOut, this), + 
KeyOnlyWorker::makeMemberTask(APP_DASH_ACL_OUT_TABLE_NAME, DEL_COMMAND, &DashAclOrch::taskRemoveDashAclOut, this), + PbWorker::makeMemberTask(APP_DASH_ACL_GROUP_TABLE_NAME, SET_COMMAND, &DashAclOrch::taskUpdateDashAclGroup, this), + KeyOnlyWorker::makeMemberTask(APP_DASH_ACL_GROUP_TABLE_NAME, DEL_COMMAND, &DashAclOrch::taskRemoveDashAclGroup, this), + PbWorker::makeMemberTask(APP_DASH_ACL_RULE_TABLE_NAME, SET_COMMAND, &DashAclOrch::taskUpdateDashAclRule, this), + KeyOnlyWorker::makeMemberTask(APP_DASH_ACL_RULE_TABLE_NAME, DEL_COMMAND, &DashAclOrch::taskRemoveDashAclRule, this), + PbWorker::makeMemberTask(APP_DASH_PREFIX_TAG_TABLE_NAME, SET_COMMAND, &DashAclOrch::taskUpdateDashPrefixTag, this), + KeyOnlyWorker::makeMemberTask(APP_DASH_PREFIX_TAG_TABLE_NAME, DEL_COMMAND, &DashAclOrch::taskRemoveDashPrefixTag, this), + }; + + const string &table_name = consumer.getTableName(); + auto itr = consumer.m_toSync.begin(); + while (itr != consumer.m_toSync.end()) + { + task_process_status task_status = task_failed; + auto &message = itr->second; + const string &op = kfvOp(message); + + auto task = TaskMap.find(make_tuple(table_name, op)); + if (task != TaskMap.end()) + { + task_status = task->second->process(kfvKey(message), kfvFieldsValues(message)); + } + else + { + SWSS_LOG_ERROR( + "Unknown task : %s - %s", + table_name.c_str(), + op.c_str()); + } + + if (task_status == task_need_retry) + { + SWSS_LOG_DEBUG( + "Task %s - %s need retry", + table_name.c_str(), + op.c_str()); + ++itr; + } + else + { + if (task_status != task_success) + { + SWSS_LOG_WARN("Task %s - %s fail", + table_name.c_str(), + op.c_str()); + } + else + { + SWSS_LOG_DEBUG( + "Task %s - %s success", + table_name.c_str(), + op.c_str()); + } + + itr = consumer.m_toSync.erase(itr); + } + } +} + +task_process_status DashAclOrch::taskUpdateDashAclIn( + const string &key, + const AclIn &data) +{ + SWSS_LOG_ENTER(); + + for (const auto& gid: { data.v4_acl_group_id(), data.v6_acl_group_id() }) + { + if 
(gid.empty()) + { + continue; + } + auto status = bindAclToEni(DashAclDirection::IN, key, gid); + if (status != task_success) + { + return status; + } + } + + return task_success; +} + +task_process_status DashAclOrch::taskRemoveDashAclIn( + const string &key) +{ + SWSS_LOG_ENTER(); + + return unbindAclFromEni(DashAclDirection::IN, key); +} + +task_process_status DashAclOrch::taskUpdateDashAclOut( + const string &key, + const AclOut &data) +{ + SWSS_LOG_ENTER(); + + for (const auto& gid: { data.v4_acl_group_id(), data.v6_acl_group_id() }) + { + if (gid.empty()) + { + continue; + } + auto status = bindAclToEni(DashAclDirection::OUT, key, gid); + if (status != task_success) + { + return status; + } + } + + return task_success; +} + +task_process_status DashAclOrch::taskRemoveDashAclOut( + const string &key) +{ + SWSS_LOG_ENTER(); + + return unbindAclFromEni(DashAclDirection::OUT, key); +} + +task_process_status DashAclOrch::taskUpdateDashAclGroup( + const string &key, + const AclGroup &data) +{ + SWSS_LOG_ENTER(); + + if (m_group_mgr.exists(key)) + { + SWSS_LOG_WARN("Cannot update attributes of ACL group %s", key.c_str()); + return task_failed; + } + + DashAclGroup group = {}; + if (!from_pb(data, group)) + { + return task_failed; + } + + return m_group_mgr.create(key, group); +} + +task_process_status DashAclOrch::taskRemoveDashAclGroup( + const string &key) +{ + SWSS_LOG_ENTER(); + + return m_group_mgr.remove(key); +} + +task_process_status DashAclOrch::taskUpdateDashAclRule( + const string &key, + const AclRule &data) +{ + SWSS_LOG_ENTER(); + + string group_id, rule_id; + if (!extractVariables(key, ':', group_id, rule_id)) + { + SWSS_LOG_ERROR("Failed to parse key %s", key.c_str()); + return task_failed; + } + + DashAclRule rule = {}; + + if (!from_pb(data, rule)) + { + return task_failed; + } + + if (m_group_mgr.isBound(group_id)) + { + SWSS_LOG_INFO("Failed to set dash ACL rule %s:%s, ACL group is bound to the ENI", group_id.c_str(), rule_id.c_str()); + return 
task_failed; + } + + if (m_group_mgr.ruleExists(group_id, rule_id)) + { + return m_group_mgr.updateRule(group_id, rule_id, rule); + } + else + { + return m_group_mgr.createRule(group_id, rule_id, rule); + } +} + +task_process_status DashAclOrch::taskRemoveDashAclRule( + const string &key) +{ + SWSS_LOG_ENTER(); + + string group_id, rule_id; + if (!extractVariables(key, ':', group_id, rule_id)) + { + SWSS_LOG_ERROR("Failed to parse key %s", key.c_str()); + return task_failed; + } + + return m_group_mgr.removeRule(group_id, rule_id); +} + +task_process_status DashAclOrch::taskUpdateDashPrefixTag( + const std::string &tag_id, + const PrefixTag &data) +{ + SWSS_LOG_ENTER(); + + DashTag tag = {}; + + if (!from_pb(data, tag)) + { + return task_failed; + } + + if (m_tag_mgr.exists(tag_id)) + { + return m_tag_mgr.update(tag_id, tag); + } + else + { + return m_tag_mgr.create(tag_id, tag); + } +} + +task_process_status DashAclOrch::taskRemoveDashPrefixTag( + const std::string &key) +{ + SWSS_LOG_ENTER(); + + return m_tag_mgr.remove(key); +} + +task_process_status DashAclOrch::bindAclToEni(DashAclDirection direction, const std::string table_id, const std::string &acl_group_id) +{ + SWSS_LOG_ENTER(); + + string eni; + DashAclStage stage; + + if (!extractVariables(table_id, ':', eni, stage)) + { + SWSS_LOG_ERROR("Invalid key : %s", table_id.c_str()); + return task_failed; + } + + DashAclEntry table = { .m_acl_group_id = acl_group_id }; + + auto rv = m_group_mgr.bind(table.m_acl_group_id, eni, direction, stage); + if (rv != task_success) + { + return rv; + } + + DashAclTable& tables = (direction == DashAclDirection::IN) ? 
m_dash_acl_in_table : m_dash_acl_out_table; + tables[table_id] = table; + + return rv; +} + +task_process_status DashAclOrch::unbindAclFromEni(DashAclDirection direction, const std::string table_id) +{ + SWSS_LOG_ENTER(); + + string eni; + DashAclStage stage; + if (!extractVariables(table_id, ':', eni, stage)) + { + SWSS_LOG_ERROR("Invalid key : %s", table_id.c_str()); + return task_failed; + } + + DashAclTable& acl_table = (direction == DashAclDirection::IN) ? m_dash_acl_in_table : m_dash_acl_out_table; + + auto itr = acl_table.find(table_id); + if (itr == acl_table.end()) + { + SWSS_LOG_WARN("ACL %s doesn't exist", table_id.c_str()); + return task_success; + } + auto acl = itr->second; + + auto rv = m_group_mgr.unbind(acl.m_acl_group_id, eni, direction, stage); + if (rv != task_success) + { + return rv; + } + + acl_table.erase(itr); + + return rv; +} diff --git a/orchagent/dash/dashaclorch.h b/orchagent/dash/dashaclorch.h new file mode 100644 index 0000000000..b3859c5c2d --- /dev/null +++ b/orchagent/dash/dashaclorch.h @@ -0,0 +1,91 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include "zmqorch.h" +#include "zmqserver.h" + +#include "dashorch.h" +#include "dashaclgroupmgr.h" +#include "dashtagmgr.h" +#include "dash_api/acl_group.pb.h" +#include "dash_api/acl_rule.pb.h" +#include "dash_api/acl_in.pb.h" +#include "dash_api/acl_out.pb.h" + +struct DashAclEntry { + std::string m_acl_group_id; +}; + +using DashAclTable = std::unordered_map; + +class DashAclOrch : public ZmqOrch +{ +public: + using TaskArgs = std::vector; + + DashAclOrch(swss::DBConnector *db, const std::vector &tables, DashOrch *dash_orch, swss::ZmqServer *zmqServer); + DashAclGroupMgr& getDashAclGroupMgr(); + DashTagMgr& getDashAclTagMgr(); + +private: + void doTask(ConsumerBase &consumer); + + task_process_status taskUpdateDashAclIn( + const std::string &key, + const dash::acl_in::AclIn &data); + 
task_process_status taskRemoveDashAclIn( + const std::string &key); + + task_process_status taskUpdateDashAclOut( + const std::string &key, + const dash::acl_out::AclOut &data); + task_process_status taskRemoveDashAclOut( + const std::string &key); + + task_process_status taskUpdateDashAclGroup( + const std::string &key, + const dash::acl_group::AclGroup &data); + task_process_status taskRemoveDashAclGroup( + const std::string &key); + + task_process_status taskUpdateDashAclRule( + const std::string &key, + const dash::acl_rule::AclRule &data); + task_process_status taskRemoveDashAclRule( + const std::string &key); + + task_process_status taskUpdateDashPrefixTag( + const std::string &key, + const dash::tag::PrefixTag &data); + + task_process_status taskRemoveDashPrefixTag( + const std::string &key); + + task_process_status bindAclToEni( + DashAclDirection direction, + const std::string table_id, + const std::string &acl_group_id); + task_process_status unbindAclFromEni( + DashAclDirection direction, + const std::string table_id); + + DashAclTable m_dash_acl_in_table; + DashAclTable m_dash_acl_out_table; + + DashAclGroupMgr m_group_mgr; + DashTagMgr m_tag_mgr; + + DashOrch *m_dash_orch; +}; diff --git a/orchagent/dash/dashorch.cpp b/orchagent/dash/dashorch.cpp new file mode 100644 index 0000000000..95dde9f888 --- /dev/null +++ b/orchagent/dash/dashorch.cpp @@ -0,0 +1,682 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "converter.h" +#include "dashorch.h" +#include "macaddress.h" +#include "orch.h" +#include "sai.h" +#include "saiextensions.h" +#include "swssnet.h" +#include "tokenize.h" +#include "crmorch.h" +#include "saihelper.h" + +#include "taskworker.h" +#include "pbutils.h" + +using namespace std; +using namespace swss; + +extern std::unordered_map gVnetNameToId; +extern sai_dash_vip_api_t* sai_dash_vip_api; +extern sai_dash_direction_lookup_api_t* sai_dash_direction_lookup_api; +extern sai_dash_eni_api_t* 
sai_dash_eni_api; +extern sai_object_id_t gSwitchId; +extern size_t gMaxBulkSize; +extern CrmOrch *gCrmOrch; + +DashOrch::DashOrch(DBConnector *db, vector &tableName, ZmqServer *zmqServer) : ZmqOrch(db, tableName, zmqServer) +{ + SWSS_LOG_ENTER(); +} + +bool DashOrch::addApplianceEntry(const string& appliance_id, const dash::appliance::Appliance &entry) +{ + SWSS_LOG_ENTER(); + + if (appliance_entries_.find(appliance_id) != appliance_entries_.end()) + { + SWSS_LOG_WARN("Appliance Entry already exists for %s", appliance_id.c_str()); + return true; + } + + uint32_t attr_count = 1; + sai_vip_entry_t vip_entry; + vip_entry.switch_id = gSwitchId; + if (!to_sai(entry.sip(), vip_entry.vip)) + { + return false; + } + sai_attribute_t appliance_attr; + vector appliance_attrs; + sai_status_t status; + appliance_attr.id = SAI_VIP_ENTRY_ATTR_ACTION; + appliance_attr.value.u32 = SAI_VIP_ENTRY_ACTION_ACCEPT; + status = sai_dash_vip_api->create_vip_entry(&vip_entry, attr_count, &appliance_attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create vip entry for %s", appliance_id.c_str()); + task_process_status handle_status = handleSaiCreateStatus((sai_api_t) SAI_API_DASH_VIP, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + sai_direction_lookup_entry_t direction_lookup_entry; + direction_lookup_entry.switch_id = gSwitchId; + direction_lookup_entry.vni = entry.vm_vni(); + appliance_attr.id = SAI_DIRECTION_LOOKUP_ENTRY_ATTR_ACTION; + appliance_attr.value.u32 = SAI_DIRECTION_LOOKUP_ENTRY_ACTION_SET_OUTBOUND_DIRECTION; + status = sai_dash_direction_lookup_api->create_direction_lookup_entry(&direction_lookup_entry, attr_count, &appliance_attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create direction lookup entry for %s", appliance_id.c_str()); + task_process_status handle_status = handleSaiCreateStatus((sai_api_t) SAI_API_DASH_DIRECTION_LOOKUP, status); + if 
(handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + appliance_entries_[appliance_id] = entry; + SWSS_LOG_NOTICE("Created vip and direction lookup entries for %s", appliance_id.c_str()); + + return true; +} + +bool DashOrch::removeApplianceEntry(const string& appliance_id) +{ + SWSS_LOG_ENTER(); + + sai_status_t status; + dash::appliance::Appliance entry; + + if (appliance_entries_.find(appliance_id) == appliance_entries_.end()) + { + SWSS_LOG_WARN("Appliance id does not exist: %s", appliance_id.c_str()); + return true; + } + + entry = appliance_entries_[appliance_id]; + sai_vip_entry_t vip_entry; + vip_entry.switch_id = gSwitchId; + if (!to_sai(entry.sip(), vip_entry.vip)) + { + return false; + } + status = sai_dash_vip_api->remove_vip_entry(&vip_entry); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove vip entry for %s", appliance_id.c_str()); + task_process_status handle_status = handleSaiRemoveStatus((sai_api_t) SAI_API_DASH_VIP, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + sai_direction_lookup_entry_t direction_lookup_entry; + direction_lookup_entry.switch_id = gSwitchId; + direction_lookup_entry.vni = entry.vm_vni(); + status = sai_dash_direction_lookup_api->remove_direction_lookup_entry(&direction_lookup_entry); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove direction lookup entry for %s", appliance_id.c_str()); + task_process_status handle_status = handleSaiRemoveStatus((sai_api_t) SAI_API_DASH_DIRECTION_LOOKUP, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + appliance_entries_.erase(appliance_id); + SWSS_LOG_NOTICE("Removed vip and direction lookup entries for %s", appliance_id.c_str()); + + return true; +} + +void DashOrch::doTaskApplianceTable(ConsumerBase& consumer) +{ + SWSS_LOG_ENTER(); + + auto it = 
consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + string appliance_id = kfvKey(t); + string op = kfvOp(t); + + if (op == SET_COMMAND) + { + dash::appliance::Appliance entry; + + if (!parsePbMessage(kfvFieldsValues(t), entry)) + { + SWSS_LOG_WARN("Requires protobuff at appliance :%s", appliance_id.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + + if (addApplianceEntry(appliance_id, entry)) + { + it = consumer.m_toSync.erase(it); + } + else + { + it++; + } + } + else if (op == DEL_COMMAND) + { + if (removeApplianceEntry(appliance_id)) + { + it = consumer.m_toSync.erase(it); + } + else + { + it++; + } + } + else + { + SWSS_LOG_ERROR("Unknown operation %s", op.c_str()); + it = consumer.m_toSync.erase(it); + } + } +} + +bool DashOrch::addRoutingTypeEntry(const string& routing_type, const dash::route_type::RouteType &entry) +{ + SWSS_LOG_ENTER(); + + if (routing_type_entries_.find(routing_type) != routing_type_entries_.end()) + { + SWSS_LOG_WARN("Routing type entry already exists for %s", routing_type.c_str()); + return true; + } + + routing_type_entries_[routing_type] = entry; + SWSS_LOG_NOTICE("Routing type entry added %s", routing_type.c_str()); + + return true; +} + +bool DashOrch::removeRoutingTypeEntry(const string& routing_type) +{ + SWSS_LOG_ENTER(); + + if (routing_type_entries_.find(routing_type) == routing_type_entries_.end()) + { + SWSS_LOG_WARN("Routing type entry does not exist for %s", routing_type.c_str()); + return true; + } + + routing_type_entries_.erase(routing_type); + SWSS_LOG_NOTICE("Routing type entry removed for %s", routing_type.c_str()); + + return true; +} + +void DashOrch::doTaskRoutingTypeTable(ConsumerBase& consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + string routing_type = kfvKey(t); + string op = kfvOp(t); + + if (op == SET_COMMAND) + { + 
dash::route_type::RouteType entry; + + if (!parsePbMessage(kfvFieldsValues(t), entry)) + { + SWSS_LOG_WARN("Requires protobuff at routing type :%s", routing_type.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + + if (addRoutingTypeEntry(routing_type, entry)) + { + it = consumer.m_toSync.erase(it); + } + else + { + it++; + } + } + else if (op == DEL_COMMAND) + { + if (removeRoutingTypeEntry(routing_type)) + { + it = consumer.m_toSync.erase(it); + } + else + { + it++; + } + } + else + { + SWSS_LOG_ERROR("Unknown operation %s", op.c_str()); + it = consumer.m_toSync.erase(it); + } + } +} + +bool DashOrch::setEniAdminState(const string& eni, const EniEntry& entry) +{ + SWSS_LOG_ENTER(); + + sai_attribute_t eni_attr; + eni_attr.id = SAI_ENI_ATTR_ADMIN_STATE; + eni_attr.value.booldata = entry.metadata.admin_state(); + + sai_status_t status = sai_dash_eni_api->set_eni_attribute(eni_entries_[eni].eni_id, + &eni_attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set ENI admin state for %s", eni.c_str()); + task_process_status handle_status = handleSaiSetStatus((sai_api_t) SAI_API_DASH_ENI, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + eni_entries_[eni].metadata.set_admin_state(entry.metadata.admin_state()); + SWSS_LOG_NOTICE("Set ENI %s admin state to %s", eni.c_str(), entry.metadata.admin_state() ? 
"UP" : "DOWN"); + + return true; +} + +bool DashOrch::addEniObject(const string& eni, EniEntry& entry) +{ + SWSS_LOG_ENTER(); + + const string &vnet = entry.metadata.vnet(); + + if (!vnet.empty() && gVnetNameToId.find(vnet) == gVnetNameToId.end()) + { + SWSS_LOG_INFO("Retry as vnet %s not found", vnet.c_str()); + return false; + } + + if (appliance_entries_.empty()) + { + SWSS_LOG_INFO("Retry as no appliance table entry found"); + return false; + } + + sai_object_id_t &eni_id = entry.eni_id; + sai_attribute_t eni_attr; + vector eni_attrs; + + eni_attr.id = SAI_ENI_ATTR_VNET_ID; + eni_attr.value.oid = gVnetNameToId[entry.metadata.vnet()]; + eni_attrs.push_back(eni_attr); + + bool has_qos = qos_entries_.find(entry.metadata.qos()) != qos_entries_.end(); + if (has_qos) + { + eni_attr.id = SAI_ENI_ATTR_PPS; + eni_attr.value.u32 = qos_entries_[entry.metadata.qos()].bw(); + eni_attrs.push_back(eni_attr); + + eni_attr.id = SAI_ENI_ATTR_CPS; + eni_attr.value.u32 = qos_entries_[entry.metadata.qos()].cps(); + eni_attrs.push_back(eni_attr); + + eni_attr.id = SAI_ENI_ATTR_FLOWS; + eni_attr.value.u32 = qos_entries_[entry.metadata.qos()].flows(); + eni_attrs.push_back(eni_attr); + } + + eni_attr.id = SAI_ENI_ATTR_ADMIN_STATE; + eni_attr.value.booldata = (entry.metadata.admin_state() == dash::eni::State::STATE_ENABLED); + eni_attrs.push_back(eni_attr); + + eni_attr.id = SAI_ENI_ATTR_VM_UNDERLAY_DIP; + if (!to_sai(entry.metadata.underlay_ip(), eni_attr.value.ipaddr)) + { + return false; + } + eni_attrs.push_back(eni_attr); + + eni_attr.id = SAI_ENI_ATTR_VM_VNI; + auto app_entry = appliance_entries_.begin()->second; + eni_attr.value.u32 = app_entry.vm_vni(); + eni_attrs.push_back(eni_attr); + + sai_status_t status = sai_dash_eni_api->create_eni(&eni_id, gSwitchId, + (uint32_t)eni_attrs.size(), eni_attrs.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create ENI object for %s", eni.c_str()); + task_process_status handle_status = 
handleSaiCreateStatus((sai_api_t) SAI_API_DASH_ENI, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_DASH_ENI); + + SWSS_LOG_NOTICE("Created ENI object for %s", eni.c_str()); + + return true; +} + +bool DashOrch::addEniAddrMapEntry(const string& eni, const EniEntry& entry) +{ + SWSS_LOG_ENTER(); + + uint32_t attr_count = 1; + sai_eni_ether_address_map_entry_t eni_ether_address_map_entry; + eni_ether_address_map_entry.switch_id = gSwitchId; + memcpy(eni_ether_address_map_entry.address, entry.metadata.mac_address().c_str(), sizeof(sai_mac_t)); + + sai_attribute_t eni_ether_address_map_entry_attr; + eni_ether_address_map_entry_attr.id = SAI_ENI_ETHER_ADDRESS_MAP_ENTRY_ATTR_ENI_ID; + eni_ether_address_map_entry_attr.value.oid = entry.eni_id; + + sai_status_t status = sai_dash_eni_api->create_eni_ether_address_map_entry(&eni_ether_address_map_entry, attr_count, + &eni_ether_address_map_entry_attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create ENI ether address map entry for %s", MacAddress::to_string(reinterpret_cast(entry.metadata.mac_address().c_str())).c_str()); + task_process_status handle_status = handleSaiCreateStatus((sai_api_t) SAI_API_DASH_ENI, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_DASH_ENI_ETHER_ADDRESS_MAP); + + SWSS_LOG_NOTICE("Created ENI ether address map entry for %s", eni.c_str()); + + return true; +} + +bool DashOrch::addEni(const string& eni, EniEntry &entry) +{ + SWSS_LOG_ENTER(); + + auto it = eni_entries_.find(eni); + if (it != eni_entries_.end() && it->second.metadata.admin_state() != entry.metadata.admin_state()) + { + return setEniAdminState(eni, entry); + } + + else if (it != eni_entries_.end()) + { + SWSS_LOG_WARN("ENI %s already exists", eni.c_str()); + return true; + } + 
+ if (!addEniObject(eni, entry) || !addEniAddrMapEntry(eni, entry)) + { + return false; + } + eni_entries_[eni] = entry; + + return true; +} + +const EniEntry *DashOrch::getEni(const string& eni) const +{ + SWSS_LOG_ENTER(); + + auto it = eni_entries_.find(eni); + if (it == eni_entries_.end()) + { + return nullptr; + } + + return &it->second; +} + +bool DashOrch::removeEniObject(const string& eni) +{ + SWSS_LOG_ENTER(); + + EniEntry entry = eni_entries_[eni]; + sai_status_t status = sai_dash_eni_api->remove_eni(entry.eni_id); + if (status != SAI_STATUS_SUCCESS) + { + //Retry later if object is in use + if (status == SAI_STATUS_OBJECT_IN_USE) + { + return false; + } + SWSS_LOG_ERROR("Failed to remove ENI object for %s", eni.c_str()); + task_process_status handle_status = handleSaiRemoveStatus((sai_api_t) SAI_API_DASH_ENI, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_DASH_ENI); + + SWSS_LOG_NOTICE("Removed ENI object for %s", eni.c_str()); + + return true; +} + +bool DashOrch::removeEniAddrMapEntry(const string& eni) +{ + SWSS_LOG_ENTER(); + + EniEntry entry = eni_entries_[eni]; + sai_eni_ether_address_map_entry_t eni_ether_address_map_entry; + eni_ether_address_map_entry.switch_id = gSwitchId; + memcpy(eni_ether_address_map_entry.address, entry.metadata.mac_address().c_str(), sizeof(sai_mac_t)); + + sai_status_t status = sai_dash_eni_api->remove_eni_ether_address_map_entry(&eni_ether_address_map_entry); + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_ITEM_NOT_FOUND || status == SAI_STATUS_INVALID_PARAMETER) + { + // Entry might have already been deleted. 
Do not retry + return true; + } + SWSS_LOG_ERROR("Failed to remove ENI ether address map entry for %s", eni.c_str()); + task_process_status handle_status = handleSaiRemoveStatus((sai_api_t) SAI_API_DASH_ENI, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_DASH_ENI_ETHER_ADDRESS_MAP); + + SWSS_LOG_NOTICE("Removed ENI ether address map entry for %s", eni.c_str()); + + return true; +} + +bool DashOrch::removeEni(const string& eni) +{ + SWSS_LOG_ENTER(); + + if (eni_entries_.find(eni) == eni_entries_.end()) + { + SWSS_LOG_WARN("ENI %s does not exist", eni.c_str()); + return true; + } + if (!removeEniAddrMapEntry(eni) || !removeEniObject(eni)) + { + return false; + } + eni_entries_.erase(eni); + + return true; +} + +void DashOrch::doTaskEniTable(ConsumerBase& consumer) +{ + SWSS_LOG_ENTER(); + + const auto& tn = consumer.getTableName(); + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + auto t = it->second; + string eni = kfvKey(t); + string op = kfvOp(t); + if (op == SET_COMMAND) + { + EniEntry entry; + + if (!parsePbMessage(kfvFieldsValues(t), entry.metadata)) + { + SWSS_LOG_WARN("Requires protobuff at ENI :%s", eni.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + + if (addEni(eni, entry)) + { + it = consumer.m_toSync.erase(it); + } + else + { + it++; + } + } + else if (op == DEL_COMMAND) + { + if (removeEni(eni)) + { + it = consumer.m_toSync.erase(it); + } + else + { + it++; + } + } + else + { + SWSS_LOG_ERROR("Unknown operation %s", op.c_str()); + it = consumer.m_toSync.erase(it); + } + } +} + +bool DashOrch::addQosEntry(const string& qos_name, const dash::qos::Qos &entry) +{ + SWSS_LOG_ENTER(); + + if (qos_entries_.find(qos_name) != qos_entries_.end()) + { + return true; + } + + qos_entries_[qos_name] = entry; + SWSS_LOG_NOTICE("Added QOS entries for %s", qos_name.c_str()); + + return true; +} + 
+bool DashOrch::removeQosEntry(const string& qos_name) +{ + SWSS_LOG_ENTER(); + + if (qos_entries_.find(qos_name) == qos_entries_.end()) + { + return true; + } + qos_entries_.erase(qos_name); + SWSS_LOG_NOTICE("Removed QOS entries for %s", qos_name.c_str()); + + return true; +} + +void DashOrch::doTaskQosTable(ConsumerBase& consumer) +{ + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + string qos_name = kfvKey(t); + string op = kfvOp(t); + + if (op == SET_COMMAND) + { + dash::qos::Qos entry; + + if (!parsePbMessage(kfvFieldsValues(t), entry)) + { + SWSS_LOG_WARN("Requires protobuff at QOS :%s", qos_name.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + + if (addQosEntry(qos_name, entry)) + { + it = consumer.m_toSync.erase(it); + } + else + { + it++; + } + } + else if (op == DEL_COMMAND) + { + if (removeQosEntry(qos_name)) + { + it = consumer.m_toSync.erase(it); + } + else + { + it++; + } + } + else + { + SWSS_LOG_ERROR("Unknown operation %s", op.c_str()); + it = consumer.m_toSync.erase(it); + } + } +} + +void DashOrch::doTask(ConsumerBase& consumer) +{ + SWSS_LOG_ENTER(); + + const auto& tn = consumer.getTableName(); + + SWSS_LOG_INFO("Table name: %s", tn.c_str()); + + if (tn == APP_DASH_APPLIANCE_TABLE_NAME) + { + doTaskApplianceTable(consumer); + } + else if (tn == APP_DASH_ROUTING_TYPE_TABLE_NAME) + { + doTaskRoutingTypeTable(consumer); + } + else if (tn == APP_DASH_ENI_TABLE_NAME) + { + doTaskEniTable(consumer); + } + else if (tn == APP_DASH_QOS_TABLE_NAME) + { + doTaskQosTable(consumer); + } + else + { + SWSS_LOG_ERROR("Unknown table: %s", tn.c_str()); + } +} diff --git a/orchagent/dash/dashorch.h b/orchagent/dash/dashorch.h new file mode 100644 index 0000000000..eca365225c --- /dev/null +++ b/orchagent/dash/dashorch.h @@ -0,0 +1,66 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include + +#include "bulker.h" +#include "dbconnector.h" +#include 
"ipaddress.h" +#include "ipaddresses.h" +#include "ipprefix.h" +#include "macaddress.h" +#include "timer.h" +#include "zmqorch.h" +#include "zmqserver.h" + +#include "dash_api/appliance.pb.h" +#include "dash_api/route_type.pb.h" +#include "dash_api/eni.pb.h" +#include "dash_api/qos.pb.h" + +struct EniEntry +{ + sai_object_id_t eni_id; + dash::eni::Eni metadata; +}; + +typedef std::map ApplianceTable; +typedef std::map RoutingTypeTable; +typedef std::map EniTable; +typedef std::map QosTable; + +class DashOrch : public ZmqOrch +{ +public: + DashOrch(swss::DBConnector *db, std::vector &tables, swss::ZmqServer *zmqServer); + const EniEntry *getEni(const std::string &eni) const; + +private: + ApplianceTable appliance_entries_; + RoutingTypeTable routing_type_entries_; + EniTable eni_entries_; + QosTable qos_entries_; + void doTask(ConsumerBase &consumer); + void doTaskApplianceTable(ConsumerBase &consumer); + void doTaskRoutingTypeTable(ConsumerBase &consumer); + void doTaskEniTable(ConsumerBase &consumer); + void doTaskQosTable(ConsumerBase &consumer); + bool addApplianceEntry(const std::string& appliance_id, const dash::appliance::Appliance &entry); + bool removeApplianceEntry(const std::string& appliance_id); + bool addRoutingTypeEntry(const std::string& routing_type, const dash::route_type::RouteType &entry); + bool removeRoutingTypeEntry(const std::string& routing_type); + bool addEniObject(const std::string& eni, EniEntry& entry); + bool addEniAddrMapEntry(const std::string& eni, const EniEntry& entry); + bool addEni(const std::string& eni, EniEntry &entry); + bool removeEniObject(const std::string& eni); + bool removeEniAddrMapEntry(const std::string& eni); + bool removeEni(const std::string& eni); + bool setEniAdminState(const std::string& eni, const EniEntry& entry); + bool addQosEntry(const std::string& qos_name, const dash::qos::Qos &entry); + bool removeQosEntry(const std::string& qos_name); +}; diff --git a/orchagent/dash/dashrouteorch.cpp 
b/orchagent/dash/dashrouteorch.cpp new file mode 100644 index 0000000000..6f99435fb0 --- /dev/null +++ b/orchagent/dash/dashrouteorch.cpp @@ -0,0 +1,644 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "converter.h" +#include "dashrouteorch.h" +#include "macaddress.h" +#include "orch.h" +#include "sai.h" +#include "saiextensions.h" +#include "swssnet.h" +#include "tokenize.h" +#include "dashorch.h" +#include "crmorch.h" +#include "saihelper.h" + +#include "taskworker.h" +#include "pbutils.h" +#include "dash_api/route_type.pb.h" + +using namespace std; +using namespace swss; + +extern std::unordered_map gVnetNameToId; +extern sai_dash_outbound_routing_api_t* sai_dash_outbound_routing_api; +extern sai_dash_inbound_routing_api_t* sai_dash_inbound_routing_api; +extern sai_object_id_t gSwitchId; +extern size_t gMaxBulkSize; +extern CrmOrch *gCrmOrch; + +static std::unordered_map sOutboundAction = +{ + { dash::route_type::RoutingType::ROUTING_TYPE_VNET, SAI_OUTBOUND_ROUTING_ENTRY_ACTION_ROUTE_VNET }, + { dash::route_type::RoutingType::ROUTING_TYPE_VNET_DIRECT, SAI_OUTBOUND_ROUTING_ENTRY_ACTION_ROUTE_VNET_DIRECT }, + { dash::route_type::RoutingType::ROUTING_TYPE_DIRECT, SAI_OUTBOUND_ROUTING_ENTRY_ACTION_ROUTE_DIRECT }, + { dash::route_type::RoutingType::ROUTING_TYPE_DROP, SAI_OUTBOUND_ROUTING_ENTRY_ACTION_DROP } +}; + +DashRouteOrch::DashRouteOrch(DBConnector *db, vector &tableName, DashOrch *dash_orch, ZmqServer *zmqServer) : + outbound_routing_bulker_(sai_dash_outbound_routing_api, gMaxBulkSize), + inbound_routing_bulker_(sai_dash_inbound_routing_api, gMaxBulkSize), + ZmqOrch(db, tableName, zmqServer), + dash_orch_(dash_orch) +{ + SWSS_LOG_ENTER(); +} + +bool DashRouteOrch::addOutboundRouting(const string& key, OutboundRoutingBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + bool exists = (routing_entries_.find(key) != routing_entries_.end()); + if (exists) + { + SWSS_LOG_WARN("Outbound routing entry already exists for 
%s", key.c_str()); + return true; + } + if (!dash_orch_->getEni(ctxt.eni)) + { + SWSS_LOG_INFO("Retry as ENI entry %s not found", ctxt.eni.c_str()); + return false; + } + if (ctxt.metadata.has_vnet() && gVnetNameToId.find(ctxt.metadata.vnet()) == gVnetNameToId.end()) + { + SWSS_LOG_INFO("Retry as vnet %s not found", ctxt.metadata.vnet().c_str()); + return false; + } + + sai_outbound_routing_entry_t outbound_routing_entry; + outbound_routing_entry.switch_id = gSwitchId; + outbound_routing_entry.eni_id = dash_orch_->getEni(ctxt.eni)->eni_id; + swss::copy(outbound_routing_entry.destination, ctxt.destination); + sai_attribute_t outbound_routing_attr; + vector outbound_routing_attrs; + auto& object_statuses = ctxt.object_statuses; + + outbound_routing_attr.id = SAI_OUTBOUND_ROUTING_ENTRY_ATTR_ACTION; + outbound_routing_attr.value.u32 = sOutboundAction[ctxt.metadata.action_type()]; + outbound_routing_attrs.push_back(outbound_routing_attr); + + if (ctxt.metadata.action_type() == dash::route_type::RoutingType::ROUTING_TYPE_DIRECT) + { + // Intentionally empty: the DIRECT action type needs no extra attributes + } + else if (ctxt.metadata.action_type() == dash::route_type::RoutingType::ROUTING_TYPE_VNET + && ctxt.metadata.has_vnet() + && !ctxt.metadata.vnet().empty()) + { + outbound_routing_attr.id = SAI_OUTBOUND_ROUTING_ENTRY_ATTR_DST_VNET_ID; + outbound_routing_attr.value.oid = gVnetNameToId[ctxt.metadata.vnet()]; + outbound_routing_attrs.push_back(outbound_routing_attr); + } + else if (ctxt.metadata.action_type() == dash::route_type::RoutingType::ROUTING_TYPE_VNET_DIRECT + && ctxt.metadata.has_vnet_direct() + && !ctxt.metadata.vnet_direct().vnet().empty() + && (ctxt.metadata.vnet_direct().overlay_ip().has_ipv4() || ctxt.metadata.vnet_direct().overlay_ip().has_ipv6())) + { + outbound_routing_attr.id = SAI_OUTBOUND_ROUTING_ENTRY_ATTR_DST_VNET_ID; + outbound_routing_attr.value.oid = gVnetNameToId[ctxt.metadata.vnet_direct().vnet()]; + 
outbound_routing_attrs.push_back(outbound_routing_attr); + + outbound_routing_attr.id = SAI_OUTBOUND_ROUTING_ENTRY_ATTR_OVERLAY_IP; + if (!to_sai(ctxt.metadata.vnet_direct().overlay_ip(), outbound_routing_attr.value.ipaddr)) + { + return false; + } + outbound_routing_attrs.push_back(outbound_routing_attr); + } + else + { + SWSS_LOG_WARN("Attribute action for outbound routing entry %s", key.c_str()); + return false; + } + + object_statuses.emplace_back(); + outbound_routing_bulker_.create_entry(&object_statuses.back(), &outbound_routing_entry, + (uint32_t)outbound_routing_attrs.size(), outbound_routing_attrs.data()); + + return false; +} + +bool DashRouteOrch::addOutboundRoutingPost(const string& key, const OutboundRoutingBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + const auto& object_statuses = ctxt.object_statuses; + if (object_statuses.empty()) + { + return false; + } + + auto it_status = object_statuses.begin(); + sai_status_t status = *it_status++; + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) + { + // Retry if item exists in the bulker + return false; + } + + SWSS_LOG_ERROR("Failed to create outbound routing entry for %s", key.c_str()); + task_process_status handle_status = handleSaiCreateStatus((sai_api_t) SAI_API_DASH_OUTBOUND_ROUTING, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + OutboundRoutingEntry entry = { dash_orch_->getEni(ctxt.eni)->eni_id, ctxt.destination, ctxt.metadata }; + routing_entries_[key] = entry; + + gCrmOrch->incCrmResUsedCounter(ctxt.destination.isV4() ? 
CrmResourceType::CRM_DASH_IPV4_OUTBOUND_ROUTING : CrmResourceType::CRM_DASH_IPV6_OUTBOUND_ROUTING); + + SWSS_LOG_INFO("Outbound routing entry for %s added", key.c_str()); + + return true; +} + +bool DashRouteOrch::removeOutboundRouting(const string& key, OutboundRoutingBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + bool exists = (routing_entries_.find(key) != routing_entries_.end()); + if (!exists) + { + SWSS_LOG_INFO("Failed to find outbound routing entry %s to remove", key.c_str()); + return true; + } + + auto& object_statuses = ctxt.object_statuses; + OutboundRoutingEntry entry = routing_entries_[key]; + sai_outbound_routing_entry_t outbound_routing_entry; + outbound_routing_entry.switch_id = gSwitchId; + outbound_routing_entry.eni_id = entry.eni; + swss::copy(outbound_routing_entry.destination, entry.destination); + object_statuses.emplace_back(); + outbound_routing_bulker_.remove_entry(&object_statuses.back(), &outbound_routing_entry); + + return false; +} + +bool DashRouteOrch::removeOutboundRoutingPost(const string& key, const OutboundRoutingBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + const auto& object_statuses = ctxt.object_statuses; + if (object_statuses.empty()) + { + return false; + } + + auto it_status = object_statuses.begin(); + sai_status_t status = *it_status++; + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_NOT_EXECUTED) + { + // Retry if bulk operation did not execute + return false; + } + SWSS_LOG_ERROR("Failed to remove outbound routing entry for %s", key.c_str()); + task_process_status handle_status = handleSaiRemoveStatus((sai_api_t) SAI_API_DASH_OUTBOUND_ROUTING, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + gCrmOrch->decCrmResUsedCounter(ctxt.destination.isV4() ? 
CrmResourceType::CRM_DASH_IPV4_OUTBOUND_ROUTING : CrmResourceType::CRM_DASH_IPV6_OUTBOUND_ROUTING); + + routing_entries_.erase(key); + SWSS_LOG_INFO("Outbound routing entry for %s removed", key.c_str()); + + return true; +} + +void DashRouteOrch::doTaskRouteTable(ConsumerBase& consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + + while (it != consumer.m_toSync.end()) + { + std::map, + OutboundRoutingBulkContext> toBulk; + + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple tuple = it->second; + const string& key = kfvKey(tuple); + auto op = kfvOp(tuple); + auto rc = toBulk.emplace(std::piecewise_construct, + std::forward_as_tuple(key, op), + std::forward_as_tuple()); + bool inserted = rc.second; + auto &ctxt = rc.first->second; + + if (!inserted) + { + ctxt.clear(); + } + + string& eni = ctxt.eni; + IpPrefix& destination = ctxt.destination; + + vector keys = tokenize(key, ':'); + eni = keys[0]; + string ip_str; + size_t pos = key.find(":", eni.length()); + ip_str = key.substr(pos + 1); + destination = IpPrefix(ip_str); + + if (op == SET_COMMAND) + { + if (!parsePbMessage(kfvFieldsValues(tuple), ctxt.metadata)) + { + SWSS_LOG_WARN("Requires protobuff at OutboundRouting :%s", key.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + if (addOutboundRouting(key, ctxt)) + { + it = consumer.m_toSync.erase(it); + } + else + { + it++; + } + } + else if (op == DEL_COMMAND) + { + if (removeOutboundRouting(key, ctxt)) + { + it = consumer.m_toSync.erase(it); + } + else + { + it++; + } + } + else + { + SWSS_LOG_ERROR("Unknown operation %s", op.c_str()); + it = consumer.m_toSync.erase(it); + } + } + + outbound_routing_bulker_.flush(); + + auto it_prev = consumer.m_toSync.begin(); + while (it_prev != it) + { + KeyOpFieldsValuesTuple t = it_prev->second; + string key = kfvKey(t); + string op = kfvOp(t); + auto found = toBulk.find(make_pair(key, op)); + if (found == toBulk.end()) + { + it_prev++; + continue; + } + + const auto& 
ctxt = found->second; + const auto& object_statuses = ctxt.object_statuses; + if (object_statuses.empty()) + { + it_prev++; + continue; + } + + if (op == SET_COMMAND) + { + if (addOutboundRoutingPost(key, ctxt)) + { + it_prev = consumer.m_toSync.erase(it_prev); + } + else + { + it_prev++; + } + } + else if (op == DEL_COMMAND) + { + if (removeOutboundRoutingPost(key, ctxt)) + { + it_prev = consumer.m_toSync.erase(it_prev); + } + else + { + it_prev++; + } + } + } + } +} + +bool DashRouteOrch::addInboundRouting(const string& key, InboundRoutingBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + bool exists = (routing_rule_entries_.find(key) != routing_rule_entries_.end()); + if (exists) + { + SWSS_LOG_WARN("Inbound routing entry already exists for %s", key.c_str()); + return true; + } + if (!dash_orch_->getEni(ctxt.eni)) + { + SWSS_LOG_INFO("Retry as ENI entry %s not found", ctxt.eni.c_str()); + return false; + } + if (ctxt.metadata.has_vnet() && gVnetNameToId.find(ctxt.metadata.vnet()) == gVnetNameToId.end()) + { + SWSS_LOG_INFO("Retry as vnet %s not found", ctxt.metadata.vnet().c_str()); + return false; + } + + sai_inbound_routing_entry_t inbound_routing_entry; + + inbound_routing_entry.switch_id = gSwitchId; + inbound_routing_entry.eni_id = dash_orch_->getEni(ctxt.eni)->eni_id; + inbound_routing_entry.vni = ctxt.vni; + swss::copy(inbound_routing_entry.sip, ctxt.sip); + swss::copy(inbound_routing_entry.sip_mask, ctxt.sip_mask); + inbound_routing_entry.priority = ctxt.metadata.priority(); + auto& object_statuses = ctxt.object_statuses; + + sai_attribute_t inbound_routing_attr; + vector inbound_routing_attrs; + + inbound_routing_attr.id = SAI_INBOUND_ROUTING_ENTRY_ATTR_ACTION; + inbound_routing_attr.value.u32 = ctxt.metadata.pa_validation() ? 
SAI_INBOUND_ROUTING_ENTRY_ACTION_VXLAN_DECAP_PA_VALIDATE : SAI_INBOUND_ROUTING_ENTRY_ACTION_VXLAN_DECAP; + inbound_routing_attrs.push_back(inbound_routing_attr); + + if (ctxt.metadata.has_vnet()) + { + inbound_routing_attr.id = SAI_INBOUND_ROUTING_ENTRY_ATTR_SRC_VNET_ID; + inbound_routing_attr.value.oid = gVnetNameToId[ctxt.metadata.vnet()]; + inbound_routing_attrs.push_back(inbound_routing_attr); + } + + object_statuses.emplace_back(); + inbound_routing_bulker_.create_entry(&object_statuses.back(), &inbound_routing_entry, + (uint32_t)inbound_routing_attrs.size(), inbound_routing_attrs.data()); + + return false; +} + +bool DashRouteOrch::addInboundRoutingPost(const string& key, const InboundRoutingBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + const auto& object_statuses = ctxt.object_statuses; + if (object_statuses.empty()) + { + return false; + } + + auto it_status = object_statuses.begin(); + sai_status_t status = *it_status++; + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) + { + // Retry if item exists in the bulker + return false; + } + + SWSS_LOG_ERROR("Failed to create inbound routing entry"); + task_process_status handle_status = handleSaiCreateStatus((sai_api_t) SAI_API_DASH_INBOUND_ROUTING, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + InboundRoutingEntry entry = { dash_orch_->getEni(ctxt.eni)->eni_id, ctxt.vni, ctxt.sip, ctxt.sip_mask, ctxt.metadata }; + routing_rule_entries_[key] = entry; + + gCrmOrch->incCrmResUsedCounter(ctxt.sip.isV4() ? 
CrmResourceType::CRM_DASH_IPV4_INBOUND_ROUTING : CrmResourceType::CRM_DASH_IPV6_INBOUND_ROUTING); + + SWSS_LOG_INFO("Inbound routing entry for %s added", key.c_str()); + + return true; +} + +bool DashRouteOrch::removeInboundRouting(const string& key, InboundRoutingBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + bool exists = (routing_rule_entries_.find(key) != routing_rule_entries_.end()); + if (!exists) + { + SWSS_LOG_INFO("Failed to find inbound routing entry %s to remove", key.c_str()); + return true; + } + + auto& object_statuses = ctxt.object_statuses; + InboundRoutingEntry entry = routing_rule_entries_[key]; + sai_inbound_routing_entry_t inbound_routing_entry; + inbound_routing_entry.switch_id = gSwitchId; + inbound_routing_entry.eni_id = entry.eni; + inbound_routing_entry.vni = entry.vni; + swss::copy(inbound_routing_entry.sip, entry.sip); + swss::copy(inbound_routing_entry.sip_mask, entry.sip_mask); + inbound_routing_entry.priority = entry.metadata.priority(); + object_statuses.emplace_back(); + inbound_routing_bulker_.remove_entry(&object_statuses.back(), &inbound_routing_entry); + + return false; +} + +bool DashRouteOrch::removeInboundRoutingPost(const string& key, const InboundRoutingBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + const auto& object_statuses = ctxt.object_statuses; + if (object_statuses.empty()) + { + return false; + } + + auto it_status = object_statuses.begin(); + sai_status_t status = *it_status++; + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_NOT_EXECUTED) + { + // Retry if bulk operation did not execute + return false; + } + SWSS_LOG_ERROR("Failed to remove inbound routing entry for %s", key.c_str()); + task_process_status handle_status = handleSaiRemoveStatus((sai_api_t) SAI_API_DASH_INBOUND_ROUTING, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + gCrmOrch->decCrmResUsedCounter(ctxt.sip.isV4() ? 
CrmResourceType::CRM_DASH_IPV4_INBOUND_ROUTING : CrmResourceType::CRM_DASH_IPV6_INBOUND_ROUTING); + + routing_rule_entries_.erase(key); + SWSS_LOG_INFO("Inbound routing entry for %s removed", key.c_str()); + + return true; +} + +void DashRouteOrch::doTaskRouteRuleTable(ConsumerBase& consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + + while (it != consumer.m_toSync.end()) + { + std::map, + InboundRoutingBulkContext> toBulk; + + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple tuple = it->second; + const string& key = kfvKey(tuple); + auto op = kfvOp(tuple); + auto rc = toBulk.emplace(std::piecewise_construct, + std::forward_as_tuple(key, op), + std::forward_as_tuple()); + bool inserted = rc.second; + auto &ctxt = rc.first->second; + + if (!inserted) + { + ctxt.clear(); + } + + string& eni = ctxt.eni; + uint32_t& vni = ctxt.vni; + IpAddress& sip = ctxt.sip; + IpAddress& sip_mask = ctxt.sip_mask; + IpPrefix prefix; + + vector keys = tokenize(key, ':'); + eni = keys[0]; + vni = to_uint(keys[1]); + string ip_str; + size_t pos = key.find(":", keys[0].length() + keys[1].length() + 1); + ip_str = key.substr(pos + 1); + prefix = IpPrefix(ip_str); + + sip = prefix.getIp(); + sip_mask = prefix.getMask(); + + if (op == SET_COMMAND) + { + if (!parsePbMessage(kfvFieldsValues(tuple), ctxt.metadata)) + { + SWSS_LOG_WARN("Requires protobuff at InboundRouting :%s", key.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + if (addInboundRouting(key, ctxt)) + { + it = consumer.m_toSync.erase(it); + } + else + { + it++; + } + } + else if (op == DEL_COMMAND) + { + if (removeInboundRouting(key, ctxt)) + { + it = consumer.m_toSync.erase(it); + } + else + { + it++; + } + } + else + { + SWSS_LOG_ERROR("Unknown operation %s", op.c_str()); + it = consumer.m_toSync.erase(it); + } + } + + inbound_routing_bulker_.flush(); + + auto it_prev = consumer.m_toSync.begin(); + while (it_prev != it) + { + KeyOpFieldsValuesTuple t = it_prev->second; + 
string key = kfvKey(t); + string op = kfvOp(t); + auto found = toBulk.find(make_pair(key, op)); + if (found == toBulk.end()) + { + it_prev++; + continue; + } + + const auto& ctxt = found->second; + const auto& object_statuses = ctxt.object_statuses; + if (object_statuses.empty()) + { + it_prev++; + continue; + } + + if (op == SET_COMMAND) + { + if (addInboundRoutingPost(key, ctxt)) + { + it_prev = consumer.m_toSync.erase(it_prev); + } + else + { + it_prev++; + } + } + else if (op == DEL_COMMAND) + { + if (removeInboundRoutingPost(key, ctxt)) + { + it_prev = consumer.m_toSync.erase(it_prev); + } + else + { + it_prev++; + } + } + } + } +} + +void DashRouteOrch::doTask(ConsumerBase& consumer) +{ + SWSS_LOG_ENTER(); + + const auto& tn = consumer.getTableName(); + + SWSS_LOG_INFO("Table name: %s", tn.c_str()); + + if (tn == APP_DASH_ROUTE_TABLE_NAME) + { + doTaskRouteTable(consumer); + } + else if (tn == APP_DASH_ROUTE_RULE_TABLE_NAME) + { + doTaskRouteRuleTable(consumer); + } + else + { + SWSS_LOG_ERROR("Unknown table: %s", tn.c_str()); + } +} diff --git a/orchagent/dash/dashrouteorch.h b/orchagent/dash/dashrouteorch.h new file mode 100644 index 0000000000..d61fcaa936 --- /dev/null +++ b/orchagent/dash/dashrouteorch.h @@ -0,0 +1,99 @@ +#pragma once + +#include +#include +#include +#include +#include +#include "bulker.h" +#include "dbconnector.h" +#include "ipaddress.h" +#include "ipaddresses.h" +#include "ipprefix.h" +#include "macaddress.h" +#include "timer.h" +#include "dashorch.h" +#include "zmqorch.h" +#include "zmqserver.h" + +#include "dash_api/route.pb.h" +#include "dash_api/route_rule.pb.h" + + +struct OutboundRoutingEntry +{ + sai_object_id_t eni; + swss::IpPrefix destination; + dash::route::Route metadata; +}; + +struct InboundRoutingEntry +{ + sai_object_id_t eni; + uint32_t vni; + swss::IpAddress sip; + swss::IpAddress sip_mask; + dash::route_rule::RouteRule metadata; +}; + +typedef std::map RoutingTable; +typedef std::map RoutingRuleTable; + +struct 
OutboundRoutingBulkContext +{ + std::string eni; + swss::IpPrefix destination; + dash::route::Route metadata; + std::deque object_statuses; + OutboundRoutingBulkContext() {} + OutboundRoutingBulkContext(const OutboundRoutingBulkContext&) = delete; + OutboundRoutingBulkContext(OutboundRoutingBulkContext&&) = delete; + + void clear() + { + object_statuses.clear(); + } +}; + +struct InboundRoutingBulkContext +{ + std::string eni; + uint32_t vni; + swss::IpAddress sip; + swss::IpAddress sip_mask; + dash::route_rule::RouteRule metadata; + std::deque object_statuses; + InboundRoutingBulkContext() {} + InboundRoutingBulkContext(const InboundRoutingBulkContext&) = delete; + InboundRoutingBulkContext(InboundRoutingBulkContext&&) = delete; + + void clear() + { + object_statuses.clear(); + } +}; + +class DashRouteOrch : public ZmqOrch +{ +public: + DashRouteOrch(swss::DBConnector *db, std::vector &tables, DashOrch *dash_orch, swss::ZmqServer *zmqServer); + +private: + RoutingTable routing_entries_; + RoutingRuleTable routing_rule_entries_; + EntityBulker outbound_routing_bulker_; + EntityBulker inbound_routing_bulker_; + DashOrch *dash_orch_; + + void doTask(ConsumerBase &consumer); + void doTaskRouteTable(ConsumerBase &consumer); + void doTaskRouteRuleTable(ConsumerBase &consumer); + bool addOutboundRouting(const std::string& key, OutboundRoutingBulkContext& ctxt); + bool addOutboundRoutingPost(const std::string& key, const OutboundRoutingBulkContext& ctxt); + bool removeOutboundRouting(const std::string& key, OutboundRoutingBulkContext& ctxt); + bool removeOutboundRoutingPost(const std::string& key, const OutboundRoutingBulkContext& ctxt); + bool addInboundRouting(const std::string& key, InboundRoutingBulkContext& ctxt); + bool addInboundRoutingPost(const std::string& key, const InboundRoutingBulkContext& ctxt); + bool removeInboundRouting(const std::string& key, InboundRoutingBulkContext& ctxt); + bool removeInboundRoutingPost(const std::string& key, const 
InboundRoutingBulkContext& ctxt); +}; diff --git a/orchagent/dash/dashtagmgr.cpp b/orchagent/dash/dashtagmgr.cpp new file mode 100644 index 0000000000..c83d36d987 --- /dev/null +++ b/orchagent/dash/dashtagmgr.cpp @@ -0,0 +1,154 @@ +#include "dashtagmgr.h" + +#include "dashaclorch.h" +#include "saihelper.h" + +using namespace std; +using namespace swss; + +bool from_pb(const dash::tag::PrefixTag& data, DashTag& tag) +{ + if (!to_sai(data.ip_version(), tag.m_ip_version)) + { + return false; + } + + if(!to_sai(data.prefix_list(), tag.m_prefixes)) + { + return false; + } + + return true; +} + +DashTagMgr::DashTagMgr(DashAclOrch *aclorch) : + m_dash_acl_orch(aclorch) +{ + SWSS_LOG_ENTER(); +} + +task_process_status DashTagMgr::create(const string& tag_id, const DashTag& tag) +{ + SWSS_LOG_ENTER(); + + if (exists(tag_id)) + { + return task_failed; + } + + m_tag_table.emplace(tag_id, tag); + + SWSS_LOG_INFO("Created prefix tag %s", tag_id.c_str()); + + return task_success; +} + +task_process_status DashTagMgr::update(const string& tag_id, const DashTag& new_tag) +{ + SWSS_LOG_ENTER(); + + SWSS_LOG_INFO("Updating existing prefix tag %s", tag_id.c_str()); + + auto tag_it = m_tag_table.find(tag_id); + if (tag_it == m_tag_table.end()) + { + SWSS_LOG_ERROR("Prefix tag %s does not exist ", tag_id.c_str()); + return task_failed; + } + + auto& tag = tag_it->second; + + if (tag.m_ip_version != new_tag.m_ip_version) + { + SWSS_LOG_WARN("'ip_version' changing is not supported for tag %s", tag_id.c_str()); + return task_failed; + } + + // Update tag prefixes + tag.m_prefixes = new_tag.m_prefixes; + + for (auto& group_it: tag.m_group_refcnt) + { + const auto& group_id = group_it.first; + auto handle_status = m_dash_acl_orch->getDashAclGroupMgr().onUpdate(group_id, tag_id, tag); + if (handle_status != task_success) + { + return handle_status; + } + } + + return task_success; +} + +task_process_status DashTagMgr::remove(const string& tag_id) +{ + SWSS_LOG_ENTER(); + + auto tag_it = 
m_tag_table.find(tag_id); + if (tag_it == m_tag_table.end()) + { + SWSS_LOG_WARN("Prefix tag %s does not exist ", tag_id.c_str()); + return task_success; + } + + if (!tag_it->second.m_group_refcnt.empty()) + { + SWSS_LOG_WARN("Prefix tag %s is still in use by ACL rule(s)", tag_id.c_str()); + return task_need_retry; + } + + m_tag_table.erase(tag_it); + + return task_success; +} + +bool DashTagMgr::exists(const string& tag_id) const +{ + SWSS_LOG_ENTER(); + + return m_tag_table.find(tag_id) != m_tag_table.end(); +} + +const vector& DashTagMgr::getPrefixes(const string& tag_id) const +{ + SWSS_LOG_ENTER(); + + auto tag_it = m_tag_table.find(tag_id); + ABORT_IF_NOT(tag_it != m_tag_table.end(), "Tag %s does not exist", tag_id.c_str()); + + return tag_it->second.m_prefixes; +} + +task_process_status DashTagMgr::attach(const string& tag_id, const string& group_id) +{ + SWSS_LOG_ENTER(); + + auto tag_it = m_tag_table.find(tag_id); + ABORT_IF_NOT(tag_it != m_tag_table.end(), "Tag %s does not exist", tag_id.c_str()); + auto& tag = tag_it->second; + + ++tag.m_group_refcnt[group_id]; + + SWSS_LOG_NOTICE("Tag %s is used by ACL group %s refcnt: %u", tag_id.c_str(), group_id.c_str(), tag.m_group_refcnt[group_id]); + return task_success; +} + +task_process_status DashTagMgr::detach(const string& tag_id, const string& group_id) +{ + SWSS_LOG_ENTER(); + + auto tag_it = m_tag_table.find(tag_id); + ABORT_IF_NOT(tag_it != m_tag_table.end(), "Tag %s does not exist", tag_id.c_str()); + auto& tag = tag_it->second; + auto group_it = tag.m_group_refcnt.find(group_id); + ABORT_IF_NOT(group_it != tag.m_group_refcnt.end(), "Group %s is not attached to the tag %s", group_id.c_str(), tag_id.c_str()); + + --group_it->second; + if (!group_it->second) + { + tag.m_group_refcnt.erase(group_it); + SWSS_LOG_NOTICE("Tag %s is no longer used by ACL group %s", tag_id.c_str(), group_id.c_str()); + } + + return task_success; +} diff --git a/orchagent/dash/dashtagmgr.h b/orchagent/dash/dashtagmgr.h new file 
mode 100644 index 0000000000..4b69efdaa8 --- /dev/null +++ b/orchagent/dash/dashtagmgr.h @@ -0,0 +1,44 @@ +#pragma once + +#include +#include + +#include +#include +#include + +#include "dashorch.h" +#include "pbutils.h" + +#include "dash_api/prefix_tag.pb.h" + +struct DashTag { + sai_ip_addr_family_t m_ip_version; + std::vector m_prefixes; + std::unordered_map m_group_refcnt; +}; + +bool from_pb(const dash::tag::PrefixTag& data, DashTag& tag); + +class DashAclOrch; + +class DashTagMgr +{ +public: + + DashTagMgr(DashAclOrch *aclorch); + + task_process_status create(const std::string& tag_id, const DashTag& tag); + task_process_status update(const std::string& tag_id, const DashTag& tag); + task_process_status remove(const std::string& tag_id); + bool exists(const std::string& tag_id) const; + + const std::vector& getPrefixes(const std::string& tag_id) const; + + task_process_status attach(const std::string& tag_id, const std::string& group_id); + task_process_status detach(const std::string& tag_id, const std::string& group_id); + +private: + DashAclOrch *m_dash_acl_orch; + std::unordered_map m_tag_table; +}; diff --git a/orchagent/dash/dashvnetorch.cpp b/orchagent/dash/dashvnetorch.cpp new file mode 100644 index 0000000000..e06f1b1e38 --- /dev/null +++ b/orchagent/dash/dashvnetorch.cpp @@ -0,0 +1,765 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "converter.h" +#include "dashvnetorch.h" +#include "ipaddress.h" +#include "macaddress.h" +#include "orch.h" +#include "sai.h" +#include "saiextensions.h" +#include "swssnet.h" +#include "tokenize.h" +#include "dashorch.h" +#include "crmorch.h" +#include "saihelper.h" + +#include "taskworker.h" +#include "pbutils.h" + +using namespace std; +using namespace swss; + +std::unordered_map gVnetNameToId; +extern sai_dash_vnet_api_t* sai_dash_vnet_api; +extern sai_dash_outbound_ca_to_pa_api_t* sai_dash_outbound_ca_to_pa_api; +extern sai_dash_pa_validation_api_t* 
sai_dash_pa_validation_api; +extern sai_object_id_t gSwitchId; +extern size_t gMaxBulkSize; +extern CrmOrch *gCrmOrch; + +DashVnetOrch::DashVnetOrch(DBConnector *db, vector &tables, ZmqServer *zmqServer) : + vnet_bulker_(sai_dash_vnet_api, gSwitchId, gMaxBulkSize), + outbound_ca_to_pa_bulker_(sai_dash_outbound_ca_to_pa_api, gMaxBulkSize), + pa_validation_bulker_(sai_dash_pa_validation_api, gMaxBulkSize), + ZmqOrch(db, tables, zmqServer) +{ + SWSS_LOG_ENTER(); +} + +bool DashVnetOrch::addVnet(const string& vnet_name, DashVnetBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + bool exists = (vnet_table_.find(vnet_name) != vnet_table_.end()); + if (exists) + { + SWSS_LOG_WARN("Vnet already exists for %s", vnet_name.c_str()); + return true; + } + + uint32_t attr_count = 1; + auto& object_ids = ctxt.object_ids; + sai_attribute_t dash_vnet_attr; + dash_vnet_attr.id = SAI_VNET_ATTR_VNI; + dash_vnet_attr.value.u32 = ctxt.metadata.vni(); + object_ids.emplace_back(); + vnet_bulker_.create_entry(&object_ids.back(), attr_count, &dash_vnet_attr); + + return false; +} + +bool DashVnetOrch::addVnetPost(const string& vnet_name, const DashVnetBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + const auto& object_ids = ctxt.object_ids; + if (object_ids.empty()) + { + return false; + } + + auto it_id = object_ids.begin(); + sai_object_id_t id = *it_id++; + if (id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_ERROR("Failed to create vnet entry for %s", vnet_name.c_str()); + return false; + } + + VnetEntry entry = { id, ctxt.metadata }; + vnet_table_[vnet_name] = entry; + gVnetNameToId[vnet_name] = id; + + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_DASH_VNET); + + SWSS_LOG_INFO("Vnet entry added for %s", vnet_name.c_str()); + + return true; +} + +bool DashVnetOrch::removeVnet(const string& vnet_name, DashVnetBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + bool exists = (vnet_table_.find(vnet_name) != vnet_table_.end()); + if (!exists) + { + SWSS_LOG_WARN("Failed to find vnet entry %s to remove", 
vnet_name.c_str()); + return true; + } + + auto& object_statuses = ctxt.object_statuses; + sai_object_id_t vni; + VnetEntry entry = vnet_table_[vnet_name]; + vni = entry.vni; + object_statuses.emplace_back(); + vnet_bulker_.remove_entry(&object_statuses.back(), vni); + + return false; +} + +bool DashVnetOrch::removeVnetPost(const string& vnet_name, const DashVnetBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + const auto& object_statuses = ctxt.object_statuses; + + if (object_statuses.empty()) + { + return false; + } + + auto it_status = object_statuses.begin(); + sai_status_t status = *it_status++; + if (status != SAI_STATUS_SUCCESS) + { + // Retry later if object has non-zero reference to it + if (status == SAI_STATUS_NOT_EXECUTED) + { + return false; + } + SWSS_LOG_ERROR("Failed to remove vnet entry for %s", vnet_name.c_str()); + task_process_status handle_status = handleSaiRemoveStatus((sai_api_t) SAI_API_DASH_VNET, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_DASH_VNET); + + vnet_table_.erase(vnet_name); + gVnetNameToId.erase(vnet_name); + SWSS_LOG_INFO("Vnet entry removed for %s", vnet_name.c_str()); + + return true; +} + +void DashVnetOrch::doTaskVnetTable(ConsumerBase& consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + + while (it != consumer.m_toSync.end()) + { + // Map to store vnet bulk op results + std::map, + DashVnetBulkContext> toBulk; + + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple tuple = it->second; + const string& key = kfvKey(tuple); + auto op = kfvOp(tuple); + auto rc = toBulk.emplace(std::piecewise_construct, + std::forward_as_tuple(key, op), + std::forward_as_tuple()); + bool inserted = rc.second; + auto& vnet_ctxt = rc.first->second; + + if (!inserted) + { + vnet_ctxt.clear(); + } + + if (op == SET_COMMAND) + { + if (!parsePbMessage(kfvFieldsValues(tuple), vnet_ctxt.metadata)) 
+ { + SWSS_LOG_WARN("Requires protobuff at Vnet :%s", key.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + if (addVnet(key, vnet_ctxt)) + { + it = consumer.m_toSync.erase(it); + } + else + { + it++; + } + } + else if (op == DEL_COMMAND) + { + if (removeVnet(key, vnet_ctxt)) + { + it = consumer.m_toSync.erase(it); + } + else + { + it++; + } + } + else + { + SWSS_LOG_ERROR("Invalid command %s", op.c_str()); + it = consumer.m_toSync.erase(it); + } + } + + vnet_bulker_.flush(); + + auto it_prev = consumer.m_toSync.begin(); + while (it_prev != it) + { + KeyOpFieldsValuesTuple t = it_prev->second; + + string key = kfvKey(t); + string op = kfvOp(t); + auto found = toBulk.find(make_pair(key, op)); + if (found == toBulk.end()) + { + it_prev++; + continue; + } + + const auto& vnet_ctxt = found->second; + const auto& object_statuses = vnet_ctxt.object_statuses; + const auto& object_ids = vnet_ctxt.object_ids; + + if (op == SET_COMMAND) + { + if (object_ids.empty()) + { + it_prev++; + continue; + } + if (addVnetPost(key, vnet_ctxt)) + { + it_prev = consumer.m_toSync.erase(it_prev); + } + else + { + it_prev++; + } + } + else if (op == DEL_COMMAND) + { + if (object_statuses.empty()) + { + it_prev++; + continue; + } + if (removeVnetPost(key, vnet_ctxt)) + { + it_prev = consumer.m_toSync.erase(it_prev); + } + else + { + it_prev++; + } + } + } + } +} + +void DashVnetOrch::addOutboundCaToPa(const string& key, VnetMapBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + sai_outbound_ca_to_pa_entry_t outbound_ca_to_pa_entry; + outbound_ca_to_pa_entry.dst_vnet_id = gVnetNameToId[ctxt.vnet_name]; + outbound_ca_to_pa_entry.switch_id = gSwitchId; + swss::copy(outbound_ca_to_pa_entry.dip, ctxt.dip); + auto& object_statuses = ctxt.outbound_ca_to_pa_object_statuses; + sai_attribute_t outbound_ca_to_pa_attr; + vector outbound_ca_to_pa_attrs; + + outbound_ca_to_pa_attr.id = SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_UNDERLAY_DIP; + to_sai(ctxt.metadata.underlay_ip(), 
outbound_ca_to_pa_attr.value.ipaddr); + outbound_ca_to_pa_attrs.push_back(outbound_ca_to_pa_attr); + + outbound_ca_to_pa_attr.id = SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_DMAC; + memcpy(outbound_ca_to_pa_attr.value.mac, ctxt.metadata.mac_address().c_str(), sizeof(sai_mac_t)); + outbound_ca_to_pa_attrs.push_back(outbound_ca_to_pa_attr); + + outbound_ca_to_pa_attr.id = SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_USE_DST_VNET_VNI; + outbound_ca_to_pa_attr.value.booldata = ctxt.metadata.use_dst_vni(); + outbound_ca_to_pa_attrs.push_back(outbound_ca_to_pa_attr); + + object_statuses.emplace_back(); + outbound_ca_to_pa_bulker_.create_entry(&object_statuses.back(), &outbound_ca_to_pa_entry, + (uint32_t)outbound_ca_to_pa_attrs.size(), outbound_ca_to_pa_attrs.data()); +} + +void DashVnetOrch::addPaValidation(const string& key, VnetMapBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + auto& object_statuses = ctxt.pa_validation_object_statuses; + string underlay_ip_str = to_string(ctxt.metadata.underlay_ip()); + string pa_ref_key = ctxt.vnet_name + ":" + underlay_ip_str; + auto it = pa_refcount_table_.find(pa_ref_key); + if (it != pa_refcount_table_.end()) + { + /* + * PA validation entry already exisits. 
Just increment refcount and add + * a dummy success status to satisfy postop + */ + object_statuses.emplace_back(SAI_STATUS_SUCCESS); + pa_refcount_table_[pa_ref_key]++; + SWSS_LOG_INFO("Increment PA refcount to %u for PA IP %s", + pa_refcount_table_[pa_ref_key], + underlay_ip_str.c_str()); + return; + } + + uint32_t attr_count = 1; + sai_pa_validation_entry_t pa_validation_entry; + pa_validation_entry.vnet_id = gVnetNameToId[ctxt.vnet_name]; + pa_validation_entry.switch_id = gSwitchId; + to_sai(ctxt.metadata.underlay_ip(), pa_validation_entry.sip); + sai_attribute_t pa_validation_attr; + + pa_validation_attr.id = SAI_PA_VALIDATION_ENTRY_ATTR_ACTION; + pa_validation_attr.value.u32 = SAI_PA_VALIDATION_ENTRY_ACTION_PERMIT; + + object_statuses.emplace_back(); + pa_validation_bulker_.create_entry(&object_statuses.back(), &pa_validation_entry, + attr_count, &pa_validation_attr); + pa_refcount_table_[pa_ref_key] = 1; + SWSS_LOG_INFO("Initialize PA refcount to 1 for PA IP %s", + underlay_ip_str.c_str()); +} + +bool DashVnetOrch::addVnetMap(const string& key, VnetMapBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + bool exists = (vnet_map_table_.find(key) != vnet_map_table_.end()); + if (!exists) + { + bool vnet_exists = (gVnetNameToId.find(ctxt.vnet_name) != gVnetNameToId.end()); + if (vnet_exists) + { + addOutboundCaToPa(key, ctxt); + addPaValidation(key, ctxt); + } + else + { + SWSS_LOG_INFO("Not creating VNET map for %s since VNET %s doesn't exist", key.c_str(), ctxt.vnet_name.c_str()); + } + return false; + } + /* + * If the VNET map is already added, don't add it to the bulker and + * return true so it's removed from the consumer + */ + return true; +} + +bool DashVnetOrch::addOutboundCaToPaPost(const string& key, const VnetMapBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + const auto& object_statuses = ctxt.outbound_ca_to_pa_object_statuses; + if (object_statuses.empty()) + { + return false; + } + + auto it_status = object_statuses.begin(); + sai_status_t status = 
*it_status++; + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) + { + // Retry if item exists in the bulker + return false; + } + + SWSS_LOG_ERROR("Failed to create CA to PA entry for %s", key.c_str()); + task_process_status handle_status = handleSaiCreateStatus((sai_api_t) SAI_API_DASH_OUTBOUND_CA_TO_PA, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + gCrmOrch->incCrmResUsedCounter(ctxt.dip.isV4() ? CrmResourceType::CRM_DASH_IPV4_OUTBOUND_CA_TO_PA : CrmResourceType::CRM_DASH_IPV6_OUTBOUND_CA_TO_PA); + + SWSS_LOG_INFO("Outbound CA to PA map entry for %s added", key.c_str()); + + return true; +} + +bool DashVnetOrch::addPaValidationPost(const string& key, const VnetMapBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + const auto& object_statuses = ctxt.pa_validation_object_statuses; + if (object_statuses.empty()) + { + return false; + } + + auto it_status = object_statuses.begin(); + string underlay_ip_str = to_string(ctxt.metadata.underlay_ip()); + string pa_ref_key = ctxt.vnet_name + ":" + underlay_ip_str; + sai_status_t status = *it_status++; + if (status != SAI_STATUS_SUCCESS) + { + /* PA validation entry add failed. Remove PA refcount entry */ + pa_refcount_table_.erase(pa_ref_key); + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) + { + // Retry if item exists in the bulker + return false; + } + + SWSS_LOG_ERROR("Failed to create PA validation entry for %s", key.c_str()); + task_process_status handle_status = handleSaiCreateStatus((sai_api_t) SAI_API_DASH_PA_VALIDATION, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + gCrmOrch->incCrmResUsedCounter(ctxt.metadata.underlay_ip().has_ipv4() ? 
CrmResourceType::CRM_DASH_IPV4_PA_VALIDATION : CrmResourceType::CRM_DASH_IPV6_PA_VALIDATION); + + SWSS_LOG_INFO("PA validation entry for %s added", key.c_str()); + + return true; +} + +bool DashVnetOrch::addVnetMapPost(const string& key, const VnetMapBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + bool status = addOutboundCaToPaPost(key, ctxt) && addPaValidationPost(key, ctxt); + if (!status) + { + SWSS_LOG_ERROR("addVnetMapPost failed for %s ", key.c_str()); + return false; + } + + string vnet_name = ctxt.vnet_name; + VnetMapEntry entry = { gVnetNameToId[vnet_name], ctxt.dip, ctxt.metadata }; + vnet_map_table_[key] = entry; + SWSS_LOG_INFO("Vnet map added for %s", key.c_str()); + + return true; +} + +void DashVnetOrch::removeOutboundCaToPa(const string& key, VnetMapBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + auto& object_statuses = ctxt.outbound_ca_to_pa_object_statuses; + sai_outbound_ca_to_pa_entry_t outbound_ca_to_pa_entry; + outbound_ca_to_pa_entry.dst_vnet_id = vnet_map_table_[key].dst_vnet_id; + outbound_ca_to_pa_entry.switch_id = gSwitchId; + swss::copy(outbound_ca_to_pa_entry.dip, vnet_map_table_[key].dip); + + object_statuses.emplace_back(); + outbound_ca_to_pa_bulker_.remove_entry(&object_statuses.back(), &outbound_ca_to_pa_entry); +} + +void DashVnetOrch::removePaValidation(const string& key, VnetMapBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + auto& object_statuses = ctxt.pa_validation_object_statuses; + string underlay_ip = to_string(vnet_map_table_[key].metadata.underlay_ip()); + string pa_ref_key = ctxt.vnet_name + ":" + underlay_ip; + auto it = pa_refcount_table_.find(pa_ref_key); + if (it == pa_refcount_table_.end()) + { + return; + } + else + { + if (--pa_refcount_table_[pa_ref_key] > 0) + { + /* + * PA validation entry already exisits. 
Just decrement refcount and add + * a dummy success status to satisfy postop + */ + object_statuses.emplace_back(SAI_STATUS_SUCCESS); + SWSS_LOG_INFO("Decrement PA refcount to %u for PA IP %s", + pa_refcount_table_[pa_ref_key], + underlay_ip.c_str()); + return; + } + else + { + sai_pa_validation_entry_t pa_validation_entry; + pa_validation_entry.vnet_id = vnet_map_table_[key].dst_vnet_id; + pa_validation_entry.switch_id = gSwitchId; + to_sai(vnet_map_table_[key].metadata.underlay_ip(), pa_validation_entry.sip); + + object_statuses.emplace_back(); + pa_validation_bulker_.remove_entry(&object_statuses.back(), &pa_validation_entry); + SWSS_LOG_INFO("PA refcount refcount is zero for PA IP %s, removing refcount table entry", + underlay_ip.c_str()); + pa_refcount_table_.erase(pa_ref_key); + } + } +} + +bool DashVnetOrch::removeVnetMap(const string& key, VnetMapBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + bool exists = (vnet_map_table_.find(key) != vnet_map_table_.end()); + if (!exists) + { + SWSS_LOG_INFO("Failed to find vnet mapping %s to remove", key.c_str()); + return true; + } + + removePaValidation(key, ctxt); + removeOutboundCaToPa(key, ctxt); + + return false; +} + +bool DashVnetOrch::removeOutboundCaToPaPost(const string& key, const VnetMapBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + const auto& object_statuses = ctxt.outbound_ca_to_pa_object_statuses; + if (object_statuses.empty()) + { + return false; + } + + auto it_status = object_statuses.begin(); + sai_status_t status = *it_status++; + if (status != SAI_STATUS_SUCCESS) + { + // Retry later if object has non-zero reference to it + if (status == SAI_STATUS_NOT_EXECUTED) + { + return false; + } + + SWSS_LOG_ERROR("Failed to remove outbound routing entry for %s", key.c_str()); + task_process_status handle_status = handleSaiRemoveStatus((sai_api_t) SAI_API_DASH_OUTBOUND_CA_TO_PA, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + 
gCrmOrch->decCrmResUsedCounter(vnet_map_table_[key].dip.isV4() ? CrmResourceType::CRM_DASH_IPV4_OUTBOUND_CA_TO_PA : CrmResourceType::CRM_DASH_IPV6_OUTBOUND_CA_TO_PA); + + SWSS_LOG_INFO("Outbound CA to PA map entry for %s removed", key.c_str()); + + return true; +} + +bool DashVnetOrch::removePaValidationPost(const string& key, const VnetMapBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + string underlay_ip = to_string(vnet_map_table_[key].metadata.underlay_ip()); + string pa_ref_key = ctxt.vnet_name + ":" + underlay_ip; + const auto& object_statuses = ctxt.pa_validation_object_statuses; + if (object_statuses.empty()) + { + return false; + } + + auto it_status = object_statuses.begin(); + sai_status_t status = *it_status++; + if (status != SAI_STATUS_SUCCESS) + { + // Retry later if object has non-zero reference to it + if (status == SAI_STATUS_NOT_EXECUTED) + { + return false; + } + + SWSS_LOG_ERROR("Failed to remove PA validation entry for %s", key.c_str()); + task_process_status handle_status = handleSaiRemoveStatus((sai_api_t) SAI_API_DASH_PA_VALIDATION, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + gCrmOrch->decCrmResUsedCounter(vnet_map_table_[key].metadata.underlay_ip().has_ipv4() ? 
CrmResourceType::CRM_DASH_IPV4_PA_VALIDATION : CrmResourceType::CRM_DASH_IPV6_PA_VALIDATION); + + SWSS_LOG_INFO("PA validation entry for %s removed", key.c_str()); + + return true; +} + +bool DashVnetOrch::removeVnetMapPost(const string& key, const VnetMapBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + bool status = removeOutboundCaToPaPost(key, ctxt) && removePaValidationPost(key, ctxt); + if (!status) + { + return false; + } + vnet_map_table_.erase(key); + SWSS_LOG_INFO("Vnet map removed for %s", key.c_str()); + + return true; +} + +void DashVnetOrch::doTaskVnetMapTable(ConsumerBase& consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + + while (it != consumer.m_toSync.end()) + { + std::map, + VnetMapBulkContext> toBulk; + + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple tuple = it->second; + const string& key = kfvKey(tuple); + auto op = kfvOp(tuple); + auto rc = toBulk.emplace(std::piecewise_construct, + std::forward_as_tuple(key, op), + std::forward_as_tuple()); + bool inserted = rc.second; + auto& ctxt = rc.first->second; + + if (!inserted) + { + ctxt.clear(); + } + + string& vnet_name = ctxt.vnet_name; + IpAddress& dip = ctxt.dip; + + vector keys = tokenize(key, ':'); + vnet_name = keys[0]; + size_t pos = key.find(":", vnet_name.length()); + string ip_str = key.substr(pos + 1); + dip = IpAddress(ip_str); + + if (op == SET_COMMAND) + { + if (!parsePbMessage(kfvFieldsValues(tuple), ctxt.metadata)) + { + SWSS_LOG_WARN("Requires protobuff at VnetMap :%s", key.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + if (addVnetMap(key, ctxt)) + { + it = consumer.m_toSync.erase(it); + } + else + { + it++; + } + } + else if (op == DEL_COMMAND) + { + if (removeVnetMap(key, ctxt)) + { + it = consumer.m_toSync.erase(it); + } + else + { + it++; + } + } + else + { + SWSS_LOG_ERROR("Invalid command %s", op.c_str()); + it = consumer.m_toSync.erase(it); + } + } + + outbound_ca_to_pa_bulker_.flush(); + 
pa_validation_bulker_.flush(); + + auto it_prev = consumer.m_toSync.begin(); + while (it_prev != it) + { + KeyOpFieldsValuesTuple t = it_prev->second; + string key = kfvKey(t); + string op = kfvOp(t); + auto found = toBulk.find(make_pair(key, op)); + if (found == toBulk.end()) + { + it_prev++; + continue; + } + + const auto& ctxt = found->second; + const auto& outbound_ca_to_pa_object_statuses = ctxt.outbound_ca_to_pa_object_statuses; + const auto& pa_validation_object_statuses = ctxt.pa_validation_object_statuses; + if (outbound_ca_to_pa_object_statuses.empty() || pa_validation_object_statuses.empty()) + { + it_prev++; + continue; + } + + if (op == SET_COMMAND) + { + if (addVnetMapPost(key, ctxt)) + { + it_prev = consumer.m_toSync.erase(it_prev); + } + else + { + it_prev++; + } + } + else if (op == DEL_COMMAND) + { + if (removeVnetMapPost(key, ctxt)) + { + it_prev = consumer.m_toSync.erase(it_prev); + } + else + { + it_prev++; + } + } + } + } +} + +void DashVnetOrch::doTask(ConsumerBase &consumer) +{ + SWSS_LOG_ENTER(); + + const auto& tn = consumer.getTableName(); + + SWSS_LOG_INFO("Table name: %s", tn.c_str()); + + if (tn == APP_DASH_VNET_TABLE_NAME) + { + doTaskVnetTable(consumer); + } + else if (tn == APP_DASH_VNET_MAPPING_TABLE_NAME) + { + doTaskVnetMapTable(consumer); + } + else + { + SWSS_LOG_ERROR("Unknown table: %s", tn.c_str()); + } +} diff --git a/orchagent/dash/dashvnetorch.h b/orchagent/dash/dashvnetorch.h new file mode 100644 index 0000000000..3a5a06fd98 --- /dev/null +++ b/orchagent/dash/dashvnetorch.h @@ -0,0 +1,106 @@ +#pragma once + +#include +#include +#include +#include +#include +#include "bulker.h" +#include "dbconnector.h" +#include "ipaddress.h" +#include "ipaddresses.h" +#include "macaddress.h" +#include "timer.h" +#include "zmqorch.h" +#include "zmqserver.h" + +#include "dash_api/vnet.pb.h" +#include "dash_api/vnet_mapping.pb.h" + +struct VnetEntry +{ + sai_object_id_t vni; + dash::vnet::Vnet metadata; +}; + +struct VnetMapEntry +{ + 
sai_object_id_t dst_vnet_id; + swss::IpAddress dip; + dash::vnet_mapping::VnetMapping metadata; +}; + +typedef std::unordered_map DashVnetTable; +typedef std::unordered_map DashVnetMapTable; +typedef std::unordered_map PaRefCountTable; + +struct DashVnetBulkContext +{ + std::string vnet_name; + dash::vnet::Vnet metadata; + std::deque object_ids; + std::deque object_statuses; + DashVnetBulkContext() {} + + DashVnetBulkContext(const DashVnetBulkContext&) = delete; + DashVnetBulkContext(DashVnetBulkContext&&) = delete; + + void clear() + { + object_ids.clear(); + object_statuses.clear(); + } +}; + +struct VnetMapBulkContext +{ + std::string vnet_name; + swss::IpAddress dip; + dash::vnet_mapping::VnetMapping metadata; + std::deque outbound_ca_to_pa_object_statuses; + std::deque pa_validation_object_statuses; + VnetMapBulkContext() {} + + VnetMapBulkContext(const VnetMapBulkContext&) = delete; + VnetMapBulkContext(VnetMapBulkContext&&) = delete; + + void clear() + { + outbound_ca_to_pa_object_statuses.clear(); + pa_validation_object_statuses.clear(); + } +}; + +class DashVnetOrch : public ZmqOrch +{ +public: + DashVnetOrch(swss::DBConnector *db, std::vector &tables, swss::ZmqServer *zmqServer); + +private: + DashVnetTable vnet_table_; + DashVnetMapTable vnet_map_table_; + PaRefCountTable pa_refcount_table_; + ObjectBulker vnet_bulker_; + EntityBulker outbound_ca_to_pa_bulker_; + EntityBulker pa_validation_bulker_; + + void doTask(ConsumerBase &consumer); + void doTaskVnetTable(ConsumerBase &consumer); + void doTaskVnetMapTable(ConsumerBase &consumer); + bool addVnet(const std::string& key, DashVnetBulkContext& ctxt); + bool addVnetPost(const std::string& key, const DashVnetBulkContext& ctxt); + bool removeVnet(const std::string& key, DashVnetBulkContext& ctxt); + bool removeVnetPost(const std::string& key, const DashVnetBulkContext& ctxt); + void addOutboundCaToPa(const std::string& key, VnetMapBulkContext& ctxt); + bool addOutboundCaToPaPost(const std::string& key, 
const VnetMapBulkContext& ctxt); + void removeOutboundCaToPa(const std::string& key, VnetMapBulkContext& ctxt); + bool removeOutboundCaToPaPost(const std::string& key, const VnetMapBulkContext& ctxt); + void addPaValidation(const std::string& key, VnetMapBulkContext& ctxt); + bool addPaValidationPost(const std::string& key, const VnetMapBulkContext& ctxt); + void removePaValidation(const std::string& key, VnetMapBulkContext& ctxt); + bool removePaValidationPost(const std::string& key, const VnetMapBulkContext& ctxt); + bool addVnetMap(const std::string& key, VnetMapBulkContext& ctxt); + bool addVnetMapPost(const std::string& key, const VnetMapBulkContext& ctxt); + bool removeVnetMap(const std::string& key, VnetMapBulkContext& ctxt); + bool removeVnetMapPost(const std::string& key, const VnetMapBulkContext& ctxt); +}; diff --git a/orchagent/dash/pbutils.cpp b/orchagent/dash/pbutils.cpp new file mode 100644 index 0000000000..e8cd98f9e8 --- /dev/null +++ b/orchagent/dash/pbutils.cpp @@ -0,0 +1,122 @@ +#include "pbutils.h" + + +using namespace std; +using namespace swss; +using namespace google::protobuf; + +bool to_sai(const dash::types::IpVersion &pb_version, sai_ip_addr_family_t &sai_ip_family) +{ + switch (pb_version) + { + case dash::types::IP_VERSION_IPV4: + sai_ip_family = SAI_IP_ADDR_FAMILY_IPV4; + break; + case dash::types::IP_VERSION_IPV6: + sai_ip_family = SAI_IP_ADDR_FAMILY_IPV6; + break; + default: + return false; + } + + return true; +} + +bool to_sai(const dash::types::IpAddress &pb_address, sai_ip_address_t &sai_address) +{ + SWSS_LOG_ENTER(); + + if (pb_address.has_ipv4()) + { + sai_address.addr_family = SAI_IP_ADDR_FAMILY_IPV4; + sai_address.addr.ip4 = pb_address.ipv4(); + } + else if (pb_address.has_ipv6()) + { + sai_address.addr_family = SAI_IP_ADDR_FAMILY_IPV6; + memcpy(sai_address.addr.ip6, pb_address.ipv6().c_str(), sizeof(sai_address.addr.ip6)); + } + else + { + SWSS_LOG_WARN("The protobuf IP address %s is invalid", 
pb_address.DebugString().c_str()); + return false; + } + + return true; +} + +bool to_sai(const dash::types::IpPrefix &pb_prefix, sai_ip_prefix_t &sai_prefix) +{ + SWSS_LOG_ENTER(); + + if (pb_prefix.ip().has_ipv4() && pb_prefix.mask().has_ipv4()) + { + sai_prefix.addr_family = SAI_IP_ADDR_FAMILY_IPV4; + sai_prefix.addr.ip4 = pb_prefix.ip().ipv4(); + sai_prefix.mask.ip4 = pb_prefix.mask().ipv4(); + } + else if (pb_prefix.ip().has_ipv6() && pb_prefix.mask().has_ipv6()) + { + sai_prefix.addr_family = SAI_IP_ADDR_FAMILY_IPV6; + memcpy(sai_prefix.addr.ip6, pb_prefix.ip().ipv6().c_str(), sizeof(sai_prefix.addr.ip6)); + memcpy(sai_prefix.mask.ip6, pb_prefix.mask().ipv6().c_str(), sizeof(sai_prefix.mask.ip6)); + } + else + { + SWSS_LOG_WARN("The protobuf IP prefix %s is invalid", pb_prefix.DebugString().c_str()); + return false; + } + + return true; +} + +bool to_sai(const RepeatedPtrField &pb_prefixes, vector &sai_prefixes) +{ + SWSS_LOG_ENTER(); + + sai_prefixes.clear(); + sai_prefixes.reserve(pb_prefixes.size()); + + for (auto &pb_prefix : pb_prefixes) + { + sai_ip_prefix_t sai_prefix; + if (!to_sai(pb_prefix, sai_prefix)) + { + sai_prefixes.clear(); + return false; + } + sai_prefixes.push_back(sai_prefix); + } + + return true; +} + +ip_addr_t to_swss(const dash::types::IpAddress &pb_address) +{ + SWSS_LOG_ENTER(); + + ip_addr_t ip_address; + if (pb_address.has_ipv4()) + { + ip_address.family = AF_INET; + ip_address.ip_addr.ipv4_addr = pb_address.ipv4(); + } + else if (pb_address.has_ipv6()) + { + ip_address.family = AF_INET6; + memcpy(ip_address.ip_addr.ipv6_addr, pb_address.ipv6().c_str(), sizeof(ip_address.ip_addr.ipv6_addr)); + } + else + { + SWSS_LOG_THROW("The protobuf IP address %s is invalid", pb_address.DebugString().c_str()); + } + + return ip_address; +} + +std::string to_string(const dash::types::IpAddress &pb_address) +{ + SWSS_LOG_ENTER(); + + return IpAddress(to_swss(pb_address)).to_string(); +} diff --git a/orchagent/dash/pbutils.h 
b/orchagent/dash/pbutils.h new file mode 100644 index 0000000000..080cac4666 --- /dev/null +++ b/orchagent/dash/pbutils.h @@ -0,0 +1,79 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include + +#include "dash_api/types.pb.h" + +bool to_sai(const dash::types::IpVersion &pb_version, sai_ip_addr_family_t &sai_ip_family); + +bool to_sai(const dash::types::IpAddress &pb_address, sai_ip_address_t &sai_address); + +bool to_sai(const dash::types::IpPrefix &pb_prefix, sai_ip_prefix_t &sai_prefix); + +bool to_sai(const google::protobuf::RepeatedPtrField &pb_prefixes, std::vector &sai_prefixes); + +template +bool to_sai(const dash::types::ValueOrRange &pb_range, RangeType &sai_range) +{ + SWSS_LOG_ENTER(); + + using range_type = typename std::conditional::value, uint32_t, + typename std::conditional::value, int32_t, + typename std::conditional::value, uint16_t, + void>::type>::type>::type; + + if (pb_range.has_range()) + { + if (pb_range.range().min() > pb_range.range().max() || pb_range.range().min() < std::numeric_limits::min() || pb_range.range().max() > std::numeric_limits::max()) + { + SWSS_LOG_WARN("The range %s is invalid", pb_range.range().DebugString().c_str()); + return false; + } + sai_range.min = static_cast(pb_range.range().min()); + sai_range.max = static_cast(pb_range.range().max()); + } + else + { + if (pb_range.value() < std::numeric_limits::min() || pb_range.value() > std::numeric_limits::max()) + { + SWSS_LOG_WARN("The value %s is invalid", std::to_string(pb_range.value()).c_str()); + return false; + } + sai_range.min = static_cast(pb_range.value()); + sai_range.max = static_cast(pb_range.value()); + } + + return true; +} + +template +bool to_sai(const google::protobuf::RepeatedPtrField &pb_ranges, std::vector &sai_ranges) +{ + SWSS_LOG_ENTER(); + + sai_ranges.clear(); + sai_ranges.reserve(pb_ranges.size()); + + for (auto &pb_range: pb_ranges) + { + RangeType sai_range; + if (!to_sai(pb_range, sai_range)) + { + sai_ranges.clear(); 
+ return false; + } + sai_ranges.push_back(sai_range); + } + + return true; +} + +swss::ip_addr_t to_swss(const dash::types::IpAddress &pb_address); + +std::string to_string(const dash::types::IpAddress &pb_address); diff --git a/orchagent/dash/taskworker.h b/orchagent/dash/taskworker.h new file mode 100644 index 0000000000..03a3e4ff74 --- /dev/null +++ b/orchagent/dash/taskworker.h @@ -0,0 +1,133 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include + +class TaskWorker +{ +public: + virtual task_process_status process( + const std::string &key, + const std::vector &data) = 0; +}; + +using TaskKey = std::tuple; +using TaskFunc = std::shared_ptr; +using TaskMap = std::map; + +#define PbIdentifier "pb" + +template +bool parsePbMessage( + const std::vector &data, + MessageType &msg) +{ + SWSS_LOG_ENTER(); + + auto pb = swss::fvsGetValue(data, PbIdentifier); + if (pb) + { + if (msg.ParseFromString(*pb)) + { + return true; + } + else + { + SWSS_LOG_WARN("Failed to parse protobuf message from string: %s", pb->c_str()); + } + } + else + { + SWSS_LOG_WARN("Protobuf field cannot be found"); + } + + return false; +} + +template +class PbWorker : public TaskWorker +{ +public: + using Task = std::function; + + PbWorker(const Task &func) : m_func(func) {} + + virtual task_process_status process( + const std::string &key, + const std::vector &data) + { + SWSS_LOG_ENTER(); + + MessageType msg; + if (parsePbMessage(data, msg)) + { + return m_func(key, msg); + } + else + { + SWSS_LOG_WARN("This orch requires protobuff message at :%s", key.c_str()); + } + + return task_process_status::task_invalid_entry; + } + + template + static TaskMap::value_type makeMemberTask( + const std::string &table, + const std::string &op, + MemberFunc func, + ObjType *obj) + { + return std::make_pair( + std::make_tuple(table, op), + std::make_shared >( + std::bind(func, obj, std::placeholders::_1, std::placeholders::_2))); + } + 
+private: + Task m_func; +}; + +class KeyOnlyWorker : public TaskWorker +{ +public: + using Task = std::function; + + KeyOnlyWorker(const Task &func) : m_func(func) {} + + virtual task_process_status process( + const std::string &key, + const std::vector &data) + { + SWSS_LOG_ENTER(); + + return m_func(key); + } + + template + static TaskMap::value_type makeMemberTask( + const std::string &table, + const std::string &op, + MemberFunc func, + ObjType *obj) + { + return std::make_pair( + std::make_tuple(table, op), + std::make_shared( + std::bind(func, obj, std::placeholders::_1))); + } + +private: + Task m_func; +}; diff --git a/orchagent/fabricportsorch.cpp b/orchagent/fabricportsorch.cpp index 1adb84ec08..b47f61a635 100644 --- a/orchagent/fabricportsorch.cpp +++ b/orchagent/fabricportsorch.cpp @@ -9,19 +9,35 @@ #include "schema.h" #include "sai_serialize.h" #include "timer.h" +#include "saihelper.h" +#include "converter.h" +#include "stringutility.h" #define FABRIC_POLLING_INTERVAL_DEFAULT (30) +#define FABRIC_PORT_PREFIX "PORT" #define FABRIC_PORT_ERROR 0 #define FABRIC_PORT_SUCCESS 1 #define FABRIC_PORT_STAT_COUNTER_FLEX_COUNTER_GROUP "FABRIC_PORT_STAT_COUNTER" #define FABRIC_PORT_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 10000 #define FABRIC_QUEUE_STAT_COUNTER_FLEX_COUNTER_GROUP "FABRIC_QUEUE_STAT_COUNTER" #define FABRIC_QUEUE_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 100000 -#define FABRIC_PORT_TABLE "FABRIC_PORT_TABLE" +#define FABRIC_DEBUG_POLLING_INTERVAL_DEFAULT (60) + +// constants for link monitoring +#define MAX_SKIP_CRCERR_ON_LNKUP_POLLS 20 +#define MAX_SKIP_FECERR_ON_LNKUP_POLLS 20 +// the follow constants will be replaced with the number in config_db +#define FEC_ISOLATE_POLLS 2 +#define FEC_UNISOLATE_POLLS 8 +#define ISOLATION_POLLS_CFG 1 +#define RECOVERY_POLLS_CFG 8 +#define ERROR_RATE_CRC_CELLS_CFG 1 +#define ERROR_RATE_RX_CELLS_CFG 61035156 extern sai_object_id_t gSwitchId; extern sai_switch_api_t *sai_switch_api; extern sai_port_api_t *sai_port_api; 
+extern sai_queue_api_t *sai_queue_api; const vector port_stat_ids = { @@ -42,33 +58,45 @@ static const vector queue_stat_ids = SAI_QUEUE_STAT_CURR_OCCUPANCY_LEVEL, }; -FabricPortsOrch::FabricPortsOrch(DBConnector *appl_db, vector &tableNames) : +FabricPortsOrch::FabricPortsOrch(DBConnector *appl_db, vector &tableNames, + bool fabricPortStatEnabled, bool fabricQueueStatEnabled) : Orch(appl_db, tableNames), port_stat_manager(FABRIC_PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, FABRIC_PORT_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true), queue_stat_manager(FABRIC_QUEUE_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, FABRIC_QUEUE_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true), - m_timer(new SelectableTimer(timespec { .tv_sec = FABRIC_POLLING_INTERVAL_DEFAULT, .tv_nsec = 0 })) + m_timer(new SelectableTimer(timespec { .tv_sec = FABRIC_POLLING_INTERVAL_DEFAULT, .tv_nsec = 0 })), + m_debugTimer(new SelectableTimer(timespec { .tv_sec = FABRIC_DEBUG_POLLING_INTERVAL_DEFAULT, .tv_nsec = 0 })) { SWSS_LOG_ENTER(); SWSS_LOG_NOTICE( "FabricPortsOrch constructor" ); m_state_db = shared_ptr(new DBConnector("STATE_DB", 0)); - m_stateTable = unique_ptr(new Table(m_state_db.get(), FABRIC_PORT_TABLE)); + m_stateTable = unique_ptr
(new Table(m_state_db.get(), APP_FABRIC_PORT_TABLE_NAME)); m_counter_db = shared_ptr(new DBConnector("COUNTERS_DB", 0)); - m_laneQueueCounterTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_QUEUE_NAME_MAP)); - m_lanePortCounterTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_QUEUE_PORT_MAP)); + m_portNameQueueCounterTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_FABRIC_QUEUE_NAME_MAP)); + m_portNamePortCounterTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_FABRIC_PORT_NAME_MAP)); + m_fabricCounterTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_TABLE)); m_flex_db = shared_ptr(new DBConnector("FLEX_COUNTER_DB", 0)); - m_flexCounterTable = unique_ptr(new ProducerTable(m_flex_db.get(), FABRIC_PORT_TABLE)); + m_flexCounterTable = unique_ptr(new ProducerTable(m_flex_db.get(), APP_FABRIC_PORT_TABLE_NAME)); + m_appl_db = shared_ptr(new DBConnector("APPL_DB", 0)); + m_applTable = unique_ptr
(new Table(m_appl_db.get(), APP_FABRIC_MONITOR_PORT_TABLE_NAME)); + + m_fabricPortStatEnabled = fabricPortStatEnabled; + m_fabricQueueStatEnabled = fabricQueueStatEnabled; getFabricPortList(); auto executor = new ExecutableTimer(m_timer, this, "FABRIC_POLL"); Orch::addExecutor(executor); m_timer->start(); + + auto debug_executor = new ExecutableTimer(m_debugTimer, this, "FABRIC_DEBUG_POLL"); + Orch::addExecutor(debug_executor); + m_debugTimer->start(); } int FabricPortsOrch::getFabricPortList() @@ -135,8 +163,6 @@ int FabricPortsOrch::getFabricPortList() m_getFabricPortListDone = true; - updateFabricPortState(); - return FABRIC_PORT_SUCCESS; } @@ -147,32 +173,96 @@ bool FabricPortsOrch::allPortsReady() void FabricPortsOrch::generatePortStats() { - // FIX_ME: This function installs flex counters for port stats - // on fabric ports for fabric asics and voq asics (that connect - // to fabric asics via fabric ports). These counters will be - // installed in FLEX_COUNTER_DB, and queried by syncd and updated - // to COUNTERS_DB. - // However, currently BCM SAI doesn't update its code to query - // port stats (metrics in list port_stat_ids) yet. - // Also, BCM sets too low value for "Max logical port count" (256), - // causing syncd to crash on voq asics that now include regular front - // panel ports, fabric ports, and multiple logical ports. - // So, this function will just do nothing for now, and we will readd - // code to install port stats counters when BCM completely supports. 
+ if (!m_fabricPortStatEnabled) return; + + SWSS_LOG_NOTICE("Generate fabric port stats"); + + vector portNamePortCounterMap; + for (auto p : m_fabricLanePortMap) + { + int lane = p.first; + sai_object_id_t port = p.second; + + std::ostringstream portName; + portName << FABRIC_PORT_PREFIX << lane; + portNamePortCounterMap.emplace_back(portName.str(), sai_serialize_object_id(port)); + + // Install flex counters for port stats + std::unordered_set counter_stats; + for (const auto& it: port_stat_ids) + { + counter_stats.emplace(sai_serialize_port_stat(it)); + } + port_stat_manager.setCounterIdList(port, CounterType::PORT, counter_stats); + } + m_portNamePortCounterTable->set("", portNamePortCounterMap); } void FabricPortsOrch::generateQueueStats() { + if (!m_fabricQueueStatEnabled) return; if (m_isQueueStatsGenerated) return; if (!m_getFabricPortListDone) return; - // FIX_ME: Similar to generatePortStats(), generateQueueStats() installs - // flex counters for queue stats on fabric ports for fabric asics and voq asics. - // However, currently BCM SAI doesn't fully support queue stats query. - // Query on queue type and index is not supported for fabric asics while - // voq asics are not completely supported. - // So, this function will just do nothing for now, and we will readd - // code to install queue stats counters when BCM completely supports. + SWSS_LOG_NOTICE("Generate queue map for fabric ports"); + + sai_status_t status; + sai_attribute_t attr; + + for (auto p : m_fabricLanePortMap) + { + int lane = p.first; + sai_object_id_t port = p.second; + + // Each serdes has some pipes (queues) for unicast and multicast. + // But normally fabric serdes uses only one pipe. 
+ attr.id = SAI_PORT_ATTR_QOS_NUMBER_OF_QUEUES; + status = sai_port_api->get_port_attribute(port, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + throw runtime_error("FabricPortsOrch get port queue number failure"); + } + int num_queues = attr.value.u32; + + if (num_queues > 0) + { + vector m_queue_ids; + m_queue_ids.resize(num_queues); + + attr.id = SAI_PORT_ATTR_QOS_QUEUE_LIST; + attr.value.objlist.count = (uint32_t) num_queues; + attr.value.objlist.list = m_queue_ids.data(); + + status = sai_port_api->get_port_attribute(port, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + throw runtime_error("FabricPortsOrch get port queue list failure"); + } + + // Maintain queue map and install flex counters for queue stats + vector portNameQueueMap; + + // Fabric serdes queue type is SAI_QUEUE_TYPE_FABRIC_TX. Since we always + // maintain only one queue for fabric serdes, m_queue_ids size is 1. + // And so, there is no need to query SAI_QUEUE_ATTR_TYPE and SAI_QUEUE_ATTR_INDEX + // for queue. Actually, SAI does not support query these attributes on fabric serdes. 
+ int queueIndex = 0; + std::ostringstream portName; + portName << FABRIC_PORT_PREFIX << lane << ":" << queueIndex; + const auto queue = sai_serialize_object_id(m_queue_ids[queueIndex]); + portNameQueueMap.emplace_back(portName.str(), queue); + + // We collect queue counters like occupancy level + std::unordered_set counter_stats; + for (const auto& it: queue_stat_ids) + { + counter_stats.emplace(sai_serialize_queue_stat(it)); + } + queue_stat_manager.setCounterIdList(m_queue_ids[queueIndex], CounterType::QUEUE, counter_stats); + + m_portNameQueueCounterTable->set("", portNameQueueMap); + } + } m_isQueueStatsGenerated = true; } @@ -199,10 +289,10 @@ void FabricPortsOrch::updateFabricPortState() int lane = p.first; sai_object_id_t port = p.second; - string key = "PORT" + to_string(lane); + string key = FABRIC_PORT_PREFIX + to_string(lane); std::vector values; - uint32_t remote_peer; - uint32_t remote_port; + uint32_t remote_peer = 0; + uint32_t remote_port = 0; attr.id = SAI_PORT_ATTR_FABRIC_ATTACHED; status = sai_port_api->get_port_attribute(port, 1, &attr); @@ -268,6 +358,377 @@ void FabricPortsOrch::updateFabricPortState() } } +void FabricPortsOrch::updateFabricDebugCounters() +{ + if (!m_getFabricPortListDone) return; + + SWSS_LOG_ENTER(); + + // Get time + time_t now; + struct timespec time_now; + if (clock_gettime(CLOCK_MONOTONIC, &time_now) < 0) + { + return; + } + now = time_now.tv_sec; + + int fecIsolatedPolls = FEC_ISOLATE_POLLS; // monPollThreshIsolation + int fecUnisolatePolls = FEC_UNISOLATE_POLLS; // monPollThreshRecovery + int isolationPollsCfg = ISOLATION_POLLS_CFG; // monPollThreshIsolation + int recoveryPollsCfg = RECOVERY_POLLS_CFG; // monPollThreshRecovery + int errorRateCrcCellsCfg = ERROR_RATE_CRC_CELLS_CFG; // monErrThreshCrcCells + int errorRateRxCellsCfg = ERROR_RATE_RX_CELLS_CFG; // monErrThreshRxCells + std::vector constValues; + SWSS_LOG_INFO("updateFabricDebugCounters"); + + // Get debug countesrs (e.g. 
# of cells with crc errors, # of cells) + for (auto p : m_fabricLanePortMap) + { + int lane = p.first; + sai_object_id_t port = p.second; + + string key = FABRIC_PORT_PREFIX + to_string(lane); + // so basically port is the oid + vector fieldValues; + static const array cntNames = + { + "SAI_PORT_STAT_IF_IN_ERRORS", // cells with crc errors + "SAI_PORT_STAT_IF_IN_FABRIC_DATA_UNITS", // rx data cells + "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES" // cell with uncorrectable errors + }; + if (!m_fabricCounterTable->get(sai_serialize_object_id(port), fieldValues)) + { + SWSS_LOG_INFO("no port %s", sai_serialize_object_id(port).c_str()); + } + + uint64_t rxCells = 0; + uint64_t crcErrors = 0; + uint64_t codeErrors = 0; + for (const auto& fv : fieldValues) + { + const auto field = fvField(fv); + const auto value = fvValue(fv); + for (size_t cnt = 0; cnt != cntNames.size(); cnt++) + { + if (field == "SAI_PORT_STAT_IF_IN_ERRORS") + { + crcErrors = stoull(value); + } + else if (field == "SAI_PORT_STAT_IF_IN_FABRIC_DATA_UNITS") + { + rxCells = stoull(value); + } + else if (field == "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES") + { + codeErrors = stoull(value); + } + SWSS_LOG_INFO("port %s %s %lld %lld %lld at %s", + sai_serialize_object_id(port).c_str(), field.c_str(), (long long)crcErrors, + (long long)rxCells, (long long)codeErrors, asctime(gmtime(&now))); + } + } + // now we get the values of: + // *totalNumCells *cellsWithCrcErrors *cellsWithUncorrectableErrors + // + // Check if the error rate (crcErrors/numRxCells) is greater than configured error threshold + // (errorRateCrcCellsCfg/errorRateRxCellsCfg). 
+ // This is changing to check (crcErrors * errorRateRxCellsCfg) > (numRxCells * errorRateCrcCellsCfg) + // Default value is: (crcErrors * 61035156) > (numRxCells * 1) + // numRxCells = snmpBcmRxDataCells + snmpBcmRxControlCells + // As we don't have snmpBcmRxControlCells polled right now, + // we can use snmpBcmRxDataCells only and add snmpBcmRxControlCells later when it is getting polled. + // + // In STATE_DB, add several new attribute for each port: + // consecutivePollsWithErrors POLL_WITH_ERRORS + // consecutivePollsWithNoErrors POLL_WITH_NO_ERRORS + // consecutivePollsWithFecErrs POLL_WITH_FEC_ERRORS + // consecutivePollsWithNoFecErrs POLL_WITH_NOFEC_ERRORS + // + // skipErrorsOnLinkupCount SKIP_ERR_ON_LNKUP_CNT -- for skip all errors during boot up time + // skipCrcErrorsOnLinkupCount SKIP_CRC_ERR_ON_LNKUP_CNT + // skipFecErrorsOnLinkupCount SKIP_FEC_ERR_ON_LNKUP_CNT + // removeProblemLinkCount RM_PROBLEM_LNK_CNT -- this is for feature of remove a flaky link permanently + + int consecutivePollsWithErrors = 0; + int consecutivePollsWithNoErrors = 0; + int consecutivePollsWithFecErrs = 0; + int consecutivePollsWithNoFecErrs = 0; + + int skipCrcErrorsOnLinkupCount = 0; + int skipFecErrorsOnLinkupCount = 0; + uint64_t prevRxCells = 0; + uint64_t prevCrcErrors = 0; + uint64_t prevCodeErrors = 0; + + uint64_t testCrcErrors = 0; + uint64_t testCodeErrors = 0; + + int autoIsolated = 0; + string lnkStatus = "down"; + string testState = "product"; + + // Get the consecutive polls from the state db + std::vector values; + string valuePt; + bool exist = m_stateTable->get(key, values); + if (!exist) + { + SWSS_LOG_INFO("No state infor for port %s", key.c_str()); + return; + } + for (auto val : values) + { + valuePt = fvValue(val); + if (fvField(val) == "STATUS") + { + lnkStatus = valuePt; + continue; + } + if (fvField(val) == "POLL_WITH_ERRORS") + { + consecutivePollsWithErrors = to_uint(valuePt); + continue; + } + if (fvField(val) == "POLL_WITH_NO_ERRORS") + { + 
consecutivePollsWithNoErrors = to_uint(valuePt); + continue; + } + if (fvField(val) == "POLL_WITH_FEC_ERRORS") + { + consecutivePollsWithFecErrs = to_uint(valuePt); + continue; + } + if (fvField(val) == "POLL_WITH_NOFEC_ERRORS") + { + consecutivePollsWithNoFecErrs = to_uint(valuePt); + continue; + } + if (fvField(val) == "SKIP_CRC_ERR_ON_LNKUP_CNT") + { + skipCrcErrorsOnLinkupCount = to_uint(valuePt); + continue; + } + if (fvField(val) == "SKIP_FEC_ERR_ON_LNKUP_CNT") + { + skipFecErrorsOnLinkupCount = to_uint(valuePt); + continue; + } + if (fvField(val) == "RX_CELLS") + { + prevRxCells = to_uint(valuePt); + continue; + } + if (fvField(val) == "CRC_ERRORS") + { + prevCrcErrors = to_uint(valuePt); + continue; + } + if (fvField(val) == "CODE_ERRORS") + { + prevCodeErrors = to_uint(valuePt); + continue; + } + if (fvField(val) == "AUTO_ISOLATED") + { + autoIsolated = to_uint(valuePt); + SWSS_LOG_INFO("port %s currently isolated: %s", key.c_str(),valuePt.c_str()); + continue; + } + if (fvField(val) == "TEST_CRC_ERRORS") + { + testCrcErrors = to_uint(valuePt); + continue; + } + if (fvField(val) == "TEST_CODE_ERRORS") + { + testCodeErrors = to_uint(valuePt); + continue; + } + if (fvField(val) == "TEST") + { + testState = valuePt; + continue; + } + } + + // checking crc errors + int maxSkipCrcCnt = MAX_SKIP_CRCERR_ON_LNKUP_POLLS; + if (testState == "TEST"){ + maxSkipCrcCnt = 2; + } + if (skipCrcErrorsOnLinkupCount < maxSkipCrcCnt) + { + skipCrcErrorsOnLinkupCount += 1; + valuePt = to_string(skipCrcErrorsOnLinkupCount); + m_stateTable->hset(key, "SKIP_CRC_ERR_ON_LNKUP_CNT", valuePt.c_str()); + SWSS_LOG_INFO("port %s updates SKIP_CRC_ERR_ON_LNKUP_CNT to %s %d", + key.c_str(), valuePt.c_str(), skipCrcErrorsOnLinkupCount); + // update error counters. 
+ prevCrcErrors = crcErrors; + } + else + { + uint64_t diffRxCells = 0; + uint64_t diffCrcCells = 0; + + diffRxCells = rxCells - prevRxCells; + if (testState == "TEST"){ + diffCrcCells = testCrcErrors - prevCrcErrors; + prevCrcErrors = 0; + isolationPollsCfg = isolationPollsCfg + 1; + } + else + { + diffCrcCells = crcErrors - prevCrcErrors; + prevCrcErrors = crcErrors; + } + bool isErrorRateMore = + ((diffCrcCells * errorRateRxCellsCfg) > + (diffRxCells * errorRateCrcCellsCfg)); + if (isErrorRateMore) + { + if (consecutivePollsWithErrors < isolationPollsCfg) + { + consecutivePollsWithErrors += 1; + consecutivePollsWithNoErrors = 0; + } + } else { + if (consecutivePollsWithNoErrors < recoveryPollsCfg) + { + consecutivePollsWithNoErrors += 1; + consecutivePollsWithErrors = 0; + } + } + SWSS_LOG_INFO("port %s diffCrcCells %lld", key.c_str(), (long long)diffCrcCells); + SWSS_LOG_INFO("consecutivePollsWithCRCErrs %d consecutivePollsWithNoCRCErrs %d", + consecutivePollsWithErrors, consecutivePollsWithNoErrors); + } + + // checking FEC errors + int maxSkipFecCnt = MAX_SKIP_FECERR_ON_LNKUP_POLLS; + if (testState == "TEST"){ + maxSkipFecCnt = 2; + } + if (skipFecErrorsOnLinkupCount < maxSkipFecCnt) + { + skipFecErrorsOnLinkupCount += 1; + valuePt = to_string(skipFecErrorsOnLinkupCount); + m_stateTable->hset(key, "SKIP_FEC_ERR_ON_LNKUP_CNT", valuePt.c_str()); + SWSS_LOG_INFO("port %s updates SKIP_FEC_ERR_ON_LNKUP_CNT to %s", + key.c_str(), valuePt.c_str()); + // update error counters + prevCodeErrors = codeErrors; + } + else + { + uint64_t diffCodeErrors = 0; + if (testState == "TEST"){ + diffCodeErrors = testCodeErrors - prevCodeErrors; + prevCodeErrors = 0; + fecIsolatedPolls = fecIsolatedPolls + 1; + } + else + { + diffCodeErrors = codeErrors - prevCodeErrors; + prevCodeErrors = codeErrors; + } + SWSS_LOG_INFO("port %s diffCodeErrors %lld", key.c_str(), (long long)diffCodeErrors); + if (diffCodeErrors > 0) + { + if (consecutivePollsWithFecErrs < fecIsolatedPolls) + { + 
consecutivePollsWithFecErrs += 1; + consecutivePollsWithNoFecErrs = 0; + } + } + else if (diffCodeErrors <= 0) + { + if (consecutivePollsWithNoFecErrs < fecUnisolatePolls) + { + consecutivePollsWithNoFecErrs += 1; + consecutivePollsWithFecErrs = 0; + } + } + SWSS_LOG_INFO("consecutivePollsWithFecErrs %d consecutivePollsWithNoFecErrs %d", + consecutivePollsWithFecErrs,consecutivePollsWithNoFecErrs); + SWSS_LOG_INFO("fecUnisolatePolls %d", fecUnisolatePolls); + } + + // take care serdes link shut state setting + if (lnkStatus == "up") + { + // debug information + SWSS_LOG_INFO("port %s status up autoIsolated %d", + key.c_str(), autoIsolated); + SWSS_LOG_INFO("consecutivePollsWithErrors %d consecutivePollsWithFecErrs %d", + consecutivePollsWithErrors, consecutivePollsWithFecErrs); + SWSS_LOG_INFO("consecutivePollsWithNoErrors %d consecutivePollsWithNoFecErrs %d", + consecutivePollsWithNoErrors, consecutivePollsWithNoFecErrs); + if (autoIsolated == 0 && (consecutivePollsWithErrors >= isolationPollsCfg + || consecutivePollsWithFecErrs >= fecIsolatedPolls)) + { + // Link needs to be isolated. + SWSS_LOG_INFO("port %s auto isolated", key.c_str()); + autoIsolated = 1; + valuePt = to_string(autoIsolated); + m_stateTable->hset(key, "AUTO_ISOLATED", valuePt); + SWSS_LOG_NOTICE("port %s set AUTO_ISOLATED %s", key.c_str(), valuePt.c_str()); + // Call SAI api here to actually isolated the link + } + else if (autoIsolated == 1 && consecutivePollsWithNoErrors >= recoveryPollsCfg + && consecutivePollsWithNoFecErrs >= fecUnisolatePolls) + { + // Link is isolated, but no longer needs to be. + SWSS_LOG_INFO("port %s healthy again", key.c_str()); + autoIsolated = 0; + valuePt = to_string(autoIsolated); + m_stateTable->hset(key, "AUTO_ISOLATED", valuePt); + SWSS_LOG_NOTICE("port %s set AUTO_ISOLATED %s", key.c_str(), valuePt.c_str()); + // Can we call SAI api here to unisolate the link? 
+ } + } + else + { + SWSS_LOG_INFO("link down"); + } + + // Update state_db with new data + valuePt = to_string(consecutivePollsWithErrors); + m_stateTable->hset(key, "POLL_WITH_ERRORS", valuePt.c_str()); + SWSS_LOG_INFO("port %s set POLL_WITH_ERRORS %s", key.c_str(), valuePt.c_str()); + + valuePt = to_string(consecutivePollsWithNoErrors); + m_stateTable->hset(key, "POLL_WITH_NO_ERRORS", valuePt.c_str()); + SWSS_LOG_INFO("port %s set POLL_WITH_NO_ERRORS %s", key.c_str(), valuePt.c_str()); + + valuePt = to_string(consecutivePollsWithFecErrs); + m_stateTable->hset(key, "POLL_WITH_FEC_ERRORS", valuePt.c_str()); + SWSS_LOG_INFO("port %s set POLL_WITH_FEC_ERRORS %s", key.c_str(), valuePt.c_str()); + + valuePt = to_string(consecutivePollsWithNoFecErrs); + m_stateTable->hset(key, "POLL_WITH_NOFEC_ERRORS", valuePt.c_str()); + SWSS_LOG_INFO("port %s set POLL_WITH_NOFEC_ERRORS %s", + key.c_str(), valuePt.c_str()); + + valuePt = to_string(rxCells); + m_stateTable->hset(key, "RX_CELLS", valuePt.c_str()); + SWSS_LOG_INFO("port %s set RX_CELLS %s", + key.c_str(), valuePt.c_str()); + + valuePt = to_string(prevCrcErrors); + m_stateTable->hset(key, "CRC_ERRORS", valuePt.c_str()); + SWSS_LOG_INFO("port %s set CRC_ERRORS %s", + key.c_str(), valuePt.c_str()); + + valuePt = to_string(prevCodeErrors); + m_stateTable->hset(key, "CODE_ERRORS", valuePt.c_str()); + SWSS_LOG_INFO("port %s set CODE_ERRORS %s", + key.c_str(), valuePt.c_str()); + } +} + void FabricPortsOrch::doTask() { } @@ -280,13 +741,30 @@ void FabricPortsOrch::doTask(swss::SelectableTimer &timer) { SWSS_LOG_ENTER(); - if (!m_getFabricPortListDone) + if (timer.getFd() == m_timer->getFd()) { - getFabricPortList(); - } + if (!m_getFabricPortListDone) + { + getFabricPortList(); + } - if (m_getFabricPortListDone) + if (m_getFabricPortListDone) + { + updateFabricPortState(); + } + } + else if (timer.getFd() == m_debugTimer->getFd()) { - updateFabricPortState(); + if (!m_getFabricPortListDone) + { + // Skip collecting debug 
information + // as we don't have all fabric ports yet. + return; + } + + if (m_getFabricPortListDone) + { + updateFabricDebugCounters(); + } } } diff --git a/orchagent/fabricportsorch.h b/orchagent/fabricportsorch.h index c641ee566d..4c274cba00 100644 --- a/orchagent/fabricportsorch.h +++ b/orchagent/fabricportsorch.h @@ -12,21 +12,29 @@ class FabricPortsOrch : public Orch, public Subject { public: - FabricPortsOrch(DBConnector *appl_db, vector &tableNames); + FabricPortsOrch(DBConnector *appl_db, vector &tableNames, + bool fabricPortStatEnabled=true, bool fabricQueueStatEnabled=true); bool allPortsReady(); void generateQueueStats(); private: + bool m_fabricPortStatEnabled; + bool m_fabricQueueStatEnabled; + shared_ptr m_state_db; shared_ptr m_counter_db; shared_ptr m_flex_db; + shared_ptr m_appl_db; unique_ptr
m_stateTable; - unique_ptr
m_laneQueueCounterTable; - unique_ptr
m_lanePortCounterTable; + unique_ptr
m_portNameQueueCounterTable; + unique_ptr
m_portNamePortCounterTable; + unique_ptr
m_fabricCounterTable; + unique_ptr
m_applTable; unique_ptr m_flexCounterTable; swss::SelectableTimer *m_timer = nullptr; + swss::SelectableTimer *m_debugTimer = nullptr; FlexCounterManager port_stat_manager; FlexCounterManager queue_stat_manager; @@ -42,6 +50,7 @@ class FabricPortsOrch : public Orch, public Subject int getFabricPortList(); void generatePortStats(); void updateFabricPortState(); + void updateFabricDebugCounters(); void doTask() override; void doTask(Consumer &consumer); diff --git a/orchagent/fdborch.cpp b/orchagent/fdborch.cpp index 720d6c5c66..03c854fee3 100644 --- a/orchagent/fdborch.cpp +++ b/orchagent/fdborch.cpp @@ -42,8 +42,8 @@ FdbOrch::FdbOrch(DBConnector* applDbConnector, vector app Orch::addExecutor(flushNotifier); /* Add FDB notifications support from ASIC */ - DBConnector *notificationsDb = new DBConnector("ASIC_DB", 0); - m_fdbNotificationConsumer = new swss::NotificationConsumer(notificationsDb, "NOTIFICATIONS"); + m_notificationsDb = make_shared("ASIC_DB", 0); + m_fdbNotificationConsumer = new swss::NotificationConsumer(m_notificationsDb.get(), "NOTIFICATIONS"); auto fdbNotifier = new Notifier(m_fdbNotificationConsumer, this, "FDB_NOTIFICATIONS"); Orch::addExecutor(fdbNotifier); } @@ -109,6 +109,7 @@ bool FdbOrch::storeFdbEntryState(const FdbUpdate& update) fdbdata.bridge_port_id = update.port.m_bridge_port_id; fdbdata.type = update.type; + fdbdata.sai_fdb_type = update.sai_fdb_type; fdbdata.origin = FDB_ORIGIN_LEARN; fdbdata.remote_ip = ""; fdbdata.esi = ""; @@ -206,20 +207,19 @@ Handles the SAI_FDB_EVENT_FLUSHED notification recieved from syncd */ void FdbOrch::handleSyncdFlushNotif(const sai_object_id_t& bv_id, const sai_object_id_t& bridge_port_id, - const MacAddress& mac) + const MacAddress& mac, + const sai_fdb_entry_type_t& sai_fdb_type) { // Consolidated flush will have a zero mac MacAddress flush_mac("00:00:00:00:00:00"); - /* TODO: Read the SAI_FDB_FLUSH_ATTR_ENTRY_TYPE attr from the flush notif - and clear the entries accordingly, currently only non-static 
entries are flushed - */ if (bridge_port_id == SAI_NULL_OBJECT_ID && bv_id == SAI_NULL_OBJECT_ID) { for (auto itr = m_entries.begin(); itr != m_entries.end();) { auto curr = itr++; - if (curr->second.type != "static" && (curr->first.mac == mac || mac == flush_mac) && curr->second.is_flush_pending) + if (curr->second.sai_fdb_type == sai_fdb_type && + (curr->first.mac == mac || mac == flush_mac) && curr->second.is_flush_pending) { clearFdbEntry(curr->first); } @@ -233,7 +233,8 @@ void FdbOrch::handleSyncdFlushNotif(const sai_object_id_t& bv_id, auto curr = itr++; if (curr->second.bridge_port_id == bridge_port_id) { - if (curr->second.type != "static" && (curr->first.mac == mac || mac == flush_mac) && curr->second.is_flush_pending) + if (curr->second.sai_fdb_type == sai_fdb_type && + (curr->first.mac == mac || mac == flush_mac) && curr->second.is_flush_pending) { clearFdbEntry(curr->first); } @@ -248,7 +249,8 @@ void FdbOrch::handleSyncdFlushNotif(const sai_object_id_t& bv_id, auto curr = itr++; if (curr->first.bv_id == bv_id) { - if (curr->second.type != "static" && (curr->first.mac == mac || mac == flush_mac) && curr->second.is_flush_pending) + if (curr->second.sai_fdb_type == sai_fdb_type && + (curr->first.mac == mac || mac == flush_mac) && curr->second.is_flush_pending) { clearFdbEntry(curr->first); } @@ -263,7 +265,8 @@ void FdbOrch::handleSyncdFlushNotif(const sai_object_id_t& bv_id, auto curr = itr++; if (curr->first.bv_id == bv_id && curr->second.bridge_port_id == bridge_port_id) { - if (curr->second.type != "static" && (curr->first.mac == mac || mac == flush_mac) && curr->second.is_flush_pending) + if (curr->second.sai_fdb_type == sai_fdb_type && + (curr->first.mac == mac || mac == flush_mac) && curr->second.is_flush_pending) { clearFdbEntry(curr->first); } @@ -274,7 +277,8 @@ void FdbOrch::handleSyncdFlushNotif(const sai_object_id_t& bv_id, void FdbOrch::update(sai_fdb_event_t type, const sai_fdb_entry_t* entry, - sai_object_id_t bridge_port_id) + 
sai_object_id_t bridge_port_id, + const sai_fdb_entry_type_t &sai_fdb_type) { SWSS_LOG_ENTER(); @@ -365,6 +369,7 @@ void FdbOrch::update(sai_fdb_event_t type, attr.id = SAI_FDB_ENTRY_ATTR_TYPE; attr.value.s32 = SAI_FDB_ENTRY_TYPE_DYNAMIC; + update.sai_fdb_type = SAI_FDB_ENTRY_TYPE_DYNAMIC; attrs.push_back(attr); attr.id = SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID; @@ -399,6 +404,7 @@ void FdbOrch::update(sai_fdb_event_t type, update.add = true; update.entry.port_name = update.port.m_alias; + update.sai_fdb_type = SAI_FDB_ENTRY_TYPE_DYNAMIC; update.type = "dynamic"; update.port.m_fdb_count++; m_portsOrch->setPort(update.port.m_alias, update.port); @@ -553,6 +559,7 @@ void FdbOrch::update(sai_fdb_event_t type, { SWSS_LOG_WARN("FdbOrch MOVE notification: mac %s is not found in bv_id 0x%" PRIx64, update.entry.mac.to_string().c_str(), entry->bv_id); + break; } else if (!m_portsOrch->getPortByBridgePortId(existing_entry->second.bridge_port_id, port_old)) { @@ -560,6 +567,43 @@ void FdbOrch::update(sai_fdb_event_t type, return; } + /* If the existing MAC is MCLAG remote, change its type to dynamic. 
*/ + if (existing_entry->second.origin == FDB_ORIGIN_MCLAG_ADVERTIZED) + { + if (existing_entry->second.bridge_port_id != bridge_port_id) + { + sai_status_t status; + sai_fdb_entry_t fdb_entry; + fdb_entry.switch_id = gSwitchId; + memcpy(fdb_entry.mac_address, entry->mac_address, sizeof(sai_mac_t)); + fdb_entry.bv_id = entry->bv_id; + sai_attribute_t attr; + vector attrs; + + attr.id = SAI_FDB_ENTRY_ATTR_ALLOW_MAC_MOVE; + attr.value.booldata = false; + attrs.push_back(attr); + + attr.id = SAI_FDB_ENTRY_ATTR_TYPE; + attr.value.s32 = SAI_FDB_ENTRY_TYPE_DYNAMIC; + attrs.push_back(attr); + + attr.id = SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID; + attr.value.oid = bridge_port_id; + attrs.push_back(attr); + + for(auto itr : attrs) + { + status = sai_fdb_api->set_fdb_entry_attribute(&fdb_entry, &itr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("macUpdate-Failed for MCLAG mac attr.id=0x%x for FDB %s in 0x%" PRIx64 "on %s, rv:%d", + itr.id, update.entry.mac.to_string().c_str(), entry->bv_id, update.port.m_alias.c_str(), status); + } + } + } + } + update.add = true; update.entry.port_name = update.port.m_alias; if (!port_old.m_alias.empty()) @@ -569,6 +613,7 @@ void FdbOrch::update(sai_fdb_event_t type, } update.port.m_fdb_count++; m_portsOrch->setPort(update.port.m_alias, update.port); + update.sai_fdb_type = SAI_FDB_ENTRY_TYPE_DYNAMIC; storeFdbEntryState(update); notify(SUBJECT_TYPE_FDB_CHANGE, &update); @@ -592,7 +637,7 @@ void FdbOrch::update(sai_fdb_event_t type, SWSS_LOG_INFO("FDB Flush: [ %s , %s ] = { port: %s }", update.entry.mac.to_string().c_str(), vlanName.c_str(), update.port.m_alias.c_str()); - handleSyncdFlushNotif(entry->bv_id, bridge_port_id, update.entry.mac); + handleSyncdFlushNotif(entry->bv_id, bridge_port_id, update.entry.mac, sai_fdb_type); break; } @@ -636,29 +681,23 @@ bool FdbOrch::getPort(const MacAddress& mac, uint16_t vlan, Port& port) return false; } - sai_fdb_entry_t entry; - entry.switch_id = gSwitchId; - memcpy(entry.mac_address, 
mac.getMac(), sizeof(sai_mac_t)); + FdbEntry entry; + entry.mac = mac; entry.bv_id = port.m_vlan_info.vlan_oid; - sai_attribute_t attr; - attr.id = SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID; - - sai_status_t status = sai_fdb_api->get_fdb_entry_attribute(&entry, 1, &attr); - if (status != SAI_STATUS_SUCCESS) + auto it = m_entries.find(entry); + if (it == m_entries.end()) { - SWSS_LOG_ERROR("Failed to get bridge port ID for FDB entry %s, rv:%d", - mac.to_string().c_str(), status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_FDB, status); - if (handle_status != task_process_status::task_success) - { - return false; - } + // This message is now expected in many cases since orchagent will process events such as + // learning new neighbor entries prior to updating the m_entries FDB cache. + SWSS_LOG_INFO("Failed to get cached bridge port ID for FDB entry %s", + mac.to_string().c_str()); + return false; } - if (!m_portsOrch->getPortByBridgePortId(attr.value.oid, port)) + if (!m_portsOrch->getPortByBridgePortId(it->second.bridge_port_id, port)) { - SWSS_LOG_ERROR("Failed to get port by bridge port ID 0x%" PRIx64, attr.value.oid); + SWSS_LOG_ERROR("Failed to get port by bridge port ID 0x%" PRIx64, it->second.bridge_port_id); return false; } @@ -1002,6 +1041,7 @@ void FdbOrch::doTask(NotificationConsumer& consumer) { uint32_t count; sai_fdb_event_notification_data_t *fdbevent = nullptr; + sai_fdb_entry_type_t sai_fdb_type = SAI_FDB_ENTRY_TYPE_DYNAMIC; sai_deserialize_fdb_event_ntf(data, count, &fdbevent); @@ -1014,11 +1054,14 @@ void FdbOrch::doTask(NotificationConsumer& consumer) if (fdbevent[i].attr[j].id == SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID) { oid = fdbevent[i].attr[j].value.oid; - break; + } + else if (fdbevent[i].attr[j].id == SAI_FDB_ENTRY_ATTR_TYPE) + { + sai_fdb_type = (sai_fdb_entry_type_t)fdbevent[i].attr[j].value.s32; } } - this->update(fdbevent[i].event_type, &fdbevent[i].fdb_entry, oid); + this->update(fdbevent[i].event_type, &fdbevent[i].fdb_entry, 
oid, sai_fdb_type); } sai_deserialize_free_fdb_event_ntf(count, fdbevent); @@ -1127,7 +1170,10 @@ void FdbOrch::updatePortOperState(const PortOperStateUpdate& update) if (update.operStatus == SAI_PORT_OPER_STATUS_DOWN) { swss::Port p = update.port; - flushFDBEntries(p.m_bridge_port_id, SAI_NULL_OBJECT_ID); + if (p.m_bridge_port_id != SAI_NULL_OBJECT_ID) + { + flushFDBEntries(p.m_bridge_port_id, SAI_NULL_OBJECT_ID); + } // Get BVID of each VLAN that this port is a member of // and call notifyObserversFDBFlush @@ -1346,6 +1392,7 @@ bool FdbOrch::addFdbEntry(const FdbEntry& entry, const string& port_name, { attr.value.s32 = (fdbData.type == "dynamic") ? SAI_FDB_ENTRY_TYPE_DYNAMIC : SAI_FDB_ENTRY_TYPE_STATIC; } + fdbData.sai_fdb_type = (sai_fdb_entry_type_t)attr.value.s32; attrs.push_back(attr); @@ -1462,6 +1509,11 @@ bool FdbOrch::addFdbEntry(const FdbEntry& entry, const string& port_name, { //If the MAC is dynamic_local change the origin accordingly //MAC is added/updated as dynamic to allow aging. 
+ SWSS_LOG_INFO("MAC-Update Modify to dynamic FDB %s in %s on from-%s:to-%s from-%s:to-%s origin-%d-to-%d", + entry.mac.to_string().c_str(), vlan.m_alias.c_str(), oldPort.m_alias.c_str(), + port_name.c_str(), oldType.c_str(), fdbData.type.c_str(), + oldOrigin, fdbData.origin); + storeFdbData.origin = FDB_ORIGIN_LEARN; storeFdbData.type = "dynamic"; } @@ -1470,8 +1522,10 @@ bool FdbOrch::addFdbEntry(const FdbEntry& entry, const string& port_name, string key = "Vlan" + to_string(vlan.m_vlan_info.vlan_id) + ":" + entry.mac.to_string(); - if ((fdbData.origin != FDB_ORIGIN_MCLAG_ADVERTIZED) && - (fdbData.origin != FDB_ORIGIN_VXLAN_ADVERTIZED)) + if (((fdbData.origin != FDB_ORIGIN_MCLAG_ADVERTIZED) && + (fdbData.origin != FDB_ORIGIN_VXLAN_ADVERTIZED)) || + ((fdbData.origin == FDB_ORIGIN_MCLAG_ADVERTIZED) && + (fdbData.type == "dynamic_local"))) { /* State-DB is updated only for Local Mac addresses */ // Write to StateDb diff --git a/orchagent/fdborch.h b/orchagent/fdborch.h index d9f7398237..9e71bc8c6b 100644 --- a/orchagent/fdborch.h +++ b/orchagent/fdborch.h @@ -36,6 +36,7 @@ struct FdbUpdate Port port; string type; bool add; + sai_fdb_entry_type_t sai_fdb_type; }; struct FdbFlushUpdate @@ -63,6 +64,7 @@ struct FdbData string remote_ip; string esi; unsigned int vni; + sai_fdb_entry_type_t sai_fdb_type; }; struct SavedFdbEntry @@ -91,7 +93,7 @@ class FdbOrch: public Orch, public Subject, public Observer } bool bake() override; - void update(sai_fdb_event_t, const sai_fdb_entry_t *, sai_object_id_t); + void update(sai_fdb_event_t, const sai_fdb_entry_t *, sai_object_id_t, const sai_fdb_entry_type_t &); void update(SubjectType type, void *cntx); bool getPort(const MacAddress&, uint16_t, Port&); @@ -111,6 +113,7 @@ class FdbOrch: public Orch, public Subject, public Observer Table m_mclagFdbStateTable; NotificationConsumer* m_flushNotificationsConsumer; NotificationConsumer* m_fdbNotificationConsumer; + shared_ptr m_notificationsDb; void doTask(Consumer& consumer); void 
doTask(NotificationConsumer& consumer); @@ -125,7 +128,8 @@ class FdbOrch: public Orch, public Subject, public Observer void notifyTunnelOrch(Port& port); void clearFdbEntry(const FdbEntry&); - void handleSyncdFlushNotif(const sai_object_id_t&, const sai_object_id_t&, const MacAddress& ); + void handleSyncdFlushNotif(const sai_object_id_t&, const sai_object_id_t&, const MacAddress&, + const sai_fdb_entry_type_t&); }; #endif /* SWSS_FDBORCH_H */ diff --git a/orchagent/flex_counter/flowcounterrouteorch.cpp b/orchagent/flex_counter/flowcounterrouteorch.cpp index b82d66e27a..9f5e6e2355 100644 --- a/orchagent/flex_counter/flowcounterrouteorch.cpp +++ b/orchagent/flex_counter/flowcounterrouteorch.cpp @@ -602,18 +602,25 @@ bool FlowCounterRouteOrch::isRouteAlreadyBound(const RoutePattern &route_pattern { SWSS_LOG_ENTER(); - auto iter = mBoundRouteCounters.find(route_pattern); - if (iter == mBoundRouteCounters.end()) + auto iter_bound = mBoundRouteCounters.find(route_pattern); + if (iter_bound != mBoundRouteCounters.end()) { - auto pending_iter = mPendingAddToFlexCntr.find(route_pattern); - if (pending_iter != mPendingAddToFlexCntr.end()) + if (iter_bound->second.find(ip_prefix) != iter_bound->second.end()) { - return pending_iter->second.find(ip_prefix) != pending_iter->second.end(); + return true; + } + } + + auto iter_pending = mPendingAddToFlexCntr.find(route_pattern); + if (iter_pending != mPendingAddToFlexCntr.end()) + { + if (iter_pending->second.find(ip_prefix) != iter_pending->second.end()) + { + return true; } - return false; } - return iter->second.find(ip_prefix) != iter->second.end(); + return false; } void FlowCounterRouteOrch::createRouteFlowCounterByPattern(const RoutePattern &route_pattern, size_t current_bound_count) @@ -636,7 +643,7 @@ void FlowCounterRouteOrch::createRouteFlowCounterByPattern(const RoutePattern &r { return; } - + if (route_pattern.is_match(route_pattern.vrf_id, entry.first)) { if (isRouteAlreadyBound(route_pattern, entry.first)) @@ 
-885,7 +892,7 @@ void FlowCounterRouteOrch::handleRouteRemove(sai_object_id_t vrf_id, const IpPre { return; } - + for (const auto &route_pattern : mRoutePatternSet) { if (route_pattern.is_match(vrf_id, ip_prefix)) @@ -953,6 +960,7 @@ bool FlowCounterRouteOrch::parseRouteKeyForRoutePattern(const std::string &key, else { vrf_name = key.substr(0, found); + ip_prefix = IpPrefix(key.substr(found+1)); auto *vrf_orch = gDirectory.get(); if (!key.compare(0, strlen(VRF_PREFIX), VRF_PREFIX) && vrf_orch->isVRFexists(vrf_name)) { @@ -966,8 +974,6 @@ bool FlowCounterRouteOrch::parseRouteKeyForRoutePattern(const std::string &key, return false; } } - - ip_prefix = IpPrefix(key.substr(found+1)); } return true; diff --git a/orchagent/flex_counter/flowcounterrouteorch.h b/orchagent/flex_counter/flowcounterrouteorch.h index 1ef0452d4a..38ac413ab9 100644 --- a/orchagent/flex_counter/flowcounterrouteorch.h +++ b/orchagent/flex_counter/flowcounterrouteorch.h @@ -141,7 +141,7 @@ class FlowCounterRouteOrch : public Orch RouterFlowCounterCache mBoundRouteCounters; /* Cache for those route flow counters pending update to FLEX DB */ RouterFlowCounterCache mPendingAddToFlexCntr; - /* IP2ME, MUX */ // TODO: remove MUX support + /* IP2ME */ MiscRouteEntryMap mMiscRoutes; // Save here for route flow counter /* Flex counter manager for route flow counter */ FlexCounterManager mRouteFlowCounterMgr; diff --git a/orchagent/flexcounterorch.cpp b/orchagent/flexcounterorch.cpp index e30b9a0cb1..bc974181f1 100644 --- a/orchagent/flexcounterorch.cpp +++ b/orchagent/flexcounterorch.cpp @@ -29,7 +29,9 @@ extern FlowCounterRouteOrch *gFlowCounterRouteOrch; #define PORT_KEY "PORT" #define PORT_BUFFER_DROP_KEY "PORT_BUFFER_DROP" #define QUEUE_KEY "QUEUE" +#define QUEUE_WATERMARK "QUEUE_WATERMARK" #define PG_WATERMARK_KEY "PG_WATERMARK" +#define PG_DROP_KEY "PG_DROP" #define RIF_KEY "RIF" #define ACL_KEY "ACL" #define TUNNEL_KEY "TUNNEL" @@ -65,6 +67,7 @@ FlexCounterOrch::FlexCounterOrch(DBConnector *db, 
vector &tableNames): m_flexCounterConfigTable(db, CFG_FLEX_COUNTER_TABLE_NAME), m_bufferQueueConfigTable(db, CFG_BUFFER_QUEUE_TABLE_NAME), m_bufferPgConfigTable(db, CFG_BUFFER_PG_TABLE_NAME), + m_deviceMetadataConfigTable(db, CFG_DEVICE_METADATA_TABLE_NAME), m_flexCounterDb(new DBConnector("FLEX_COUNTER_DB", 0)), m_flexCounterGroupTable(new ProducerTable(m_flexCounterDb.get(), FLEX_COUNTER_GROUP_TABLE)), m_gbflexCounterDb(new DBConnector("GB_FLEX_COUNTER_DB", 0)), @@ -162,11 +165,25 @@ void FlexCounterOrch::doTask(Consumer &consumer) { gPortsOrch->generateQueueMap(getQueueConfigurations()); m_queue_enabled = true; + gPortsOrch->addQueueFlexCounters(getQueueConfigurations()); + } + else if(key == QUEUE_WATERMARK) + { + gPortsOrch->generateQueueMap(getQueueConfigurations()); + m_queue_watermark_enabled = true; + gPortsOrch->addQueueWatermarkFlexCounters(getQueueConfigurations()); + } + else if(key == PG_DROP_KEY) + { + gPortsOrch->generatePriorityGroupMap(getPgConfigurations()); + m_pg_enabled = true; + gPortsOrch->addPriorityGroupFlexCounters(getPgConfigurations()); } else if(key == PG_WATERMARK_KEY) { gPortsOrch->generatePriorityGroupMap(getPgConfigurations()); m_pg_watermark_enabled = true; + gPortsOrch->addPriorityGroupWatermarkFlexCounters(getPgConfigurations()); } } if(gIntfsOrch && (key == RIF_KEY) && (value == "enable")) @@ -250,14 +267,24 @@ bool FlexCounterOrch::getPortBufferDropCountersState() const return m_port_buffer_drop_counter_enabled; } -bool FlexCounterOrch::getPgWatermarkCountersState() const +bool FlexCounterOrch::getQueueCountersState() const { - return m_pg_watermark_enabled; + return m_queue_enabled; } -bool FlexCounterOrch::getQueueCountersState() const +bool FlexCounterOrch::getQueueWatermarkCountersState() const { - return m_queue_enabled; + return m_queue_watermark_enabled; +} + +bool FlexCounterOrch::getPgCountersState() const +{ + return m_pg_enabled; +} + +bool FlexCounterOrch::getPgWatermarkCountersState() const +{ + return 
m_pg_watermark_enabled; } bool FlexCounterOrch::bake() @@ -302,11 +329,41 @@ bool FlexCounterOrch::bake() return consumer->addToSync(entries); } +static bool isCreateOnlyConfigDbBuffers(Table& deviceMetadataConfigTable) +{ + std::string createOnlyConfigDbBuffersValue; + + try + { + if (deviceMetadataConfigTable.hget("localhost", "create_only_config_db_buffers", createOnlyConfigDbBuffersValue)) + { + if (createOnlyConfigDbBuffersValue == "true") + { + return true; + } + } + } + catch(const std::system_error& e) + { + SWSS_LOG_ERROR("System error: %s", e.what()); + } + + return false; +} + map FlexCounterOrch::getQueueConfigurations() { SWSS_LOG_ENTER(); map queuesStateVector; + + if (!isCreateOnlyConfigDbBuffers(m_deviceMetadataConfigTable)) + { + FlexCounterQueueStates flexCounterQueueState(0); + queuesStateVector.insert(make_pair(createAllAvailableBuffersStr, flexCounterQueueState)); + return queuesStateVector; + } + std::vector portQueueKeys; m_bufferQueueConfigTable.getKeys(portQueueKeys); @@ -361,6 +418,14 @@ map FlexCounterOrch::getPgConfigurations() SWSS_LOG_ENTER(); map pgsStateVector; + + if (!isCreateOnlyConfigDbBuffers(m_deviceMetadataConfigTable)) + { + FlexCounterPgStates flexCounterPgState(0); + pgsStateVector.insert(make_pair(createAllAvailableBuffersStr, flexCounterPgState)); + return pgsStateVector; + } + std::vector portPgKeys; m_bufferPgConfigTable.getKeys(portPgKeys); diff --git a/orchagent/flexcounterorch.h b/orchagent/flexcounterorch.h index a6e7527a05..06a1ddadbc 100644 --- a/orchagent/flexcounterorch.h +++ b/orchagent/flexcounterorch.h @@ -10,6 +10,8 @@ extern "C" { #include "sai.h" } +const std::string createAllAvailableBuffersStr = "create_all_available_buffers"; + class FlexCounterQueueStates { public: @@ -42,8 +44,10 @@ class FlexCounterOrch: public Orch virtual ~FlexCounterOrch(void); bool getPortCountersState() const; bool getPortBufferDropCountersState() const; - bool getPgWatermarkCountersState() const; bool getQueueCountersState() 
const; + bool getQueueWatermarkCountersState() const; + bool getPgCountersState() const; + bool getPgWatermarkCountersState() const; std::map getQueueConfigurations(); std::map getPgConfigurations(); bool getHostIfTrapCounterState() const {return m_hostif_trap_counter_enabled;} @@ -57,13 +61,16 @@ class FlexCounterOrch: public Orch std::shared_ptr m_gbflexCounterGroupTable = nullptr; bool m_port_counter_enabled = false; bool m_port_buffer_drop_counter_enabled = false; - bool m_pg_watermark_enabled = false; bool m_queue_enabled = false; + bool m_queue_watermark_enabled = false; + bool m_pg_enabled = false; + bool m_pg_watermark_enabled = false; bool m_hostif_trap_counter_enabled = false; bool m_route_flow_counter_enabled = false; Table m_flexCounterConfigTable; Table m_bufferQueueConfigTable; Table m_bufferPgConfigTable; + Table m_deviceMetadataConfigTable; }; #endif diff --git a/orchagent/intfsorch.cpp b/orchagent/intfsorch.cpp index a8bcafa1e3..2a5e34ff93 100644 --- a/orchagent/intfsorch.cpp +++ b/orchagent/intfsorch.cpp @@ -183,7 +183,7 @@ void IntfsOrch::increaseRouterIntfsRefCount(const string &alias) SWSS_LOG_ENTER(); m_syncdIntfses[alias].ref_count++; - SWSS_LOG_DEBUG("Router interface %s ref count is increased to %d", + SWSS_LOG_INFO("Router interface %s ref count is increased to %d", alias.c_str(), m_syncdIntfses[alias].ref_count); } @@ -192,7 +192,7 @@ void IntfsOrch::decreaseRouterIntfsRefCount(const string &alias) SWSS_LOG_ENTER(); m_syncdIntfses[alias].ref_count--; - SWSS_LOG_DEBUG("Router interface %s ref count is decreased to %d", + SWSS_LOG_INFO("Router interface %s ref count is decreased to %d", alias.c_str(), m_syncdIntfses[alias].ref_count); } @@ -368,6 +368,21 @@ bool IntfsOrch::setIntfVlanFloodType(const Port &port, sai_vlan_flood_control_ty } } + // Also set ipv6 multicast flood type + attr.id = SAI_VLAN_ATTR_UNKNOWN_MULTICAST_FLOOD_CONTROL_TYPE; + attr.value.s32 = vlan_flood_type; + + status = 
sai_vlan_api->set_vlan_attribute(port.m_vlan_info.vlan_oid, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set multicast flood type for VLAN %u, rv:%d", port.m_vlan_info.vlan_id, status); + task_process_status handle_status = handleSaiSetStatus(SAI_API_VLAN, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + return true; } @@ -470,6 +485,11 @@ bool IntfsOrch::setIntf(const string& alias, sai_object_id_t vrf_id, const IpPre { SWSS_LOG_ENTER(); + if (m_removingIntfses.find(alias) != m_removingIntfses.end()) + { + return false; + } + Port port; gPortsOrch->getPort(alias, port); @@ -691,7 +711,7 @@ void IntfsOrch::doTask(Consumer &consumer) MacAddress mac; uint32_t mtu = 0; - bool adminUp; + bool adminUp = false; bool adminStateChanged = false; uint32_t nat_zone_id = 0; string proxy_arp = ""; @@ -860,10 +880,11 @@ void IntfsOrch::doTask(Consumer &consumer) { if (!ip_prefix_in_key && isSubIntf) { - if (adminStateChanged == false) + if (!adminStateChanged) { adminUp = port.m_admin_state_up; } + if (!gPortsOrch->addSubPort(port, alias, vlan, adminUp, mtu)) { it++; @@ -891,6 +912,12 @@ void IntfsOrch::doTask(Consumer &consumer) it++; continue; } + + if (!adminStateChanged) + { + adminUp = port.m_admin_state_up; + } + if (!vnet_orch->setIntf(alias, vnet_name, ip_prefix_in_key ? &ip_prefix : nullptr, adminUp, mtu)) { it++; @@ -904,7 +931,7 @@ void IntfsOrch::doTask(Consumer &consumer) } else { - if (adminStateChanged == false) + if (!adminStateChanged) { adminUp = port.m_admin_state_up; } @@ -1076,10 +1103,12 @@ void IntfsOrch::doTask(Consumer &consumer) { if (removeIntf(alias, port.m_vr_id, ip_prefix_in_key ? 
&ip_prefix : nullptr)) { + m_removingIntfses.erase(alias); it = consumer.m_toSync.erase(it); } else { + m_removingIntfses.insert(alias); it++; continue; } @@ -1271,13 +1300,12 @@ bool IntfsOrch::removeRouterIntfs(Port &port) if (m_syncdIntfses[port.m_alias].ref_count > 0) { - SWSS_LOG_NOTICE("Router interface is still referenced"); + SWSS_LOG_NOTICE("Router interface %s is still referenced with ref count %d", port.m_alias.c_str(), m_syncdIntfses[port.m_alias].ref_count); return false; } const auto id = sai_serialize_object_id(port.m_rif_id); removeRifFromFlexCounter(id, port.m_alias); - cleanUpRifFromCounterDb(id, port.m_alias); sai_status_t status = sai_router_intfs_api->remove_router_interface(port.m_rif_id); if (status != SAI_STATUS_SUCCESS) @@ -1500,45 +1528,11 @@ void IntfsOrch::removeRifFromFlexCounter(const string &id, const string &name) SWSS_LOG_DEBUG("Unregistered interface %s from Flex counter", name.c_str()); } -/* - TODO A race condition can exist when swss removes the counter from COUNTERS DB - and at the same time syncd is inserting a new entry in COUNTERS DB. 
Therefore - all the rif counters cleanup code should move to syncd -*/ -void IntfsOrch::cleanUpRifFromCounterDb(const string &id, const string &name) -{ - SWSS_LOG_ENTER(); - string counter_key = getRifCounterTableKey(id); - string rate_key = getRifRateTableKey(id); - string rate_init_key = getRifRateInitTableKey(id); - m_counter_db->del(counter_key); - m_counter_db->del(rate_key); - m_counter_db->del(rate_init_key); - SWSS_LOG_NOTICE("CleanUp interface %s oid %s from counter db", name.c_str(),id.c_str()); -} - string IntfsOrch::getRifFlexCounterTableKey(string key) { return string(RIF_STAT_COUNTER_FLEX_COUNTER_GROUP) + ":" + key; } -string IntfsOrch::getRifCounterTableKey(string key) -{ - return "COUNTERS:" + key; -} - -string IntfsOrch::getRifRateTableKey(string key) -{ - return "RATES:" + key; -} - -string IntfsOrch::getRifRateInitTableKey(string key) -{ - return "RATES:" + key + ":RIF"; -} - - - void IntfsOrch::generateInterfaceMap() { m_updateMapsTimer->start(); diff --git a/orchagent/intfsorch.h b/orchagent/intfsorch.h index ba28c8dde6..ea15ada14b 100644 --- a/orchagent/intfsorch.h +++ b/orchagent/intfsorch.h @@ -92,11 +92,9 @@ class IntfsOrch : public Orch unique_ptr m_flexCounterTable; unique_ptr m_flexCounterGroupTable; + std::set m_removingIntfses; + std::string getRifFlexCounterTableKey(std::string s); - std::string getRifCounterTableKey(std::string s); - std::string getRifRateTableKey(std::string s); - std::string getRifRateInitTableKey(std::string s); - void cleanUpRifFromCounterDb(const string &id, const string &name); bool addRouterIntfs(sai_object_id_t vrf_id, Port &port, string loopbackAction); bool removeRouterIntfs(Port &port); diff --git a/orchagent/main.cpp b/orchagent/main.cpp index 5a074450da..0add517a05 100644 --- a/orchagent/main.cpp +++ b/orchagent/main.cpp @@ -19,8 +19,6 @@ extern "C" { #include #include -#include "timestamp.h" - #include #include @@ -52,25 +50,14 @@ MacAddress gVxlanMacAddress; extern size_t gMaxBulkSize; #define 
DEFAULT_BATCH_SIZE 128 -int gBatchSize = DEFAULT_BATCH_SIZE; - -bool gSairedisRecord = true; -bool gSwssRecord = true; -bool gResponsePublisherRecord = false; -bool gLogRotate = false; -bool gSaiRedisLogRotate = false; -bool gResponsePublisherLogRotate = false; +extern int gBatchSize; + bool gSyncMode = false; sai_redis_communication_mode_t gRedisCommunicationMode = SAI_REDIS_COMMUNICATION_MODE_REDIS_ASYNC; string gAsicInstance; extern bool gIsNatSupported; -ofstream gRecordOfs; -string gRecordFile; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; - #define SAIREDIS_RECORD_ENABLE 0x1 #define SWSS_RECORD_ENABLE (0x1 << 1) #define RESPONSE_PUBLISHER_RECORD_ENABLE (0x1 << 2) @@ -84,7 +71,7 @@ string gMyAsicName = ""; void usage() { - cout << "usage: orchagent [-h] [-r record_type] [-d record_location] [-f swss_rec_filename] [-j sairedis_rec_filename] [-b batch_size] [-m MAC] [-i INST_ID] [-s] [-z mode] [-k bulk_size]" << endl; + cout << "usage: orchagent [-h] [-r record_type] [-d record_location] [-f swss_rec_filename] [-j sairedis_rec_filename] [-b batch_size] [-m MAC] [-i INST_ID] [-s] [-z mode] [-k bulk_size] [-q zmq_server_address]" << endl; cout << " -h: display this message" << endl; cout << " -r record_type: record orchagent logs with type (default 3)" << endl; cout << " Bit 0: sairedis.rec, Bit 1: swss.rec, Bit 2: responsepublisher.rec. For example:" << endl; @@ -102,6 +89,7 @@ void usage() cout << " -f swss_rec_filename: swss record log filename(default 'swss.rec')" << endl; cout << " -j sairedis_rec_filename: sairedis record log filename(default sairedis.rec)" << endl; cout << " -k max bulk size in bulk mode (default 1000)" << endl; + cout << " -q zmq_server_address: ZMQ server address (default disable ZMQ)" << endl; } void sighup_handler(int signo) @@ -109,9 +97,9 @@ void sighup_handler(int signo) /* * Don't do any logging since they are using mutexes. 
*/ - gLogRotate = true; - gSaiRedisLogRotate = true; - gResponsePublisherLogRotate = true; + Recorder::Instance().swss.setRotate(true); + Recorder::Instance().sairedis.setRotate(true); + Recorder::Instance().respub.setRotate(true); } void syncd_apply_view() @@ -127,7 +115,7 @@ void syncd_apply_view() if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to notify syncd APPLY_VIEW %d", status); - exit(EXIT_FAILURE); + handleSaiFailure(true); } } @@ -171,13 +159,21 @@ void getCfgSwitchType(DBConnector *cfgDb, string &switch_type) { Table cfgDeviceMetaDataTable(cfgDb, CFG_DEVICE_METADATA_TABLE_NAME); - if (!cfgDeviceMetaDataTable.hget("localhost", "switch_type", switch_type)) + try { - //Switch type is not configured. Consider it default = "switch" (regular switch) + if (!cfgDeviceMetaDataTable.hget("localhost", "switch_type", switch_type)) + { + //Switch type is not configured. Consider it default = "switch" (regular switch) + switch_type = "switch"; + } + } + catch(const std::system_error& e) + { + SWSS_LOG_ERROR("System error: %s", e.what()); switch_type = "switch"; } - if (switch_type != "voq" && switch_type != "fabric" && switch_type != "chassis-packet" && switch_type != "switch") + if (switch_type != "voq" && switch_type != "fabric" && switch_type != "chassis-packet" && switch_type != "switch" && switch_type != "dpu") { SWSS_LOG_ERROR("Invalid switch type %s configured", switch_type.c_str()); //If configured switch type is none of the supported, assume regular switch @@ -197,64 +193,72 @@ bool getSystemPortConfigList(DBConnector *cfgDb, DBConnector *appDb, vector zmq_server = nullptr; + if (enable_zmq) + { + SWSS_LOG_NOTICE("Instantiate ZMQ server : %s", zmq_server_address.c_str()); + zmq_server = make_shared(zmq_server_address.c_str()); + } + else + { + SWSS_LOG_NOTICE("ZMQ disabled"); + } + + // Get switch_type + getCfgSwitchType(&config_db, gMySwitchType); sai_attribute_t attr; vector attrs; @@ -431,47 +489,12 @@ int main(int argc, char **argv) attr.id = 
SAI_SWITCH_ATTR_INIT_SWITCH; attr.value.booldata = true; attrs.push_back(attr); - attr.id = SAI_SWITCH_ATTR_FDB_EVENT_NOTIFY; - attr.value.ptr = (void *)on_fdb_event; - attrs.push_back(attr); - // Initialize recording parameters. - gSairedisRecord = - (record_type & SAIREDIS_RECORD_ENABLE) == SAIREDIS_RECORD_ENABLE; - gSwssRecord = (record_type & SWSS_RECORD_ENABLE) == SWSS_RECORD_ENABLE; - gResponsePublisherRecord = - (record_type & RESPONSE_PUBLISHER_RECORD_ENABLE) == - RESPONSE_PUBLISHER_RECORD_ENABLE; - - /* Disable/enable SwSS recording */ - if (gSwssRecord) + if (gMySwitchType != "dpu") { - gRecordFile = record_location + "/" + swss_rec_filename; - gRecordOfs.open(gRecordFile, std::ofstream::out | std::ofstream::app); - if (!gRecordOfs.is_open()) - { - SWSS_LOG_ERROR("Failed to open SwSS recording file %s", gRecordFile.c_str()); - exit(EXIT_FAILURE); - } - gRecordOfs << getTimestamp() << "|recording started" << endl; - } - - // Disable/Enable response publisher recording. - if (gResponsePublisherRecord) - { - gResponsePublisherRecordFile = record_location + "/" + responsepublisher_rec_filename; - gResponsePublisherRecordOfs.open(gResponsePublisherRecordFile, std::ofstream::out | std::ofstream::app); - if (!gResponsePublisherRecordOfs.is_open()) - { - SWSS_LOG_ERROR("Failed to open Response Publisher recording file %s", - gResponsePublisherRecordFile.c_str()); - gResponsePublisherRecord = false; - } - else - { - gResponsePublisherRecordOfs << getTimestamp() << "|recording started" - << endl; - } + attr.id = SAI_SWITCH_ATTR_FDB_EVENT_NOTIFY; + attr.value.ptr = (void *)on_fdb_event; + attrs.push_back(attr); } attr.id = SAI_SWITCH_ATTR_PORT_STATE_CHANGE_NOTIFY; @@ -482,14 +505,6 @@ int main(int argc, char **argv) attr.value.ptr = (void *)on_switch_shutdown_request; attrs.push_back(attr); - // Instantiate database connectors - DBConnector appl_db("APPL_DB", 0); - DBConnector config_db("CONFIG_DB", 0); - DBConnector state_db("STATE_DB", 0); - - // Get switch_type - 
getCfgSwitchType(&config_db, gMySwitchType); - if (gMySwitchType != "fabric" && gMacAddress) { attr.id = SAI_SWITCH_ATTR_SRC_MAC_ADDRESS; @@ -563,6 +578,10 @@ int main(int argc, char **argv) attr.id = SAI_SWITCH_ATTR_TYPE; attr.value.u32 = SAI_SWITCH_TYPE_FABRIC; attrs.push_back(attr); + + attr.id = SAI_SWITCH_ATTR_SWITCH_ID; + attr.value.u32 = gVoqMySwitchId; + attrs.push_back(attr); } /* Must be last Attribute */ @@ -570,7 +589,16 @@ int main(int argc, char **argv) attr.value.u64 = gSwitchId; attrs.push_back(attr); - if (gMySwitchType == "voq" || gMySwitchType == "fabric" || gMySwitchType == "chassis-packet") + auto delay_factor = 1; + bool asan_enabled = false; + + if (getenv("ASAN_OPTIONS")) + { + asan_enabled = true; + delay_factor = 2; + } + + if (gMySwitchType == "voq" || gMySwitchType == "fabric" || gMySwitchType == "chassis-packet" || gMySwitchType == "dpu" || asan_enabled) { /* We set this long timeout in order for orchagent to wait enough time for * response from syncd. It is needed since switch create takes more time @@ -578,7 +606,7 @@ int main(int argc, char **argv) * and systems ports to initialize */ - if (gMySwitchType == "voq" || gMySwitchType == "chassis-packet") + if (gMySwitchType == "voq" || gMySwitchType == "chassis-packet" || gMySwitchType == "dpu") { attr.value.u64 = (5 * SAI_REDIS_DEFAULT_SYNC_OPERATION_RESPONSE_TIMEOUT); } @@ -586,7 +614,12 @@ int main(int argc, char **argv) { attr.value.u64 = (10 * SAI_REDIS_DEFAULT_SYNC_OPERATION_RESPONSE_TIMEOUT); } + else + { + attr.value.u64 = SAI_REDIS_DEFAULT_SYNC_OPERATION_RESPONSE_TIMEOUT; + } + attr.value.u64 = attr.value.u64*delay_factor; attr.id = SAI_REDIS_SWITCH_ATTR_SYNC_OPERATION_RESPONSE_TIMEOUT; status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); @@ -604,11 +637,11 @@ int main(int argc, char **argv) if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to create a switch, rv:%d", status); - exit(EXIT_FAILURE); + handleSaiFailure(true); } SWSS_LOG_NOTICE("Create a 
switch, id:%" PRIu64, gSwitchId); - if (gMySwitchType == "voq" || gMySwitchType == "fabric" || gMySwitchType == "chassis-packet") + if (gMySwitchType == "voq" || gMySwitchType == "fabric" || gMySwitchType == "chassis-packet" || gMySwitchType == "dpu") { /* Set syncd response timeout back to the default value */ attr.id = SAI_REDIS_SWITCH_ATTR_SYNC_OPERATION_RESPONSE_TIMEOUT; @@ -635,7 +668,7 @@ int main(int argc, char **argv) if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to get MAC address from switch, rv:%d", status); - exit(EXIT_FAILURE); + handleSaiFailure(true); } else { @@ -650,7 +683,7 @@ int main(int argc, char **argv) if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Fail to get switch virtual router ID %d", status); - exit(EXIT_FAILURE); + handleSaiFailure(true); } gVirtualRouterId = attr.value.oid; @@ -692,7 +725,7 @@ int main(int argc, char **argv) if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to create underlay router interface %d", status); - exit(EXIT_FAILURE); + handleSaiFailure(true); } SWSS_LOG_NOTICE("Created underlay router interface ID %" PRIx64, gUnderlayIfId); @@ -705,15 +738,17 @@ int main(int argc, char **argv) shared_ptr orchDaemon; if (gMySwitchType != "fabric") { - orchDaemon = make_shared(&appl_db, &config_db, &state_db, chassis_app_db.get()); + orchDaemon = make_shared(&appl_db, &config_db, &state_db, chassis_app_db.get(), zmq_server.get()); if (gMySwitchType == "voq") { orchDaemon->setFabricEnabled(true); + orchDaemon->setFabricPortStatEnabled(true); + orchDaemon->setFabricQueueStatEnabled(false); } } else { - orchDaemon = make_shared(&appl_db, &config_db, &state_db, chassis_app_db.get()); + orchDaemon = make_shared(&appl_db, &config_db, &state_db, chassis_app_db.get(), zmq_server.get()); } if (!orchDaemon->init()) diff --git a/orchagent/mirrororch.cpp b/orchagent/mirrororch.cpp index 75ff671fc8..d2981330e4 100644 --- a/orchagent/mirrororch.cpp +++ b/orchagent/mirrororch.cpp @@ -329,7 +329,7 @@ bool 
MirrorOrch::validateSrcPortList(const string& srcPortList) vector portv; int portCount = 0; m_portsOrch->getLagMember(port, portv); - for (const auto p : portv) + for (const auto &p : portv) { if (checkPortExistsInSrcPortList(p.m_alias, srcPortList)) { @@ -361,7 +361,10 @@ bool MirrorOrch::isHwResourcesAvailable() ); if (status != SAI_STATUS_SUCCESS) { - if (status == SAI_STATUS_NOT_SUPPORTED) + if ((status == SAI_STATUS_NOT_SUPPORTED) || + (status == SAI_STATUS_NOT_IMPLEMENTED) || + SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || + SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) { SWSS_LOG_WARN("Mirror session resource availability monitoring is not supported. Skipping ..."); return true; @@ -583,13 +586,30 @@ void MirrorOrch::setSessionState(const string& name, const MirrorEntry& session, if (attr.empty() || attr == MIRROR_SESSION_MONITOR_PORT) { Port port; - m_portsOrch->getPort(session.neighborInfo.portId, port); + if ((gMySwitchType == "voq") && (session.type == MIRROR_SESSION_ERSPAN)) + { + if (!m_portsOrch->getRecircPort(port, Port::Role::Rec)) + { + SWSS_LOG_ERROR("Failed to get recirc port for mirror session %s", name.c_str()); + return; + } + } + else + { + m_portsOrch->getPort(session.neighborInfo.portId, port); + } fvVector.emplace_back(MIRROR_SESSION_MONITOR_PORT, port.m_alias); } if (attr.empty() || attr == MIRROR_SESSION_DST_MAC_ADDRESS) { - value = session.neighborInfo.mac.to_string(); + if ((gMySwitchType == "voq") && (session.type == MIRROR_SESSION_ERSPAN)) + { + value = gMacAddress.to_string(); + } else + { + value = session.neighborInfo.mac.to_string(); + } fvVector.emplace_back(MIRROR_SESSION_DST_MAC_ADDRESS, value); } @@ -808,7 +828,7 @@ bool MirrorOrch::setUnsetPortMirror(Port port, { vector portv; m_portsOrch->getLagMember(port, portv); - for (const auto p : portv) + for (const auto &p : portv) { if (p.m_type != Port::PHY) { @@ -926,9 +946,9 @@ bool MirrorOrch::activateSession(const string& name, MirrorEntry& session) if (gMySwitchType == "voq") { 
Port recirc_port; - if (!m_portsOrch->getRecircPort(recirc_port, "Rec")) + if (!m_portsOrch->getRecircPort(recirc_port, Port::Role::Rec)) { - SWSS_LOG_ERROR("Failed to get recirc prot"); + SWSS_LOG_ERROR("Failed to get recirc port"); return false; } attr.value.oid = recirc_port.m_port_id; @@ -999,9 +1019,9 @@ bool MirrorOrch::activateSession(const string& name, MirrorEntry& session) attr.id = SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS; // Use router mac as mirror dst mac in voq switch. - if (gMySwitchType == "voq") + if ((gMySwitchType == "voq") && (session.type == MIRROR_SESSION_ERSPAN)) { - memcpy(attr.value.mac, gMacAddress.getMac(), sizeof(sai_mac_t)); + memcpy(attr.value.mac, gMacAddress.getMac(), sizeof(sai_mac_t)); } else { @@ -1115,13 +1135,19 @@ bool MirrorOrch::updateSessionDstMac(const string& name, MirrorEntry& session) sai_attribute_t attr; attr.id = SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS; - memcpy(attr.value.mac, session.neighborInfo.mac.getMac(), sizeof(sai_mac_t)); + if ((gMySwitchType == "voq") && (session.type == MIRROR_SESSION_ERSPAN)) + { + memcpy(attr.value.mac, gMacAddress.getMac(), sizeof(sai_mac_t)); + } else + { + memcpy(attr.value.mac, session.neighborInfo.mac.getMac(), sizeof(sai_mac_t)); + } sai_status_t status = sai_mirror_api->set_mirror_session_attribute(session.sessionId, &attr); if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to update mirror session %s destination MAC to %s, rv:%d", - name.c_str(), session.neighborInfo.mac.to_string().c_str(), status); + name.c_str(), sai_serialize_mac(attr.value.mac).c_str(), status); task_process_status handle_status = handleSaiSetStatus(SAI_API_MIRROR, status); if (handle_status != task_success) { @@ -1130,7 +1156,7 @@ bool MirrorOrch::updateSessionDstMac(const string& name, MirrorEntry& session) } SWSS_LOG_NOTICE("Update mirror session %s destination MAC to %s", - name.c_str(), session.neighborInfo.mac.to_string().c_str()); + name.c_str(), sai_serialize_mac(attr.value.mac).c_str()); 
setSessionState(name, session, MIRROR_SESSION_DST_MAC_ADDRESS); @@ -1148,7 +1174,20 @@ bool MirrorOrch::updateSessionDstPort(const string& name, MirrorEntry& session) sai_attribute_t attr; attr.id = SAI_MIRROR_SESSION_ATTR_MONITOR_PORT; - attr.value.oid = session.neighborInfo.portId; + // Set monitor port to recirc port in voq switch. + if ((gMySwitchType == "voq") && (session.type == MIRROR_SESSION_ERSPAN)) + { + if (!m_portsOrch->getRecircPort(port, Port::Role::Rec)) + { + SWSS_LOG_ERROR("Failed to get recirc port for mirror session %s", name.c_str()); + return false; + } + attr.value.oid = port.m_port_id; + } + else + { + attr.value.oid = session.neighborInfo.portId; + } sai_status_t status = sai_mirror_api-> set_mirror_session_attribute(session.sessionId, &attr); diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index 296d5a3cf3..ea3ade347c 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -24,6 +24,7 @@ #include "routeorch.h" #include "fdborch.h" #include "qosorch.h" +#include "warm_restart.h" /* Global variables */ extern Directory gDirectory; @@ -116,6 +117,10 @@ static sai_status_t create_route(IpPrefix &pfx, sai_object_id_t nh) sai_status_t status = sai_route_api->create_route_entry(&route_entry, (uint32_t)attrs.size(), attrs.data()); if (status != SAI_STATUS_SUCCESS) { + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) { + SWSS_LOG_NOTICE("Tunnel route to %s already exists", pfx.to_string().c_str()); + return SAI_STATUS_SUCCESS; + } SWSS_LOG_ERROR("Failed to create tunnel route %s,nh %" PRIx64 " rv:%d", pfx.getIp().to_string().c_str(), nh, status); return status; @@ -145,6 +150,10 @@ static sai_status_t remove_route(IpPrefix &pfx) sai_status_t status = sai_route_api->remove_route_entry(&route_entry); if (status != SAI_STATUS_SUCCESS) { + if (status == SAI_STATUS_ITEM_NOT_FOUND) { + SWSS_LOG_NOTICE("Tunnel route to %s already removed", pfx.to_string().c_str()); + return SAI_STATUS_SUCCESS; + } SWSS_LOG_ERROR("Failed to remove tunnel 
route %s, rv:%d", pfx.getIp().to_string().c_str(), status); return status; @@ -163,6 +172,34 @@ static sai_status_t remove_route(IpPrefix &pfx) return status; } +/** + * @brief sets the given route to point to the given nexthop + * @param pfx IpPrefix of the route + * @param nexthop NextHopKey of the nexthop + * @return SAI_STATUS_SUCCESS on success + */ +static sai_status_t set_route(const IpPrefix& pfx, sai_object_id_t next_hop_id) +{ + /* set route entry to point to nh */ + sai_route_entry_t route_entry; + sai_attribute_t route_attr; + + route_entry.vr_id = gVirtualRouterId; + route_entry.switch_id = gSwitchId; + copy(route_entry.destination, pfx); + + route_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + route_attr.value.oid = next_hop_id; + + sai_status_t status = sai_route_api->set_route_entry_attribute(&route_entry, &route_attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set route entry %s nh %" PRIx64 " rv:%d", + pfx.to_string().c_str(), next_hop_id, status); + } + return status; +} + static sai_object_id_t create_tunnel( const IpAddress* p_dst_ip, const IpAddress* p_src_ip, @@ -212,6 +249,10 @@ static sai_object_id_t create_tunnel( attr.value.s32 = SAI_TUNNEL_TTL_MODE_PIPE_MODEL; tunnel_attrs.push_back(attr); + attr.id = SAI_TUNNEL_ATTR_DECAP_TTL_MODE; + attr.value.s32 = SAI_TUNNEL_TTL_MODE_PIPE_MODEL; + tunnel_attrs.push_back(attr); + if (dscp_mode_name == "uniform" || dscp_mode_name == "pipe") { sai_tunnel_dscp_mode_t dscp_mode; @@ -226,6 +267,10 @@ static sai_object_id_t create_tunnel( attr.id = SAI_TUNNEL_ATTR_ENCAP_DSCP_MODE; attr.value.s32 = dscp_mode; tunnel_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_ATTR_DECAP_DSCP_MODE; + attr.value.s32 = dscp_mode; + tunnel_attrs.push_back(attr); } attr.id = SAI_TUNNEL_ATTR_LOOPBACK_PACKET_ACTION; @@ -351,8 +396,8 @@ static bool remove_nh_tunnel(sai_object_id_t nh_id, IpAddress& ipAddr) return true; } -MuxCable::MuxCable(string name, IpPrefix& srv_ip4, IpPrefix& srv_ip6, IpAddress 
peer_ip, std::set skip_neighbors) - :mux_name_(name), srv_ip4_(srv_ip4), srv_ip6_(srv_ip6), peer_ip4_(peer_ip), skip_neighbors_(skip_neighbors) +MuxCable::MuxCable(string name, IpPrefix& srv_ip4, IpPrefix& srv_ip6, IpAddress peer_ip, MuxCableType cable_type) + :mux_name_(name), srv_ip4_(srv_ip4), srv_ip6_(srv_ip6), peer_ip4_(peer_ip), cable_type_(cable_type) { mux_orch_ = gDirectory.get(); mux_cb_orch_ = gDirectory.get(); @@ -365,9 +410,18 @@ MuxCable::MuxCable(string name, IpPrefix& srv_ip4, IpPrefix& srv_ip6, IpAddress state_machine_handlers_.insert(handler_pair(MUX_STATE_INIT_STANDBY, &MuxCable::stateStandby)); state_machine_handlers_.insert(handler_pair(MUX_STATE_ACTIVE_STANDBY, &MuxCable::stateStandby)); - /* Set initial state to "standby" */ - stateStandby(); - state_ = MuxState::MUX_STATE_STANDBY; + if (WarmStart::isWarmStart()) { + /* Warmboot case, Set initial state to "init" + * State will be updated to previous value upon APP DB sync + */ + state_ = MuxState::MUX_STATE_INIT; + } + else + { + /* Set initial state to "standby" */ + stateStandby(); + state_ = MuxState::MUX_STATE_STANDBY; + } } bool MuxCable::stateInitActive() @@ -443,19 +497,25 @@ void MuxCable::setState(string new_state) new_state = muxStateValToString.at(ns); auto it = muxStateTransition.find(make_pair(state_, ns)); - if (it == muxStateTransition.end()) { // Update HW Mux cable state anyways mux_cb_orch_->updateMuxState(mux_name_, new_state); - SWSS_LOG_ERROR("State transition from %s to %s is not-handled ", - muxStateValToString.at(state_).c_str(), new_state.c_str()); + if (strcmp(new_state.c_str(), muxStateValToString.at(state_).c_str()) == 0) + { + SWSS_LOG_NOTICE("[%s] Maintaining current MUX state", mux_name_.c_str()); + } + else + { + SWSS_LOG_ERROR("State transition from %s to %s is not-handled ", + muxStateValToString.at(state_).c_str(), new_state.c_str()); + } return; } mux_cb_orch_->updateMuxMetricState(mux_name_, new_state, true); - MuxState state = state_; + prev_state_ = 
state_; state_ = ns; st_chg_in_progress_ = true; @@ -463,7 +523,7 @@ void MuxCable::setState(string new_state) if (!(this->*(state_machine_handlers_[it->second]))()) { //Reset back to original state - state_ = state; + state_ = prev_state_; st_chg_in_progress_ = false; st_chg_failed_ = true; throw std::runtime_error("Failed to handle state transition"); @@ -479,6 +539,51 @@ void MuxCable::setState(string new_state) return; } +void MuxCable::rollbackStateChange() +{ + if (prev_state_ == MuxState::MUX_STATE_FAILED || prev_state_ == MuxState::MUX_STATE_PENDING) + { + SWSS_LOG_ERROR("[%s] Rollback to %s not supported", mux_name_.c_str(), + muxStateValToString.at(prev_state_).c_str()); + return; + } + SWSS_LOG_WARN("[%s] Rolling back state change to %s", mux_name_.c_str(), + muxStateValToString.at(prev_state_).c_str()); + mux_cb_orch_->updateMuxMetricState(mux_name_, muxStateValToString.at(prev_state_), true); + st_chg_in_progress_ = true; + state_ = prev_state_; + bool success = false; + switch (prev_state_) + { + case MuxState::MUX_STATE_ACTIVE: + success = stateActive(); + break; + case MuxState::MUX_STATE_INIT: + case MuxState::MUX_STATE_STANDBY: + success = stateStandby(); + break; + case MuxState::MUX_STATE_FAILED: + case MuxState::MUX_STATE_PENDING: + // Check at the start of the function means we will never reach here + SWSS_LOG_ERROR("[%s] Rollback to %s not supported", mux_name_.c_str(), + muxStateValToString.at(prev_state_).c_str()); + return; + } + st_chg_in_progress_ = false; + if (success) + { + st_chg_failed_ = false; + } + else + { + st_chg_failed_ = true; + SWSS_LOG_ERROR("[%s] Rollback to %s failed", + mux_name_.c_str(), muxStateValToString.at(prev_state_).c_str()); + } + mux_cb_orch_->updateMuxMetricState(mux_name_, muxStateValToString.at(state_), false); + mux_cb_orch_->updateMuxState(mux_name_, muxStateValToString.at(state_)); +} + string MuxCable::getState() { SWSS_LOG_INFO("Get state request for %s, state %s", @@ -489,6 +594,11 @@ string 
MuxCable::getState() bool MuxCable::aclHandler(sai_object_id_t port, string alias, bool add) { + if (cable_type_ == MuxCableType::ACTIVE_ACTIVE) + { + SWSS_LOG_INFO("Skip programming ACL for mux port %s, cable type %d, add %d", alias.c_str(), cable_type_, add); + return true; + } if (add) { acl_handler_ = make_shared(port, alias); @@ -515,9 +625,13 @@ bool MuxCable::isIpInSubnet(IpAddress ip) bool MuxCable::nbrHandler(bool enable, bool update_rt) { + bool ret; + SWSS_LOG_NOTICE("Processing neighbors for mux %s, enable %d, state %d", + mux_name_.c_str(), enable, state_); if (enable) { - return nbr_handler_->enable(update_rt); + ret = nbr_handler_->enable(update_rt); + updateRoutes(); } else { @@ -527,19 +641,17 @@ bool MuxCable::nbrHandler(bool enable, bool update_rt) SWSS_LOG_INFO("Null NH object id, retry for %s", peer_ip4_.to_string().c_str()); return false; } - - return nbr_handler_->disable(tnh); + updateRoutes(); + ret = nbr_handler_->disable(tnh); } + return ret; } void MuxCable::updateNeighbor(NextHopKey nh, bool add) { + SWSS_LOG_NOTICE("Processing update on neighbor %s for mux %s, add %d, state %d", + nh.ip_address.to_string().c_str(), mux_name_.c_str(), add, state_); sai_object_id_t tnh = mux_orch_->getNextHopTunnelId(MUX_TUNNEL, peer_ip4_); - if (add && skip_neighbors_.find(nh.ip_address) != skip_neighbors_.end()) - { - SWSS_LOG_INFO("Skip update neighbor %s on %s", nh.ip_address.to_string().c_str(), nh.alias.c_str()); - return; - } nbr_handler_->update(nh, tnh, add, state_); if (add) { @@ -549,14 +661,38 @@ void MuxCable::updateNeighbor(NextHopKey nh, bool add) { mux_orch_->removeNexthop(nh); } + updateRoutes(); +} + +/** + * @brief updates all routes pointing to the cables neighbor list + */ +void MuxCable::updateRoutes() +{ + SWSS_LOG_INFO("Updating routes pointing to multiple mux nexthops"); + MuxNeighbor neighbors = nbr_handler_->getNeighbors(); + string alias = nbr_handler_->getAlias(); + for (auto nh = neighbors.begin(); nh != neighbors.end(); nh 
++) + { + std::set routes; + NextHopKey nhkey(nh->first, alias); + if (gRouteOrch->getRoutesForNexthop(routes, nhkey)) + { + for (auto rt = routes.begin(); rt != routes.end(); rt++) + { + mux_orch_->updateRoute(rt->prefix, true); + } + } + } } void MuxNbrHandler::update(NextHopKey nh, sai_object_id_t tunnelId, bool add, MuxState state) { + uint32_t num_routes = 0; + SWSS_LOG_INFO("Neigh %s on %s, add %d, state %d", nh.ip_address.to_string().c_str(), nh.alias.c_str(), add, state); - MuxCableOrch* mux_cb_orch = gDirectory.get(); IpPrefix pfx = nh.ip_address.to_string(); if (add) @@ -579,11 +715,12 @@ void MuxNbrHandler::update(NextHopKey nh, sai_object_id_t tunnelId, bool add, Mu case MuxState::MUX_STATE_ACTIVE: neighbors_[nh.ip_address] = gNeighOrch->getLocalNextHopId(nh); gNeighOrch->enableNeighbor(nh); + gRouteOrch->updateNextHopRoutes(nh, num_routes); break; case MuxState::MUX_STATE_STANDBY: neighbors_[nh.ip_address] = tunnelId; gNeighOrch->disableNeighbor(nh); - mux_cb_orch->addTunnelRoute(nh); + updateTunnelRoute(nh, true); create_route(pfx, tunnelId); break; default: @@ -598,7 +735,7 @@ void MuxNbrHandler::update(NextHopKey nh, sai_object_id_t tunnelId, bool add, Mu if (state == MuxState::MUX_STATE_STANDBY) { remove_route(pfx); - mux_cb_orch->removeTunnelRoute(nh); + updateTunnelRoute(nh, false); } neighbors_.erase(nh.ip_address); } @@ -607,7 +744,6 @@ void MuxNbrHandler::update(NextHopKey nh, sai_object_id_t tunnelId, bool add, Mu bool MuxNbrHandler::enable(bool update_rt) { NeighborEntry neigh; - MuxCableOrch* mux_cb_orch = gDirectory.get(); auto it = neighbors_.begin(); while (it != neighbors_.end()) @@ -663,7 +799,7 @@ bool MuxNbrHandler::enable(bool update_rt) { return false; } - mux_cb_orch->removeTunnelRoute(nh_key); + updateTunnelRoute(nh_key, false); } it++; @@ -675,7 +811,6 @@ bool MuxNbrHandler::enable(bool update_rt) bool MuxNbrHandler::disable(sai_object_id_t tnh) { NeighborEntry neigh; - MuxCableOrch* mux_cb_orch = gDirectory.get(); auto it = 
neighbors_.begin(); while (it != neighbors_.end()) @@ -714,18 +849,18 @@ bool MuxNbrHandler::disable(sai_object_id_t tnh) return false; } - neigh = NeighborEntry(it->first, alias_); - if (!gNeighOrch->disableNeighbor(neigh)) + updateTunnelRoute(nh_key, true); + + IpPrefix pfx = it->first.to_string(); + if (create_route(pfx, it->second) != SAI_STATUS_SUCCESS) { - SWSS_LOG_INFO("Disabling neigh failed for %s", neigh.ip_address.to_string().c_str()); return false; } - mux_cb_orch->addTunnelRoute(nh_key); - - IpPrefix pfx = it->first.to_string(); - if (create_route(pfx, it->second) != SAI_STATUS_SUCCESS) + neigh = NeighborEntry(it->first, alias_); + if (!gNeighOrch->disableNeighbor(neigh)) { + SWSS_LOG_INFO("Disabling neigh failed for %s", neigh.ip_address.to_string().c_str()); return false; } @@ -746,52 +881,66 @@ sai_object_id_t MuxNbrHandler::getNextHopId(const NextHopKey nhKey) return SAI_NULL_OBJECT_ID; } -std::map MuxAclHandler::acl_table_; +void MuxNbrHandler::updateTunnelRoute(NextHopKey nh, bool add) +{ + MuxOrch* mux_orch = gDirectory.get(); + MuxCableOrch* mux_cb_orch = gDirectory.get(); + + if (mux_orch->isSkipNeighbor(nh.ip_address)) + { + SWSS_LOG_INFO("Skip updating neighbor %s, add %d", nh.ip_address.to_string().c_str(), add); + return; + } + + if (add) + { + mux_cb_orch->addTunnelRoute(nh); + } + else + { + mux_cb_orch->removeTunnelRoute(nh); + } +} MuxAclHandler::MuxAclHandler(sai_object_id_t port, string alias) { SWSS_LOG_ENTER(); + string value; + shared_ptr m_config_db = shared_ptr(new DBConnector("CONFIG_DB", 0)); + unique_ptr
m_systemDefaultsTable = unique_ptr
(new Table(m_config_db.get(), "SYSTEM_DEFAULTS")); + m_systemDefaultsTable->hget("mux_tunnel_egress_acl", "status", value); + is_ingress_acl_ = value != "enabled"; + // There is one handler instance per MUX port - string table_name = MUX_ACL_TABLE_NAME; + string table_name = is_ingress_acl_ ? MUX_ACL_TABLE_NAME : EGRESS_TABLE_DROP; string rule_name = MUX_ACL_RULE_NAME; port_ = port; alias_ = alias; - auto found = acl_table_.find(table_name); - if (found == acl_table_.end()) - { - SWSS_LOG_NOTICE("First time create for port %" PRIx64 "", port); + // Always try to create the table first. If it already exists, function will return early. + createMuxAclTable(port, table_name); - // First time handling of Mux Table, create ACL table, and bind - createMuxAclTable(port, table_name); + SWSS_LOG_NOTICE("Binding port %" PRIx64 "", port); + + AclRule* rule = gAclOrch->getAclRule(table_name, rule_name); + if (rule == nullptr) + { shared_ptr newRule = - make_shared(gAclOrch, rule_name, table_name); + make_shared(gAclOrch, rule_name, table_name, false /*no counters*/); createMuxAclRule(newRule, table_name); } else { - SWSS_LOG_NOTICE("Binding port %" PRIx64 "", port); - - AclRule* rule = gAclOrch->getAclRule(table_name, rule_name); - if (rule == nullptr) - { - shared_ptr newRule = - make_shared(gAclOrch, rule_name, table_name); - createMuxAclRule(newRule, table_name); - } - else - { - gAclOrch->updateAclRule(table_name, rule_name, MATCH_IN_PORTS, &port, RULE_OPER_ADD); - } + gAclOrch->updateAclRule(table_name, rule_name, MATCH_IN_PORTS, &port, RULE_OPER_ADD); } } MuxAclHandler::~MuxAclHandler(void) { SWSS_LOG_ENTER(); - string table_name = MUX_ACL_TABLE_NAME; + string table_name = is_ingress_acl_ ? 
MUX_ACL_TABLE_NAME : EGRESS_TABLE_DROP; string rule_name = MUX_ACL_RULE_NAME; SWSS_LOG_NOTICE("Un-Binding port %" PRIx64 "", port_); @@ -817,27 +966,20 @@ void MuxAclHandler::createMuxAclTable(sai_object_id_t port, string strTable) { SWSS_LOG_ENTER(); - auto inserted = acl_table_.emplace(piecewise_construct, - std::forward_as_tuple(strTable), - std::forward_as_tuple(gAclOrch, strTable)); - - assert(inserted.second); - - AclTable& acl_table = inserted.first->second; - sai_object_id_t table_oid = gAclOrch->getTableById(strTable); if (table_oid != SAI_NULL_OBJECT_ID) { // DROP ACL table is already created - SWSS_LOG_NOTICE("ACL table %s exists, reuse the same", strTable.c_str()); - acl_table = *(gAclOrch->getTableByOid(table_oid)); + SWSS_LOG_INFO("ACL table %s exists, reuse the same", strTable.c_str()); return; } + SWSS_LOG_NOTICE("First time create for port %" PRIx64 "", port); + AclTable acl_table(gAclOrch, strTable); auto dropType = gAclOrch->getAclTableType(TABLE_TYPE_DROP); assert(dropType); acl_table.validateAddType(*dropType); - acl_table.stage = ACL_STAGE_INGRESS; + acl_table.stage = is_ingress_acl_ ? 
ACL_STAGE_INGRESS : ACL_STAGE_EGRESS; gAclOrch->addAclTable(acl_table); bindAllPorts(acl_table); } @@ -879,6 +1021,13 @@ void MuxAclHandler::bindAllPorts(AclTable &acl_table) acl_table.link(port.m_port_id); acl_table.bind(port.m_port_id); } + else if (port.m_type == Port::LAG && !is_ingress_acl_) + { + SWSS_LOG_INFO("Binding LAG %" PRIx64 " to ACL table %s", port.m_lag_id, acl_table.id.c_str()); + + acl_table.link(port.m_lag_id); + acl_table.bind(port.m_lag_id); + } } } @@ -938,6 +1087,93 @@ sai_object_id_t MuxOrch::getNextHopTunnelId(std::string tunnelKey, IpAddress& ip return it->second.nh_id; } +/** + * @brief updates the given route to point to a single active NH or tunnel + * @param pfx IpPrefix of route to update + * @param remove bool only true when route is getting removed + */ +void MuxOrch::updateRoute(const IpPrefix &pfx, bool add) +{ + NextHopGroupKey nhg_key; + NextHopGroupEntry nhg_entry; + + if (!add) + { + SWSS_LOG_INFO("Removing route %s from mux_multi_active_nh_table", + pfx.to_string().c_str()); + return; + } + + /* get nexthop group key from syncd */ + nhg_key = gRouteOrch->getSyncdRouteNhgKey(gVirtualRouterId, pfx); + + /* check for multi-nh neighbors. 
+ * if none are present, ignore + */ + if (nhg_key.getSize() <= 1) + { + SWSS_LOG_INFO("Route points to single nexthop, ignoring"); + return; + } + + std::set nextHops; + sai_object_id_t next_hop_id; + sai_status_t status; + bool active_found = false; + + /* get nexthops from nexthop group */ + nextHops = nhg_key.getNextHops(); + + SWSS_LOG_NOTICE("Updating route %s pointing to Mux nexthops %s", + pfx.to_string().c_str(), nhg_key.to_string().c_str()); + + for (auto it = nextHops.begin(); it != nextHops.end(); it++) + { + NextHopKey nexthop = *it; + /* This will only work for configured MUX neighbors (most cases) + * TODO: add way to find MUX from neighbor + */ + MuxCable* cable = findMuxCableInSubnet(nexthop.ip_address); + auto standalone = standalone_tunnel_neighbors_.find(nexthop.ip_address); + + if ((cable == nullptr && standalone == standalone_tunnel_neighbors_.end()) || + cable->isActive()) + { + /* Here we pull from local nexthop ID because neighbor update occurs during state change + * before nexthopID is updated in neighorch. 
This ensures that if a neighbor is Active + * only that neighbor's nexthop ID is added, and not the tunnel nexthop + */ + next_hop_id = gNeighOrch->getLocalNextHopId(nexthop); + /* set route entry to point to nh */ + status = set_route(pfx, next_hop_id); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set route entry %s to nexthop %s", + pfx.to_string().c_str(), nexthop.to_string().c_str()); + continue; + } + SWSS_LOG_NOTICE("setting route %s with nexthop %s %" PRIx64 "", + pfx.to_string().c_str(), nexthop.to_string().c_str(), next_hop_id); + active_found = true; + break; + } + } + + if (!active_found) + { + next_hop_id = getNextHopTunnelId(MUX_TUNNEL, mux_peer_switch_); + /* no active nexthop found, point to first */ + SWSS_LOG_INFO("No Active neighbors found, setting route %s to point to tun", + pfx.getIp().to_string().c_str()); + status = set_route(pfx, next_hop_id); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set route entry %s to tunnel", + pfx.getIp().to_string().c_str()); + } + } +} + MuxCable* MuxOrch::findMuxCableInSubnet(IpAddress ip) { for (auto it = mux_cable_tb_.begin(); it != mux_cable_tb_.end(); it++) @@ -963,7 +1199,7 @@ bool MuxOrch::isNeighborActive(const IpAddress& nbr, const MacAddress& mac, stri if (ptr) { - return (ptr->isActive() || ptr->isSkipNeighbor(nbr)); + return ptr->isActive(); } string port; @@ -977,7 +1213,7 @@ bool MuxOrch::isNeighborActive(const IpAddress& nbr, const MacAddress& mac, stri if (!port.empty() && isMuxExists(port)) { MuxCable* ptr = getMuxCable(port); - return (ptr->isActive() || ptr->isSkipNeighbor(nbr)); + return ptr->isActive(); } NextHopKey nh_key = NextHopKey(nbr, alias); @@ -985,7 +1221,7 @@ bool MuxOrch::isNeighborActive(const IpAddress& nbr, const MacAddress& mac, stri if (port.empty() && !curr_port.empty() && isMuxExists(curr_port)) { MuxCable* ptr = getMuxCable(curr_port); - return (ptr->isActive() || ptr->isSkipNeighbor(nbr)); + return ptr->isActive(); } return 
true; @@ -1069,7 +1305,7 @@ void MuxOrch::updateNeighbor(const NeighborUpdate& update) return; } - auto standalone_tunnel_neigh_it = standalone_tunnel_neighbors_.find(update.entry.ip_address); + bool is_tunnel_route_installed = isStandaloneTunnelRouteInstalled(update.entry.ip_address); // Handling zero MAC neighbor updates if (!update.mac) { @@ -1080,7 +1316,7 @@ void MuxOrch::updateNeighbor(const NeighborUpdate& update) if (update.add) { - if (standalone_tunnel_neigh_it == standalone_tunnel_neighbors_.end()) + if (!is_tunnel_route_installed) { createStandaloneTunnelRoute(update.entry.ip_address); } @@ -1095,7 +1331,7 @@ void MuxOrch::updateNeighbor(const NeighborUpdate& update) * make sure to remove any existing tunnel routes to prevent conflicts. * This block also covers the case of neighbor deletion. */ - if (standalone_tunnel_neigh_it != standalone_tunnel_neighbors_.end()) + if (is_tunnel_route_installed) { removeStandaloneTunnelRoute(update.entry.ip_address); } @@ -1166,6 +1402,37 @@ void MuxOrch::removeNexthop(NextHopKey nh) mux_nexthop_tb_.erase(nh); } +/** + * @brief checks if mux nexthop tb contains nexthop + * @param nexthop NextHopKey + * @return true if a mux contains the nexthop + */ +bool MuxOrch::containsNextHop(const NextHopKey& nexthop) +{ + return mux_nexthop_tb_.find(nexthop) != mux_nexthop_tb_.end(); +} + +/** + * @brief checks if a given nexthop group belongs to a mux + * @param nextHops NextHopGroupKey + * @return true if a mux contains any of the nexthops in the group + * false if none of the nexthops belong to a mux + */ +bool MuxOrch::isMuxNexthops(const NextHopGroupKey& nextHops) +{ + const std::set s_nexthops = nextHops.getNextHops(); + for (auto it = s_nexthops.begin(); it != s_nexthops.end(); it ++) + { + if (this->containsNextHop(*it)) + { + SWSS_LOG_INFO("Found mux nexthop %s", it->to_string().c_str()); + return true; + } + } + SWSS_LOG_INFO("No mux nexthop found"); + return false; +} + string MuxOrch::getNexthopMuxName(NextHopKey nh) 
{ if (mux_nexthop_tb_.find(nh) == mux_nexthop_tb_.end()) @@ -1245,6 +1512,7 @@ bool MuxOrch::handleMuxCfg(const Request& request) auto srv_ip = request.getAttrIpPrefix("server_ipv4"); auto srv_ip6 = request.getAttrIpPrefix("server_ipv6"); + MuxCableType cable_type = MuxCableType::ACTIVE_STANDBY; std::set skip_neighbors; const auto& port_name = request.getKeyString(0); @@ -1264,6 +1532,14 @@ bool MuxOrch::handleMuxCfg(const Request& request) SWSS_LOG_NOTICE("%s: %s was added to ignored neighbor list", port_name.c_str(), soc_ip6.getIp().to_string().c_str()); skip_neighbors.insert(soc_ip6.getIp()); } + else if (name == "cable_type") + { + auto cable_type_str = request.getAttrString("cable_type"); + if (cable_type_str == "active-active") + { + cable_type = MuxCableType::ACTIVE_ACTIVE; + } + } } if (op == SET_COMMAND) @@ -1281,9 +1557,10 @@ bool MuxOrch::handleMuxCfg(const Request& request) } mux_cable_tb_[port_name] = std::make_unique - (MuxCable(port_name, srv_ip, srv_ip6, mux_peer_switch_, skip_neighbors)); + (MuxCable(port_name, srv_ip, srv_ip6, mux_peer_switch_, cable_type)); + addSkipNeighbors(skip_neighbors); - SWSS_LOG_NOTICE("Mux entry for port '%s' was added", port_name.c_str()); + SWSS_LOG_NOTICE("Mux entry for port '%s' was added, cable type %d", port_name.c_str(), cable_type); } else { @@ -1293,6 +1570,7 @@ bool MuxOrch::handleMuxCfg(const Request& request) return true; } + removeSkipNeighbors(skip_neighbors); mux_cable_tb_.erase(port_name); SWSS_LOG_NOTICE("Mux cable for port '%s' was removed", port_name.c_str()); @@ -1428,6 +1706,11 @@ void MuxOrch::removeStandaloneTunnelRoute(IpAddress neighborIp) standalone_tunnel_neighbors_.erase(neighborIp); } +bool MuxOrch::isStandaloneTunnelRouteInstalled(const IpAddress& neighborIp) +{ + return standalone_tunnel_neighbors_.find(neighborIp) != standalone_tunnel_neighbors_.end(); +} + MuxCableOrch::MuxCableOrch(DBConnector *db, DBConnector *sdb, const std::string& tableName): Orch2(db, tableName, request_), 
app_tunnel_route_table_(db, APP_TUNNEL_ROUTE_TABLE_NAME), @@ -1521,10 +1804,25 @@ bool MuxCableOrch::addOperation(const Request& request) { mux_obj->setState(state); } - catch(const std::runtime_error& error) + catch(const std::runtime_error& e) { SWSS_LOG_ERROR("Mux Error setting state %s for port %s. Error: %s", - state.c_str(), port_name.c_str(), error.what()); + state.c_str(), port_name.c_str(), e.what()); + mux_obj->rollbackStateChange(); + return true; + } + catch (const std::logic_error& e) + { + SWSS_LOG_ERROR("Logic error while setting state %s for port %s. Error: %s", + state.c_str(), port_name.c_str(), e.what()); + mux_obj->rollbackStateChange(); + return true; + } + catch (const std::exception& e) + { + SWSS_LOG_ERROR("Exception caught while setting state %s for port %s. Error: %s", + state.c_str(), port_name.c_str(), e.what()); + mux_obj->rollbackStateChange(); return true; } diff --git a/orchagent/muxorch.h b/orchagent/muxorch.h index ff66e67ff3..22f01ce27d 100644 --- a/orchagent/muxorch.h +++ b/orchagent/muxorch.h @@ -29,6 +29,12 @@ enum MuxStateChange MUX_STATE_UNKNOWN_STATE }; +enum MuxCableType +{ + ACTIVE_STANDBY, + ACTIVE_ACTIVE +}; + // Forward Declarations class MuxOrch; class MuxCableOrch; @@ -46,9 +52,8 @@ class MuxAclHandler void createMuxAclRule(shared_ptr rule, string strTable); void bindAllPorts(AclTable &acl_table); - // class shared dict: ACL table name -> ACL table - static std::map acl_table_; sai_object_id_t port_ = SAI_NULL_OBJECT_ID; + bool is_ingress_acl_ = true; string alias_; }; @@ -66,6 +71,11 @@ class MuxNbrHandler void update(NextHopKey nh, sai_object_id_t, bool = true, MuxState = MuxState::MUX_STATE_INIT); sai_object_id_t getNextHopId(const NextHopKey); + MuxNeighbor getNeighbors() const { return neighbors_; }; + string getAlias() const { return alias_; }; + +private: + inline void updateTunnelRoute(NextHopKey, bool = true); private: MuxNeighbor neighbors_; @@ -76,7 +86,7 @@ class MuxNbrHandler class MuxCable { public: - 
MuxCable(string name, IpPrefix& srv_ip4, IpPrefix& srv_ip6, IpAddress peer_ip, std::set skip_neighbors); + MuxCable(string name, IpPrefix& srv_ip4, IpPrefix& srv_ip6, IpAddress peer_ip, MuxCableType cable_type); bool isActive() const { @@ -87,20 +97,18 @@ class MuxCable using state_machine_handlers = map; void setState(string state); + void rollbackStateChange(); string getState(); bool isStateChangeInProgress() { return st_chg_in_progress_; } bool isStateChangeFailed() { return st_chg_failed_; } bool isIpInSubnet(IpAddress ip); void updateNeighbor(NextHopKey nh, bool add); + void updateRoutes(); sai_object_id_t getNextHopId(const NextHopKey nh) { return nbr_handler_->getNextHopId(nh); } - bool isSkipNeighbor(const IpAddress& nbr) - { - return (skip_neighbors_.find(nbr) != skip_neighbors_.end()); - } private: bool stateActive(); @@ -111,16 +119,16 @@ class MuxCable bool nbrHandler(bool enable, bool update_routes = true); string mux_name_; + MuxCableType cable_type_; MuxState state_ = MuxState::MUX_STATE_INIT; + MuxState prev_state_; bool st_chg_in_progress_ = false; bool st_chg_failed_ = false; IpPrefix srv_ip4_, srv_ip6_; IpAddress peer_ip4_; - std::set skip_neighbors_; - MuxOrch *mux_orch_; MuxCableOrch *mux_cb_orch_; MuxStateOrch *mux_state_orch_; @@ -154,6 +162,7 @@ typedef std::unique_ptr MuxCable_T; typedef std::map MuxCableTb; typedef std::map MuxTunnelNHs; typedef std::map NextHopTb; +typedef std::map MuxRouteTb; class MuxCfgRequest : public Request { @@ -180,12 +189,19 @@ class MuxOrch : public Orch2, public Observer, public Subject return mux_cable_tb_.at(portName).get(); } + bool isSkipNeighbor(const IpAddress& nbr) + { + return (skip_neighbors_.find(nbr) != skip_neighbors_.end()); + } + MuxCable* findMuxCableInSubnet(IpAddress); bool isNeighborActive(const IpAddress&, const MacAddress&, string&); void update(SubjectType, void *); void addNexthop(NextHopKey, string = ""); void removeNexthop(NextHopKey); + bool containsNextHop(const NextHopKey&); + bool 
isMuxNexthops(const NextHopGroupKey&); string getNexthopMuxName(NextHopKey); sai_object_id_t getNextHopId(const NextHopKey&); @@ -193,6 +209,9 @@ class MuxOrch : public Orch2, public Observer, public Subject bool removeNextHopTunnel(std::string tunnelKey, IpAddress& ipAddr); sai_object_id_t getNextHopTunnelId(std::string tunnelKey, IpAddress& ipAddr); + void updateRoute(const IpPrefix &pfx, bool add); + bool isStandaloneTunnelRouteInstalled(const IpAddress& neighborIp); + private: virtual bool addOperation(const Request& request); virtual bool delOperation(const Request& request); @@ -212,6 +231,19 @@ class MuxOrch : public Orch2, public Observer, public Subject void createStandaloneTunnelRoute(IpAddress neighborIp); void removeStandaloneTunnelRoute(IpAddress neighborIp); + void addSkipNeighbors(const std::set &neighbors) + { + skip_neighbors_.insert(neighbors.begin(), neighbors.end()); + } + + void removeSkipNeighbors(const std::set &neighbors) + { + for (const IpAddress &neighbor : neighbors) + { + skip_neighbors_.erase(neighbor); + } + } + IpAddress mux_peer_switch_ = 0x0; sai_object_id_t mux_tunnel_id_ = SAI_NULL_OBJECT_ID; @@ -227,6 +259,7 @@ class MuxOrch : public Orch2, public Observer, public Subject MuxCfgRequest request_; std::set standalone_tunnel_neighbors_; + std::set skip_neighbors_; }; const request_description_t mux_cable_request_description = { diff --git a/orchagent/natorch.cpp b/orchagent/natorch.cpp index d7f124a28e..c19f2d7823 100644 --- a/orchagent/natorch.cpp +++ b/orchagent/natorch.cpp @@ -106,8 +106,7 @@ NatOrch::NatOrch(DBConnector *appDb, DBConnector *stateDb, vectorfirst; NatEntryValue &entry = iter->second; uint32_t attr_count; - sai_attribute_t nat_entry_attr[4]; - sai_nat_entry_t nat_entry; + sai_attribute_t nat_entry_attr[4] = {}; + sai_nat_entry_t nat_entry = {}; sai_status_t status; uint64_t nat_translations_pkts = 0, nat_translations_bytes = 0; @@ -3564,13 +3519,11 @@ bool NatOrch::getNatCounters(const NatEntry::iterator &iter) 
return 0; } - memset(nat_entry_attr, 0, sizeof(nat_entry_attr)); nat_entry_attr[0].id = SAI_NAT_ENTRY_ATTR_BYTE_COUNT; nat_entry_attr[1].id = SAI_NAT_ENTRY_ATTR_PACKET_COUNT; attr_count = 2; - memset(&nat_entry, 0, sizeof(nat_entry)); nat_entry.vr_id = gVirtualRouterId; nat_entry.switch_id = gSwitchId; @@ -3627,8 +3580,8 @@ bool NatOrch::getTwiceNatCounters(const TwiceNatEntry::iterator &iter) const TwiceNatEntryKey &key = iter->first; TwiceNatEntryValue &entry = iter->second; uint32_t attr_count; - sai_attribute_t nat_entry_attr[4]; - sai_nat_entry_t dbl_nat_entry; + sai_attribute_t nat_entry_attr[4] = {}; + sai_nat_entry_t dbl_nat_entry = {}; sai_status_t status; uint64_t nat_translations_pkts = 0, nat_translations_bytes = 0; @@ -3639,14 +3592,11 @@ bool NatOrch::getTwiceNatCounters(const TwiceNatEntry::iterator &iter) return 0; } - memset(nat_entry_attr, 0, sizeof(nat_entry_attr)); nat_entry_attr[0].id = SAI_NAT_ENTRY_ATTR_BYTE_COUNT; nat_entry_attr[1].id = SAI_NAT_ENTRY_ATTR_PACKET_COUNT; attr_count = 2; - memset(&dbl_nat_entry, 0, sizeof(dbl_nat_entry)); - dbl_nat_entry.vr_id = gVirtualRouterId; dbl_nat_entry.switch_id = gSwitchId; dbl_nat_entry.nat_type = SAI_NAT_TYPE_DOUBLE_NAT; @@ -3678,9 +3628,9 @@ bool NatOrch::setNatCounters(const NatEntry::iterator &iter) { const IpAddress &ipAddr = iter->first; NatEntryValue &entry = iter->second; - sai_attribute_t nat_entry_attr_packet; - sai_attribute_t nat_entry_attr_byte; - sai_nat_entry_t nat_entry; + sai_attribute_t nat_entry_attr_packet = {}; + sai_attribute_t nat_entry_attr_byte = {}; + sai_nat_entry_t nat_entry = {}; sai_status_t status; uint64_t nat_translations_pkts = 0, nat_translations_bytes = 0; @@ -3690,12 +3640,8 @@ bool NatOrch::setNatCounters(const NatEntry::iterator &iter) return 0; } - memset(&nat_entry_attr_packet, 0, sizeof(nat_entry_attr_packet)); - memset(&nat_entry_attr_byte, 0, sizeof(nat_entry_attr_byte)); nat_entry_attr_byte.id = SAI_NAT_ENTRY_ATTR_BYTE_COUNT; nat_entry_attr_packet.id = 
SAI_NAT_ENTRY_ATTR_PACKET_COUNT; - - memset(&nat_entry, 0, sizeof(nat_entry)); nat_entry.vr_id = gVirtualRouterId; nat_entry.switch_id = gSwitchId; @@ -3762,8 +3708,8 @@ bool NatOrch::getNaptCounters(const NaptEntry::iterator &iter) NaptEntryValue &entry = iter->second; uint8_t protoType = ((naptKey.prototype == "TCP") ? IPPROTO_TCP : IPPROTO_UDP); uint32_t attr_count; - sai_attribute_t nat_entry_attr[4]; - sai_nat_entry_t nat_entry; + sai_attribute_t nat_entry_attr[4] = {}; + sai_nat_entry_t nat_entry = {}; sai_status_t status; uint64_t nat_translations_pkts = 0, nat_translations_bytes = 0; @@ -3774,14 +3720,11 @@ bool NatOrch::getNaptCounters(const NaptEntry::iterator &iter) return 0; } - memset(nat_entry_attr, 0, sizeof(nat_entry_attr)); nat_entry_attr[0].id = SAI_NAT_ENTRY_ATTR_BYTE_COUNT; nat_entry_attr[1].id = SAI_NAT_ENTRY_ATTR_PACKET_COUNT; attr_count = 2; - memset(&nat_entry, 0, sizeof(nat_entry)); - nat_entry.vr_id = gVirtualRouterId; nat_entry.switch_id = gSwitchId; @@ -3848,8 +3791,8 @@ bool NatOrch::getTwiceNaptCounters(const TwiceNaptEntry::iterator &iter) TwiceNaptEntryValue &entry = iter->second; uint8_t protoType = ((key.prototype == "TCP") ? 
IPPROTO_TCP : IPPROTO_UDP); uint32_t attr_count; - sai_attribute_t nat_entry_attr[4]; - sai_nat_entry_t dbl_nat_entry; + sai_attribute_t nat_entry_attr[4] = {}; + sai_nat_entry_t dbl_nat_entry = {}; sai_status_t status; uint64_t nat_translations_pkts = 0, nat_translations_bytes = 0; @@ -3861,14 +3804,11 @@ bool NatOrch::getTwiceNaptCounters(const TwiceNaptEntry::iterator &iter) return 0; } - memset(nat_entry_attr, 0, sizeof(nat_entry_attr)); nat_entry_attr[0].id = SAI_NAT_ENTRY_ATTR_BYTE_COUNT; nat_entry_attr[1].id = SAI_NAT_ENTRY_ATTR_PACKET_COUNT; attr_count = 2; - memset(&dbl_nat_entry, 0, sizeof(dbl_nat_entry)); - dbl_nat_entry.vr_id = gVirtualRouterId; dbl_nat_entry.switch_id = gSwitchId; dbl_nat_entry.nat_type = SAI_NAT_TYPE_DOUBLE_NAT; @@ -3907,9 +3847,9 @@ bool NatOrch::setNaptCounters(const NaptEntry::iterator &iter) const NaptEntryKey &naptKey = iter->first; NaptEntryValue &entry = iter->second; uint8_t protoType = ((naptKey.prototype == "TCP") ? IPPROTO_TCP : IPPROTO_UDP); - sai_attribute_t nat_entry_attr_packet; - sai_attribute_t nat_entry_attr_byte; - sai_nat_entry_t nat_entry; + sai_attribute_t nat_entry_attr_packet = {}; + sai_attribute_t nat_entry_attr_byte = {}; + sai_nat_entry_t nat_entry = {}; sai_status_t status; uint64_t nat_translations_pkts = 0, nat_translations_bytes = 0; @@ -3920,13 +3860,9 @@ bool NatOrch::setNaptCounters(const NaptEntry::iterator &iter) return 0; } - memset(&nat_entry_attr_packet, 0, sizeof(nat_entry_attr_packet)); - memset(&nat_entry_attr_byte, 0, sizeof(nat_entry_attr_byte)); nat_entry_attr_packet.id = SAI_NAT_ENTRY_ATTR_PACKET_COUNT; nat_entry_attr_byte.id = SAI_NAT_ENTRY_ATTR_BYTE_COUNT; - memset(&nat_entry, 0, sizeof(nat_entry)); - nat_entry.vr_id = gVirtualRouterId; nat_entry.switch_id = gSwitchId; @@ -4002,9 +3938,9 @@ bool NatOrch::setTwiceNatCounters(const TwiceNatEntry::iterator &iter) { const TwiceNatEntryKey &key = iter->first; TwiceNatEntryValue &entry = iter->second; - sai_attribute_t nat_entry_attr_packet; 
- sai_attribute_t nat_entry_attr_byte; - sai_nat_entry_t dbl_nat_entry; + sai_attribute_t nat_entry_attr_packet = {}; + sai_attribute_t nat_entry_attr_byte = {}; + sai_nat_entry_t dbl_nat_entry = {}; sai_status_t status; uint64_t nat_translations_pkts = 0, nat_translations_bytes = 0; @@ -4015,13 +3951,9 @@ bool NatOrch::setTwiceNatCounters(const TwiceNatEntry::iterator &iter) return 0; } - memset(&nat_entry_attr_packet, 0, sizeof(nat_entry_attr_packet)); - memset(&nat_entry_attr_byte, 0, sizeof(nat_entry_attr_byte)); nat_entry_attr_packet.id = SAI_NAT_ENTRY_ATTR_PACKET_COUNT; nat_entry_attr_byte.id = SAI_NAT_ENTRY_ATTR_BYTE_COUNT; - memset(&dbl_nat_entry, 0, sizeof(dbl_nat_entry)); - dbl_nat_entry.vr_id = gVirtualRouterId; dbl_nat_entry.switch_id = gSwitchId; dbl_nat_entry.nat_type = SAI_NAT_TYPE_DOUBLE_NAT; @@ -4059,9 +3991,9 @@ bool NatOrch::setTwiceNaptCounters(const TwiceNaptEntry::iterator &iter) const TwiceNaptEntryKey &key = iter->first; TwiceNaptEntryValue &entry = iter->second; uint8_t protoType = ((key.prototype == "TCP") ? 
IPPROTO_TCP : IPPROTO_UDP); - sai_attribute_t nat_entry_attr_packet; - sai_attribute_t nat_entry_attr_byte; - sai_nat_entry_t dbl_nat_entry; + sai_attribute_t nat_entry_attr_packet = {}; + sai_attribute_t nat_entry_attr_byte = {}; + sai_nat_entry_t dbl_nat_entry = {}; sai_status_t status; uint64_t nat_translations_pkts = 0, nat_translations_bytes = 0; @@ -4072,13 +4004,9 @@ bool NatOrch::setTwiceNaptCounters(const TwiceNaptEntry::iterator &iter) return 0; } - memset(&nat_entry_attr_packet, 0, sizeof(nat_entry_attr_packet)); - memset(&nat_entry_attr_byte, 0, sizeof(nat_entry_attr_byte)); nat_entry_attr_packet.id = SAI_NAT_ENTRY_ATTR_PACKET_COUNT; nat_entry_attr_byte.id = SAI_NAT_ENTRY_ATTR_BYTE_COUNT; - memset(&dbl_nat_entry, 0, sizeof(dbl_nat_entry)); - dbl_nat_entry.vr_id = gVirtualRouterId; dbl_nat_entry.switch_id = gSwitchId; dbl_nat_entry.nat_type = SAI_NAT_TYPE_DOUBLE_NAT; @@ -4211,8 +4139,9 @@ bool NatOrch::checkIfNatEntryIsActive(const NatEntry::iterator &iter, time_t now NatEntryValue &entry = iter->second; uint32_t attr_count; IpAddress srcIp; - sai_attribute_t nat_entry_attr[4]; - sai_nat_entry_t snat_entry, dnat_entry; + sai_attribute_t nat_entry_attr[4] = {}; + sai_nat_entry_t snat_entry = {}; + sai_nat_entry_t dnat_entry; sai_status_t status; if (entry.nat_type == "dnat") @@ -4233,7 +4162,6 @@ bool NatOrch::checkIfNatEntryIsActive(const NatEntry::iterator &iter, time_t now return 1; } - memset(nat_entry_attr, 0, sizeof(nat_entry_attr)); nat_entry_attr[0].id = SAI_NAT_ENTRY_ATTR_HIT_BIT; /* Get the Hit bit */ nat_entry_attr[0].value.booldata = 0; nat_entry_attr[1].id = SAI_NAT_ENTRY_ATTR_HIT_BIT_COR; /* clear the hit bit after returning the value */ @@ -4241,8 +4169,6 @@ bool NatOrch::checkIfNatEntryIsActive(const NatEntry::iterator &iter, time_t now attr_count = 2; - memset(&snat_entry, 0, sizeof(snat_entry)); - snat_entry.vr_id = gVirtualRouterId; snat_entry.switch_id = gSwitchId; snat_entry.nat_type = SAI_NAT_TYPE_SOURCE_NAT; @@ -4306,8 +4232,9 @@ 
bool NatOrch::checkIfNaptEntryIsActive(const NaptEntry::iterator &iter, time_t n uint32_t attr_count; IpAddress srcIp; uint16_t srcPort; - sai_attribute_t nat_entry_attr[4]; - sai_nat_entry_t snat_entry, dnat_entry; + sai_attribute_t nat_entry_attr[4] = {}; + sai_nat_entry_t snat_entry = {}; + sai_nat_entry_t dnat_entry; sai_status_t status; if (entry.nat_type == "dnat") @@ -4329,7 +4256,6 @@ bool NatOrch::checkIfNaptEntryIsActive(const NaptEntry::iterator &iter, time_t n return 1; } - memset(nat_entry_attr, 0, sizeof(nat_entry_attr)); nat_entry_attr[0].id = SAI_NAT_ENTRY_ATTR_HIT_BIT; /* Get the Hit bit */ nat_entry_attr[0].value.booldata = 0; nat_entry_attr[1].id = SAI_NAT_ENTRY_ATTR_HIT_BIT_COR; /* clear the hit bit after returning the value */ @@ -4337,8 +4263,6 @@ bool NatOrch::checkIfNaptEntryIsActive(const NaptEntry::iterator &iter, time_t n attr_count = 2; - memset(&snat_entry, 0, sizeof(snat_entry)); - snat_entry.vr_id = gVirtualRouterId; snat_entry.switch_id = gSwitchId; snat_entry.nat_type = SAI_NAT_TYPE_SOURCE_NAT; @@ -4417,8 +4341,8 @@ bool NatOrch::checkIfTwiceNatEntryIsActive(const TwiceNatEntry::iterator &iter, const TwiceNatEntryKey &key = iter->first; TwiceNatEntryValue &entry = iter->second; uint32_t attr_count; - sai_attribute_t nat_entry_attr[4]; - sai_nat_entry_t dbl_nat_entry; + sai_attribute_t nat_entry_attr[4] = {}; + sai_nat_entry_t dbl_nat_entry = {}; sai_status_t status; if (entry.entry_type == "static") @@ -4434,7 +4358,6 @@ bool NatOrch::checkIfTwiceNatEntryIsActive(const TwiceNatEntry::iterator &iter, return 0; } - memset(nat_entry_attr, 0, sizeof(nat_entry_attr)); nat_entry_attr[0].id = SAI_NAT_ENTRY_ATTR_HIT_BIT; /* Get the Hit bit */ nat_entry_attr[0].value.booldata = 0; nat_entry_attr[1].id = SAI_NAT_ENTRY_ATTR_HIT_BIT_COR; /* clear the hit bit after returning the value */ @@ -4442,8 +4365,6 @@ bool NatOrch::checkIfTwiceNatEntryIsActive(const TwiceNatEntry::iterator &iter, attr_count = 2; - memset(&dbl_nat_entry, 0, 
sizeof(dbl_nat_entry)); - dbl_nat_entry.vr_id = gVirtualRouterId; dbl_nat_entry.switch_id = gSwitchId; dbl_nat_entry.nat_type = SAI_NAT_TYPE_DOUBLE_NAT; @@ -4472,8 +4393,8 @@ bool NatOrch::checkIfTwiceNaptEntryIsActive(const TwiceNaptEntry::iterator &iter TwiceNaptEntryValue &entry = iter->second; uint8_t protoType = ((key.prototype == "TCP") ? IPPROTO_TCP : IPPROTO_UDP); uint32_t attr_count; - sai_attribute_t nat_entry_attr[4]; - sai_nat_entry_t dbl_nat_entry; + sai_attribute_t nat_entry_attr[4] = {}; + sai_nat_entry_t dbl_nat_entry = {}; sai_status_t status; if (entry.addedToHw == false) @@ -4489,7 +4410,6 @@ bool NatOrch::checkIfTwiceNaptEntryIsActive(const TwiceNaptEntry::iterator &iter return 1; } - memset(nat_entry_attr, 0, sizeof(nat_entry_attr)); nat_entry_attr[0].id = SAI_NAT_ENTRY_ATTR_HIT_BIT; /* Get the Hit bit */ nat_entry_attr[0].value.booldata = 0; nat_entry_attr[1].id = SAI_NAT_ENTRY_ATTR_HIT_BIT_COR; /* clear the hit bit after returning the value */ @@ -4497,8 +4417,6 @@ bool NatOrch::checkIfTwiceNaptEntryIsActive(const TwiceNaptEntry::iterator &iter attr_count = 2; - memset(&dbl_nat_entry, 0, sizeof(dbl_nat_entry)); - dbl_nat_entry.vr_id = gVirtualRouterId; dbl_nat_entry.switch_id = gSwitchId; dbl_nat_entry.nat_type = SAI_NAT_TYPE_DOUBLE_NAT; diff --git a/orchagent/neighorch.cpp b/orchagent/neighorch.cpp index eb4afc2193..a2bdebbc62 100644 --- a/orchagent/neighorch.cpp +++ b/orchagent/neighorch.cpp @@ -21,6 +21,7 @@ extern FgNhgOrch *gFgNhgOrch; extern Directory gDirectory; extern string gMySwitchType; extern int32_t gVoqMySwitchId; +extern BfdOrch *gBfdOrch; const int neighorch_pri = 30; @@ -35,6 +36,12 @@ NeighOrch::NeighOrch(DBConnector *appDb, string tableName, IntfsOrch *intfsOrch, m_fdbOrch->attach(this); + // Some UTs instantiate NeighOrch but gBfdOrch is null, it is not null in orchagent + if (gBfdOrch) + { + gBfdOrch->attach(this); + } + if(gMySwitchType == "voq") { //Add subscriber to process VOQ system neigh @@ -151,6 +158,12 @@ void 
NeighOrch::update(SubjectType type, void *cntx) processFDBFlushUpdate(*update); break; } + case SUBJECT_TYPE_BFD_SESSION_STATE_CHANGE: + { + BfdUpdate *update = static_cast(cntx); + updateNextHop (*update); + break; + } default: break; } @@ -255,6 +268,12 @@ bool NeighOrch::addNextHop(const NextHopKey &nh) sai_status_t status = sai_next_hop_api->create_next_hop(&next_hop_id, gSwitchId, (uint32_t)next_hop_attrs.size(), next_hop_attrs.data()); if (status != SAI_STATUS_SUCCESS) { + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) + { + SWSS_LOG_NOTICE("Next hop %s on %s already exists", + nexthop.ip_address.to_string().c_str(), nexthop.alias.c_str()); + return true; + } SWSS_LOG_ERROR("Failed to create next hop %s on %s, rv:%d", nexthop.ip_address.to_string().c_str(), nexthop.alias.c_str(), status); task_process_status handle_status = handleSaiCreateStatus(SAI_API_NEXT_HOP, status); @@ -279,7 +298,7 @@ bool NeighOrch::addNextHop(const NextHopKey &nh) next_hop_entry.nh_flags = 0; m_syncdNextHops[nexthop] = next_hop_entry; - m_intfsOrch->increaseRouterIntfsRefCount(nexthop.alias); + m_intfsOrch->increaseRouterIntfsRefCount(nh.alias); if (nexthop.isMplsNextHop()) { @@ -427,6 +446,62 @@ bool NeighOrch::ifChangeInformNextHop(const string &alias, bool if_up) return rc; } +void NeighOrch::updateNextHop(const BfdUpdate& update) +{ + SWSS_LOG_ENTER(); + bool rc = true; + + auto key = update.peer; + sai_bfd_session_state_t state = update.state; + + size_t found_vrf = key.find(state_db_key_delimiter); + if (found_vrf == string::npos) + { + SWSS_LOG_INFO("Failed to parse key %s, no vrf is given", key.c_str()); + return; + } + + size_t found_ifname = key.find(state_db_key_delimiter, found_vrf + 1); + if (found_ifname == string::npos) + { + SWSS_LOG_INFO("Failed to parse key %s, no ifname is given", key.c_str()); + return; + } + + string vrf_name = key.substr(0, found_vrf); + string alias = key.substr(found_vrf + 1, found_ifname - found_vrf - 1); + IpAddress 
peer_address(key.substr(found_ifname + 1)); + + if (alias != "default" || vrf_name != "default") + { + return; + } + + for (auto nhop = m_syncdNextHops.begin(); nhop != m_syncdNextHops.end(); ++nhop) + { + if (nhop->first.ip_address != peer_address) + { + continue; + } + + if (state == SAI_BFD_SESSION_STATE_UP) + { + SWSS_LOG_INFO("updateNextHop get BFD session UP event, key %s", key.c_str()); + rc = clearNextHopFlag(nhop->first, NHFLAGS_IFDOWN); + } + else + { + SWSS_LOG_INFO("updateNextHop get BFD session DOWN event, key %s", key.c_str()); + rc = setNextHopFlag(nhop->first, NHFLAGS_IFDOWN); + } + + if (!rc) + { + break; + } + } +} + bool NeighOrch::removeNextHop(const IpAddress &ipAddress, const string &alias) { SWSS_LOG_ENTER(); @@ -591,24 +666,51 @@ void NeighOrch::decreaseNextHopRefCount(const NextHopKey &nexthop, uint32_t coun assert(hasNextHop(nexthop)); if (m_syncdNextHops.find(nexthop) != m_syncdNextHops.end()) { + if ((m_syncdNextHops[nexthop].ref_count - (int)count) < 0) + { + SWSS_LOG_ERROR("Ref count cannot be negative for next_hop_id: 0x%" PRIx64 " with ip: %s and alias: %s", + m_syncdNextHops[nexthop].next_hop_id, nexthop.ip_address.to_string().c_str(), nexthop.alias.c_str()); + // Reset refcount to 0 to match expected value + m_syncdNextHops[nexthop].ref_count = 0; + return; + } m_syncdNextHops[nexthop].ref_count -= count; } } bool NeighOrch::getNeighborEntry(const NextHopKey &nexthop, NeighborEntry &neighborEntry, MacAddress &macAddress) { + Port inbp; + string nbr_alias; if (!hasNextHop(nexthop)) { return false; } + if (gMySwitchType == "voq") + { + gPortsOrch->getInbandPort(inbp); + assert(inbp.m_alias.length()); + } for (const auto &entry : m_syncdNeighbors) { - if (entry.first.ip_address == nexthop.ip_address && entry.first.alias == nexthop.alias) + if (entry.first.ip_address == nexthop.ip_address) { - neighborEntry = entry.first; - macAddress = entry.second.mac; - return true; + if (m_intfsOrch->isRemoteSystemPortIntf(entry.first.alias)) + { + 
//For remote system ports, nexthops are always on inband. + nbr_alias = inbp.m_alias; + } + else + { + nbr_alias = entry.first.alias; + } + if (nbr_alias == nexthop.alias) + { + neighborEntry = entry.first; + macAddress = entry.second.mac; + return true; + } } } @@ -712,17 +814,33 @@ void NeighOrch::doTask(Consumer &consumer) mac_address = MacAddress(fvValue(*i)); } - if (m_syncdNeighbors.find(neighbor_entry) == m_syncdNeighbors.end() - || m_syncdNeighbors[neighbor_entry].mac != mac_address) + bool nbr_not_found = (m_syncdNeighbors.find(neighbor_entry) == m_syncdNeighbors.end()); + if (nbr_not_found || m_syncdNeighbors[neighbor_entry].mac != mac_address) { - // only for unresolvable neighbors that are new - if (!mac_address) + if (!mac_address) { - if (m_syncdNeighbors.find(neighbor_entry) == m_syncdNeighbors.end()) + if (nbr_not_found) { - addZeroMacTunnelRoute(neighbor_entry, mac_address); + // only for unresolvable neighbors that are new + if (addZeroMacTunnelRoute(neighbor_entry, mac_address)) + { + it = consumer.m_toSync.erase(it); + } + else + { + it++; + continue; + } + } + else + { + /* + * For neighbors that were previously resolvable but are now unresolvable, + * we expect such neighbor entries to be deleted prior to a zero MAC update + * arriving for that same neighbor. 
+ */ + it = consumer.m_toSync.erase(it); } - it = consumer.m_toSync.erase(it); } else if (addNeighbor(neighbor_entry, mac_address)) { @@ -815,6 +933,27 @@ bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress } } + PortsOrch* ports_orch = gDirectory.get(); + auto vlan_ports = ports_orch->getAllVlans(); + + for (auto vlan_port: vlan_ports) + { + if (vlan_port == alias) + { + continue; + } + NeighborEntry temp_entry = { ip_address, vlan_port }; + if (m_syncdNeighbors.find(temp_entry) != m_syncdNeighbors.end()) + { + SWSS_LOG_NOTICE("Neighbor %s on %s already exists, removing before adding new neighbor", ip_address.to_string().c_str(), vlan_port.c_str()); + if (!removeNeighbor(temp_entry)) + { + SWSS_LOG_ERROR("Failed to remove neighbor %s on %s", ip_address.to_string().c_str(), vlan_port.c_str()); + return false; + } + } + } + MuxOrch* mux_orch = gDirectory.get(); bool hw_config = isHwConfigured(neighborEntry); @@ -893,15 +1032,18 @@ bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress } else if (isHwConfigured(neighborEntry)) { - status = sai_neighbor_api->set_neighbor_entry_attribute(&neighbor_entry, &neighbor_attr); - if (status != SAI_STATUS_SUCCESS) + for (auto itr : neighbor_attrs) { - SWSS_LOG_ERROR("Failed to update neighbor %s on %s, rv:%d", - macAddress.to_string().c_str(), alias.c_str(), status); - task_process_status handle_status = handleSaiSetStatus(SAI_API_NEIGHBOR, status); - if (handle_status != task_success) + status = sai_neighbor_api->set_neighbor_entry_attribute(&neighbor_entry, &itr); + if (status != SAI_STATUS_SUCCESS) { - return parseHandleSaiStatusFailure(handle_status); + SWSS_LOG_ERROR("Failed to update neighbor %s on %s, attr.id=0x%x, rv:%d", + macAddress.to_string().c_str(), alias.c_str(), itr.id, status); + task_process_status handle_status = handleSaiSetStatus(SAI_API_NEIGHBOR, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } 
} } SWSS_LOG_NOTICE("Updated neighbor %s on %s", macAddress.to_string().c_str(), alias.c_str()); @@ -968,7 +1110,7 @@ bool NeighOrch::removeNeighbor(const NeighborEntry &neighborEntry, bool disable) /* When next hop is not found, we continue to remove neighbor entry. */ if (status == SAI_STATUS_ITEM_NOT_FOUND) { - SWSS_LOG_ERROR("Failed to locate next hop %s on %s, rv:%d", + SWSS_LOG_NOTICE("Next hop %s on %s doesn't exist, rv:%d", ip_address.to_string().c_str(), alias.c_str(), status); } else @@ -1003,9 +1145,8 @@ bool NeighOrch::removeNeighbor(const NeighborEntry &neighborEntry, bool disable) { if (status == SAI_STATUS_ITEM_NOT_FOUND) { - SWSS_LOG_ERROR("Failed to locate neighbor %s on %s, rv:%d", + SWSS_LOG_NOTICE("Neighbor %s on %s already removed, rv:%d", m_syncdNeighbors[neighborEntry].mac.to_string().c_str(), alias.c_str(), status); - return true; } else { @@ -1018,22 +1159,24 @@ bool NeighOrch::removeNeighbor(const NeighborEntry &neighborEntry, bool disable) } } } - - if (neighbor_entry.ip_address.addr_family == SAI_IP_ADDR_FAMILY_IPV4) - { - gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV4_NEIGHBOR); - } else { - gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_NEIGHBOR); - } + if (neighbor_entry.ip_address.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV4_NEIGHBOR); + } + else + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_NEIGHBOR); + } - removeNextHop(ip_address, alias); - m_intfsOrch->decreaseRouterIntfsRefCount(alias); + removeNextHop(ip_address, alias); + m_intfsOrch->decreaseRouterIntfsRefCount(alias); + SWSS_LOG_NOTICE("Removed neighbor %s on %s", + m_syncdNeighbors[neighborEntry].mac.to_string().c_str(), alias.c_str()); + } } - SWSS_LOG_NOTICE("Removed neighbor %s on %s", - m_syncdNeighbors[neighborEntry].mac.to_string().c_str(), alias.c_str()); /* Do not delete entry from cache if its disable request */ if (disable) @@ -1242,7 +1385,7 @@ void 
NeighOrch::doVoqSystemNeighTask(Consumer &consumer) continue; } - MacAddress mac_address; + MacAddress mac_address, original_mac_address; uint32_t encap_index = 0; for (auto i = kfvFieldsValues(t).begin(); i != kfvFieldsValues(t).end(); i++) @@ -1298,6 +1441,17 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) { SWSS_LOG_NOTICE("VOQ encap index updated for neighbor %s", kfvKey(t).c_str()); it = consumer.m_toSync.erase(it); + + /* Remove remaining DEL operation in m_toSync for the same neighbor. + * Since DEL operation is supposed to be executed before SET for the same neighbor + * A remaining DEL after the SET operation means the DEL operation failed previously and should not be executed anymore + */ + auto rit = make_reverse_iterator(it); + while (rit != consumer.m_toSync.rend() && rit->first == key && kfvOp(rit->second) == DEL_COMMAND) + { + consumer.m_toSync.erase(next(rit).base()); + SWSS_LOG_NOTICE("Removed pending system neighbor DEL operation for %s after SET operation", key.c_str()); + } } continue; } @@ -1311,42 +1465,13 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) //kernel programming. if(ibif.m_type != Port::VLAN) { + original_mac_address = mac_address; mac_address = gMacAddress; - - // For VS platforms, the mac of the static neigh should not be same as asic's own mac. - // This is because host originated packets will have same mac for both src and dst which - // will result in host NOT sending packet out. To address this problem which is specific - // to port type inband interfaces, set the mac to the neighbor's owner asic's mac. Since - // the owner asic's mac is not readily avaiable here, the owner asic mac is derived from - // the switch id and lower 5 bytes of asic mac which is assumed to be same for all asics - // in the VS system. 
- // Therefore to make VOQ chassis systems work in VS platform based setups like the setups - // using KVMs, it is required that all asics have same base mac in the format given below - // :<6th byte = switch_id> - string platform = getenv("ASIC_VENDOR") ? getenv("ASIC_VENDOR") : ""; - + // For VS platform, use the original MAC address if (platform == VS_PLATFORM_SUBSTRING) { - int8_t sw_id = -1; - uint8_t egress_asic_mac[ETHER_ADDR_LEN]; - - gMacAddress.getMac(egress_asic_mac); - - if (p.m_type == Port::LAG) - { - sw_id = (int8_t) p.m_system_lag_info.switch_id; - } - else if (p.m_type == Port::PHY || p.m_type == Port::SYSTEM) - { - sw_id = (int8_t) p.m_system_port_info.switch_id; - } - - if(sw_id != -1) - { - egress_asic_mac[5] = sw_id; - mac_address = MacAddress(egress_asic_mac); - } + mac_address = original_mac_address; } } vector fvVector; @@ -1359,6 +1484,7 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) else { it++; + continue; } } else @@ -1367,6 +1493,17 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) SWSS_LOG_INFO("System neighbor %s already exists", kfvKey(t).c_str()); it = consumer.m_toSync.erase(it); } + + /* Remove remaining DEL operation in m_toSync for the same neighbor. 
+ * Since DEL operation is supposed to be executed before SET for the same neighbor + * A remaining DEL after the SET operation means the DEL operation failed previously and should not be executed anymore + */ + auto rit = make_reverse_iterator(it); + while (rit != consumer.m_toSync.rend() && rit->first == key && kfvOp(rit->second) == DEL_COMMAND) + { + consumer.m_toSync.erase(next(rit).base()); + SWSS_LOG_NOTICE("Removed pending system neighbor DEL operation for %s after SET operation", key.c_str()); + } } else if (op == DEL_COMMAND) { @@ -1725,12 +1862,18 @@ void NeighOrch::updateSrv6Nexthop(const NextHopKey &nh, const sai_object_id_t &n m_syncdNextHops.erase(nh); } } -void NeighOrch::addZeroMacTunnelRoute(const NeighborEntry& entry, const MacAddress& mac) + +bool NeighOrch::addZeroMacTunnelRoute(const NeighborEntry& entry, const MacAddress& mac) { SWSS_LOG_INFO("Creating tunnel route for neighbor %s", entry.ip_address.to_string().c_str()); MuxOrch* mux_orch = gDirectory.get(); NeighborUpdate update = {entry, mac, true}; mux_orch->update(SUBJECT_TYPE_NEIGH_CHANGE, static_cast(&update)); - m_syncdNeighbors[entry] = { mac, false }; -} + if (mux_orch->isStandaloneTunnelRouteInstalled(entry.ip_address)) + { + m_syncdNeighbors[entry] = { mac, false }; + return true; + } + return false; +} diff --git a/orchagent/neighorch.h b/orchagent/neighorch.h index 727797757f..e72979ad07 100644 --- a/orchagent/neighorch.h +++ b/orchagent/neighorch.h @@ -11,6 +11,7 @@ #include "nexthopkey.h" #include "producerstatetable.h" #include "schema.h" +#include "bfdorch.h" #define NHFLAGS_IFDOWN 0x1 // nexthop's outbound i/f is down @@ -112,11 +113,12 @@ class NeighOrch : public Orch, public Subject, public Observer void voqSyncAddNeigh(string &alias, IpAddress &ip_address, const MacAddress &mac, sai_neighbor_entry_t &neighbor_entry); void voqSyncDelNeigh(string &alias, IpAddress &ip_address); bool updateVoqNeighborEncapIndex(const NeighborEntry &neighborEntry, uint32_t encap_index); + void 
updateNextHop(const BfdUpdate&); bool resolveNeighborEntry(const NeighborEntry &, const MacAddress &); void clearResolvedNeighborEntry(const NeighborEntry &); - void addZeroMacTunnelRoute(const NeighborEntry &, const MacAddress &); + bool addZeroMacTunnelRoute(const NeighborEntry &, const MacAddress &); }; #endif /* SWSS_NEIGHORCH_H */ diff --git a/orchagent/nhgbase.h b/orchagent/nhgbase.h index 65f0690555..1dbf2f7762 100644 --- a/orchagent/nhgbase.h +++ b/orchagent/nhgbase.h @@ -451,11 +451,6 @@ class NhgOrchCommon : public Orch } inline void decSyncedNhgCount() { NhgBase::decSyncedCount(); } - /* Handling SAI status*/ - using Orch::handleSaiCreateStatus; - using Orch::handleSaiRemoveStatus; - using Orch::parseHandleSaiStatusFailure; - protected: /* * Map of synced next hop groups. diff --git a/orchagent/nhgorch.cpp b/orchagent/nhgorch.cpp index 32ddb27eb5..cefc2efbb1 100644 --- a/orchagent/nhgorch.cpp +++ b/orchagent/nhgorch.cpp @@ -576,10 +576,10 @@ bool NextHopGroup::sync() SWSS_LOG_ERROR("Failed to create next hop group %s, rv:%d", m_key.to_string().c_str(), status); - task_process_status handle_status = gNhgOrch->handleSaiCreateStatus(SAI_API_NEXT_HOP_GROUP, status); + task_process_status handle_status = handleSaiCreateStatus(SAI_API_NEXT_HOP_GROUP, status); if (handle_status != task_success) { - return gNhgOrch->parseHandleSaiStatusFailure(handle_status); + return parseHandleSaiStatusFailure(handle_status); } } diff --git a/orchagent/notifications.cpp b/orchagent/notifications.cpp index 1a49526370..9455620fb5 100644 --- a/orchagent/notifications.cpp +++ b/orchagent/notifications.cpp @@ -5,6 +5,10 @@ extern "C" { #include "logger.h" #include "notifications.h" +#ifdef ASAN_ENABLED +#include +#endif + void on_fdb_event(uint32_t count, sai_fdb_event_notification_data_t *data) { // don't use this event handler, because it runs by libsairedis in a separate thread @@ -23,12 +27,35 @@ void on_bfd_session_state_change(uint32_t count, sai_bfd_session_state_notificat 
// which causes concurrency access to the DB } -void on_switch_shutdown_request() +void on_twamp_session_event(uint32_t count, sai_twamp_session_event_notification_data_t *data) +{ + // don't use this event handler, because it runs by libsairedis in a separate thread + // which causes concurrency access to the DB +} + +void on_switch_shutdown_request(sai_object_id_t switch_id) { SWSS_LOG_ENTER(); /* TODO: Later a better restart story will be told here */ SWSS_LOG_ERROR("Syncd stopped"); - exit(EXIT_FAILURE); + /* + The quick_exit() is used instead of the exit() to avoid a following data race: + * the exit() calls the destructors for global static variables (e.g.BufferOrch::m_buffer_type_maps) + * in parallel to that, orchagent accesses the global static variables + Since quick_exit doesn't call atexit() flows, the LSAN check is called explicitly via __lsan_do_leak_check() + */ + +#ifdef ASAN_ENABLED + __lsan_do_leak_check(); +#endif + + quick_exit(EXIT_FAILURE); +} + +void on_port_host_tx_ready(sai_object_id_t switch_id, sai_object_id_t port_id, sai_port_host_tx_ready_status_t m_portHostTxReadyStatus) +{ + // don't use this event handler, because it runs by libsairedis in a separate thread + // which causes concurrency access to the DB } diff --git a/orchagent/notifications.h b/orchagent/notifications.h index ea22593a1f..403b358a12 100644 --- a/orchagent/notifications.h +++ b/orchagent/notifications.h @@ -7,4 +7,9 @@ extern "C" { void on_fdb_event(uint32_t count, sai_fdb_event_notification_data_t *data); void on_port_state_change(uint32_t count, sai_port_oper_status_notification_t *data); void on_bfd_session_state_change(uint32_t count, sai_bfd_session_state_notification_t *data); -void on_switch_shutdown_request(); +void on_twamp_session_event(uint32_t count, sai_twamp_session_event_notification_data_t *data); + +// The function prototype information can be found here: +// 
https://github.com/sonic-net/sonic-sairedis/blob/master/meta/NotificationSwitchShutdownRequest.cpp#L49 +void on_switch_shutdown_request(sai_object_id_t switch_id); +void on_port_host_tx_ready(sai_object_id_t switch_id, sai_object_id_t port_id, sai_port_host_tx_ready_status_t m_portHostTxReadyStatus); diff --git a/orchagent/orch.cpp b/orchagent/orch.cpp index f04a438a5d..d1cbdb89c8 100644 --- a/orchagent/orch.cpp +++ b/orchagent/orch.cpp @@ -1,7 +1,4 @@ -#include -#include #include -#include #include #include #include "timestamp.h" @@ -12,16 +9,13 @@ #include "tokenize.h" #include "logger.h" #include "consumerstatetable.h" +#include "zmqserver.h" +#include "zmqconsumerstatetable.h" #include "sai_serialize.h" using namespace swss; -extern int gBatchSize; - -extern bool gSwssRecord; -extern ofstream gRecordOfs; -extern bool gLogRotate; -extern string gRecordFile; +int gBatchSize = 0; Orch::Orch(DBConnector *db, const string tableName, int pri) { @@ -52,12 +46,8 @@ Orch::Orch(const vector& tables) } } -Orch::~Orch() +Orch::Orch() { - if (gRecordOfs.is_open()) - { - gRecordOfs.close(); - } } vector Orch::getSelectables() @@ -70,19 +60,15 @@ vector Orch::getSelectables() return selectables; } -void Consumer::addToSync(const KeyOpFieldsValuesTuple &entry) +void ConsumerBase::addToSync(const KeyOpFieldsValuesTuple &entry) { SWSS_LOG_ENTER(); - string key = kfvKey(entry); string op = kfvOp(entry); /* Record incoming tasks */ - if (gSwssRecord) - { - Orch::recordTuple(*this, entry); - } + Recorder::Instance().swss.record(dumpTuple(entry)); /* * m_toSync is a multimap which will allow one key with multiple values, @@ -157,7 +143,7 @@ void Consumer::addToSync(const KeyOpFieldsValuesTuple &entry) } -size_t Consumer::addToSync(const std::deque &entries) +size_t ConsumerBase::addToSync(const std::deque &entries) { SWSS_LOG_ENTER(); @@ -170,7 +156,7 @@ size_t Consumer::addToSync(const std::deque &entries) } // TODO: Table should be const -size_t Consumer::refillToSync(Table* 
table) +size_t ConsumerBase::refillToSync(Table* table) { std::deque entries; vector keys; @@ -192,11 +178,9 @@ size_t Consumer::refillToSync(Table* table) return addToSync(entries); } -size_t Consumer::refillToSync() +size_t ConsumerBase::refillToSync() { - ConsumerTableBase *consumerTable = getConsumerTable(); - - auto subTable = dynamic_cast(consumerTable); + auto subTable = dynamic_cast(getSelectable()); if (subTable != NULL) { size_t update_size = 0; @@ -210,38 +194,26 @@ size_t Consumer::refillToSync() } while (update_size != 0); return total_size; } - else + string tableName = getTableName(); + auto consumerTable = dynamic_cast(getSelectable()); + if (consumerTable != NULL) { // consumerTable is either ConsumerStateTable or ConsumerTable auto db = consumerTable->getDbConnector(); - string tableName = consumerTable->getTableName(); auto table = Table(db, tableName); return refillToSync(&table); } -} - -void Consumer::execute() -{ - SWSS_LOG_ENTER(); - - size_t update_size = 0; - do + auto zmqTable = dynamic_cast(getSelectable()); + if (zmqTable != NULL) { - std::deque entries; - getConsumerTable()->pops(entries); - update_size = addToSync(entries); - } while (update_size != 0); - - drain(); -} - -void Consumer::drain() -{ - if (!m_toSync.empty()) - m_orch->doTask(*this); + auto db = zmqTable->getDbConnector(); + auto table = Table(db, tableName); + return refillToSync(&table); + } + return 0; } -string Consumer::dumpTuple(const KeyOpFieldsValuesTuple &tuple) +string ConsumerBase::dumpTuple(const KeyOpFieldsValuesTuple &tuple) { string s = getTableName() + getConsumerTable()->getTableNameSeparator() + kfvKey(tuple) + "|" + kfvOp(tuple); @@ -253,7 +225,7 @@ string Consumer::dumpTuple(const KeyOpFieldsValuesTuple &tuple) return s; } -void Consumer::dumpPendingTasks(vector &ts) +void ConsumerBase::dumpPendingTasks(vector &ts) { for (auto &tm : m_toSync) { @@ -265,9 +237,32 @@ void Consumer::dumpPendingTasks(vector &ts) } } +void Consumer::execute() +{ + // 
ConsumerBase::execute_impl(); + SWSS_LOG_ENTER(); + + size_t update_size = 0; + auto table = static_cast(getSelectable()); + do + { + std::deque entries; + table->pops(entries); + update_size = addToSync(entries); + } while (update_size != 0); + + drain(); +} + +void Consumer::drain() +{ + if (!m_toSync.empty()) + ((Orch *)m_orch)->doTask((Consumer&)*this); +} + size_t Orch::addExistingData(const string& tableName) { - auto consumer = dynamic_cast(getExecutor(tableName)); + auto consumer = dynamic_cast(getExecutor(tableName)); if (consumer == NULL) { SWSS_LOG_ERROR("No consumer %s in Orch", tableName.c_str()); @@ -281,7 +276,7 @@ size_t Orch::addExistingData(const string& tableName) size_t Orch::addExistingData(Table *table) { string tableName = table->getTableName(); - Consumer* consumer = dynamic_cast(getExecutor(tableName)); + ConsumerBase* consumer = dynamic_cast(getExecutor(tableName)); if (consumer == NULL) { SWSS_LOG_ERROR("No consumer %s in Orch", tableName.c_str()); @@ -299,7 +294,7 @@ bool Orch::bake() { string executorName = it.first; auto executor = it.second; - auto consumer = dynamic_cast(executor.get()); + auto consumer = dynamic_cast(executor.get()); if (consumer == NULL) { continue; @@ -393,7 +388,7 @@ ref_resolve_status Orch::resolveFieldRefValue( { return ref_resolve_status::not_resolved; } - else if (ref_type_name.empty() && object_name.empty()) + else if (object_name.empty()) { return ref_resolve_status::empty; } @@ -551,7 +546,7 @@ void Orch::dumpPendingTasks(vector &ts) { for (auto &it : m_consumerMap) { - Consumer* consumer = dynamic_cast(it.second.get()); + ConsumerBase* consumer = dynamic_cast(it.second.get()); if (consumer == NULL) { SWSS_LOG_DEBUG("Executor is not a Consumer"); @@ -562,43 +557,9 @@ void Orch::dumpPendingTasks(vector &ts) } } -void Orch::logfileReopen() -{ - gRecordOfs.close(); - - /* - * On log rotate we will use the same file name, we are assuming that - * logrotate daemon move filename to filename.1 and we will create 
new - * empty file here. - */ - - gRecordOfs.open(gRecordFile, std::ofstream::out | std::ofstream::app); - - if (!gRecordOfs.is_open()) - { - SWSS_LOG_ERROR("failed to open gRecordOfs file %s: %s", gRecordFile.c_str(), strerror(errno)); - return; - } -} - -void Orch::recordTuple(Consumer &consumer, const KeyOpFieldsValuesTuple &tuple) -{ - string s = consumer.dumpTuple(tuple); - - gRecordOfs << getTimestamp() << "|" << s << endl; - - if (gLogRotate) - { - gLogRotate = false; - - logfileReopen(); - } -} - -string Orch::dumpTuple(Consumer &consumer, const KeyOpFieldsValuesTuple &tuple) +void Orch::flushResponses() { - string s = consumer.dumpTuple(tuple); - return s; + m_publisher.flush(); } ref_resolve_status Orch::resolveFieldRefArray( @@ -736,7 +697,7 @@ set Orch::generateIdListFromMap(unsigned long idsMap, sai_uint32_t maxId { unsigned long currentIdMask = 1; bool started = false, needGenerateMap = false; - sai_uint32_t lower, upper; + sai_uint32_t lower = 0, upper = 0; set idStringList; for (sai_uint32_t id = 0; id <= maxId; id ++) { @@ -856,193 +817,6 @@ Executor *Orch::getExecutor(string executorName) return NULL; } -task_process_status Orch::handleSaiCreateStatus(sai_api_t api, sai_status_t status, void *context) -{ - /* - * This function aims to provide coarse handling of failures in sairedis create - * operation (i.e., notify users by throwing excepions when failures happen). - * Return value: task_success - Handled the status successfully. No need to retry this SAI operation. - * task_need_retry - Cannot handle the status. Need to retry the SAI operation. - * task_failed - Failed to handle the status but another attempt is unlikely to resolve the failure. - * TODO: 1. Add general handling logic for specific statuses (e.g., SAI_STATUS_ITEM_ALREADY_EXISTS) - * 2. Develop fine-grain failure handling mechanisms and replace this coarse handling - * in each orch. - * 3. Take the type of sai api into consideration. 
- */ - switch (api) - { - case SAI_API_FDB: - switch (status) - { - case SAI_STATUS_SUCCESS: - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiCreateStatus"); - return task_success; - case SAI_STATUS_ITEM_ALREADY_EXISTS: - /* - * In FDB creation, there are scenarios where the hardware learns an FDB entry before orchagent. - * In such cases, the FDB SAI creation would report the status of SAI_STATUS_ITEM_ALREADY_EXISTS, - * and orchagent should ignore the error and treat it as entry was explicitly created. - */ - return task_success; - default: - SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - abort(); - } - break; - case SAI_API_HOSTIF: - switch (status) - { - case SAI_STATUS_SUCCESS: - return task_success; - case SAI_STATUS_FAILURE: - /* - * Host interface maybe failed due to lane not available. - * In some scenarios, like SONiC virtual machine, the invalid lane may be not enabled by VM configuration, - * So just ignore the failure and report an error log. 
- */ - return task_ignore; - default: - SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - abort(); - } - default: - switch (status) - { - case SAI_STATUS_SUCCESS: - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiCreateStatus"); - return task_success; - default: - SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - abort(); - } - } - return task_need_retry; -} - -task_process_status Orch::handleSaiSetStatus(sai_api_t api, sai_status_t status, void *context) -{ - /* - * This function aims to provide coarse handling of failures in sairedis set - * operation (i.e., notify users by throwing excepions when failures happen). - * Return value: task_success - Handled the status successfully. No need to retry this SAI operation. - * task_need_retry - Cannot handle the status. Need to retry the SAI operation. - * task_failed - Failed to handle the status but another attempt is unlikely to resolve the failure. - * TODO: 1. Add general handling logic for specific statuses - * 2. Develop fine-grain failure handling mechanisms and replace this coarse handling - * in each orch. - * 3. Take the type of sai api into consideration. - */ - if (status == SAI_STATUS_SUCCESS) - { - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiSetStatus"); - return task_success; - } - - switch (api) - { - case SAI_API_PORT: - switch (status) - { - case SAI_STATUS_INVALID_ATTR_VALUE_0: - /* - * If user gives an invalid attribute value, no need to retry or exit orchagent, just fail the current task - * and let user correct the configuration. 
- */ - SWSS_LOG_ERROR("Encountered SAI_STATUS_INVALID_ATTR_VALUE_0 in set operation, task failed, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - return task_failed; - default: - SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - abort(); - } - default: - SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - abort(); - } - - return task_need_retry; -} - -task_process_status Orch::handleSaiRemoveStatus(sai_api_t api, sai_status_t status, void *context) -{ - /* - * This function aims to provide coarse handling of failures in sairedis remove - * operation (i.e., notify users by throwing excepions when failures happen). - * Return value: task_success - Handled the status successfully. No need to retry this SAI operation. - * task_need_retry - Cannot handle the status. Need to retry the SAI operation. - * task_failed - Failed to handle the status but another attempt is unlikely to resolve the failure. - * TODO: 1. Add general handling logic for specific statuses (e.g., SAI_STATUS_OBJECT_IN_USE, - * SAI_STATUS_ITEM_NOT_FOUND) - * 2. Develop fine-grain failure handling mechanisms and replace this coarse handling - * in each orch. - * 3. Take the type of sai api into consideration. 
- */ - switch (status) - { - case SAI_STATUS_SUCCESS: - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiRemoveStatus"); - return task_success; - default: - SWSS_LOG_ERROR("Encountered failure in remove operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - abort(); - } - return task_need_retry; -} - -task_process_status Orch::handleSaiGetStatus(sai_api_t api, sai_status_t status, void *context) -{ - /* - * This function aims to provide coarse handling of failures in sairedis get - * operation (i.e., notify users by throwing excepions when failures happen). - * Return value: task_success - Handled the status successfully. No need to retry this SAI operation. - * task_need_retry - Cannot handle the status. Need to retry the SAI operation. - * task_failed - Failed to handle the status but another attempt is unlikely to resolve the failure. - * TODO: 1. Add general handling logic for specific statuses - * 2. Develop fine-grain failure handling mechanisms and replace this coarse handling - * in each orch. - * 3. Take the type of sai api into consideration. - */ - switch (status) - { - case SAI_STATUS_SUCCESS: - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiGetStatus"); - return task_success; - case SAI_STATUS_NOT_IMPLEMENTED: - SWSS_LOG_ERROR("Encountered failure in get operation due to the function is not implemented, exiting orchagent, SAI API: %s", - sai_serialize_api(api).c_str()); - throw std::logic_error("SAI get function not implemented"); - default: - SWSS_LOG_ERROR("Encountered failure in get operation, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - } - return task_failed; -} - -bool Orch::parseHandleSaiStatusFailure(task_process_status status) -{ - /* - * This function parses task process status from SAI failure handling function to whether a retry is needed. - * Return value: true - no retry is needed. 
- * false - retry is needed. - */ - switch (status) - { - case task_need_retry: - return false; - case task_failed: - return true; - default: - SWSS_LOG_WARN("task_process_status %d is not expected in parseHandleSaiStatusFailure", status); - } - return true; -} - void Orch2::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); diff --git a/orchagent/orch.h b/orchagent/orch.h index 7e803bbd93..6e4702ce3d 100644 --- a/orchagent/orch.h +++ b/orchagent/orch.h @@ -9,18 +9,21 @@ #include extern "C" { -#include "sai.h" -#include "saistatus.h" +#include +#include } #include "dbconnector.h" #include "table.h" #include "consumertable.h" #include "consumerstatetable.h" +#include "zmqconsumerstatetable.h" +#include "zmqserver.h" #include "notificationconsumer.h" #include "selectabletimer.h" #include "macaddress.h" #include "response_publisher.h" +#include "recorder.h" const char delimiter = ':'; const char list_item_delimiter = ','; @@ -71,7 +74,7 @@ typedef struct } referenced_object; typedef std::map object_reference_map; -typedef std::map type_map; +typedef std::map> type_map; typedef std::map object_map; typedef std::pair object_map_pair; @@ -132,49 +135,70 @@ class Executor : public swss::Selectable swss::Selectable *getSelectable() const { return m_selectable; } }; -class Consumer : public Executor { +class ConsumerBase : public Executor { public: - Consumer(swss::ConsumerTableBase *select, Orch *orch, const std::string &name) - : Executor(select, orch, name) + ConsumerBase(swss::Selectable *selectable, Orch *orch, const std::string &name) + : Executor(selectable, orch, name) { } - swss::ConsumerTableBase *getConsumerTable() const - { - return static_cast(getSelectable()); - } + virtual swss::TableBase *getConsumerTable() const = 0; std::string getTableName() const { return getConsumerTable()->getTableName(); } - int getDbId() const - { - return getConsumerTable()->getDbConnector()->getDbId(); - } - - std::string getDbName() const - { - return 
getConsumerTable()->getDbConnector()->getDbName(); - } - std::string dumpTuple(const swss::KeyOpFieldsValuesTuple &tuple); void dumpPendingTasks(std::vector &ts); - size_t refillToSync(); - size_t refillToSync(swss::Table* table); - void execute(); - void drain(); - /* Store the latest 'golden' status */ // TODO: hide? SyncMap m_toSync; + /* record the tuple */ + void recordTuple(const swss::KeyOpFieldsValuesTuple &tuple); + void addToSync(const swss::KeyOpFieldsValuesTuple &entry); // Returns: the number of entries added to m_toSync size_t addToSync(const std::deque &entries); + + size_t refillToSync(); + size_t refillToSync(swss::Table* table); +}; + +class Consumer : public ConsumerBase { +public: + Consumer(swss::ConsumerTableBase *select, Orch *orch, const std::string &name) + : ConsumerBase(select, orch, name) + { + } + + swss::TableBase *getConsumerTable() const override + { + // ConsumerTableBase is a subclass of TableBase + return static_cast(getSelectable()); + } + + const swss::DBConnector* getDbConnector() const + { + auto table = static_cast(getSelectable()); + return table->getDbConnector(); + } + + int getDbId() const + { + return getDbConnector()->getDbId(); + } + + std::string getDbName() const + { + return getDbConnector()->getDbName(); + } + + void execute() override; + void drain() override; }; typedef std::map> ConsumerMap; @@ -199,7 +223,7 @@ class Orch Orch(swss::DBConnector *db, const std::vector &tableNames); Orch(swss::DBConnector *db, const std::vector &tableNameWithPri); Orch(const std::vector& tables); - virtual ~Orch(); + virtual ~Orch() = default; std::vector getSelectables(); @@ -215,19 +239,20 @@ class Orch virtual void doTask(); /* Run doTask against a specific executor */ - virtual void doTask(Consumer &consumer) = 0; + virtual void doTask(Consumer &consumer) { }; virtual void doTask(swss::NotificationConsumer &consumer) { } virtual void doTask(swss::SelectableTimer &timer) { } - /* TODO: refactor recording */ - static void 
recordTuple(Consumer &consumer, const swss::KeyOpFieldsValuesTuple &tuple); - void dumpPendingTasks(std::vector &ts); + + /** + * @brief Flush pending responses + */ + void flushResponses(); protected: ConsumerMap m_consumerMap; - static void logfileReopen(); - std::string dumpTuple(Consumer &consumer, const swss::KeyOpFieldsValuesTuple &tuple); + Orch(); ref_resolve_status resolveFieldRefValue(type_map&, const std::string&, const std::string&, swss::KeyOpFieldsValuesTuple&, sai_object_id_t&, std::string&); std::set generateIdListFromMap(unsigned long idsMap, sai_uint32_t maxId); unsigned long generateBitMapFromIdsStr(const std::string &idsStr); @@ -246,13 +271,6 @@ class Orch void addExecutor(Executor* executor); Executor *getExecutor(std::string executorName); - /* Handling SAI status*/ - virtual task_process_status handleSaiCreateStatus(sai_api_t api, sai_status_t status, void *context = nullptr); - virtual task_process_status handleSaiSetStatus(sai_api_t api, sai_status_t status, void *context = nullptr); - virtual task_process_status handleSaiRemoveStatus(sai_api_t api, sai_status_t status, void *context = nullptr); - virtual task_process_status handleSaiGetStatus(sai_api_t api, sai_status_t status, void *context = nullptr); - bool parseHandleSaiStatusFailure(task_process_status status); - ResponsePublisher m_publisher; private: void addConsumer(swss::DBConnector *db, std::string tableName, int pri = default_orch_pri); diff --git a/orchagent/orchdaemon.cpp b/orchagent/orchdaemon.cpp index 1cc3127c31..63fd037fa6 100644 --- a/orchagent/orchdaemon.cpp +++ b/orchagent/orchdaemon.cpp @@ -6,6 +6,7 @@ #include "logger.h" #include #include "warm_restart.h" +#include #define SAI_SWITCH_ATTR_CUSTOM_RANGE_BASE SAI_SWITCH_ATTR_CUSTOM_RANGE_START #include "sairedis.h" @@ -18,9 +19,12 @@ using namespace swss; #define SELECT_TIMEOUT 1000 #define PFC_WD_POLL_MSECS 100 +/* orchagent heart beat message interval */ +#define HEART_BEAT_INTERVAL_MSECS 10 * 1000 + extern 
sai_switch_api_t* sai_switch_api; extern sai_object_id_t gSwitchId; -extern bool gSaiRedisLogRotate; +extern string gMySwitchType; extern void syncd_apply_view(); /* @@ -55,20 +59,24 @@ BfdOrch *gBfdOrch; Srv6Orch *gSrv6Orch; FlowCounterRouteOrch *gFlowCounterRouteOrch; DebugCounterOrch *gDebugCounterOrch; +MonitorOrch *gMonitorOrch; bool gIsNatSupported = false; +event_handle_t g_events_handle; #define DEFAULT_MAX_BULK_SIZE 1000 size_t gMaxBulkSize = DEFAULT_MAX_BULK_SIZE; -OrchDaemon::OrchDaemon(DBConnector *applDb, DBConnector *configDb, DBConnector *stateDb, DBConnector *chassisAppDb) : +OrchDaemon::OrchDaemon(DBConnector *applDb, DBConnector *configDb, DBConnector *stateDb, DBConnector *chassisAppDb, ZmqServer *zmqServer) : m_applDb(applDb), m_configDb(configDb), m_stateDb(stateDb), - m_chassisAppDb(chassisAppDb) + m_chassisAppDb(chassisAppDb), + m_zmqServer(zmqServer) { SWSS_LOG_ENTER(); m_select = new Select(); + m_lastHeartBeat = std::chrono::high_resolution_clock::now(); } OrchDaemon::~OrchDaemon() @@ -89,6 +97,8 @@ OrchDaemon::~OrchDaemon() delete(*it); } delete m_select; + + events_deinit_publisher(g_events_handle); } bool OrchDaemon::init() @@ -97,13 +107,17 @@ bool OrchDaemon::init() string platform = getenv("platform") ? 
getenv("platform") : ""; + g_events_handle = events_init_publisher("sonic-events-swss"); + gCrmOrch = new CrmOrch(m_configDb, CFG_CRM_TABLE_NAME); - TableConnector stateDbSwitchTable(m_stateDb, "SWITCH_CAPABILITY"); + TableConnector stateDbSwitchTable(m_stateDb, STATE_SWITCH_CAPABILITY_TABLE_NAME); TableConnector app_switch_table(m_applDb, APP_SWITCH_TABLE_NAME); TableConnector conf_asic_sensors(m_configDb, CFG_ASIC_SENSORS_TABLE_NAME); + TableConnector conf_switch_hash(m_configDb, CFG_SWITCH_HASH_TABLE_NAME); vector switch_tables = { + conf_switch_hash, conf_asic_sensors, app_switch_table }; @@ -114,6 +128,7 @@ bool OrchDaemon::init() vector ports_tables = { { APP_PORT_TABLE_NAME, portsorch_base_pri + 5 }, + { APP_SEND_TO_INGRESS_PORT_TABLE_NAME, portsorch_base_pri + 5 }, { APP_VLAN_TABLE_NAME, portsorch_base_pri + 2 }, { APP_VLAN_MEMBER_TABLE_NAME, portsorch_base_pri }, { APP_LAG_TABLE_NAME, portsorch_base_pri + 4 }, @@ -130,9 +145,15 @@ bool OrchDaemon::init() TableConnector stateDbFdb(m_stateDb, STATE_FDB_TABLE_NAME); TableConnector stateMclagDbFdb(m_stateDb, STATE_MCLAG_REMOTE_FDB_TABLE_NAME); gFdbOrch = new FdbOrch(m_applDb, app_fdb_tables, stateDbFdb, stateMclagDbFdb, gPortsOrch); + TableConnector stateDbBfdSessionTable(m_stateDb, STATE_BFD_SESSION_TABLE_NAME); - gBfdOrch = new BfdOrch(m_applDb, APP_BFD_SESSION_TABLE_NAME, stateDbBfdSessionTable); + BgpGlobalStateOrch* bgp_global_state_orch; + bgp_global_state_orch = new BgpGlobalStateOrch(m_configDb, CFG_BGP_DEVICE_GLOBAL_TABLE_NAME); + gDirectory.set(bgp_global_state_orch); + + gBfdOrch = new BfdOrch(m_applDb, APP_BFD_SESSION_TABLE_NAME, stateDbBfdSessionTable); + gDirectory.set(gBfdOrch); static const vector route_pattern_tables = { CFG_FLOW_COUNTER_ROUTE_PATTERN_TABLE_NAME, }; @@ -159,6 +180,8 @@ bool OrchDaemon::init() gDirectory.set(vnet_rt_orch); VRFOrch *vrf_orch = new VRFOrch(m_applDb, APP_VRF_TABLE_NAME, m_stateDb, STATE_VRF_OBJECT_TABLE_NAME); gDirectory.set(vrf_orch); + gMonitorOrch = new 
MonitorOrch(m_stateDb, STATE_VNET_MONITOR_TABLE_NAME); + gDirectory.set(gMonitorOrch); const vector chassis_frontend_tables = { CFG_PASS_THROUGH_ROUTE_TABLE_NAME, @@ -215,6 +238,41 @@ bool OrchDaemon::init() NvgreTunnelMapOrch *nvgre_tunnel_map_orch = new NvgreTunnelMapOrch(m_configDb, CFG_NVGRE_TUNNEL_MAP_TABLE_NAME); gDirectory.set(nvgre_tunnel_map_orch); + vector dash_vnet_tables = { + APP_DASH_VNET_TABLE_NAME, + APP_DASH_VNET_MAPPING_TABLE_NAME + }; + DashVnetOrch *dash_vnet_orch = new DashVnetOrch(m_applDb, dash_vnet_tables, m_zmqServer); + gDirectory.set(dash_vnet_orch); + + vector dash_tables = { + APP_DASH_APPLIANCE_TABLE_NAME, + APP_DASH_ROUTING_TYPE_TABLE_NAME, + APP_DASH_ENI_TABLE_NAME, + APP_DASH_QOS_TABLE_NAME + }; + + DashOrch *dash_orch = new DashOrch(m_applDb, dash_tables, m_zmqServer); + gDirectory.set(dash_orch); + + vector dash_route_tables = { + APP_DASH_ROUTE_TABLE_NAME, + APP_DASH_ROUTE_RULE_TABLE_NAME + }; + + DashRouteOrch *dash_route_orch = new DashRouteOrch(m_applDb, dash_route_tables, dash_orch, m_zmqServer); + gDirectory.set(dash_route_orch); + + vector dash_acl_tables = { + APP_DASH_PREFIX_TAG_TABLE_NAME, + APP_DASH_ACL_IN_TABLE_NAME, + APP_DASH_ACL_OUT_TABLE_NAME, + APP_DASH_ACL_GROUP_TABLE_NAME, + APP_DASH_ACL_RULE_TABLE_NAME + }; + DashAclOrch *dash_acl_orch = new DashAclOrch(m_applDb, dash_acl_tables, dash_orch, m_zmqServer); + gDirectory.set(dash_acl_orch); + vector qos_tables = { CFG_TC_TO_QUEUE_MAP_TABLE_NAME, CFG_SCHEDULER_TABLE_NAME, @@ -229,6 +287,7 @@ bool OrchDaemon::init() CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, CFG_DSCP_TO_FC_MAP_TABLE_NAME, CFG_EXP_TO_FC_MAP_TABLE_NAME, + CFG_TC_TO_DOT1P_MAP_TABLE_NAME, CFG_TC_TO_DSCP_MAP_TABLE_NAME }; gQosOrch = new QosOrch(m_configDb, qos_tables); @@ -346,7 +405,7 @@ bool OrchDaemon::init() * when iterating ConsumerMap. This is ensured implicitly by the order of keys in ordered map. 
* For cases when Orch has to process tables in specific order, like PortsOrch during warm start, it has to override Orch::doTask() */ - m_orchList = { gSwitchOrch, gCrmOrch, gPortsOrch, gBufferOrch, gFlowCounterRouteOrch, gIntfsOrch, gNeighOrch, gNhgMapOrch, gNhgOrch, gCbfNhgOrch, gRouteOrch, gCoppOrch, gQosOrch, wm_orch, gPolicerOrch, tunnel_decap_orch, sflow_orch, gDebugCounterOrch, gMacsecOrch, gBfdOrch, gSrv6Orch, mux_orch, mux_cb_orch}; + m_orchList = { gSwitchOrch, gCrmOrch, gPortsOrch, gBufferOrch, gFlowCounterRouteOrch, gIntfsOrch, gNeighOrch, gNhgMapOrch, gNhgOrch, gCbfNhgOrch, gRouteOrch, gCoppOrch, gQosOrch, wm_orch, gPolicerOrch, tunnel_decap_orch, sflow_orch, gDebugCounterOrch, gMacsecOrch, bgp_global_state_orch, gBfdOrch, gSrv6Orch, mux_orch, mux_cb_orch, gMonitorOrch}; bool initialize_dtel = false; if (platform == BFN_PLATFORM_SUBSTRING || platform == VS_PLATFORM_SUBSTRING) @@ -447,13 +506,17 @@ bool OrchDaemon::init() m_orchList.push_back(mux_st_orch); m_orchList.push_back(nvgre_tunnel_orch); m_orchList.push_back(nvgre_tunnel_map_orch); + m_orchList.push_back(dash_acl_orch); + m_orchList.push_back(dash_vnet_orch); + m_orchList.push_back(dash_route_orch); + m_orchList.push_back(dash_orch); if (m_fabricEnabled) { vector fabric_port_tables = { // empty for now }; - gFabricPortsOrch = new FabricPortsOrch(m_applDb, fabric_port_tables); + gFabricPortsOrch = new FabricPortsOrch(m_applDb, fabric_port_tables, m_fabricPortStatEnabled, m_fabricQueueStatEnabled); m_orchList.push_back(gFabricPortsOrch); } @@ -599,7 +662,7 @@ bool OrchDaemon::init() if(gSwitchOrch->checkPfcDlrInitEnable()) { - m_orchList.push_back(new PfcWdSwOrch( + m_orchList.push_back(new PfcWdSwOrch( m_configDb, pfc_wd_tables, portStatIds, @@ -619,7 +682,25 @@ bool OrchDaemon::init() } } else if (platform == CISCO_8000_PLATFORM_SUBSTRING) { - static const vector portStatIds; + static const vector portStatIds = + { + SAI_PORT_STAT_PFC_0_RX_PKTS, + SAI_PORT_STAT_PFC_1_RX_PKTS, + 
SAI_PORT_STAT_PFC_2_RX_PKTS, + SAI_PORT_STAT_PFC_3_RX_PKTS, + SAI_PORT_STAT_PFC_4_RX_PKTS, + SAI_PORT_STAT_PFC_5_RX_PKTS, + SAI_PORT_STAT_PFC_6_RX_PKTS, + SAI_PORT_STAT_PFC_7_RX_PKTS, + SAI_PORT_STAT_PFC_0_TX_PKTS, + SAI_PORT_STAT_PFC_1_TX_PKTS, + SAI_PORT_STAT_PFC_2_TX_PKTS, + SAI_PORT_STAT_PFC_3_TX_PKTS, + SAI_PORT_STAT_PFC_4_TX_PKTS, + SAI_PORT_STAT_PFC_5_TX_PKTS, + SAI_PORT_STAT_PFC_6_TX_PKTS, + SAI_PORT_STAT_PFC_7_TX_PKTS, + }; static const vector queueStatIds = { @@ -646,6 +727,11 @@ bool OrchDaemon::init() gP4Orch = new P4Orch(m_applDb, p4rt_tables, vrf_orch, gCoppOrch); m_orchList.push_back(gP4Orch); + TableConnector confDbTwampTable(m_configDb, CFG_TWAMP_SESSION_TABLE_NAME); + TableConnector stateDbTwampTable(m_stateDb, STATE_TWAMP_SESSION_TABLE_NAME); + TwampOrch *twamp_orch = new TwampOrch(confDbTwampTable, stateDbTwampTable, gSwitchOrch, gPortsOrch, vrf_orch); + m_orchList.push_back(twamp_orch); + if (WarmStart::isWarmStart()) { bool suc = warmRestoreAndSyncUp(); @@ -669,27 +755,35 @@ void OrchDaemon::flush() if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to flush redis pipeline %d", status); - abort(); + handleSaiFailure(true); } - // check if logroate is requested - if (gSaiRedisLogRotate) + for (auto* orch: m_orchList) { - SWSS_LOG_NOTICE("performing log rotate"); - - gSaiRedisLogRotate = false; - - attr.id = SAI_REDIS_SWITCH_ATTR_PERFORM_LOG_ROTATE; - attr.value.booldata = true; + orch->flushResponses(); + } +} - sai_switch_api->set_switch_attribute(gSwitchId, &attr); +/* Release the file handle so the log can be rotated */ +void OrchDaemon::logRotate() { + SWSS_LOG_ENTER(); + sai_attribute_t attr; + attr.id = SAI_REDIS_SWITCH_ATTR_PERFORM_LOG_ROTATE; + attr.value.booldata = true; + sai_status_t status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to release the file handle on sairedis log %d", status); } } + void OrchDaemon::start() { SWSS_LOG_ENTER(); + 
Recorder::Instance().sairedis.setRotate(false); + for (Orch *o : m_orchList) { m_select->addSelectables(o->getSelectables()); @@ -705,6 +799,7 @@ void OrchDaemon::start() ret = m_select->select(&s, SELECT_TIMEOUT); auto tend = std::chrono::high_resolution_clock::now(); + heartBeat(tend); auto diff = std::chrono::duration_cast(tend - tstart); @@ -732,6 +827,14 @@ void OrchDaemon::start() continue; } + // check if logroate is requested + if (Recorder::Instance().sairedis.isRotate()) + { + SWSS_LOG_NOTICE("Performing %s log rotate", Recorder::Instance().sairedis.getName().c_str()); + Recorder::Instance().sairedis.setRotate(false); + logRotate(); + } + auto *c = (Executor *)s; c->execute(); @@ -773,7 +876,7 @@ void OrchDaemon::start() flush(); SWSS_LOG_WARN("Orchagent is frozen for warm restart!"); - sleep(UINT_MAX); + freezeAndHeartBeat(UINT_MAX); } } } @@ -930,8 +1033,33 @@ void OrchDaemon::addOrchList(Orch *o) m_orchList.push_back(o); } -FabricOrchDaemon::FabricOrchDaemon(DBConnector *applDb, DBConnector *configDb, DBConnector *stateDb, DBConnector *chassisAppDb) : - OrchDaemon(applDb, configDb, stateDb, chassisAppDb), +void OrchDaemon::heartBeat(std::chrono::time_point tcurrent) +{ + // output heart beat message to SYSLOG + auto diff = std::chrono::duration_cast(tcurrent - m_lastHeartBeat); + if (diff.count() >= HEART_BEAT_INTERVAL_MSECS) + { + m_lastHeartBeat = tcurrent; + // output heart beat message to supervisord with 'PROCESS_COMMUNICATION_STDOUT' event: http://supervisord.org/events.html + cout << "heartbeat" << endl; + } +} + +void OrchDaemon::freezeAndHeartBeat(unsigned int duration) +{ + while (duration > 0) + { + // Send heartbeat message to prevent Orchagent stuck alert. 
+ auto tend = std::chrono::high_resolution_clock::now(); + heartBeat(tend); + + duration--; + sleep(1); + } +} + +FabricOrchDaemon::FabricOrchDaemon(DBConnector *applDb, DBConnector *configDb, DBConnector *stateDb, DBConnector *chassisAppDb, ZmqServer *zmqServer) : + OrchDaemon(applDb, configDb, stateDb, chassisAppDb, zmqServer), m_applDb(applDb), m_configDb(configDb) { diff --git a/orchagent/orchdaemon.h b/orchagent/orchdaemon.h index def4b78629..2473848bf5 100644 --- a/orchagent/orchdaemon.h +++ b/orchagent/orchdaemon.h @@ -4,6 +4,7 @@ #include "dbconnector.h" #include "producerstatetable.h" #include "consumertable.h" +#include "zmqserver.h" #include "select.h" #include "portsorch.h" @@ -45,13 +46,19 @@ #include "bfdorch.h" #include "srv6orch.h" #include "nvgreorch.h" +#include "twamporch.h" +#include "dash/dashaclorch.h" +#include "dash/dashorch.h" +#include "dash/dashrouteorch.h" +#include "dash/dashvnetorch.h" +#include using namespace swss; class OrchDaemon { public: - OrchDaemon(DBConnector *, DBConnector *, DBConnector *, DBConnector *); + OrchDaemon(DBConnector *, DBConnector *, DBConnector *, DBConnector *, ZmqServer *); ~OrchDaemon(); virtual bool init(); @@ -67,24 +74,42 @@ class OrchDaemon { m_fabricEnabled = enabled; } + void setFabricPortStatEnabled(bool enabled) + { + m_fabricPortStatEnabled = enabled; + } + void setFabricQueueStatEnabled(bool enabled) + { + m_fabricQueueStatEnabled = enabled; + } + void logRotate(); private: DBConnector *m_applDb; DBConnector *m_configDb; DBConnector *m_stateDb; DBConnector *m_chassisAppDb; + ZmqServer *m_zmqServer; bool m_fabricEnabled = false; + bool m_fabricPortStatEnabled = true; + bool m_fabricQueueStatEnabled = true; std::vector m_orchList; Select *m_select; + + std::chrono::time_point m_lastHeartBeat; void flush(); + + void heartBeat(std::chrono::time_point tcurrent); + + void freezeAndHeartBeat(unsigned int duration); }; class FabricOrchDaemon : public OrchDaemon { public: - FabricOrchDaemon(DBConnector *, 
DBConnector *, DBConnector *, DBConnector *); + FabricOrchDaemon(DBConnector *, DBConnector *, DBConnector *, DBConnector *, ZmqServer *); bool init() override; private: DBConnector *m_applDb; diff --git a/orchagent/p4orch/acl_rule_manager.cpp b/orchagent/p4orch/acl_rule_manager.cpp index fb73cb0128..5131b718ac 100644 --- a/orchagent/p4orch/acl_rule_manager.cpp +++ b/orchagent/p4orch/acl_rule_manager.cpp @@ -1,5 +1,6 @@ #include "p4orch/acl_rule_manager.h" +#include #include #include #include @@ -9,7 +10,6 @@ #include "crmorch.h" #include "dbconnector.h" #include "intfsorch.h" -#include "json.hpp" #include "logger.h" #include "orch.h" #include "p4orch.h" @@ -165,7 +165,13 @@ std::vector getMeterSaiAttrs(const P4AclMeter &p4_acl_meter) } // namespace -void AclRuleManager::enqueue(const swss::KeyOpFieldsValuesTuple &entry) +ReturnCode AclRuleManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) +{ + return StatusCode::SWSS_RC_UNIMPLEMENTED; +} + +void AclRuleManager::enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) { m_entries.push_back(entry); } diff --git a/orchagent/p4orch/acl_rule_manager.h b/orchagent/p4orch/acl_rule_manager.h index 34cb8361c0..1e65ef7c8d 100644 --- a/orchagent/p4orch/acl_rule_manager.h +++ b/orchagent/p4orch/acl_rule_manager.h @@ -41,9 +41,11 @@ class AclRuleManager : public ObjectManagerInterface } virtual ~AclRuleManager() = default; - void enqueue(const swss::KeyOpFieldsValuesTuple &entry) override; + void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; void drain() override; std::string verifyState(const std::string &key, const std::vector &tuple) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) override; // Update counters stats for every rule in each ACL table in COUNTERS_DB, if // counters are enabled in rules. 
diff --git a/orchagent/p4orch/acl_table_manager.cpp b/orchagent/p4orch/acl_table_manager.cpp index 6412803c9f..4a3910992e 100644 --- a/orchagent/p4orch/acl_table_manager.cpp +++ b/orchagent/p4orch/acl_table_manager.cpp @@ -1,5 +1,6 @@ #include "p4orch/acl_table_manager.h" +#include #include #include #include @@ -7,7 +8,6 @@ #include "SaiAttributeList.h" #include "crmorch.h" #include "dbconnector.h" -#include "json.hpp" #include "logger.h" #include "orch.h" #include "p4orch.h" @@ -205,7 +205,13 @@ ReturnCodeOr> AclTableManager::getUdfSaiAttrs(const return udf_attrs; } -void AclTableManager::enqueue(const swss::KeyOpFieldsValuesTuple &entry) +ReturnCode AclTableManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) +{ + return StatusCode::SWSS_RC_UNIMPLEMENTED; +} + +void AclTableManager::enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) { m_entries.push_back(entry); } diff --git a/orchagent/p4orch/acl_table_manager.h b/orchagent/p4orch/acl_table_manager.h index f48d34c309..68cc1c9920 100644 --- a/orchagent/p4orch/acl_table_manager.h +++ b/orchagent/p4orch/acl_table_manager.h @@ -31,9 +31,11 @@ class AclTableManager : public ObjectManagerInterface explicit AclTableManager(P4OidMapper *p4oidMapper, ResponsePublisherInterface *publisher); virtual ~AclTableManager(); - void enqueue(const swss::KeyOpFieldsValuesTuple &entry) override; + void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; void drain() override; std::string verifyState(const std::string &key, const std::vector &tuple) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) override; // Get ACL table definition by table name in cache. Return nullptr if not // found. 
diff --git a/orchagent/p4orch/acl_util.cpp b/orchagent/p4orch/acl_util.cpp index 92905ec622..5ab2276b4d 100644 --- a/orchagent/p4orch/acl_util.cpp +++ b/orchagent/p4orch/acl_util.cpp @@ -1,7 +1,8 @@ #include "p4orch/acl_util.h" +#include + #include "converter.h" -#include "json.hpp" #include "logger.h" #include "sai_serialize.h" #include "table.h" diff --git a/orchagent/p4orch/acl_util.h b/orchagent/p4orch/acl_util.h index 74de14d2a5..b4123d0754 100644 --- a/orchagent/p4orch/acl_util.h +++ b/orchagent/p4orch/acl_util.h @@ -1,11 +1,11 @@ #pragma once #include +#include #include #include #include -#include "json.hpp" #include "p4orch/p4orch_util.h" #include "return_code.h" extern "C" @@ -243,7 +243,7 @@ struct P4AclTableDefinition P4AclTableDefinition(const std::string &acl_table_name, const sai_acl_stage_t stage, const uint32_t priority, const uint32_t size, const std::string &meter_unit, const std::string &counter_unit) : acl_table_name(acl_table_name), stage(stage), priority(priority), size(size), meter_unit(meter_unit), - counter_unit(counter_unit){}; + counter_unit(counter_unit) {}; }; struct P4UserDefinedTrapHostifTableEntry @@ -251,7 +251,7 @@ struct P4UserDefinedTrapHostifTableEntry sai_object_id_t user_defined_trap; sai_object_id_t hostif_table_entry; P4UserDefinedTrapHostifTableEntry() - : user_defined_trap(SAI_NULL_OBJECT_ID), hostif_table_entry(SAI_NULL_OBJECT_ID){}; + : user_defined_trap(SAI_NULL_OBJECT_ID), hostif_table_entry(SAI_NULL_OBJECT_ID) {}; }; using acl_rule_attr_lookup_t = std::map; diff --git a/orchagent/p4orch/ext_tables_manager.cpp b/orchagent/p4orch/ext_tables_manager.cpp new file mode 100644 index 0000000000..ae091fcd77 --- /dev/null +++ b/orchagent/p4orch/ext_tables_manager.cpp @@ -0,0 +1,864 @@ +#include "p4orch/ext_tables_manager.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "crmorch.h" +#include "directory.h" +#include "logger.h" +#include "orch.h" +#include "p4orch/p4orch.h" +#include 
"p4orch/p4orch_util.h" +#include "tokenize.h" + +extern sai_counter_api_t *sai_counter_api; +extern sai_generic_programmable_api_t *sai_generic_programmable_api; + +extern Directory gDirectory; +extern sai_object_id_t gSwitchId; +extern P4Orch *gP4Orch; +extern CrmOrch *gCrmOrch; + +P4ExtTableEntry *ExtTablesManager::getP4ExtTableEntry(const std::string &table_name, const std::string &key) +{ + SWSS_LOG_ENTER(); + + auto it = m_extTables.find(table_name); + if (it == m_extTables.end()) + return nullptr; + + if (it->second.find(key) == it->second.end()) + return nullptr; + + return &it->second[key]; +} + +std::string getCrossRefTableName(const std::string table_name) +{ + auto it = FixedTablesMap.find(table_name); + if (it != FixedTablesMap.end()) + { + return (it->second); + } + + return (table_name); +} + +ReturnCode ExtTablesManager::validateActionParamsCrossRef(P4ExtTableAppDbEntry &app_db_entry, ActionInfo *action) +{ + const std::string action_name = action->name; + std::unordered_map cross_ref_key_j; + ReturnCode status; + + for (auto param_defn_it = action->params.begin(); param_defn_it != action->params.end(); param_defn_it++) + { + ActionParamInfo action_param_defn = param_defn_it->second; + if (action_param_defn.table_reference_map.empty()) + { + continue; + } + + std::string param_name = param_defn_it->first; + + auto app_db_param_it = app_db_entry.action_params[action_name].find(param_name); + if (app_db_param_it == app_db_entry.action_params[action_name].end()) + { + SWSS_LOG_ERROR("Required param not specified for action %s\n", action_name.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Required param not specified for action %s " << action_name.c_str(); + } + + for (auto cross_ref_it = action_param_defn.table_reference_map.begin(); + cross_ref_it != action_param_defn.table_reference_map.end(); cross_ref_it++) + { + cross_ref_key_j[cross_ref_it->first].push_back( + 
nlohmann::json::object_t::value_type(prependMatchField(cross_ref_it->second), app_db_param_it->second)); + } + } + + for (auto it = cross_ref_key_j.begin(); it != cross_ref_key_j.end(); it++) + { + const std::string table_name = getCrossRefTableName(it->first); + const std::string table_key = it->second.dump(); + std::string key; + sai_object_type_t object_type; + sai_object_id_t oid; + DepObject dep_object = {}; + + if (gP4Orch->m_p4TableToManagerMap.find(table_name) != gP4Orch->m_p4TableToManagerMap.end()) + { + status = gP4Orch->m_p4TableToManagerMap[table_name]->getSaiObject(table_key, object_type, key); + if (!status.ok()) + { + SWSS_LOG_ERROR("Cross-table reference validation failed from fixed-table %s", table_name.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Cross-table reference valdiation failed from fixed-table"; + } + } + else + { + if (getTableInfo(table_name)) + { + auto ext_table_key = KeyGenerator::generateExtTableKey(table_name, table_key); + status = getSaiObject(ext_table_key, object_type, key); + if (!status.ok()) + { + SWSS_LOG_ERROR("Cross-table reference validation failed from extension-table %s", + table_name.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Cross-table reference valdiation failed from extension " + "table"; + } + } + else + { + SWSS_LOG_ERROR("Cross-table reference validation failed due to non-existent table " + "%s", + table_name.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Cross-table reference valdiation failed due to non-existent " + "table"; + } + } + + if (!m_p4OidMapper->getOID(object_type, key, &oid)) + { + SWSS_LOG_ERROR("Cross-table reference validation failed, no OID found from table %s", table_name.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Cross-table reference valdiation failed, no OID found"; + } + + if (oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_ERROR("Cross-table reference validation failed, null OID expected 
from " + "table %s", + table_name.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Cross-table reference valdiation failed, null OID"; + } + + dep_object.sai_object = object_type; + dep_object.key = key; + dep_object.oid = oid; + app_db_entry.action_dep_objects[action_name] = dep_object; + } + + return ReturnCode(); +} + +ReturnCode ExtTablesManager::validateP4ExtTableAppDbEntry(P4ExtTableAppDbEntry &app_db_entry) +{ + // Perform generic APP DB entry validations. Operation specific validations + // will be done by the respective request process methods. + ReturnCode status; + + TableInfo *table; + table = getTableInfo(app_db_entry.table_name); + if (table == nullptr) + { + SWSS_LOG_ERROR("Not a valid extension table %s", app_db_entry.table_name.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Not a valid extension table " << app_db_entry.table_name.c_str(); + } + + if (table->action_ref_tables.empty()) + { + return ReturnCode(); + } + + ActionInfo *action; + for (auto app_db_action_it = app_db_entry.action_params.begin(); + app_db_action_it != app_db_entry.action_params.end(); app_db_action_it++) + { + auto action_name = app_db_action_it->first; + action = getTableActionInfo(table, action_name); + if (action == nullptr) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Not a valid action " << action_name.c_str() << " in extension table " + << app_db_entry.table_name.c_str(); + } + + if (!action->refers_to) + { + continue; + } + + status = validateActionParamsCrossRef(app_db_entry, action); + if (!status.ok()) + { + return status; + } + } + + return ReturnCode(); +} + +ReturnCodeOr ExtTablesManager::deserializeP4ExtTableEntry( + const std::string &table_name, const std::string &key, const std::vector &attributes) +{ + std::string action_name; + + SWSS_LOG_ENTER(); + + P4ExtTableAppDbEntry app_db_entry_or = {}; + app_db_entry_or.table_name = table_name; + app_db_entry_or.table_key = key; + + action_name = ""; + 
for (const auto &it : attributes) + { + auto field = fvField(it); + auto value = fvValue(it); + + if (field == p4orch::kAction) + { + action_name = value; + continue; + } + + const auto &tokenized_fields = tokenize(field, p4orch::kFieldDelimiter); + if (tokenized_fields.size() <= 1) + { + SWSS_LOG_ERROR("Unknown extension entry field"); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Unknown extension entry field " << QuotedVar(field); + } + + const auto &prefix = tokenized_fields[0]; + if (prefix == p4orch::kActionParamPrefix) + { + const auto ¶m_name = tokenized_fields[1]; + app_db_entry_or.action_params[action_name][param_name] = value; + continue; + } + else + { + SWSS_LOG_ERROR("Unexpected extension entry field"); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Unexpected extension entry field " << QuotedVar(field); + } + } + + return app_db_entry_or; +} + +ReturnCode ExtTablesManager::prepareP4SaiExtAPIParams(const P4ExtTableAppDbEntry &app_db_entry, + std::string &ext_table_entry_attr) +{ + nlohmann::json sai_j, sai_metadata_j, sai_array_j = {}, sai_entry_j; + + SWSS_LOG_ENTER(); + + try + { + TableInfo *table; + table = getTableInfo(app_db_entry.table_name); + if (!table) + { + SWSS_LOG_ERROR("extension entry for invalid table %s", app_db_entry.table_name.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "extension entry for invalid table " << app_db_entry.table_name.c_str(); + } + + nlohmann::json j = nlohmann::json::parse(app_db_entry.table_key); + for (auto it = j.begin(); it != j.end(); ++it) + { + std::string match, value, prefix; + std::size_t pos; + + match = it.key(); + value = it.value(); + + prefix = p4orch::kMatchPrefix; + pos = match.rfind(prefix); + if (pos != std::string::npos) + { + match.erase(0, prefix.length()); + } + else + { + SWSS_LOG_ERROR("Failed to encode match fields for sai call"); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Failed to encode match fields for sai call"; + 
} + + prefix = p4orch::kFieldDelimiter; + pos = match.rfind(prefix); + if (pos != std::string::npos) + { + match.erase(0, prefix.length()); + } + else + { + SWSS_LOG_ERROR("Failed to encode match fields for sai call"); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Failed to encode match fields for sai call"; + } + + auto match_defn_it = table->match_fields.find(match); + if (match_defn_it == table->match_fields.end()) + { + SWSS_LOG_ERROR("extension entry for invalid match field %s", match.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "extension entry for invalid match field " << match.c_str(); + } + + sai_metadata_j = nlohmann::json::object({}); + sai_metadata_j["sai_attr_value_type"] = match_defn_it->second.datatype; + + sai_j = nlohmann::json::object({}); + sai_j[match]["value"] = value; + sai_j[match]["sai_metadata"] = sai_metadata_j; + + sai_array_j.push_back(sai_j); + } + + for (auto app_db_action_it = app_db_entry.action_params.begin(); + app_db_action_it != app_db_entry.action_params.end(); app_db_action_it++) + { + sai_j = nlohmann::json::object({}); + auto action_dep_object_it = app_db_entry.action_dep_objects.find(app_db_action_it->first); + if (action_dep_object_it == app_db_entry.action_dep_objects.end()) + { + auto action_defn_it = table->action_fields.find(app_db_action_it->first); + for (auto app_db_param_it = app_db_action_it->second.begin(); + app_db_param_it != app_db_action_it->second.end(); app_db_param_it++) + { + nlohmann::json params_j = nlohmann::json::object({}); + if (action_defn_it != table->action_fields.end()) + { + auto param_defn_it = action_defn_it->second.params.find(app_db_param_it->first); + if (param_defn_it != action_defn_it->second.params.end()) + { + sai_metadata_j = nlohmann::json::object({}); + sai_metadata_j["sai_attr_value_type"] = param_defn_it->second.datatype; + + params_j[app_db_param_it->first]["sai_metadata"] = sai_metadata_j; + } + } + params_j[app_db_param_it->first]["value"] = 
app_db_param_it->second; + sai_j[app_db_action_it->first].push_back(params_j); + } + } + else + { + auto action_dep_object = action_dep_object_it->second; + + sai_metadata_j = nlohmann::json::object({}); + sai_metadata_j["sai_attr_value_type"] = "SAI_ATTR_VALUE_TYPE_OBJECT_ID"; + + sai_j[app_db_action_it->first]["sai_metadata"] = sai_metadata_j; + sai_j[app_db_action_it->first]["value"] = action_dep_object.oid; + } + + sai_array_j.push_back(sai_j); + } + } + catch (std::exception &ex) + { + SWSS_LOG_ERROR("Failed to encode table %s entry for sai call", app_db_entry.table_name.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Failed to encode table entry for sai call"; + } + + sai_entry_j = nlohmann::json::object({}); + sai_entry_j.push_back(nlohmann::json::object_t::value_type("attributes", sai_array_j)); + SWSS_LOG_ERROR("table: %s, sai entry: %s", app_db_entry.table_name.c_str(), sai_entry_j.dump().c_str()); + ext_table_entry_attr = sai_entry_j.dump(); + + return ReturnCode(); +} + +bool removeGenericCounter(sai_object_id_t counter_id) +{ + sai_status_t sai_status = sai_counter_api->remove_counter(counter_id); + if (sai_status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove generic counter: %" PRId64 "", counter_id); + return false; + } + + return true; +} + +bool createGenericCounter(sai_object_id_t &counter_id) +{ + sai_attribute_t counter_attr; + counter_attr.id = SAI_COUNTER_ATTR_TYPE; + counter_attr.value.s32 = SAI_COUNTER_TYPE_REGULAR; + sai_status_t sai_status = sai_counter_api->create_counter(&counter_id, gSwitchId, 1, &counter_attr); + if (sai_status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("Failed to create generic counter"); + return false; + } + + return true; +} + +ReturnCode ExtTablesManager::createP4ExtTableEntry(const P4ExtTableAppDbEntry &app_db_entry, + P4ExtTableEntry &ext_table_entry) +{ + ReturnCode status; + sai_object_type_t object_type; + std::string key; + std::string ext_table_entry_attr; + 
sai_object_id_t counter_id; + + SWSS_LOG_ENTER(); + + status = prepareP4SaiExtAPIParams(app_db_entry, ext_table_entry_attr); + if (!status.ok()) + { + return status; + } + + // Prepare attributes for the SAI create call. + std::vector generic_programmable_attrs; + sai_attribute_t generic_programmable_attr; + + generic_programmable_attr.id = SAI_GENERIC_PROGRAMMABLE_ATTR_OBJECT_NAME; + generic_programmable_attr.value.s8list.count = (uint32_t)app_db_entry.table_name.size(); + generic_programmable_attr.value.s8list.list = (int8_t *)const_cast(app_db_entry.table_name.c_str()); + generic_programmable_attrs.push_back(generic_programmable_attr); + + generic_programmable_attr.id = SAI_GENERIC_PROGRAMMABLE_ATTR_ENTRY; + generic_programmable_attr.value.json.json.count = (uint32_t)ext_table_entry_attr.size(); + generic_programmable_attr.value.json.json.list = (int8_t *)const_cast(ext_table_entry_attr.c_str()); + generic_programmable_attrs.push_back(generic_programmable_attr); + + auto *table = getTableInfo(app_db_entry.table_name); + if (!table) + { + SWSS_LOG_ERROR("extension entry for invalid table %s", app_db_entry.table_name.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "extension entry for invalid table " << app_db_entry.table_name.c_str(); + } + + if (table->counter_bytes_enabled || table->counter_packets_enabled) + { + if (!createGenericCounter(counter_id)) + { + SWSS_LOG_WARN("Failed to create counter for table %s, key %s\n", app_db_entry.table_name.c_str(), + app_db_entry.table_key.c_str()); + } + else + { + ext_table_entry.sai_counter_oid = counter_id; + } + + generic_programmable_attr.id = SAI_GENERIC_PROGRAMMABLE_ATTR_COUNTER_ID; + generic_programmable_attr.value.oid = counter_id; + generic_programmable_attrs.push_back(generic_programmable_attr); + } + + sai_object_id_t sai_generic_programmable_oid = SAI_NULL_OBJECT_ID; + sai_status_t sai_status = sai_generic_programmable_api->create_generic_programmable( + &sai_generic_programmable_oid, 
gSwitchId, (uint32_t)generic_programmable_attrs.size(), + generic_programmable_attrs.data()); + if (sai_status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("create sai api call failed for extension entry table %s, entry %s", + app_db_entry.table_name.c_str(), app_db_entry.table_key.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "create sai api call failed for extension entry table " << app_db_entry.table_name.c_str() + << " , entry " << app_db_entry.table_key.c_str(); + } + std::string crm_table_name = "EXT_" + app_db_entry.table_name; + boost::algorithm::to_upper(crm_table_name); + gCrmOrch->incCrmExtTableUsedCounter(CrmResourceType::CRM_EXT_TABLE, crm_table_name); + + ext_table_entry.sai_entry_oid = sai_generic_programmable_oid; + for (auto action_dep_object_it = app_db_entry.action_dep_objects.begin(); + action_dep_object_it != app_db_entry.action_dep_objects.end(); action_dep_object_it++) + { + auto action_dep_object = action_dep_object_it->second; + m_p4OidMapper->increaseRefCount(action_dep_object.sai_object, action_dep_object.key); + ext_table_entry.action_dep_objects[action_dep_object_it->first] = action_dep_object; + } + + auto ext_table_key = KeyGenerator::generateExtTableKey(app_db_entry.table_name, app_db_entry.table_key); + status = getSaiObject(ext_table_key, object_type, key); + if (!status.ok()) + { + SWSS_LOG_ERROR("Invalid formation of a key %s", ext_table_key.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Invalid formation of a key"; + } + + m_p4OidMapper->setOID(object_type, key, ext_table_entry.sai_entry_oid); + m_extTables[app_db_entry.table_name][app_db_entry.table_key] = ext_table_entry; + return ReturnCode(); +} + +ReturnCode ExtTablesManager::updateP4ExtTableEntry(const P4ExtTableAppDbEntry &app_db_entry, + P4ExtTableEntry *ext_table_entry) +{ + ReturnCode status; + std::string ext_table_entry_attr; + std::unordered_map old_action_dep_objects; + + SWSS_LOG_ENTER(); + + if 
(ext_table_entry->sai_entry_oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_ERROR("update sai api call for NULL extension entry table %s, entry %s", + app_db_entry.table_name.c_str(), ext_table_entry->table_key.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "update sai api call for NULL extension entry table " << app_db_entry.table_name.c_str() + << " , entry " << ext_table_entry->table_key.c_str(); + } + + status = prepareP4SaiExtAPIParams(app_db_entry, ext_table_entry_attr); + if (!status.ok()) + { + return status; + } + + // Prepare attribute for the SAI update call. + sai_attribute_t generic_programmable_attr; + + generic_programmable_attr.id = SAI_GENERIC_PROGRAMMABLE_ATTR_ENTRY; + generic_programmable_attr.value.json.json.count = (uint32_t)ext_table_entry_attr.length(); + generic_programmable_attr.value.json.json.list = (int8_t *)const_cast(ext_table_entry_attr.c_str()); + + sai_status_t sai_status = sai_generic_programmable_api->set_generic_programmable_attribute( + ext_table_entry->sai_entry_oid, &generic_programmable_attr); + if (sai_status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("update sai api call failed for extension entry table %s, entry %s", + app_db_entry.table_name.c_str(), ext_table_entry->table_key.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "update sai api call failed for extension entry table " << app_db_entry.table_name.c_str() + << " , entry " << ext_table_entry->table_key.c_str(); + } + + old_action_dep_objects = ext_table_entry->action_dep_objects; + ext_table_entry->action_dep_objects.clear(); + + for (auto action_dep_object_it = app_db_entry.action_dep_objects.begin(); + action_dep_object_it != app_db_entry.action_dep_objects.end(); action_dep_object_it++) + { + auto action_dep_object = action_dep_object_it->second; + m_p4OidMapper->increaseRefCount(action_dep_object.sai_object, action_dep_object.key); + ext_table_entry->action_dep_objects[action_dep_object_it->first] = action_dep_object; + } 
+ + for (auto old_action_dep_object_it = old_action_dep_objects.begin(); + old_action_dep_object_it != old_action_dep_objects.end(); old_action_dep_object_it++) + { + auto old_action_dep_object = old_action_dep_object_it->second; + m_p4OidMapper->decreaseRefCount(old_action_dep_object.sai_object, old_action_dep_object.key); + } + + return ReturnCode(); +} + +ReturnCode ExtTablesManager::removeP4ExtTableEntry(const std::string &table_name, const std::string &table_key) +{ + ReturnCode status; + sai_object_type_t object_type; + std::string key; + + SWSS_LOG_ENTER(); + + auto *ext_table_entry = getP4ExtTableEntry(table_name, table_key); + if (!ext_table_entry) + { + LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) + << "extension entry with key " << QuotedVar(table_key) << " does not exist for table " + << QuotedVar(table_name)); + } + + if (ext_table_entry->sai_entry_oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_ERROR("remove sai api call for NULL extension entry table %s, entry %s", table_name.c_str(), + table_key.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "remove sai api call for NULL extension entry table " + << table_name.c_str() << " , entry " << table_key.c_str(); + } + + SWSS_LOG_ERROR("table: %s, key: %s", ext_table_entry->table_name.c_str(), ext_table_entry->table_key.c_str()); + sai_status_t sai_status = sai_generic_programmable_api->remove_generic_programmable(ext_table_entry->sai_entry_oid); + if (sai_status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("remove sai api call failed for extension entry table %s, entry %s", table_name.c_str(), + table_key.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "remove sai api call failed for extension entry table " + << table_name.c_str() << " , entry " << table_key.c_str(); + } + std::string crm_table_name = "EXT_" + table_name; + boost::algorithm::to_upper(crm_table_name); + gCrmOrch->decCrmExtTableUsedCounter(CrmResourceType::CRM_EXT_TABLE, crm_table_name); + + 
auto ext_table_key = KeyGenerator::generateExtTableKey(table_name, table_key); + status = getSaiObject(ext_table_key, object_type, key); + if (!status.ok()) + { + SWSS_LOG_ERROR("Invalid formation of a key %s", ext_table_key.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Invalid formation of a key"; + } + + uint32_t ref_count; + if (!m_p4OidMapper->getRefCount(object_type, key, &ref_count)) + { + RETURN_INTERNAL_ERROR_AND_RAISE_CRITICAL("Failed to get reference count for " << QuotedVar(key)); + } + if (ref_count > 0) + { + LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "extension entry " << QuotedVar(key) + << " referenced by other objects (ref_count = " << ref_count); + } + m_p4OidMapper->eraseOID(object_type, key); + + for (auto action_dep_object_it = ext_table_entry->action_dep_objects.begin(); + action_dep_object_it != ext_table_entry->action_dep_objects.end(); action_dep_object_it++) + { + auto action_dep_object = action_dep_object_it->second; + m_p4OidMapper->decreaseRefCount(action_dep_object.sai_object, action_dep_object.key); + } + + if (ext_table_entry->sai_counter_oid != SAI_NULL_OBJECT_ID) + { + m_countersTable->del(ext_table_entry->db_key); + removeGenericCounter(ext_table_entry->sai_counter_oid); + } + + m_extTables[table_name].erase(table_key); + + return ReturnCode(); +} + +ReturnCode ExtTablesManager::processAddRequest(const P4ExtTableAppDbEntry &app_db_entry) +{ + SWSS_LOG_ENTER(); + + P4ExtTableEntry ext_table_entry(app_db_entry.db_key, app_db_entry.table_name, app_db_entry.table_key); + auto status = createP4ExtTableEntry(app_db_entry, ext_table_entry); + if (!status.ok()) + { + return status; + } + return ReturnCode(); +} + +ReturnCode ExtTablesManager::processUpdateRequest(const P4ExtTableAppDbEntry &app_db_entry, + P4ExtTableEntry *ext_table_entry) +{ + SWSS_LOG_ENTER(); + + auto status = updateP4ExtTableEntry(app_db_entry, ext_table_entry); + if (!status.ok()) + { + SWSS_LOG_ERROR("Failed to 
update extension entry with key %s", app_db_entry.table_key.c_str()); + } + return ReturnCode(); +} + +ReturnCode ExtTablesManager::processDeleteRequest(const P4ExtTableAppDbEntry &app_db_entry) +{ + SWSS_LOG_ENTER(); + + auto status = removeP4ExtTableEntry(app_db_entry.table_name, app_db_entry.table_key); + if (!status.ok()) + { + SWSS_LOG_ERROR("Failed to remove extension entry with key %s", app_db_entry.table_key.c_str()); + } + return ReturnCode(); +} + +ReturnCode ExtTablesManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) +{ + object_type = SAI_OBJECT_TYPE_GENERIC_PROGRAMMABLE; + object_key = json_key; + + return ReturnCode(); +} + +void ExtTablesManager::enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) +{ + m_entriesTables[table_name].push_back(entry); +} + +void ExtTablesManager::drain() +{ + SWSS_LOG_ENTER(); + std::string table_prefix = "EXT_"; + + if (gP4Orch->tablesinfo) + { + for (auto table_it = gP4Orch->tablesinfo->m_tablePrecedenceMap.begin(); + table_it != gP4Orch->tablesinfo->m_tablePrecedenceMap.end(); ++table_it) + { + auto table_name = table_prefix + table_it->second; + boost::algorithm::to_upper(table_name); + auto it_m = m_entriesTables.find(table_name); + if (it_m == m_entriesTables.end()) + { + continue; + } + + for (const auto &key_op_fvs_tuple : it_m->second) + { + std::string table_name; + std::string table_key; + + parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &table_key); + const std::vector &attributes = kfvFieldsValues(key_op_fvs_tuple); + + if (table_name.rfind(table_prefix, 0) == std::string::npos) + { + SWSS_LOG_ERROR("Table %s is without prefix %s", table_name.c_str(), table_prefix.c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), StatusCode::SWSS_RC_INVALID_PARAM, + /*replace=*/true); + continue; + } + table_name = table_name.substr(table_prefix.length()); + 
boost::algorithm::to_lower(table_name); + + ReturnCode status; + auto app_db_entry_or = deserializeP4ExtTableEntry(table_name, table_key, attributes); + if (!app_db_entry_or.ok()) + { + status = app_db_entry_or.status(); + SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", + QuotedVar(kfvKey(key_op_fvs_tuple)).c_str(), status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + continue; + } + + auto &app_db_entry = *app_db_entry_or; + status = validateP4ExtTableAppDbEntry(app_db_entry); + if (!status.ok()) + { + SWSS_LOG_ERROR("Validation failed for extension APP DB entry with key %s: %s", + QuotedVar(kfvKey(key_op_fvs_tuple)).c_str(), status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + continue; + } + + const std::string &operation = kfvOp(key_op_fvs_tuple); + if (operation == SET_COMMAND) + { + auto *ext_table_entry = getP4ExtTableEntry(app_db_entry.table_name, app_db_entry.table_key); + if (ext_table_entry == nullptr) + { + // Create extension entry + app_db_entry.db_key = kfvKey(key_op_fvs_tuple); + status = processAddRequest(app_db_entry); + } + else + { + // Modify existing extension entry + status = processUpdateRequest(app_db_entry, ext_table_entry); + } + } + else if (operation == DEL_COMMAND) + { + // Delete extension entry + status = processDeleteRequest(app_db_entry); + } + else + { + status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Unknown operation type " << QuotedVar(operation); + SWSS_LOG_ERROR("%s", status.message().c_str()); + } + if (!status.ok()) + { + SWSS_LOG_ERROR("Processing failed for extension APP_DB entry with key %s: %s", + QuotedVar(kfvKey(key_op_fvs_tuple)).c_str(), status.message().c_str()); + } + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), 
kfvFieldsValues(key_op_fvs_tuple), + status, + /*replace=*/true); + } + + it_m->second.clear(); + } + } + + // Now report error for all remaining un-processed entries + for (auto it_m = m_entriesTables.begin(); it_m != m_entriesTables.end(); it_m++) + { + for (const auto &key_op_fvs_tuple : it_m->second) + { + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), + StatusCode::SWSS_RC_INVALID_PARAM, /*replace=*/true); + } + + it_m->second.clear(); + } +} + +void ExtTablesManager::doExtCounterStatsTask() +{ + SWSS_LOG_ENTER(); + + if (!gP4Orch->tablesinfo) + { + return; + } + + sai_stat_id_t stat_ids[] = {SAI_COUNTER_STAT_PACKETS, SAI_COUNTER_STAT_BYTES}; + uint64_t stats[2]; + std::vector counter_stats_values; + + for (auto table_it = gP4Orch->tablesinfo->m_tableInfoMap.begin(); + table_it != gP4Orch->tablesinfo->m_tableInfoMap.end(); ++table_it) + { + if (!table_it->second.counter_bytes_enabled && !table_it->second.counter_packets_enabled) + { + continue; + } + + auto table_name = table_it->second.name; + auto ext_table_it = m_extTables.find(table_name); + if (ext_table_it == m_extTables.end()) + { + continue; + } + + for (auto ext_table_entry_it = ext_table_it->second.begin(); ext_table_entry_it != ext_table_it->second.end(); + ++ext_table_entry_it) + { + auto *ext_table_entry = &ext_table_entry_it->second; + if (ext_table_entry->sai_counter_oid == SAI_NULL_OBJECT_ID) + { + continue; + } + + sai_status_t sai_status = + sai_counter_api->get_counter_stats(ext_table_entry->sai_counter_oid, 2, stat_ids, stats); + if (sai_status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("Failed to set counters stats for extension entry %s:%s in " + "COUNTERS_DB: ", + table_name.c_str(), ext_table_entry->table_key.c_str()); + continue; + } + + counter_stats_values.push_back(swss::FieldValueTuple{P4_COUNTER_STATS_PACKETS, std::to_string(stats[0])}); + counter_stats_values.push_back(swss::FieldValueTuple{P4_COUNTER_STATS_BYTES, 
std::to_string(stats[1])}); + + // Set field value tuples for counters stats in COUNTERS_DB + m_countersTable->set(ext_table_entry->db_key, counter_stats_values); + } + } +} + +std::string ExtTablesManager::verifyState(const std::string &key, const std::vector &tuple) +{ + std::string result = ""; + SWSS_LOG_ENTER(); + + return result; +} diff --git a/orchagent/p4orch/ext_tables_manager.h b/orchagent/p4orch/ext_tables_manager.h new file mode 100644 index 0000000000..82256f72ba --- /dev/null +++ b/orchagent/p4orch/ext_tables_manager.h @@ -0,0 +1,94 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include "macaddress.h" +#include "orch.h" +#include "p4orch/object_manager_interface.h" +#include "p4orch/p4oidmapper.h" +#include "p4orch/p4orch_util.h" +#include "p4orch/tables_definition_manager.h" +#include "response_publisher_interface.h" +#include "return_code.h" +#include "vrforch.h" +extern "C" +{ +#include "sai.h" +} + +struct P4ExtTableEntry +{ + std::string db_key; + std::string table_name; + std::string table_key; + sai_object_id_t sai_entry_oid = SAI_NULL_OBJECT_ID; + sai_object_id_t sai_counter_oid = SAI_NULL_OBJECT_ID; + std::unordered_map action_dep_objects; + + P4ExtTableEntry() {}; + P4ExtTableEntry(const std::string &db_key, const std::string &table_name, const std::string &table_key) + : db_key(db_key), table_name(table_name), table_key(table_key) + { + } +}; + +typedef std::unordered_map P4ExtTableEntryMap; +typedef std::unordered_map P4ExtTableMap; +typedef std::unordered_map> m_entriesTableMap; + +class ExtTablesManager : public ObjectManagerInterface +{ + public: + ExtTablesManager(P4OidMapper *p4oidMapper, VRFOrch *vrfOrch, ResponsePublisherInterface *publisher) + : m_vrfOrch(vrfOrch), m_countersDb(std::make_unique("COUNTERS_DB", 0)), + m_countersTable(std::make_unique( + m_countersDb.get(), std::string(COUNTERS_TABLE) + DEFAULT_KEY_SEPARATOR + APP_P4RT_TABLE_NAME)) + { + SWSS_LOG_ENTER(); + + assert(p4oidMapper != nullptr); 
+ m_p4OidMapper = p4oidMapper; + assert(publisher != nullptr); + m_publisher = publisher; + } + virtual ~ExtTablesManager() = default; + + void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; + void drain() override; + std::string verifyState(const std::string &key, const std::vector &tuple) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) override; + + // For every extension entry, update counters stats in COUNTERS_DB, if + // counters are enabled for those entries + void doExtCounterStatsTask(); + + private: + ReturnCodeOr deserializeP4ExtTableEntry(const std::string &table_name, const std::string &key, + const std::vector &attributes); + ReturnCode validateActionParamsCrossRef(P4ExtTableAppDbEntry &app_db_entry, ActionInfo *action); + ReturnCode validateP4ExtTableAppDbEntry(P4ExtTableAppDbEntry &app_db_entry); + P4ExtTableEntry *getP4ExtTableEntry(const std::string &table_name, const std::string &table_key); + ReturnCode prepareP4SaiExtAPIParams(const P4ExtTableAppDbEntry &app_db_entry, std::string &ext_table_entry_attr); + ReturnCode createP4ExtTableEntry(const P4ExtTableAppDbEntry &app_db_entry, P4ExtTableEntry &ext_table_entry); + ReturnCode updateP4ExtTableEntry(const P4ExtTableAppDbEntry &app_db_entry, P4ExtTableEntry *ext_table_entry); + ReturnCode removeP4ExtTableEntry(const std::string &table_name, const std::string &table_key); + ReturnCode processAddRequest(const P4ExtTableAppDbEntry &app_db_entry); + ReturnCode processUpdateRequest(const P4ExtTableAppDbEntry &app_db_entry, P4ExtTableEntry *ext_table_entry); + ReturnCode processDeleteRequest(const P4ExtTableAppDbEntry &app_db_entry); + + ReturnCode setExtTableCounterStats(P4ExtTableEntry *ext_table_entry); + + P4ExtTableMap m_extTables; + P4OidMapper *m_p4OidMapper; + VRFOrch *m_vrfOrch; + ResponsePublisherInterface *m_publisher; + m_entriesTableMap m_entriesTables; + + std::unique_ptr 
m_countersDb; + std::unique_ptr m_countersTable; +}; diff --git a/orchagent/p4orch/gre_tunnel_manager.cpp b/orchagent/p4orch/gre_tunnel_manager.cpp index 84f48a57b9..c3bfd7d6d7 100644 --- a/orchagent/p4orch/gre_tunnel_manager.cpp +++ b/orchagent/p4orch/gre_tunnel_manager.cpp @@ -1,6 +1,7 @@ #include "p4orch/gre_tunnel_manager.h" #include +#include #include #include #include @@ -9,7 +10,6 @@ #include "crmorch.h" #include "dbconnector.h" #include "ipaddress.h" -#include "json.hpp" #include "logger.h" #include "p4orch/p4orch_util.h" #include "sai_serialize.h" @@ -89,15 +89,22 @@ std::vector getSaiAttrs(const P4GreTunnelEntry &gre_tunnel_entr } // namespace P4GreTunnelEntry::P4GreTunnelEntry(const std::string &tunnel_id, const std::string &router_interface_id, - const swss::IpAddress &encap_src_ip, const swss::IpAddress &encap_dst_ip) + const swss::IpAddress &encap_src_ip, const swss::IpAddress &encap_dst_ip, + const swss::IpAddress &neighbor_id) : tunnel_id(tunnel_id), router_interface_id(router_interface_id), encap_src_ip(encap_src_ip), - encap_dst_ip(encap_dst_ip) + encap_dst_ip(encap_dst_ip), neighbor_id(neighbor_id) { SWSS_LOG_ENTER(); tunnel_key = KeyGenerator::generateTunnelKey(tunnel_id); } -void GreTunnelManager::enqueue(const swss::KeyOpFieldsValuesTuple &entry) +ReturnCode GreTunnelManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) +{ + return StatusCode::SWSS_RC_UNIMPLEMENTED; +} + +void GreTunnelManager::enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) { m_entries.push_back(entry); } @@ -188,7 +195,7 @@ P4GreTunnelEntry *GreTunnelManager::getGreTunnelEntry(const std::string &tunnel_ } }; -ReturnCodeOr GreTunnelManager::getUnderlayIfFromGreTunnelEntry(const std::string &tunnel_key) +ReturnCodeOr GreTunnelManager::getConstGreTunnelEntry(const std::string &tunnel_key) { SWSS_LOG_ENTER(); @@ -200,7 +207,7 @@ ReturnCodeOr GreTunnelManager::getUnderlayIfFromGreTunnelEntr 
} else { - return tunnel->router_interface_id; + return *tunnel; } } @@ -274,7 +281,7 @@ ReturnCode GreTunnelManager::processAddRequest(const P4GreTunnelAppDbEntry &app_ SWSS_LOG_ENTER(); P4GreTunnelEntry gre_tunnel_entry(app_db_entry.tunnel_id, app_db_entry.router_interface_id, - app_db_entry.encap_src_ip, app_db_entry.encap_dst_ip); + app_db_entry.encap_src_ip, app_db_entry.encap_dst_ip, app_db_entry.encap_dst_ip); auto status = createGreTunnel(gre_tunnel_entry); if (!status.ok()) { @@ -570,6 +577,15 @@ std::string GreTunnelManager::verifyStateCache(const P4GreTunnelAppDbEntry &app_ return msg.str(); } + if (gre_tunnel_entry->neighbor_id.to_string() != app_db_entry.encap_dst_ip.to_string()) + { + std::stringstream msg; + msg << "GreTunnel " << QuotedVar(app_db_entry.tunnel_id) << " with destination IP " + << QuotedVar(app_db_entry.encap_dst_ip.to_string()) << " does not match internal cache " + << QuotedVar(gre_tunnel_entry->neighbor_id.to_string()) << " fo neighbor_id in GreTunnel manager."; + return msg.str(); + } + return m_p4OidMapper->verifyOIDMapping(SAI_OBJECT_TYPE_TUNNEL, gre_tunnel_entry->tunnel_key, gre_tunnel_entry->tunnel_oid); } @@ -616,4 +632,4 @@ std::string GreTunnelManager::verifyStateAsicDb(const P4GreTunnelEntry *gre_tunn return verifyAttrs(values, exp, std::vector{}, /*allow_unknown=*/false); -} \ No newline at end of file +} diff --git a/orchagent/p4orch/gre_tunnel_manager.h b/orchagent/p4orch/gre_tunnel_manager.h index deb5b319e3..2eee9b18c4 100644 --- a/orchagent/p4orch/gre_tunnel_manager.h +++ b/orchagent/p4orch/gre_tunnel_manager.h @@ -36,6 +36,9 @@ struct P4GreTunnelEntry std::string router_interface_id; swss::IpAddress encap_src_ip; swss::IpAddress encap_dst_ip; + // neighbor_id is required to be equal to encap_dst_ip by BRCM. And the + // neighbor entry needs to be created before GRE tunnel object + swss::IpAddress neighbor_id; // SAI OID associated with this entry. 
sai_object_id_t tunnel_oid = SAI_NULL_OBJECT_ID; @@ -45,7 +48,8 @@ struct P4GreTunnelEntry sai_object_id_t underlay_if_oid = SAI_NULL_OBJECT_ID; P4GreTunnelEntry(const std::string &tunnel_id, const std::string &router_interface_id, - const swss::IpAddress &encap_src_ip, const swss::IpAddress &encap_dst_ip); + const swss::IpAddress &encap_src_ip, const swss::IpAddress &encap_dst_ip, + const swss::IpAddress &neighbor_id); }; // GreTunnelManager listens to changes in table APP_P4RT_TUNNEL_TABLE_NAME and @@ -65,11 +69,13 @@ class GreTunnelManager : public ObjectManagerInterface virtual ~GreTunnelManager() = default; - void enqueue(const swss::KeyOpFieldsValuesTuple &entry) override; + void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; void drain() override; std::string verifyState(const std::string &key, const std::vector &tuple) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) override; - ReturnCodeOr getUnderlayIfFromGreTunnelEntry(const std::string &gre_tunnel_key); + ReturnCodeOr getConstGreTunnelEntry(const std::string &gre_tunnel_key); private: // Gets the internal cached GRE tunnel entry by its key. 
diff --git a/orchagent/p4orch/l3_admit_manager.cpp b/orchagent/p4orch/l3_admit_manager.cpp index 75d4d6f7d2..da5b955dba 100644 --- a/orchagent/p4orch/l3_admit_manager.cpp +++ b/orchagent/p4orch/l3_admit_manager.cpp @@ -1,13 +1,13 @@ #include "p4orch/l3_admit_manager.h" #include +#include #include #include #include #include "SaiAttributeList.h" #include "dbconnector.h" -#include "json.hpp" #include "logger.h" #include "p4orch/p4orch_util.h" #include "portsorch.h" @@ -64,7 +64,13 @@ ReturnCodeOr> getSaiAttrs(const P4L3AdmitEntry &l3_ } // namespace -void L3AdmitManager::enqueue(const swss::KeyOpFieldsValuesTuple &entry) +ReturnCode L3AdmitManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) +{ + return StatusCode::SWSS_RC_UNIMPLEMENTED; +} + +void L3AdmitManager::enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) { m_entries.push_back(entry); } diff --git a/orchagent/p4orch/l3_admit_manager.h b/orchagent/p4orch/l3_admit_manager.h index 933f5792c8..5f0af69b71 100644 --- a/orchagent/p4orch/l3_admit_manager.h +++ b/orchagent/p4orch/l3_admit_manager.h @@ -60,9 +60,11 @@ class L3AdmitManager : public ObjectManagerInterface virtual ~L3AdmitManager() = default; - void enqueue(const swss::KeyOpFieldsValuesTuple &entry) override; + void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; void drain() override; std::string verifyState(const std::string &key, const std::vector &tuple) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) override; private: // Gets the internal cached next hop entry by its key. 
@@ -95,4 +97,4 @@ class L3AdmitManager : public ObjectManagerInterface P4OidMapper *m_p4OidMapper; friend class L3AdmitManagerTest; -}; \ No newline at end of file +}; diff --git a/orchagent/p4orch/mirror_session_manager.cpp b/orchagent/p4orch/mirror_session_manager.cpp index dfecb74ad7..e562b87ff5 100644 --- a/orchagent/p4orch/mirror_session_manager.cpp +++ b/orchagent/p4orch/mirror_session_manager.cpp @@ -1,10 +1,10 @@ #include "p4orch/mirror_session_manager.h" #include +#include #include "SaiAttributeList.h" #include "dbconnector.h" -#include "json.hpp" #include "p4orch/p4orch_util.h" #include "portsorch.h" #include "sai_serialize.h" @@ -21,7 +21,35 @@ extern sai_object_id_t gSwitchId; namespace p4orch { -void MirrorSessionManager::enqueue(const swss::KeyOpFieldsValuesTuple &entry) +ReturnCode MirrorSessionManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) +{ + std::string value; + + try + { + nlohmann::json j = nlohmann::json::parse(json_key); + if (j.find(prependMatchField(p4orch::kMirrorSessionId)) != j.end()) + { + value = j.at(prependMatchField(p4orch::kMirrorSessionId)).get(); + object_key = KeyGenerator::generateMirrorSessionKey(value); + object_type = SAI_OBJECT_TYPE_MIRROR_SESSION; + return ReturnCode(); + } + else + { + SWSS_LOG_ERROR("%s match parameter absent: required for dependent object query", p4orch::kMirrorSessionId); + } + } + catch (std::exception &ex) + { + SWSS_LOG_ERROR("json_key parse error"); + } + + return StatusCode::SWSS_RC_INVALID_PARAM; +} + +void MirrorSessionManager::enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) { SWSS_LOG_ENTER(); m_entries.push_back(entry); diff --git a/orchagent/p4orch/mirror_session_manager.h b/orchagent/p4orch/mirror_session_manager.h index 3cbd46ee15..7c2bf3b3b1 100644 --- a/orchagent/p4orch/mirror_session_manager.h +++ b/orchagent/p4orch/mirror_session_manager.h @@ -81,12 +81,15 @@ class MirrorSessionManager : 
public ObjectManagerInterface m_publisher = publisher; } - void enqueue(const swss::KeyOpFieldsValuesTuple &entry) override; + void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; void drain() override; std::string verifyState(const std::string &key, const std::vector &tuple) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) override; + private: ReturnCodeOr deserializeP4MirrorSessionAppDbEntry( const std::string &key, const std::vector &attributes); diff --git a/orchagent/p4orch/neighbor_manager.cpp b/orchagent/p4orch/neighbor_manager.cpp index 9a903baa6a..f68f22a545 100644 --- a/orchagent/p4orch/neighbor_manager.cpp +++ b/orchagent/p4orch/neighbor_manager.cpp @@ -1,5 +1,6 @@ #include "p4orch/neighbor_manager.h" +#include #include #include #include @@ -7,7 +8,6 @@ #include "SaiAttributeList.h" #include "crmorch.h" #include "dbconnector.h" -#include "json.hpp" #include "logger.h" #include "orch.h" #include "p4orch/p4orch_util.h" @@ -324,7 +324,46 @@ ReturnCode NeighborManager::processDeleteRequest(const std::string &neighbor_key return status; } -void NeighborManager::enqueue(const swss::KeyOpFieldsValuesTuple &entry) +ReturnCode NeighborManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) +{ + std::string router_intf_id, neighbor_id; + swss::IpAddress neighbor; + + try + { + nlohmann::json j = nlohmann::json::parse(json_key); + if (j.find(prependMatchField(p4orch::kRouterInterfaceId)) != j.end()) + { + router_intf_id = j.at(prependMatchField(p4orch::kRouterInterfaceId)).get(); + if (j.find(prependMatchField(p4orch::kNeighborId)) != j.end()) + { + neighbor_id = j.at(prependMatchField(p4orch::kNeighborId)).get(); + neighbor = swss::IpAddress(neighbor_id); + object_key = KeyGenerator::generateNeighborKey(router_intf_id, neighbor); + object_type = SAI_OBJECT_TYPE_NEIGHBOR_ENTRY; + return 
ReturnCode(); + } + else + { + SWSS_LOG_ERROR("%s match parameter absent: required for dependent object query", p4orch::kNeighborId); + } + } + else + { + SWSS_LOG_ERROR("%s match parameter absent: required for dependent object query", + p4orch::kRouterInterfaceId); + } + } + catch (std::exception &ex) + { + SWSS_LOG_ERROR("json_key parse error"); + } + + return StatusCode::SWSS_RC_INVALID_PARAM; +} + +void NeighborManager::enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) { m_entries.push_back(entry); } diff --git a/orchagent/p4orch/neighbor_manager.h b/orchagent/p4orch/neighbor_manager.h index 4165bb90ed..229dcc41d1 100644 --- a/orchagent/p4orch/neighbor_manager.h +++ b/orchagent/p4orch/neighbor_manager.h @@ -49,9 +49,11 @@ class NeighborManager : public ObjectManagerInterface } virtual ~NeighborManager() = default; - void enqueue(const swss::KeyOpFieldsValuesTuple &entry) override; + void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; void drain() override; std::string verifyState(const std::string &key, const std::vector &tuple) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) override; private: ReturnCodeOr deserializeNeighborEntry(const std::string &key, diff --git a/orchagent/p4orch/next_hop_manager.cpp b/orchagent/p4orch/next_hop_manager.cpp index 2a9bbcf8f9..f55c83534a 100644 --- a/orchagent/p4orch/next_hop_manager.cpp +++ b/orchagent/p4orch/next_hop_manager.cpp @@ -1,5 +1,6 @@ #include "p4orch/next_hop_manager.h" +#include #include #include #include @@ -8,7 +9,6 @@ #include "crmorch.h" #include "dbconnector.h" #include "ipaddress.h" -#include "json.hpp" #include "logger.h" #include "p4orch/p4orch.h" #include "p4orch/p4orch_util.h" @@ -41,18 +41,22 @@ namespace ReturnCode validateAppDbEntry(const P4NextHopAppDbEntry &app_db_entry) { + // TODO(b/225242372): remove kSetNexthop action after P4RT and Orion 
update + // naming if (app_db_entry.action_str != p4orch::kSetIpNexthop && app_db_entry.action_str != p4orch::kSetNexthop && app_db_entry.action_str != p4orch::kSetTunnelNexthop) { return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Invalid action " << QuotedVar(app_db_entry.action_str) << " of Nexthop App DB entry"; } - if (app_db_entry.neighbor_id.isZero()) + if (app_db_entry.action_str == p4orch::kSetIpNexthop && app_db_entry.neighbor_id.isZero()) { return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Missing field " << QuotedVar(prependParamField(p4orch::kNeighborId)) << " for action " << QuotedVar(p4orch::kSetIpNexthop) << " in table entry"; } + // TODO(b/225242372): remove kSetNexthop action after P4RT and Orion update + // naming if (app_db_entry.action_str == p4orch::kSetIpNexthop || app_db_entry.action_str == p4orch::kSetNexthop) { if (!app_db_entry.gre_tunnel_id.empty()) @@ -143,7 +147,35 @@ ReturnCodeOr> NextHopManager::getSaiAttrs(const P4N return next_hop_attrs; } -void NextHopManager::enqueue(const swss::KeyOpFieldsValuesTuple &entry) +ReturnCode NextHopManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) +{ + std::string value; + + try + { + nlohmann::json j = nlohmann::json::parse(json_key); + if (j.find(prependMatchField(p4orch::kNexthopId)) != j.end()) + { + value = j.at(prependMatchField(p4orch::kNexthopId)).get(); + object_key = KeyGenerator::generateNextHopKey(value); + object_type = SAI_OBJECT_TYPE_NEXT_HOP; + return ReturnCode(); + } + else + { + SWSS_LOG_ERROR("%s match parameter absent: required for dependent object query", p4orch::kNexthopId); + } + } + catch (std::exception &ex) + { + SWSS_LOG_ERROR("json_key parse error"); + } + + return StatusCode::SWSS_RC_INVALID_PARAM; +} + +void NextHopManager::enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) { m_entries.push_back(entry); } @@ -321,23 +353,27 @@ ReturnCode 
NextHopManager::createNextHop(P4NextHopEntry &next_hop_entry) << " already exists in centralized mapper"); } - std::string router_interface_id = next_hop_entry.router_interface_id; if (!next_hop_entry.gre_tunnel_id.empty()) { - auto underlay_if_or = gP4Orch->getGreTunnelManager()->getUnderlayIfFromGreTunnelEntry( + auto gre_tunnel_or = gP4Orch->getGreTunnelManager()->getConstGreTunnelEntry( KeyGenerator::generateTunnelKey(next_hop_entry.gre_tunnel_id)); - if (!underlay_if_or.ok()) + if (!gre_tunnel_or.ok()) { LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) << "GRE Tunnel " << QuotedVar(next_hop_entry.gre_tunnel_id) << " does not exist in GRE Tunnel Manager"); } - router_interface_id = *underlay_if_or; + next_hop_entry.router_interface_id = (*gre_tunnel_or).router_interface_id; + // BRCM requires neighbor object to be created before GRE tunnel, referring + // to the one in GRE tunnel object when creating next_hop_entry_with + // setTunnelAction + next_hop_entry.neighbor_id = (*gre_tunnel_or).neighbor_id; } // Neighbor doesn't have OID and the IP addr needed in next hop creation is // neighbor_id, so only check neighbor existence in centralized mapper. 
- const auto neighbor_key = KeyGenerator::generateNeighborKey(router_interface_id, next_hop_entry.neighbor_id); + const auto neighbor_key = + KeyGenerator::generateNeighborKey(next_hop_entry.router_interface_id, next_hop_entry.neighbor_id); if (!m_p4OidMapper->existsOID(SAI_OBJECT_TYPE_NEIGHBOR_ENTRY, neighbor_key)) { LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) @@ -456,15 +492,15 @@ ReturnCode NextHopManager::removeNextHop(const std::string &next_hop_key) std::string router_interface_id = next_hop_entry->router_interface_id; if (!next_hop_entry->gre_tunnel_id.empty()) { - auto underlay_if_or = gP4Orch->getGreTunnelManager()->getUnderlayIfFromGreTunnelEntry( + auto gre_tunnel_or = gP4Orch->getGreTunnelManager()->getConstGreTunnelEntry( KeyGenerator::generateTunnelKey(next_hop_entry->gre_tunnel_id)); - if (!underlay_if_or.ok()) + if (!gre_tunnel_or.ok()) { LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) << "GRE Tunnel " << QuotedVar(next_hop_entry->gre_tunnel_id) << " does not exist in GRE Tunnel Manager"); } - router_interface_id = *underlay_if_or; + router_interface_id = (*gre_tunnel_or).router_interface_id; } m_p4OidMapper->decreaseRefCount( SAI_OBJECT_TYPE_NEIGHBOR_ENTRY, @@ -560,7 +596,8 @@ std::string NextHopManager::verifyStateCache(const P4NextHopAppDbEntry &app_db_e << QuotedVar(next_hop_entry->next_hop_id) << " in nexthop manager."; return msg.str(); } - if (next_hop_entry->router_interface_id != app_db_entry.router_interface_id) + if (app_db_entry.action_str == p4orch::kSetIpNexthop && + next_hop_entry->router_interface_id != app_db_entry.router_interface_id) { std::stringstream msg; msg << "Nexthop " << QuotedVar(app_db_entry.next_hop_id) << " with ritf ID " @@ -568,7 +605,8 @@ std::string NextHopManager::verifyStateCache(const P4NextHopAppDbEntry &app_db_e << QuotedVar(next_hop_entry->router_interface_id) << " in nexthop manager."; return msg.str(); } - if (next_hop_entry->neighbor_id.to_string() != 
app_db_entry.neighbor_id.to_string()) + if (app_db_entry.action_str == p4orch::kSetIpNexthop && + next_hop_entry->neighbor_id.to_string() != app_db_entry.neighbor_id.to_string()) { std::stringstream msg; msg << "Nexthop " << QuotedVar(app_db_entry.next_hop_id) << " with neighbor ID " @@ -577,7 +615,8 @@ std::string NextHopManager::verifyStateCache(const P4NextHopAppDbEntry &app_db_e return msg.str(); } - if (next_hop_entry->gre_tunnel_id != app_db_entry.gre_tunnel_id) + if (app_db_entry.action_str == p4orch::kSetTunnelNexthop && + next_hop_entry->gre_tunnel_id != app_db_entry.gre_tunnel_id) { std::stringstream msg; msg << "Nexthop " << QuotedVar(app_db_entry.next_hop_id) << " with GRE tunnel ID " @@ -585,6 +624,36 @@ std::string NextHopManager::verifyStateCache(const P4NextHopAppDbEntry &app_db_e << QuotedVar(next_hop_entry->gre_tunnel_id) << " in nexthop manager."; return msg.str(); } + if (!next_hop_entry->gre_tunnel_id.empty()) + { + auto gre_tunnel_or = gP4Orch->getGreTunnelManager()->getConstGreTunnelEntry( + KeyGenerator::generateTunnelKey(next_hop_entry->gre_tunnel_id)); + if (!gre_tunnel_or.ok()) + { + std::stringstream msg; + msg << "GRE Tunnel " << QuotedVar(next_hop_entry->gre_tunnel_id) << " does not exist in GRE Tunnel Manager"; + return msg.str(); + } + P4GreTunnelEntry gre_tunnel = *gre_tunnel_or; + if (gre_tunnel.neighbor_id.to_string() != next_hop_entry->neighbor_id.to_string()) + { + std::stringstream msg; + msg << "Nexthop " << QuotedVar(next_hop_entry->next_hop_id) << " with neighbor ID " + << QuotedVar(next_hop_entry->neighbor_id.to_string()) + << " in nexthop manager does not match internal cache " << QuotedVar(gre_tunnel.neighbor_id.to_string()) + << " with tunnel ID " << QuotedVar(gre_tunnel.tunnel_id) << " in GRE tunnel manager."; + return msg.str(); + } + if (gre_tunnel.router_interface_id != next_hop_entry->router_interface_id) + { + std::stringstream msg; + msg << "Nexthop " << QuotedVar(next_hop_entry->next_hop_id) << " with rif ID " + 
<< QuotedVar(next_hop_entry->router_interface_id) + << " in nexthop manager does not match internal cache " << QuotedVar(gre_tunnel.router_interface_id) + << " with tunnel ID " << QuotedVar(gre_tunnel.tunnel_id) << " in GRE tunnel manager."; + return msg.str(); + } + } return m_p4OidMapper->verifyOIDMapping(SAI_OBJECT_TYPE_NEXT_HOP, next_hop_entry->next_hop_key, next_hop_entry->next_hop_oid); diff --git a/orchagent/p4orch/next_hop_manager.h b/orchagent/p4orch/next_hop_manager.h index eda1ac0001..aac6f5e444 100644 --- a/orchagent/p4orch/next_hop_manager.h +++ b/orchagent/p4orch/next_hop_manager.h @@ -57,9 +57,11 @@ class NextHopManager : public ObjectManagerInterface virtual ~NextHopManager() = default; - void enqueue(const swss::KeyOpFieldsValuesTuple &entry) override; + void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; void drain() override; std::string verifyState(const std::string &key, const std::vector &tuple) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) override; private: // Gets the internal cached next hop entry by its key. 
diff --git a/orchagent/p4orch/object_manager_interface.h b/orchagent/p4orch/object_manager_interface.h index 17b6e9ec84..1d44990edc 100644 --- a/orchagent/p4orch/object_manager_interface.h +++ b/orchagent/p4orch/object_manager_interface.h @@ -8,11 +8,16 @@ class ObjectManagerInterface virtual ~ObjectManagerInterface() = default; // Enqueues an entry into the manager - virtual void enqueue(const swss::KeyOpFieldsValuesTuple &entry) = 0; + virtual void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) = 0; // Processes all entries in the queue virtual void drain() = 0; // StateVerification helper function for the manager virtual std::string verifyState(const std::string &key, const std::vector &tuple) = 0; + + // For sai extension objects depending on a sai object + // return sai object id for a given table with a given key + virtual ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) = 0; }; diff --git a/orchagent/p4orch/p4orch.cpp b/orchagent/p4orch/p4orch.cpp index 717f23bc93..f1e6bd4702 100644 --- a/orchagent/p4orch/p4orch.cpp +++ b/orchagent/p4orch/p4orch.cpp @@ -10,12 +10,15 @@ #include "orch.h" #include "p4orch/acl_rule_manager.h" #include "p4orch/acl_table_manager.h" +#include "p4orch/ext_tables_manager.h" #include "p4orch/gre_tunnel_manager.h" #include "p4orch/l3_admit_manager.h" #include "p4orch/neighbor_manager.h" #include "p4orch/next_hop_manager.h" +#include "p4orch/p4orch_util.h" #include "p4orch/route_manager.h" #include "p4orch/router_interface_manager.h" +#include "p4orch/tables_definition_manager.h" #include "portsorch.h" #include "return_code.h" #include "sai_serialize.h" @@ -23,15 +26,18 @@ extern PortsOrch *gPortsOrch; #define P4_ACL_COUNTERS_STATS_POLL_TIMER_NAME "P4_ACL_COUNTERS_STATS_POLL_TIMER" +#define P4_EXT_COUNTERS_STATS_POLL_TIMER_NAME "P4_EXT_COUNTERS_STATS_POLL_TIMER" +#define APP_P4RT_EXT_TABLES_MANAGER "EXT_TABLES_MANAGER" 
P4Orch::P4Orch(swss::DBConnector *db, std::vector tableNames, VRFOrch *vrfOrch, CoppOrch *coppOrch) : Orch(db, tableNames) { SWSS_LOG_ENTER(); + m_tablesDefnManager = std::make_unique(&m_p4OidMapper, &m_publisher); m_routerIntfManager = std::make_unique(&m_p4OidMapper, &m_publisher); - m_greTunnelManager = std::make_unique(&m_p4OidMapper, &m_publisher); m_neighborManager = std::make_unique(&m_p4OidMapper, &m_publisher); + m_greTunnelManager = std::make_unique(&m_p4OidMapper, &m_publisher); m_nextHopManager = std::make_unique(&m_p4OidMapper, &m_publisher); m_routeManager = std::make_unique(&m_p4OidMapper, vrfOrch, &m_publisher); m_mirrorSessionManager = std::make_unique(&m_p4OidMapper, &m_publisher); @@ -39,7 +45,9 @@ P4Orch::P4Orch(swss::DBConnector *db, std::vector tableNames, VRFOr m_aclRuleManager = std::make_unique(&m_p4OidMapper, vrfOrch, coppOrch, &m_publisher); m_wcmpManager = std::make_unique(&m_p4OidMapper, &m_publisher); m_l3AdmitManager = std::make_unique(&m_p4OidMapper, &m_publisher); + m_extTablesManager = std::make_unique(&m_p4OidMapper, vrfOrch, &m_publisher); + m_p4TableToManagerMap[APP_P4RT_TABLES_DEFINITION_TABLE_NAME] = m_tablesDefnManager.get(); m_p4TableToManagerMap[APP_P4RT_ROUTER_INTERFACE_TABLE_NAME] = m_routerIntfManager.get(); m_p4TableToManagerMap[APP_P4RT_NEIGHBOR_TABLE_NAME] = m_neighborManager.get(); m_p4TableToManagerMap[APP_P4RT_TUNNEL_TABLE_NAME] = m_greTunnelManager.get(); @@ -50,10 +58,12 @@ P4Orch::P4Orch(swss::DBConnector *db, std::vector tableNames, VRFOr m_p4TableToManagerMap[APP_P4RT_ACL_TABLE_DEFINITION_NAME] = m_aclTableManager.get(); m_p4TableToManagerMap[APP_P4RT_WCMP_GROUP_TABLE_NAME] = m_wcmpManager.get(); m_p4TableToManagerMap[APP_P4RT_L3_ADMIT_TABLE_NAME] = m_l3AdmitManager.get(); + m_p4TableToManagerMap[APP_P4RT_EXT_TABLES_MANAGER] = m_extTablesManager.get(); + m_p4ManagerPrecedence.push_back(m_tablesDefnManager.get()); m_p4ManagerPrecedence.push_back(m_routerIntfManager.get()); - 
m_p4ManagerPrecedence.push_back(m_greTunnelManager.get()); m_p4ManagerPrecedence.push_back(m_neighborManager.get()); + m_p4ManagerPrecedence.push_back(m_greTunnelManager.get()); m_p4ManagerPrecedence.push_back(m_nextHopManager.get()); m_p4ManagerPrecedence.push_back(m_wcmpManager.get()); m_p4ManagerPrecedence.push_back(m_routeManager.get()); @@ -61,14 +71,23 @@ P4Orch::P4Orch(swss::DBConnector *db, std::vector tableNames, VRFOr m_p4ManagerPrecedence.push_back(m_aclTableManager.get()); m_p4ManagerPrecedence.push_back(m_aclRuleManager.get()); m_p4ManagerPrecedence.push_back(m_l3AdmitManager.get()); + m_p4ManagerPrecedence.push_back(m_extTablesManager.get()); + tablesinfo = nullptr; // Add timer executor to update ACL counters stats in COUNTERS_DB - auto interv = timespec{.tv_sec = P4_COUNTERS_READ_INTERVAL, .tv_nsec = 0}; - m_aclCounterStatsTimer = new swss::SelectableTimer(interv); - auto executor = new swss::ExecutableTimer(m_aclCounterStatsTimer, this, P4_ACL_COUNTERS_STATS_POLL_TIMER_NAME); - Orch::addExecutor(executor); + auto acl_interv = timespec{.tv_sec = P4_COUNTERS_READ_INTERVAL, .tv_nsec = 0}; + m_aclCounterStatsTimer = new swss::SelectableTimer(acl_interv); + auto acl_executor = new swss::ExecutableTimer(m_aclCounterStatsTimer, this, P4_ACL_COUNTERS_STATS_POLL_TIMER_NAME); + Orch::addExecutor(acl_executor); m_aclCounterStatsTimer->start(); + // Add timer executor to update EXT counters stats in COUNTERS_DB + auto ext_interv = timespec{.tv_sec = P4_COUNTERS_READ_INTERVAL, .tv_nsec = 0}; + m_extCounterStatsTimer = new swss::SelectableTimer(ext_interv); + auto ext_executor = new swss::ExecutableTimer(m_extCounterStatsTimer, this, P4_EXT_COUNTERS_STATS_POLL_TIMER_NAME); + Orch::addExecutor(ext_executor); + m_extCounterStatsTimer->start(); + // Add port state change notification handling support swss::DBConnector notificationsDb("ASIC_DB", 0); m_portStatusNotificationConsumer = new swss::NotificationConsumer(¬ificationsDb, "NOTIFICATIONS"); @@ -110,16 +129,25 
@@ void P4Orch::doTask(Consumer &consumer) status); continue; } - if (m_p4TableToManagerMap.find(table_name) == m_p4TableToManagerMap.end()) + if (m_p4TableToManagerMap.find(table_name) != m_p4TableToManagerMap.end()) { - auto status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "Failed to find P4Orch Manager for " << table_name << " P4RT DB table"; - SWSS_LOG_ERROR("%s", status.message().c_str()); - m_publisher.publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status); - continue; + m_p4TableToManagerMap[table_name]->enqueue(table_name, key_op_fvs_tuple); + } + else + { + if (table_name.rfind(p4orch::kTablePrefixEXT, 0) != std::string::npos) + { + m_p4TableToManagerMap[APP_P4RT_EXT_TABLES_MANAGER]->enqueue(table_name, key_op_fvs_tuple); + } + else + { + auto status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Failed to find P4Orch Manager for " << table_name << " P4RT DB table"; + SWSS_LOG_ERROR("%s", status.message().c_str()); + m_publisher.publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), + status); + } } - m_p4TableToManagerMap[table_name]->enqueue(key_op_fvs_tuple); } for (const auto &manager : m_p4ManagerPrecedence) @@ -141,6 +169,10 @@ void P4Orch::doTask(swss::SelectableTimer &timer) { m_aclRuleManager->doAclCounterStatsTask(); } + else if (&timer == m_extCounterStatsTimer) + { + m_extTablesManager->doExtCounterStatsTask(); + } else { SWSS_LOG_NOTICE("Unrecognized timer passed in P4Orch::doTask(swss::SelectableTimer& " diff --git a/orchagent/p4orch/p4orch.h b/orchagent/p4orch/p4orch.h index e39041802b..cc02052830 100644 --- a/orchagent/p4orch/p4orch.h +++ b/orchagent/p4orch/p4orch.h @@ -12,6 +12,7 @@ #include "orch.h" #include "p4orch/acl_rule_manager.h" #include "p4orch/acl_table_manager.h" +#include "p4orch/ext_tables_manager.h" #include "p4orch/gre_tunnel_manager.h" #include "p4orch/l3_admit_manager.h" #include "p4orch/mirror_session_manager.h" @@ -21,10 
+22,22 @@ #include "p4orch/p4oidmapper.h" #include "p4orch/route_manager.h" #include "p4orch/router_interface_manager.h" +#include "p4orch/tables_definition_manager.h" #include "p4orch/wcmp_manager.h" #include "response_publisher.h" #include "vrforch.h" +static const std::map FixedTablesMap = { + {"router_interface_table", APP_P4RT_ROUTER_INTERFACE_TABLE_NAME}, + {"neighbor_table", APP_P4RT_NEIGHBOR_TABLE_NAME}, + {"nexthop_table", APP_P4RT_NEXTHOP_TABLE_NAME}, + {"wcmp_group_table", APP_P4RT_WCMP_GROUP_TABLE_NAME}, + {"ipv4_table", APP_P4RT_IPV4_TABLE_NAME}, + {"ipv6_table", APP_P4RT_IPV6_TABLE_NAME}, + {"mirror_session_table", APP_P4RT_MIRROR_SESSION_TABLE_NAME}, + {"l3_admit_table", APP_P4RT_L3_ADMIT_TABLE_NAME}, + {"tunnel_table", APP_P4RT_TUNNEL_TABLE_NAME}}; + class P4Orch : public Orch { public: @@ -37,6 +50,10 @@ class P4Orch : public Orch p4orch::AclRuleManager *getAclRuleManager(); p4orch::WcmpManager *getWcmpManager(); GreTunnelManager *getGreTunnelManager(); + TablesInfo *tablesinfo = NULL; + + // m_p4TableToManagerMap: P4 APP DB table name, P4 Object Manager + std::unordered_map m_p4TableToManagerMap; private: void doTask(Consumer &consumer); @@ -44,13 +61,13 @@ class P4Orch : public Orch void doTask(swss::NotificationConsumer &consumer); void handlePortStatusChangeNotification(const std::string &op, const std::string &data); - // m_p4TableToManagerMap: P4 APP DB table name, P4 Object Manager - std::unordered_map m_p4TableToManagerMap; // P4 object manager request processing order. 
std::vector m_p4ManagerPrecedence; swss::SelectableTimer *m_aclCounterStatsTimer; + swss::SelectableTimer *m_extCounterStatsTimer; P4OidMapper m_p4OidMapper; + std::unique_ptr m_tablesDefnManager; std::unique_ptr m_routerIntfManager; std::unique_ptr m_greTunnelManager; std::unique_ptr m_neighborManager; @@ -61,6 +78,7 @@ class P4Orch : public Orch std::unique_ptr m_aclRuleManager; std::unique_ptr m_wcmpManager; std::unique_ptr m_l3AdmitManager; + std::unique_ptr m_extTablesManager; // Notification consumer for port state change swss::NotificationConsumer *m_portStatusNotificationConsumer; diff --git a/orchagent/p4orch/p4orch_util.cpp b/orchagent/p4orch/p4orch_util.cpp index 5ff0c058d4..dd0a4171ad 100644 --- a/orchagent/p4orch/p4orch_util.cpp +++ b/orchagent/p4orch/p4orch_util.cpp @@ -1,8 +1,10 @@ #include "p4orch/p4orch_util.h" +#include "p4orch/p4orch.h" #include "schema.h" using ::p4orch::kTableKeyDelimiter; +extern P4Orch *gP4Orch; // Prepends "match/" to the input string str to construct a new string. 
std::string prependMatchField(const std::string &str) @@ -80,6 +82,44 @@ std::string verifyAttrs(const std::vector &targets, return ""; } +TableInfo *getTableInfo(const std::string &table_name) +{ + if (!gP4Orch->tablesinfo) + { + return nullptr; + } + + auto it = gP4Orch->tablesinfo->m_tableInfoMap.find(table_name); + if (it == gP4Orch->tablesinfo->m_tableInfoMap.end()) + { + return nullptr; + } + + return &it->second; +} + +ActionInfo *getTableActionInfo(TableInfo *table, const std::string &action_name) +{ + if (!table) + { + return nullptr; + } + + auto it = table->action_fields.find(action_name); + if (it == table->action_fields.end()) + { + return nullptr; + } + + return &it->second; +} + +std::string KeyGenerator::generateTablesInfoKey(const std::string &context) +{ + std::map fv_map = {{"context", context}}; + return generateKey(fv_map); +} + std::string KeyGenerator::generateRouteKey(const std::string &vrf_id, const swss::IpPrefix &ip_prefix) { std::map fv_map = { @@ -152,6 +192,17 @@ std::string KeyGenerator::generateTunnelKey(const std::string &tunnel_id) return generateKey(fv_map); } +std::string KeyGenerator::generateExtTableKey(const std::string &table_name, const std::string &table_key) +{ + std::string key; + + key.append(table_name); + key.append(":"); + key.append(table_key); + + return key; +} + std::string KeyGenerator::generateKey(const std::map &fv_map) { std::string key; diff --git a/orchagent/p4orch/p4orch_util.h b/orchagent/p4orch/p4orch_util.h index 995ccf9b27..9cfcf53a82 100644 --- a/orchagent/p4orch/p4orch_util.h +++ b/orchagent/p4orch/p4orch_util.h @@ -5,17 +5,23 @@ #include #include #include +#include #include #include "ipaddress.h" #include "ipprefix.h" #include "macaddress.h" #include "table.h" +extern "C" +{ +#include "saitypes.h" +} namespace p4orch { // Field names in P4RT APP DB entry. 
+constexpr char *kTablePrefixEXT = "EXT_"; constexpr char *kRouterInterfaceId = "router_interface_id"; constexpr char *kPort = "port"; constexpr char *kInPort = "in_port"; @@ -40,7 +46,7 @@ constexpr char *kSetWcmpGroupIdAndMetadata = "set_wcmp_group_id_and_metadata"; constexpr char *kSetMetadataAndDrop = "set_metadata_and_drop"; constexpr char *kSetNexthop = "set_nexthop"; constexpr char *kSetIpNexthop = "set_ip_nexthop"; -constexpr char *kSetTunnelNexthop = "set_tunnel_encap_nexthop"; +constexpr char *kSetTunnelNexthop = "set_p2p_tunnel_encap_nexthop"; constexpr char *kDrop = "drop"; constexpr char *kTrap = "trap"; constexpr char *kStage = "stage"; @@ -79,7 +85,20 @@ constexpr char *kTtl = "ttl"; constexpr char *kTos = "tos"; constexpr char *kMirrorAsIpv4Erspan = "mirror_as_ipv4_erspan"; constexpr char *kL3AdmitAction = "admit_to_l3"; -constexpr char *kTunnelAction = "mark_for_tunnel_encap"; +constexpr char *kTunnelAction = "mark_for_p2p_tunnel_encap"; + +// Field names in P4RT TABLE DEFINITION APP DB entry. +constexpr char *kTables = "tables"; +constexpr char *kId = "id"; +constexpr char *kName = "name"; +constexpr char *kAlias = "alias"; +constexpr char *kBitwidth = "bitwidth"; +constexpr char *kFormat = "format"; +constexpr char *kmatchFields = "matchFields"; +constexpr char *kActionParams = "params"; +constexpr char *kReferences = "references"; +constexpr char *kTableRef = "table"; +constexpr char *kMatchRef = "match"; } // namespace p4orch // Prepends "match/" to the input string str to construct a new string. @@ -88,6 +107,57 @@ std::string prependMatchField(const std::string &str); // Prepends "param/" to the input string str to construct a new string. 
std::string prependParamField(const std::string &str); +struct ActionParamInfo +{ + std::string name; + std::string fieldtype; + std::string datatype; + std::unordered_map table_reference_map; +}; + +struct ActionInfo +{ + std::string name; + std::unordered_map params; + bool refers_to; +}; + +struct TableMatchInfo +{ + std::string name; + std::string fieldtype; + std::string datatype; + std::unordered_map table_reference_map; +}; + +/** + * Dervied table definition + * This is a derived state out of table definition provided by P4RT-APP + */ +struct TableInfo +{ + std::string name; + int id; + int precedence; + std::unordered_map match_fields; + std::unordered_map action_fields; + bool counter_bytes_enabled; + bool counter_packets_enabled; + std::vector action_ref_tables; + // list of tables across all actions, of current table, refer to +}; + +/** + * table-name to table-definition map + */ +typedef std::unordered_map TableInfoMap; + +struct TablesInfoAppDbEntry +{ + std::string context; + std::string info; +}; + struct P4RouterInterfaceAppDbEntry { std::string router_interface_id; @@ -221,6 +291,25 @@ struct P4AclRuleAppDbEntry P4AclMeterAppDb meter; }; +struct DepObject +{ + sai_object_type_t sai_object; + std::string key; + sai_object_id_t oid; +}; + +struct P4ExtTableAppDbEntry +{ + std::string db_key; + std::string table_name; + std::string table_key; + std::unordered_map> action_params; + std::unordered_map action_dep_objects; +}; + +TableInfo *getTableInfo(const std::string &table_name); +ActionInfo *getTableActionInfo(TableInfo *table, const std::string &action_name); + // Get the table name and key content from the given P4RT key. // Outputs will be empty strings in case of error. 
// Example: FIXED_NEIGHBOR_TABLE:{content} @@ -246,6 +335,8 @@ std::string verifyAttrs(const std::vector &targets, class KeyGenerator { public: + static std::string generateTablesInfoKey(const std::string &context); + static std::string generateRouteKey(const std::string &vrf_id, const swss::IpPrefix &ip_prefix); static std::string generateRouterInterfaceKey(const std::string &router_intf_id); @@ -267,6 +358,8 @@ class KeyGenerator static std::string generateTunnelKey(const std::string &tunnel_id); + static std::string generateExtTableKey(const std::string &table_name, const std::string &table_key); + // Generates key used by object managers and centralized mapper. // Takes map of as input and returns a concatenated string // of the form id1=value1:id2=value2... @@ -283,4 +376,4 @@ template std::string QuotedVar(T name) } // Trim tailing and leading whitespace -std::string trim(const std::string &s); \ No newline at end of file +std::string trim(const std::string &s); diff --git a/orchagent/p4orch/route_manager.cpp b/orchagent/p4orch/route_manager.cpp index e029489fde..c50b3bb4b9 100644 --- a/orchagent/p4orch/route_manager.cpp +++ b/orchagent/p4orch/route_manager.cpp @@ -1,6 +1,7 @@ #include "p4orch/route_manager.h" #include +#include #include #include #include @@ -11,7 +12,6 @@ #include "converter.h" #include "crmorch.h" #include "dbconnector.h" -#include "json.hpp" #include "logger.h" #include "p4orch/p4orch_util.h" #include "sai_serialize.h" @@ -837,7 +837,13 @@ std::vector RouteManager::deleteRouteEntries(const std::vector &tuple) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) override; private: // Applies route entry updates from src to dest. 
The merged result will be diff --git a/orchagent/p4orch/router_interface_manager.cpp b/orchagent/p4orch/router_interface_manager.cpp index 43dc652eb6..de32576e68 100644 --- a/orchagent/p4orch/router_interface_manager.cpp +++ b/orchagent/p4orch/router_interface_manager.cpp @@ -2,6 +2,7 @@ #include #include +#include #include #include #include @@ -10,7 +11,6 @@ #include "SaiAttributeList.h" #include "dbconnector.h" #include "directory.h" -#include "json.hpp" #include "logger.h" #include "orch.h" #include "p4orch/p4orch_util.h" @@ -337,7 +337,36 @@ ReturnCode RouterInterfaceManager::processDeleteRequest(const std::string &route return status; } -void RouterInterfaceManager::enqueue(const swss::KeyOpFieldsValuesTuple &entry) +ReturnCode RouterInterfaceManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) +{ + std::string value; + + try + { + nlohmann::json j = nlohmann::json::parse(json_key); + if (j.find(prependMatchField(p4orch::kRouterInterfaceId)) != j.end()) + { + value = j.at(prependMatchField(p4orch::kRouterInterfaceId)).get(); + object_key = KeyGenerator::generateRouterInterfaceKey(value); + object_type = SAI_OBJECT_TYPE_ROUTER_INTERFACE; + return ReturnCode(); + } + else + { + SWSS_LOG_ERROR("%s match parameter absent: required for dependent object query", + p4orch::kRouterInterfaceId); + } + } + catch (std::exception &ex) + { + SWSS_LOG_ERROR("json_key parse error"); + } + + return StatusCode::SWSS_RC_INVALID_PARAM; +} + +void RouterInterfaceManager::enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) { m_entries.push_back(entry); } diff --git a/orchagent/p4orch/router_interface_manager.h b/orchagent/p4orch/router_interface_manager.h index 73d994ff06..f33f443979 100644 --- a/orchagent/p4orch/router_interface_manager.h +++ b/orchagent/p4orch/router_interface_manager.h @@ -49,9 +49,11 @@ class RouterInterfaceManager : public ObjectManagerInterface } virtual 
~RouterInterfaceManager() = default; - void enqueue(const swss::KeyOpFieldsValuesTuple &entry) override; + void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; void drain() override; std::string verifyState(const std::string &key, const std::vector &tuple) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) override; private: ReturnCodeOr deserializeRouterIntfEntry( diff --git a/orchagent/p4orch/tables_definition_manager.cpp b/orchagent/p4orch/tables_definition_manager.cpp new file mode 100644 index 0000000000..1da15028e5 --- /dev/null +++ b/orchagent/p4orch/tables_definition_manager.cpp @@ -0,0 +1,670 @@ +#include "p4orch/tables_definition_manager.h" + +#include +#include +#include +#include +#include +#include + +#include "directory.h" +#include "logger.h" +#include "orch.h" +#include "p4orch/p4orch.h" +#include "p4orch/p4orch_util.h" +#include "tokenize.h" +extern "C" +{ +#include "saitypes.h" +} + +extern Directory gDirectory; +extern P4Orch *gP4Orch; +const std::map format_datatype_map = { + {"MAC", "SAI_ATTR_VALUE_TYPE_MAC"}, {"IPV4", "SAI_ATTR_VALUE_TYPE_IPV4"}, {"IPV6", "SAI_ATTR_VALUE_TYPE_IPV6"}}; + +std::string BitwidthToDatatype(int bitwidth) +{ + std::string datatype = "SAI_ATTR_VALUE_TYPE_CHARDATA"; + + if (bitwidth <= 0) + { + datatype = "SAI_ATTR_VALUE_TYPE_CHARDATA"; + } + else if (bitwidth <= 8) + { + datatype = "SAI_ATTR_VALUE_TYPE_UINT8"; + } + else if (bitwidth <= 16) + { + datatype = "SAI_ATTR_VALUE_TYPE_UINT16"; + } + else if (bitwidth <= 32) + { + datatype = "SAI_ATTR_VALUE_TYPE_UINT32"; + } + else if (bitwidth <= 64) + { + datatype = "SAI_ATTR_VALUE_TYPE_UINT64"; + } + + return datatype; +} + +std::string parseBitwidthToDatatype(const nlohmann::json &json) +{ + int bitwidth; + std::string datatype = "SAI_ATTR_VALUE_TYPE_CHARDATA"; + + if (json.find(p4orch::kBitwidth) != json.end()) + { + bitwidth = 
json.at(p4orch::kBitwidth).get(); + datatype = BitwidthToDatatype(bitwidth); + } + + return datatype; +} + +std::string parseFormatToDatatype(const nlohmann::json &json, std::string datatype) +{ + std::string format; + + if (json.find(p4orch::kFormat) != json.end()) + { + format = json.at(p4orch::kFormat).get(); + + auto it = format_datatype_map.find(format); + if (it != format_datatype_map.end()) + { + datatype = it->second; + } + } + + return datatype; +} + +ReturnCode parseTableMatchReferences(const nlohmann::json &match_json, TableMatchInfo &match) +{ + std::string table, field; + + if (match_json.find(p4orch::kReferences) != match_json.end()) + { + for (const auto &ref_json : match_json[p4orch::kReferences]) + { + try + { + table = ref_json.at(p4orch::kTableRef).get(); + field = ref_json.at(p4orch::kMatchRef).get(); + match.table_reference_map[table] = field; + } + catch (std::exception &ex) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "can not parse tables from app-db supplied table definition " + "info"; + } + } + } + + return ReturnCode(); +} + +ReturnCode parseActionParamReferences(const nlohmann::json ¶m_json, ActionParamInfo ¶m) +{ + std::string table, field; + + if (param_json.find(p4orch::kReferences) != param_json.end()) + { + for (const auto &ref_json : param_json[p4orch::kReferences]) + { + try + { + table = ref_json.at(p4orch::kTableRef).get(); + field = ref_json.at(p4orch::kMatchRef).get(); + param.table_reference_map[table] = field; + } + catch (std::exception &ex) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "can not parse tables from app-db supplied table definition " + "info"; + } + } + } + + return ReturnCode(); +} + +ReturnCode parseTableActionParams(const nlohmann::json &action_json, ActionInfo &action) +{ + action.refers_to = false; + if (action_json.find(p4orch::kActionParams) != action_json.end()) + { + for (const auto ¶m_json : action_json[p4orch::kActionParams]) + { + try + { + ActionParamInfo 
param; + std::string param_name; + + param_name = param_json.at(p4orch::kName).get(); + param.name = param_name; + param.datatype = parseBitwidthToDatatype(param_json); + param.datatype = parseFormatToDatatype(param_json, param.datatype); + parseActionParamReferences(param_json, param); + action.params[param_name] = param; + + if (!param.table_reference_map.empty()) + { + /** + * Helps avoid walk of action parameters if this is set to false at + * action level + */ + action.refers_to = true; + } + } + catch (std::exception &ex) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "can not parse tables from app-db supplied table definition " + "info"; + } + } + } + + return ReturnCode(); +} + +ReturnCode parseTableCounter(const nlohmann::json &table_json, TableInfo &table) +{ + if (table_json.find(p4orch::kCounterUnit) != table_json.end()) + { + auto unit = table_json.at(p4orch::kCounterUnit); + if (unit == "PACKETS") + { + table.counter_packets_enabled = true; + } + else if (unit == "BYTES") + { + table.counter_bytes_enabled = true; + } + else + { + table.counter_packets_enabled = true; + table.counter_bytes_enabled = true; + } + } + + return ReturnCode(); +} + +ReturnCode parseTablesInfo(const nlohmann::json &info_json, TablesInfo &info_entry) +{ + ReturnCode status; + int table_id; + std::string table_name, field_name; + + if (info_json.find(p4orch::kTables) == info_json.end()) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "no tables in app-db supplied table definition info"; + } + + for (const auto &table_json : info_json[p4orch::kTables]) + { + try + { + table_id = table_json.at(p4orch::kId).get(); + table_name = table_json.at(p4orch::kAlias).get(); + } + catch (std::exception &ex) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "can not parse tables from app-db supplied table definition " + "info"; + } + + TableInfo table = {}; + table.name = table_name; + table.id = table_id; + try + { + for (const auto &match_json : 
table_json[p4orch::kmatchFields]) + { + TableMatchInfo match = {}; + std::string match_name; + + match_name = match_json.at(p4orch::kName).get(); + match.name = match_name; + match.datatype = parseBitwidthToDatatype(match_json); + match.datatype = parseFormatToDatatype(match_json, match.datatype); + parseTableMatchReferences(match_json, match); + table.match_fields[match_name] = match; + } + + for (const auto &action_json : table_json[p4orch::kActions]) + { + ActionInfo action = {}; + std::string action_name; + + action_name = action_json.at(p4orch::kAlias).get(); + action.name = action_name; + parseTableActionParams(action_json, action); + table.action_fields[action_name] = action; + + /** + * If any parameter of action refers to another table, add that one in + * the cross-reference list of current table + */ + for (auto param_it = action.params.begin(); param_it != action.params.end(); param_it++) + { + ActionParamInfo action_param = param_it->second; + for (auto ref_it = action_param.table_reference_map.begin(); + ref_it != action_param.table_reference_map.end(); ref_it++) + { + if (std::find(table.action_ref_tables.begin(), table.action_ref_tables.end(), ref_it->first) == + table.action_ref_tables.end()) + { + table.action_ref_tables.push_back(ref_it->first); + } + } + } + } + + parseTableCounter(table_json, table); + } + catch (std::exception &ex) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "can not parse table " << QuotedVar(table_name.c_str()) << "match fields"; + } + + info_entry.m_tableIdNameMap[std::to_string(table_id)] = table_name; + info_entry.m_tableInfoMap[table_name] = table; + } + + return ReturnCode(); +} + +ReturnCodeOr TablesDefnManager::deserializeTablesInfoEntry( + const std::string &key, const std::vector &attributes) +{ + SWSS_LOG_ENTER(); + + TablesInfoAppDbEntry app_db_entry = {}; + try + { + nlohmann::json j = nlohmann::json::parse(key); + app_db_entry.context = j["context"]; + } + catch (std::exception &ex) + { + 
return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Failed to deserialize tables info"; + } + + for (const auto &it : attributes) + { + const auto &field = fvField(it); + std::string value = fvValue(it); + if (field == "info") + { + app_db_entry.info = value; + } + else + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Unexpected field " << QuotedVar(field) << " in table entry"; + } + } + + return app_db_entry; +} + +ReturnCode validateTablesInfoAppDbEntry(const TablesInfoAppDbEntry &app_db_entry) +{ + // Perform generic APP DB entry validations. Operation specific validations + // will be done by the respective request process methods. + + return ReturnCode(); +} + +TablesInfo *TablesDefnManager::getTablesInfoEntry(const std::string &context_key) +{ + SWSS_LOG_ENTER(); + + if (m_tablesinfoMap.find(context_key) == m_tablesinfoMap.end()) + return nullptr; + + return &m_tablesinfoMap[context_key]; +} + +ReturnCode TablesDefnManager::processAddRequest(const TablesInfoAppDbEntry &app_db_entry, + const std::string &context_key) +{ + nlohmann::json tablesinfo_json; + ReturnCode status; + + SWSS_LOG_ENTER(); + + if (!m_tablesinfoMap.empty()) + { + // For now p4rt can send only same table-definition, so ignore it silently + return ReturnCode(); + } + + try + { + tablesinfo_json = nlohmann::json::parse(app_db_entry.info); + } + catch (std::exception &ex) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "tables info from appdb can not be parsed\n"; + } + + TablesInfo tablesinfo_entry(app_db_entry.context, tablesinfo_json); + + status = parseTablesInfo(tablesinfo_json, tablesinfo_entry); + if (!status.ok()) + { + return status; + } + + m_tablesinfoMap[app_db_entry.context] = tablesinfo_entry; + gP4Orch->tablesinfo = &m_tablesinfoMap[app_db_entry.context]; + return ReturnCode(); +} + +ReturnCode TablesDefnManager::processUpdateRequest(const TablesInfoAppDbEntry &app_db_entry, + const std::string &context_key) +{ + SWSS_LOG_ENTER(); + + return 
ReturnCode(StatusCode::SWSS_RC_UNIMPLEMENTED) << "update of Tables Definition not supported"; +} + +ReturnCode TablesDefnManager::processDeleteRequest(const std::string &context_key) +{ + SWSS_LOG_ENTER(); + + auto *tablesinfo = getTablesInfoEntry(context_key); + + if (tablesinfo) + { + if (gP4Orch->tablesinfo == tablesinfo) + { + gP4Orch->tablesinfo = nullptr; + } + + tablesinfo->m_tableIdNameMap.clear(); + } + + m_tablesinfoMap.erase(context_key); + return ReturnCode(); +} + +ReturnCode TablesDefnManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) +{ + return StatusCode::SWSS_RC_INVALID_PARAM; +} + +std::unordered_map> createGraph(std::vector> preReq) +{ + std::unordered_map> graph; + + for (auto pre : preReq) + { + auto it = graph.find(pre.second); + if (it != graph.end()) + { + it->second.insert(pre.first); + } + else + { + graph[pre.second].insert(pre.first); + } + } + + return graph; +} + +std::unordered_map computeIndegree(std::unordered_map> &graph) +{ + std::unordered_map degrees; + + for (auto g_it = graph.begin(); g_it != graph.end(); g_it++) + { + for (int neigh : g_it->second) + { + auto n_it = degrees.find(neigh); + if (n_it != degrees.end()) + { + n_it->second++; + } + else + { + degrees.insert({neigh, 0}); + } + } + } + + return degrees; +} + +std::vector findTablePrecedence(int tables, std::vector> preReq, TablesInfo *tables_info) +{ + std::unordered_map> graph = createGraph(preReq); + std::unordered_map degrees = computeIndegree(graph); + std::vector visited; + std::vector toposort; + std::queue zeros; + + // initialize queue with tables having no dependencies + for (auto table_it = tables_info->m_tableInfoMap.begin(); table_it != tables_info->m_tableInfoMap.end(); table_it++) + { + TableInfo table_info = table_it->second; + if (degrees.find(table_info.id) == degrees.end()) + { + zeros.push(table_info.id); + visited.push_back(table_info.id); + } + } + + for (int i = 0; i < tables; i++) + { 
// Erroneous input data (e.g. cyclic dependencies): could not build the
orderedTables.size(); i++) + { + auto table_id = orderedTables[i]; + auto id_it = tables_info->m_tableIdNameMap.find(std::to_string(table_id)); + if (id_it == tables_info->m_tableIdNameMap.end()) + { + continue; + } + + auto table_it = tables_info->m_tableInfoMap.find(id_it->second); + if (table_it == tables_info->m_tableInfoMap.end()) + { + continue; + } + + table_it->second.precedence = (int)i; + tables_info->m_tablePrecedenceMap[(int)i] = table_it->second.name; + } + + return; +} + +void TablesDefnManager::enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) +{ + m_entries.push_back(entry); +} + +void TablesDefnManager::drain() +{ + SWSS_LOG_ENTER(); + + for (const auto &key_op_fvs_tuple : m_entries) + { + std::string table_name; + std::string key; + parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &key); + const std::vector &attributes = kfvFieldsValues(key_op_fvs_tuple); + + ReturnCode status; + auto app_db_entry_or = deserializeTablesInfoEntry(key, attributes); + if (!app_db_entry_or.ok()) + { + status = app_db_entry_or.status(); + SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", + QuotedVar(table_name + ":" + key).c_str(), status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), + status, + /*replace=*/true); + continue; + } + auto &app_db_entry = *app_db_entry_or; + + status = validateTablesInfoAppDbEntry(app_db_entry); + if (!status.ok()) + { + SWSS_LOG_ERROR("Validation failed for tables definition APP DB entry with key %s: " + "%s", + QuotedVar(table_name + ":" + key).c_str(), status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), + status, + /*replace=*/true); + continue; + } + + const std::string context_key = KeyGenerator::generateTablesInfoKey(app_db_entry.context); + + const std::string &operation = kfvOp(key_op_fvs_tuple); + if (operation == 
SET_COMMAND) + { + auto *tablesinfo = getTablesInfoEntry(context_key); + if (tablesinfo == nullptr) + { + // Create TablesInfo + status = processAddRequest(app_db_entry, context_key); + } + else + { + // Modify existing TablesInfo + status = processUpdateRequest(app_db_entry, context_key); + } + } + else if (operation == DEL_COMMAND) + { + // Delete TablesInfo + status = processDeleteRequest(context_key); + } + else + { + status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Unknown operation type " << QuotedVar(operation); + SWSS_LOG_ERROR("%s", status.message().c_str()); + } + if (!status.ok()) + { + SWSS_LOG_ERROR("Processing failed for tables definition APP DB entry with key %s: " + "%s", + QuotedVar(table_name + ":" + key).c_str(), status.message().c_str()); + } + else + { + buildTablePrecedence(gP4Orch->tablesinfo); + } + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + } + m_entries.clear(); +} + +std::string TablesDefnManager::verifyState(const std::string &key, const std::vector &tuple) +{ + std::string result = ""; + SWSS_LOG_ENTER(); + + return result; +} diff --git a/orchagent/p4orch/tables_definition_manager.h b/orchagent/p4orch/tables_definition_manager.h new file mode 100644 index 0000000000..85ca363bf5 --- /dev/null +++ b/orchagent/p4orch/tables_definition_manager.h @@ -0,0 +1,79 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include "macaddress.h" +#include "orch.h" +#include "p4orch/object_manager_interface.h" +#include "p4orch/p4oidmapper.h" +#include "p4orch/p4orch_util.h" +#include "response_publisher_interface.h" +#include "return_code.h" +extern "C" +{ +#include "sai.h" +} + +/** + * A set of tables definition + */ +struct TablesInfo +{ + std::string context; + nlohmann::json info; + std::unordered_map m_tableIdNameMap; + std::unordered_map m_tableInfoMap; + std::map m_tablePrecedenceMap; + + TablesInfo() {}; + TablesInfo(const 
std::string &context_key, const nlohmann::json &info_value) + : context(context_key), info(info_value) + { + } +}; + +/** + * Datastructure is designed to hold multiple set of table definition. + * However, current support handles only one set of table definition. + */ +typedef std::unordered_map TablesInfoMap; + +class TablesDefnManager : public ObjectManagerInterface +{ + public: + TablesDefnManager(P4OidMapper *p4oidMapper, ResponsePublisherInterface *publisher) + { + SWSS_LOG_ENTER(); + + assert(p4oidMapper != nullptr); + m_p4OidMapper = p4oidMapper; + assert(publisher != nullptr); + m_publisher = publisher; + } + virtual ~TablesDefnManager() = default; + + void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; + void drain() override; + std::string verifyState(const std::string &key, const std::vector &tuple) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) override; + + private: + ReturnCodeOr deserializeTablesInfoEntry(const std::string &key, + const std::vector &attributes); + TablesInfo *getTablesInfoEntry(const std::string &context_key); + ReturnCode createTablesInfo(const std::string &context_key, TablesInfo &tablesinfo_entry); + ReturnCode removeTablesInfo(const std::string &context_key); + ReturnCode processAddRequest(const TablesInfoAppDbEntry &app_db_entry, const std::string &context_key); + ReturnCode processUpdateRequest(const TablesInfoAppDbEntry &app_db_entry, const std::string &context_key); + ReturnCode processDeleteRequest(const std::string &context_key); + + TablesInfoMap m_tablesinfoMap; + P4OidMapper *m_p4OidMapper; + ResponsePublisherInterface *m_publisher; + std::deque m_entries; +}; diff --git a/orchagent/p4orch/tests/Makefile.am b/orchagent/p4orch/tests/Makefile.am index 2af4e8e613..d541bbe637 100644 --- a/orchagent/p4orch/tests/Makefile.am +++ b/orchagent/p4orch/tests/Makefile.am @@ -26,13 +26,19 @@ p4orch_tests_SOURCES 
= $(ORCHAGENT_DIR)/orch.cpp \ $(ORCHAGENT_DIR)/vrforch.cpp \ $(ORCHAGENT_DIR)/vxlanorch.cpp \ $(ORCHAGENT_DIR)/copporch.cpp \ + $(ORCHAGENT_DIR)/switch/switch_capabilities.cpp \ + $(ORCHAGENT_DIR)/switch/switch_helper.cpp \ $(ORCHAGENT_DIR)/switchorch.cpp \ $(ORCHAGENT_DIR)/request_parser.cpp \ + $(top_srcdir)/lib/recorder.cpp \ $(ORCHAGENT_DIR)/flex_counter/flex_counter_manager.cpp \ $(ORCHAGENT_DIR)/flex_counter/flow_counter_handler.cpp \ + $(ORCHAGENT_DIR)/port/port_capabilities.cpp \ + $(ORCHAGENT_DIR)/port/porthlpr.cpp \ $(P4ORCH_DIR)/p4oidmapper.cpp \ $(P4ORCH_DIR)/p4orch.cpp \ $(P4ORCH_DIR)/p4orch_util.cpp \ + $(P4ORCH_DIR)/tables_definition_manager.cpp \ $(P4ORCH_DIR)/router_interface_manager.cpp \ $(P4ORCH_DIR)/gre_tunnel_manager.cpp \ $(P4ORCH_DIR)/neighbor_manager.cpp \ @@ -44,6 +50,7 @@ p4orch_tests_SOURCES = $(ORCHAGENT_DIR)/orch.cpp \ $(P4ORCH_DIR)/wcmp_manager.cpp \ $(P4ORCH_DIR)/mirror_session_manager.cpp \ $(P4ORCH_DIR)/l3_admit_manager.cpp \ + $(P4ORCH_DIR)/ext_tables_manager.cpp \ $(top_srcdir)/tests/mock_tests/fake_response_publisher.cpp \ fake_portorch.cpp \ fake_crmorch.cpp \ diff --git a/orchagent/p4orch/tests/acl_manager_test.cpp b/orchagent/p4orch/tests/acl_manager_test.cpp index b18fdc4fcb..107dfdfde5 100644 --- a/orchagent/p4orch/tests/acl_manager_test.cpp +++ b/orchagent/p4orch/tests/acl_manager_test.cpp @@ -3,13 +3,13 @@ #include #include +#include #include #include "acl_rule_manager.h" #include "acl_table_manager.h" #include "acl_util.h" #include "acltable.h" -#include "json.hpp" #include "mock_sai_acl.h" #include "mock_sai_hostif.h" #include "mock_sai_policer.h" @@ -983,7 +983,7 @@ class AclManagerTest : public ::testing::Test } void EnqueueTableTuple(const swss::KeyOpFieldsValuesTuple &entry) { - acl_table_manager_->enqueue(entry); + acl_table_manager_->enqueue(APP_P4RT_ACL_TABLE_DEFINITION_NAME, entry); } std::string VerifyTableState(const std::string &key, const std::vector &tuple) { @@ -994,9 +994,9 @@ class AclManagerTest : 
public ::testing::Test { acl_rule_manager_->drain(); } - void EnqueueRuleTuple(const swss::KeyOpFieldsValuesTuple &entry) + void EnqueueRuleTuple(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) { - acl_rule_manager_->enqueue(entry); + acl_rule_manager_->enqueue(table_name, entry); } std::string VerifyRuleState(const std::string &key, const std::vector &tuple) { @@ -2320,10 +2320,12 @@ TEST_F(AclManagerTest, DrainRuleTuplesToProcessSetRequestSucceeds) "ipv6_dst\":\"fdf8:f53b:82e4::53 & " "fdf8:f53b:82e4::53\",\"priority\":15}"; const auto &rule_tuple_key = std::string(kAclIngressTableName) + kTableKeyDelimiter + acl_rule_json_key; - EnqueueRuleTuple(swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, getDefaultRuleFieldValueTuples()})); + EnqueueRuleTuple(std::string(kAclIngressTableName), + swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, getDefaultRuleFieldValueTuples()})); // Update request on exact rule without change will not need SAI call - EnqueueRuleTuple(swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, getDefaultRuleFieldValueTuples()})); + EnqueueRuleTuple(std::string(kAclIngressTableName), + swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, getDefaultRuleFieldValueTuples()})); // Drain rule tuples to process SET request EXPECT_CALL(mock_sai_acl_, create_acl_entry(_, _, _, _)) @@ -2348,7 +2350,8 @@ TEST_F(AclManagerTest, DrainRuleTuplesToProcessSetDelRequestSucceeds) "ipv6_dst\":\"fdf8:f53b:82e4::53 & " "fdf8:f53b:82e4::53\",\"priority\":15}"; const auto &rule_tuple_key = std::string(kAclIngressTableName) + kTableKeyDelimiter + acl_rule_json_key; - EnqueueRuleTuple(swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, attributes})); + EnqueueRuleTuple(std::string(kAclIngressTableName), + swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, attributes})); // Drain ACL rule tuple to process SET request EXPECT_CALL(mock_sai_acl_, create_acl_entry(_, _, _, _)) @@ -2385,7 +2388,8 @@ 
TEST_F(AclManagerTest, DrainRuleTuplesToProcessSetDelRequestSucceeds) // Drain ACL rule tuple to process DEL request attributes.clear(); - EnqueueRuleTuple(swss::KeyOpFieldsValuesTuple({rule_tuple_key, DEL_COMMAND, attributes})); + EnqueueRuleTuple(std::string(kAclIngressTableName), + swss::KeyOpFieldsValuesTuple({rule_tuple_key, DEL_COMMAND, attributes})); EXPECT_CALL(mock_sai_acl_, remove_acl_entry(Eq(kAclIngressRuleOid1))).WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_acl_, remove_acl_counter(_)).WillOnce(Return(SAI_STATUS_SUCCESS)); @@ -2401,7 +2405,8 @@ TEST_F(AclManagerTest, DrainRuleTuplesToProcessSetRequestInvalidTableNameRuleKey "ipv6_dst\":\"fdf8:f53b:82e4::53 & " "fdf8:f53b:82e4::53\",\"priority\":15}"; auto rule_tuple_key = std::string("INVALID_TABLE_NAME") + kTableKeyDelimiter + acl_rule_json_key; - EnqueueRuleTuple(swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, attributes})); + EnqueueRuleTuple(std::string("INVALID_TABLE_NAME"), + swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, attributes})); // Drain rule tuple to process SET request with invalid ACL table name: // "INVALID_TABLE_NAME" DrainRuleTuples(); @@ -2417,7 +2422,8 @@ TEST_F(AclManagerTest, DrainRuleTuplesToProcessSetRequestInvalidTableNameRuleKey rule_tuple_key = std::string(kAclIngressTableName) + kTableKeyDelimiter + acl_rule_json_key; acl_rule_key = "match/ether_type=0x0800:match/ipv6_dst=fdf8:f53b:82e4::53 & " "fdf8:f53b:82e4::53"; - EnqueueRuleTuple(swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, attributes})); + EnqueueRuleTuple(std::string(kAclIngressTableName), + swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, attributes})); // Drain rule tuple to process SET request without priority field in rule // JSON key DrainRuleTuples(); @@ -2479,7 +2485,8 @@ TEST_F(AclManagerTest, DrainRuleTuplesWithInvalidCommand) "ipv6_dst\":\"fdf8:f53b:82e4::53 & " "fdf8:f53b:82e4::53\",\"priority\":15}"; const auto &rule_tuple_key = 
std::string(kAclIngressTableName) + kTableKeyDelimiter + acl_rule_json_key; - EnqueueRuleTuple(swss::KeyOpFieldsValuesTuple({rule_tuple_key, "INVALID_COMMAND", attributes})); + EnqueueRuleTuple(std::string(kAclIngressTableName), + swss::KeyOpFieldsValuesTuple({rule_tuple_key, "INVALID_COMMAND", attributes})); DrainRuleTuples(); const auto &acl_rule_key = "match/ether_type=0x0800:match/ipv6_dst=fdf8:f53b:82e4::53 & " "fdf8:f53b:82e4::53:priority=15"; @@ -4761,7 +4768,8 @@ TEST_F(AclManagerTest, AclRuleVerifyStateTest) "\"match/in_ports\": \"Ethernet1,Ethernet2\", \"match/out_ports\": " "\"Ethernet4,Ethernet5\", \"priority\":15}"; const auto &rule_tuple_key = std::string(kAclIngressTableName) + kTableKeyDelimiter + acl_rule_json_key; - EnqueueRuleTuple(swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, attributes})); + EnqueueRuleTuple(std::string(kAclIngressTableName), + swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, attributes})); EXPECT_CALL(mock_sai_acl_, create_acl_entry(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclIngressRuleOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_acl_, create_acl_counter(_, _, _, _)) @@ -5189,7 +5197,8 @@ TEST_F(AclManagerTest, AclRuleVerifyStateAsicDbTest) "ipv6_dst\":\"fdf8:f53b:82e4::53 & " "fdf8:f53b:82e4::53\",\"priority\":15}"; const auto &rule_tuple_key = std::string(kAclIngressTableName) + kTableKeyDelimiter + acl_rule_json_key; - EnqueueRuleTuple(swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, attributes})); + EnqueueRuleTuple(std::string(kAclIngressTableName), + swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, attributes})); EXPECT_CALL(mock_sai_acl_, create_acl_entry(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclIngressRuleOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_acl_, create_acl_counter(_, _, _, _)) diff --git a/orchagent/p4orch/tests/fake_crmorch.cpp b/orchagent/p4orch/tests/fake_crmorch.cpp index 03f15c28ac..63c19b9fdf 100644 --- 
a/orchagent/p4orch/tests/fake_crmorch.cpp +++ b/orchagent/p4orch/tests/fake_crmorch.cpp @@ -32,6 +32,14 @@ void CrmOrch::decCrmAclTableUsedCounter(CrmResourceType resource, sai_object_id_ { } +void CrmOrch::incCrmExtTableUsedCounter(CrmResourceType resource, std::string table_name) +{ +} + +void CrmOrch::decCrmExtTableUsedCounter(CrmResourceType resource, std::string table_name) +{ +} + void CrmOrch::doTask(Consumer &consumer) { } diff --git a/orchagent/p4orch/tests/fake_flexcounterorch.cpp b/orchagent/p4orch/tests/fake_flexcounterorch.cpp index e44fc555f6..91d6be3d14 100644 --- a/orchagent/p4orch/tests/fake_flexcounterorch.cpp +++ b/orchagent/p4orch/tests/fake_flexcounterorch.cpp @@ -1,11 +1,10 @@ #include "copporch.h" #include "flexcounterorch.h" -FlexCounterOrch::FlexCounterOrch(swss::DBConnector *db, std::vector &tableNames) : - Orch(db, tableNames), - m_flexCounterConfigTable(db, CFG_FLEX_COUNTER_TABLE_NAME), - m_bufferQueueConfigTable(db, CFG_BUFFER_QUEUE_TABLE_NAME), - m_bufferPgConfigTable(db, CFG_BUFFER_PG_TABLE_NAME) +FlexCounterOrch::FlexCounterOrch(swss::DBConnector *db, std::vector &tableNames) + : Orch(db, tableNames), m_flexCounterConfigTable(db, CFG_FLEX_COUNTER_TABLE_NAME), + m_bufferQueueConfigTable(db, CFG_BUFFER_QUEUE_TABLE_NAME), m_bufferPgConfigTable(db, CFG_BUFFER_PG_TABLE_NAME), + m_deviceMetadataConfigTable(db, CFG_DEVICE_METADATA_TABLE_NAME) { } diff --git a/orchagent/p4orch/tests/fake_portorch.cpp b/orchagent/p4orch/tests/fake_portorch.cpp index 5224ec55af..a34a30eb4b 100644 --- a/orchagent/p4orch/tests/fake_portorch.cpp +++ b/orchagent/p4orch/tests/fake_portorch.cpp @@ -185,7 +185,7 @@ void PortsOrch::generateQueueMap(std::map queues { } -void PortsOrch::generateQueueMapPerPort(const Port& port, FlexCounterQueueStates& queuesState) +void PortsOrch::generateQueueMapPerPort(const Port &port, FlexCounterQueueStates &queuesState, bool voq) { } @@ -201,15 +201,15 @@ void PortsOrch::generatePriorityGroupMap(std::map p { } -void 
PortsOrch::generatePriorityGroupMapPerPort(const Port& port, FlexCounterPgStates& pgsState) +void PortsOrch::generatePriorityGroupMapPerPort(const Port &port, FlexCounterPgStates &pgsState) { } -void PortsOrch::createPortBufferPgCounters(const Port& port, string pgs) +void PortsOrch::createPortBufferPgCounters(const Port &port, string pgs) { } -void PortsOrch::removePortBufferPgCounters(const Port& port, string pgs) +void PortsOrch::removePortBufferPgCounters(const Port &port, string pgs) { } @@ -322,7 +322,7 @@ bool PortsOrch::setVoqInbandIntf(string &alias, string &type) return true; } -bool PortsOrch::getRecircPort(Port &p, string role) +bool PortsOrch::getRecircPort(Port &p, Port::Role role) { return true; } @@ -443,7 +443,7 @@ bool PortsOrch::setHostIntfsStripTag(Port &port, sai_hostif_vlan_tag_t strip) return true; } -bool PortsOrch::setBridgePortLearnMode(Port &port, string learn_mode) +bool PortsOrch::setBridgePortLearnMode(Port &port, sai_bridge_port_fdb_learning_mode_t learn_mode) { return true; } @@ -473,7 +473,7 @@ bool PortsOrch::setLagTpid(sai_object_id_t id, sai_uint16_t tpid) return true; } -bool PortsOrch::addLagMember(Port &lag, Port &port, bool enableForwarding) +bool PortsOrch::addLagMember(Port &lag, Port &port, string member_status) { return true; } @@ -493,17 +493,12 @@ bool PortsOrch::setDistributionOnLagMember(Port &lagMember, bool enableDistribut return true; } -bool PortsOrch::addPort(const set &lane_set, uint32_t speed, int an, string fec) -{ - return true; -} - sai_status_t PortsOrch::removePort(sai_object_id_t port_id) { return SAI_STATUS_SUCCESS; } -bool PortsOrch::initPort(const string &alias, const string &role, const int index, const set &lane_set) +bool PortsOrch::initPort(const PortConfig &port) { return true; } @@ -527,7 +522,7 @@ bool PortsOrch::setPortMtu(const Port &port, sai_uint32_t mtu) return true; } -bool PortsOrch::setPortTpid(sai_object_id_t id, sai_uint16_t tpid) +bool PortsOrch::setPortTpid(Port &port, sai_uint16_t 
tpid) { return true; } @@ -542,12 +537,17 @@ bool PortsOrch::getPortPvid(Port &port, sai_uint32_t &pvid) return true; } -bool PortsOrch::setPortFec(Port &port, std::string &mode) +bool PortsOrch::setPortFec(Port &port, sai_port_fec_mode_t fec_mode, bool override_fec) +{ + return true; +} + +bool PortsOrch::isFecModeSupported(const Port &port, sai_port_fec_mode_t fec_mode) { return true; } -bool PortsOrch::setPortPfcAsym(Port &port, string pfc_asym) +bool PortsOrch::setPortPfcAsym(Port &port, sai_port_priority_flow_control_mode_t pfc_asym) { return true; } @@ -586,17 +586,18 @@ bool PortsOrch::getPortSpeed(sai_object_id_t port_id, sai_uint32_t &speed) return true; } -bool PortsOrch::setGearboxPortsAttr(const Port &port, sai_port_attr_t id, void *value) +bool PortsOrch::setGearboxPortsAttr(const Port &port, sai_port_attr_t id, void *value, bool override_fec) { return true; } -bool PortsOrch::setGearboxPortAttr(const Port &port, dest_port_type_t port_type, sai_port_attr_t id, void *value) +bool PortsOrch::setGearboxPortAttr(const Port &port, dest_port_type_t port_type, sai_port_attr_t id, void *value, + bool override_fec) { return true; } -task_process_status PortsOrch::setPortAdvSpeeds(sai_object_id_t port_id, std::vector &speed_list) +task_process_status PortsOrch::setPortAdvSpeeds(Port &port, std::set &speed_list) { return task_success; } @@ -606,22 +607,23 @@ bool PortsOrch::getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uin return true; } -task_process_status PortsOrch::setPortAutoNeg(sai_object_id_t id, int an) +bool PortsOrch::isAutoNegEnabled(sai_object_id_t id) { - return task_success; + return true; } -bool PortsOrch::setPortFecMode(sai_object_id_t id, int fec) +task_process_status PortsOrch::setPortAutoNeg(Port &port, bool autoneg) { - return true; + return task_success; } -task_process_status PortsOrch::setPortInterfaceType(sai_object_id_t id, sai_port_interface_type_t interface_type) +task_process_status PortsOrch::setPortInterfaceType(Port 
&port, sai_port_interface_type_t interface_type) { return task_success; } -task_process_status PortsOrch::setPortAdvInterfaceTypes(sai_object_id_t id, std::vector &interface_types) +task_process_status PortsOrch::setPortAdvInterfaceTypes(Port &port, + std::set &interface_types) { return task_success; } @@ -639,25 +641,10 @@ void PortsOrch::updateDbPortOperSpeed(Port &port, sai_uint32_t speed) { } -void PortsOrch::getPortSerdesVal(const std::string &s, std::vector &lane_values) +void PortsOrch::getPortSerdesVal(const std::string &s, std::vector &lane_values, int base) { } -bool PortsOrch::getPortAdvSpeedsVal(const std::string &s, std::vector &speed_values) -{ - return true; -} - -bool PortsOrch::getPortInterfaceTypeVal(const std::string &s, sai_port_interface_type_t &interface_type) -{ - return true; -} - -bool PortsOrch::getPortAdvInterfaceTypesVal(const std::string &s, std::vector &type_values) -{ - return true; -} - void PortsOrch::removePortSerdesAttribute(sai_object_id_t port_id) { } @@ -694,7 +681,7 @@ void PortsOrch::voqSyncDelLag(Port &lag) { } -void PortsOrch::voqSyncAddLagMember(Port &lag, Port &port) +void PortsOrch::voqSyncAddLagMember(Port &lag, Port &port, string status) { } diff --git a/orchagent/p4orch/tests/gre_tunnel_manager_test.cpp b/orchagent/p4orch/tests/gre_tunnel_manager_test.cpp index ebd0b54ce4..da3ae3578b 100644 --- a/orchagent/p4orch/tests/gre_tunnel_manager_test.cpp +++ b/orchagent/p4orch/tests/gre_tunnel_manager_test.cpp @@ -4,11 +4,11 @@ #include #include +#include #include #include #include "ipaddress.h" -#include "json.hpp" #include "mock_response_publisher.h" #include "mock_sai_router_interface.h" #include "mock_sai_serialize.h" @@ -52,7 +52,7 @@ const P4GreTunnelAppDbEntry kP4GreTunnelAppDbEntry1{/*tunnel_id=*/"tunnel-1", /*router_interface_id=*/"intf-eth-1/2/3", /*encap_src_ip=*/swss::IpAddress("2607:f8b0:8096:3110::1"), /*encap_dst_ip=*/swss::IpAddress("2607:f8b0:8096:311a::2"), - /*action_str=*/"mark_for_tunnel_encap"}; + 
/*action_str=*/"mark_for_p2p_tunnel_encap"}; std::unordered_map CreateAttributeListForGreTunnelObject( const P4GreTunnelAppDbEntry &app_entry, const sai_object_id_t &rif_oid) @@ -215,7 +215,7 @@ class GreTunnelManagerTest : public ::testing::Test void Enqueue(const swss::KeyOpFieldsValuesTuple &entry) { - gre_tunnel_manager_.enqueue(entry); + gre_tunnel_manager_.enqueue(APP_P4RT_TUNNEL_TABLE_NAME, entry); } void Drain() @@ -304,6 +304,7 @@ bool GreTunnelManagerTest::ValidateGreTunnelEntryAdd(const P4GreTunnelAppDbEntry const auto *p4_gre_tunnel_entry = GetGreTunnelEntry(KeyGenerator::generateTunnelKey(app_db_entry.tunnel_id)); if (p4_gre_tunnel_entry == nullptr || p4_gre_tunnel_entry->encap_src_ip != app_db_entry.encap_src_ip || p4_gre_tunnel_entry->encap_dst_ip != app_db_entry.encap_dst_ip || + p4_gre_tunnel_entry->neighbor_id != app_db_entry.encap_dst_ip || p4_gre_tunnel_entry->router_interface_id != app_db_entry.router_interface_id || p4_gre_tunnel_entry->tunnel_id != app_db_entry.tunnel_id) { @@ -334,7 +335,7 @@ TEST_F(GreTunnelManagerTest, ProcessAddRequestShouldFailWhenDependingPortIsNotPr /*router_interface_id=*/"intf-eth-1/2/3", /*encap_src_ip=*/swss::IpAddress("2607:f8b0:8096:3110::1"), /*encap_dst_ip=*/swss::IpAddress("2607:f8b0:8096:311a::2"), - /*action_str=*/"mark_for_tunnel_encap"}; + /*action_str=*/"mark_for_p2p_tunnel_encap"}; const auto gre_tunnel_key = KeyGenerator::generateTunnelKey(kAppDbEntry.tunnel_id); EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ProcessAddRequest(kAppDbEntry)); @@ -817,6 +818,12 @@ TEST_F(GreTunnelManagerTest, VerifyStateTest) EXPECT_FALSE(VerifyState(db_key, attributes).empty()); p4_tunnel_entry->encap_dst_ip = saved_DST_IP; + // Verification should fail if IP mask mismatches. 
+ auto saved_NEIGHBOR_ID = p4_tunnel_entry->neighbor_id; + p4_tunnel_entry->neighbor_id = swss::IpAddress("2.2.2.2"); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_tunnel_entry->neighbor_id = saved_NEIGHBOR_ID; + // Verification should fail if tunnel_id mismatches. auto saved_tunnel_id = p4_tunnel_entry->tunnel_id; p4_tunnel_entry->tunnel_id = "invalid"; @@ -899,4 +906,4 @@ TEST_F(GreTunnelManagerTest, VerifyStateAsicDbTest) p4_tunnel_entry->encap_src_ip = swss::IpAddress("1.2.3.4"); EXPECT_FALSE(VerifyState(db_key, attributes).empty()); p4_tunnel_entry->encap_src_ip = swss::IpAddress("2607:f8b0:8096:3110::1"); -} \ No newline at end of file +} diff --git a/orchagent/p4orch/tests/l3_admit_manager_test.cpp b/orchagent/p4orch/tests/l3_admit_manager_test.cpp index 6d0d67dd0e..0fa5cb7ac3 100644 --- a/orchagent/p4orch/tests/l3_admit_manager_test.cpp +++ b/orchagent/p4orch/tests/l3_admit_manager_test.cpp @@ -4,10 +4,10 @@ #include #include +#include #include #include -#include "json.hpp" #include "mock_response_publisher.h" #include "mock_sai_my_mac.h" #include "p4oidmapper.h" @@ -178,7 +178,7 @@ class L3AdmitManagerTest : public ::testing::Test void Enqueue(const swss::KeyOpFieldsValuesTuple &entry) { - l3_admit_manager_.enqueue(entry); + l3_admit_manager_.enqueue(APP_P4RT_L3_ADMIT_TABLE_NAME, entry); } void Drain() @@ -650,4 +650,4 @@ TEST_F(L3AdmitManagerTest, VerifyStateAsicDbTest) swss::FieldValueTuple{"SAI_MY_MAC_ATTR_MAC_ADDRESS", kP4L3AdmitAppDbEntry1.mac_address_data.to_string()}, swss::FieldValueTuple{"SAI_MY_MAC_ATTR_MAC_ADDRESS_MASK", "FF:FF:FF:FF:00:00"}, swss::FieldValueTuple{"SAI_MY_MAC_ATTR_PRIORITY", "2030"}}); -} \ No newline at end of file +} diff --git a/orchagent/p4orch/tests/mirror_session_manager_test.cpp b/orchagent/p4orch/tests/mirror_session_manager_test.cpp index bc5563a078..1361fc96b3 100644 --- a/orchagent/p4orch/tests/mirror_session_manager_test.cpp +++ b/orchagent/p4orch/tests/mirror_session_manager_test.cpp @@ -3,10 +3,10 
@@ #include #include +#include #include #include -#include "json.hpp" #include "mock_response_publisher.h" #include "mock_sai_mirror.h" #include "p4oidmapper.h" @@ -217,7 +217,7 @@ class MirrorSessionManagerTest : public ::testing::Test void Enqueue(const swss::KeyOpFieldsValuesTuple &entry) { - return mirror_session_manager_.enqueue(entry); + return mirror_session_manager_.enqueue(APP_P4RT_MIRROR_SESSION_TABLE_NAME, entry); } void Drain() diff --git a/orchagent/p4orch/tests/mock_sai_next_hop_group.h b/orchagent/p4orch/tests/mock_sai_next_hop_group.h index 5398ec5a70..c1ffedc175 100644 --- a/orchagent/p4orch/tests/mock_sai_next_hop_group.h +++ b/orchagent/p4orch/tests/mock_sai_next_hop_group.h @@ -6,6 +6,7 @@ extern "C" { #include "sai.h" +#include "sainexthopgroup.h" } // Mock class including mock functions mapping to SAI next hop group's @@ -27,6 +28,16 @@ class MockSaiNextHopGroup MOCK_METHOD2(set_next_hop_group_member_attribute, sai_status_t(_In_ sai_object_id_t next_hop_group_member_id, _In_ const sai_attribute_t *attr)); + + MOCK_METHOD7(create_next_hop_group_members, + sai_status_t(_In_ sai_object_id_t switch_id, _In_ uint32_t object_count, + _In_ const uint32_t *attr_count, _In_ const sai_attribute_t **attr_list, + _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_object_id_t *object_id, + _Out_ sai_status_t *object_statuses)); + + MOCK_METHOD4(remove_next_hop_group_members, + sai_status_t(_In_ uint32_t object_count, _In_ const sai_object_id_t *object_id, + _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_status_t *object_statuses)); }; // Note that before mock functions below are used, mock_sai_next_hop_group must @@ -62,3 +73,18 @@ sai_status_t set_next_hop_group_member_attribute(_In_ sai_object_id_t next_hop_g { return mock_sai_next_hop_group->set_next_hop_group_member_attribute(next_hop_group_member_id, attr); } + +sai_status_t create_next_hop_group_members(_In_ sai_object_id_t switch_id, _In_ uint32_t object_count, + _In_ const uint32_t *attr_count, _In_ 
const sai_attribute_t **attr_list, + _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_object_id_t *object_id, + _Out_ sai_status_t *object_statuses) +{ + return mock_sai_next_hop_group->create_next_hop_group_members(switch_id, object_count, attr_count, attr_list, mode, + object_id, object_statuses); +} + +sai_status_t remove_next_hop_group_members(_In_ uint32_t object_count, _In_ const sai_object_id_t *object_id, + _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_status_t *object_statuses) +{ + return mock_sai_next_hop_group->remove_next_hop_group_members(object_count, object_id, mode, object_statuses); +} diff --git a/orchagent/p4orch/tests/neighbor_manager_test.cpp b/orchagent/p4orch/tests/neighbor_manager_test.cpp index ae91f4f567..4db1db873e 100644 --- a/orchagent/p4orch/tests/neighbor_manager_test.cpp +++ b/orchagent/p4orch/tests/neighbor_manager_test.cpp @@ -3,10 +3,10 @@ #include #include +#include #include #include -#include "json.hpp" #include "mock_response_publisher.h" #include "mock_sai_neighbor.h" #include "p4orch.h" @@ -130,7 +130,7 @@ class NeighborManagerTest : public ::testing::Test void Enqueue(const swss::KeyOpFieldsValuesTuple &entry) { - neighbor_manager_.enqueue(entry); + neighbor_manager_.enqueue(APP_P4RT_NEIGHBOR_TABLE_NAME, entry); } void Drain() diff --git a/orchagent/p4orch/tests/next_hop_manager_test.cpp b/orchagent/p4orch/tests/next_hop_manager_test.cpp index 64416bb3b3..7a2e714bbc 100644 --- a/orchagent/p4orch/tests/next_hop_manager_test.cpp +++ b/orchagent/p4orch/tests/next_hop_manager_test.cpp @@ -4,11 +4,11 @@ #include #include +#include #include #include #include "ipaddress.h" -#include "json.hpp" #include "mock_response_publisher.h" #include "mock_sai_hostif.h" #include "mock_sai_next_hop.h" @@ -86,29 +86,32 @@ const P4NextHopAppDbEntry kP4NextHopAppDbEntry3{/*next_hop_id=*/kNextHopId, const P4NextHopAppDbEntry kP4TunnelNextHopAppDbEntry1{/*next_hop_id=*/kTunnelNextHopId, /*router_interface_id=*/"", /*gre_tunnel_id=*/kTunnelId1, - 
/*neighbor_id=*/swss::IpAddress(kNeighborId1), - /*action_str=*/"set_tunnel_encap_nexthop"}; + /*neighbor_id=*/swss::IpAddress("0.0.0.0"), + /*action_str=*/"set_p2p_tunnel_encap_nexthop"}; const P4NextHopAppDbEntry kP4TunnelNextHopAppDbEntry2{/*next_hop_id=*/kTunnelNextHopId, /*router_interface_id=*/"", /*gre_tunnel_id=*/kTunnelId2, - /*neighbor_id=*/swss::IpAddress(kNeighborId2), - /*action_str=*/"set_tunnel_encap_nexthop"}; + /*neighbor_id=*/swss::IpAddress("0.0.0.0"), + /*action_str=*/"set_p2p_tunnel_encap_nexthop"}; const P4GreTunnelEntry kP4TunnelEntry1( /*tunnel_id=*/kTunnelId1, /*router_interface_id=*/kRouterInterfaceId1, - /*src_ip=*/swss::IpAddress("1.2.3.4"), - /*dst_ip=*/swss::IpAddress("5.6.7.8")); + /*encap_src_ip=*/swss::IpAddress("1.2.3.4"), + /*encap_dst_ip=*/swss::IpAddress(kNeighborId1), + /*neighbor_id=*/swss::IpAddress(kNeighborId1)); const P4GreTunnelEntry kP4TunnelEntry2( /*tunnel_id=*/kTunnelId2, /*router_interface_id=*/kRouterInterfaceId2, - /*src_ip=*/swss::IpAddress("1.2.3.4"), - /*dst_ip=*/swss::IpAddress("5.6.7.8")); + /*encap_src_ip=*/swss::IpAddress("1.2.3.4"), + /*encap_dst_ip=*/swss::IpAddress(kNeighborId2), + /*neighbor_id=*/swss::IpAddress(kNeighborId2)); std::unordered_map CreateAttributeListForNextHopObject( - const P4NextHopAppDbEntry &app_entry, const sai_object_id_t &oid) + const P4NextHopAppDbEntry &app_entry, const sai_object_id_t &oid, + const swss::IpAddress &neighbor_id = swss::IpAddress("0.0.0.0")) { std::unordered_map next_hop_attrs; sai_attribute_t next_hop_attr; @@ -133,7 +136,14 @@ std::unordered_map CreateAttributeListForN } next_hop_attr.id = SAI_NEXT_HOP_ATTR_IP; - swss::copy(next_hop_attr.value.ipaddr, app_entry.neighbor_id); + if (!neighbor_id.isZero()) + { + swss::copy(next_hop_attr.value.ipaddr, neighbor_id); + } + else + { + swss::copy(next_hop_attr.value.ipaddr, app_entry.neighbor_id); + } next_hop_attrs.insert({next_hop_attr.id, next_hop_attr.value}); return next_hop_attrs; @@ -257,7 +267,7 @@ class 
NextHopManagerTest : public ::testing::Test void Enqueue(const swss::KeyOpFieldsValuesTuple &entry) { - next_hop_manager_.enqueue(entry); + next_hop_manager_.enqueue(APP_P4RT_NEXTHOP_TABLE_NAME, entry); } void Drain() @@ -307,6 +317,12 @@ class NextHopManagerTest : public ::testing::Test // Returns a valid pointer to next hop entry on success. P4NextHopEntry *AddNextHopEntry1(); + // Adds the next hop entry -- kP4TunnelNextHopAppDbEntry1, via next hop + // manager's ProcessAddRequest (). This function also takes care of all the + // dependencies of the next hop entry. Returns a valid pointer to next hop + // entry on success. + P4NextHopEntry *AddTunnelNextHopEntry1(); + // Validates that a P4 App next hop entry is correctly added in next hop // manager and centralized mapper. Returns true on success. bool ValidateNextHopEntryAdd(const P4NextHopAppDbEntry &app_db_entry, const sai_object_id_t &expected_next_hop_oid); @@ -333,7 +349,8 @@ class NextHopManagerTest : public ::testing::Test bool NextHopManagerTest::ResolveNextHopEntryDependency(const P4NextHopAppDbEntry &app_db_entry, const sai_object_id_t &oid) { - std::string rif_id; + std::string rif_id = app_db_entry.router_interface_id; + auto neighbor_id = app_db_entry.neighbor_id; if (app_db_entry.action_str == p4orch::kSetTunnelNexthop) { const std::string tunnel_key = KeyGenerator::generateTunnelKey(app_db_entry.gre_tunnel_id); @@ -343,21 +360,27 @@ bool NextHopManagerTest::ResolveNextHopEntryDependency(const P4NextHopAppDbEntry } gP4Orch->getGreTunnelManager()->m_greTunnelTable.emplace( tunnel_key, app_db_entry.gre_tunnel_id == kTunnelId1 ? 
kP4TunnelEntry1 : kP4TunnelEntry2); - auto rif_id_or = gP4Orch->getGreTunnelManager()->getUnderlayIfFromGreTunnelEntry(tunnel_key); - EXPECT_TRUE(rif_id_or.ok()); - rif_id = *rif_id_or; + auto gre_tunnel_or = gP4Orch->getGreTunnelManager()->getConstGreTunnelEntry(tunnel_key); + EXPECT_TRUE(gre_tunnel_or.ok()); + rif_id = (*gre_tunnel_or).router_interface_id; + auto rif_oid = rif_id == kRouterInterfaceId1 ? kRouterInterfaceOid1 : kRouterInterfaceOid2; + neighbor_id = (*gre_tunnel_or).neighbor_id; + const std::string rif_key = KeyGenerator::generateRouterInterfaceKey(rif_id); + if (!p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_ROUTER_INTERFACE, rif_key, rif_oid)) + { + return false; + } } else { - rif_id = app_db_entry.router_interface_id; - const std::string rif_key = KeyGenerator::generateRouterInterfaceKey(app_db_entry.router_interface_id); + const std::string rif_key = KeyGenerator::generateRouterInterfaceKey(rif_id); if (!p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_ROUTER_INTERFACE, rif_key, oid)) { return false; } } - const std::string neighbor_key = KeyGenerator::generateNeighborKey(rif_id, app_db_entry.neighbor_id); + const std::string neighbor_key = KeyGenerator::generateNeighborKey(rif_id, neighbor_id); if (!p4_oid_mapper_.setDummyOID(SAI_OBJECT_TYPE_NEIGHBOR_ENTRY, neighbor_key)) { return false; @@ -385,12 +408,32 @@ P4NextHopEntry *NextHopManagerTest::AddNextHopEntry1() return GetNextHopEntry(KeyGenerator::generateNextHopKey(kP4NextHopAppDbEntry1.next_hop_id)); } +P4NextHopEntry *NextHopManagerTest::AddTunnelNextHopEntry1() +{ + if (!ResolveNextHopEntryDependency(kP4TunnelNextHopAppDbEntry1, kTunnelOid1)) + { + return nullptr; + } + + // Set up mock call. 
+ EXPECT_CALL( + mock_sai_next_hop_, + create_next_hop(::testing::NotNull(), Eq(gSwitchId), Eq(3), + Truly(std::bind(MatchCreateNextHopArgAttrList, std::placeholders::_1, + CreateAttributeListForNextHopObject(kP4TunnelNextHopAppDbEntry1, kTunnelOid1, + swss::IpAddress(kNeighborId1)))))) + .WillOnce(DoAll(SetArgPointee<0>(kTunnelNextHopOid), Return(SAI_STATUS_SUCCESS))); + + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessAddRequest(kP4TunnelNextHopAppDbEntry1)); + + return GetNextHopEntry(KeyGenerator::generateNextHopKey(kP4TunnelNextHopAppDbEntry1.next_hop_id)); +} + bool NextHopManagerTest::ValidateNextHopEntryAdd(const P4NextHopAppDbEntry &app_db_entry, const sai_object_id_t &expected_next_hop_oid) { const auto *p4_next_hop_entry = GetNextHopEntry(KeyGenerator::generateNextHopKey(app_db_entry.next_hop_id)); if (p4_next_hop_entry == nullptr || p4_next_hop_entry->next_hop_id != app_db_entry.next_hop_id || - p4_next_hop_entry->neighbor_id != app_db_entry.neighbor_id || p4_next_hop_entry->next_hop_oid != expected_next_hop_oid) { return false; @@ -403,7 +446,8 @@ bool NextHopManagerTest::ValidateNextHopEntryAdd(const P4NextHopAppDbEntry &app_ } if (app_db_entry.action_str == p4orch::kSetIpNexthop && - p4_next_hop_entry->router_interface_id != app_db_entry.router_interface_id) + (p4_next_hop_entry->router_interface_id != app_db_entry.router_interface_id || + p4_next_hop_entry->neighbor_id != app_db_entry.neighbor_id)) { return false; } @@ -467,8 +511,8 @@ TEST_F(NextHopManagerTest, ProcessAddRequestShouldFailWhenDependingRifIsAbsentIn TEST_F(NextHopManagerTest, ProcessAddRequestShouldFailWhenDependingTunnelIsAbsentInCentralMapper) { - const std::string neighbor_key = KeyGenerator::generateNeighborKey(kP4TunnelNextHopAppDbEntry1.router_interface_id, - kP4TunnelNextHopAppDbEntry1.neighbor_id); + const std::string neighbor_key = + KeyGenerator::generateNeighborKey(kP4TunnelNextHopAppDbEntry1.router_interface_id, kP4TunnelEntry1.neighbor_id); 
ASSERT_TRUE(p4_oid_mapper_.setDummyOID(SAI_OBJECT_TYPE_NEIGHBOR_ENTRY, neighbor_key)); EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ProcessAddRequest(kP4TunnelNextHopAppDbEntry1)); @@ -526,11 +570,12 @@ TEST_F(NextHopManagerTest, ProcessAddRequestShouldSuccessForTunnelNexthop) ASSERT_TRUE(ResolveNextHopEntryDependency(kP4TunnelNextHopAppDbEntry1, kTunnelOid1)); // Set up mock call. - EXPECT_CALL(mock_sai_next_hop_, - create_next_hop( - ::testing::NotNull(), Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchCreateNextHopArgAttrList, std::placeholders::_1, - CreateAttributeListForNextHopObject(kP4TunnelNextHopAppDbEntry1, kTunnelOid1))))) + EXPECT_CALL( + mock_sai_next_hop_, + create_next_hop(::testing::NotNull(), Eq(gSwitchId), Eq(3), + Truly(std::bind(MatchCreateNextHopArgAttrList, std::placeholders::_1, + CreateAttributeListForNextHopObject(kP4TunnelNextHopAppDbEntry1, kTunnelOid1, + swss::IpAddress(kNeighborId1)))))) .WillOnce(DoAll(SetArgPointee<0>(kTunnelNextHopOid), Return(SAI_STATUS_SUCCESS))); EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessAddRequest(kP4TunnelNextHopAppDbEntry1)); @@ -545,7 +590,7 @@ TEST_F(NextHopManagerTest, ProcessAddRequestShouldSuccessForTunnelNexthop) EXPECT_TRUE(ValidateNextHopEntryAdd(kP4TunnelNextHopAppDbEntry1, kTunnelNextHopOid)); const std::string tunnel_key = KeyGenerator::generateTunnelKey(kP4TunnelNextHopAppDbEntry1.gre_tunnel_id); const std::string neighbor_key = - KeyGenerator::generateNeighborKey(kP4TunnelEntry1.router_interface_id, kP4TunnelNextHopAppDbEntry1.neighbor_id); + KeyGenerator::generateNeighborKey(kP4TunnelEntry1.router_interface_id, kP4TunnelEntry1.neighbor_id); EXPECT_TRUE(ValidateRefCnt(SAI_OBJECT_TYPE_TUNNEL, tunnel_key, 1)); EXPECT_TRUE(ValidateRefCnt(SAI_OBJECT_TYPE_NEIGHBOR_ENTRY, neighbor_key, 1)); } @@ -814,7 +859,7 @@ TEST_F(NextHopManagerTest, DrainValidTunnelNexthopAppEntryShouldSucceed) // Validate ref count decrement. 
const std::string tunnel_key = KeyGenerator::generateTunnelKey(kP4TunnelNextHopAppDbEntry2.gre_tunnel_id); const std::string neighbor_key = - KeyGenerator::generateNeighborKey(kP4TunnelEntry2.router_interface_id, kP4TunnelNextHopAppDbEntry2.neighbor_id); + KeyGenerator::generateNeighborKey(kP4TunnelEntry2.router_interface_id, kP4TunnelEntry2.neighbor_id); EXPECT_TRUE(ValidateRefCnt(SAI_OBJECT_TYPE_TUNNEL, tunnel_key, 0)); EXPECT_TRUE(ValidateRefCnt(SAI_OBJECT_TYPE_NEIGHBOR_ENTRY, neighbor_key, 0)); } @@ -896,7 +941,7 @@ TEST_F(NextHopManagerTest, DrainAppEntryWithInvalidFieldShouldBeNoOp) Drain(); EXPECT_FALSE(ValidateNextHopEntryAdd(kP4NextHopAppDbEntry2, kNextHopOid)); - // set_tunnel_encap_nexthop + invalid router_interface_id + // set_p2p_tunnel_encap_nexthop + invalid router_interface_id fvs = {{p4orch::kAction, p4orch::kSetTunnelNexthop}, {prependParamField(p4orch::kNeighborId), kNeighborId2}, {prependParamField(p4orch::kRouterInterfaceId), kRouterInterfaceId1}}; @@ -907,7 +952,7 @@ TEST_F(NextHopManagerTest, DrainAppEntryWithInvalidFieldShouldBeNoOp) Drain(); EXPECT_FALSE(ValidateNextHopEntryAdd(kP4TunnelNextHopAppDbEntry2, kNextHopOid)); - // set_tunnel_encap_nexthop + missing tunnel_id + // set_p2p_tunnel_encap_nexthop + missing tunnel_id fvs = {{p4orch::kAction, p4orch::kSetTunnelNexthop}, {prependParamField(p4orch::kNeighborId), kNeighborId2}}; app_db_entry = {std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + j.dump(), SET_COMMAND, fvs}; @@ -976,7 +1021,7 @@ TEST_F(NextHopManagerTest, DrainDeleteRequestShouldSucceedForExistingNextHop) EXPECT_TRUE(ValidateRefCnt(SAI_OBJECT_TYPE_NEIGHBOR_ENTRY, neighbor_key, 0)); } -TEST_F(NextHopManagerTest, VerifyStateTest) +TEST_F(NextHopManagerTest, VerifyIpNextHopStateTest) { auto *p4_next_hop_entry = AddNextHopEntry1(); ASSERT_NE(p4_next_hop_entry, nullptr); @@ -1045,6 +1090,74 @@ TEST_F(NextHopManagerTest, VerifyStateTest) p4_next_hop_entry->gre_tunnel_id = saved_gre_tunnel_id; } 
+TEST_F(NextHopManagerTest, VerifyTunnelNextHopStateTest)
+{
+    ASSERT_TRUE(ResolveNextHopEntryDependency(kP4TunnelNextHopAppDbEntry1, kTunnelOid1));
+
+    // Set up mock call.
+    EXPECT_CALL(
+        mock_sai_next_hop_,
+        create_next_hop(::testing::NotNull(), Eq(gSwitchId), Eq(3),
+                        Truly(std::bind(MatchCreateNextHopArgAttrList, std::placeholders::_1,
+                                        CreateAttributeListForNextHopObject(kP4TunnelNextHopAppDbEntry1, kTunnelOid1,
+                                                                            swss::IpAddress(kNeighborId1))))))
+        .WillOnce(DoAll(SetArgPointee<0>(kTunnelNextHopOid), Return(SAI_STATUS_SUCCESS)));
+
+    EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessAddRequest(kP4TunnelNextHopAppDbEntry1));
+
+    auto p4_next_hop_entry = GetNextHopEntry(KeyGenerator::generateNextHopKey(kP4TunnelNextHopAppDbEntry1.next_hop_id));
+    ASSERT_NE(p4_next_hop_entry, nullptr);
+
+    // Setup ASIC DB.
+    swss::Table table(nullptr, "ASIC_STATE");
+    table.set("SAI_OBJECT_TYPE_NEXT_HOP:oid:0x66",
+              std::vector{
+                  swss::FieldValueTuple{"SAI_NEXT_HOP_ATTR_TYPE", "SAI_NEXT_HOP_TYPE_TUNNEL_ENCAP"},
+                  swss::FieldValueTuple{"SAI_NEXT_HOP_ATTR_IP", "10.0.0.1"},
+                  swss::FieldValueTuple{"SAI_NEXT_HOP_ATTR_TUNNEL_ID", "oid:0xb"}});
+
+    nlohmann::json j;
+    j[prependMatchField(p4orch::kNexthopId)] = kTunnelNextHopId;
+    const std::string db_key = std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + APP_P4RT_NEXTHOP_TABLE_NAME +
+                               kTableKeyDelimiter + j.dump();
+    std::vector attributes;
+
+    // Verification should succeed with valid key and value.
+    attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kNeighborId), kNeighborId1});
+    attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kRouterInterfaceId), kRouterInterfaceId1});
+    EXPECT_EQ(VerifyState(db_key, attributes), "");
+
+    // Verification should fail if nexthop key mismatches.
+    auto saved_next_hop_key = p4_next_hop_entry->next_hop_key;
+    p4_next_hop_entry->next_hop_key = "invalid";
+    EXPECT_FALSE(VerifyState(db_key, attributes).empty());
+    p4_next_hop_entry->next_hop_key = saved_next_hop_key;
+
+    // Verification should fail if nexthop ID mismatches.
+    auto saved_next_hop_id = p4_next_hop_entry->next_hop_id;
+    p4_next_hop_entry->next_hop_id = "invalid";
+    EXPECT_FALSE(VerifyState(db_key, attributes).empty());
+    p4_next_hop_entry->next_hop_id = saved_next_hop_id;
+
+    // Verification should fail if rif ID mismatches.
+    auto saved_router_interface_id = p4_next_hop_entry->router_interface_id;
+    p4_next_hop_entry->router_interface_id = kRouterInterfaceId2;
+    EXPECT_FALSE(VerifyState(db_key, attributes).empty());
+    p4_next_hop_entry->router_interface_id = saved_router_interface_id;
+
+    // Verification should fail if neighbor ID mismatches.
+    auto saved_neighbor_id = p4_next_hop_entry->neighbor_id;
+    p4_next_hop_entry->neighbor_id = swss::IpAddress(kNeighborId2);
+    EXPECT_FALSE(VerifyState(db_key, attributes).empty());
+    p4_next_hop_entry->neighbor_id = saved_neighbor_id;
+
+    // Verification should fail if tunnel ID mismatches.
+ auto saved_gre_tunnel_id = p4_next_hop_entry->gre_tunnel_id; + p4_next_hop_entry->gre_tunnel_id = "invalid"; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_next_hop_entry->gre_tunnel_id = saved_gre_tunnel_id; +} + TEST_F(NextHopManagerTest, VerifyStateAsicDbTest) { auto *p4_next_hop_entry = AddNextHopEntry1(); diff --git a/orchagent/p4orch/tests/route_manager_test.cpp b/orchagent/p4orch/tests/route_manager_test.cpp index 06095d5ebe..6229f69c36 100644 --- a/orchagent/p4orch/tests/route_manager_test.cpp +++ b/orchagent/p4orch/tests/route_manager_test.cpp @@ -5,11 +5,11 @@ #include #include +#include #include #include #include "ipprefix.h" -#include "json.hpp" #include "mock_response_publisher.h" #include "mock_sai_route.h" #include "p4orch.h" @@ -242,9 +242,9 @@ class RouteManagerTest : public ::testing::Test return route_manager_.deleteRouteEntries(route_entries); } - void Enqueue(const swss::KeyOpFieldsValuesTuple &entry) + void Enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) { - route_manager_.enqueue(entry); + route_manager_.enqueue(table_name, entry); } void Drain() @@ -2469,7 +2469,7 @@ TEST_F(RouteManagerTest, RouteCreateAndUpdateInDrainSucceeds) p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), kNexthopOid1); auto key_op_fvs_1 = GenerateKeyOpFieldsValuesTuple(gVrfName, swss_ipv4_route_prefix, SET_COMMAND, p4orch::kSetNexthopId, kNexthopId1); - Enqueue(key_op_fvs_1); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_1); std::vector exp_status{SAI_STATUS_SUCCESS}; EXPECT_CALL(mock_sai_route_, create_route_entries(_, _, _, _, _, _)) .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); @@ -2482,7 +2482,7 @@ TEST_F(RouteManagerTest, RouteCreateAndUpdateInDrainSucceeds) p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId2), kNexthopOid2); auto key_op_fvs_2 = 
GenerateKeyOpFieldsValuesTuple(gVrfName, swss_ipv4_route_prefix, SET_COMMAND, p4orch::kSetNexthopId, kNexthopId2); - Enqueue(key_op_fvs_2); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_2); EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_2)), @@ -2505,7 +2505,7 @@ TEST_F(RouteManagerTest, RouteCreateAndUpdateInDrainSucceeds) auto key_op_fvs_3 = GenerateKeyOpFieldsValuesTuple(gVrfName, swss_ipv4_route_prefix, SET_COMMAND, p4orch::kSetMetadataAndDrop, "", kMetadata1); - Enqueue(key_op_fvs_3); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_3); EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) .WillRepeatedly(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_3)), @@ -2530,7 +2530,7 @@ TEST_F(RouteManagerTest, RouteCreateAndDeleteInDrainSucceeds) p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), kNexthopOid1); auto key_op_fvs_1 = GenerateKeyOpFieldsValuesTuple(gVrfName, swss_ipv4_route_prefix, SET_COMMAND, p4orch::kSetNexthopId, kNexthopId1); - Enqueue(key_op_fvs_1); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_1); std::vector exp_status{SAI_STATUS_SUCCESS}; EXPECT_CALL(mock_sai_route_, create_route_entries(_, _, _, _, _, _)) .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); @@ -2541,7 +2541,7 @@ TEST_F(RouteManagerTest, RouteCreateAndDeleteInDrainSucceeds) Drain(); auto key_op_fvs_2 = GenerateKeyOpFieldsValuesTuple(gVrfName, swss_ipv4_route_prefix, DEL_COMMAND, "", ""); - Enqueue(key_op_fvs_2); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_2); EXPECT_CALL(mock_sai_route_, remove_route_entries(_, _, _, _)) 
.WillOnce(DoAll(SetArrayArgument<3>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_2)), @@ -2566,10 +2566,10 @@ TEST_F(RouteManagerTest, UpdateFailsWhenCreateAndUpdateTheSameRouteInDrain) p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId2), kNexthopOid2); auto key_op_fvs_1 = GenerateKeyOpFieldsValuesTuple(gVrfName, swss::IpPrefix(kIpv4Prefix), SET_COMMAND, p4orch::kSetNexthopId, kNexthopId1); - Enqueue(key_op_fvs_1); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_1); auto key_op_fvs_2 = GenerateKeyOpFieldsValuesTuple(gVrfName, swss::IpPrefix(kIpv4Prefix), SET_COMMAND, p4orch::kSetNexthopId, kNexthopId2); - Enqueue(key_op_fvs_2); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_2); std::vector exp_status{SAI_STATUS_SUCCESS}; EXPECT_CALL(mock_sai_route_, create_route_entries(_, _, _, _, _, _)) @@ -2603,9 +2603,9 @@ TEST_F(RouteManagerTest, DeleteFailsWhenCreateAndDeleteTheSameRouteInDrain) p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), kNexthopOid1); auto key_op_fvs_1 = GenerateKeyOpFieldsValuesTuple(gVrfName, swss::IpPrefix(kIpv4Prefix), SET_COMMAND, p4orch::kSetNexthopId, kNexthopId1); - Enqueue(key_op_fvs_1); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_1); auto key_op_fvs_2 = GenerateKeyOpFieldsValuesTuple(gVrfName, swss::IpPrefix(kIpv4Prefix), DEL_COMMAND, "", ""); - Enqueue(key_op_fvs_2); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_2); std::vector exp_status{SAI_STATUS_SUCCESS}; EXPECT_CALL(mock_sai_route_, create_route_entries(_, _, _, _, _, _)) @@ -2639,7 +2639,7 @@ TEST_F(RouteManagerTest, RouteCreateInDrainSucceedsWhenVrfIsEmpty) p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), kNexthopOid1); auto key_op_fvs = GenerateKeyOpFieldsValuesTuple(kDefaultVrfName, swss::IpPrefix(kIpv4Prefix), SET_COMMAND, p4orch::kSetNexthopId, 
kNexthopId1); - Enqueue(key_op_fvs); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs); sai_route_entry_t exp_sai_route_entry; exp_sai_route_entry.switch_id = gSwitchId; @@ -2678,7 +2678,7 @@ TEST_F(RouteManagerTest, DeserializeRouteEntryInDrainFails) const std::string kKeyPrefix = std::string(APP_P4RT_IPV4_TABLE_NAME) + kTableKeyDelimiter; auto key_op_fvs = swss::KeyOpFieldsValuesTuple(kKeyPrefix + "{{{{{{{{{{{{", SET_COMMAND, std::vector{}); - Enqueue(key_op_fvs); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs); EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs)), FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs)), Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))) @@ -2691,7 +2691,7 @@ TEST_F(RouteManagerTest, ValidateRouteEntryInDrainFailsWhenVrfDoesNotExist) p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), kNexthopOid1); auto key_op_fvs = GenerateKeyOpFieldsValuesTuple("Invalid-Vrf", swss::IpPrefix(kIpv4Prefix), SET_COMMAND, p4orch::kSetNexthopId, kNexthopId1); - Enqueue(key_op_fvs); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs); EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs)), FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs)), Eq(StatusCode::SWSS_RC_NOT_FOUND), Eq(true))) @@ -2703,7 +2703,7 @@ TEST_F(RouteManagerTest, ValidateRouteEntryInDrainFailsWhenNexthopDoesNotExist) { auto key_op_fvs = GenerateKeyOpFieldsValuesTuple(gVrfName, swss::IpPrefix(kIpv4Prefix), SET_COMMAND, p4orch::kSetNexthopId, kNexthopId1); - Enqueue(key_op_fvs); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs); EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs)), FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs)), Eq(StatusCode::SWSS_RC_NOT_FOUND), Eq(true))) @@ -2717,7 +2717,7 @@ TEST_F(RouteManagerTest, InvalidateSetRouteEntryInDrainFails) // No nexthop ID with kSetNexthopId action. 
auto key_op_fvs = GenerateKeyOpFieldsValuesTuple(gVrfName, swss::IpPrefix(kIpv4Prefix), SET_COMMAND, p4orch::kSetNexthopId, ""); - Enqueue(key_op_fvs); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs); EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs)), FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs)), Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))) @@ -2730,7 +2730,7 @@ TEST_F(RouteManagerTest, InvalidateDelRouteEntryInDrainFails) // Route does not exist. auto key_op_fvs = GenerateKeyOpFieldsValuesTuple(gVrfName, swss::IpPrefix(kIpv4Prefix), DEL_COMMAND, p4orch::kSetNexthopId, kNexthopId1); - Enqueue(key_op_fvs); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs); EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs)), FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs)), Eq(StatusCode::SWSS_RC_NOT_FOUND), Eq(true))) @@ -2749,7 +2749,7 @@ TEST_F(RouteManagerTest, InvalidCommandInDrainFails) attributes.push_back(swss::FieldValueTuple{p4orch::kAction, p4orch::kSetNexthopId}); attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kNexthopId), kNexthopId1}); auto key_op_fvs = swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), "INVALID_COMMAND", attributes); - Enqueue(key_op_fvs); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs); EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs)), FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs)), Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))) diff --git a/orchagent/p4orch/tests/router_interface_manager_test.cpp b/orchagent/p4orch/tests/router_interface_manager_test.cpp index 9c81310e5f..d1c7330cc7 100644 --- a/orchagent/p4orch/tests/router_interface_manager_test.cpp +++ b/orchagent/p4orch/tests/router_interface_manager_test.cpp @@ -164,7 +164,7 @@ class RouterInterfaceManagerTest : public ::testing::Test void Enqueue(const swss::KeyOpFieldsValuesTuple &entry) { - router_intf_manager_.enqueue(entry); + 
router_intf_manager_.enqueue(APP_P4RT_ROUTER_INTERFACE_TABLE_NAME, entry); } void Drain() @@ -965,4 +965,4 @@ TEST_F(RouterInterfaceManagerTest, VerifyStateAsicDbTest) router_intf_entry_ptr->port_name = "Ethernet8"; EXPECT_FALSE(VerifyState(db_key, attributes).empty()); router_intf_entry_ptr->port_name = "Ethernet7"; -} \ No newline at end of file +} diff --git a/orchagent/p4orch/tests/test_main.cpp b/orchagent/p4orch/tests/test_main.cpp index 203344e434..0170588a42 100644 --- a/orchagent/p4orch/tests/test_main.cpp +++ b/orchagent/p4orch/tests/test_main.cpp @@ -37,15 +37,9 @@ sai_object_id_t kMirrorSessionOid2 = 9002; sai_object_id_t gUnderlayIfId; #define DEFAULT_BATCH_SIZE 128 -int gBatchSize = DEFAULT_BATCH_SIZE; #define DEFAULT_MAX_BULK_SIZE 1000 +extern int gBatchSize; size_t gMaxBulkSize = DEFAULT_MAX_BULK_SIZE; -bool gSairedisRecord = true; -bool gSwssRecord = true; -bool gLogRotate = false; -bool gSaiRedisLogRotate = false; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; bool gSyncMode = false; bool gIsNatSupported = false; @@ -56,8 +50,6 @@ VRFOrch *gVrfOrch; FlowCounterRouteOrch *gFlowCounterRouteOrch; SwitchOrch *gSwitchOrch; Directory gDirectory; -ofstream gRecordOfs; -string gRecordFile; swss::DBConnector *gAppDb; swss::DBConnector *gStateDb; swss::DBConnector *gConfigDb; @@ -73,12 +65,39 @@ sai_acl_api_t *sai_acl_api; sai_policer_api_t *sai_policer_api; sai_virtual_router_api_t *sai_virtual_router_api; sai_hostif_api_t *sai_hostif_api; +sai_hash_api_t *sai_hash_api; sai_switch_api_t *sai_switch_api; sai_mirror_api_t *sai_mirror_api; sai_udf_api_t *sai_udf_api; sai_tunnel_api_t *sai_tunnel_api; sai_my_mac_api_t *sai_my_mac_api; sai_counter_api_t *sai_counter_api; +sai_generic_programmable_api_t *sai_generic_programmable_api; + +task_process_status handleSaiCreateStatus(sai_api_t api, sai_status_t status, void *context) +{ + return task_success; +} + +task_process_status handleSaiSetStatus(sai_api_t api, sai_status_t 
status, void *context) +{ + return task_success; +} + +task_process_status handleSaiRemoveStatus(sai_api_t api, sai_status_t status, void *context) +{ + return task_success; +} + +task_process_status handleSaiGetStatus(sai_api_t api, sai_status_t status, void *context) +{ + return task_success; +} + +bool parseHandleSaiStatusFailure(task_process_status status) +{ + return true; +} namespace { @@ -154,6 +173,7 @@ void AddVrf() int main(int argc, char *argv[]) { + gBatchSize = DEFAULT_BATCH_SIZE; testing::InitGoogleTest(&argc, argv); sai_router_interface_api_t router_intfs_api; @@ -165,12 +185,14 @@ int main(int argc, char *argv[]) sai_policer_api_t policer_api; sai_virtual_router_api_t virtual_router_api; sai_hostif_api_t hostif_api; + sai_hash_api_t hash_api; sai_switch_api_t switch_api; sai_mirror_api_t mirror_api; sai_udf_api_t udf_api; sai_my_mac_api_t my_mac_api; sai_tunnel_api_t tunnel_api; sai_counter_api_t counter_api; + sai_generic_programmable_api_t generic_programmable_api; sai_router_intfs_api = &router_intfs_api; sai_neighbor_api = &neighbor_api; sai_next_hop_api = &next_hop_api; @@ -180,12 +202,14 @@ int main(int argc, char *argv[]) sai_policer_api = &policer_api; sai_virtual_router_api = &virtual_router_api; sai_hostif_api = &hostif_api; + sai_hash_api = &hash_api; sai_switch_api = &switch_api; sai_mirror_api = &mirror_api; sai_udf_api = &udf_api; sai_my_mac_api = &my_mac_api; sai_tunnel_api = &tunnel_api; sai_counter_api = &counter_api; + sai_generic_programmable_api = &generic_programmable_api; swss::DBConnector appl_db("APPL_DB", 0); swss::DBConnector state_db("STATE_DB", 0); diff --git a/orchagent/p4orch/tests/wcmp_manager_test.cpp b/orchagent/p4orch/tests/wcmp_manager_test.cpp index 1c4981b665..088264bba4 100644 --- a/orchagent/p4orch/tests/wcmp_manager_test.cpp +++ b/orchagent/p4orch/tests/wcmp_manager_test.cpp @@ -3,9 +3,9 @@ #include #include +#include #include -#include "json.hpp" #include "mock_response_publisher.h" #include "mock_sai_acl.h" 
#include "mock_sai_hostif.h" @@ -45,6 +45,7 @@ using ::testing::DoAll; using ::testing::Eq; using ::testing::Return; using ::testing::SetArgPointee; +using ::testing::SetArrayArgument; using ::testing::StrictMock; using ::testing::Truly; @@ -70,6 +71,60 @@ const std::string kNexthopKey1 = KeyGenerator::generateNextHopKey(kNexthopId1); const std::string kNexthopKey2 = KeyGenerator::generateNextHopKey(kNexthopId2); const std::string kNexthopKey3 = KeyGenerator::generateNextHopKey(kNexthopId3); +// Matches two SAI attributes. +bool MatchSaiAttribute(const sai_attribute_t &attr, const sai_attribute_t &exp_attr) +{ + if (exp_attr.id == SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID) + { + if (attr.id != SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID || attr.value.oid != exp_attr.value.oid) + { + return false; + } + } + if (exp_attr.id == SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID) + { + if (attr.id != SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID || attr.value.oid != exp_attr.value.oid) + { + return false; + } + } + if (exp_attr.id == SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT) + { + if (attr.id != SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT || attr.value.u32 != exp_attr.value.u32) + { + return false; + } + } + return true; +} + +MATCHER_P(ArrayEq, array, "") +{ + for (size_t i = 0; i < array.size(); ++i) + { + if (arg[i] != array[i]) + { + return false; + } + } + return true; +} + +MATCHER_P(AttrArrayArrayEq, array, "") +{ + for (size_t i = 0; i < array.size(); ++i) + { + for (size_t j = 0; j < array[i].size(); j++) + { + if (!MatchSaiAttribute(arg[i][j], array[i][j])) + { + return false; + } + } + } + return true; +} + // Matches the next hop group type sai_attribute_t argument. 
bool MatchSaiNextHopGroupAttribute(const sai_attribute_t *attr) { @@ -118,6 +173,26 @@ bool MatchSaiNextHopGroupMemberAttribute(const sai_object_id_t expected_next_hop return true; } +std::vector GetSaiNextHopGroupMemberAttribute(sai_object_id_t next_hop_oid, uint32_t weight, + sai_object_id_t group_oid) +{ + std::vector attrs; + sai_attribute_t attr; + attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID; + attr.value.oid = group_oid; + attrs.push_back(attr); + + attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID; + attr.value.oid = next_hop_oid; + attrs.push_back(attr); + + attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT; + attr.value.u32 = weight; + attrs.push_back(attr); + + return attrs; +} + void VerifyWcmpGroupMemberEntry(const std::string &expected_next_hop_id, const int expected_weight, std::shared_ptr wcmp_gm_entry) { @@ -172,6 +247,8 @@ class WcmpManagerTest : public ::testing::Test sai_next_hop_group_api->create_next_hop_group_member = create_next_hop_group_member; sai_next_hop_group_api->remove_next_hop_group_member = remove_next_hop_group_member; sai_next_hop_group_api->set_next_hop_group_member_attribute = set_next_hop_group_member_attribute; + sai_next_hop_group_api->create_next_hop_group_members = create_next_hop_group_members; + sai_next_hop_group_api->remove_next_hop_group_members = remove_next_hop_group_members; sai_hostif_api->create_hostif_table_entry = mock_create_hostif_table_entry; sai_hostif_api->create_hostif_trap = mock_create_hostif_trap; @@ -196,7 +273,7 @@ class WcmpManagerTest : public ::testing::Test void Enqueue(const swss::KeyOpFieldsValuesTuple &entry) { - wcmp_group_manager_->enqueue(entry); + wcmp_group_manager_->enqueue(APP_P4RT_WCMP_GROUP_TABLE_NAME, entry); } void Drain() @@ -229,18 +306,6 @@ class WcmpManagerTest : public ::testing::Test wcmp_group_manager_->restorePrunedNextHops(port); } - bool VerifyWcmpGroupMemberInPrunedSet(std::shared_ptr gm, bool expected_member_present, - long unsigned int expected_set_size) 
- { - if (wcmp_group_manager_->pruned_wcmp_members_set.size() != expected_set_size) - return false; - - return expected_member_present ? (wcmp_group_manager_->pruned_wcmp_members_set.find(gm) != - wcmp_group_manager_->pruned_wcmp_members_set.end()) - : (wcmp_group_manager_->pruned_wcmp_members_set.find(gm) == - wcmp_group_manager_->pruned_wcmp_members_set.end()); - } - bool VerifyWcmpGroupMemberInPortMap(std::shared_ptr gm, bool expected_member_present, long unsigned int expected_set_size) { @@ -338,13 +403,18 @@ P4WcmpGroupEntry WcmpManagerTest::AddWcmpGroupEntryWithWatchport(const std::stri .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupOid1), Return(SAI_STATUS_SUCCESS))); // For members with non empty watchport field, member creation in SAI happens // for operationally up ports only.. + std::vector return_oids{kWcmpGroupMemberOid1}; + std::vector exp_status{SAI_STATUS_SUCCESS}; if (oper_up) { - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL( + mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); } EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessAddRequest(&app_db_entry)); EXPECT_NE(nullptr, GetWcmpGroupEntry(kWcmpGroupId1)); @@ -361,16 +431,16 @@ P4WcmpGroupEntry WcmpManagerTest::AddWcmpGroupEntry1() create_next_hop_group(_, Eq(gSwitchId), Eq(1), Truly(std::bind(MatchSaiNextHopGroupAttribute, std::placeholders::_1)))) 
.WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid2), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids{kWcmpGroupMemberOid1, kWcmpGroupMemberOid2}; + std::vector exp_status{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(2), ArrayEq(std::vector{3, 3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1), + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessAddRequest(&app_db_entry)); EXPECT_NE(nullptr, GetWcmpGroupEntry(kWcmpGroupId1)); return app_db_entry; @@ -420,21 +490,26 @@ TEST_F(WcmpManagerTest, CreateWcmpGroupFailsWhenCreateGroupMemberSaiCallFails) // WCMP group creation fails when one of the group member creation fails EXPECT_CALL(mock_sai_next_hop_group_, create_next_hop_group(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, 
std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); + std::vector return_oids{kWcmpGroupMemberOid1, SAI_NULL_OBJECT_ID}; + std::vector exp_create_status{SAI_STATUS_SUCCESS, SAI_STATUS_ITEM_NOT_FOUND}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(2), ArrayEq(std::vector{3, 3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1), + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_ITEM_NOT_FOUND))); + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group(Eq(kWcmpGroupOid1))) .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ProcessAddRequest(&app_db_entry)); + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessAddRequest(&app_db_entry)); std::string key = KeyGenerator::generateWcmpGroupKey(kWcmpGroupId1); auto *wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); EXPECT_EQ(nullptr, wcmp_group_entry_ptr); @@ 
-454,23 +529,28 @@ TEST_F(WcmpManagerTest, CreateWcmpGroupFailsWhenCreateGroupMemberSaiCallFailsPlu // WCMP group creation fails when one of the group member creation fails EXPECT_CALL(mock_sai_next_hop_group_, create_next_hop_group(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_FAILURE)); + std::vector return_oids{kWcmpGroupMemberOid1, SAI_NULL_OBJECT_ID}; + std::vector exp_create_status{SAI_STATUS_SUCCESS, SAI_STATUS_ITEM_NOT_FOUND}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(2), ArrayEq(std::vector{3, 3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1), + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_ITEM_NOT_FOUND))); + std::vector exp_remove_status{SAI_STATUS_FAILURE}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + 
DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_FAILURE))); EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group(Eq(kWcmpGroupOid1))) .WillOnce(Return(SAI_STATUS_SUCCESS)); // TODO: Expect critical state. - EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ProcessAddRequest(&app_db_entry)); + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessAddRequest(&app_db_entry)); } TEST_F(WcmpManagerTest, CreateWcmpGroupFailsWhenCreateGroupMemberSaiCallFailsPlusGroupRecoveryFails) @@ -481,23 +561,28 @@ TEST_F(WcmpManagerTest, CreateWcmpGroupFailsWhenCreateGroupMemberSaiCallFailsPlu // WCMP group creation fails when one of the group member creation fails EXPECT_CALL(mock_sai_next_hop_group_, create_next_hop_group(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); + std::vector return_oids{kWcmpGroupMemberOid1, SAI_NULL_OBJECT_ID}; + std::vector exp_create_status{SAI_STATUS_SUCCESS, SAI_STATUS_ITEM_NOT_FOUND}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(2), ArrayEq(std::vector{3, 3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1), + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 1, kWcmpGroupOid1)}), + 
Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_ITEM_NOT_FOUND))); + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group(Eq(kWcmpGroupOid1))) .WillOnce(Return(SAI_STATUS_FAILURE)); // TODO: Expect critical state. - EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ProcessAddRequest(&app_db_entry)); + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessAddRequest(&app_db_entry)); } TEST_F(WcmpManagerTest, CreateWcmpGroupFailsWhenCreateGroupSaiCallFails) @@ -534,52 +619,72 @@ TEST_F(WcmpManagerTest, RemoveWcmpGroupFailsWhenNotExist) TEST_F(WcmpManagerTest, RemoveWcmpGroupFailsWhenSaiCallFails) { P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntry1(); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); + std::vector exp_remove_status{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members( + Eq(2), ArrayEq(std::vector{kWcmpGroupMemberOid2, kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group(Eq(kWcmpGroupOid1))) .WillOnce(Return(SAI_STATUS_FAILURE)); - EXPECT_CALL(mock_sai_next_hop_group_, - 
create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid2), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids{kWcmpGroupMemberOid1, kWcmpGroupMemberOid2}; + std::vector exp_create_status{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(2), ArrayEq(std::vector{3, 3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1), + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, RemoveWcmpGroup(kWcmpGroupId1)); } TEST_F(WcmpManagerTest, RemoveWcmpGroupFailsWhenMemberRemovalFails) { P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntry1(); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - .WillOnce(Return(SAI_STATUS_FAILURE)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), 
Return(SAI_STATUS_SUCCESS))); + std::vector exp_remove_status{SAI_STATUS_FAILURE, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members( + Eq(2), ArrayEq(std::vector{kWcmpGroupMemberOid2, kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_FAILURE))); + std::vector return_oids{kWcmpGroupMemberOid1}; + std::vector exp_create_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, RemoveWcmpGroup(kWcmpGroupId1)); } TEST_F(WcmpManagerTest, RemoveWcmpGroupFailsWhenMemberRemovalFailsPlusRecoveryFails) { P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntry1(); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - .WillOnce(Return(SAI_STATUS_FAILURE)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_FAILURE))); + std::vector exp_remove_status{SAI_STATUS_FAILURE, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members( + Eq(2), ArrayEq(std::vector{kWcmpGroupMemberOid2, kWcmpGroupMemberOid1}), + 
Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_FAILURE))); + std::vector return_oids{SAI_NULL_OBJECT_ID}; + std::vector exp_create_status{SAI_STATUS_FAILURE}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_FAILURE))); // TODO: Expect critical state. EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, RemoveWcmpGroup(kWcmpGroupId1)); } @@ -594,20 +699,36 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupMembersSucceed) std::shared_ptr gm2 = createWcmpGroupMemberEntry(kNexthopId2, 15); wcmp_group.wcmp_group_members.push_back(gm1); wcmp_group.wcmp_group_members.push_back(gm2); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 3, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid4), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 15, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid5), Return(SAI_STATUS_SUCCESS))); + std::vector 
exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid2}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids_4{kWcmpGroupMemberOid4}; + std::vector return_oids_5{kWcmpGroupMemberOid5}; + std::vector exp_create_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 3, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_4.begin(), return_oids_4.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 15, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_5.begin(), return_oids_5.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_TRUE(ProcessUpdateRequest(&wcmp_group).ok()); VerifyWcmpGroupEntry(wcmp_group, *GetWcmpGroupEntry(kWcmpGroupId1)); uint32_t wcmp_group_refcount = 0; @@ -622,15 +743,27 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupMembersSucceed) wcmp_group.wcmp_group_members.clear(); gm2 = 
createWcmpGroupMemberEntry(kNexthopId2, 15); wcmp_group.wcmp_group_members.push_back(gm2); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid4))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid5))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 15, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid2), Return(SAI_STATUS_SUCCESS))); + exp_remove_status = {SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid4}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid5}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids_2{kWcmpGroupMemberOid2}; + exp_create_status = {SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 15, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_2.begin(), return_oids_2.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_TRUE(ProcessUpdateRequest(&wcmp_group).ok()); VerifyWcmpGroupEntry(wcmp_group, *GetWcmpGroupEntry(kWcmpGroupId1)); 
ASSERT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, kWcmpGroupKey1, &wcmp_group_refcount)); @@ -645,18 +778,30 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupMembersSucceed) std::shared_ptr updated_gm1 = createWcmpGroupMemberEntry(kNexthopId1, 20); wcmp_group.wcmp_group_members.push_back(updated_gm1); wcmp_group.wcmp_group_members.push_back(updated_gm2); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 20, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 15, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid5), Return(SAI_STATUS_SUCCESS))); + exp_remove_status = {SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid2}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids_1{kWcmpGroupMemberOid1}; + exp_create_status = {SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 20, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_1.begin(), return_oids_1.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + 
Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 15, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_5.begin(), return_oids_5.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_TRUE(ProcessUpdateRequest(&wcmp_group).ok()); VerifyWcmpGroupEntry(wcmp_group, *GetWcmpGroupEntry(kWcmpGroupId1)); ASSERT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, kWcmpGroupKey1, &wcmp_group_refcount)); @@ -668,10 +813,17 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupMembersSucceed) // Update WCMP without group members wcmp_group.wcmp_group_members.clear(); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid5))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); + exp_remove_status = {SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid5}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); EXPECT_TRUE(ProcessUpdateRequest(&wcmp_group).ok()); VerifyWcmpGroupEntry(wcmp_group, *GetWcmpGroupEntry(kWcmpGroupId1)); ASSERT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, 
kWcmpGroupKey1, &wcmp_group_refcount)); @@ -696,25 +848,38 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupFailsWhenRemoveGroupMemberSaiCallFails) wcmp_group.wcmp_group_members.push_back(gm1); wcmp_group.wcmp_group_members.push_back(gm2); wcmp_group.wcmp_group_members.push_back(gm3); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 3, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid4), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 10, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid5), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid3, 30, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid3), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); + std::vector return_oids_4{kWcmpGroupMemberOid4}; + std::vector return_oids_5_6{kWcmpGroupMemberOid5, kWcmpGroupMemberOid3}; + std::vector exp_create_status_1{SAI_STATUS_SUCCESS}; + std::vector exp_create_status_2{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 3, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + 
.WillOnce(DoAll(SetArrayArgument<5>(return_oids_4.begin(), return_oids_4.end()), + SetArrayArgument<6>(exp_create_status_1.begin(), exp_create_status_1.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(2), ArrayEq(std::vector{3, 3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 10, kWcmpGroupOid1), + GetSaiNextHopGroupMemberAttribute(kNexthopOid3, 30, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_5_6.begin(), return_oids_5_6.end()), + SetArrayArgument<6>(exp_create_status_2.begin(), exp_create_status_2.end()), + Return(SAI_STATUS_SUCCESS))); + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid2}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); EXPECT_TRUE(ProcessUpdateRequest(&wcmp_group).ok()); VerifyWcmpGroupEntry(wcmp_group, *GetWcmpGroupEntry(kWcmpGroupId1)); uint32_t wcmp_group_refcount = 0; @@ -732,16 +897,23 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupFailsWhenRemoveGroupMemberSaiCallFails) wcmp_group.wcmp_group_members.clear(); wcmp_group.wcmp_group_members.push_back(gm1); wcmp_group.wcmp_group_members.push_back(gm3); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid5))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, 
remove_next_hop_group_member(Eq(kWcmpGroupMemberOid3))) - .WillOnce(Return(SAI_STATUS_OBJECT_IN_USE)); - // Clean up - revert deletions -success + exp_remove_status = {SAI_STATUS_OBJECT_IN_USE, SAI_STATUS_SUCCESS}; EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 10, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid5), Return(SAI_STATUS_SUCCESS))); + remove_next_hop_group_members( + Eq(2), ArrayEq(std::vector{kWcmpGroupMemberOid3, kWcmpGroupMemberOid5}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), + Return(SAI_STATUS_OBJECT_IN_USE))); + // Clean up - revert deletions -success + std::vector return_oids_5{kWcmpGroupMemberOid5}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 10, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_5.begin(), return_oids_5.end()), + SetArrayArgument<6>(exp_create_status_1.begin(), exp_create_status_1.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_EQ(StatusCode::SWSS_RC_IN_USE, ProcessUpdateRequest(&wcmp_group)); P4WcmpGroupEntry expected_wcmp_group = {.wcmp_group_id = kWcmpGroupId1, .wcmp_group_members = {}}; expected_wcmp_group.wcmp_group_members.push_back(gm1); @@ -760,19 +932,26 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupFailsWhenRemoveGroupMemberSaiCallFails) // Remove WCMP group member with nexthop_id=kNexthopId1 and // nexthop_id=kNexthopId3(fail) - fail to clean up - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid5))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, 
remove_next_hop_group_member(Eq(kWcmpGroupMemberOid3))) - .WillOnce(Return(SAI_STATUS_OBJECT_IN_USE)); - // Clean up - revert deletions -failure + exp_remove_status = {SAI_STATUS_OBJECT_IN_USE, SAI_STATUS_SUCCESS}; EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 10, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_TABLE_FULL)); + remove_next_hop_group_members( + Eq(2), ArrayEq(std::vector{kWcmpGroupMemberOid3, kWcmpGroupMemberOid5}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), + Return(SAI_STATUS_OBJECT_IN_USE))); + // Clean up - revert deletions -failure + std::vector return_oids{SAI_NULL_OBJECT_ID}; + std::vector exp_create_status{SAI_STATUS_TABLE_FULL}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 10, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_TABLE_FULL))); // TODO: Expect critical state. 
- EXPECT_EQ("Failed to remove WCMP group member with nexthop id " - "'ju1u32m3.atl11:qe-3/7'", + EXPECT_EQ("Failed to delete WCMP group member: 'ju1u32m3.atl11:qe-3/7'", ProcessUpdateRequest(&wcmp_group).message()); // WCMP group is as expected, but refcounts are not VerifyWcmpGroupEntry(expected_wcmp_group, *GetWcmpGroupEntry(kWcmpGroupId1)); @@ -796,15 +975,27 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupFailsWhenCreateNewGroupMemberSaiCallFails wcmp_group.wcmp_group_members.clear(); std::shared_ptr gm = createWcmpGroupMemberEntry(kNexthopId2, 15); wcmp_group.wcmp_group_members.push_back(gm); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 15, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid5), Return(SAI_STATUS_SUCCESS))); + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid2}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids_5{kWcmpGroupMemberOid5}; + std::vector exp_create_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + 
create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 15, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_5.begin(), return_oids_5.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_TRUE(ProcessUpdateRequest(&wcmp_group).ok()); VerifyWcmpGroupEntry(wcmp_group, *GetWcmpGroupEntry(kWcmpGroupId1)); uint32_t wcmp_group_refcount = 0; @@ -827,33 +1018,47 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupFailsWhenCreateNewGroupMemberSaiCallFails updated_wcmp_group.wcmp_group_members.push_back(updated_gm1); updated_wcmp_group.wcmp_group_members.push_back(updated_gm2); updated_wcmp_group.wcmp_group_members.push_back(updated_gm3); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid5))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 3, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 20, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid2), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid3, 30, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_TABLE_FULL)); + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid5}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + 
.WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids_1{kWcmpGroupMemberOid1}; + std::vector exp_create_status_fail{SAI_STATUS_SUCCESS, SAI_STATUS_TABLE_FULL}; + std::vector return_oids_2_null{kWcmpGroupMemberOid2, SAI_NULL_OBJECT_ID}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 3, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_1.begin(), return_oids_1.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(2), ArrayEq(std::vector{3, 3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 20, kWcmpGroupOid1), + GetSaiNextHopGroupMemberAttribute(kNexthopOid3, 30, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_2_null.begin(), return_oids_2_null.end()), + SetArrayArgument<6>(exp_create_status_fail.begin(), exp_create_status_fail.end()), + Return(SAI_STATUS_TABLE_FULL))); // Clean up - success - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 15, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid5), Return(SAI_STATUS_SUCCESS))); + std::vector 
exp_remove_status_2{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members( + Eq(2), ArrayEq(std::vector{kWcmpGroupMemberOid2, kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<3>(exp_remove_status_2.begin(), exp_remove_status_2.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 15, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_5.begin(), return_oids_5.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_FALSE(ProcessUpdateRequest(&updated_wcmp_group).ok()); P4WcmpGroupEntry expected_wcmp_group = {.wcmp_group_id = kWcmpGroupId1, .wcmp_group_members = {}}; std::shared_ptr expected_gm = createWcmpGroupMemberEntry(kNexthopId2, 15); @@ -869,44 +1074,48 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupFailsWhenCreateNewGroupMemberSaiCallFails EXPECT_EQ(0, nexthop_refcount); // Try again, but this time clean up failed to remove created group member - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid5))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 3, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 20, - kWcmpGroupOid1, std::placeholders::_1)))) - 
.WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid2), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid3, 30, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_TABLE_FULL)); + exp_remove_status = {SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid5}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 3, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_1.begin(), return_oids_1.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(2), ArrayEq(std::vector{3, 3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 20, kWcmpGroupOid1), + GetSaiNextHopGroupMemberAttribute(kNexthopOid3, 30, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_2_null.begin(), return_oids_2_null.end()), + SetArrayArgument<6>(exp_create_status_fail.begin(), exp_create_status_fail.end()), + Return(SAI_STATUS_TABLE_FULL))); // Clean up - revert creation - failure - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); + std::vector exp_remove_status_fail{SAI_STATUS_OBJECT_IN_USE, SAI_STATUS_SUCCESS}; 
EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 15, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid5), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - .WillOnce(Return(SAI_STATUS_OBJECT_IN_USE)); + remove_next_hop_group_members( + Eq(2), ArrayEq(std::vector{kWcmpGroupMemberOid2, kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<3>(exp_remove_status_fail.begin(), exp_remove_status_fail.end()), + Return(SAI_STATUS_OBJECT_IN_USE))); // TODO: Expect critical state. - EXPECT_EQ("Failed to create next hop group member 'ju1u32m3.atl11:qe-3/7'", + EXPECT_EQ("Fail to create wcmp group member: 'ju1u32m3.atl11:qe-3/7'", ProcessUpdateRequest(&updated_wcmp_group).message()); // WCMP group is as expected, but refcounts are not VerifyWcmpGroupEntry(expected_wcmp_group, *GetWcmpGroupEntry(kWcmpGroupId1)); ASSERT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, kWcmpGroupKey1, &wcmp_group_refcount)); - EXPECT_EQ(2, wcmp_group_refcount); // Corrupt status due to clean up failure + EXPECT_EQ(1, wcmp_group_refcount); ASSERT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, &nexthop_refcount)); EXPECT_EQ(0, nexthop_refcount); // Corrupt status due to clean up failure ASSERT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey2, &nexthop_refcount)); - EXPECT_EQ(2, nexthop_refcount); + EXPECT_EQ(1, nexthop_refcount); ASSERT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey3, &nexthop_refcount)); EXPECT_EQ(0, nexthop_refcount); } @@ -922,18 +1131,32 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupFailsWhenReduceGroupMemberWeightSaiCallFa std::shared_ptr gm2 = createWcmpGroupMemberEntry(kNexthopId2, 10); 
wcmp_group.wcmp_group_members.push_back(gm1); wcmp_group.wcmp_group_members.push_back(gm2); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_NOT_SUPPORTED)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids_1{kWcmpGroupMemberOid1}; + std::vector return_oids_null{SAI_NULL_OBJECT_ID}; + std::vector exp_create_status{SAI_STATUS_SUCCESS}; + std::vector exp_create_status_fail{SAI_STATUS_NOT_SUPPORTED}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_null.begin(), return_oids_null.end()), + SetArrayArgument<6>(exp_create_status_fail.begin(), exp_create_status_fail.end()), + Return(SAI_STATUS_NOT_SUPPORTED))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + 
GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_1.begin(), return_oids_1.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_NOT_SUPPORTED))); EXPECT_FALSE(ProcessUpdateRequest(&wcmp_group).ok()); P4WcmpGroupEntry expected_wcmp_group = {.wcmp_group_id = kWcmpGroupId1, .wcmp_group_members = {}}; std::shared_ptr expected_gm1 = createWcmpGroupMemberEntry(kNexthopId1, 2); @@ -962,33 +1185,54 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupFailsWhenIncreaseGroupMemberWeightSaiCall std::shared_ptr gm2 = createWcmpGroupMemberEntry(kNexthopId2, 10); wcmp_group.wcmp_group_members.push_back(gm1); wcmp_group.wcmp_group_members.push_back(gm2); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid4), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 10, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_NOT_SUPPORTED)); + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + 
EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid2}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids_4{kWcmpGroupMemberOid4}; + std::vector return_oids_null{SAI_NULL_OBJECT_ID}; + std::vector exp_create_status{SAI_STATUS_SUCCESS}; + std::vector exp_create_status_fail{SAI_STATUS_NOT_SUPPORTED}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_4.begin(), return_oids_4.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 10, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_null.begin(), return_oids_null.end()), + SetArrayArgument<6>(exp_create_status_fail.begin(), exp_create_status_fail.end()), + Return(SAI_STATUS_NOT_SUPPORTED))); // Clean up modified members - success - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid4))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_next_hop_group_, - 
create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid2), Return(SAI_STATUS_SUCCESS))); + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid4}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids_1_2{kWcmpGroupMemberOid1, kWcmpGroupMemberOid2}; + std::vector exp_create_status_2{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(2), ArrayEq(std::vector{3, 3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1), + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_1_2.begin(), return_oids_1_2.end()), + SetArrayArgument<6>(exp_create_status_2.begin(), exp_create_status_2.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_FALSE(ProcessUpdateRequest(&wcmp_group).ok()); P4WcmpGroupEntry expected_wcmp_group = {.wcmp_group_id = kWcmpGroupId1, .wcmp_group_members = {}}; std::shared_ptr expected_gm1 = createWcmpGroupMemberEntry(kNexthopId1, 2); @@ -1005,38 +1249,52 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupFailsWhenIncreaseGroupMemberWeightSaiCall ASSERT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey2, &nexthop_refcount)); EXPECT_EQ(1, nexthop_refcount); // Try again, the same error happens when update and new error during clean up - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - 
.WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid4), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 10, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_NOT_SUPPORTED)); + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid2}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_4.begin(), return_oids_4.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 10, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_null.begin(), return_oids_null.end()), + 
SetArrayArgument<6>(exp_create_status_fail.begin(), exp_create_status_fail.end()), + Return(SAI_STATUS_NOT_SUPPORTED))); // Clean up modified members - failure - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid4))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid2), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_NOT_SUPPORTED))); + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid4}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids_2_null{SAI_NULL_OBJECT_ID, kWcmpGroupMemberOid2}; + std::vector exp_create_status_2_fail{SAI_STATUS_NOT_SUPPORTED, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(2), ArrayEq(std::vector{3, 3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1), + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_2_null.begin(), return_oids_2_null.end()), + SetArrayArgument<6>(exp_create_status_2_fail.begin(), exp_create_status_2_fail.end()), + Return(SAI_STATUS_NOT_SUPPORTED))); // TODO: Expect critical state. 
- EXPECT_EQ("Failed to create next hop group member " - "'ju1u32m2.atl11:qe-3/7'", - ProcessUpdateRequest(&wcmp_group).message()); + EXPECT_EQ("Fail to create wcmp group member: 'ju1u32m2.atl11:qe-3/7'", ProcessUpdateRequest(&wcmp_group).message()); // weight of wcmp_group_members[kNexthopId1] unable to revert // SAI object in ASIC DB: missing group member with // next_hop_id=kNexthopId1 @@ -1052,19 +1310,58 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupFailsWhenIncreaseGroupMemberWeightSaiCall TEST_F(WcmpManagerTest, ValidateWcmpGroupEntryFailsWhenNextHopDoesNotExist) { - P4WcmpGroupEntry app_db_entry = {.wcmp_group_id = kWcmpGroupId1, .wcmp_group_members = {}}; - std::shared_ptr gm = createWcmpGroupMemberEntry("Unregistered-Nexthop", 1); - app_db_entry.wcmp_group_members.push_back(gm); - EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ProcessAddRequest(&app_db_entry)); + const std::string kKeyPrefix = std::string(APP_P4RT_WCMP_GROUP_TABLE_NAME) + kTableKeyDelimiter; + p4_oid_mapper_->setOID(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, kNexthopOid1); + nlohmann::json j; + j[prependMatchField(p4orch::kWcmpGroupId)] = kWcmpGroupId1; + std::vector attributes; + nlohmann::json actions; + nlohmann::json action; + action[p4orch::kAction] = p4orch::kSetNexthopId; + action[prependParamField(p4orch::kNexthopId)] = kNexthopId1; + actions.push_back(action); + action[prependParamField(p4orch::kNexthopId)] = kNexthopId2; + actions.push_back(action); + attributes.push_back(swss::FieldValueTuple{p4orch::kActions, actions.dump()}); + Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); + Drain(); + std::string key = KeyGenerator::generateWcmpGroupKey(kWcmpGroupId1); + auto *wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); + EXPECT_EQ(nullptr, wcmp_group_entry_ptr); + EXPECT_FALSE(p4_oid_mapper_->existsOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, key)); + uint32_t ref_cnt; + EXPECT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, 
&ref_cnt)); + EXPECT_EQ(0, ref_cnt); } TEST_F(WcmpManagerTest, ValidateWcmpGroupEntryFailsWhenWeightLessThanOne) { - P4WcmpGroupEntry app_db_entry = {.wcmp_group_id = kWcmpGroupId1, .wcmp_group_members = {}}; - std::shared_ptr gm = createWcmpGroupMemberEntry(kNexthopId1, 0); - app_db_entry.wcmp_group_members.push_back(gm); + const std::string kKeyPrefix = std::string(APP_P4RT_WCMP_GROUP_TABLE_NAME) + kTableKeyDelimiter; p4_oid_mapper_->setOID(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, kNexthopOid1); - EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ProcessAddRequest(&app_db_entry)); + p4_oid_mapper_->setOID(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey2, kNexthopOid2); + nlohmann::json j; + j[prependMatchField(p4orch::kWcmpGroupId)] = kWcmpGroupId1; + std::vector attributes; + nlohmann::json actions; + nlohmann::json action; + action[p4orch::kAction] = p4orch::kSetNexthopId; + action[prependParamField(p4orch::kNexthopId)] = kNexthopId1; + actions.push_back(action); + action[p4orch::kWeight] = -1; + action[prependParamField(p4orch::kNexthopId)] = kNexthopId2; + actions.push_back(action); + attributes.push_back(swss::FieldValueTuple{p4orch::kActions, actions.dump()}); + Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); + Drain(); + std::string key = KeyGenerator::generateWcmpGroupKey(kWcmpGroupId1); + auto *wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); + EXPECT_EQ(nullptr, wcmp_group_entry_ptr); + EXPECT_FALSE(p4_oid_mapper_->existsOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, key)); + uint32_t ref_cnt; + EXPECT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, &ref_cnt)); + EXPECT_EQ(0, ref_cnt); + EXPECT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey2, &ref_cnt)); + EXPECT_EQ(0, ref_cnt); } TEST_F(WcmpManagerTest, WcmpGroupInvalidOperationInDrainFails) @@ -1137,16 +1434,17 @@ TEST_F(WcmpManagerTest, WcmpGroupCreateAndDeleteInDrainSucceeds) EXPECT_CALL(mock_sai_next_hop_group_, 
create_next_hop_group(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid2), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids{kWcmpGroupMemberOid1, kWcmpGroupMemberOid2}; + std::vector exp_create_status{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(2), ArrayEq(std::vector{3, 3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 1, kWcmpGroupOid1), + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); Drain(); std::string key = KeyGenerator::generateWcmpGroupKey(kWcmpGroupId1); auto *wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); @@ -1158,10 +1456,13 @@ TEST_F(WcmpManagerTest, WcmpGroupCreateAndDeleteInDrainSucceeds) EXPECT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey2, &ref_cnt)); EXPECT_EQ(1, ref_cnt); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - 
.WillOnce(Return(SAI_STATUS_SUCCESS)); + std::vector exp_remove_status{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members( + Eq(2), ArrayEq(std::vector{kWcmpGroupMemberOid2, kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group(Eq(kWcmpGroupOid1))) .WillOnce(Return(SAI_STATUS_SUCCESS)); attributes.clear(); @@ -1195,11 +1496,16 @@ TEST_F(WcmpManagerTest, WcmpGroupCreateAndUpdateInDrainSucceeds) Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); EXPECT_CALL(mock_sai_next_hop_group_, create_next_hop_group(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids{kWcmpGroupMemberOid1}; + std::vector exp_create_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); Drain(); std::string key = KeyGenerator::generateWcmpGroupKey(kWcmpGroupId1); auto *wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); @@ -1215,13 +1521,22 @@ TEST_F(WcmpManagerTest, WcmpGroupCreateAndUpdateInDrainSucceeds) // 
Update WCMP group with exact same members, the same entry will be removed // and created again Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid3), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); + return_oids = {kWcmpGroupMemberOid3}; + exp_create_status = {SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); Drain(); wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); EXPECT_NE(nullptr, wcmp_group_entry_ptr); @@ -1239,13 +1554,22 @@ TEST_F(WcmpManagerTest, WcmpGroupCreateAndUpdateInDrainSucceeds) attributes.clear(); attributes.push_back(swss::FieldValueTuple{p4orch::kActions, actions.dump()}); Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - 
Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid2), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid3))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); + return_oids = {kWcmpGroupMemberOid2}; + exp_create_status = {SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); + exp_remove_status = {SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid3}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); Drain(); wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); EXPECT_NE(nullptr, wcmp_group_entry_ptr); @@ -1264,13 +1588,22 @@ TEST_F(WcmpManagerTest, WcmpGroupCreateAndUpdateInDrainSucceeds) attributes.clear(); attributes.push_back(swss::FieldValueTuple{p4orch::kActions, actions.dump()}); Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid4), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, 
remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); + return_oids = {kWcmpGroupMemberOid4}; + exp_create_status = {SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 2, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); + exp_remove_status = {SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid2}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); Drain(); wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); EXPECT_NE(nullptr, wcmp_group_entry_ptr); @@ -1402,13 +1735,27 @@ TEST_F(WcmpManagerTest, DeserializeWcmpGroupFailsWithUndefinedAttributes) TEST_F(WcmpManagerTest, ValidateWcmpGroupEntryWithInvalidWatchportAttributeFails) { - P4WcmpGroupEntry app_db_entry = {.wcmp_group_id = kWcmpGroupId1, .wcmp_group_members = {}}; - std::shared_ptr gm = - createWcmpGroupMemberEntryWithWatchport(kNexthopId1, 1, "EthernetXX", kWcmpGroupId1, kNexthopOid1); - app_db_entry.wcmp_group_members.push_back(gm); - EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ProcessAddRequest(&app_db_entry)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(gm, false, 0)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(gm, false, 0)); + const std::string kKeyPrefix = std::string(APP_P4RT_WCMP_GROUP_TABLE_NAME) + kTableKeyDelimiter; + p4_oid_mapper_->setOID(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, kNexthopOid1); + nlohmann::json j; + j[prependMatchField(p4orch::kWcmpGroupId)] = 
kWcmpGroupId1; + std::vector attributes; + nlohmann::json actions; + nlohmann::json action; + action[p4orch::kAction] = p4orch::kSetNexthopId; + action[p4orch::kWatchPort] = "EthernetXX"; + action[prependParamField(p4orch::kNexthopId)] = kNexthopId1; + actions.push_back(action); + attributes.push_back(swss::FieldValueTuple{p4orch::kActions, actions.dump()}); + Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); + Drain(); + std::string key = KeyGenerator::generateWcmpGroupKey(kWcmpGroupId1); + auto *wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); + EXPECT_EQ(nullptr, wcmp_group_entry_ptr); + EXPECT_FALSE(p4_oid_mapper_->existsOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, key)); + uint32_t ref_cnt; + EXPECT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, &ref_cnt)); + EXPECT_EQ(0, ref_cnt); } TEST_F(WcmpManagerTest, PruneNextHopSucceeds) @@ -1417,13 +1764,13 @@ TEST_F(WcmpManagerTest, PruneNextHopSucceeds) std::string port_name = "Ethernet6"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name, true); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) .WillOnce(Return(SAI_STATUS_SUCCESS)); // Prune next hops associated with port PruneNextHops(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); } TEST_F(WcmpManagerTest, PruneNextHopFailsWithNextHopRemovalFailure) @@ -1432,13 +1779,14 @@ TEST_F(WcmpManagerTest, PruneNextHopFailsWithNextHopRemovalFailure) std::string port_name = "Ethernet6"; P4WcmpGroupEntry 
app_db_entry = AddWcmpGroupEntryWithWatchport(port_name, true); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) .WillOnce(Return(SAI_STATUS_FAILURE)); + // TODO: Expect critical state. // Prune next hops associated with port (fails) PruneNextHops(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); } TEST_F(WcmpManagerTest, RestorePrunedNextHopSucceeds) @@ -1449,7 +1797,7 @@ TEST_F(WcmpManagerTest, RestorePrunedNextHopSucceeds) std::string port_name = "Ethernet1"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); EXPECT_CALL(mock_sai_next_hop_group_, create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, @@ -1459,7 +1807,7 @@ TEST_F(WcmpManagerTest, RestorePrunedNextHopSucceeds) // Restore next hops associated with port RestorePrunedNextHops(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); } TEST_F(WcmpManagerTest, RestorePrunedNextHopFailsWithNoOidMappingForWcmpGroup) @@ -1470,12 +1818,12 @@ TEST_F(WcmpManagerTest, 
RestorePrunedNextHopFailsWithNoOidMappingForWcmpGroup) std::string port_name = "Ethernet1"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); p4_oid_mapper_->eraseOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, KeyGenerator::generateWcmpGroupKey(kWcmpGroupId1)); // TODO: Expect critical state. RestorePrunedNextHops(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); } TEST_F(WcmpManagerTest, RestorePrunedNextHopFailsWithNextHopCreationFailure) @@ -1486,7 +1834,7 @@ TEST_F(WcmpManagerTest, RestorePrunedNextHopFailsWithNextHopCreationFailure) std::string port_name = "Ethernet1"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); EXPECT_CALL(mock_sai_next_hop_group_, create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, @@ -1495,7 +1843,7 @@ TEST_F(WcmpManagerTest, RestorePrunedNextHopFailsWithNextHopCreationFailure) // TODO: Expect critical state. 
RestorePrunedNextHops(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); } TEST_F(WcmpManagerTest, CreateGroupWithWatchportFailsWithNextHopCreationFailure) @@ -1514,26 +1862,31 @@ TEST_F(WcmpManagerTest, CreateGroupWithWatchportFailsWithNextHopCreationFailure) create_next_hop_group(_, Eq(gSwitchId), Eq(1), Truly(std::bind(MatchSaiNextHopGroupAttribute, std::placeholders::_1)))) .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_FAILURE)); + std::vector return_oids{kWcmpGroupMemberOid1, SAI_NULL_OBJECT_ID}; + std::vector exp_create_status{SAI_STATUS_SUCCESS, SAI_STATUS_FAILURE}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(2), ArrayEq(std::vector{3, 3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 1, kWcmpGroupOid1), + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_FAILURE))); // Clean up created members - EXPECT_CALL(mock_sai_next_hop_group_, 
remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group(Eq(kWcmpGroupOid1))) .WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessAddRequest(&app_db_entry)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(gm1, false, 0)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(gm2, false, 0)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(gm1, false, 0)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(gm2, false, 0)); + EXPECT_FALSE(gm1->pruned); + EXPECT_FALSE(gm2->pruned); } TEST_F(WcmpManagerTest, RemoveWcmpGroupAfterPruningSucceeds) @@ -1542,12 +1895,12 @@ TEST_F(WcmpManagerTest, RemoveWcmpGroupAfterPruningSucceeds) std::string port_name = "Ethernet6"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name, true); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) .WillOnce(Return(SAI_STATUS_SUCCESS)); PruneNextHops(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); // Remove Wcmp group. No SAI call for member removal is expected as it is // already pruned. 
@@ -1555,7 +1908,6 @@ TEST_F(WcmpManagerTest, RemoveWcmpGroupAfterPruningSucceeds) .WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, RemoveWcmpGroup(kWcmpGroupId1)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], false, 0)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); } TEST_F(WcmpManagerTest, RemoveWcmpGroupWithOperationallyDownWatchportSucceeds) @@ -1565,7 +1917,29 @@ TEST_F(WcmpManagerTest, RemoveWcmpGroupWithOperationallyDownWatchportSucceeds) // directly added to the pruned set of WCMP group members. P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport("Ethernet1"); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); + + // Remove Wcmp group. No SAI call for member removal is expected as it is + // already pruned. + EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group(Eq(kWcmpGroupOid1))) + .WillOnce(Return(SAI_STATUS_SUCCESS)); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, RemoveWcmpGroup(kWcmpGroupId1)); + EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], false, 0)); +} + +TEST_F(WcmpManagerTest, RemoveNextHopWithPrunedMember) +{ + // Add member with operationally down watch port. Since associated watchport + // is operationally down, member will not be created in SAI but will be + // directly added to the pruned set of WCMP group members. + P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport("Ethernet1"); + EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); + + // Verify that next hop reference count is incremented due to the member. 
+ uint32_t ref_cnt; + EXPECT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, &ref_cnt)); + EXPECT_EQ(1, ref_cnt); // Remove Wcmp group. No SAI call for member removal is expected as it is // already pruned. @@ -1573,7 +1947,81 @@ TEST_F(WcmpManagerTest, RemoveWcmpGroupWithOperationallyDownWatchportSucceeds) .WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, RemoveWcmpGroup(kWcmpGroupId1)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], false, 0)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); + + // Verify that the next hop reference count is now 0. + EXPECT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, &ref_cnt)); + EXPECT_EQ(0, ref_cnt); +} + +TEST_F(WcmpManagerTest, RemoveNextHopWithRestoredPrunedMember) +{ + // Add member with operationally down watch port. Since associated watchport + // is operationally down, member will not be created in SAI but will be + // directly added to the pruned set of WCMP group members. + std::string port_name = "Ethernet1"; + P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name); + EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); + + // Verify that next hop reference count is incremented due to the member. + uint32_t ref_cnt; + EXPECT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, &ref_cnt)); + EXPECT_EQ(1, ref_cnt); + + // Restore member associated with port. 
+ EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), + Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, + kWcmpGroupOid1, std::placeholders::_1)))) + .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); + RestorePrunedNextHops(port_name); + EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); + + // Verify that next hop reference count remains the same after restore. + EXPECT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, &ref_cnt)); + EXPECT_EQ(1, ref_cnt); + + // Remove Wcmp group. + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group(Eq(kWcmpGroupOid1))) + .WillOnce(Return(SAI_STATUS_SUCCESS)); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, RemoveWcmpGroup(kWcmpGroupId1)); + EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], false, 0)); + + // Verify that the next hop reference count is now 0. + EXPECT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, &ref_cnt)); + EXPECT_EQ(0, ref_cnt); +} + +TEST_F(WcmpManagerTest, VerifyNextHopRefCountWhenMemberPruned) +{ + // Add member with operationally up watch port + std::string port_name = "Ethernet6"; + P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name, true); + EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); + + // Verify that next hop reference count is incremented due to the member. 
+ uint32_t ref_cnt; + EXPECT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, &ref_cnt)); + EXPECT_EQ(1, ref_cnt); + + // Prune member associated with port. + EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) + .WillOnce(Return(SAI_STATUS_SUCCESS)); + PruneNextHops(port_name); + EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); + + // Verify that next hop reference count does not change on pruning. + EXPECT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, &ref_cnt)); + EXPECT_EQ(1, ref_cnt); } TEST_F(WcmpManagerTest, UpdateWcmpGroupWithOperationallyUpWatchportMemberSucceeds) @@ -1582,7 +2030,7 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupWithOperationallyUpWatchportMemberSucceed std::string port_name = "Ethernet6"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name, true); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); // Update WCMP group to remove kNexthopId1 and add kNexthopId2 P4WcmpGroupEntry updated_app_db_entry; @@ -1590,18 +2038,27 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupWithOperationallyUpWatchportMemberSucceed std::shared_ptr updated_gm = createWcmpGroupMemberEntryWithWatchport(kNexthopId2, 1, port_name, kWcmpGroupId1, kNexthopOid2); updated_app_db_entry.wcmp_group_members.push_back(updated_gm); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid2), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, 
remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); + std::vector return_oids{kWcmpGroupMemberOid2}; + std::vector exp_create_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessUpdateRequest(&updated_app_db_entry)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], false, 1)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(updated_gm, true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(updated_gm, false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); + EXPECT_FALSE(updated_gm->pruned); } TEST_F(WcmpManagerTest, UpdateWcmpGroupWithOperationallyDownWatchportMemberSucceeds) @@ -1612,7 +2069,7 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupWithOperationallyDownWatchportMemberSucce std::string port_name = "Ethernet1"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + 
EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); // Update WCMP group to remove kNexthopId1 and add kNexthopId2. No SAI calls // are expected as the associated watch port is operationally down. @@ -1624,8 +2081,7 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupWithOperationallyDownWatchportMemberSucce EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessUpdateRequest(&updated_app_db_entry)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], false, 1)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(updated_gm, true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(updated_gm, true, 1)); + EXPECT_TRUE(updated_gm->pruned); } TEST_F(WcmpManagerTest, PruneAfterWcmpGroupUpdateSucceeds) @@ -1634,7 +2090,7 @@ TEST_F(WcmpManagerTest, PruneAfterWcmpGroupUpdateSucceeds) std::string port_name = "Ethernet6"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name, true); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); // Update WCMP group to modify weight of kNexthopId1. 
P4WcmpGroupEntry updated_app_db_entry; @@ -1642,25 +2098,33 @@ TEST_F(WcmpManagerTest, PruneAfterWcmpGroupUpdateSucceeds) std::shared_ptr updated_gm = createWcmpGroupMemberEntryWithWatchport(kNexthopId1, 10, port_name, kWcmpGroupId1, kNexthopOid1); updated_app_db_entry.wcmp_group_members.push_back(updated_gm); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 10, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids{kWcmpGroupMemberOid1}; + std::vector exp_create_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 10, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessUpdateRequest(&updated_app_db_entry)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], false, 1)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(updated_app_db_entry.wcmp_group_members[0], true, 1)); - 
EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(updated_app_db_entry.wcmp_group_members[0], false, 0)); + EXPECT_FALSE(updated_app_db_entry.wcmp_group_members[0]->pruned); // Prune members associated with port. EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) .WillOnce(Return(SAI_STATUS_SUCCESS)); PruneNextHops(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(updated_app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(updated_app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(updated_app_db_entry.wcmp_group_members[0]->pruned); // Remove Wcmp group. No SAI call for member removal is expected as it is // already pruned. @@ -1671,7 +2135,6 @@ TEST_F(WcmpManagerTest, PruneAfterWcmpGroupUpdateSucceeds) .WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, RemoveWcmpGroup(kWcmpGroupId1)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(updated_app_db_entry.wcmp_group_members[0], false, 0)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(updated_app_db_entry.wcmp_group_members[0], false, 0)); } TEST_F(WcmpManagerTest, PrunedMemberUpdateOnRestoreSucceeds) @@ -1682,7 +2145,7 @@ TEST_F(WcmpManagerTest, PrunedMemberUpdateOnRestoreSucceeds) std::string port_name = "Ethernet1"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); // Update WCMP group to modify weight of kNexthopId1. 
P4WcmpGroupEntry updated_app_db_entry; @@ -1693,8 +2156,7 @@ TEST_F(WcmpManagerTest, PrunedMemberUpdateOnRestoreSucceeds) EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessUpdateRequest(&updated_app_db_entry)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], false, 1)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(updated_app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(updated_app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(updated_app_db_entry.wcmp_group_members[0]->pruned); // Restore members associated with port. // Verify that the weight of the restored member is updated. @@ -1705,7 +2167,7 @@ TEST_F(WcmpManagerTest, PrunedMemberUpdateOnRestoreSucceeds) .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); RestorePrunedNextHops(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(updated_app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(updated_app_db_entry.wcmp_group_members[0], false, 0)); + EXPECT_FALSE(updated_app_db_entry.wcmp_group_members[0]->pruned); } TEST_F(WcmpManagerTest, UpdateWcmpGroupWithOperationallyUpWatchportMemberFailsWithMemberRemovalFailure) @@ -1714,7 +2176,7 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupWithOperationallyUpWatchportMemberFailsWi std::string port_name = "Ethernet6"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name, true); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); // Update WCMP group to remove kNexthopId1(fails) and add kNexthopId2 P4WcmpGroupEntry updated_app_db_entry; @@ -1725,61 +2187,96 @@ TEST_F(WcmpManagerTest, 
UpdateWcmpGroupWithOperationallyUpWatchportMemberFailsWi createWcmpGroupMemberEntryWithWatchport(kNexthopId1, 1, port_name, kWcmpGroupId1, kNexthopOid1); updated_app_db_entry.wcmp_group_members.push_back(updated_gm1); updated_app_db_entry.wcmp_group_members.push_back(updated_gm2); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid4), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 10, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_INSUFFICIENT_RESOURCES)); + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids_4{kWcmpGroupMemberOid4}; + std::vector return_oids_null{SAI_NULL_OBJECT_ID}; + std::vector exp_create_status{SAI_STATUS_SUCCESS}; + std::vector exp_create_status_fail{SAI_STATUS_INSUFFICIENT_RESOURCES}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_4.begin(), return_oids_4.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + 
Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 10, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_null.begin(), return_oids_null.end()), + SetArrayArgument<6>(exp_create_status_fail.begin(), exp_create_status_fail.end()), + Return(SAI_STATUS_INSUFFICIENT_RESOURCES))); // Clean up created member-succeeds - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid4))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_EQ(StatusCode::SWSS_RC_FULL, ProcessUpdateRequest(&updated_app_db_entry)); + std::vector return_oids_1{kWcmpGroupMemberOid1}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_1.begin(), return_oids_1.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid4}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessUpdateRequest(&updated_app_db_entry)); 
EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(updated_gm2, false, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(updated_gm2, false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); + EXPECT_FALSE(updated_gm2->pruned); // Update again, this time clean up fails - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid4), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 10, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_INSUFFICIENT_RESOURCES)); + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_4.begin(), return_oids_4.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), 
ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 10, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_null.begin(), return_oids_null.end()), + SetArrayArgument<6>(exp_create_status_fail.begin(), exp_create_status_fail.end()), + Return(SAI_STATUS_INSUFFICIENT_RESOURCES))); // Clean up created member(fails) - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid4))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_INSUFFICIENT_RESOURCES))); + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid4}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_null.begin(), return_oids_null.end()), + SetArrayArgument<6>(exp_create_status_fail.begin(), exp_create_status_fail.end()), + Return(SAI_STATUS_INSUFFICIENT_RESOURCES))); // TODO: Expect critical state. 
- EXPECT_EQ("Failed to create next hop group member " - "'ju1u32m2.atl11:qe-3/7'", + EXPECT_EQ("Fail to create wcmp group member: 'ju1u32m2.atl11:qe-3/7'", ProcessUpdateRequest(&updated_app_db_entry).message()); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], false, 0)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(updated_gm2, false, 0)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(updated_gm2, false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); + EXPECT_FALSE(updated_gm2->pruned); } TEST_F(WcmpManagerTest, WatchportStateChangetoOperDownSucceeds) @@ -1788,7 +2285,7 @@ TEST_F(WcmpManagerTest, WatchportStateChangetoOperDownSucceeds) std::string port_name = "Ethernet6"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name, true); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); // Send port down signal // Verify that the next hop member associated with the port is pruned. 
@@ -1799,7 +2296,7 @@ TEST_F(WcmpManagerTest, WatchportStateChangetoOperDownSucceeds) .WillOnce(Return(SAI_STATUS_SUCCESS)); HandlePortStatusChangeNotification(op, data); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); } TEST_F(WcmpManagerTest, WatchportStateChangeToOperUpSucceeds) @@ -1810,7 +2307,7 @@ TEST_F(WcmpManagerTest, WatchportStateChangeToOperUpSucceeds) std::string port_name = "Ethernet1"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); // Send port up signal. // Verify that the pruned next hop member associated with the port is @@ -1825,7 +2322,7 @@ TEST_F(WcmpManagerTest, WatchportStateChangeToOperUpSucceeds) .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); HandlePortStatusChangeNotification(op, data); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); } TEST_F(WcmpManagerTest, WatchportStateChangeFromOperUnknownToDownPrunesMemberOnlyOnceSuceeds) @@ -1836,7 +2333,7 @@ TEST_F(WcmpManagerTest, WatchportStateChangeFromOperUnknownToDownPrunesMemberOnl std::string port_name = "Ethernet1"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + 
EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); // Send port down signal. // Verify that the pruned next hop member is not pruned again. @@ -1845,7 +2342,7 @@ TEST_F(WcmpManagerTest, WatchportStateChangeFromOperUnknownToDownPrunesMemberOnl "STATUS_DOWN\"}]"; HandlePortStatusChangeNotification(op, data); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); } TEST_F(WcmpManagerTest, VerifyStateTest) diff --git a/orchagent/p4orch/wcmp_manager.cpp b/orchagent/p4orch/wcmp_manager.cpp index 96c0a12eb3..81c373b16f 100644 --- a/orchagent/p4orch/wcmp_manager.cpp +++ b/orchagent/p4orch/wcmp_manager.cpp @@ -1,5 +1,6 @@ #include "p4orch/wcmp_manager.h" +#include #include #include #include @@ -7,7 +8,6 @@ #include "SaiAttributeList.h" #include "crmorch.h" #include "dbconnector.h" -#include "json.hpp" #include "logger.h" #include "p4orch/p4orch_util.h" #include "portsorch.h" @@ -24,6 +24,7 @@ extern sai_object_id_t gSwitchId; extern sai_next_hop_group_api_t *sai_next_hop_group_api; extern CrmOrch *gCrmOrch; extern PortsOrch *gPortsOrch; +extern size_t gMaxBulkSize; namespace p4orch { @@ -51,6 +52,17 @@ std::vector getSaiGroupAttrs(const P4WcmpGroupEntry &wcmp_group } // namespace +WcmpManager::WcmpManager(P4OidMapper *p4oidMapper, ResponsePublisherInterface *publisher) + : gNextHopGroupMemberBulker(sai_next_hop_group_api, gSwitchId, gMaxBulkSize) +{ + SWSS_LOG_ENTER(); + + assert(p4oidMapper != nullptr); + m_p4OidMapper = p4oidMapper; + assert(publisher != nullptr); + m_publisher = publisher; +} + std::vector WcmpManager::getSaiMemberAttrs(const P4WcmpGroupMemberEntry &wcmp_member_entry, const sai_object_id_t group_oid) { @@ -165,6 +177,7 @@ ReturnCodeOr WcmpManager::deserializeP4WcmpGroupAppDbEntry( wcmp_group_member->watch_port = action_item[kWatchPort]; } 
wcmp_group_member->wcmp_group_id = app_db_entry.wcmp_group_id; + wcmp_group_member->pruned = false; app_db_entry.wcmp_group_members.push_back(wcmp_group_member); } } @@ -196,14 +209,7 @@ P4WcmpGroupEntry *WcmpManager::getWcmpGroupEntry(const std::string &wcmp_group_i ReturnCode WcmpManager::processAddRequest(P4WcmpGroupEntry *app_db_entry) { SWSS_LOG_ENTER(); - auto status = validateWcmpGroupEntry(*app_db_entry); - if (!status.ok()) - { - SWSS_LOG_ERROR("Invalid WCMP group with id %s: %s", QuotedVar(app_db_entry->wcmp_group_id).c_str(), - status.message().c_str()); - return status; - } - status = createWcmpGroup(app_db_entry); + auto status = createWcmpGroup(app_db_entry); if (!status.ok()) { SWSS_LOG_ERROR("Failed to create WCMP group with id %s: %s", QuotedVar(app_db_entry->wcmp_group_id).c_str(), @@ -223,14 +229,10 @@ ReturnCode WcmpManager::createWcmpGroupMember(std::shared_ptrnext_hop_id)); // Update reference count - const auto &next_hop_key = KeyGenerator::generateNextHopKey(wcmp_group_member->next_hop_id); m_p4OidMapper->setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER, getWcmpGroupMemberKey(wcmp_group_key, wcmp_group_member->member_oid), wcmp_group_member->member_oid); - gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); - m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP, next_hop_key); m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, wcmp_group_key); - return ReturnCode(); } @@ -277,87 +279,81 @@ ReturnCode WcmpManager::fetchPortOperStatus(const std::string &port_name, sai_po return ReturnCode(); } -ReturnCode WcmpManager::createWcmpGroupMemberWithWatchport(P4WcmpGroupEntry *wcmp_group, - std::shared_ptr member, - const std::string &wcmp_group_key) +ReturnCode WcmpManager::processWcmpGroupMembersAddition( + const std::vector> &members, const std::string &wcmp_group_key, + sai_object_id_t wcmp_group_oid, std::vector> &created_wcmp_group_members) { - // Create member in SAI only for operationally up ports - 
sai_port_oper_status_t oper_status = SAI_PORT_OPER_STATUS_DOWN; - auto status = fetchPortOperStatus(member->watch_port, &oper_status); - if (!status.ok()) - { - return status; - } - - if (oper_status == SAI_PORT_OPER_STATUS_UP) + SWSS_LOG_ENTER(); + ReturnCode status; + vector nhgm_ids(members.size(), SAI_NULL_OBJECT_ID); + for (size_t i = 0; i < members.size(); ++i) { - auto status = createWcmpGroupMember(member, wcmp_group->wcmp_group_oid, wcmp_group_key); - if (!status.ok()) + bool insert_member = true; + auto &member = members[i]; + if (!member->watch_port.empty()) { - SWSS_LOG_ERROR("Failed to create next hop member %s with watch_port %s", member->next_hop_id.c_str(), - member->watch_port.c_str()); - return status; - } - } - else - { - pruned_wcmp_members_set.emplace(member); - SWSS_LOG_NOTICE("Member %s in group %s not created in asic as the associated watchport " - "(%s) is not operationally up", - member->next_hop_id.c_str(), member->wcmp_group_id.c_str(), member->watch_port.c_str()); - } - // Add member to port_name_to_wcmp_group_member_map - insertMemberInPortNameToWcmpGroupMemberMap(member); - return ReturnCode(); -} + // Create member in SAI only for operationally up ports + sai_port_oper_status_t oper_status = SAI_PORT_OPER_STATUS_DOWN; + status = fetchPortOperStatus(member->watch_port, &oper_status); + if (!status.ok()) + { + break; + } -ReturnCode WcmpManager::processWcmpGroupMemberAddition(std::shared_ptr member, - P4WcmpGroupEntry *wcmp_group, const std::string &wcmp_group_key) -{ - ReturnCode status = ReturnCode(); - if (!member->watch_port.empty()) - { - status = createWcmpGroupMemberWithWatchport(wcmp_group, member, wcmp_group_key); - if (!status.ok()) - { - SWSS_LOG_ERROR("Failed to create WCMP group member %s with watch_port %s", member->next_hop_id.c_str(), - member->watch_port.c_str()); + if (oper_status != SAI_PORT_OPER_STATUS_UP) + { + insert_member = false; + member->pruned = true; + SWSS_LOG_NOTICE("Member %s in group %s not created in 
asic as the associated " + "watchport " + "(%s) is not operationally up", + member->next_hop_id.c_str(), member->wcmp_group_id.c_str(), member->watch_port.c_str()); + } } - } - else - { - status = createWcmpGroupMember(member, wcmp_group->wcmp_group_oid, wcmp_group_key); - if (!status.ok()) + if (insert_member) { - SWSS_LOG_ERROR("Failed to create WCMP group member %s", member->next_hop_id.c_str()); + auto attrs = getSaiMemberAttrs(*(member.get()), wcmp_group_oid); + gNextHopGroupMemberBulker.create_entry(&nhgm_ids[i], (uint32_t)attrs.size(), attrs.data()); } } - return status; -} - -ReturnCode WcmpManager::processWcmpGroupMemberRemoval(std::shared_ptr member, - const std::string &wcmp_group_key) -{ - // If member exists in pruned_wcmp_members_set, remove from set. Else, remove - // member using SAI. - auto it = pruned_wcmp_members_set.find(member); - if (it != pruned_wcmp_members_set.end()) - { - pruned_wcmp_members_set.erase(it); - SWSS_LOG_NOTICE("Removed pruned member %s from group %s", member->next_hop_id.c_str(), - member->wcmp_group_id.c_str()); - } - else + if (status.ok()) { - auto status = removeWcmpGroupMember(member, wcmp_group_key); - if (!status.ok()) + gNextHopGroupMemberBulker.flush(); + for (size_t i = 0; i < members.size(); ++i) { - return status; + auto &member = members[i]; + if (!member->pruned) + { + if (nhgm_ids[i] == SAI_NULL_OBJECT_ID) + { + if (status.ok()) + { + status = ReturnCode(StatusCode::SWSS_RC_UNKNOWN) + << "Fail to create wcmp group member: " << QuotedVar(member->next_hop_id); + } + else + { + status << "; Fail to create wcmp group member: " << QuotedVar(member->next_hop_id); + } + continue; + } + member->member_oid = nhgm_ids[i]; + m_p4OidMapper->setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER, + getWcmpGroupMemberKey(wcmp_group_key, member->member_oid), member->member_oid); + m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, wcmp_group_key); + } + if (!member->watch_port.empty()) + { + // Add member to 
port_name_to_wcmp_group_member_map + insertMemberInPortNameToWcmpGroupMemberMap(member); + } + const std::string &next_hop_key = KeyGenerator::generateNextHopKey(member->next_hop_id); + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); + m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP, next_hop_key); + created_wcmp_group_members.push_back(member); } } - // Remove member from port_name_to_wcmp_group_member_map - removeMemberFromPortNameToWcmpGroupMemberMap(member); - return ReturnCode(); + return status; } ReturnCode WcmpManager::createWcmpGroup(P4WcmpGroupEntry *wcmp_group) @@ -375,16 +371,8 @@ ReturnCode WcmpManager::createWcmpGroup(P4WcmpGroupEntry *wcmp_group) // Create next hop group members std::vector> created_wcmp_group_members; - ReturnCode status; - for (auto &wcmp_group_member : wcmp_group->wcmp_group_members) - { - status = processWcmpGroupMemberAddition(wcmp_group_member, wcmp_group, wcmp_group_key); - if (!status.ok()) - { - break; - } - created_wcmp_group_members.push_back(wcmp_group_member); - } + ReturnCode status = processWcmpGroupMembersAddition(wcmp_group->wcmp_group_members, wcmp_group_key, + wcmp_group->wcmp_group_oid, created_wcmp_group_members); if (!status.ok()) { // Clean up created group members and the group @@ -401,6 +389,7 @@ ReturnCode WcmpManager::createWcmpGroup(P4WcmpGroupEntry *wcmp_group) m_p4OidMapper->eraseOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, wcmp_group_key); return status; } + m_wcmpGroupTable[wcmp_group->wcmp_group_id] = *wcmp_group; return ReturnCode(); } @@ -410,33 +399,28 @@ void WcmpManager::recoverGroupMembers( const std::vector> &created_wcmp_group_members, const std::vector> &removed_wcmp_group_members) { - // Keep track of recovery status during clean up + SWSS_LOG_ENTER(); + std::vector> members; ReturnCode recovery_status; // Clean up created group members - remove created new members - for (const auto &new_member : created_wcmp_group_members) + if (created_wcmp_group_members.size() 
!= 0) { - auto status = processWcmpGroupMemberRemoval(new_member, wcmp_group_key); - if (!status.ok()) - { - SWSS_LOG_ERROR("Failed to remove created next hop group member %s in " - "processUpdateRequest().", - QuotedVar(new_member->next_hop_id).c_str()); - recovery_status.ok() ? recovery_status = status.prepend("Error during recovery: ") - : recovery_status << "; Error during recovery: " << status.message(); - } + recovery_status = processWcmpGroupMembersRemoval(created_wcmp_group_members, wcmp_group_key, members) + .prepend("Error during recovery: "); } + // Clean up removed group members - create removed old members - for (auto &old_member : removed_wcmp_group_members) + if (recovery_status.ok() && removed_wcmp_group_members.size() != 0) { - auto status = processWcmpGroupMemberAddition(old_member, wcmp_group_entry, wcmp_group_key); - if (!status.ok()) - { - recovery_status.ok() ? recovery_status = status.prepend("Error during recovery: ") - : recovery_status << "; Error during recovery: " << status.message(); - } + recovery_status = processWcmpGroupMembersAddition(removed_wcmp_group_members, wcmp_group_key, + wcmp_group_entry->wcmp_group_oid, members) + .prepend("Error during recovery: "); } + if (!recovery_status.ok()) + { SWSS_RAISE_CRITICAL_STATE(recovery_status.message()); + } } ReturnCode WcmpManager::processUpdateRequest(P4WcmpGroupEntry *wcmp_group_entry) @@ -459,93 +443,88 @@ ReturnCode WcmpManager::processUpdateRequest(P4WcmpGroupEntry *wcmp_group_entry) // 5. Make SAI call to remove the reserved old member // 6. 
Make SAI calls to create remaining new members ReturnCode update_request_status; - auto find_smallest_index = [&](p4orch::P4WcmpGroupEntry *wcmp) { + auto find_smallest_index = [&](p4orch::P4WcmpGroupEntry *wcmp, + std::vector> &other_members) -> int { + other_members.clear(); if (wcmp->wcmp_group_members.empty()) + { return -1; + } int reserved_idx = 0; for (int i = 1; i < (int)wcmp->wcmp_group_members.size(); i++) { if (wcmp->wcmp_group_members[i]->weight < wcmp->wcmp_group_members[reserved_idx]->weight) { + other_members.push_back(wcmp->wcmp_group_members[reserved_idx]); reserved_idx = i; } + else + { + other_members.push_back(wcmp->wcmp_group_members[i]); + } } return reserved_idx; }; // Find the old member who has the smallest weight, -1 if the member list is // empty - int reserved_old_member_index = find_smallest_index(old_wcmp); + std::vector> other_old_members; + int reserved_old_member_index = find_smallest_index(old_wcmp, other_old_members); // Find the new member who has the smallest weight, -1 if the member list is // empty - int reserved_new_member_index = find_smallest_index(wcmp_group_entry); + std::vector> other_new_members; + int reserved_new_member_index = find_smallest_index(wcmp_group_entry, other_new_members); // Remove stale group members except the member with the smallest weight - for (int i = 0; i < (int)old_wcmp->wcmp_group_members.size(); i++) + if (other_old_members.size() != 0) { - // Reserve the old member with smallest weight - if (i == reserved_old_member_index) - continue; - auto &stale_member = old_wcmp->wcmp_group_members[i]; - update_request_status = processWcmpGroupMemberRemoval(stale_member, wcmp_group_key); + update_request_status = + processWcmpGroupMembersRemoval(other_old_members, wcmp_group_key, removed_wcmp_group_members); if (!update_request_status.ok()) { - SWSS_LOG_ERROR("Failed to remove stale next hop group member %s in " - "processUpdateRequest().", - 
QuotedVar(sai_serialize_object_id(stale_member->member_oid)).c_str()); recoverGroupMembers(wcmp_group_entry, wcmp_group_key, created_wcmp_group_members, removed_wcmp_group_members); return update_request_status; } - removed_wcmp_group_members.push_back(stale_member); } // Create the new member with the smallest weight if member list is nonempty - if (!wcmp_group_entry->wcmp_group_members.empty()) + if (reserved_new_member_index != -1) { - auto &member = wcmp_group_entry->wcmp_group_members[reserved_new_member_index]; - update_request_status = processWcmpGroupMemberAddition(member, wcmp_group_entry, wcmp_group_key); + update_request_status = processWcmpGroupMembersAddition( + {wcmp_group_entry->wcmp_group_members[reserved_new_member_index]}, wcmp_group_key, + wcmp_group_entry->wcmp_group_oid, created_wcmp_group_members); if (!update_request_status.ok()) { recoverGroupMembers(wcmp_group_entry, wcmp_group_key, created_wcmp_group_members, removed_wcmp_group_members); return update_request_status; } - created_wcmp_group_members.push_back(member); } // Remove the old member with the smallest weight if member list is nonempty - if (!old_wcmp->wcmp_group_members.empty()) + if (reserved_old_member_index != -1) { - auto &stale_member = old_wcmp->wcmp_group_members[reserved_old_member_index]; - update_request_status = processWcmpGroupMemberRemoval(stale_member, wcmp_group_key); + update_request_status = processWcmpGroupMembersRemoval( + {old_wcmp->wcmp_group_members[reserved_old_member_index]}, wcmp_group_key, removed_wcmp_group_members); if (!update_request_status.ok()) { - SWSS_LOG_ERROR("Failed to remove stale next hop group member %s in " - "processUpdateRequest().", - QuotedVar(sai_serialize_object_id(stale_member->member_oid)).c_str()); recoverGroupMembers(wcmp_group_entry, wcmp_group_key, created_wcmp_group_members, removed_wcmp_group_members); return update_request_status; } - removed_wcmp_group_members.push_back(stale_member); } // Create new group members - for (int 
i = 0; i < (int)wcmp_group_entry->wcmp_group_members.size(); i++) + if (other_new_members.size() != 0) { - // Skip the new member with the lowest weight as it is already created - if (i == reserved_new_member_index) - continue; - auto &member = wcmp_group_entry->wcmp_group_members[i]; - // Create new group member - update_request_status = processWcmpGroupMemberAddition(member, wcmp_group_entry, wcmp_group_key); + update_request_status = processWcmpGroupMembersAddition( + other_new_members, wcmp_group_key, wcmp_group_entry->wcmp_group_oid, created_wcmp_group_members); if (!update_request_status.ok()) { recoverGroupMembers(wcmp_group_entry, wcmp_group_key, created_wcmp_group_members, removed_wcmp_group_members); return update_request_status; } - created_wcmp_group_members.push_back(member); } m_wcmpGroupTable[wcmp_group_entry->wcmp_group_id] = *wcmp_group_entry; @@ -556,19 +535,72 @@ ReturnCode WcmpManager::removeWcmpGroupMember(const std::shared_ptrnext_hop_id); CHECK_ERROR_AND_LOG_AND_RETURN(sai_next_hop_group_api->remove_next_hop_group_member(wcmp_group_member->member_oid), "Failed to remove WCMP group member with nexthop id " << QuotedVar(wcmp_group_member->next_hop_id)); m_p4OidMapper->eraseOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER, getWcmpGroupMemberKey(wcmp_group_key, wcmp_group_member->member_oid)); - gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); - m_p4OidMapper->decreaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP, next_hop_key); m_p4OidMapper->decreaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, wcmp_group_key); return ReturnCode(); } +ReturnCode WcmpManager::processWcmpGroupMembersRemoval( + const std::vector> &members, const std::string &wcmp_group_key, + std::vector> &removed_wcmp_group_members) +{ + SWSS_LOG_ENTER(); + ReturnCode status; + std::vector statuses(members.size(), SAI_STATUS_FAILURE); + for (size_t i = 0; i < members.size(); ++i) + { + auto &member = members[i]; + if (!member->pruned) + { + 
gNextHopGroupMemberBulker.remove_entry(&statuses[i], member->member_oid); + } + } + gNextHopGroupMemberBulker.flush(); + for (size_t i = 0; i < members.size(); ++i) + { + auto &member = members[i]; + if (member->pruned) + { + SWSS_LOG_NOTICE("Removed pruned member %s from group %s", member->next_hop_id.c_str(), + member->wcmp_group_id.c_str()); + member->pruned = false; + } + else + { + if (statuses[i] != SAI_STATUS_SUCCESS) + { + if (status.ok()) + { + status = ReturnCode(statuses[i]) + << "Failed to delete WCMP group member: " << QuotedVar(member->next_hop_id); + } + else + { + status << "; Failed to delete WCMP group member: " << QuotedVar(member->next_hop_id); + } + continue; + } + else + { + m_p4OidMapper->eraseOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER, + getWcmpGroupMemberKey(wcmp_group_key, member->member_oid)); + m_p4OidMapper->decreaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, wcmp_group_key); + } + } + const std::string &next_hop_key = KeyGenerator::generateNextHopKey(member->next_hop_id); + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); + m_p4OidMapper->decreaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP, next_hop_key); + removeMemberFromPortNameToWcmpGroupMemberMap(member); + removed_wcmp_group_members.push_back(member); + } + return status; +} + ReturnCode WcmpManager::removeWcmpGroup(const std::string &wcmp_group_id) { SWSS_LOG_ENTER(); @@ -590,18 +622,12 @@ ReturnCode WcmpManager::removeWcmpGroup(const std::string &wcmp_group_id) << wcmp_group_refcount - expected_refcount << " more objects than its group members (size=" << expected_refcount << ") referencing it."); } - std::vector> removed_wcmp_group_members; - ReturnCode status; + // Delete group members - for (const auto &member : wcmp_group->wcmp_group_members) - { - status = processWcmpGroupMemberRemoval(member, wcmp_group_key); - if (!status.ok()) - { - break; - } - removed_wcmp_group_members.push_back(member); - } + std::vector> removed_wcmp_group_members; + ReturnCode status = 
+ processWcmpGroupMembersRemoval(wcmp_group->wcmp_group_members, wcmp_group_key, removed_wcmp_group_members); + // Delete group if (status.ok()) { @@ -631,24 +657,22 @@ void WcmpManager::pruneNextHops(const std::string &port) { for (const auto &member : port_name_to_wcmp_group_member_map[port]) { - auto it = pruned_wcmp_members_set.find(member); // Prune a member if it is not already pruned. - if (it == pruned_wcmp_members_set.end()) + if (!member->pruned) { const auto &wcmp_group_key = KeyGenerator::generateWcmpGroupKey(member->wcmp_group_id); auto status = removeWcmpGroupMember(member, wcmp_group_key); if (!status.ok()) { - SWSS_LOG_NOTICE("Failed to remove member %s from group %s, rv: %s", member->next_hop_id.c_str(), - member->wcmp_group_id.c_str(), status.message().c_str()); - } - else - { - // Add pruned member to pruned set - pruned_wcmp_members_set.emplace(member); - SWSS_LOG_NOTICE("Pruned member %s from group %s", member->next_hop_id.c_str(), - member->wcmp_group_id.c_str()); + std::stringstream msg; + msg << "Failed to prune member " << member->next_hop_id << " from group " << member->wcmp_group_id + << ": " << status.message(); + SWSS_RAISE_CRITICAL_STATE(msg.str()); + return; } + member->pruned = true; + SWSS_LOG_NOTICE("Pruned member %s from group %s", member->next_hop_id.c_str(), + member->wcmp_group_id.c_str()); } } } @@ -665,8 +689,7 @@ void WcmpManager::restorePrunedNextHops(const std::string &port) ReturnCode status; for (auto member : port_name_to_wcmp_group_member_map[port]) { - auto it = pruned_wcmp_members_set.find(member); - if (it != pruned_wcmp_members_set.end()) + if (member->pruned) { const auto &wcmp_group_key = KeyGenerator::generateWcmpGroupKey(member->wcmp_group_id); sai_object_id_t wcmp_group_oid = SAI_NULL_OBJECT_ID; @@ -688,7 +711,7 @@ void WcmpManager::restorePrunedNextHops(const std::string &port) SWSS_RAISE_CRITICAL_STATE(status.message()); return; } - pruned_wcmp_members_set.erase(it); + member->pruned = false; 
SWSS_LOG_NOTICE("Restored pruned member %s in group %s", member->next_hop_id.c_str(), member->wcmp_group_id.c_str()); } @@ -711,7 +734,35 @@ void WcmpManager::updatePortOperStatusMap(const std::string &port, const sai_por port_oper_status_map[port] = status; } -void WcmpManager::enqueue(const swss::KeyOpFieldsValuesTuple &entry) +ReturnCode WcmpManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) +{ + std::string value; + + try + { + nlohmann::json j = nlohmann::json::parse(json_key); + if (j.find(prependMatchField(p4orch::kWcmpGroupId)) != j.end()) + { + value = j.at(prependMatchField(p4orch::kWcmpGroupId)).get(); + object_key = KeyGenerator::generateWcmpGroupKey(value); + object_type = SAI_OBJECT_TYPE_NEXT_HOP_GROUP; + return ReturnCode(); + } + else + { + SWSS_LOG_ERROR("%s match parameter absent: required for dependent object query", p4orch::kWcmpGroupId); + } + } + catch (std::exception &ex) + { + SWSS_LOG_ERROR("json_key parse error"); + } + + return StatusCode::SWSS_RC_INVALID_PARAM; +} + +void WcmpManager::enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) { m_entries.push_back(entry); } @@ -744,6 +795,16 @@ void WcmpManager::drain() const std::string &operation = kfvOp(key_op_fvs_tuple); if (operation == SET_COMMAND) { + status = validateWcmpGroupEntry(app_db_entry); + if (!status.ok()) + { + SWSS_LOG_ERROR("Invalid WCMP group with id %s: %s", QuotedVar(app_db_entry.wcmp_group_id).c_str(), + status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), + status, + /*replace=*/true); + continue; + } auto *wcmp_group_entry = getWcmpGroupEntry(app_db_entry.wcmp_group_id); if (wcmp_group_entry == nullptr) { @@ -901,9 +962,7 @@ std::string WcmpManager::verifyStateCache(const P4WcmpGroupEntry &app_db_entry, << QuotedVar(wcmp_group_entry->wcmp_group_members[i]->wcmp_group_id) << " in wcmp manager."; return 
msg.str(); } - // Group member might not be created if it is a watch port. - if (!app_db_entry.wcmp_group_members[i]->watch_port.empty() && - wcmp_group_entry->wcmp_group_members[i]->member_oid == SAI_NULL_OBJECT_ID) + if (!app_db_entry.wcmp_group_members[i]->watch_port.empty() && wcmp_group_entry->wcmp_group_members[i]->pruned) { continue; } @@ -944,8 +1003,7 @@ std::string WcmpManager::verifyStateAsicDb(const P4WcmpGroupEntry *wcmp_group_en for (const auto &member : wcmp_group_entry->wcmp_group_members) { - // Group member might not be created if it is a watch port. - if (!member->watch_port.empty() && member->member_oid == SAI_NULL_OBJECT_ID) + if (!member->watch_port.empty() && member->pruned) { continue; } diff --git a/orchagent/p4orch/wcmp_manager.h b/orchagent/p4orch/wcmp_manager.h index d1a6e025bc..7d533bf28f 100644 --- a/orchagent/p4orch/wcmp_manager.h +++ b/orchagent/p4orch/wcmp_manager.h @@ -4,6 +4,7 @@ #include #include +#include "bulker.h" #include "notificationconsumer.h" #include "orch.h" #include "p4orch/object_manager_interface.h" @@ -28,6 +29,7 @@ struct P4WcmpGroupMemberEntry // Default ECMP(weight=1) int weight = 1; std::string watch_port; + bool pruned; sai_object_id_t member_oid = SAI_NULL_OBJECT_ID; std::string wcmp_group_id; }; @@ -63,21 +65,15 @@ struct P4WcmpGroupEntry class WcmpManager : public ObjectManagerInterface { public: - WcmpManager(P4OidMapper *p4oidMapper, ResponsePublisherInterface *publisher) - { - SWSS_LOG_ENTER(); - - assert(p4oidMapper != nullptr); - m_p4OidMapper = p4oidMapper; - assert(publisher != nullptr); - m_publisher = publisher; - } + WcmpManager(P4OidMapper *p4oidMapper, ResponsePublisherInterface *publisher); virtual ~WcmpManager() = default; - void enqueue(const swss::KeyOpFieldsValuesTuple &entry) override; + void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; void drain() override; std::string verifyState(const std::string &key, const std::vector &tuple) override; + 
ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) override; // Prunes next hop members egressing through the given port. void pruneNextHops(const std::string &port); @@ -112,20 +108,18 @@ class WcmpManager : public ObjectManagerInterface ReturnCode createWcmpGroupMember(std::shared_ptr wcmp_group_member, const sai_object_id_t group_oid, const std::string &wcmp_group_key); - // Creates WCMP group member with an associated watch_port. - ReturnCode createWcmpGroupMemberWithWatchport(P4WcmpGroupEntry *wcmp_group, - std::shared_ptr member, - const std::string &wcmp_group_key); - // Performs watchport related addition operations and creates WCMP group - // member. - ReturnCode processWcmpGroupMemberAddition(std::shared_ptr member, - P4WcmpGroupEntry *wcmp_group, const std::string &wcmp_group_key); + // members. + ReturnCode processWcmpGroupMembersAddition( + const std::vector> &members, const std::string &wcmp_group_key, + sai_object_id_t wcmp_group_oid, + std::vector> &created_wcmp_group_members); // Performs watchport related removal operations and removes WCMP group - // member. - ReturnCode processWcmpGroupMemberRemoval(std::shared_ptr member, - const std::string &wcmp_group_key); + // members. + ReturnCode processWcmpGroupMembersRemoval( + const std::vector> &members, const std::string &wcmp_group_key, + std::vector> &removed_wcmp_group_members); // Processes update operation for a WCMP group entry. 
ReturnCode processUpdateRequest(P4WcmpGroupEntry *wcmp_group_entry); @@ -172,9 +166,6 @@ class WcmpManager : public ObjectManagerInterface std::unordered_map>> port_name_to_wcmp_group_member_map; - // Set of pruned P4WcmpGroupMemberEntry entries - std::unordered_set> pruned_wcmp_members_set; - // Maps port name to oper-status std::unordered_map port_oper_status_map; @@ -182,6 +173,7 @@ class WcmpManager : public ObjectManagerInterface P4OidMapper *m_p4OidMapper; std::deque m_entries; ResponsePublisherInterface *m_publisher; + ObjectBulker gNextHopGroupMemberBulker; friend class p4orch::test::WcmpManagerTest; }; diff --git a/orchagent/pfc_detect_innovium.lua b/orchagent/pfc_detect_innovium.lua index 8deedeaa4f..a948bd6fad 100644 --- a/orchagent/pfc_detect_innovium.lua +++ b/orchagent/pfc_detect_innovium.lua @@ -79,7 +79,7 @@ for i = n, 1, -1 do -- redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'K7_debug_3', 'YES') - if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) or + if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0 and (pfc_duration - pfc_duration_last) > poll_time * 0.8) or -- DEBUG CODE START. Uncomment to enable (debug_storm == "enabled") or -- DEBUG CODE END. 
diff --git a/orchagent/pfc_detect_mellanox.lua b/orchagent/pfc_detect_mellanox.lua index e805ad9cff..826a577d62 100644 --- a/orchagent/pfc_detect_mellanox.lua +++ b/orchagent/pfc_detect_mellanox.lua @@ -12,6 +12,17 @@ local rets = {} redis.call('SELECT', counters_db) +-- Record the polling time +local timestamp_last = redis.call('HGET', 'TIMESTAMP', 'pfcwd_poll_timestamp_last') +local timestamp_struct = redis.call('TIME') +local timestamp_current = timestamp_struct[1] + timestamp_struct[2] / 1000000 +local timestamp_string = tostring(timestamp_current) +redis.call('HSET', 'TIMESTAMP', 'pfcwd_poll_timestamp_last', timestamp_string) +local real_poll_time = poll_time +if timestamp_last ~= false then + real_poll_time = (timestamp_current - tonumber(timestamp_last)) * 1000000 +end + -- Iterate through each queue local n = table.getn(KEYS) for i = n, 1, -1 do @@ -78,7 +89,12 @@ for i = n, 1, -1 do if time_left <= poll_time then redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') - redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm"]') + local occupancy_string = '"occupancy","' .. tostring(occupancy_bytes) .. '",' + local packets_string = '"packets","' .. tostring(packets) .. '","packets_last","' .. tostring(packets_last) .. '",' + local pfc_rx_packets_string = '"pfc_rx_packets","' .. tostring(pfc_rx_packets) .. '","pfc_rx_packets_last","' .. tostring(pfc_rx_packets_last) .. '",' + local storm_condition_string = '"pfc_duration","' .. tostring(pfc_duration) .. '","pfc_duration_last","' .. tostring(pfc_duration_last) .. '",' + local timestamps = '"timestamp","' .. timestamp_string .. '","timestamp_last","' .. timestamp_last .. '","real_poll_time","' .. real_poll_time .. '"' + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm",' .. occupancy_string .. packets_string .. pfc_rx_packets_string .. storm_condition_string .. 
timestamps .. ']') is_deadlock = true time_left = detection_time else diff --git a/orchagent/pfcactionhandler.cpp b/orchagent/pfcactionhandler.cpp index dee433bcd2..305ed4421d 100644 --- a/orchagent/pfcactionhandler.cpp +++ b/orchagent/pfcactionhandler.cpp @@ -25,6 +25,7 @@ extern sai_object_id_t gSwitchId; extern PortsOrch *gPortsOrch; +extern SwitchOrch *gSwitchOrch; extern AclOrch * gAclOrch; extern sai_port_api_t *sai_port_api; extern sai_queue_api_t *sai_queue_api; @@ -483,7 +484,7 @@ PfcWdLossyHandler::PfcWdLossyHandler(sai_object_id_t port, sai_object_id_t queue SWSS_LOG_ENTER(); string platform = getenv("platform") ? getenv("platform") : ""; - if (platform == CISCO_8000_PLATFORM_SUBSTRING) + if (platform == CISCO_8000_PLATFORM_SUBSTRING || ((platform == BRCM_PLATFORM_SUBSTRING) && (gSwitchOrch->checkPfcDlrInitEnable()))) { SWSS_LOG_DEBUG("Skipping in constructor PfcWdLossyHandler for platform %s on port 0x%" PRIx64, platform.c_str(), port); @@ -510,7 +511,7 @@ PfcWdLossyHandler::~PfcWdLossyHandler(void) SWSS_LOG_ENTER(); string platform = getenv("platform") ? 
getenv("platform") : ""; - if (platform == CISCO_8000_PLATFORM_SUBSTRING) + if (platform == CISCO_8000_PLATFORM_SUBSTRING || ((platform == BRCM_PLATFORM_SUBSTRING) && (gSwitchOrch->checkPfcDlrInitEnable()))) { SWSS_LOG_DEBUG("Skipping in destructor PfcWdLossyHandler for platform %s on port 0x%" PRIx64, platform.c_str(), getPort()); diff --git a/orchagent/pfcwdorch.cpp b/orchagent/pfcwdorch.cpp index da092387af..7c78f81d6b 100644 --- a/orchagent/pfcwdorch.cpp +++ b/orchagent/pfcwdorch.cpp @@ -32,6 +32,8 @@ extern sai_switch_api_t* sai_switch_api; extern sai_port_api_t *sai_port_api; extern sai_queue_api_t *sai_queue_api; +extern event_handle_t g_events_handle; + extern SwitchOrch *gSwitchOrch; extern PortsOrch *gPortsOrch; @@ -236,11 +238,11 @@ task_process_status PfcWdOrch::createEntry(const st { if(gSwitchOrch->checkPfcDlrInitEnable()) { - if(getPfcDlrPacketAction() == PfcWdAction::PFC_WD_ACTION_UNKNOWN) + if(m_pfcwd_ports.empty()) { sai_attribute_t attr; attr.id = SAI_SWITCH_ATTR_PFC_DLR_PACKET_ACTION; - attr.value.u32 = (sai_uint32_t)action; + attr.value.u32 = packet_action_map.at(value); sai_status_t status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); if(status != SAI_STATUS_SUCCESS) @@ -305,6 +307,7 @@ task_process_status PfcWdOrch::createEntry(const st } SWSS_LOG_NOTICE("Started PFC Watchdog on port %s", port.m_alias.c_str()); + m_pfcwd_ports.insert(port.m_alias); return task_process_status::task_success; } @@ -323,6 +326,7 @@ task_process_status PfcWdOrch::deleteEntry(const st } SWSS_LOG_NOTICE("Stopped PFC Watchdog on port %s", name.c_str()); + m_pfcwd_ports.erase(port.m_alias); return task_process_status::task_success; } @@ -909,10 +913,20 @@ void PfcWdSwOrch::doTask(swss::NotificationConsumer wdNotification.pop(queueIdStr, event, values); + string info; + for (auto &fv : values) + { + info += fvField(fv) + ":" + fvValue(fv) + "|"; + } + if (!info.empty()) + { + info.pop_back(); + } + sai_object_id_t queueId = SAI_NULL_OBJECT_ID; 
sai_deserialize_object_id(queueIdStr, queueId); - if (!startWdActionOnQueue(event, queueId)) + if (!startWdActionOnQueue(event, queueId, info)) { SWSS_LOG_ERROR("Failed to start PFC watchdog %s event action on queue %s", event.c_str(), queueIdStr.c_str()); } @@ -934,7 +948,41 @@ void PfcWdSwOrch::doTask(SelectableTimer &timer) } template -bool PfcWdSwOrch::startWdActionOnQueue(const string &event, sai_object_id_t queueId) +void PfcWdSwOrch::report_pfc_storm( + sai_object_id_t id, const PfcWdQueueEntry *entry, const string &info) +{ + event_params_t params = { + { "ifname", entry->portAlias }, + { "queue_index", to_string(entry->index) }, + { "queue_id", to_string(id) }, + { "port_id", to_string(entry->portId) }}; + + if (info.empty()) + { + SWSS_LOG_NOTICE( + "PFC Watchdog detected PFC storm on port %s, queue index %d, queue id 0x%" PRIx64 " and port id 0x%" PRIx64, + entry->portAlias.c_str(), + entry->index, + id, + entry->portId); + } + else + { + SWSS_LOG_NOTICE( + "PFC Watchdog detected PFC storm on port %s, queue index %d, queue id 0x%" PRIx64 " and port id 0x%" PRIx64 ", additional info: %s.", + entry->portAlias.c_str(), + entry->index, + id, + entry->portId, + info.c_str()); + params["additional_info"] = info; + } + + event_publish(g_events_handle, "pfc-storm", ¶ms); +} + +template +bool PfcWdSwOrch::startWdActionOnQueue(const string &event, sai_object_id_t queueId, const string &info) { auto entry = m_entryMap.find(queueId); if (entry == m_entryMap.end()) @@ -955,12 +1003,7 @@ bool PfcWdSwOrch::startWdActionOnQueue(const string { if (entry->second.handler == nullptr) { - SWSS_LOG_NOTICE( - "PFC Watchdog detected PFC storm on port %s, queue index %d, queue id 0x%" PRIx64 " and port id 0x%" PRIx64 ".", - entry->second.portAlias.c_str(), - entry->second.index, - entry->first, - entry->second.portId); + report_pfc_storm(entry->first, &entry->second, info); entry->second.handler = make_shared( entry->second.portId, @@ -977,12 +1020,7 @@ bool 
PfcWdSwOrch::startWdActionOnQueue(const string { if (entry->second.handler == nullptr) { - SWSS_LOG_NOTICE( - "PFC Watchdog detected PFC storm on port %s, queue index %d, queue id 0x%" PRIx64 " and port id 0x%" PRIx64 ".", - entry->second.portAlias.c_str(), - entry->second.index, - entry->first, - entry->second.portId); + report_pfc_storm(entry->first, &entry->second, info); entry->second.handler = make_shared( entry->second.portId, @@ -999,12 +1037,7 @@ bool PfcWdSwOrch::startWdActionOnQueue(const string { if (entry->second.handler == nullptr) { - SWSS_LOG_NOTICE( - "PFC Watchdog detected PFC storm on port %s, queue index %d, queue id 0x%" PRIx64 " and port id 0x%" PRIx64 ".", - entry->second.portAlias.c_str(), - entry->second.index, - entry->first, - entry->second.portId); + report_pfc_storm(entry->first, &entry->second, info); entry->second.handler = make_shared( entry->second.portId, @@ -1097,5 +1130,5 @@ bool PfcWdSwOrch::bake() // Trick to keep member functions in a separate file template class PfcWdSwOrch; template class PfcWdSwOrch; -template class PfcWdSwOrch; +template class PfcWdSwOrch; template class PfcWdSwOrch; diff --git a/orchagent/pfcwdorch.h b/orchagent/pfcwdorch.h index 63be1be036..935582289c 100644 --- a/orchagent/pfcwdorch.h +++ b/orchagent/pfcwdorch.h @@ -7,6 +7,7 @@ #include "producertable.h" #include "notificationconsumer.h" #include "timer.h" +#include "events.h" extern "C" { #include "sai.h" @@ -22,6 +23,12 @@ enum class PfcWdAction PFC_WD_ACTION_ALERT, }; +static const map packet_action_map = { + {"drop", SAI_PACKET_ACTION_DROP}, + {"forward", SAI_PACKET_ACTION_FORWARD}, + {"alert", SAI_PACKET_ACTION_FORWARD} +}; + template class PfcWdOrch: public Orch { @@ -53,14 +60,14 @@ class PfcWdOrch: public Orch void setPfcDlrPacketAction(PfcWdAction action) { PfcDlrPacketAction = action; } protected: - virtual bool startWdActionOnQueue(const string &event, sai_object_id_t queueId) = 0; + virtual bool startWdActionOnQueue(const string &event, 
sai_object_id_t queueId, const string &info="") = 0; string m_platform = ""; - private: shared_ptr m_countersDb = nullptr; shared_ptr
m_countersTable = nullptr; PfcWdAction PfcDlrPacketAction = PfcWdAction::PFC_WD_ACTION_UNKNOWN; + std::set m_pfcwd_ports; }; template @@ -89,7 +96,7 @@ class PfcWdSwOrch: public PfcWdOrch void doTask() override; protected: - bool startWdActionOnQueue(const string &event, sai_object_id_t queueId) override; + bool startWdActionOnQueue(const string &event, sai_object_id_t queueId, const string &info="") override; private: struct PfcWdQueueEntry @@ -121,6 +128,8 @@ class PfcWdSwOrch: public PfcWdOrch void enableBigRedSwitchMode(); void setBigRedSwitchMode(string value); + void report_pfc_storm(sai_object_id_t id, const PfcWdQueueEntry *, const string&); + map m_entryMap; map m_brsEntryMap; diff --git a/orchagent/port.h b/orchagent/port.h index a5e003584b..dc8241ce3a 100644 --- a/orchagent/port.h +++ b/orchagent/port.h @@ -2,7 +2,7 @@ #define SWSS_PORT_H extern "C" { -#include "sai.h" +#include } #include @@ -12,6 +12,8 @@ extern "C" { #include #include +#include + #define DEFAULT_PORT_VLAN_ID 1 /* * Default MTU is derived from SAI_PORT_ATTR_MTU (1514) @@ -73,6 +75,9 @@ struct SystemLagInfo class Port { +public: + typedef sai_bridge_port_fdb_learning_mode_t port_learn_mode_t; + public: enum Type { CPU, @@ -85,14 +90,22 @@ class Port SUBPORT, SYSTEM, UNKNOWN - } ; + }; - enum AutoNegMode { - AUTONEG_NOT_SET = -1, - AUTONEG_OFF = 0, - AUTONEG_ON = 1 + enum Role + { + Ext, // external + Int, // internal + Inb, // inband + Rec, // recirculation + Dpc // DPU Connect Port on SmartSwitch }; +public: + static constexpr std::size_t max_lanes = 8; // Max HW lanes + static constexpr std::size_t max_fec_modes = 3; // Max FEC modes (sync with SAI) + +public: Port() {}; Port(std::string alias, Type type) : m_alias(alias), m_type(type) {}; @@ -113,13 +126,13 @@ class Port } std::string m_alias; - Type m_type; - int m_index = 0; // PHY_PORT: index + Type m_type = UNKNOWN; + uint16_t m_index = 0; // PHY_PORT: index uint32_t m_mtu = DEFAULT_MTU; uint32_t m_speed = 0; // Mbps - 
std::string m_learn_mode = "hardware"; - AutoNegMode m_autoneg = Port::AutoNegMode::AUTONEG_NOT_SET; - int m_link_training = -1; // -1 means not set, 0 = disabled, 1 = enabled + port_learn_mode_t m_learn_mode = SAI_BRIDGE_PORT_FDB_LEARNING_MODE_HW; + bool m_autoneg = false; + bool m_link_training = false; bool m_admin_state_up = false; bool m_init = false; bool m_l3_vni = false; @@ -152,11 +165,12 @@ class Port uint32_t m_nat_zone_id = 0; uint32_t m_vnid = VNID_NONE; uint32_t m_fdb_count = 0; + uint64_t m_flap_count = 0; uint32_t m_up_member_count = 0; uint32_t m_maximum_headroom = 0; - std::vector m_adv_speeds; + std::set m_adv_speeds; sai_port_interface_type_t m_interface_type = SAI_PORT_INTERFACE_TYPE_NONE; - std::vector m_adv_interface_types; + std::set m_adv_interface_types; bool m_mpls = false; /* * Following bit vector is used to lock @@ -181,8 +195,16 @@ class Port /* pre-emphasis */ std::map> m_preemphasis; - bool m_fec_cfg = false; - bool m_an_cfg = false; + /* Force initial parameter configuration flags */ + bool m_an_cfg = false; // Auto-negotiation (AN) + bool m_adv_speed_cfg = false; // Advertised speed + bool m_intf_cfg = false; // Interface type + bool m_adv_intf_cfg = false; // Advertised interface type + bool m_fec_cfg = false; // Forward Error Correction (FEC) + bool m_override_fec = false; // Enable Override FEC + bool m_pfc_asym_cfg = false; // Asymmetric Priority Flow Control (PFC) + bool m_lm_cfg = false; // Forwarding Database (FDB) Learning Mode (LM) + bool m_lt_cfg = false; // Link Training (LT) int m_cap_an = -1; /* Capability - AutoNeg, -1 means not set */ int m_cap_lt = -1; /* Capability - LinkTraining, -1 means not set */ diff --git a/orchagent/port/port_capabilities.cpp b/orchagent/port/port_capabilities.cpp new file mode 100644 index 0000000000..a55334d9f1 --- /dev/null +++ b/orchagent/port/port_capabilities.cpp @@ -0,0 +1,78 @@ +// includes 
----------------------------------------------------------------------------------------------------------- + +extern "C" { +#include +} + +#include + +#include +#include + +#include "port_capabilities.h" + +using namespace swss; + +// variables ---------------------------------------------------------------------------------------------------------- + +extern sai_object_id_t gSwitchId; + +// functions ---------------------------------------------------------------------------------------------------------- + +static std::string toStr(sai_object_type_t objType, sai_attr_id_t attrId) noexcept +{ + const auto *meta = sai_metadata_get_attr_metadata(objType, attrId); + + return meta != nullptr ? meta->attridname : "UNKNOWN"; +} + +// Port capabilities -------------------------------------------------------------------------------------------------- + +PortCapabilities::PortCapabilities() +{ + queryPortAttrCapabilities(portCapabilities.pfc, SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL); + queryPortAttrCapabilities(portCapabilities.pfcTx, SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_TX); + queryPortAttrCapabilities(portCapabilities.pfcRx, SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_RX); + queryPortAttrCapabilities(portCapabilities.pfcMode, SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_MODE); +} + +bool PortCapabilities::isPortPfcAsymSupported() const +{ + auto supported = portCapabilities.pfcMode.attrCap.set_implemented; + supported = supported && portCapabilities.pfc.attrCap.set_implemented; + supported = supported && portCapabilities.pfcTx.attrCap.set_implemented; + supported = supported && portCapabilities.pfcRx.attrCap.set_implemented; + + return supported; +} + +sai_status_t PortCapabilities::queryAttrCapabilitiesSai(sai_attr_capability_t &attrCap, sai_object_type_t objType, sai_attr_id_t attrId) const +{ + return sai_query_attribute_capability(gSwitchId, objType, attrId, &attrCap); +} + +template +void PortCapabilities::queryPortAttrCapabilities(T &obj, sai_port_attr_t attrId) +{ + SWSS_LOG_ENTER(); 
+ + auto status = queryAttrCapabilitiesSai( + obj.attrCap, SAI_OBJECT_TYPE_PORT, attrId + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to get attribute(%s) capabilities", + toStr(SAI_OBJECT_TYPE_PORT, attrId).c_str() + ); + return; + } + + if (!obj.attrCap.set_implemented) + { + SWSS_LOG_WARN( + "Attribute(%s) SET is not implemented in SAI", + toStr(SAI_OBJECT_TYPE_PORT, attrId).c_str() + ); + } +} diff --git a/orchagent/port/port_capabilities.h b/orchagent/port/port_capabilities.h new file mode 100644 index 0000000000..e937e7b943 --- /dev/null +++ b/orchagent/port/port_capabilities.h @@ -0,0 +1,41 @@ +#pragma once + +extern "C" { +#include +#include +#include +} + +class PortCapabilities final +{ +public: + PortCapabilities(); + ~PortCapabilities() = default; + + bool isPortPfcAsymSupported() const; + +private: + sai_status_t queryAttrCapabilitiesSai(sai_attr_capability_t &attrCap, sai_object_type_t objType, sai_attr_id_t attrId) const; + + template + void queryPortAttrCapabilities(T &obj, sai_port_attr_t attrId); + + // Port SAI capabilities + struct { + struct { + sai_attr_capability_t attrCap = { false, false, false }; + } pfcRx; // SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_RX + + struct { + sai_attr_capability_t attrCap = { false, false, false }; + } pfcTx; // SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_TX + + struct { + sai_attr_capability_t attrCap = { false, false, false }; + } pfc; // SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL + + struct { + sai_attr_capability_t attrCap = { false, false, false }; + } pfcMode; // SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_MODE + } portCapabilities; +}; diff --git a/orchagent/port/portcnt.h b/orchagent/port/portcnt.h new file mode 100644 index 0000000000..26c8a603f6 --- /dev/null +++ b/orchagent/port/portcnt.h @@ -0,0 +1,209 @@ +#pragma once + +extern "C" { +#include +#include +} + +#include +#include + +#include +#include +#include +#include + +#include "../port.h" + +class PortConfig final +{ +public: + PortConfig() = 
default; + ~PortConfig() = default; + + PortConfig(const std::string &key, const std::string &op) noexcept + { + this->key = key; + this->op = op; + } + + struct { + std::string value; + bool is_set = false; + } alias; // Port alias + + struct { + std::uint16_t value; + bool is_set = false; + } index; // Interface index + + struct { + std::set value; + bool is_set = false; + } lanes; // Lane information of a physical port + + struct { + std::uint32_t value; + bool is_set = false; + } speed; // Port speed + + struct { + bool value; + bool is_set = false; + } autoneg; // Port autoneg + + struct { + std::set value; + bool is_set = false; + } adv_speeds; // Port advertised speeds + + struct { + sai_port_interface_type_t value; + bool is_set = false; + } interface_type; // Port interface type + + struct { + std::set value; + bool is_set = false; + } adv_interface_types; // Port advertised interface types + + struct { + sai_port_fec_mode_t value; + bool is_set = false; + bool override_fec = false; + } fec; // Port FEC + + struct { + std::uint32_t value; + bool is_set = false; + } mtu; // Port MTU + + struct { + std::uint16_t value; + bool is_set = false; + } tpid; // Port TPID + + struct { + sai_port_priority_flow_control_mode_t value; + bool is_set = false; + } pfc_asym; // Port asymmetric PFC + + struct { + sai_bridge_port_fdb_learning_mode_t value; + bool is_set = false; + } learn_mode; // Port FDB learn mode + + struct { + bool value; + bool is_set = false; + } link_training; // Port link training + + struct { + + struct { + std::vector value; + bool is_set = false; + } preemphasis; // Port serdes pre-emphasis + + struct { + std::vector value; + bool is_set = false; + } idriver; // Port serdes idriver + + struct { + std::vector value; + bool is_set = false; + } ipredriver; // Port serdes ipredriver + + struct { + std::vector value; + bool is_set = false; + } pre1; // Port serdes pre1 + + struct { + std::vector value; + bool is_set = false; + } pre2; // Port serdes 
pre2 + + struct { + std::vector value; + bool is_set = false; + } pre3; // Port serdes pre3 + + struct { + std::vector value; + bool is_set = false; + } main; // Port serdes main + + struct { + std::vector value; + bool is_set = false; + } post1; // Port serdes post1 + + struct { + std::vector value; + bool is_set = false; + } post2; // Port serdes post2 + + struct { + std::vector value; + bool is_set = false; + } post3; // Port serdes post3 + + struct { + std::vector value; + bool is_set = false; + } attn; // Port serdes attn + + struct { + std::vector value; + bool is_set = false; + } ob_m2lp; // Port serdes ob_m2lp + + struct { + std::vector value; + bool is_set = false; + } ob_alev_out; // Port serdes ob_alev_out + + struct { + std::vector value; + bool is_set = false; + } obplev; // Port serdes obplev + + struct { + std::vector value; + bool is_set = false; + } obnlev; // Port serdes obnlev + + struct { + std::vector value; + bool is_set = false; + } regn_bfm1p; // Port serdes regn_bfm1p + + struct { + std::vector value; + bool is_set = false; + } regn_bfm1n; // Port serdes regn_bfm1n + + } serdes; // Port serdes + + struct { + swss::Port::Role value; + bool is_set = false; + } role; // Port role + + struct { + bool value; + bool is_set = false; + } admin_status; // Port admin status + + struct { + std::string value; + bool is_set = false; + } description; // Port description + + std::string key; + std::string op; + + std::unordered_map fieldValueMap; +}; diff --git a/orchagent/port/porthlpr.cpp b/orchagent/port/porthlpr.cpp new file mode 100644 index 0000000000..419cb7ff84 --- /dev/null +++ b/orchagent/port/porthlpr.cpp @@ -0,0 +1,1039 @@ +// includes ----------------------------------------------------------------------------------------------------------- + +#include +#include + +#include +#include +#include + +#include + +#include "portschema.h" +#include "converter.h" +#include "tokenize.h" +#include "logger.h" + +#include "porthlpr.h" + +using namespace 
swss; + +// types -------------------------------------------------------------------------------------------------------------- + +typedef decltype(PortConfig::serdes) PortSerdes_t; + +// constants ---------------------------------------------------------------------------------------------------------- + +static const std::uint32_t minPortSpeed = 1; +static const std::uint32_t maxPortSpeed = 800000; + +static const std::uint32_t minPortMtu = 68; +static const std::uint32_t maxPortMtu = 9216; + +static const std::unordered_map portModeMap = +{ + { PORT_MODE_ON, true }, + { PORT_MODE_OFF, false } +}; + +static const std::unordered_map portStatusMap = +{ + { PORT_STATUS_UP, true }, + { PORT_STATUS_DOWN, false } +}; + +static const std::unordered_map portInterfaceTypeMap = +{ + { PORT_INTERFACE_TYPE_NONE, SAI_PORT_INTERFACE_TYPE_NONE }, + { PORT_INTERFACE_TYPE_CR, SAI_PORT_INTERFACE_TYPE_CR }, + { PORT_INTERFACE_TYPE_CR2, SAI_PORT_INTERFACE_TYPE_CR2 }, + { PORT_INTERFACE_TYPE_CR4, SAI_PORT_INTERFACE_TYPE_CR4 }, + { PORT_INTERFACE_TYPE_CR8, SAI_PORT_INTERFACE_TYPE_CR8 }, + { PORT_INTERFACE_TYPE_SR, SAI_PORT_INTERFACE_TYPE_SR }, + { PORT_INTERFACE_TYPE_SR2, SAI_PORT_INTERFACE_TYPE_SR2 }, + { PORT_INTERFACE_TYPE_SR4, SAI_PORT_INTERFACE_TYPE_SR4 }, + { PORT_INTERFACE_TYPE_SR8, SAI_PORT_INTERFACE_TYPE_SR8 }, + { PORT_INTERFACE_TYPE_LR, SAI_PORT_INTERFACE_TYPE_LR }, + { PORT_INTERFACE_TYPE_LR4, SAI_PORT_INTERFACE_TYPE_LR4 }, + { PORT_INTERFACE_TYPE_LR8, SAI_PORT_INTERFACE_TYPE_LR8 }, + { PORT_INTERFACE_TYPE_KR, SAI_PORT_INTERFACE_TYPE_KR }, + { PORT_INTERFACE_TYPE_KR4, SAI_PORT_INTERFACE_TYPE_KR4 }, + { PORT_INTERFACE_TYPE_KR8, SAI_PORT_INTERFACE_TYPE_KR8 }, + { PORT_INTERFACE_TYPE_CAUI, SAI_PORT_INTERFACE_TYPE_CAUI }, + { PORT_INTERFACE_TYPE_GMII, SAI_PORT_INTERFACE_TYPE_GMII }, + { PORT_INTERFACE_TYPE_SFI, SAI_PORT_INTERFACE_TYPE_SFI }, + { PORT_INTERFACE_TYPE_XLAUI, SAI_PORT_INTERFACE_TYPE_XLAUI }, + { PORT_INTERFACE_TYPE_KR2, SAI_PORT_INTERFACE_TYPE_KR2 }, + { 
PORT_INTERFACE_TYPE_CAUI4, SAI_PORT_INTERFACE_TYPE_CAUI4 }, + { PORT_INTERFACE_TYPE_XAUI, SAI_PORT_INTERFACE_TYPE_XAUI }, + { PORT_INTERFACE_TYPE_XFI, SAI_PORT_INTERFACE_TYPE_XFI }, + { PORT_INTERFACE_TYPE_XGMII, SAI_PORT_INTERFACE_TYPE_XGMII } +}; + +static const std::unordered_map portFecMap = +{ + { PORT_FEC_NONE, SAI_PORT_FEC_MODE_NONE }, + { PORT_FEC_RS, SAI_PORT_FEC_MODE_RS }, + { PORT_FEC_FC, SAI_PORT_FEC_MODE_FC }, + { PORT_FEC_AUTO, SAI_PORT_FEC_MODE_NONE } +}; + +static const std::unordered_map portFecRevMap = +{ + { SAI_PORT_FEC_MODE_NONE, PORT_FEC_NONE }, + { SAI_PORT_FEC_MODE_RS, PORT_FEC_RS }, + { SAI_PORT_FEC_MODE_FC, PORT_FEC_FC } +}; + +static const std::unordered_map portFecOverrideMap = +{ + { PORT_FEC_NONE, true }, + { PORT_FEC_RS, true }, + { PORT_FEC_FC, true }, + { PORT_FEC_AUTO, false } +}; + +static const std::unordered_map portPfcAsymMap = +{ + { PORT_MODE_ON, SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_SEPARATE }, + { PORT_MODE_OFF, SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_COMBINED } +}; + +static const std::unordered_map portLearnModeMap = +{ + { PORT_LEARN_MODE_DROP, SAI_BRIDGE_PORT_FDB_LEARNING_MODE_DROP }, + { PORT_LEARN_MODE_DISABLE, SAI_BRIDGE_PORT_FDB_LEARNING_MODE_DISABLE }, + { PORT_LEARN_MODE_HARDWARE, SAI_BRIDGE_PORT_FDB_LEARNING_MODE_HW }, + { PORT_LEARN_MODE_CPU_TRAP, SAI_BRIDGE_PORT_FDB_LEARNING_MODE_CPU_TRAP }, + { PORT_LEARN_MODE_CPU_LOG, SAI_BRIDGE_PORT_FDB_LEARNING_MODE_CPU_LOG }, + { PORT_LEARN_MODE_NOTIFICATION, SAI_BRIDGE_PORT_FDB_LEARNING_MODE_FDB_NOTIFICATION } +}; + +static const std::unordered_map portRoleMap = +{ + { PORT_ROLE_EXT, Port::Role::Ext }, + { PORT_ROLE_INT, Port::Role::Int }, + { PORT_ROLE_INB, Port::Role::Inb }, + { PORT_ROLE_REC, Port::Role::Rec }, + { PORT_ROLE_DPC, Port::Role::Dpc } +}; + +// functions ---------------------------------------------------------------------------------------------------------- + +template +static inline T toUInt(const std::string &hexStr) +{ + if (hexStr.substr(0, 2) != "0x") + { 
+ throw std::invalid_argument("Invalid argument: '" + hexStr + "'"); + } + + return to_uint(hexStr); +} + +static inline std::uint16_t toUInt16(const std::string &hexStr) +{ + return toUInt(hexStr); +} + +static inline std::uint32_t toUInt32(const std::string &hexStr) +{ + return toUInt(hexStr); +} + +// Port helper -------------------------------------------------------------------------------------------------------- + +bool PortHelper::fecToStr(std::string &str, sai_port_fec_mode_t value) const +{ + const auto &cit = portFecRevMap.find(value); + if (cit == portFecRevMap.cend()) + { + return false; + } + + str = cit->second; + + return true; +} + +bool PortHelper::fecToSaiFecMode(const std::string &str, sai_port_fec_mode_t &value) const +{ + const auto &cit = portFecMap.find(str); + if (cit == portFecMap.cend()) + { + return false; + } + + value = cit->second; + + return true; +} + +bool PortHelper::fecIsOverrideRequired(const std::string &str) const +{ + const auto &cit = portFecMap.find(str); + if (cit == portFecMap.cend()) + { + return false; + } + + return cit->second; + +} +std::string PortHelper::getFieldValueStr(const PortConfig &port, const std::string &field) const +{ + static std::string str; + + const auto &cit = port.fieldValueMap.find(field); + if (cit != port.fieldValueMap.cend()) + { + return cit->second; + } + + return str; +} + +std::string PortHelper::getAutonegStr(const PortConfig &port) const +{ + return this->getFieldValueStr(port, PORT_AUTONEG); +} + +std::string PortHelper::getPortInterfaceTypeStr(const PortConfig &port) const +{ + return this->getFieldValueStr(port, PORT_INTERFACE_TYPE); +} + +std::string PortHelper::getAdvInterfaceTypesStr(const PortConfig &port) const +{ + return this->getFieldValueStr(port, PORT_ADV_INTERFACE_TYPES); +} + +std::string PortHelper::getFecStr(const PortConfig &port) const +{ + return this->getFieldValueStr(port, PORT_FEC); +} + +std::string PortHelper::getPfcAsymStr(const PortConfig &port) const +{ + 
return this->getFieldValueStr(port, PORT_PFC_ASYM); +} + +std::string PortHelper::getLearnModeStr(const PortConfig &port) const +{ + return this->getFieldValueStr(port, PORT_LEARN_MODE); +} + +std::string PortHelper::getLinkTrainingStr(const PortConfig &port) const +{ + return this->getFieldValueStr(port, PORT_LINK_TRAINING); +} + +std::string PortHelper::getAdminStatusStr(const PortConfig &port) const +{ + return this->getFieldValueStr(port, PORT_ADMIN_STATUS); +} + +bool PortHelper::parsePortAlias(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty string is prohibited", field.c_str()); + return false; + } + + port.alias.value = value; + port.alias.is_set = true; + + return true; +} + +bool PortHelper::parsePortIndex(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + try + { + port.index.value = to_uint(value); + port.index.is_set = true; + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to parse field(%s): %s", field.c_str(), e.what()); + return false; + } + + return true; +} + +bool PortHelper::parsePortLanes(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty string is prohibited", field.c_str()); + return false; + } + + const auto &laneList = tokenize(value, ','); + + try + { + for (const auto &cit : laneList) + { + port.lanes.value.insert(to_uint(cit)); + } + + port.lanes.is_set = true; + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to parse field(%s): %s", field.c_str(), e.what()); + return false; + } + + if (port.lanes.value.size() != laneList.size()) + { + SWSS_LOG_WARN("Duplicate lanes in 
field(%s): unexpected value(%s)", field.c_str(), value.c_str()); + } + + return true; +} + +bool PortHelper::parsePortSpeed(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + try + { + port.speed.value = to_uint(value); + port.speed.is_set = true; + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to parse field(%s): %s", field.c_str(), e.what()); + return false; + } + + if (!((minPortSpeed <= port.speed.value) && (port.speed.value <= maxPortSpeed))) + { + SWSS_LOG_ERROR( + "Failed to parse field(%s): value(%s) is out of range: %u <= speed <= %u", + field.c_str(), value.c_str(), minPortSpeed, maxPortSpeed + ); + return false; + } + + return true; +} + +bool PortHelper::parsePortAutoneg(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + const auto &cit = portModeMap.find(value); + if (cit == portModeMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + port.autoneg.value = cit->second; + port.autoneg.is_set = true; + + return true; +} + +bool PortHelper::parsePortAdvSpeeds(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + auto nValue = boost::algorithm::to_lower_copy(value); + + if (nValue == PORT_ADV_ALL) + { + port.adv_speeds.is_set = true; + return true; + } + + const auto &speedList = tokenize(nValue, ','); + + try + { + for (const auto &cit : speedList) + { + auto speed = to_uint(cit); + + if (!((minPortSpeed 
<= speed) && (speed <= maxPortSpeed))) + { + SWSS_LOG_ERROR( + "Failed to parse field(%s): value(%s) is out of range: %u <= speed <= %u", + field.c_str(), value.c_str(), minPortSpeed, maxPortSpeed + ); + return false; + } + + port.adv_speeds.value.insert(speed); + } + + port.adv_speeds.is_set = true; + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to parse field(%s): %s", field.c_str(), e.what()); + return false; + } + + if (port.adv_speeds.value.size() != speedList.size()) + { + SWSS_LOG_WARN("Duplicate speeds in field(%s): unexpected value(%s)", field.c_str(), value.c_str()); + } + + return true; +} + +bool PortHelper::parsePortInterfaceType(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + auto nValue = boost::algorithm::to_lower_copy(value); + + const auto &cit = portInterfaceTypeMap.find(nValue); + if (cit == portInterfaceTypeMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + port.interface_type.value = cit->second; + port.interface_type.is_set = true; + + return true; +} + +bool PortHelper::parsePortAdvInterfaceTypes(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + auto nValue = boost::algorithm::to_lower_copy(value); + + if (nValue == PORT_ADV_ALL) + { + port.adv_interface_types.is_set = true; + return true; + } + + const auto &intfTypeList = tokenize(nValue, ','); + + for (const auto &cit1 : intfTypeList) + { + const auto &cit2 = portInterfaceTypeMap.find(cit1); + if (cit2 == portInterfaceTypeMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", 
field.c_str(), value.c_str()); + return false; + } + + port.adv_interface_types.value.insert(cit2->second); + } + + port.adv_interface_types.is_set = true; + + if (port.adv_interface_types.value.size() != intfTypeList.size()) + { + SWSS_LOG_WARN("Duplicate interface types in field(%s): unexpected value(%s)", field.c_str(), value.c_str()); + } + + return true; +} + +bool PortHelper::parsePortFec(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + const auto &cit = portFecMap.find(value); + if (cit == portFecMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + const auto &override_cit = portFecOverrideMap.find(value); + if (override_cit == portFecOverrideMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s) in override map", field.c_str(), value.c_str()); + return false; + } + + port.fec.value = cit->second; + port.fec.is_set = true; + port.fec.override_fec = override_cit->second; + + return true; +} + +bool PortHelper::parsePortMtu(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + try + { + port.mtu.value = to_uint(value); + port.mtu.is_set = true; + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to parse field(%s): %s", field.c_str(), e.what()); + return false; + } + + if (!((minPortMtu <= port.mtu.value) && (port.mtu.value <= maxPortMtu))) + { + SWSS_LOG_ERROR( + "Failed to parse field(%s): value(%s) is out of range: %u <= mtu <= %u", + field.c_str(), value.c_str(), minPortMtu, maxPortMtu + ); + return false; + } + + return true; +} + +bool 
PortHelper::parsePortTpid(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + try + { + port.tpid.value = toUInt16(value); + port.tpid.is_set = true; + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to parse field(%s): %s", field.c_str(), e.what()); + return false; + } + + return true; +} + +bool PortHelper::parsePortPfcAsym(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + const auto &cit = portPfcAsymMap.find(value); + if (cit == portPfcAsymMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + port.pfc_asym.value = cit->second; + port.pfc_asym.is_set = true; + + return true; +} + +bool PortHelper::parsePortLearnMode(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + const auto &cit = portLearnModeMap.find(value); + if (cit == portLearnModeMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + port.learn_mode.value = cit->second; + port.learn_mode.is_set = true; + + return true; +} + +bool PortHelper::parsePortLinkTraining(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + const auto &cit = portModeMap.find(value); + if (cit == portModeMap.cend()) + 
{ + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + port.link_training.value = cit->second; + port.link_training.is_set = true; + + return true; +} + +template <typename T> +bool PortHelper::parsePortSerdes(T &serdes, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty string is prohibited", field.c_str()); + return false; + } + + const auto &serdesList = tokenize(value, ','); + + try + { + for (const auto &cit : serdesList) + { + serdes.value.push_back(toUInt32(cit)); + } + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to parse field(%s): %s", field.c_str(), e.what()); + return false; + } + + serdes.is_set = true; + + return true; +} + +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::preemphasis) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::idriver) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::ipredriver) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::pre1) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::pre2) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::pre3) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::main) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::post1) &serdes, const std::string &field, const std::string &value) const; +template bool 
PortHelper::parsePortSerdes(decltype(PortSerdes_t::post2) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::post3) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::attn) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::ob_m2lp) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::ob_alev_out) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::obplev) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::obnlev) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::regn_bfm1p) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::regn_bfm1n) &serdes, const std::string &field, const std::string &value) const; + + + +bool PortHelper::parsePortRole(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + const auto &cit = portRoleMap.find(value); + if (cit == portRoleMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + port.role.value = cit->second; + port.role.is_set = true; + + return true; +} + +bool PortHelper::parsePortAdminStatus(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + 
SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + const auto &cit = portStatusMap.find(value); + if (cit == portStatusMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + port.admin_status.value = cit->second; + port.admin_status.is_set = true; + + return true; +} + +bool PortHelper::parsePortDescription(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + port.description.value = value; + port.description.is_set = true; + + return true; +} + +bool PortHelper::parsePortConfig(PortConfig &port) const +{ + SWSS_LOG_ENTER(); + + for (const auto &cit : port.fieldValueMap) + { + const auto &field = cit.first; + const auto &value = cit.second; + + if (field == PORT_ALIAS) + { + if (!this->parsePortAlias(port, field, value)) + { + return false; + } + } + else if (field == PORT_INDEX) + { + if (!this->parsePortIndex(port, field, value)) + { + return false; + } + } + else if (field == PORT_LANES) + { + if (!this->parsePortLanes(port, field, value)) + { + return false; + } + } + else if (field == PORT_SPEED) + { + if (!this->parsePortSpeed(port, field, value)) + { + return false; + } + } + else if (field == PORT_AUTONEG) + { + if (!this->parsePortAutoneg(port, field, value)) + { + return false; + } + } + else if (field == PORT_ADV_SPEEDS) + { + if (!this->parsePortAdvSpeeds(port, field, value)) + { + return false; + } + } + else if (field == PORT_INTERFACE_TYPE) + { + if (!this->parsePortInterfaceType(port, field, value)) + { + return false; + } + } + else if (field == PORT_ADV_INTERFACE_TYPES) + { + if (!this->parsePortAdvInterfaceTypes(port, field, value)) + { + return false; + } + } + else if (field == PORT_FEC) + { + if (!this->parsePortFec(port, field, value)) + { + return false; + } + } + else if (field == PORT_MTU) + { + if (!this->parsePortMtu(port, field, value)) + { + return 
false; + } + } + else if (field == PORT_TPID) + { + if (!this->parsePortTpid(port, field, value)) + { + return false; + } + } + else if (field == PORT_PFC_ASYM) + { + if (!this->parsePortPfcAsym(port, field, value)) + { + return false; + } + } + else if (field == PORT_LEARN_MODE) + { + if (!this->parsePortLearnMode(port, field, value)) + { + return false; + } + } + else if (field == PORT_LINK_TRAINING) + { + if (!this->parsePortLinkTraining(port, field, value)) + { + return false; + } + } + else if (field == PORT_PREEMPHASIS) + { + if (!this->parsePortSerdes(port.serdes.preemphasis, field, value)) + { + return false; + } + } + else if (field == PORT_IDRIVER) + { + if (!this->parsePortSerdes(port.serdes.idriver, field, value)) + { + return false; + } + } + else if (field == PORT_IPREDRIVER) + { + if (!this->parsePortSerdes(port.serdes.ipredriver, field, value)) + { + return false; + } + } + else if (field == PORT_PRE1) + { + if (!this->parsePortSerdes(port.serdes.pre1, field, value)) + { + return false; + } + } + else if (field == PORT_PRE2) + { + if (!this->parsePortSerdes(port.serdes.pre2, field, value)) + { + return false; + } + } + else if (field == PORT_PRE3) + { + if (!this->parsePortSerdes(port.serdes.pre3, field, value)) + { + return false; + } + } + else if (field == PORT_MAIN) + { + if (!this->parsePortSerdes(port.serdes.main, field, value)) + { + return false; + } + } + else if (field == PORT_POST1) + { + if (!this->parsePortSerdes(port.serdes.post1, field, value)) + { + return false; + } + } + else if (field == PORT_POST2) + { + if (!this->parsePortSerdes(port.serdes.post2, field, value)) + { + return false; + } + } + else if (field == PORT_POST3) + { + if (!this->parsePortSerdes(port.serdes.post3, field, value)) + { + return false; + } + } + else if (field == PORT_ATTN) + { + if (!this->parsePortSerdes(port.serdes.attn, field, value)) + { + return false; + } + } + else if (field == PORT_OB_M2LP) + { + if (!this->parsePortSerdes(port.serdes.ob_m2lp, 
field, value)) + { + return false; + } + } + else if (field == PORT_OB_ALEV_OUT) + { + if (!this->parsePortSerdes(port.serdes.ob_alev_out, field, value)) + { + return false; + } + } + else if (field == PORT_OBPLEV) + { + if (!this->parsePortSerdes(port.serdes.obplev, field, value)) + { + return false; + } + } + else if (field == PORT_OBNLEV) + { + if (!this->parsePortSerdes(port.serdes.obnlev, field, value)) + { + return false; + } + } + else if (field == PORT_REGN_BFM1P) + { + if (!this->parsePortSerdes(port.serdes.regn_bfm1p, field, value)) + { + return false; + } + } + else if (field == PORT_REGN_BFM1N) + { + if (!this->parsePortSerdes(port.serdes.regn_bfm1n, field, value)) + { + return false; + } + } + else if (field == PORT_ROLE) + { + if (!this->parsePortRole(port, field, value)) + { + return false; + } + } + else if (field == PORT_ADMIN_STATUS) + { + if (!this->parsePortAdminStatus(port, field, value)) + { + return false; + } + } + else if (field == PORT_DESCRIPTION) + { + if (!this->parsePortDescription(port, field, value)) + { + return false; + } + } + else + { + SWSS_LOG_WARN("Unknown field(%s): skipping ...", field.c_str()); + } + } + + return true; +} + +bool PortHelper::validatePortConfig(PortConfig &port) const +{ + SWSS_LOG_ENTER(); + + if (!port.lanes.is_set) + { + SWSS_LOG_WARN("Validation error: missing mandatory field(%s)", PORT_LANES); + return false; + } + + if (!port.speed.is_set) + { + SWSS_LOG_WARN("Validation error: missing mandatory field(%s)", PORT_SPEED); + return false; + } + + if (!port.admin_status.is_set) + { + SWSS_LOG_INFO( + "Missing non mandatory field(%s): setting default value(%s)", + PORT_ADMIN_STATUS, + PORT_STATUS_DOWN + ); + + port.admin_status.value = false; + port.admin_status.is_set = true; + + port.fieldValueMap[PORT_ADMIN_STATUS] = PORT_STATUS_DOWN; + } + + return true; +} diff --git a/orchagent/port/porthlpr.h b/orchagent/port/porthlpr.h new file mode 100644 index 0000000000..6729a83a4d --- /dev/null +++ 
b/orchagent/port/porthlpr.h @@ -0,0 +1,56 @@ +#pragma once + +#include + +#include +#include + +#include "portcnt.h" + +class PortHelper final +{ +public: + PortHelper() = default; + ~PortHelper() = default; + +public: + bool fecToStr(std::string &str, sai_port_fec_mode_t value) const; + bool fecToSaiFecMode(const std::string &str, sai_port_fec_mode_t &value) const; + bool fecIsOverrideRequired(const std::string &str) const; + + std::string getAutonegStr(const PortConfig &port) const; + std::string getPortInterfaceTypeStr(const PortConfig &port) const; + std::string getAdvInterfaceTypesStr(const PortConfig &port) const; + std::string getFecStr(const PortConfig &port) const; + std::string getPfcAsymStr(const PortConfig &port) const; + std::string getLearnModeStr(const PortConfig &port) const; + std::string getLinkTrainingStr(const PortConfig &port) const; + std::string getAdminStatusStr(const PortConfig &port) const; + + bool parsePortConfig(PortConfig &port) const; + bool validatePortConfig(PortConfig &port) const; + +private: + std::string getFieldValueStr(const PortConfig &port, const std::string &field) const; + + template <typename T> + bool parsePortSerdes(T &serdes, const std::string &field, const std::string &value) const; + + bool parsePortAlias(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortIndex(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortLanes(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortSpeed(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortAutoneg(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortAdvSpeeds(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortInterfaceType(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortAdvInterfaceTypes(PortConfig &port, 
const std::string &field, const std::string &value) const; + bool parsePortFec(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortMtu(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortTpid(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortPfcAsym(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortLearnMode(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortLinkTraining(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortRole(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortAdminStatus(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortDescription(PortConfig &port, const std::string &field, const std::string &value) const; +}; diff --git a/orchagent/port/portschema.h b/orchagent/port/portschema.h new file mode 100644 index 0000000000..5c4ad0d542 --- /dev/null +++ b/orchagent/port/portschema.h @@ -0,0 +1,89 @@ +#pragma once + +// defines ------------------------------------------------------------------------------------------------------------ + +#define PORT_MODE_OFF "off" +#define PORT_MODE_ON "on" + +#define PORT_STATUS_DOWN "down" +#define PORT_STATUS_UP "up" + +#define PORT_ADV_ALL "all" + +#define PORT_INTERFACE_TYPE_NONE "none" +#define PORT_INTERFACE_TYPE_CR "cr" +#define PORT_INTERFACE_TYPE_CR2 "cr2" +#define PORT_INTERFACE_TYPE_CR4 "cr4" +#define PORT_INTERFACE_TYPE_CR8 "cr8" +#define PORT_INTERFACE_TYPE_SR "sr" +#define PORT_INTERFACE_TYPE_SR2 "sr2" +#define PORT_INTERFACE_TYPE_SR4 "sr4" +#define PORT_INTERFACE_TYPE_SR8 "sr8" +#define PORT_INTERFACE_TYPE_LR "lr" +#define PORT_INTERFACE_TYPE_LR4 "lr4" +#define PORT_INTERFACE_TYPE_LR8 "lr8" +#define PORT_INTERFACE_TYPE_KR "kr" +#define PORT_INTERFACE_TYPE_KR4 "kr4" +#define 
PORT_INTERFACE_TYPE_KR8 "kr8" +#define PORT_INTERFACE_TYPE_CAUI "caui" +#define PORT_INTERFACE_TYPE_GMII "gmii" +#define PORT_INTERFACE_TYPE_SFI "sfi" +#define PORT_INTERFACE_TYPE_XLAUI "xlaui" +#define PORT_INTERFACE_TYPE_KR2 "kr2" +#define PORT_INTERFACE_TYPE_CAUI4 "caui4" +#define PORT_INTERFACE_TYPE_XAUI "xaui" +#define PORT_INTERFACE_TYPE_XFI "xfi" +#define PORT_INTERFACE_TYPE_XGMII "xgmii" + +#define PORT_FEC_NONE "none" +#define PORT_FEC_RS "rs" +#define PORT_FEC_FC "fc" +#define PORT_FEC_AUTO "auto" + +#define PORT_LEARN_MODE_DROP "drop" +#define PORT_LEARN_MODE_DISABLE "disable" +#define PORT_LEARN_MODE_HARDWARE "hardware" +#define PORT_LEARN_MODE_CPU_TRAP "cpu_trap" +#define PORT_LEARN_MODE_CPU_LOG "cpu_log" +#define PORT_LEARN_MODE_NOTIFICATION "notification" + +#define PORT_ROLE_EXT "Ext" +#define PORT_ROLE_INT "Int" +#define PORT_ROLE_INB "Inb" +#define PORT_ROLE_REC "Rec" +#define PORT_ROLE_DPC "Dpc" + +#define PORT_ALIAS "alias" +#define PORT_INDEX "index" +#define PORT_LANES "lanes" +#define PORT_SPEED "speed" +#define PORT_AUTONEG "autoneg" +#define PORT_ADV_SPEEDS "adv_speeds" +#define PORT_INTERFACE_TYPE "interface_type" +#define PORT_ADV_INTERFACE_TYPES "adv_interface_types" +#define PORT_FEC "fec" +#define PORT_MTU "mtu" +#define PORT_TPID "tpid" +#define PORT_PFC_ASYM "pfc_asym" +#define PORT_LEARN_MODE "learn_mode" +#define PORT_LINK_TRAINING "link_training" +#define PORT_PREEMPHASIS "preemphasis" +#define PORT_IDRIVER "idriver" +#define PORT_IPREDRIVER "ipredriver" +#define PORT_PRE1 "pre1" +#define PORT_PRE2 "pre2" +#define PORT_PRE3 "pre3" +#define PORT_MAIN "main" +#define PORT_POST1 "post1" +#define PORT_POST2 "post2" +#define PORT_POST3 "post3" +#define PORT_ATTN "attn" +#define PORT_OB_M2LP "ob_m2lp" +#define PORT_OB_ALEV_OUT "ob_alev_out" +#define PORT_OBPLEV "obplev" +#define PORT_OBNLEV "obnlev" +#define PORT_REGN_BFM1P "regn_bfm1p" +#define PORT_REGN_BFM1N "regn_bfm1n" +#define PORT_ROLE "role" +#define PORT_ADMIN_STATUS 
"admin_status" +#define PORT_DESCRIPTION "description" diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index 14e5108660..50c23d4aec 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -6,6 +6,7 @@ #include "vxlanorch.h" #include "directory.h" #include "subintf.h" +#include "notifications.h" #include #include @@ -16,7 +17,6 @@ #include #include #include -#include #include #include "net/if.h" @@ -30,6 +30,7 @@ #include "countercheckorch.h" #include "notifier.h" #include "fdborch.h" +#include "switchorch.h" #include "stringutility.h" #include "subscriberstatetable.h" @@ -49,17 +50,22 @@ extern NeighOrch *gNeighOrch; extern CrmOrch *gCrmOrch; extern BufferOrch *gBufferOrch; extern FdbOrch *gFdbOrch; +extern SwitchOrch *gSwitchOrch; extern Directory gDirectory; extern sai_system_port_api_t *sai_system_port_api; extern string gMySwitchType; extern int32_t gVoqMySwitchId; extern string gMyHostName; extern string gMyAsicName; +extern event_handle_t g_events_handle; + +// defines ------------------------------------------------------------------------------------------------------------ #define DEFAULT_SYSTEM_PORT_MTU 9100 #define VLAN_PREFIX "Vlan" #define DEFAULT_VLAN_ID 1 #define MAX_VALID_VLAN_ID 4094 +#define DEFAULT_HOSTIF_TX_QUEUE 7 #define PORT_SPEED_LIST_DEFAULT_SIZE 16 #define PORT_STATE_POLLING_SEC 5 @@ -71,26 +77,17 @@ extern string gMyAsicName; #define PG_DROP_FLEX_STAT_COUNTER_POLL_MSECS "10000" #define PORT_RATE_FLEX_COUNTER_POLLING_INTERVAL_MS "1000" +// types -------------------------------------------------------------------------------------------------------------- -static map fec_mode_map = +struct PortAttrValue { - { "none", SAI_PORT_FEC_MODE_NONE }, - { "rs", SAI_PORT_FEC_MODE_RS }, - { "fc", SAI_PORT_FEC_MODE_FC } + std::vector lanes; }; -static map fec_mode_reverse_map = -{ - { SAI_PORT_FEC_MODE_NONE, "none" }, - { SAI_PORT_FEC_MODE_RS, "rs" }, - { SAI_PORT_FEC_MODE_FC, "fc" } -}; +typedef PortAttrValue 
PortAttrValue_t; +typedef std::map> PortSerdesAttrMap_t; -static map pfc_asym_map = -{ - { "on", SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_SEPARATE }, - { "off", SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_COMBINED } -}; +// constants ---------------------------------------------------------------------------------------------------------- static map learn_mode_map = { @@ -121,12 +118,6 @@ static map autoneg_mode_map = { "off", 0 } }; -static map link_training_mode_map = -{ - { "on", 1 }, - { "off", 0 } -}; - static map link_training_failure_map = { { SAI_PORT_LINK_TRAINING_FAILURE_STATUS_NO_ERROR, "none" }, @@ -159,52 +150,6 @@ static map interface_type_map = { "kr8", SAI_PORT_INTERFACE_TYPE_KR8 } }; -// Interface type map used for auto negotiation -static map interface_type_map_for_an = -{ - { "none", SAI_PORT_INTERFACE_TYPE_NONE }, - { "cr", SAI_PORT_INTERFACE_TYPE_CR }, - { "cr2", SAI_PORT_INTERFACE_TYPE_CR2 }, - { "cr4", SAI_PORT_INTERFACE_TYPE_CR4 }, - { "cr8", SAI_PORT_INTERFACE_TYPE_CR8 }, - { "sr", SAI_PORT_INTERFACE_TYPE_SR }, - { "sr2", SAI_PORT_INTERFACE_TYPE_SR2 }, - { "sr4", SAI_PORT_INTERFACE_TYPE_SR4 }, - { "sr8", SAI_PORT_INTERFACE_TYPE_SR8 }, - { "lr", SAI_PORT_INTERFACE_TYPE_LR }, - { "lr4", SAI_PORT_INTERFACE_TYPE_LR4 }, - { "lr8", SAI_PORT_INTERFACE_TYPE_LR8 }, - { "kr", SAI_PORT_INTERFACE_TYPE_KR }, - { "kr4", SAI_PORT_INTERFACE_TYPE_KR4 }, - { "kr8", SAI_PORT_INTERFACE_TYPE_KR8 }, - { "caui", SAI_PORT_INTERFACE_TYPE_CAUI }, - { "gmii", SAI_PORT_INTERFACE_TYPE_GMII }, - { "sfi", SAI_PORT_INTERFACE_TYPE_SFI }, - { "xlaui", SAI_PORT_INTERFACE_TYPE_XLAUI }, - { "kr2", SAI_PORT_INTERFACE_TYPE_KR2 }, - { "caui4", SAI_PORT_INTERFACE_TYPE_CAUI4 }, - { "xaui", SAI_PORT_INTERFACE_TYPE_XAUI }, - { "xfi", SAI_PORT_INTERFACE_TYPE_XFI }, - { "xgmii", SAI_PORT_INTERFACE_TYPE_XGMII } -}; - -static const std::string& getValidInterfaceTypes() -{ - static std::string validInterfaceTypes; - if (validInterfaceTypes.empty()) - { - std::ostringstream oss; - for (auto &iter : 
interface_type_map_for_an) - { - oss << iter.first << " "; - } - validInterfaceTypes = oss.str(); - boost::to_upper(validInterfaceTypes); - } - - return validInterfaceTypes; -} - const vector port_stat_ids = { SAI_PORT_STAT_IF_IN_OCTETS, @@ -271,7 +216,23 @@ const vector port_stat_ids = SAI_PORT_STAT_IP_IN_RECEIVES, SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES, SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES, - SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS + SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S0, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S1, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S2, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S3, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S4, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S5, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S6, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S7, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S8, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S9, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S10, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S11, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S12, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S13, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S14, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S15 }; const vector gbport_stat_ids = @@ -332,11 +293,106 @@ static char* hostif_vlan_tag[] = { [SAI_HOSTIF_VLAN_TAG_ORIGINAL] = "SAI_HOSTIF_VLAN_TAG_ORIGINAL" }; +// functions ---------------------------------------------------------------------------------------------------------- + static bool isValidPortTypeForLagMember(const Port& port) { return (port.m_type == Port::Type::PHY || port.m_type == Port::Type::SYSTEM); } +static void getPortSerdesAttr(PortSerdesAttrMap_t &map, const PortConfig &port) +{ + if (port.serdes.preemphasis.is_set) + { + map[SAI_PORT_SERDES_ATTR_PREEMPHASIS] = port.serdes.preemphasis.value; + } + + if (port.serdes.idriver.is_set) + { + map[SAI_PORT_SERDES_ATTR_IDRIVER] = port.serdes.idriver.value; + } + + if 
(port.serdes.ipredriver.is_set) + { + map[SAI_PORT_SERDES_ATTR_IPREDRIVER] = port.serdes.ipredriver.value; + } + + if (port.serdes.pre1.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_FIR_PRE1] = port.serdes.pre1.value; + } + + if (port.serdes.pre2.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_FIR_PRE2] = port.serdes.pre2.value; + } + + if (port.serdes.pre3.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_FIR_PRE3] = port.serdes.pre3.value; + } + + if (port.serdes.main.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_FIR_MAIN] = port.serdes.main.value; + } + + if (port.serdes.post1.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_FIR_POST1] = port.serdes.post1.value; + } + + if (port.serdes.post2.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_FIR_POST2] = port.serdes.post2.value; + } + + if (port.serdes.post3.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_FIR_POST3] = port.serdes.post3.value; + } + + if (port.serdes.attn.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_FIR_ATTN] = port.serdes.attn.value; + } + + if (port.serdes.ob_m2lp.is_set) + { + + map[SAI_PORT_SERDES_ATTR_TX_PAM4_RATIO] = port.serdes.ob_m2lp.value; + } + + if (port.serdes.ob_alev_out.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_OUT_COMMON_MODE] = port.serdes.ob_alev_out.value; + } + + if (port.serdes.obplev.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_PMOS_COMMON_MODE] = port.serdes.obplev.value; + } + + if (port.serdes.obnlev.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_NMOS_COMMON_MODE] = port.serdes.obnlev.value; + } + + if (port.serdes.regn_bfm1p.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_PMOS_VLTG_REG] = port.serdes.regn_bfm1p.value; + } + + if (port.serdes.regn_bfm1n.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_NMOS_VLTG_REG] = port.serdes.regn_bfm1n.value; + } + + +} + +// Port OA ------------------------------------------------------------------------------------------------------------ + /* * Initialize PortsOrch * 0) If Gearbox is enabled, then initialize the external PHYs as defined in @@ -368,6 +424,8 @@ PortsOrch::PortsOrch(DBConnector *db, 
DBConnector *stateDb, vector(new DBConnector("COUNTERS_DB", 0)); m_counterTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_PORT_NAME_MAP)); + m_counterSysPortTable = unique_ptr
( + new Table(m_counter_db.get(), COUNTERS_SYSTEM_PORT_NAME_MAP)); m_counterLagTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_LAG_NAME_MAP)); FieldValueTuple tuple("", ""); vector defaultLagFv; @@ -376,12 +434,14 @@ PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vector(new Table(db, APP_PORT_TABLE_NAME)); + m_sendToIngressPortTable = unique_ptr
(new Table(db, APP_SEND_TO_INGRESS_PORT_TABLE_NAME)); /* Initialize gearbox */ m_gearboxTable = unique_ptr
(new Table(db, "_GEARBOX_TABLE")); /* Initialize queue tables */ m_queueTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_QUEUE_NAME_MAP)); + m_voqTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_VOQ_NAME_MAP)); m_queuePortTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_QUEUE_PORT_MAP)); m_queueIndexTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_QUEUE_INDEX_MAP)); m_queueTypeTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_QUEUE_TYPE_MAP)); @@ -444,100 +504,11 @@ PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vectorget_switch_attribute(gSwitchId, 1, &attr); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to get CPU port, rv:%d", status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_SWITCH, status); - if (handle_status != task_process_status::task_success) - { - throw runtime_error("PortsOrch initialization failure"); - } - } - - m_cpuPort = Port("CPU", Port::CPU); - m_cpuPort.m_port_id = attr.value.oid; - m_portList[m_cpuPort.m_alias] = m_cpuPort; - m_port_ref_count[m_cpuPort.m_alias] = 0; - - /* Get port number */ - attr.id = SAI_SWITCH_ATTR_PORT_NUMBER; - - status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to get port number, rv:%d", status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_SWITCH, status); - if (handle_status != task_process_status::task_success) - { - throw runtime_error("PortsOrch initialization failure"); - } - } - - m_portCount = attr.value.u32; - SWSS_LOG_NOTICE("Get %d ports", m_portCount); - - /* Get port list */ - vector port_list; - port_list.resize(m_portCount); - - attr.id = SAI_SWITCH_ATTR_PORT_LIST; - attr.value.objlist.count = (uint32_t)port_list.size(); - attr.value.objlist.list = port_list.data(); - - status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to get port list, rv:%d", status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_SWITCH, status); - if (handle_status != task_process_status::task_success) - { - throw runtime_error("PortsOrch initialization failure"); - } - } - - /* Get port hardware lane info */ - for (i = 0; i < m_portCount; i++) - { - sai_uint32_t lanes[8] = { 0,0,0,0,0,0,0,0 }; - attr.id = SAI_PORT_ATTR_HW_LANE_LIST; - attr.value.u32list.count = 
8; - attr.value.u32list.list = lanes; - - status = sai_port_api->get_port_attribute(port_list[i], 1, &attr); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to get hardware lane list pid:%" PRIx64, port_list[i]); - task_process_status handle_status = handleSaiGetStatus(SAI_API_PORT, status); - if (handle_status != task_process_status::task_success) - { - throw runtime_error("PortsOrch initialization failure"); - } - } - - set tmp_lane_set; - for (j = 0; j < attr.value.u32list.count; j++) - { - tmp_lane_set.insert(attr.value.u32list.list[j]); - } - - string tmp_lane_str = ""; - for (auto s : tmp_lane_set) - { - tmp_lane_str += to_string(s) + " "; - } - tmp_lane_str = tmp_lane_str.substr(0, tmp_lane_str.size()-1); + this->initializeCpuPort(); - SWSS_LOG_NOTICE("Get port with lanes pid:%" PRIx64 " lanes:%s", port_list[i], tmp_lane_str.c_str()); - m_portListLaneMap[tmp_lane_set] = port_list[i]; - } + /* Get ports */ + this->initializePorts(); /* Get the flood control types and check if combined mode is supported */ vector supported_flood_control_types(max_flood_control_types, 0); @@ -578,39 +549,125 @@ PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vector attrs; - attr.id = SAI_SWITCH_ATTR_DEFAULT_1Q_BRIDGE_ID; - attrs.push_back(attr); - attr.id = SAI_SWITCH_ATTR_DEFAULT_VLAN_ID; - attrs.push_back(attr); + // Query whether SAI supports Host Tx Signal and Host Tx Notification - status = sai_switch_api->get_switch_attribute(gSwitchId, (uint32_t)attrs.size(), attrs.data()); - if (status != SAI_STATUS_SUCCESS) + sai_attr_capability_t capability; + + bool saiHwTxSignalSupported = false; + bool saiTxReadyNotifySupported = false; + + if (sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_PORT, + SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE, + &capability) == SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to get default 1Q bridge and/or default VLAN, rv:%d", status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_SWITCH, status); - 
if (handle_status != task_process_status::task_success) + if (capability.create_implemented == true) { - throw runtime_error("PortsOrch initialization failure"); + SWSS_LOG_DEBUG("SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE is true"); + saiHwTxSignalSupported = true; + } + } + + if (sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_SWITCH, + SAI_SWITCH_ATTR_PORT_HOST_TX_READY_NOTIFY, + &capability) == SAI_STATUS_SUCCESS) + { + if (capability.create_implemented == true) + { + SWSS_LOG_DEBUG("SAI_SWITCH_ATTR_PORT_HOST_TX_READY_NOTIFY is true"); + saiTxReadyNotifySupported = true; + } + } + + if (saiHwTxSignalSupported && saiTxReadyNotifySupported) + { + SWSS_LOG_DEBUG("m_cmisModuleAsicSyncSupported is true"); + m_cmisModuleAsicSyncSupported = true; + + // set HOST_TX_READY callback function attribute to SAI, only if the feature is enabled + sai_attribute_t attr; + attr.id = SAI_SWITCH_ATTR_PORT_HOST_TX_READY_NOTIFY; + attr.value.ptr = (void *)on_port_host_tx_ready; + + if (sai_switch_api->set_switch_attribute(gSwitchId, &attr) != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("PortsOrch failed to set SAI_SWITCH_ATTR_PORT_HOST_TX_READY_NOTIFY attribute"); } + + Orch::addExecutor(new Consumer(new SubscriberStateTable(stateDb, STATE_TRANSCEIVER_INFO_TABLE_NAME, TableConsumable::DEFAULT_POP_BATCH_SIZE, 0), this, STATE_TRANSCEIVER_INFO_TABLE_NAME)); } - m_default1QBridge = attrs[0].value.oid; - m_defaultVlan = attrs[1].value.oid; + if (gMySwitchType != "dpu") + { + sai_attr_capability_t attr_cap; + if (sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_PORT, + SAI_PORT_ATTR_AUTO_NEG_FEC_MODE_OVERRIDE, + &attr_cap) != SAI_STATUS_SUCCESS) + { + SWSS_LOG_NOTICE("Unable to query autoneg fec mode override"); + } + else if (attr_cap.set_implemented && attr_cap.create_implemented) + { + fec_override_sup = true; + } + + sai_attr_capability_t oper_fec_cap; + if (sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_PORT, + SAI_PORT_ATTR_OPER_PORT_FEC_MODE, &oper_fec_cap) + != 
SAI_STATUS_SUCCESS) + { + SWSS_LOG_NOTICE("Unable to query capability support for oper fec mode"); + } + else if (oper_fec_cap.get_implemented) + { + oper_fec_sup = true; + } + + /* Get default 1Q bridge and default VLAN */ + sai_status_t status; + sai_attribute_t attr; + vector attrs; + attr.id = SAI_SWITCH_ATTR_DEFAULT_1Q_BRIDGE_ID; + attrs.push_back(attr); + attr.id = SAI_SWITCH_ATTR_DEFAULT_VLAN_ID; + attrs.push_back(attr); + + status = sai_switch_api->get_switch_attribute(gSwitchId, (uint32_t)attrs.size(), attrs.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get default 1Q bridge and/or default VLAN, rv:%d", status); + task_process_status handle_status = handleSaiGetStatus(SAI_API_SWITCH, status); + if (handle_status != task_process_status::task_success) + { + throw runtime_error("PortsOrch initialization failure"); + } + } + + m_default1QBridge = attrs[0].value.oid; + m_defaultVlan = attrs[1].value.oid; + } /* Get System ports */ getSystemPorts(); - removeDefaultVlanMembers(); - removeDefaultBridgePorts(); + if (gMySwitchType != "dpu") + { + removeDefaultVlanMembers(); + removeDefaultBridgePorts(); + } /* Add port oper status notification support */ - DBConnector *notificationsDb = new DBConnector("ASIC_DB", 0); - m_portStatusNotificationConsumer = new swss::NotificationConsumer(notificationsDb, "NOTIFICATIONS"); + m_notificationsDb = make_shared("ASIC_DB", 0); + m_portStatusNotificationConsumer = new swss::NotificationConsumer(m_notificationsDb.get(), "NOTIFICATIONS"); auto portStatusNotificatier = new Notifier(m_portStatusNotificationConsumer, this, "PORT_STATUS_NOTIFICATIONS"); Orch::addExecutor(portStatusNotificatier); + if (m_cmisModuleAsicSyncSupported) + { + m_portHostTxReadyNotificationConsumer = new swss::NotificationConsumer(m_notificationsDb.get(), "NOTIFICATIONS"); + auto portHostTxReadyNotificatier = new Notifier(m_portHostTxReadyNotificationConsumer, this, "PORT_HOST_TX_NOTIFICATIONS"); + 
Orch::addExecutor(portHostTxReadyNotificatier); + } + if (gMySwitchType == "voq") { string tableName; @@ -631,78 +688,391 @@ PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vector vlan_member_list(m_portCount + m_systemPortCount); + SWSS_LOG_ENTER(); sai_attribute_t attr; - attr.id = SAI_VLAN_ATTR_MEMBER_LIST; - attr.value.objlist.count = (uint32_t)vlan_member_list.size(); - attr.value.objlist.list = vlan_member_list.data(); + attr.id = SAI_SWITCH_ATTR_CPU_PORT; - sai_status_t status = sai_vlan_api->get_vlan_attribute(m_defaultVlan, 1, &attr); + auto status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to get VLAN member list in default VLAN, rv:%d", status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_VLAN, status); + SWSS_LOG_ERROR("Failed to get CPU port, rv:%d", status); + auto handle_status = handleSaiGetStatus(SAI_API_SWITCH, status); if (handle_status != task_process_status::task_success) { - throw runtime_error("PortsOrch initialization failure"); + SWSS_LOG_THROW("PortsOrch initialization failure"); } } - /* Remove VLAN members in default VLAN */ - for (uint32_t i = 0; i < attr.value.objlist.count; i++) - { - status = sai_vlan_api->remove_vlan_member(vlan_member_list[i]); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to remove VLAN member, rv:%d", status); - throw runtime_error("PortsOrch initialization failure"); - } - } + this->m_cpuPort = Port("CPU", Port::CPU); + this->m_cpuPort.m_port_id = attr.value.oid; + this->m_portList[m_cpuPort.m_alias] = m_cpuPort; + this->m_port_ref_count[m_cpuPort.m_alias] = 0; - SWSS_LOG_NOTICE("Remove %d VLAN members from default VLAN", attr.value.objlist.count); + SWSS_LOG_NOTICE("Get CPU port pid:%" PRIx64, this->m_cpuPort.m_port_id); } -void PortsOrch::removeDefaultBridgePorts() +void PortsOrch::initializePorts() { - /* Get bridge ports in default 1Q bridge - * By default, there will be 
(m_portCount + m_systemPortCount) number of SAI_BRIDGE_PORT_TYPE_PORT - * ports and one SAI_BRIDGE_PORT_TYPE_1Q_ROUTER port. The former type of - * ports will be removed. */ - vector bridge_port_list(m_portCount + m_systemPortCount + 1); + SWSS_LOG_ENTER(); + sai_status_t status; sai_attribute_t attr; - attr.id = SAI_BRIDGE_ATTR_PORT_LIST; - attr.value.objlist.count = (uint32_t)bridge_port_list.size(); - attr.value.objlist.list = bridge_port_list.data(); - sai_status_t status = sai_bridge_api->get_bridge_attribute(m_default1QBridge, 1, &attr); + // Get port number + attr.id = SAI_SWITCH_ATTR_PORT_NUMBER; + + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to get bridge port list in default 1Q bridge, rv:%d", status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_BRIDGE, status); + SWSS_LOG_ERROR("Failed to get port number, rv:%d", status); + auto handle_status = handleSaiGetStatus(SAI_API_SWITCH, status); if (handle_status != task_process_status::task_success) { - throw runtime_error("PortsOrch initialization failure"); + SWSS_LOG_THROW("PortsOrch initialization failure"); } } - auto bridge_port_count = attr.value.objlist.count; + this->m_portCount = attr.value.u32; - /* Remove SAI_BRIDGE_PORT_TYPE_PORT bridge ports in default 1Q bridge */ - for (uint32_t i = 0; i < bridge_port_count; i++) + SWSS_LOG_NOTICE("Get %d ports", this->m_portCount); + + // Get port list + std::vector portList(this->m_portCount, SAI_NULL_OBJECT_ID); + + attr.id = SAI_SWITCH_ATTR_PORT_LIST; + attr.value.objlist.count = static_cast(portList.size()); + attr.value.objlist.list = portList.data(); + + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + if (status != SAI_STATUS_SUCCESS) { - attr.id = SAI_BRIDGE_PORT_ATTR_TYPE; - attr.value.s32 = SAI_NULL_OBJECT_ID; + SWSS_LOG_ERROR("Failed to get port list, rv:%d", status); + auto handle_status = handleSaiGetStatus(SAI_API_SWITCH, 
status); + if (handle_status != task_process_status::task_success) + { + SWSS_LOG_THROW("PortsOrch initialization failure"); + } + } - status = sai_bridge_api->get_bridge_port_attribute(bridge_port_list[i], 1, &attr); + // Get port hardware lane info + for (const auto &portId : portList) + { + std::vector laneList(Port::max_lanes, 0); + + attr.id = SAI_PORT_ATTR_HW_LANE_LIST; + attr.value.u32list.count = static_cast(laneList.size()); + attr.value.u32list.list = laneList.data(); + + status = sai_port_api->get_port_attribute(portId, 1, &attr); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to get bridge port type, rv:%d", status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_BRIDGE, status); + SWSS_LOG_ERROR("Failed to get hardware lane list pid:%" PRIx64, portId); + auto handle_status = handleSaiGetStatus(SAI_API_PORT, status); + if (handle_status != task_process_status::task_success) + { + SWSS_LOG_THROW("PortsOrch initialization failure"); + } + } + + std::set laneSet; + for (sai_uint32_t i = 0; i < attr.value.u32list.count; i++) + { + laneSet.insert(attr.value.u32list.list[i]); + } + + this->m_portListLaneMap[laneSet] = portId; + + SWSS_LOG_NOTICE( + "Get port with lanes pid:%" PRIx64 " lanes:%s", + portId, swss::join(" ", laneSet.cbegin(), laneSet.cend()).c_str() + ); + } +} + +auto PortsOrch::getPortConfigState() const -> port_config_state_t +{ + return this->m_portConfigState; +} + +void PortsOrch::setPortConfigState(port_config_state_t value) +{ + this->m_portConfigState = value; +} + +bool PortsOrch::addPortBulk(const std::vector &portList) +{ + // The method is used to create ports in a bulk mode. + // The action takes place when: + // 1. Ports are being initialized at system start + // 2. 
Ports are being added/removed by a user at runtime + + SWSS_LOG_ENTER(); + + if (portList.empty()) + { + return true; + } + + std::vector attrValueList; + std::vector> attrDataList; + std::vector attrCountList; + std::vector attrPtrList; + + auto portCount = static_cast(portList.size()); + std::vector oidList(portCount, SAI_NULL_OBJECT_ID); + std::vector statusList(portCount, SAI_STATUS_SUCCESS); + + for (const auto &cit : portList) + { + sai_attribute_t attr; + std::vector attrList; + + if (cit.lanes.is_set) + { + PortAttrValue_t attrValue; + auto &outList = attrValue.lanes; + auto &inList = cit.lanes.value; + outList.insert(outList.begin(), inList.begin(), inList.end()); + attrValueList.push_back(attrValue); + + attr.id = SAI_PORT_ATTR_HW_LANE_LIST; + attr.value.u32list.count = static_cast(attrValueList.back().lanes.size()); + attr.value.u32list.list = attrValueList.back().lanes.data(); + attrList.push_back(attr); + } + + if (cit.speed.is_set) + { + attr.id = SAI_PORT_ATTR_SPEED; + attr.value.u32 = cit.speed.value; + attrList.push_back(attr); + } + + if (cit.autoneg.is_set) + { + attr.id = SAI_PORT_ATTR_AUTO_NEG_MODE; + attr.value.booldata = cit.autoneg.value; + attrList.push_back(attr); + } + + if (cit.fec.is_set) + { + attr.id = SAI_PORT_ATTR_FEC_MODE; + attr.value.s32 = cit.fec.value; + attrList.push_back(attr); + } + + if (m_cmisModuleAsicSyncSupported) + { + attr.id = SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE; + attr.value.booldata = false; + attrList.push_back(attr); + } + + attrDataList.push_back(attrList); + attrCountList.push_back(static_cast(attrDataList.back().size())); + attrPtrList.push_back(attrDataList.back().data()); + } + + auto status = sai_port_api->create_ports( + gSwitchId, portCount, attrCountList.data(), attrPtrList.data(), + SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR, + oidList.data(), statusList.data() + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create ports with bulk operation, rv:%d", status); + + auto handle_status = 
handleSaiCreateStatus(SAI_API_PORT, status); + if (handle_status != task_process_status::task_success) + { + SWSS_LOG_THROW("PortsOrch bulk create failure"); + } + + return false; + } + + for (std::uint32_t i = 0; i < portCount; i++) + { + if (statusList.at(i) != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to create port %s with bulk operation, rv:%d", + portList.at(i).key.c_str(), statusList.at(i) + ); + + auto handle_status = handleSaiCreateStatus(SAI_API_PORT, statusList.at(i)); + if (handle_status != task_process_status::task_success) + { + SWSS_LOG_THROW("PortsOrch bulk create failure"); + } + + return false; + } + + m_portListLaneMap[portList.at(i).lanes.value] = oidList.at(i); + m_portCount++; + } + + // newly created ports might be put in the default vlan so remove all ports from + // the default vlan. + if (gMySwitchType == "voq") { + removeDefaultVlanMembers(); + removeDefaultBridgePorts(); + } + + SWSS_LOG_NOTICE("Created ports: %s", swss::join(',', oidList.begin(), oidList.end()).c_str()); + + return true; +} + +bool PortsOrch::removePortBulk(const std::vector &portList) +{ + SWSS_LOG_ENTER(); + + if (portList.empty()) + { + return true; + } + + for (const auto &cit : portList) + { + Port p; + + // Make sure to bring down admin state + if (getPort(cit, p)) + { + setPortAdminStatus(p, false); + } + // else : port is in default state or not yet created + + // Remove port serdes (if exists) before removing port since this reference is dependency + removePortSerdesAttribute(cit); + } + + auto portCount = static_cast(portList.size()); + std::vector statusList(portCount, SAI_STATUS_SUCCESS); + + auto status = sai_port_api->remove_ports( + portCount, portList.data(), + SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR, + statusList.data() + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove ports with bulk operation, rv:%d", status); + + auto handle_status = handleSaiRemoveStatus(SAI_API_PORT, status); + if (handle_status != 
task_process_status::task_success) + { + SWSS_LOG_THROW("PortsOrch bulk remove failure"); + } + + return false; + } + + for (std::uint32_t i = 0; i < portCount; i++) + { + if (statusList.at(i) != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to remove port %" PRIx64 " with bulk operation, rv:%d", + portList.at(i), statusList.at(i) + ); + + auto handle_status = handleSaiRemoveStatus(SAI_API_PORT, statusList.at(i)); + if (handle_status != task_process_status::task_success) + { + SWSS_LOG_THROW("PortsOrch bulk remove failure"); + } + + return false; + } + + m_portSupportedSpeeds.erase(portList.at(i)); + m_portCount--; + } + + SWSS_LOG_NOTICE("Removed ports: %s", swss::join(',', portList.begin(), portList.end()).c_str()); + + return true; +} + +void PortsOrch::removeDefaultVlanMembers() +{ + /* Get VLAN members in default VLAN */ + vector vlan_member_list(m_portCount + m_systemPortCount); + + sai_attribute_t attr; + attr.id = SAI_VLAN_ATTR_MEMBER_LIST; + attr.value.objlist.count = (uint32_t)vlan_member_list.size(); + attr.value.objlist.list = vlan_member_list.data(); + + sai_status_t status = sai_vlan_api->get_vlan_attribute(m_defaultVlan, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get VLAN member list in default VLAN, rv:%d", status); + task_process_status handle_status = handleSaiGetStatus(SAI_API_VLAN, status); + if (handle_status != task_process_status::task_success) + { + throw runtime_error("PortsOrch initialization failure"); + } + } + + /* Remove VLAN members in default VLAN */ + for (uint32_t i = 0; i < attr.value.objlist.count; i++) + { + status = sai_vlan_api->remove_vlan_member(vlan_member_list[i]); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove VLAN member, rv:%d", status); + throw runtime_error("PortsOrch initialization failure"); + } + } + + SWSS_LOG_NOTICE("Remove %d VLAN members from default VLAN", attr.value.objlist.count); +} + +void PortsOrch::removeDefaultBridgePorts() +{ + /* 
Get bridge ports in default 1Q bridge + * By default, there will be (m_portCount + m_systemPortCount) number of SAI_BRIDGE_PORT_TYPE_PORT + * ports and one SAI_BRIDGE_PORT_TYPE_1Q_ROUTER port. The former type of + * ports will be removed. */ + vector bridge_port_list(m_portCount + m_systemPortCount + 1); + + sai_attribute_t attr; + attr.id = SAI_BRIDGE_ATTR_PORT_LIST; + attr.value.objlist.count = (uint32_t)bridge_port_list.size(); + attr.value.objlist.list = bridge_port_list.data(); + + sai_status_t status = sai_bridge_api->get_bridge_attribute(m_default1QBridge, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get bridge port list in default 1Q bridge, rv:%d", status); + task_process_status handle_status = handleSaiGetStatus(SAI_API_BRIDGE, status); + if (handle_status != task_process_status::task_success) + { + throw runtime_error("PortsOrch initialization failure"); + } + } + + auto bridge_port_count = attr.value.objlist.count; + + /* Remove SAI_BRIDGE_PORT_TYPE_PORT bridge ports in default 1Q bridge */ + for (uint32_t i = 0; i < bridge_port_count; i++) + { + attr.id = SAI_BRIDGE_PORT_ATTR_TYPE; + attr.value.s32 = SAI_NULL_OBJECT_ID; + + status = sai_bridge_api->get_bridge_port_attribute(bridge_port_list[i], 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get bridge port type, rv:%d", status); + task_process_status handle_status = handleSaiGetStatus(SAI_API_BRIDGE, status); if (handle_status != task_process_status::task_success) { throw runtime_error("PortsOrch initialization failure"); @@ -798,6 +1168,11 @@ map& PortsOrch::getAllPorts() return m_portList; } +unordered_set& PortsOrch::getAllVlans() +{ + return m_vlanPorts; +} + bool PortsOrch::getPort(string alias, Port &p) { SWSS_LOG_ENTER(); @@ -824,7 +1199,10 @@ bool PortsOrch::getPort(sai_object_id_t id, Port &port) } else { - getPort(itr->second, port); + if (!getPort(itr->second, port)) + { + SWSS_LOG_THROW("Inconsistent saiOidToAlias map and 
m_portList map: oid=%" PRIx64, id); + } return true; } @@ -1065,9 +1443,9 @@ void PortsOrch::getCpuPort(Port &port) port = m_cpuPort; } -/* - * Create host_tx_ready field in PORT_TABLE of STATE-DB - * and set the field to false by default for the +/* + * Create host_tx_ready field in PORT_TABLE of STATE-DB + * and set the field to false by default for the * front port. */ void PortsOrch::initHostTxReadyState(Port &port) @@ -1091,9 +1469,9 @@ void PortsOrch::initHostTxReadyState(Port &port) if (hostTxReady.empty()) { - m_portStateTable.hset(port.m_alias, "host_tx_ready", "false"); - SWSS_LOG_INFO("initalize hostTxReady %s with status %s", - port.m_alias.c_str(), hostTxReady.c_str()); + setHostTxReady(port.m_port_id, "false"); + SWSS_LOG_NOTICE("initialize host_tx_ready as false for port %s", + port.m_alias.c_str()); } } @@ -1105,20 +1483,28 @@ bool PortsOrch::setPortAdminStatus(Port &port, bool state) attr.id = SAI_PORT_ATTR_ADMIN_STATE; attr.value.booldata = state; + // if sync between cmis module configuration and asic is supported, + // do not change host_tx_ready value in STATE DB when admin status is changed. + /* Update the host_tx_ready to false before setting admin_state, when admin state is false */ - if (!state) + if (!state && !m_cmisModuleAsicSyncSupported) { - m_portStateTable.hset(port.m_alias, "host_tx_ready", "false"); - SWSS_LOG_INFO("Set admin status DOWN host_tx_ready to false to port pid:%" PRIx64, - port.m_port_id); + setHostTxReady(port.m_port_id, "false"); + SWSS_LOG_NOTICE("Set admin status DOWN host_tx_ready to false for port %s", + port.m_alias.c_str()); } - + sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to set admin status %s to port pid:%" PRIx64, - state ? "UP" : "DOWN", port.m_port_id); - m_portStateTable.hset(port.m_alias, "host_tx_ready", "false"); + SWSS_LOG_ERROR("Failed to set admin status %s for port %s." 
+ " Setting host_tx_ready as false", + state ? "UP" : "DOWN", port.m_alias.c_str()); + + if (!m_cmisModuleAsicSyncSupported) + { + setHostTxReady(port.m_port_id, "false"); + } task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); if (handle_status != task_success) { @@ -1127,22 +1513,38 @@ bool PortsOrch::setPortAdminStatus(Port &port, bool state) } bool gbstatus = setGearboxPortsAttr(port, SAI_PORT_ATTR_ADMIN_STATE, &state); - if (gbstatus != true) + if (gbstatus != true && !m_cmisModuleAsicSyncSupported) { - m_portStateTable.hset(port.m_alias, "host_tx_ready", "false"); + setHostTxReady(port.m_port_id, "false"); + SWSS_LOG_NOTICE("Set host_tx_ready to false as gbstatus is false " + "for port %s", port.m_alias.c_str()); } - + /* Update the state table for host_tx_ready*/ - if (state && (gbstatus == true) && (status == SAI_STATUS_SUCCESS) ) + if (state && (gbstatus == true) && (status == SAI_STATUS_SUCCESS) && !m_cmisModuleAsicSyncSupported) { - m_portStateTable.hset(port.m_alias, "host_tx_ready", "true"); - SWSS_LOG_INFO("Set admin status UP host_tx_ready to true to port pid:%" PRIx64, - port.m_port_id); - } + setHostTxReady(port.m_port_id, "true"); + SWSS_LOG_NOTICE("Set admin status UP host_tx_ready to true for port %s", + port.m_alias.c_str()); + } return true; } +void PortsOrch::setHostTxReady(sai_object_id_t portId, const std::string &status) +{ + Port p; + + if (!getPort(portId, p)) + { + SWSS_LOG_ERROR("Failed to get port object for port id 0x%" PRIx64, portId); + return; + } + + SWSS_LOG_NOTICE("Setting host_tx_ready status = %s, alias = %s, port_id = 0x%" PRIx64, status.c_str(), p.m_alias.c_str(), portId); + m_portStateTable.hset(p.m_alias, "host_tx_ready", status); +} + bool PortsOrch::getPortAdminStatus(sai_object_id_t id, bool &up) { SWSS_LOG_ENTER(); @@ -1168,6 +1570,25 @@ bool PortsOrch::getPortAdminStatus(sai_object_id_t id, bool &up) return true; } +bool PortsOrch::getPortHostTxReady(const Port& port, bool &hostTxReadyVal) 
+{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_HOST_TX_READY_STATUS; + + sai_status_t status = sai_port_api->get_port_attribute(port.m_port_id, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + hostTxReadyVal = false; + return false; + } + + hostTxReadyVal = attr.value.s32; + + return true; +} + bool PortsOrch::getPortMtu(const Port& port, sai_uint32_t &mtu) { SWSS_LOG_ENTER(); @@ -1184,7 +1605,8 @@ bool PortsOrch::getPortMtu(const Port& port, sai_uint32_t &mtu) mtu = attr.value.u32 - (uint32_t)(sizeof(struct ether_header) + FCS_LEN + VLAN_TAG_LEN); - if (isMACsecPort(port.m_port_id)) + /* Reduce the default MTU got from ASIC by MAX_MACSEC_SECTAG_SIZE */ + if (mtu > MAX_MACSEC_SECTAG_SIZE) { mtu -= MAX_MACSEC_SECTAG_SIZE; } @@ -1228,59 +1650,65 @@ bool PortsOrch::setPortMtu(const Port& port, sai_uint32_t mtu) } -bool PortsOrch::setPortTpid(sai_object_id_t id, sai_uint16_t tpid) +bool PortsOrch::setPortTpid(Port &port, sai_uint16_t tpid) { SWSS_LOG_ENTER(); - sai_status_t status = SAI_STATUS_SUCCESS; - sai_attribute_t attr; + sai_attribute_t attr; attr.id = SAI_PORT_ATTR_TPID; + attr.value.u16 = tpid; - attr.value.u16 = (uint16_t)tpid; - - status = sai_port_api->set_port_attribute(id, &attr); + auto status = sai_port_api->set_port_attribute(port.m_port_id, &attr); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to set TPID 0x%x to port pid:%" PRIx64 ", rv:%d", - attr.value.u16, id, status); + SWSS_LOG_ERROR("Failed to set TPID 0x%x to port %s, rv:%d", + attr.value.u16, port.m_alias.c_str(), status); task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); if (handle_status != task_success) { return parseHandleSaiStatusFailure(handle_status); } } - else - { - SWSS_LOG_NOTICE("Set TPID 0x%x to port pid:%" PRIx64, attr.value.u16, id); - } + + SWSS_LOG_NOTICE("Set TPID 0x%x to port %s", attr.value.u16, port.m_alias.c_str()); + return true; } -bool PortsOrch::setPortFec(Port &port, string &mode) +bool 
PortsOrch::setPortFecOverride(sai_object_id_t port_obj, bool override_fec) { - SWSS_LOG_ENTER(); + sai_attribute_t attr; + sai_status_t status; + + attr.id = SAI_PORT_ATTR_AUTO_NEG_FEC_MODE_OVERRIDE; + attr.value.booldata = override_fec; - auto searchRef = m_portSupportedFecModes.find(port.m_port_id); - if (searchRef != m_portSupportedFecModes.end()) + status = sai_port_api->set_port_attribute(port_obj, &attr); + if (status != SAI_STATUS_SUCCESS) { - auto &supportedFecModes = searchRef->second; - if (!supportedFecModes.empty() && (supportedFecModes.find(mode) == supportedFecModes.end())) + SWSS_LOG_ERROR("Failed to set fec override %d to port pid:%" PRIx64, attr.value.booldata, port_obj); + task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); + if (handle_status != task_success) { - SWSS_LOG_ERROR("Unsupported mode %s on port %s", mode.c_str(), port.m_alias.c_str()); - // We return true becase the caller will keep the item in m_toSync and retry it later if we return false - // As the FEC mode is not supported it doesn't make sense to retry. 
- return true; + return parseHandleSaiStatusFailure(handle_status); } } + SWSS_LOG_INFO("Set fec override %d to port pid:%" PRIx64, attr.value.booldata, port_obj); + return true; +} + +bool PortsOrch::setPortFec(Port &port, sai_port_fec_mode_t fec_mode, bool override_fec) +{ + SWSS_LOG_ENTER(); sai_attribute_t attr; attr.id = SAI_PORT_ATTR_FEC_MODE; - attr.value.s32 = port.m_fec_mode; + attr.value.s32 = fec_mode; sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to set FEC mode %s to port %s", mode.c_str(), port.m_alias.c_str()); + SWSS_LOG_ERROR("Failed to set FEC mode %d to port %s", fec_mode, port.m_alias.c_str()); task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); if (handle_status != task_success) { @@ -1288,9 +1716,13 @@ bool PortsOrch::setPortFec(Port &port, string &mode) } } - SWSS_LOG_NOTICE("Set port %s FEC mode %s", port.m_alias.c_str(), mode.c_str()); + if (fec_override_sup && !setPortFecOverride(port.m_port_id, override_fec)) + { + return false; + } + setGearboxPortsAttr(port, SAI_PORT_ATTR_FEC_MODE, &fec_mode, override_fec); - setGearboxPortsAttr(port, SAI_PORT_ATTR_FEC_MODE, &port.m_fec_mode); + SWSS_LOG_NOTICE("Set port %s FEC mode %d", port.m_alias.c_str(), fec_mode); return true; } @@ -1372,9 +1804,9 @@ bool PortsOrch::setPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t pfcwd_b SWSS_LOG_ERROR("Failed to get port object for port id 0x%" PRIx64, portId); return false; } - + p.m_pfcwd_sw_bitmask = pfcwd_bitmask; - + m_portList[p.m_alias] = p; SWSS_LOG_INFO("Set PFC watchdog port id=0x%" PRIx64 ", bitmast=0x%x", portId, pfcwd_bitmask); @@ -1392,48 +1824,33 @@ bool PortsOrch::getPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t *pfcwd_ SWSS_LOG_ERROR("Failed to get port object for port id 0x%" PRIx64, portId); return false; } - + *pfcwd_bitmask = p.m_pfcwd_sw_bitmask; - + return true; } -bool PortsOrch::setPortPfcAsym(Port 
&port, string pfc_asym) +bool PortsOrch::setPortPfcAsym(Port &port, sai_port_priority_flow_control_mode_t pfc_asym) { SWSS_LOG_ENTER(); - sai_attribute_t attr; uint8_t pfc = 0; - if (!getPortPfc(port.m_port_id, &pfc)) { return false; } - auto found = pfc_asym_map.find(pfc_asym); - if (found == pfc_asym_map.end()) - { - SWSS_LOG_ERROR("Incorrect asymmetric PFC mode: %s", pfc_asym.c_str()); - return false; - } - - auto new_pfc_asym = found->second; - if (port.m_pfc_asym == new_pfc_asym) - { - SWSS_LOG_NOTICE("Already set asymmetric PFC mode: %s", pfc_asym.c_str()); - return true; - } - - port.m_pfc_asym = new_pfc_asym; + port.m_pfc_asym = pfc_asym; m_portList[port.m_alias] = port; + sai_attribute_t attr; attr.id = SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_MODE; - attr.value.s32 = (int32_t) port.m_pfc_asym; + attr.value.s32 = pfc_asym; sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to set PFC mode %d to port id 0x%" PRIx64 " (rc:%d)", port.m_pfc_asym, port.m_port_id, status); + SWSS_LOG_ERROR("Failed to set PFC mode %d to port id 0x%" PRIx64 " (rc:%d)", pfc_asym, port.m_port_id, status); task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); if (handle_status != task_success) { @@ -1446,7 +1863,7 @@ bool PortsOrch::setPortPfcAsym(Port &port, string pfc_asym) return false; } - if (port.m_pfc_asym == SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_SEPARATE) + if (pfc_asym == SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_SEPARATE) { attr.id = SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_RX; attr.value.u8 = static_cast(0xff); @@ -1457,13 +1874,13 @@ bool PortsOrch::setPortPfcAsym(Port &port, string pfc_asym) SWSS_LOG_ERROR("Failed to set RX PFC 0x%x to port id 0x%" PRIx64 " (rc:%d)", attr.value.u8, port.m_port_id, status); task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); if (handle_status != task_success) - { - return parseHandleSaiStatusFailure(handle_status); - } 
+ { + return parseHandleSaiStatusFailure(handle_status); + } } } - SWSS_LOG_INFO("Set asymmetric PFC %s to port id 0x%" PRIx64, pfc_asym.c_str(), port.m_port_id); + SWSS_LOG_INFO("Set asymmetric PFC %d to port id 0x%" PRIx64, pfc_asym, port.m_port_id); return true; } @@ -1815,7 +2232,7 @@ bool PortsOrch::bindAclTable(sai_object_id_t port_oid, member_attrs.push_back(member_attr); member_attr.id = SAI_ACL_TABLE_GROUP_MEMBER_ATTR_PRIORITY; - member_attr.value.u32 = 100; // TODO: double check! + member_attr.value.u32 = 100; member_attrs.push_back(member_attr); status = sai_acl_api->create_acl_table_group_member(&group_member_oid, gSwitchId, (uint32_t)member_attrs.size(), member_attrs.data()); @@ -1951,7 +2368,7 @@ bool PortsOrch::setHostIntfsStripTag(Port &port, sai_hostif_vlan_tag_t strip) return false; } - for (const auto p: portv) + for (const auto &p: portv) { sai_attribute_t attr; attr.id = SAI_HOSTIF_ATTR_VLAN_TAG; @@ -2095,64 +2512,81 @@ void PortsOrch::initPortCapLinkTraining(Port &port) SWSS_LOG_WARN("Unable to get %s LT support capability", port.m_alias.c_str()); } -void PortsOrch::getPortSupportedFecModes(const std::string& alias, sai_object_id_t port_id, PortSupportedFecModes &supported_fecmodes) +bool PortsOrch::isFecModeSupported(const Port &port, sai_port_fec_mode_t fec_mode) { - sai_attribute_t attr; - sai_status_t status; - vector fecModes(fec_mode_reverse_map.size()); + initPortSupportedFecModes(port.m_alias, port.m_port_id); + + const auto &obj = m_portSupportedFecModes.at(port.m_port_id); + + if (!obj.supported) + { + return true; + } + + if (obj.data.empty()) + { + return false; + } + + return std::find(obj.data.cbegin(), obj.data.cend(), fec_mode) != obj.data.cend(); +} + +sai_status_t PortsOrch::getPortSupportedFecModes(PortSupportedFecModes &supported_fecmodes, sai_object_id_t port_id) +{ + SWSS_LOG_ENTER(); + sai_attribute_t attr; + std::vector fecModes(Port::max_fec_modes); attr.id = SAI_PORT_ATTR_SUPPORTED_FEC_MODE; attr.value.s32list.count = 
static_cast(fecModes.size()); attr.value.s32list.list = fecModes.data(); - status = sai_port_api->get_port_attribute(port_id, 1, &attr); - fecModes.resize(attr.value.s32list.count); + auto status = sai_port_api->get_port_attribute(port_id, 1, &attr); if (status == SAI_STATUS_SUCCESS) { - if (fecModes.empty()) - { - supported_fecmodes.insert("N/A"); - } - else + for (std::uint32_t i = 0; i < attr.value.s32list.count; i++) { - for(auto fecMode : fecModes) - { - supported_fecmodes.insert(fec_mode_reverse_map[static_cast(fecMode)]); - } + supported_fecmodes.insert(static_cast(attr.value.s32list.list[i])); } } else { if (SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status) || - status == SAI_STATUS_NOT_IMPLEMENTED) + (status == SAI_STATUS_NOT_SUPPORTED) || + (status == SAI_STATUS_NOT_IMPLEMENTED)) { // unable to validate FEC mode if attribute is not supported on platform - SWSS_LOG_NOTICE("Unable to validate FEC mode for port %s id=%" PRIx64 " due to unsupported by platform", - alias.c_str(), port_id); + SWSS_LOG_NOTICE( + "Unable to validate FEC mode for port id=%" PRIx64 " due to unsupported by platform", port_id + ); } else { - SWSS_LOG_ERROR("Failed to get a list of supported FEC modes for port %s id=%" PRIx64 ". Error=%d", - alias.c_str(), port_id, status); + SWSS_LOG_ERROR( + "Failed to get a list of supported FEC modes for port id=%" PRIx64 ". 
Error=%d", port_id, status + ); } - - supported_fecmodes.clear(); // return empty } + + return status; } void PortsOrch::initPortSupportedFecModes(const std::string& alias, sai_object_id_t port_id) { + SWSS_LOG_ENTER(); + // If port supported speeds map already contains the information, save the SAI call - if (m_portSupportedFecModes.count(port_id)) + if (m_portSupportedFecModes.count(port_id) > 0) { return; } - PortSupportedFecModes supported_fec_modes; - getPortSupportedFecModes(alias, port_id, supported_fec_modes); - m_portSupportedFecModes[port_id] = supported_fec_modes; - if (supported_fec_modes.empty()) + auto &obj = m_portSupportedFecModes[port_id]; + auto &supported_fec_modes = obj.data; + + auto status = getPortSupportedFecModes(supported_fec_modes, port_id); + if (status != SAI_STATUS_SUCCESS) { // Do not expose "supported_fecs" in case fetching FEC modes is not supported by the vendor SWSS_LOG_INFO("No supported_fecs exposed to STATE_DB for port %s since fetching supported FEC modes is not supported by the vendor", @@ -2160,34 +2594,54 @@ void PortsOrch::initPortSupportedFecModes(const std::string& alias, sai_object_i return; } - vector v; - std::string supported_fec_modes_str; - bool first = true; - for(auto fec : supported_fec_modes) + obj.supported = true; + + std::vector fecModeList; + if (supported_fec_modes.empty()) { - if (first) - first = false; - else - supported_fec_modes_str += ','; - supported_fec_modes_str += fec; + fecModeList.push_back("N/A"); + } + else + { + for (const auto &cit : supported_fec_modes) + { + std::string fecMode; + if (!m_portHlpr.fecToStr(fecMode, cit)) + { + SWSS_LOG_ERROR( + "Failed to convert FEC mode for port %s: unknown value %d", + alias.c_str(), static_cast(cit) + ); + continue; + } + + fecModeList.push_back(fecMode); + } + if (!fecModeList.empty() && fec_override_sup) + { + fecModeList.push_back(PORT_FEC_AUTO); + } } + std::vector v; + std::string supported_fec_modes_str = swss::join(',', fecModeList.begin(), 
fecModeList.end()); v.emplace_back(std::make_pair("supported_fecs", supported_fec_modes_str)); + m_portStateTable.set(alias, v); } /* * If Gearbox is enabled and this is a Gearbox port then set the attributes accordingly. */ -bool PortsOrch::setGearboxPortsAttr(const Port &port, sai_port_attr_t id, void *value) +bool PortsOrch::setGearboxPortsAttr(const Port &port, sai_port_attr_t id, void *value, bool override_fec) { bool status = false; - status = setGearboxPortAttr(port, PHY_PORT_TYPE, id, value); + status = setGearboxPortAttr(port, PHY_PORT_TYPE, id, value, override_fec); if (status == true) { - status = setGearboxPortAttr(port, LINE_PORT_TYPE, id, value); + status = setGearboxPortAttr(port, LINE_PORT_TYPE, id, value, override_fec); } return status; @@ -2197,7 +2651,7 @@ bool PortsOrch::setGearboxPortsAttr(const Port &port, sai_port_attr_t id, void * * If Gearbox is enabled and this is a Gearbox port then set the specific lane attribute. * Note: the appl_db is also updated (Gearbox config_db tables are TBA). 
*/ -bool PortsOrch::setGearboxPortAttr(const Port &port, dest_port_type_t port_type, sai_port_attr_t id, void *value) +bool PortsOrch::setGearboxPortAttr(const Port &port, dest_port_type_t port_type, sai_port_attr_t id, void *value, bool override_fec) { sai_status_t status = SAI_STATUS_SUCCESS; sai_object_id_t dest_port_id; @@ -2273,6 +2727,10 @@ bool PortsOrch::setGearboxPortAttr(const Port &port, dest_port_type_t port_type, m_gearboxTable->hset(key, speed_attr, to_string(speed)); SWSS_LOG_NOTICE("BOX: Updated APPL_DB key:%s %s %d", key.c_str(), speed_attr.c_str(), speed); } + else if (id == SAI_PORT_ATTR_FEC_MODE && fec_override_sup && !setPortFecOverride(dest_port_id, override_fec)) + { + return false; + } } else { @@ -2378,17 +2836,17 @@ bool PortsOrch::getPortAdvSpeeds(const Port& port, bool remote, string& adv_spee return rc; } -task_process_status PortsOrch::setPortAdvSpeeds(sai_object_id_t port_id, std::vector& speed_list) +task_process_status PortsOrch::setPortAdvSpeeds(Port &port, std::set &speed_list) { SWSS_LOG_ENTER(); - sai_attribute_t attr; - sai_status_t status; + sai_attribute_t attr; + std::vector speedList(speed_list.begin(), speed_list.end()); attr.id = SAI_PORT_ATTR_ADVERTISED_SPEED; - attr.value.u32list.list = speed_list.data(); - attr.value.u32list.count = static_cast(speed_list.size()); + attr.value.u32list.list = speedList.data(); + attr.value.u32list.count = static_cast(speedList.size()); - status = sai_port_api->set_port_attribute(port_id, &attr); + auto status = sai_port_api->set_port_attribute(port.m_port_id, &attr); if (status != SAI_STATUS_SUCCESS) { return handleSaiSetStatus(SAI_API_PORT, status); @@ -2397,16 +2855,15 @@ task_process_status PortsOrch::setPortAdvSpeeds(sai_object_id_t port_id, std::ve return task_success; } -task_process_status PortsOrch::setPortInterfaceType(sai_object_id_t port_id, sai_port_interface_type_t interface_type) +task_process_status PortsOrch::setPortInterfaceType(Port &port, sai_port_interface_type_t 
interface_type) { SWSS_LOG_ENTER(); - sai_attribute_t attr; - sai_status_t status; + sai_attribute_t attr; attr.id = SAI_PORT_ATTR_INTERFACE_TYPE; - attr.value.u32 = static_cast(interface_type); + attr.value.s32 = interface_type; - status = sai_port_api->set_port_attribute(port_id, &attr); + auto status = sai_port_api->set_port_attribute(port.m_port_id, &attr); if (status != SAI_STATUS_SUCCESS) { return handleSaiSetStatus(SAI_API_PORT, status); @@ -2415,17 +2872,17 @@ task_process_status PortsOrch::setPortInterfaceType(sai_object_id_t port_id, sai return task_success; } -task_process_status PortsOrch::setPortAdvInterfaceTypes(sai_object_id_t port_id, std::vector &interface_types) +task_process_status PortsOrch::setPortAdvInterfaceTypes(Port &port, std::set &interface_types) { SWSS_LOG_ENTER(); - sai_attribute_t attr; - sai_status_t status; + sai_attribute_t attr; + std::vector interfaceTypeList(interface_types.begin(), interface_types.end()); attr.id = SAI_PORT_ATTR_ADVERTISED_INTERFACE_TYPE; - attr.value.u32list.list = interface_types.data(); - attr.value.u32list.count = static_cast(interface_types.size()); + attr.value.s32list.list = interfaceTypeList.data(); + attr.value.s32list.count = static_cast(interfaceTypeList.size()); - status = sai_port_api->set_port_attribute(port_id, &attr); + auto status = sai_port_api->set_port_attribute(port.m_port_id, &attr); if (status != SAI_STATUS_SUCCESS) { return handleSaiSetStatus(SAI_API_PORT, status); @@ -2438,19 +2895,36 @@ bool PortsOrch::getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uin { SWSS_LOG_ENTER(); - sai_attribute_t attr[2]; - attr[0].id = SAI_QUEUE_ATTR_TYPE; - attr[1].id = SAI_QUEUE_ATTR_INDEX; + auto const &queueInfoRef = m_queueInfo.find(queue_id); - sai_status_t status = sai_queue_api->get_queue_attribute(queue_id, 2, attr); - if (status != SAI_STATUS_SUCCESS) + sai_attribute_t attr[2]; + if (queueInfoRef == m_queueInfo.end()) { - SWSS_LOG_ERROR("Failed to get queue type and index for queue %" 
PRIu64 " rv:%d", queue_id, status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_QUEUE, status); - if (handle_status != task_process_status::task_success) + attr[0].id = SAI_QUEUE_ATTR_TYPE; + attr[1].id = SAI_QUEUE_ATTR_INDEX; + + sai_status_t status = sai_queue_api->get_queue_attribute(queue_id, 2, attr); + if (status != SAI_STATUS_SUCCESS) { - return false; + SWSS_LOG_ERROR("Failed to get queue type and index for queue %" PRIu64 " rv:%d", queue_id, status); + task_process_status handle_status = handleSaiGetStatus(SAI_API_QUEUE, status); + if (handle_status != task_process_status::task_success) + { + return false; + } } + + SWSS_LOG_INFO("Caching information (index %d type %d) for queue %" PRIx64, attr[1].value.u8, attr[0].value.s32, queue_id); + + m_queueInfo[queue_id].type = static_cast(attr[0].value.s32); + m_queueInfo[queue_id].index = attr[1].value.u8; + } + else + { + attr[0].value.s32 = m_queueInfo[queue_id].type; + attr[1].value.u8 = m_queueInfo[queue_id].index; + + SWSS_LOG_INFO("Fetched cached information (index %d type %d) for queue %" PRIx64, attr[1].value.u8, attr[0].value.s32, queue_id); } switch (attr[0].value.s32) @@ -2464,8 +2938,11 @@ bool PortsOrch::getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uin case SAI_QUEUE_TYPE_MULTICAST: type = "SAI_QUEUE_TYPE_MULTICAST"; break; + case SAI_QUEUE_TYPE_UNICAST_VOQ: + type = "SAI_QUEUE_TYPE_UNICAST_VOQ"; + break; default: - SWSS_LOG_ERROR("Got unsupported queue type %d for %" PRIu64 " queue", attr[0].value.s32, queue_id); + SWSS_LOG_ERROR("Got unsupported queue type %d for %" PRIx64 " queue", attr[0].value.s32, queue_id); throw runtime_error("Got unsupported queue type"); } @@ -2474,21 +2951,38 @@ bool PortsOrch::getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uin return true; } -task_process_status PortsOrch::setPortAutoNeg(sai_object_id_t id, int an) +bool PortsOrch::isAutoNegEnabled(sai_object_id_t id) +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + 
attr.id = SAI_PORT_ATTR_AUTO_NEG_MODE; + + sai_status_t status = sai_port_api->get_port_attribute(id, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get port AutoNeg status for port pid:%" PRIx64, id); + return false; + } + + return attr.value.booldata; +} + +task_process_status PortsOrch::setPortAutoNeg(Port &port, bool autoneg) { SWSS_LOG_ENTER(); sai_attribute_t attr; attr.id = SAI_PORT_ATTR_AUTO_NEG_MODE; - attr.value.booldata = (an == 1 ? true : false); + attr.value.booldata = autoneg; - sai_status_t status = sai_port_api->set_port_attribute(id, &attr); + sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to set AutoNeg %u to port pid:%" PRIx64, attr.value.booldata, id); + SWSS_LOG_ERROR("Failed to set AutoNeg %u to port %s", attr.value.booldata, port.m_alias.c_str()); return handleSaiSetStatus(SAI_API_PORT, status); } - SWSS_LOG_INFO("Set AutoNeg %u to port pid:%" PRIx64, attr.value.booldata, id); + SWSS_LOG_INFO("Set AutoNeg %u to port %s", attr.value.booldata, port.m_alias.c_str()); return task_success; } @@ -2537,6 +3031,8 @@ bool PortsOrch::setHostIntfsOperStatus(const Port& port, bool isUp) const SWSS_LOG_NOTICE("Set operation status %s to host interface %s", isUp ? "UP" : "DOWN", port.m_alias.c_str()); + event_params_t params = {{"ifname",port.m_alias},{"status",isUp ? 
"up" : "down"}}; + event_publish(g_events_handle, "if-state", ¶ms); return true; } @@ -2570,6 +3066,23 @@ bool PortsOrch::createVlanHostIntf(Port& vl, string hostif_name) attr.value.chardata[SAI_HOSTIF_NAME_SIZE - 1] = '\0'; attrs.push_back(attr); + bool set_hostif_tx_queue = false; + if (gSwitchOrch->querySwitchCapability(SAI_OBJECT_TYPE_HOSTIF, SAI_HOSTIF_ATTR_QUEUE)) + { + set_hostif_tx_queue = true; + } + else + { + SWSS_LOG_WARN("Hostif queue attribute not supported"); + } + + if (set_hostif_tx_queue) + { + attr.id = SAI_HOSTIF_ATTR_QUEUE; + attr.value.u32 = DEFAULT_HOSTIF_TX_QUEUE; + attrs.push_back(attr); + } + sai_status_t status = sai_hostif_api->create_hostif(&vl.m_vlan_info.host_intf_id, gSwitchId, (uint32_t)attrs.size(), attrs.data()); if (status != SAI_STATUS_SUCCESS) { @@ -2594,6 +3107,30 @@ bool PortsOrch::removeVlanHostIntf(Port vl) return true; } +void PortsOrch::updateDbPortFlapCount(Port& port, sai_port_oper_status_t pstatus) +{ + SWSS_LOG_ENTER(); + + ++port.m_flap_count; + vector tuples; + FieldValueTuple tuple("flap_count", std::to_string(port.m_flap_count)); + tuples.push_back(tuple); + + auto now = std::chrono::system_clock::now(); + std::time_t now_c = std::chrono::system_clock::to_time_t(now); + if (pstatus == SAI_PORT_OPER_STATUS_DOWN) + { + FieldValueTuple tuple("last_down_time", std::ctime(&now_c)); + tuples.push_back(tuple); + } + else if (pstatus == SAI_PORT_OPER_STATUS_UP) + { + FieldValueTuple tuple("last_up_time", std::ctime(&now_c)); + tuples.push_back(tuple); + } + m_portTable->set(port.m_alias, tuples); +} + void PortsOrch::updateDbPortOperStatus(const Port& port, sai_port_oper_status_t status) const { SWSS_LOG_ENTER(); @@ -2611,58 +3148,6 @@ void PortsOrch::updateDbPortOperStatus(const Port& port, sai_port_oper_status_t m_portTable->set(port.m_alias, tuples); } -bool PortsOrch::addPort(const set &lane_set, uint32_t speed, int an, string fec_mode) -{ - SWSS_LOG_ENTER(); - - vector lanes(lane_set.begin(), lane_set.end()); - - 
sai_attribute_t attr; - vector attrs; - - attr.id = SAI_PORT_ATTR_SPEED; - attr.value.u32 = speed; - attrs.push_back(attr); - - attr.id = SAI_PORT_ATTR_HW_LANE_LIST; - attr.value.u32list.list = lanes.data(); - attr.value.u32list.count = static_cast(lanes.size()); - attrs.push_back(attr); - - if (an == true) - { - attr.id = SAI_PORT_ATTR_AUTO_NEG_MODE; - attr.value.booldata = true; - attrs.push_back(attr); - } - - if (!fec_mode.empty()) - { - attr.id = SAI_PORT_ATTR_FEC_MODE; - attr.value.u32 = fec_mode_map[fec_mode]; - attrs.push_back(attr); - } - - sai_object_id_t port_id; - sai_status_t status = sai_port_api->create_port(&port_id, gSwitchId, static_cast(attrs.size()), attrs.data()); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to create port with the speed %u, rv:%d", speed, status); - task_process_status handle_status = handleSaiCreateStatus(SAI_API_PORT, status); - if (handle_status != task_success) - { - return parseHandleSaiStatusFailure(handle_status); - } - } - - m_portListLaneMap[lane_set] = port_id; - m_portCount++; - - SWSS_LOG_NOTICE("Create port %" PRIx64 " with the speed %u", port_id, speed); - - return true; -} - sai_status_t PortsOrch::removePort(sai_object_id_t port_id) { SWSS_LOG_ENTER(); @@ -2675,7 +3160,11 @@ sai_status_t PortsOrch::removePort(sai_object_id_t port_id) */ if (getPort(port_id, port)) { - setPortAdminStatus(port, false); + /* Bring port down before removing port */ + if (!setPortAdminStatus(port, false)) + { + SWSS_LOG_ERROR("Failed to set admin status to DOWN to remove port %" PRIx64, port_id); + } } /* else : port is in default state or not yet created */ @@ -2686,6 +3175,12 @@ sai_status_t PortsOrch::removePort(sai_object_id_t port_id) removePortSerdesAttribute(port_id); + for (auto queue_id : port.m_queue_ids) + { + SWSS_LOG_INFO("Removing cached information for queue %" PRIx64, queue_id); + m_queueInfo.erase(queue_id); + } + sai_status_t status = sai_port_api->remove_port(port_id); if (status != 
SAI_STATUS_SUCCESS) { @@ -2714,10 +3209,15 @@ string PortsOrch::getPriorityGroupDropPacketsFlexCounterTableKey(string key) return string(PG_DROP_STAT_COUNTER_FLEX_COUNTER_GROUP) + ":" + key; } -bool PortsOrch::initPort(const string &alias, const string &role, const int index, const set &lane_set) +bool PortsOrch::initPort(const PortConfig &port) { SWSS_LOG_ENTER(); + const auto &alias = port.key; + const auto &role = port.role.value; + const auto &index = port.index.value; + const auto &lane_set = port.lanes.value; + /* Determine if the lane combination exists in switch */ if (m_portListLaneMap.find(lane_set) != m_portListLaneMap.end()) { @@ -2781,7 +3281,7 @@ bool PortsOrch::initPort(const string &alias, const string &role, const int inde m_portList[alias].m_init = true; - if (role == "Rec" || role == "Inb") + if (role == Port::Role::Rec || role == Port::Role::Inb) { m_recircPortRole[alias] = role; } @@ -2891,6 +3391,7 @@ bool PortsOrch::bake() addExistingData(APP_LAG_MEMBER_TABLE_NAME); addExistingData(APP_VLAN_TABLE_NAME); addExistingData(APP_VLAN_MEMBER_TABLE_NAME); + addExistingData(STATE_TRANSCEIVER_INFO_TABLE_NAME); return true; } @@ -2906,10 +3407,9 @@ void PortsOrch::cleanPortTable(const vector& keys) void PortsOrch::removePortFromLanesMap(string alias) { - for (auto it = m_lanesAliasSpeedMap.begin(); it != m_lanesAliasSpeedMap.end(); it++) { - if (get<0>(it->second) == alias) + if (it->second.key == alias) { SWSS_LOG_NOTICE("Removing port %s from lanes map", alias.c_str()); it = m_lanesAliasSpeedMap.erase(it); @@ -2932,8 +3432,7 @@ void PortsOrch::removePortFromPortListMap(sai_object_id_t port_id) } } - -void PortsOrch::doPortTask(Consumer &consumer) +void PortsOrch::doSendToIngressPortTask(Consumer &consumer) { SWSS_LOG_ENTER(); @@ -2944,34 +3443,117 @@ void PortsOrch::doPortTask(Consumer &consumer) string alias = kfvKey(t); string op = kfvOp(t); + ReturnCode rc; + std::vector app_state_db_attrs; - if (alias == "PortConfigDone") + if (op == SET_COMMAND) 
{ - if (m_portConfigState != PORT_CONFIG_MISSING) + if (m_isSendToIngressPortConfigured) { - // Already received, ignore this task + rc = ReturnCode(StatusCode::SWSS_RC_UNIMPLEMENTED) + << "Update operation on SendToIngress port with alias=" + << alias << " is not suported"; + SWSS_LOG_ERROR("%s", rc.message().c_str()); + m_publisher.publish(consumer.getTableName(), kfvKey(t), + kfvFieldsValues(t), rc); it = consumer.m_toSync.erase(it); continue; } + rc = addSendToIngressHostIf(alias); + if (!rc.ok()) + { + SWSS_LOG_ERROR("%s", rc.message().c_str()); + } + else + { + m_isSendToIngressPortConfigured = true; + } + } + else if (op == DEL_COMMAND) + { + // For SendToIngress port, delete the host interface and unbind from the CPU port + rc = removeSendToIngressHostIf(); + if (!rc.ok()) + { + SWSS_LOG_ERROR("Failed to remove SendToIngress port rc=%s", + rc.message().c_str()); + } + else + { + m_isSendToIngressPortConfigured = false; + } + } + else + { + rc = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << + "Unknown operation type " << op; + SWSS_LOG_ERROR("%s", rc.message().c_str()); + } + m_publisher.publish(consumer.getTableName(), kfvKey(t), + kfvFieldsValues(t), rc); + it = consumer.m_toSync.erase(it); + } +} + +void PortsOrch::doPortTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); - m_portConfigState = PORT_CONFIG_RECEIVED; + auto &taskMap = consumer.m_toSync; + auto it = taskMap.begin(); - for (auto i : kfvFieldsValues(t)) + while (it != taskMap.end()) + { + auto keyOpFieldsValues = it->second; + auto key = kfvKey(keyOpFieldsValues); + auto op = kfvOp(keyOpFieldsValues); + + SWSS_LOG_INFO("KEY: %s, OP: %s", key.c_str(), op.c_str()); + + if (key.empty()) + { + SWSS_LOG_ERROR("Failed to parse port key: empty string"); + it = taskMap.erase(it); + continue; + } + + /* Got notification from portsyncd application: + * + * When portsorch receives 'PortConfigDone' message, it indicates port configuration + * procedure is done. 
Port configuration assumes all data has been read from config db + * and pushed to application db. + * + * Before port configuration procedure, none of other tasks are executed. + */ + if (key == "PortConfigDone") + { + it = taskMap.erase(it); + + /* portsyncd restarting case: + * When portsyncd restarts, duplicate notifications may be received. + */ + if (getPortConfigState() != PORT_CONFIG_MISSING) { - if (fvField(i) == "count") - { - m_portCount = to_uint(fvValue(i)); - } + // Already received, ignore this task + continue; } + + setPortConfigState(PORT_CONFIG_RECEIVED); + + SWSS_LOG_INFO("Got PortConfigDone notification from portsyncd"); + + it = taskMap.begin(); + continue; } - /* Get notification from application */ - /* portsyncd application: + /* Got notification from portsyncd application: + * * When portsorch receives 'PortInitDone' message, it indicates port initialization - * procedure is done. Before port initialization procedure, none of other tasks - * are executed. + * procedure is done. Port initialization assumes all netdevs have been created. + * + * Before port initialization procedure, none of other tasks are executed. */ - if (alias == "PortInitDone") + if (key == "PortInitDone") { /* portsyncd restarting case: * When portsyncd restarts, duplicate notifications may be received. 
@@ -2980,203 +3562,69 @@ void PortsOrch::doPortTask(Consumer &consumer) { addSystemPorts(); m_initDone = true; - SWSS_LOG_INFO("Get PortInitDone notification from portsyncd."); + SWSS_LOG_INFO("Got PortInitDone notification from portsyncd"); } - it = consumer.m_toSync.erase(it); - return; - + it = taskMap.erase(it); + continue; } + PortConfig pCfg(key, op); + if (op == SET_COMMAND) { - set lane_set; - vector attr_val; - map> serdes_attr; - typedef pair> serdes_attr_pair; - string admin_status; - string fec_mode; - string pfc_asym; - uint32_t mtu = 0; - uint32_t speed = 0; - string learn_mode; - string an_str; - string lt_str; - int an = -1; - int lt = -1; - int index = -1; - string role; - string adv_speeds_str; - string interface_type_str; - string adv_interface_types_str; - vector adv_speeds; - sai_port_interface_type_t interface_type; - vector adv_interface_types; - string tpid_string; - uint16_t tpid = 0; - - for (auto i : kfvFieldsValues(t)) + auto parsePortFvs = [&](auto& fvMap) -> bool { - attr_val.clear(); - /* Set interface index */ - if (fvField(i) == "index") + for (const auto &cit : kfvFieldsValues(keyOpFieldsValues)) { - index = (int)stoul(fvValue(i)); - } - /* Get lane information of a physical port and initialize the port */ - else if (fvField(i) == "lanes") - { - string lane_str; - istringstream iss(fvValue(i)); + auto fieldName = fvField(cit); + auto fieldValue = fvValue(cit); - while (getline(iss, lane_str, ',')) - { - int lane = stoi(lane_str); - lane_set.insert(lane); - } - } - /* Set port admin status */ - else if (fvField(i) == "admin_status") - { - admin_status = fvValue(i); - } - /* Set port MTU */ - else if (fvField(i) == "mtu") - { - mtu = (uint32_t)stoul(fvValue(i)); - } - /* Set port TPID */ - if (fvField(i) == "tpid") - { - tpid_string = fvValue(i); - // Need to get rid of the leading 0x - tpid_string.erase(0,2); - tpid = (uint16_t)stoi(tpid_string, 0, 16); - SWSS_LOG_DEBUG("Handling TPID to 0x%x, string value:%s", tpid, 
tpid_string.c_str()); - } - /* Set port speed */ - else if (fvField(i) == "speed") - { - speed = (uint32_t)stoul(fvValue(i)); - } - /* Set port fec */ - else if (fvField(i) == "fec") - { - fec_mode = fvValue(i); - } - /* Get port fdb learn mode*/ - else if (fvField(i) == "learn_mode") - { - learn_mode = fvValue(i); - } - /* Set port asymmetric PFC */ - else if (fvField(i) == "pfc_asym") - { - pfc_asym = fvValue(i); - } - /* Set autoneg and ignore the port speed setting */ - else if (fvField(i) == "autoneg") - { - an_str = fvValue(i); - } - /* Set advertised speeds */ - else if (fvField(i) == "adv_speeds") - { - adv_speeds_str = fvValue(i); - } - /* Set interface type */ - else if (fvField(i) == "interface_type") - { - interface_type_str = fvValue(i); - } - /* Set advertised interface type */ - else if (fvField(i) == "adv_interface_types") - { - adv_interface_types_str = fvValue(i); - } - /* Set link training */ - else if (fvField(i) == "link_training") - { - lt_str = fvValue(i); - } - /* Set port serdes Pre-emphasis */ - else if (fvField(i) == "preemphasis") - { - getPortSerdesVal(fvValue(i), attr_val); - serdes_attr.insert(serdes_attr_pair(SAI_PORT_SERDES_ATTR_PREEMPHASIS, attr_val)); - } - /* Set port serdes idriver */ - else if (fvField(i) == "idriver") - { - getPortSerdesVal(fvValue(i), attr_val); - serdes_attr.insert(serdes_attr_pair(SAI_PORT_SERDES_ATTR_IDRIVER, attr_val)); - } - /* Set port serdes ipredriver */ - else if (fvField(i) == "ipredriver") - { - getPortSerdesVal(fvValue(i), attr_val); - serdes_attr.insert(serdes_attr_pair(SAI_PORT_SERDES_ATTR_IPREDRIVER, attr_val)); - } - /* Set port serdes pre1 */ - else if (fvField(i) == "pre1") - { - getPortSerdesVal(fvValue(i), attr_val); - serdes_attr.insert(serdes_attr_pair(SAI_PORT_SERDES_ATTR_TX_FIR_PRE1, attr_val)); - } - /* Set port serdes pre2 */ - else if (fvField(i) == "pre2") - { - getPortSerdesVal(fvValue(i), attr_val); - serdes_attr.insert(serdes_attr_pair(SAI_PORT_SERDES_ATTR_TX_FIR_PRE2, 
attr_val)); - } - /* Set port serdes pre3 */ - else if (fvField(i) == "pre3") - { - getPortSerdesVal(fvValue(i), attr_val); - serdes_attr.insert(serdes_attr_pair(SAI_PORT_SERDES_ATTR_TX_FIR_PRE3, attr_val)); - } - /* Set port serdes main */ - else if (fvField(i) == "main") - { - getPortSerdesVal(fvValue(i), attr_val); - serdes_attr.insert(serdes_attr_pair(SAI_PORT_SERDES_ATTR_TX_FIR_MAIN, attr_val)); - } - /* Set port serdes post1 */ - else if (fvField(i) == "post1") - { - getPortSerdesVal(fvValue(i), attr_val); - serdes_attr.insert(serdes_attr_pair(SAI_PORT_SERDES_ATTR_TX_FIR_POST1, attr_val)); - } - /* Set port serdes post2 */ - else if (fvField(i) == "post2") - { - getPortSerdesVal(fvValue(i), attr_val); - serdes_attr.insert(serdes_attr_pair(SAI_PORT_SERDES_ATTR_TX_FIR_POST2, attr_val)); + SWSS_LOG_INFO("FIELD: %s, VALUE: %s", fieldName.c_str(), fieldValue.c_str()); + + fvMap[fieldName] = fieldValue; } - /* Set port serdes post3 */ - else if (fvField(i) == "post3") + + pCfg.fieldValueMap = fvMap; + + if (!m_portHlpr.parsePortConfig(pCfg)) { - getPortSerdesVal(fvValue(i), attr_val); - serdes_attr.insert(serdes_attr_pair(SAI_PORT_SERDES_ATTR_TX_FIR_POST3, attr_val)); + return false; } - /* Set port serdes attn */ - else if (fvField(i) == "attn") + + return true; + }; + + if (m_portList.find(key) == m_portList.end()) + { + // Aggregate configuration while the port is not created. 
+ auto &fvMap = m_portConfigMap[key]; + + if (!parsePortFvs(fvMap)) { - getPortSerdesVal(fvValue(i), attr_val); - serdes_attr.insert(serdes_attr_pair(SAI_PORT_SERDES_ATTR_TX_FIR_ATTN, attr_val)); + it = taskMap.erase(it); + continue; } - /* Get port role */ - if (fvField(i) == "role") + if (!m_portHlpr.validatePortConfig(pCfg)) { - role = fvValue(i); + it = taskMap.erase(it); + continue; } - } - /* Collect information about all received ports */ - if (lane_set.size()) + /* Collect information about all received ports */ + m_lanesAliasSpeedMap[pCfg.lanes.value] = pCfg; + } + else { - m_lanesAliasSpeedMap[lane_set] = make_tuple(alias, speed, an, fec_mode, index, role); + // Port is already created, gather updated field-values. + std::unordered_map fvMap; + + if (!parsePortFvs(fvMap)) + { + it = taskMap.erase(it); + continue; + } } // TODO: @@ -3190,108 +3638,126 @@ void PortsOrch::doPortTask(Consumer &consumer) * 2. Create new ports * 3. Initialize all ports */ - if (m_portConfigState == PORT_CONFIG_RECEIVED || m_portConfigState == PORT_CONFIG_DONE) + if (getPortConfigState() != PORT_CONFIG_MISSING) { + std::vector portsToAddList; + std::vector portsToRemoveList; + + // Port remove comparison logic for (auto it = m_portListLaneMap.begin(); it != m_portListLaneMap.end();) { if (m_lanesAliasSpeedMap.find(it->first) == m_lanesAliasSpeedMap.end()) { - if (SAI_STATUS_SUCCESS != removePort(it->second)) - { - throw runtime_error("PortsOrch initialization failure."); - } + portsToRemoveList.push_back(it->second); it = m_portListLaneMap.erase(it); + continue; } - else + + it++; + } + + // Bulk port remove + if (!portsToRemoveList.empty()) + { + if (!removePortBulk(portsToRemoveList)) { - it++; + SWSS_LOG_THROW("PortsOrch initialization failure"); } } + // Port add comparison logic for (auto it = m_lanesAliasSpeedMap.begin(); it != m_lanesAliasSpeedMap.end();) { if (m_portListLaneMap.find(it->first) == m_portListLaneMap.end()) { - if (!addPort(it->first, get<1>(it->second), 
get<2>(it->second), get<3>(it->second))) - { - throw runtime_error("PortsOrch initialization failure."); - } + portsToAddList.push_back(it->second); + it++; + continue; } - if (!initPort(get<0>(it->second), get<5>(it->second), get<4>(it->second), it->first)) + if (!initPort(it->second)) { // Failure has been recorded in initPort it++; continue; } - initPortSupportedSpeeds(get<0>(it->second), m_portListLaneMap[it->first]); - initPortSupportedFecModes(get<0>(it->second), m_portListLaneMap[it->first]); + initPortSupportedSpeeds(it->second.key, m_portListLaneMap[it->first]); + initPortSupportedFecModes(it->second.key, m_portListLaneMap[it->first]); + it++; } - m_portConfigState = PORT_CONFIG_DONE; + // Bulk port add + if (!portsToAddList.empty()) + { + if (!addPortBulk(portsToAddList)) + { + SWSS_LOG_THROW("PortsOrch initialization failure"); + } + + for (const auto &cit : portsToAddList) + { + if (!initPort(cit)) + { + // Failure has been recorded in initPort + continue; + } + + initPortSupportedSpeeds(cit.key, m_portListLaneMap[cit.lanes.value]); + initPortSupportedFecModes(cit.key, m_portListLaneMap[cit.lanes.value]); + } + } + + setPortConfigState(PORT_CONFIG_DONE); } - if (m_portConfigState != PORT_CONFIG_DONE) + if (getPortConfigState() != PORT_CONFIG_DONE) { // Not yet receive PortConfigDone. Save it for future retry it++; continue; } - if (alias == "PortConfigDone") - { - it = consumer.m_toSync.erase(it); - continue; - } - - if (!gBufferOrch->isPortReady(alias)) + if (!gBufferOrch->isPortReady(pCfg.key)) { // buffer configuration hasn't been applied yet. 
save it for future retry - m_pendingPortSet.emplace(alias); + m_pendingPortSet.emplace(pCfg.key); it++; continue; } else { - m_pendingPortSet.erase(alias); + m_pendingPortSet.erase(pCfg.key); } Port p; - if (!getPort(alias, p)) + if (!getPort(pCfg.key, p)) { - SWSS_LOG_ERROR("Failed to get port id by alias:%s", alias.c_str()); + SWSS_LOG_ERROR("Failed to get port id by alias: %s", pCfg.key.c_str()); } else { - if (admin_status.empty()) - { - admin_status = p.m_admin_state_up ? "up" : "down"; - } + PortSerdesAttrMap_t serdes_attr; + getPortSerdesAttr(serdes_attr, pCfg); - if (!an_str.empty()) + // Saved configured admin status + bool admin_status = p.m_admin_state_up; + + if (pCfg.autoneg.is_set) { - if (autoneg_mode_map.find(an_str) == autoneg_mode_map.end()) - { - SWSS_LOG_ERROR("Failed to parse autoneg value: %s", an_str.c_str()); - // Invalid auto negotiation mode configured, don't retry - it = consumer.m_toSync.erase(it); - continue; - } - an = autoneg_mode_map[an_str]; - if (an != p.m_autoneg) + if (!p.m_an_cfg || p.m_autoneg != pCfg.autoneg.value) { if (p.m_cap_an < 0) { initPortCapAutoNeg(p); - m_portList[alias] = p; + m_portList[p.m_alias] = p; } if (p.m_cap_an < 1) { SWSS_LOG_ERROR("%s: autoneg is not supported (cap=%d)", p.m_alias.c_str(), p.m_cap_an); // autoneg is not supported, don't retry - it = consumer.m_toSync.erase(it); + it = taskMap.erase(it); continue; } if (p.m_admin_state_up) @@ -3299,467 +3765,591 @@ void PortsOrch::doPortTask(Consumer &consumer) /* Bring port down before applying speed */ if (!setPortAdminStatus(p, false)) { - SWSS_LOG_ERROR("Failed to set port %s admin status DOWN to set port autoneg mode", alias.c_str()); + SWSS_LOG_ERROR( + "Failed to set port %s admin status DOWN to set port autoneg mode", + p.m_alias.c_str() + ); it++; continue; } p.m_admin_state_up = false; - m_portList[alias] = p; + m_portList[p.m_alias] = p; } - auto status = setPortAutoNeg(p.m_port_id, an); + auto status = setPortAutoNeg(p, pCfg.autoneg.value); if 
(status != task_success) { - SWSS_LOG_ERROR("Failed to set port %s AN from %d to %d", alias.c_str(), p.m_autoneg, an); + SWSS_LOG_ERROR( + "Failed to set port %s AN from %d to %d", + p.m_alias.c_str(), p.m_autoneg, pCfg.autoneg.value + ); if (status == task_need_retry) { it++; } else { - it = consumer.m_toSync.erase(it); + it = taskMap.erase(it); } continue; } - SWSS_LOG_NOTICE("Set port %s AutoNeg from %d to %d", alias.c_str(), p.m_autoneg, an); - p.m_autoneg = static_cast(an); - m_portList[alias] = p; + + p.m_autoneg = pCfg.autoneg.value; + p.m_an_cfg = true; + m_portList[p.m_alias] = p; m_portStateTable.hdel(p.m_alias, "rmt_adv_speeds"); - updatePortStatePoll(p, PORT_STATE_POLL_AN, (an > 0)); + updatePortStatePoll(p, PORT_STATE_POLL_AN, pCfg.autoneg.value); + + SWSS_LOG_NOTICE( + "Set port %s autoneg to %s", + p.m_alias.c_str(), m_portHlpr.getAutonegStr(pCfg).c_str() + ); } } - if (!lt_str.empty() && (p.m_type == Port::PHY)) + if (pCfg.link_training.is_set) { - if (link_training_mode_map.find(lt_str) == link_training_mode_map.end()) - { - SWSS_LOG_ERROR("Failed to parse LT value: %s", lt_str.c_str()); - // Invalid link training mode configured, don't retry - it = consumer.m_toSync.erase(it); - continue; - } - - lt = link_training_mode_map[lt_str]; - if (lt != p.m_link_training) + if (!p.m_lt_cfg || ((p.m_link_training != pCfg.link_training.value) && (p.m_type == Port::PHY))) { if (p.m_cap_lt < 0) { initPortCapLinkTraining(p); - m_portList[alias] = p; + m_portList[p.m_alias] = p; } if (p.m_cap_lt < 1) { - SWSS_LOG_WARN("%s: LT is not supported(cap=%d)", alias.c_str(), p.m_cap_lt); + SWSS_LOG_WARN("%s: LT is not supported(cap=%d)", p.m_alias.c_str(), p.m_cap_lt); // Don't retry - it = consumer.m_toSync.erase(it); + it = taskMap.erase(it); continue; } - auto status = setPortLinkTraining(p, lt > 0 ? 
true : false); + auto status = setPortLinkTraining(p, pCfg.link_training.value); if (status != task_success) { - SWSS_LOG_ERROR("Failed to set port %s LT from %d to %d", alias.c_str(), p.m_link_training, lt); + SWSS_LOG_ERROR( + "Failed to set port %s LT from %d to %d", + p.m_alias.c_str(), p.m_link_training, pCfg.link_training.value + ); if (status == task_need_retry) { it++; } else { - it = consumer.m_toSync.erase(it); + it = taskMap.erase(it); } continue; } - m_portStateTable.hset(alias, "link_training_status", lt_str); - SWSS_LOG_NOTICE("Set port %s LT from %d to %d", alias.c_str(), p.m_link_training, lt); - p.m_link_training = lt; - m_portList[alias] = p; - updatePortStatePoll(p, PORT_STATE_POLL_LT, (lt > 0)); + + m_portStateTable.hset(p.m_alias, "link_training_status", m_portHlpr.getLinkTrainingStr(pCfg)); + p.m_link_training = pCfg.link_training.value; + p.m_lt_cfg = true; + m_portList[p.m_alias] = p; + updatePortStatePoll(p, PORT_STATE_POLL_LT, pCfg.link_training.value); // Restore pre-emphasis when LT is transitioned from ON to OFF - if ((p.m_link_training < 1) && (serdes_attr.size() == 0)) + if (!p.m_link_training && serdes_attr.empty()) { serdes_attr = p.m_preemphasis; } + + SWSS_LOG_NOTICE( + "Set port %s link training to %s", + p.m_alias.c_str(), m_portHlpr.getLinkTrainingStr(pCfg).c_str() + ); } } - if (speed != 0) + if (pCfg.speed.is_set) { - if (speed != p.m_speed) + if (p.m_speed != pCfg.speed.value) { - if (!isSpeedSupported(alias, p.m_port_id, speed)) + if (!isSpeedSupported(p.m_alias, p.m_port_id, pCfg.speed.value)) { - SWSS_LOG_ERROR("Unsupported port speed %u", speed); + SWSS_LOG_ERROR( + "Unsupported port %s speed %u", + p.m_alias.c_str(), pCfg.speed.value + ); // Speed not supported, dont retry - it = consumer.m_toSync.erase(it); + it = taskMap.erase(it); continue; } - // for backward compatible, if p.m_autoneg != 1, toggle admin status - if (p.m_admin_state_up && p.m_autoneg != 1) + // for backward compatible, if autoneg is off, toggle 
admin status + if (p.m_admin_state_up && !p.m_autoneg) { /* Bring port down before applying speed */ if (!setPortAdminStatus(p, false)) { - SWSS_LOG_ERROR("Failed to set port %s admin status DOWN to set speed", alias.c_str()); + SWSS_LOG_ERROR( + "Failed to set port %s admin status DOWN to set speed", + p.m_alias.c_str() + ); it++; continue; } p.m_admin_state_up = false; - m_portList[alias] = p; + m_portList[p.m_alias] = p; } - auto status = setPortSpeed(p, speed); + auto status = setPortSpeed(p, pCfg.speed.value); if (status != task_success) { - SWSS_LOG_ERROR("Failed to set port %s speed from %u to %u", alias.c_str(), p.m_speed, speed); + SWSS_LOG_ERROR( + "Failed to set port %s speed from %u to %u", + p.m_alias.c_str(), p.m_speed, pCfg.speed.value + ); if (status == task_need_retry) { it++; } else { - it = consumer.m_toSync.erase(it); + it = taskMap.erase(it); } continue; } - SWSS_LOG_NOTICE("Set port %s speed from %u to %u", alias.c_str(), p.m_speed, speed); - p.m_speed = speed; - m_portList[alias] = p; + p.m_speed = pCfg.speed.value; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE( + "Set port %s speed to %u", + p.m_alias.c_str(), pCfg.speed.value + ); } else { /* Always update Gearbox speed on Gearbox ports */ - setGearboxPortsAttr(p, SAI_PORT_ATTR_SPEED, &speed); + setGearboxPortsAttr(p, SAI_PORT_ATTR_SPEED, &pCfg.speed.value); } } - if (!adv_speeds_str.empty()) + if (pCfg.adv_speeds.is_set) { - boost::to_lower(adv_speeds_str); - if (!getPortAdvSpeedsVal(adv_speeds_str, adv_speeds)) - { - // Invalid advertised speeds configured, dont retry - it = consumer.m_toSync.erase(it); - continue; - } - - if (adv_speeds != p.m_adv_speeds) + if (!p.m_adv_speed_cfg || p.m_adv_speeds != pCfg.adv_speeds.value) { - if (p.m_admin_state_up && p.m_autoneg == 1) + if (p.m_admin_state_up && p.m_autoneg) { /* Bring port down before applying speed */ if (!setPortAdminStatus(p, false)) { - SWSS_LOG_ERROR("Failed to set port %s admin status DOWN to set interface type", 
alias.c_str()); + SWSS_LOG_ERROR( + "Failed to set port %s admin status DOWN to set interface type", + p.m_alias.c_str() + ); it++; continue; } p.m_admin_state_up = false; - m_portList[alias] = p; + m_portList[p.m_alias] = p; } + auto adv_speeds = swss::join(',', pCfg.adv_speeds.value.begin(), pCfg.adv_speeds.value.end()); auto ori_adv_speeds = swss::join(',', p.m_adv_speeds.begin(), p.m_adv_speeds.end()); - auto status = setPortAdvSpeeds(p.m_port_id, adv_speeds); + auto status = setPortAdvSpeeds(p, pCfg.adv_speeds.value); if (status != task_success) { - - SWSS_LOG_ERROR("Failed to set port %s advertised speed from %s to %s", alias.c_str(), - ori_adv_speeds.c_str(), - adv_speeds_str.c_str()); + SWSS_LOG_ERROR( + "Failed to set port %s advertised speed from %s to %s", + p.m_alias.c_str(), ori_adv_speeds.c_str(), adv_speeds.c_str() + ); if (status == task_need_retry) { it++; } else { - it = consumer.m_toSync.erase(it); + it = taskMap.erase(it); } continue; } - SWSS_LOG_NOTICE("Set port %s advertised speed from %s to %s", alias.c_str(), - ori_adv_speeds.c_str(), - adv_speeds_str.c_str()); - p.m_adv_speeds.swap(adv_speeds); - m_portList[alias] = p; + + p.m_adv_speeds = pCfg.adv_speeds.value; + p.m_adv_speed_cfg = true; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE( + "Set port %s advertised speed from %s to %s", + p.m_alias.c_str(), ori_adv_speeds.c_str(), adv_speeds.c_str() + ); } } - if (!interface_type_str.empty()) + if (pCfg.interface_type.is_set) { - boost::to_lower(interface_type_str); - if (!getPortInterfaceTypeVal(interface_type_str, interface_type)) - { - // Invalid interface type configured, dont retry - it = consumer.m_toSync.erase(it); - continue; - } - - if (interface_type != p.m_interface_type) + if (!p.m_intf_cfg || p.m_interface_type != pCfg.interface_type.value) { - if (p.m_admin_state_up && p.m_autoneg == 0) + if (p.m_admin_state_up && !p.m_autoneg) { /* Bring port down before applying speed */ if (!setPortAdminStatus(p, false)) { - 
SWSS_LOG_ERROR("Failed to set port %s admin status DOWN to set interface type", alias.c_str()); + SWSS_LOG_ERROR( + "Failed to set port %s admin status DOWN to set interface type", + p.m_alias.c_str() + ); it++; continue; } p.m_admin_state_up = false; - m_portList[alias] = p; + m_portList[p.m_alias] = p; } - auto status = setPortInterfaceType(p.m_port_id, interface_type); + auto status = setPortInterfaceType(p, pCfg.interface_type.value); if (status != task_success) { - SWSS_LOG_ERROR("Failed to set port %s interface type to %s", alias.c_str(), interface_type_str.c_str()); + SWSS_LOG_ERROR( + "Failed to set port %s interface type to %s", + p.m_alias.c_str(), m_portHlpr.getPortInterfaceTypeStr(pCfg).c_str() + ); if (status == task_need_retry) { it++; } else { - it = consumer.m_toSync.erase(it); + it = taskMap.erase(it); } continue; } - SWSS_LOG_NOTICE("Set port %s interface type to %s", alias.c_str(), interface_type_str.c_str()); - p.m_interface_type = interface_type; - m_portList[alias] = p; + p.m_interface_type = pCfg.interface_type.value; + p.m_intf_cfg = true; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE( + "Set port %s interface type to %s", + p.m_alias.c_str(), m_portHlpr.getPortInterfaceTypeStr(pCfg).c_str() + ); } } - if (!adv_interface_types_str.empty()) + if (pCfg.adv_interface_types.is_set) { - boost::to_lower(adv_interface_types_str); - if (!getPortAdvInterfaceTypesVal(adv_interface_types_str, adv_interface_types)) - { - // Invalid advertised interface types configured, dont retry - it = consumer.m_toSync.erase(it); - continue; - } - - if (adv_interface_types != p.m_adv_interface_types && p.m_autoneg == 1) + if (!p.m_adv_intf_cfg || p.m_adv_interface_types != pCfg.adv_interface_types.value) { - if (p.m_admin_state_up) + if (p.m_admin_state_up && p.m_autoneg) { /* Bring port down before applying speed */ if (!setPortAdminStatus(p, false)) { - SWSS_LOG_ERROR("Failed to set port %s admin status DOWN to set interface type", alias.c_str()); + 
SWSS_LOG_ERROR( + "Failed to set port %s admin status DOWN to set interface type", + p.m_alias.c_str() + ); it++; continue; } p.m_admin_state_up = false; - m_portList[alias] = p; + m_portList[p.m_alias] = p; } - auto status = setPortAdvInterfaceTypes(p.m_port_id, adv_interface_types); + auto status = setPortAdvInterfaceTypes(p, pCfg.adv_interface_types.value); if (status != task_success) { - SWSS_LOG_ERROR("Failed to set port %s advertised interface type to %s", alias.c_str(), adv_interface_types_str.c_str()); + SWSS_LOG_ERROR( + "Failed to set port %s advertised interface types to %s", + p.m_alias.c_str(), m_portHlpr.getAdvInterfaceTypesStr(pCfg).c_str() + ); if (status == task_need_retry) { it++; } else { - it = consumer.m_toSync.erase(it); + it = taskMap.erase(it); } continue; } - SWSS_LOG_NOTICE("Set port %s advertised interface type to %s", alias.c_str(), adv_interface_types_str.c_str()); - p.m_adv_interface_types.swap(adv_interface_types); - m_portList[alias] = p; + p.m_adv_interface_types = pCfg.adv_interface_types.value; + p.m_adv_intf_cfg = true; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE( + "Set port %s advertised interface type to %s", + p.m_alias.c_str(), m_portHlpr.getAdvInterfaceTypesStr(pCfg).c_str() + ); } } - if (mtu != 0 && mtu != p.m_mtu) + if (pCfg.mtu.is_set) { - if (setPortMtu(p, mtu)) + if (p.m_mtu != pCfg.mtu.value) { - p.m_mtu = mtu; - m_portList[alias] = p; - SWSS_LOG_NOTICE("Set port %s MTU to %u", alias.c_str(), mtu); + if (!setPortMtu(p, pCfg.mtu.value)) + { + SWSS_LOG_ERROR( + "Failed to set port %s MTU to %u", + p.m_alias.c_str(), pCfg.mtu.value + ); + it++; + continue; + } + + p.m_mtu = pCfg.mtu.value; + m_portList[p.m_alias] = p; + if (p.m_rif_id) { gIntfsOrch->setRouterIntfsMtu(p); } + // Sub interfaces inherit parent physical port mtu - updateChildPortsMtu(p, mtu); - } - else - { - SWSS_LOG_ERROR("Failed to set port %s MTU to %u", alias.c_str(), mtu); - it++; - continue; + updateChildPortsMtu(p, pCfg.mtu.value); + + 
SWSS_LOG_NOTICE( + "Set port %s MTU to %u", + p.m_alias.c_str(), pCfg.mtu.value + ); } } - if (tpid != 0 && tpid != p.m_tpid) + if (pCfg.tpid.is_set) { - SWSS_LOG_DEBUG("Set port %s TPID to 0x%x", alias.c_str(), tpid); - if (setPortTpid(p.m_port_id, tpid)) - { - p.m_tpid = tpid; - m_portList[alias] = p; - } - else + if (p.m_tpid != pCfg.tpid.value) { - SWSS_LOG_ERROR("Failed to set port %s TPID to 0x%x", alias.c_str(), tpid); - it++; - continue; + if (!setPortTpid(p, pCfg.tpid.value)) + { + SWSS_LOG_ERROR( + "Failed to set port %s TPID to 0x%x", + p.m_alias.c_str(), pCfg.tpid.value + ); + it++; + continue; + } + + p.m_tpid = pCfg.tpid.value; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE( + "Set port %s TPID to 0x%x", + p.m_alias.c_str(), pCfg.tpid.value + ); } } - if (!fec_mode.empty()) + if (pCfg.fec.is_set) { - if (fec_mode_map.find(fec_mode) != fec_mode_map.end()) + /* reset fec mode upon mode change */ + if (!p.m_fec_cfg || p.m_fec_mode != pCfg.fec.value || p.m_override_fec != pCfg.fec.override_fec) { - /* reset fec mode upon mode change */ - if (!p.m_fec_cfg || p.m_fec_mode != fec_mode_map[fec_mode]) + if (!pCfg.fec.override_fec && !fec_override_sup) { - if (p.m_admin_state_up) - { - /* Bring port down before applying fec mode*/ - if (!setPortAdminStatus(p, false)) - { - SWSS_LOG_ERROR("Failed to set port %s admin status DOWN to set fec mode", alias.c_str()); - it++; - continue; - } + SWSS_LOG_ERROR("Auto FEC mode is not supported"); + it = taskMap.erase(it); + continue; + } + if (!isFecModeSupported(p, pCfg.fec.value)) + { + SWSS_LOG_ERROR( + "Unsupported port %s FEC mode %s", + p.m_alias.c_str(), m_portHlpr.getFecStr(pCfg).c_str() + ); + // FEC mode is not supported, don't retry + it = taskMap.erase(it); + continue; + } - p.m_admin_state_up = false; - p.m_fec_mode = fec_mode_map[fec_mode]; - p.m_fec_cfg = true; + if (!pCfg.fec.override_fec && !p.m_autoneg) + { + SWSS_LOG_NOTICE("Autoneg must be enabled for port fec mode auto to work"); + } - if 
(setPortFec(p, fec_mode)) - { - m_portList[alias] = p; - } - else - { - it++; - continue; - } - } - else + if (p.m_admin_state_up) + { + /* Bring port down before applying fec mode*/ + if (!setPortAdminStatus(p, false)) { - /* Port is already down, setting fec mode*/ - p.m_fec_mode = fec_mode_map[fec_mode]; - p.m_fec_cfg = true; - if (setPortFec(p, fec_mode)) - { - m_portList[alias] = p; - } - else - { - it++; - continue; - } + SWSS_LOG_ERROR( + "Failed to set port %s admin status DOWN to set fec mode", + p.m_alias.c_str() + ); + it++; + continue; } + + p.m_admin_state_up = false; + m_portList[p.m_alias] = p; } - } - else - { - SWSS_LOG_ERROR("Unknown fec mode %s", fec_mode.c_str()); + + if (!setPortFec(p, pCfg.fec.value, pCfg.fec.override_fec)) + { + SWSS_LOG_ERROR( + "Failed to set port %s FEC mode %s", + p.m_alias.c_str(), m_portHlpr.getFecStr(pCfg).c_str() + ); + it++; + continue; + } + + p.m_fec_mode = pCfg.fec.value; + p.m_override_fec = pCfg.fec.override_fec; + p.m_fec_cfg = true; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE( + "Set port %s FEC mode to %s", + p.m_alias.c_str(), m_portHlpr.getFecStr(pCfg).c_str() + ); } } - if (!learn_mode.empty() && (p.m_learn_mode != learn_mode)) + if (pCfg.learn_mode.is_set) { - if (p.m_bridge_port_id != SAI_NULL_OBJECT_ID) + if (!p.m_lm_cfg || p.m_learn_mode != pCfg.learn_mode.value) { - if(setBridgePortLearnMode(p, learn_mode)) - { - p.m_learn_mode = learn_mode; - m_portList[alias] = p; - SWSS_LOG_NOTICE("Set port %s learn mode to %s", alias.c_str(), learn_mode.c_str()); - } - else + if(!setBridgePortLearnMode(p, pCfg.learn_mode.value)) { - SWSS_LOG_ERROR("Failed to set port %s learn mode to %s", alias.c_str(), learn_mode.c_str()); + SWSS_LOG_ERROR( + "Failed to set port %s learn mode to %s", + p.m_alias.c_str(), m_portHlpr.getLearnModeStr(pCfg).c_str() + ); it++; continue; } - } - else - { - p.m_learn_mode = learn_mode; - m_portList[alias] = p; - SWSS_LOG_NOTICE("Saved to set port %s learn mode %s", alias.c_str(), 
learn_mode.c_str()); + p.m_learn_mode = pCfg.learn_mode.value; + p.m_lm_cfg = true; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE( + "Set port %s learn mode to %s", + p.m_alias.c_str(), m_portHlpr.getLearnModeStr(pCfg).c_str() + ); } } - if (pfc_asym != "") + if (pCfg.pfc_asym.is_set) { - if (setPortPfcAsym(p, pfc_asym)) + if (!p.m_pfc_asym_cfg || p.m_pfc_asym != pCfg.pfc_asym.value) { - SWSS_LOG_NOTICE("Set port %s asymmetric PFC to %s", alias.c_str(), pfc_asym.c_str()); - } - else - { - SWSS_LOG_ERROR("Failed to set port %s asymmetric PFC to %s", alias.c_str(), pfc_asym.c_str()); - it++; - continue; + if (m_portCap.isPortPfcAsymSupported()) + { + if (!setPortPfcAsym(p, pCfg.pfc_asym.value)) + { + SWSS_LOG_ERROR( + "Failed to set port %s asymmetric PFC to %s", + p.m_alias.c_str(), m_portHlpr.getPfcAsymStr(pCfg).c_str() + ); + it++; + continue; + } + + p.m_pfc_asym = pCfg.pfc_asym.value; + p.m_pfc_asym_cfg = true; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE( + "Set port %s asymmetric PFC to %s", + p.m_alias.c_str(), m_portHlpr.getPfcAsymStr(pCfg).c_str() + ); + } + else + { + SWSS_LOG_WARN( + "Port %s asymmetric PFC configuration is not supported: skipping ...", + p.m_alias.c_str() + ); + } } } - if (serdes_attr.size() != 0) + if (!serdes_attr.empty()) { - if (p.m_link_training > 0) + if (p.m_link_training) { - SWSS_LOG_NOTICE("Save port %s preemphasis for LT", alias.c_str()); + SWSS_LOG_NOTICE("Save port %s preemphasis for LT", p.m_alias.c_str()); p.m_preemphasis = serdes_attr; - m_portList[alias] = p; - } - else if (setPortSerdesAttribute(p.m_port_id, serdes_attr)) - { - SWSS_LOG_NOTICE("Set port %s preemphasis is success", alias.c_str()); - p.m_preemphasis = serdes_attr; - m_portList[alias] = p; + m_portList[p.m_alias] = p; } else { - SWSS_LOG_ERROR("Failed to set port %s pre-emphasis", alias.c_str()); - it++; - continue; + if (p.m_admin_state_up) + { + /* Bring port down before applying serdes attribute*/ + if (!setPortAdminStatus(p, false)) + { + 
SWSS_LOG_ERROR("Failed to set port %s admin status DOWN to set serdes attr", p.m_alias.c_str()); + it++; + continue; + } + + p.m_admin_state_up = false; + m_portList[p.m_alias] = p; + } + + if (setPortSerdesAttribute(p.m_port_id, gSwitchId, serdes_attr)) + { + SWSS_LOG_NOTICE("Set port %s SI settings is successful", p.m_alias.c_str()); + p.m_preemphasis = serdes_attr; + m_portList[p.m_alias] = p; + } + else + { + SWSS_LOG_ERROR("Failed to set port %s SI settings", p.m_alias.c_str()); + it++; + continue; + } } } - + /* create host_tx_ready field in state-db */ initHostTxReadyState(p); - + + // Restore admin status if the port was brought down + if (admin_status != p.m_admin_state_up) + { + pCfg.admin_status.is_set = true; + pCfg.admin_status.value = admin_status; + } + /* Last step set port admin status */ - if (!admin_status.empty() && (p.m_admin_state_up != (admin_status == "up"))) + if (pCfg.admin_status.is_set) { - if (setPortAdminStatus(p, admin_status == "up")) + if (p.m_admin_state_up != pCfg.admin_status.value) { - p.m_admin_state_up = (admin_status == "up"); - m_portList[alias] = p; - SWSS_LOG_NOTICE("Set port %s admin status to %s", alias.c_str(), admin_status.c_str()); - } - else - { - SWSS_LOG_ERROR("Failed to set port %s admin status to %s", alias.c_str(), admin_status.c_str()); - it++; - continue; + if (!setPortAdminStatus(p, pCfg.admin_status.value)) + { + SWSS_LOG_ERROR( + "Failed to set port %s admin status to %s", + p.m_alias.c_str(), m_portHlpr.getAdminStatusStr(pCfg).c_str() + ); + it++; + continue; + } + + p.m_admin_state_up = pCfg.admin_status.value; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE( + "Set port %s admin status to %s", + p.m_alias.c_str(), m_portHlpr.getAdminStatusStr(pCfg).c_str() + ); } } } } else if (op == DEL_COMMAND) { + Port p; + if (!getPort(pCfg.key, p)) + { + SWSS_LOG_ERROR("Failed to remove port: alias %s doesn't exist", pCfg.key.c_str()); + m_portConfigMap.erase(pCfg.key); + it = taskMap.erase(it); + continue; + } + + 
const auto &alias = pCfg.key; + if (m_port_ref_count[alias] > 0) { SWSS_LOG_WARN("Unable to remove port %s: ref count %u", alias.c_str(), m_port_ref_count[alias]); @@ -3816,7 +4406,11 @@ void PortsOrch::doPortTask(Consumer &consumer) removePortFromPortListMap(port_id); /* Delete port from port list */ + m_portConfigMap.erase(alias); m_portList.erase(alias); + saiOidToAlias.erase(port_id); + + SWSS_LOG_NOTICE("Removed port %s", alias.c_str()); } else { @@ -4064,6 +4658,69 @@ void PortsOrch::doVlanMemberTask(Consumer &consumer) } } +void PortsOrch::doTransceiverPresenceCheck(Consumer &consumer) +{ + /* + the idea is to listen to transceiver info table, and also maintain an internal list of plugged modules. + + */ + SWSS_LOG_ENTER(); + + string table_name = consumer.getTableName(); + + auto it = consumer.m_toSync.begin(); + while(it != consumer.m_toSync.end()) + { + auto t = it->second; + string alias = kfvKey(t); + string op = kfvOp(t); + + if (op == SET_COMMAND) + { + SWSS_LOG_DEBUG("TRANSCEIVER_INFO table has changed - SET command for port %s", alias.c_str()); + + if (m_pluggedModulesPort.find(alias) == m_pluggedModulesPort.end()) + { + m_pluggedModulesPort[alias] = m_portList[alias]; + + SWSS_LOG_DEBUG("Setting host_tx_signal allow for port %s", alias.c_str()); + setSaiHostTxSignal(m_pluggedModulesPort[alias], true); + } + } + else if (op == DEL_COMMAND) + { + SWSS_LOG_DEBUG("TRANSCEIVER_INFO table has changed - DEL command for port %s", alias.c_str()); + + Port p; + if (m_pluggedModulesPort.find(alias) != m_pluggedModulesPort.end()) + { + p = m_pluggedModulesPort[alias]; + m_pluggedModulesPort.erase(alias); + SWSS_LOG_DEBUG("Setting host_tx_signal NOT allow for port %s", alias.c_str()); + setSaiHostTxSignal(p, false); + } + } + + it = consumer.m_toSync.erase(it); + } +} + +bool PortsOrch::setSaiHostTxSignal(const Port &port, bool enable) +{ + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE; + attr.value.booldata = enable; + sai_status_t 
status = sai_port_api->set_port_attribute(port.m_port_id, &attr); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Could not setSAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE to port 0x%" PRIx64, port.m_port_id); + return false; + } + + return true; +} + void PortsOrch::doLagTask(Consumer &consumer) { SWSS_LOG_ENTER(); @@ -4082,7 +4739,8 @@ void PortsOrch::doLagTask(Consumer &consumer) { // Retrieve attributes uint32_t mtu = 0; - string learn_mode; + string learn_mode_str; + sai_bridge_port_fdb_learning_mode_t learn_mode = SAI_BRIDGE_PORT_FDB_LEARNING_MODE_HW; string operation_status; uint32_t lag_id = 0; int32_t switch_id = -1; @@ -4097,7 +4755,17 @@ void PortsOrch::doLagTask(Consumer &consumer) } else if (fvField(i) == "learn_mode") { - learn_mode = fvValue(i); + learn_mode_str = fvValue(i); + + const auto &cit = learn_mode_map.find(learn_mode_str); + if (cit == learn_mode_map.cend()) + { + SWSS_LOG_ERROR("Invalid MAC learn mode: %s", learn_mode_str.c_str()); + it++; + continue; + } + + learn_mode = cit->second; } else if (fvField(i) == "oper_status") { @@ -4197,7 +4865,7 @@ void PortsOrch::doLagTask(Consumer &consumer) } } - if (!learn_mode.empty() && (l.m_learn_mode != learn_mode)) + if (!learn_mode_str.empty() && (l.m_learn_mode != learn_mode)) { if (l.m_bridge_port_id != SAI_NULL_OBJECT_ID) { @@ -4205,11 +4873,11 @@ void PortsOrch::doLagTask(Consumer &consumer) { l.m_learn_mode = learn_mode; m_portList[alias] = l; - SWSS_LOG_NOTICE("Set port %s learn mode to %s", alias.c_str(), learn_mode.c_str()); + SWSS_LOG_NOTICE("Set port %s learn mode to %s", alias.c_str(), learn_mode_str.c_str()); } else { - SWSS_LOG_ERROR("Failed to set port %s learn mode to %s", alias.c_str(), learn_mode.c_str()); + SWSS_LOG_ERROR("Failed to set port %s learn mode to %s", alias.c_str(), learn_mode_str.c_str()); it++; continue; } @@ -4219,7 +4887,7 @@ void PortsOrch::doLagTask(Consumer &consumer) l.m_learn_mode = learn_mode; m_portList[alias] = l; - SWSS_LOG_NOTICE("Saved to set port 
%s learn mode %s", alias.c_str(), learn_mode.c_str()); + SWSS_LOG_NOTICE("Saved to set port %s learn mode %s", alias.c_str(), learn_mode_str.c_str()); } } } @@ -4338,13 +5006,19 @@ void PortsOrch::doLagMemberTask(Consumer &consumer) continue; } - if (!addLagMember(lag, port, (status == "enabled"))) + if (!addLagMember(lag, port, status)) { it++; continue; } } + if ((gMySwitchType == "voq") && (port.m_type != Port::SYSTEM)) + { + //Sync to SYSTEM_LAG_MEMBER_TABLE of CHASSIS_APP_DB + voqSyncAddLagMember(lag, port, status); + } + /* Sync an enabled member */ if (status == "enabled") { @@ -4418,7 +5092,7 @@ void PortsOrch::doTask() APP_LAG_TABLE_NAME, APP_LAG_MEMBER_TABLE_NAME, APP_VLAN_TABLE_NAME, - APP_VLAN_MEMBER_TABLE_NAME, + APP_VLAN_MEMBER_TABLE_NAME }; for (auto tableName: tableOrder) @@ -4445,10 +5119,18 @@ void PortsOrch::doTask(Consumer &consumer) string table_name = consumer.getTableName(); - if (table_name == APP_PORT_TABLE_NAME) + if (table_name == STATE_TRANSCEIVER_INFO_TABLE_NAME) + { + doTransceiverPresenceCheck(consumer); + } + else if (table_name == APP_PORT_TABLE_NAME) { doPortTask(consumer); } + else if (table_name == APP_SEND_TO_INGRESS_PORT_TABLE_NAME) + { + doSendToIngressPortTask(consumer); + } else { /* Wait for all ports to be initialized */ @@ -4476,6 +5158,51 @@ void PortsOrch::doTask(Consumer &consumer) } } +void PortsOrch::initializeVoqs(Port &port) +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + attr.id = SAI_SYSTEM_PORT_ATTR_QOS_NUMBER_OF_VOQS; + sai_status_t status = sai_system_port_api->get_system_port_attribute( + port.m_system_port_oid, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get number of voqs for port %s rv:%d", port.m_alias.c_str(), status); + task_process_status handle_status = handleSaiGetStatus(SAI_API_PORT, status); + if (handle_status != task_process_status::task_success) + { + throw runtime_error("PortsOrch initialization failure."); + } + } + SWSS_LOG_INFO("Get %d voq for port %s", 
attr.value.u32, port.m_alias.c_str()); + + m_port_voq_ids[port.m_alias] = std::vector( attr.value.u32 ); + + if (attr.value.u32 == 0) + { + return; + } + + attr.id = SAI_SYSTEM_PORT_ATTR_QOS_VOQ_LIST; + attr.value.objlist.count = (uint32_t) m_port_voq_ids[port.m_alias].size(); + attr.value.objlist.list = m_port_voq_ids[port.m_alias].data(); + + status = sai_system_port_api->get_system_port_attribute( + port.m_system_port_oid, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get voq list for port %s rv:%d", port.m_alias.c_str(), status); + task_process_status handle_status = handleSaiGetStatus(SAI_API_PORT, status); + if (handle_status != task_process_status::task_success) + { + throw runtime_error("PortsOrch initialization failure."); + } + } + + SWSS_LOG_INFO("Get voqs for port %s", port.m_alias.c_str()); +} + void PortsOrch::initializeQueues(Port &port) { SWSS_LOG_ENTER(); @@ -4520,6 +5247,50 @@ void PortsOrch::initializeQueues(Port &port) SWSS_LOG_INFO("Get queues for port %s", port.m_alias.c_str()); } +void PortsOrch::initializeSchedulerGroups(Port &port) +{ + std::vector scheduler_group_ids; + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_QOS_NUMBER_OF_SCHEDULER_GROUPS; + sai_status_t status = sai_port_api->get_port_attribute(port.m_port_id, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get number of scheduler groups for port:%s", port.m_alias.c_str()); + task_process_status handle_status = handleSaiGetStatus(SAI_API_PORT, status); + if (handle_status != task_process_status::task_success) + { + throw runtime_error("PortsOrch initialization failure."); + } + } + SWSS_LOG_INFO("Got %d number of scheduler groups for port %s", attr.value.u32, port.m_alias.c_str()); + + scheduler_group_ids.resize(attr.value.u32); + + if (attr.value.u32 == 0) + { + return; + } + + attr.id = SAI_PORT_ATTR_QOS_SCHEDULER_GROUP_LIST; + attr.value.objlist.count = (uint32_t)scheduler_group_ids.size(); + 
attr.value.objlist.list = scheduler_group_ids.data(); + + status = sai_port_api->get_port_attribute(port.m_port_id, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get scheduler group list for port %s rv:%d", port.m_alias.c_str(), status); + task_process_status handle_status = handleSaiGetStatus(SAI_API_PORT, status); + if (handle_status != task_process_status::task_success) + { + throw runtime_error("PortsOrch initialization failure."); + } + } + + SWSS_LOG_INFO("Got scheduler groups for port %s", port.m_alias.c_str()); +} + void PortsOrch::initializePriorityGroups(Port &port) { SWSS_LOG_ENTER(); @@ -4592,9 +5363,13 @@ bool PortsOrch::initializePort(Port &port) SWSS_LOG_NOTICE("Initializing port alias:%s pid:%" PRIx64, port.m_alias.c_str(), port.m_port_id); - initializePriorityGroups(port); - initializeQueues(port); - initializePortBufferMaximumParameters(port); + if (gMySwitchType != "dpu") + { + initializePriorityGroups(port); + initializeQueues(port); + initializeSchedulerGroups(port); + initializePortBufferMaximumParameters(port); + } /* Create host interface */ if (!addHostIntfs(port, port.m_alias, port.m_hif_id)) @@ -4606,7 +5381,7 @@ bool PortsOrch::initializePort(Port &port) /* Check warm start states */ vector tuples; bool exist = m_portTable->get(port.m_alias, tuples); - string operStatus; + string operStatus, flapCount = "0"; if (exist) { for (auto i : tuples) @@ -4615,9 +5390,14 @@ bool PortsOrch::initializePort(Port &port) { operStatus = fvValue(i); } + + if (fvField(i) == "flap_count") + { + flapCount = fvValue(i); + } } } - SWSS_LOG_DEBUG("initializePort %s with oper %s", port.m_alias.c_str(), operStatus.c_str()); + SWSS_LOG_INFO("Port %s with oper %s flap_count=%s", port.m_alias.c_str(), operStatus.c_str(), flapCount.c_str()); /** * Create database port oper status as DOWN if attr missing @@ -4638,6 +5418,20 @@ bool PortsOrch::initializePort(Port &port) port.m_oper_status = SAI_PORT_OPER_STATUS_DOWN; } + // initalize 
port flap count + if (!flapCount.empty()) + { + try + { + port.m_flap_count = stoull(flapCount); + m_portTable->hset(port.m_alias, "flap_count", flapCount); + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to get port (%s) flap_count: %s", port.m_alias.c_str(), e.what()); + } + } + /* initialize port admin status */ if (!getPortAdminStatus(port.m_port_id, port.m_admin_state_up)) { @@ -4646,7 +5440,7 @@ bool PortsOrch::initializePort(Port &port) } /* initialize port admin speed */ - if (!getPortSpeed(port.m_port_id, port.m_speed)) + if (!isAutoNegEnabled(port.m_port_id) && !getPortSpeed(port.m_port_id, port.m_speed)) { SWSS_LOG_ERROR("Failed to get initial port admin speed %d", port.m_speed); return false; @@ -4658,6 +5452,22 @@ bool PortsOrch::initializePort(Port &port) SWSS_LOG_ERROR("Failed to get initial port mtu %d", port.m_mtu); } + /* initialize port host_tx_ready value (only for supporting systems) */ + if (m_cmisModuleAsicSyncSupported) + { + bool hostTxReadyVal; + if (!getPortHostTxReady(port, hostTxReadyVal)) + { + SWSS_LOG_ERROR("Failed to get host_tx_ready value from SAI to Port %" PRIx64 , port.m_port_id); + } + /* set value to state DB */ + + string hostTxReadyStr = hostTxReadyVal ? "true" : "false"; + + SWSS_LOG_DEBUG("Received host_tx_ready current status: port_id: 0x%" PRIx64 " status: %s", port.m_port_id, hostTxReadyStr.c_str()); + setHostTxReady(port.m_port_id, hostTxReadyStr); + } + /* * always initialize Port SAI_HOSTIF_ATTR_OPER_STATUS based on oper_status value in appDB. 
*/ @@ -4696,6 +5506,23 @@ bool PortsOrch::addHostIntfs(Port &port, string alias, sai_object_id_t &host_int attr.value.chardata[SAI_HOSTIF_NAME_SIZE - 1] = '\0'; attrs.push_back(attr); + bool set_hostif_tx_queue = false; + if (gSwitchOrch->querySwitchCapability(SAI_OBJECT_TYPE_HOSTIF, SAI_HOSTIF_ATTR_QUEUE)) + { + set_hostif_tx_queue = true; + } + else + { + SWSS_LOG_WARN("Hostif queue attribute not supported"); + } + + if (set_hostif_tx_queue) + { + attr.id = SAI_HOSTIF_ATTR_QUEUE; + attr.value.u32 = DEFAULT_HOSTIF_TX_QUEUE; + attrs.push_back(attr); + } + sai_status_t status = sai_hostif_api->create_hostif(&host_intfs_id, gSwitchId, (uint32_t)attrs.size(), attrs.data()); if (status != SAI_STATUS_SUCCESS) { @@ -4712,6 +5539,64 @@ bool PortsOrch::addHostIntfs(Port &port, string alias, sai_object_id_t &host_int return true; } +ReturnCode PortsOrch::addSendToIngressHostIf(const std::string &send_to_ingress_name) +{ + SWSS_LOG_ENTER(); + + // For SendToIngress port, add the host interface and bind to the CPU port + vector ingress_attribs; + sai_attribute_t attr; + + attr.id = SAI_HOSTIF_ATTR_TYPE; + attr.value.s32 = SAI_HOSTIF_TYPE_NETDEV; + ingress_attribs.push_back(attr); + + attr.id = SAI_HOSTIF_ATTR_NAME; + auto size = sizeof(attr.value.chardata); + strncpy(attr.value.chardata, send_to_ingress_name.c_str(), + size - 1); + attr.value.chardata[size - 1] = '\0'; + ingress_attribs.push_back(attr); + + // If this isn't passed as true, the false setting makes + // the device unready for later attempts to set UP/RUNNING + attr.id = SAI_HOSTIF_ATTR_OPER_STATUS; + attr.value.booldata = true; + ingress_attribs.push_back(attr); + + // Get CPU port object id to signal send to ingress + attr.id = SAI_HOSTIF_ATTR_OBJ_ID; + attr.value.oid = m_cpuPort.m_port_id; + ingress_attribs.push_back(attr); + + LOG_AND_RETURN_IF_ERROR(sai_hostif_api->create_hostif(&m_cpuPort.m_hif_id, + gSwitchId, + (uint32_t)ingress_attribs.size(), + ingress_attribs.data())); + + return ReturnCode(); +} + 
+ReturnCode PortsOrch::removeSendToIngressHostIf() +{ + SWSS_LOG_ENTER(); + + if (SAI_NULL_OBJECT_ID == m_cpuPort.m_hif_id) + { + ReturnCode rc = ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) + << "Can't delete invalid SendToIngress hostif with SAI_NULL_OBJECT_ID oid"; + SWSS_LOG_ERROR("%s", rc.message().c_str()); + return rc; + } + + CHECK_ERROR_AND_LOG_AND_RETURN( + sai_hostif_api->remove_hostif(m_cpuPort.m_hif_id), + "Failed to delete SendToIngress hostif:0x" + << std::hex << m_cpuPort.m_hif_id); + + return ReturnCode(); +} + bool PortsOrch::setBridgePortLearningFDB(Port &port, sai_bridge_port_fdb_learning_mode_t mode) { // TODO: how to support 1D bridge? @@ -4746,6 +5631,12 @@ bool PortsOrch::addBridgePort(Port &port) return true; } + if (port.m_rif_id != 0) + { + SWSS_LOG_NOTICE("Cannot create bridge port, interface %s is a router port", port.m_alias.c_str()); + return false; + } + sai_attribute_t attr; vector attrs; @@ -4797,15 +5688,7 @@ bool PortsOrch::addBridgePort(Port &port) /* And with hardware FDB learning mode set to HW (explicit default value) */ attr.id = SAI_BRIDGE_PORT_ATTR_FDB_LEARNING_MODE; - auto found = learn_mode_map.find(port.m_learn_mode); - if (found == learn_mode_map.end()) - { - attr.value.s32 = SAI_BRIDGE_PORT_FDB_LEARNING_MODE_HW; - } - else - { - attr.value.s32 = found->second; - } + attr.value.s32 = port.m_learn_mode; attrs.push_back(attr); sai_status_t status = sai_bridge_api->create_bridge_port(&port.m_bridge_port_id, gSwitchId, (uint32_t)attrs.size(), attrs.data()); @@ -4897,7 +5780,7 @@ bool PortsOrch::removeBridgePort(Port &port) return true; } -bool PortsOrch::setBridgePortLearnMode(Port &port, string learn_mode) +bool PortsOrch::setBridgePortLearnMode(Port &port, sai_bridge_port_fdb_learning_mode_t learn_mode) { SWSS_LOG_ENTER(); @@ -4906,17 +5789,10 @@ bool PortsOrch::setBridgePortLearnMode(Port &port, string learn_mode) return true; } - auto found = learn_mode_map.find(learn_mode); - if (found == learn_mode_map.end()) - { - 
SWSS_LOG_ERROR("Incorrect MAC learn mode: %s", learn_mode.c_str()); - return false; - } - /* Set bridge port learning mode */ sai_attribute_t attr; attr.id = SAI_BRIDGE_PORT_ATTR_FDB_LEARNING_MODE; - attr.value.s32 = found->second; + attr.value.s32 = learn_mode; sai_status_t status = sai_bridge_api->set_bridge_port_attribute(port.m_bridge_port_id, &attr); if (status != SAI_STATUS_SUCCESS) @@ -4930,7 +5806,7 @@ bool PortsOrch::setBridgePortLearnMode(Port &port, string learn_mode) } } - SWSS_LOG_NOTICE("Set bridge port %s learning mode %s", port.m_alias.c_str(), learn_mode.c_str()); + SWSS_LOG_NOTICE("Set bridge port %s learning mode %d", port.m_alias.c_str(), learn_mode); return true; } @@ -4969,6 +5845,7 @@ bool PortsOrch::addVlan(string vlan_alias) m_portList[vlan_alias] = vlan; m_port_ref_count[vlan_alias] = 0; saiOidToAlias[vlan_oid] = vlan_alias; + m_vlanPorts.emplace(vlan_alias); return true; } @@ -5035,6 +5912,7 @@ bool PortsOrch::removeVlan(Port vlan) saiOidToAlias.erase(vlan.m_vlan_info.vlan_oid); m_portList.erase(vlan.m_alias); m_port_ref_count.erase(vlan.m_alias); + m_vlanPorts.erase(vlan.m_alias); return true; } @@ -5673,9 +6551,10 @@ void PortsOrch::getLagMember(Port &lag, vector &portv) } } -bool PortsOrch::addLagMember(Port &lag, Port &port, bool enableForwarding) +bool PortsOrch::addLagMember(Port &lag, Port &port, string member_status) { SWSS_LOG_ENTER(); + bool enableForwarding = (member_status == "enabled"); sai_uint32_t pvid; if (getPortPvid(lag, pvid)) @@ -5747,7 +6626,7 @@ bool PortsOrch::addLagMember(Port &lag, Port &port, bool enableForwarding) if (gMySwitchType == "voq") { //Sync to SYSTEM_LAG_MEMBER_TABLE of CHASSIS_APP_DB - voqSyncAddLagMember(lag, port); + voqSyncAddLagMember(lag, port, member_status); } return true; @@ -5835,12 +6714,6 @@ bool PortsOrch::setCollectionOnLagMember(Port &lagMember, bool enableCollection) /* Port must be LAG member */ assert(lagMember.m_lag_member_id); - // Collection is not applicable for system port lag 
members (i.e, members of remote LAGs) - if (lagMember.m_type == Port::SYSTEM) - { - return true; - } - sai_status_t status = SAI_STATUS_FAILURE; sai_attribute_t attr {}; @@ -5872,12 +6745,6 @@ bool PortsOrch::setDistributionOnLagMember(Port &lagMember, bool enableDistribut /* Port must be LAG member */ assert(lagMember.m_lag_member_id); - // Distribution is not applicable for system port lag members (i.e, members of remote LAGs) - if (lagMember.m_type == Port::SYSTEM) - { - return true; - } - sai_status_t status = SAI_STATUS_FAILURE; sai_attribute_t attr {}; @@ -5912,11 +6779,11 @@ bool PortsOrch::addTunnel(string tunnel_alias, sai_object_id_t tunnel_id, bool h tunnel.m_tunnel_id = tunnel_id; if (hwlearning) { - tunnel.m_learn_mode = "hardware"; + tunnel.m_learn_mode = SAI_BRIDGE_PORT_FDB_LEARNING_MODE_HW; } else { - tunnel.m_learn_mode = "disable"; + tunnel.m_learn_mode = SAI_BRIDGE_PORT_FDB_LEARNING_MODE_DISABLE; } m_portList[tunnel_alias] = tunnel; @@ -5941,9 +6808,36 @@ void PortsOrch::generateQueueMap(map queuesState return; } + bool isCreateAllQueues = false; + + if (queuesStateVector.count(createAllAvailableBuffersStr)) + { + isCreateAllQueues = true; + queuesStateVector.clear(); + } + for (const auto& it: m_portList) { if (it.second.m_type == Port::PHY) + { + if (!queuesStateVector.count(it.second.m_alias)) + { + auto maxQueueNumber = getNumberOfPortSupportedQueueCounters(it.second.m_alias); + FlexCounterQueueStates flexCounterQueueState(maxQueueNumber); + if (isCreateAllQueues && maxQueueNumber) + { + flexCounterQueueState.enableQueueCounters(0, maxQueueNumber - 1); + } + queuesStateVector.insert(make_pair(it.second.m_alias, flexCounterQueueState)); + } + generateQueueMapPerPort(it.second, queuesStateVector.at(it.second.m_alias), false); + if (gMySwitchType == "voq") + { + generateQueueMapPerPort(it.second, queuesStateVector.at(it.second.m_alias), true); + } + } + + if (it.second.m_type == Port::SYSTEM) { if (!queuesStateVector.count(it.second.m_alias)) { 
@@ -5951,34 +6845,53 @@ void PortsOrch::generateQueueMap(map queuesState FlexCounterQueueStates flexCounterQueueState(maxQueueNumber); queuesStateVector.insert(make_pair(it.second.m_alias, flexCounterQueueState)); } - generateQueueMapPerPort(it.second, queuesStateVector.at(it.second.m_alias)); + generateQueueMapPerPort(it.second, queuesStateVector.at(it.second.m_alias), true); } } m_isQueueMapGenerated = true; } -void PortsOrch::generateQueueMapPerPort(const Port& port, FlexCounterQueueStates& queuesState) +void PortsOrch::generateQueueMapPerPort(const Port& port, FlexCounterQueueStates& queuesState, bool voq) { /* Create the Queue map in the Counter DB */ - /* Add stat counters to flex_counter */ vector queueVector; vector queuePortVector; vector queueIndexVector; vector queueTypeVector; + std::vector queue_ids; - for (size_t queueIndex = 0; queueIndex < port.m_queue_ids.size(); ++queueIndex) + if (voq) + { + queue_ids = m_port_voq_ids[port.m_alias]; + } + else + { + queue_ids = port.m_queue_ids; + } + + for (size_t queueIndex = 0; queueIndex < queue_ids.size(); ++queueIndex) { std::ostringstream name; - name << port.m_alias << ":" << queueIndex; - const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]); + if (voq) + { + name << port.m_system_port_info.alias << ":" << queueIndex; + } + else + { + name << port.m_alias << ":" << queueIndex; + } + + const auto id = sai_serialize_object_id(queue_ids[queueIndex]); string queueType; uint8_t queueRealIndex = 0; - if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) + if (getQueueTypeAndIndex(queue_ids[queueIndex], queueType, queueRealIndex)) { - if (!queuesState.isQueueCounterEnabled(queueRealIndex)) + /* voq counters are always enabled. There is no mechanism to disable voq + * counters in a voq system. 
*/ + if ((gMySwitchType != "voq") && !queuesState.isQueueCounterEnabled(queueRealIndex)) { continue; } @@ -5987,39 +6900,196 @@ void PortsOrch::generateQueueMapPerPort(const Port& port, FlexCounterQueueStates } queueVector.emplace_back(name.str(), id); - queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); + if (voq) + { + // Install a flex counter for this voq to track stats. Voq counters do + // not have buffer queue config. So it does not get enabled through the + // flexcounter orch logic. Always enabled voq counters. + addQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, true); + queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_system_port_oid)); + } + else + { + // In voq systems, always install a flex counter for this egress queue + // to track stats. In voq systems, the buffer profiles are defined on + // sysports. So the phy ports do not have buffer queue config. Hence + // queuesStateVector built by getQueueConfigurations in flexcounterorch + // never has phy ports in voq systems. So always enabled egress queue + // counter on voq systems. 
+ if (gMySwitchType == "voq") + { + addQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, false); + } + queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); + } + } - // Install a flex counter for this queue to track stats - std::unordered_set counter_stats; - for (const auto& it: queue_stat_ids) + if (voq) + { + m_voqTable->set("", queueVector); + } + else + { + m_queueTable->set("", queueVector); + CounterCheckOrch::getInstance().addPort(port); + } + m_queuePortTable->set("", queuePortVector); + m_queueIndexTable->set("", queueIndexVector); + m_queueTypeTable->set("", queueTypeVector); + +} + +void PortsOrch::addQueueFlexCounters(map queuesStateVector) +{ + if (m_isQueueFlexCountersAdded) + { + return; + } + + bool isCreateAllQueues = false; + + if (queuesStateVector.count(createAllAvailableBuffersStr)) + { + isCreateAllQueues = true; + queuesStateVector.clear(); + } + + for (const auto& it: m_portList) + { + if (it.second.m_type == Port::PHY) { - counter_stats.emplace(sai_serialize_queue_stat(it)); + if (!queuesStateVector.count(it.second.m_alias)) + { + auto maxQueueNumber = getNumberOfPortSupportedQueueCounters(it.second.m_alias); + FlexCounterQueueStates flexCounterQueueState(maxQueueNumber); + if (isCreateAllQueues && maxQueueNumber) + { + flexCounterQueueState.enableQueueCounters(0, maxQueueNumber - 1); + } + queuesStateVector.insert(make_pair(it.second.m_alias, flexCounterQueueState)); + } + addQueueFlexCountersPerPort(it.second, queuesStateVector.at(it.second.m_alias)); } - queue_stat_manager.setCounterIdList(port.m_queue_ids[queueIndex], CounterType::QUEUE, counter_stats); + } + + m_isQueueFlexCountersAdded = true; +} - /* add watermark queue counters */ - string key = getQueueWatermarkFlexCounterTableKey(id); - string delimiter(""); - std::ostringstream counters_stream; - for (const auto& it: queueWatermarkStatIds) +void PortsOrch::addQueueFlexCountersPerPort(const Port& port, FlexCounterQueueStates& queuesState) +{ + for 
(size_t queueIndex = 0; queueIndex < port.m_queue_ids.size(); ++queueIndex) + { + string queueType; + uint8_t queueRealIndex = 0; + if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) { - counters_stream << delimiter << sai_serialize_queue_stat(it); - delimiter = comma; + if (!queuesState.isQueueCounterEnabled(queueRealIndex)) + { + continue; + } + // Install a flex counter for this queue to track stats + addQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, false); } + } +} - vector fieldValues; - fieldValues.emplace_back(QUEUE_COUNTER_ID_LIST, counters_stream.str()); +void PortsOrch::addQueueFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex, bool voq) +{ + std::unordered_set counter_stats; + std::vector queue_ids; - m_flexCounterTable->set(key, fieldValues); + for (const auto& it: queue_stat_ids) + { + counter_stats.emplace(sai_serialize_queue_stat(it)); + } + if (voq) + { + queue_ids = m_port_voq_ids[port.m_alias]; + } + else + { + queue_ids = port.m_queue_ids; } - m_queueTable->set("", queueVector); - m_queuePortTable->set("", queuePortVector); - m_queueIndexTable->set("", queueIndexVector); - m_queueTypeTable->set("", queueTypeVector); + queue_stat_manager.setCounterIdList(queue_ids[queueIndex], CounterType::QUEUE, counter_stats); +} - CounterCheckOrch::getInstance().addPort(port); + +void PortsOrch::addQueueWatermarkFlexCounters(map queuesStateVector) +{ + if (m_isQueueWatermarkFlexCountersAdded) + { + return; + } + + bool isCreateAllQueues = false; + + if (queuesStateVector.count(createAllAvailableBuffersStr)) + { + isCreateAllQueues = true; + queuesStateVector.clear(); + } + + for (const auto& it: m_portList) + { + if (it.second.m_type == Port::PHY) + { + if (!queuesStateVector.count(it.second.m_alias)) + { + auto maxQueueNumber = getNumberOfPortSupportedQueueCounters(it.second.m_alias); + FlexCounterQueueStates flexCounterQueueState(maxQueueNumber); + if (isCreateAllQueues && maxQueueNumber) + { + 
flexCounterQueueState.enableQueueCounters(0, maxQueueNumber - 1); + } + queuesStateVector.insert(make_pair(it.second.m_alias, flexCounterQueueState)); + } + addQueueWatermarkFlexCountersPerPort(it.second, queuesStateVector.at(it.second.m_alias)); + } + } + + m_isQueueWatermarkFlexCountersAdded = true; +} + +void PortsOrch::addQueueWatermarkFlexCountersPerPort(const Port& port, FlexCounterQueueStates& queuesState) +{ + /* Add stat counters to flex_counter */ + + for (size_t queueIndex = 0; queueIndex < port.m_queue_ids.size(); ++queueIndex) + { + string queueType; + uint8_t queueRealIndex = 0; + if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) + { + if (!queuesState.isQueueCounterEnabled(queueRealIndex)) + { + continue; + } + addQueueWatermarkFlexCountersPerPortPerQueueIndex(port, queueIndex); + } + } +} + +void PortsOrch::addQueueWatermarkFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex) +{ + const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]); + + /* add watermark queue counters */ + string key = getQueueWatermarkFlexCounterTableKey(id); + + string delimiter(""); + std::ostringstream counters_stream; + for (const auto& it: queueWatermarkStatIds) + { + counters_stream << delimiter << sai_serialize_queue_stat(it); + delimiter = comma; + } + + vector fieldValues; + fieldValues.emplace_back(QUEUE_COUNTER_ID_LIST, counters_stream.str()); + + m_flexCounterTable->set(key, fieldValues); } void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues) @@ -6027,7 +7097,6 @@ void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues) SWSS_LOG_ENTER(); /* Create the Queue map in the Counter DB */ - /* Add stat counters to flex_counter */ vector queueVector; vector queuePortVector; vector queueIndexVector; @@ -6059,29 +7128,17 @@ void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues) queueVector.emplace_back(name.str(), id); 
queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); - // Install a flex counter for this queue to track stats - std::unordered_set counter_stats; - for (const auto& it: queue_stat_ids) + auto flexCounterOrch = gDirectory.get(); + if (flexCounterOrch->getQueueCountersState()) { - counter_stats.emplace(sai_serialize_queue_stat(it)); + // Install a flex counter for this queue to track stats + addQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, false); } - queue_stat_manager.setCounterIdList(port.m_queue_ids[queueIndex], CounterType::QUEUE, counter_stats); - - /* add watermark queue counters */ - string key = getQueueWatermarkFlexCounterTableKey(id); - - string delimiter(""); - std::ostringstream counters_stream; - for (const auto& it: queueWatermarkStatIds) + if (flexCounterOrch->getQueueWatermarkCountersState()) { - counters_stream << delimiter << sai_serialize_queue_stat(it); - delimiter = comma; + /* add watermark queue counters */ + addQueueWatermarkFlexCountersPerPortPerQueueIndex(port, queueIndex); } - - vector fieldValues; - fieldValues.emplace_back(QUEUE_COUNTER_ID_LIST, counters_stream.str()); - - m_flexCounterTable->set(key, fieldValues); } m_queueTable->set("", queueVector); @@ -6124,24 +7181,155 @@ void PortsOrch::removePortBufferQueueCounters(const Port &port, string queues) m_queueIndexTable->hdel("", id); } - // Remove the flex counter for this queue - queue_stat_manager.clearCounterIdList(port.m_queue_ids[queueIndex]); + auto flexCounterOrch = gDirectory.get(); + if (flexCounterOrch->getQueueCountersState()) + { + // Remove the flex counter for this queue + queue_stat_manager.clearCounterIdList(port.m_queue_ids[queueIndex]); + } - // Remove watermark queue counters - string key = getQueueWatermarkFlexCounterTableKey(id); - m_flexCounterTable->del(key); + if (flexCounterOrch->getQueueWatermarkCountersState()) + { + // Remove watermark queue counters + string key = getQueueWatermarkFlexCounterTableKey(id); + 
m_flexCounterTable->del(key); + } } - CounterCheckOrch::getInstance().removePort(port); + CounterCheckOrch::getInstance().removePort(port); +} + +void PortsOrch::generatePriorityGroupMap(map pgsStateVector) +{ + if (m_isPriorityGroupMapGenerated) + { + return; + } + + bool isCreateAllPgs = false; + + if (pgsStateVector.count(createAllAvailableBuffersStr)) + { + isCreateAllPgs = true; + pgsStateVector.clear(); + } + + for (const auto& it: m_portList) + { + if (it.second.m_type == Port::PHY) + { + if (!pgsStateVector.count(it.second.m_alias)) + { + auto maxPgNumber = getNumberOfPortSupportedPgCounters(it.second.m_alias); + FlexCounterPgStates flexCounterPgState(maxPgNumber); + if (isCreateAllPgs && maxPgNumber) + { + flexCounterPgState.enablePgCounters(0, maxPgNumber - 1); + } + pgsStateVector.insert(make_pair(it.second.m_alias, flexCounterPgState)); + } + generatePriorityGroupMapPerPort(it.second, pgsStateVector.at(it.second.m_alias)); + } + } + + m_isPriorityGroupMapGenerated = true; +} + +void PortsOrch::generatePriorityGroupMapPerPort(const Port& port, FlexCounterPgStates& pgsState) +{ + /* Create the PG map in the Counter DB */ + vector pgVector; + vector pgPortVector; + vector pgIndexVector; + + for (size_t pgIndex = 0; pgIndex < port.m_priority_group_ids.size(); ++pgIndex) + { + if (!pgsState.isPgCounterEnabled(static_cast(pgIndex))) + { + continue; + } + std::ostringstream name; + name << port.m_alias << ":" << pgIndex; + + const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]); + + pgVector.emplace_back(name.str(), id); + pgPortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); + pgIndexVector.emplace_back(id, to_string(pgIndex)); + + } + + m_pgTable->set("", pgVector); + m_pgPortTable->set("", pgPortVector); + m_pgIndexTable->set("", pgIndexVector); + + CounterCheckOrch::getInstance().addPort(port); +} + +void PortsOrch::createPortBufferPgCounters(const Port& port, string pgs) +{ + SWSS_LOG_ENTER(); + + /* Create the PG 
map in the Counter DB */ + /* Add stat counters to flex_counter */ + vector pgVector; + vector pgPortVector; + vector pgIndexVector; + + auto toks = tokenize(pgs, '-'); + auto startIndex = to_uint(toks[0]); + auto endIndex = startIndex; + if (toks.size() > 1) + { + endIndex = to_uint(toks[1]); + } + + for (auto pgIndex = startIndex; pgIndex <= endIndex; pgIndex++) + { + std::ostringstream name; + name << port.m_alias << ":" << pgIndex; + + const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]); + + pgVector.emplace_back(name.str(), id); + pgPortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); + pgIndexVector.emplace_back(id, to_string(pgIndex)); + + auto flexCounterOrch = gDirectory.get(); + if (flexCounterOrch->getPgCountersState()) + { + /* Add dropped packets counters to flex_counter */ + addPriorityGroupFlexCountersPerPortPerPgIndex(port, pgIndex); + } + if (flexCounterOrch->getPgWatermarkCountersState()) + { + /* Add watermark counters to flex_counter */ + addPriorityGroupWatermarkFlexCountersPerPortPerPgIndex(port, pgIndex); + } + } + + m_pgTable->set("", pgVector); + m_pgPortTable->set("", pgPortVector); + m_pgIndexTable->set("", pgIndexVector); + + CounterCheckOrch::getInstance().addPort(port); } -void PortsOrch::generatePriorityGroupMap(map pgsStateVector) +void PortsOrch::addPriorityGroupFlexCounters(map pgsStateVector) { - if (m_isPriorityGroupMapGenerated) + if (m_isPriorityGroupFlexCountersAdded) { return; } + bool isCreateAllPgs = false; + + if (pgsStateVector.count(createAllAvailableBuffersStr)) + { + isCreateAllPgs = true; + pgsStateVector.clear(); + } + for (const auto& it: m_portList) { if (it.second.m_type == Port::PHY) @@ -6150,143 +7338,120 @@ void PortsOrch::generatePriorityGroupMap(map pgsSta { auto maxPgNumber = getNumberOfPortSupportedPgCounters(it.second.m_alias); FlexCounterPgStates flexCounterPgState(maxPgNumber); + if (isCreateAllPgs && maxPgNumber) + { + flexCounterPgState.enablePgCounters(0, 
maxPgNumber - 1); + } pgsStateVector.insert(make_pair(it.second.m_alias, flexCounterPgState)); } - generatePriorityGroupMapPerPort(it.second, pgsStateVector.at(it.second.m_alias)); + addPriorityGroupFlexCountersPerPort(it.second, pgsStateVector.at(it.second.m_alias)); } } - m_isPriorityGroupMapGenerated = true; + m_isPriorityGroupFlexCountersAdded = true; } -void PortsOrch::generatePriorityGroupMapPerPort(const Port& port, FlexCounterPgStates& pgsState) +void PortsOrch::addPriorityGroupFlexCountersPerPort(const Port& port, FlexCounterPgStates& pgsState) { - /* Create the PG map in the Counter DB */ - /* Add stat counters to flex_counter */ - vector pgVector; - vector pgPortVector; - vector pgIndexVector; - for (size_t pgIndex = 0; pgIndex < port.m_priority_group_ids.size(); ++pgIndex) { if (!pgsState.isPgCounterEnabled(static_cast(pgIndex))) { continue; } - std::ostringstream name; - name << port.m_alias << ":" << pgIndex; - - const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]); - - pgVector.emplace_back(name.str(), id); - pgPortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); - pgIndexVector.emplace_back(id, to_string(pgIndex)); + addPriorityGroupFlexCountersPerPortPerPgIndex(port, pgIndex); + } +} - string key = getPriorityGroupWatermarkFlexCounterTableKey(id); +void PortsOrch::addPriorityGroupFlexCountersPerPortPerPgIndex(const Port& port, size_t pgIndex) +{ + const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]); - std::string delimiter = ""; - std::ostringstream counters_stream; - /* Add watermark counters to flex_counter */ - for (const auto& it: ingressPriorityGroupWatermarkStatIds) + string delimiter = ""; + std::ostringstream ingress_pg_drop_packets_counters_stream; + string key = getPriorityGroupDropPacketsFlexCounterTableKey(id); + /* Add dropped packets counters to flex_counter */ + for (const auto& it: ingressPriorityGroupDropStatIds) + { + ingress_pg_drop_packets_counters_stream << 
delimiter << sai_serialize_ingress_priority_group_stat(it); + if (delimiter.empty()) { - counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it); delimiter = comma; } + } + vector fieldValues; + fieldValues.emplace_back(PG_COUNTER_ID_LIST, ingress_pg_drop_packets_counters_stream.str()); + m_flexCounterTable->set(key, fieldValues); +} - vector fieldValues; - fieldValues.emplace_back(PG_COUNTER_ID_LIST, counters_stream.str()); - m_flexCounterTable->set(key, fieldValues); +void PortsOrch::addPriorityGroupWatermarkFlexCounters(map pgsStateVector) +{ + if (m_isPriorityGroupWatermarkFlexCountersAdded) + { + return; + } - delimiter = ""; - std::ostringstream ingress_pg_drop_packets_counters_stream; - key = getPriorityGroupDropPacketsFlexCounterTableKey(id); - /* Add dropped packets counters to flex_counter */ - for (const auto& it: ingressPriorityGroupDropStatIds) + bool isCreateAllPgs = false; + + if (pgsStateVector.count(createAllAvailableBuffersStr)) + { + isCreateAllPgs = true; + pgsStateVector.clear(); + } + + for (const auto& it: m_portList) + { + if (it.second.m_type == Port::PHY) { - ingress_pg_drop_packets_counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it); - if (delimiter.empty()) + if (!pgsStateVector.count(it.second.m_alias)) { - delimiter = comma; + auto maxPgNumber = getNumberOfPortSupportedPgCounters(it.second.m_alias); + FlexCounterPgStates flexCounterPgState(maxPgNumber); + if (isCreateAllPgs && maxPgNumber) + { + flexCounterPgState.enablePgCounters(0, maxPgNumber - 1); + } + pgsStateVector.insert(make_pair(it.second.m_alias, flexCounterPgState)); } + addPriorityGroupWatermarkFlexCountersPerPort(it.second, pgsStateVector.at(it.second.m_alias)); } - fieldValues.clear(); - fieldValues.emplace_back(PG_COUNTER_ID_LIST, ingress_pg_drop_packets_counters_stream.str()); - m_flexCounterTable->set(key, fieldValues); } - m_pgTable->set("", pgVector); - m_pgPortTable->set("", pgPortVector); - m_pgIndexTable->set("", 
pgIndexVector); - - CounterCheckOrch::getInstance().addPort(port); + m_isPriorityGroupWatermarkFlexCountersAdded = true; } -void PortsOrch::createPortBufferPgCounters(const Port& port, string pgs) +void PortsOrch::addPriorityGroupWatermarkFlexCountersPerPort(const Port& port, FlexCounterPgStates& pgsState) { - SWSS_LOG_ENTER(); - - /* Create the PG map in the Counter DB */ /* Add stat counters to flex_counter */ - vector pgVector; - vector pgPortVector; - vector pgIndexVector; - - auto toks = tokenize(pgs, '-'); - auto startIndex = to_uint(toks[0]); - auto endIndex = startIndex; - if (toks.size() > 1) - { - endIndex = to_uint(toks[1]); - } - for (auto pgIndex = startIndex; pgIndex <= endIndex; pgIndex++) + for (size_t pgIndex = 0; pgIndex < port.m_priority_group_ids.size(); ++pgIndex) { - std::ostringstream name; - name << port.m_alias << ":" << pgIndex; - - const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]); - - pgVector.emplace_back(name.str(), id); - pgPortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); - pgIndexVector.emplace_back(id, to_string(pgIndex)); - - string key = getPriorityGroupWatermarkFlexCounterTableKey(id); - - std::string delimiter = ""; - std::ostringstream counters_stream; - /* Add watermark counters to flex_counter */ - for (const auto& it: ingressPriorityGroupWatermarkStatIds) + if (!pgsState.isPgCounterEnabled(static_cast(pgIndex))) { - counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it); - delimiter = comma; + continue; } + addPriorityGroupWatermarkFlexCountersPerPortPerPgIndex(port, pgIndex); + } +} - vector fieldValues; - fieldValues.emplace_back(PG_COUNTER_ID_LIST, counters_stream.str()); - m_flexCounterTable->set(key, fieldValues); +void PortsOrch::addPriorityGroupWatermarkFlexCountersPerPortPerPgIndex(const Port& port, size_t pgIndex) +{ + const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]); - delimiter = ""; - std::ostringstream 
ingress_pg_drop_packets_counters_stream; - key = getPriorityGroupDropPacketsFlexCounterTableKey(id); - /* Add dropped packets counters to flex_counter */ - for (const auto& it: ingressPriorityGroupDropStatIds) - { - ingress_pg_drop_packets_counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it); - if (delimiter.empty()) - { - delimiter = comma; - } - } - fieldValues.clear(); - fieldValues.emplace_back(PG_COUNTER_ID_LIST, ingress_pg_drop_packets_counters_stream.str()); - m_flexCounterTable->set(key, fieldValues); - } + string key = getPriorityGroupWatermarkFlexCounterTableKey(id); - m_pgTable->set("", pgVector); - m_pgPortTable->set("", pgPortVector); - m_pgIndexTable->set("", pgIndexVector); + std::string delimiter = ""; + std::ostringstream counters_stream; + /* Add watermark counters to flex_counter */ + for (const auto& it: ingressPriorityGroupWatermarkStatIds) + { + counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it); + delimiter = comma; + } - CounterCheckOrch::getInstance().addPort(port); + vector fieldValues; + fieldValues.emplace_back(PG_COUNTER_ID_LIST, counters_stream.str()); + m_flexCounterTable->set(key, fieldValues); } void PortsOrch::removePortBufferPgCounters(const Port& port, string pgs) @@ -6314,13 +7479,20 @@ void PortsOrch::removePortBufferPgCounters(const Port& port, string pgs) m_pgPortTable->hdel("", id); m_pgIndexTable->hdel("", id); - // Remove dropped packets counters from flex_counter - string key = getPriorityGroupDropPacketsFlexCounterTableKey(id); - m_flexCounterTable->del(key); + auto flexCounterOrch = gDirectory.get(); + if (flexCounterOrch->getPgCountersState()) + { + // Remove dropped packets counters from flex_counter + string key = getPriorityGroupDropPacketsFlexCounterTableKey(id); + m_flexCounterTable->del(key); + } - // Remove watermark counters from flex_counter - key = getPriorityGroupWatermarkFlexCounterTableKey(id); - m_flexCounterTable->del(key); + if 
(flexCounterOrch->getPgWatermarkCountersState()) + { + // Remove watermark counters from flex_counter + string key = getPriorityGroupWatermarkFlexCounterTableKey(id); + m_flexCounterTable->del(key); + } } CounterCheckOrch::getInstance().removePort(port); @@ -6402,12 +7574,12 @@ void PortsOrch::doTask(NotificationConsumer &consumer) consumer.pop(op, data, values); - if (&consumer != m_portStatusNotificationConsumer) + if (&consumer != m_portStatusNotificationConsumer && &consumer != m_portHostTxReadyNotificationConsumer) { return; } - if (op == "port_state_change") + if (&consumer == m_portStatusNotificationConsumer && op == "port_state_change") { uint32_t count; sai_port_oper_status_notification_t *portoperstatus = nullptr; @@ -6425,7 +7597,7 @@ void PortsOrch::doTask(NotificationConsumer &consumer) if (!getPort(id, port)) { - SWSS_LOG_ERROR("Failed to get port object for port id 0x%" PRIx64, id); + SWSS_LOG_NOTICE("Got port state change for port id 0x%" PRIx64 " which does not exist, possibly outdated event", id); continue; } @@ -6442,6 +7614,22 @@ void PortsOrch::doTask(NotificationConsumer &consumer) { updateDbPortOperSpeed(port, 0); } + sai_port_fec_mode_t fec_mode; + string fec_str; + if (oper_fec_sup && getPortOperFec(port, fec_mode)) + { + if (!m_portHlpr.fecToStr(fec_str, fec_mode)) + { + SWSS_LOG_ERROR("Error unknown fec mode %d while querying port %s fec mode", + static_cast(fec_mode), port.m_alias.c_str()); + fec_str = "N/A"; + } + updateDbPortOperFec(port,fec_str); + } + else + { + updateDbPortOperFec(port, "N/A"); + } } /* update m_portList */ @@ -6450,6 +7638,18 @@ void PortsOrch::doTask(NotificationConsumer &consumer) sai_deserialize_free_port_oper_status_ntf(count, portoperstatus); } + else if (&consumer == m_portHostTxReadyNotificationConsumer && op == "port_host_tx_ready") + { + sai_object_id_t port_id; + sai_object_id_t switch_id; + sai_port_host_tx_ready_status_t host_tx_ready_status; + + sai_deserialize_port_host_tx_ready_ntf(data, switch_id, 
port_id, host_tx_ready_status); + SWSS_LOG_DEBUG("Recieved host_tx_ready notification for port 0x%" PRIx64, port_id); + + setHostTxReady(port_id, host_tx_ready_status == SAI_PORT_HOST_TX_READY_STATUS_READY ? "true" : "false"); + } + } void PortsOrch::updatePortOperStatus(Port &port, sai_port_oper_status_t status) @@ -6465,6 +7665,7 @@ void PortsOrch::updatePortOperStatus(Port &port, sai_port_oper_status_t status) if (port.m_type == Port::PHY) { updateDbPortOperStatus(port, status); + updateDbPortFlapCount(port, status); updateGearboxPortOperStatus(port); /* Refresh the port states and reschedule the poller tasks */ @@ -6525,6 +7726,16 @@ void PortsOrch::updateDbPortOperSpeed(Port &port, sai_uint32_t speed) // cause a port flapping. } +void PortsOrch::updateDbPortOperFec(Port &port, string fec_str) +{ + SWSS_LOG_ENTER(); + + vector tuples; + tuples.emplace_back(std::make_pair("fec", fec_str)); + m_portStateTable.set(port.m_alias, tuples); + +} + /* * sync up orchagent with libsai/ASIC for port state. 
* @@ -6569,6 +7780,18 @@ void PortsOrch::refreshPortStatus() { updateDbPortOperSpeed(port, 0); } + sai_port_fec_mode_t fec_mode; + string fec_str = "N/A"; + if (oper_fec_sup && getPortOperFec(port, fec_mode)) + { + if (!m_portHlpr.fecToStr(fec_str, fec_mode)) + { + SWSS_LOG_ERROR("Error unknown fec mode %d while querying port %s fec mode", + static_cast(fec_mode), port.m_alias.c_str()); + fec_str = "N/A"; + } + } + updateDbPortOperFec(port,fec_str); } } } @@ -6633,6 +7856,28 @@ bool PortsOrch::getPortOperSpeed(const Port& port, sai_uint32_t& speed) const return true; } +bool PortsOrch::getPortOperFec(const Port& port, sai_port_fec_mode_t &fec_mode) const +{ + SWSS_LOG_ENTER(); + + if (port.m_type != Port::PHY) + { + return false; + } + + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_OPER_PORT_FEC_MODE; + + sai_status_t ret = sai_port_api->get_port_attribute(port.m_port_id, 1, &attr); + if (ret != SAI_STATUS_SUCCESS) + { + SWSS_LOG_NOTICE("Failed to get oper fec for %s", port.m_alias.c_str()); + return false; + } + + fec_mode = static_cast(attr.value.s32); + return true; +} bool PortsOrch::getPortLinkTrainingRxStatus(const Port &port, sai_port_link_training_rx_status_t &rx_status) { SWSS_LOG_ENTER(); @@ -6740,7 +7985,7 @@ bool PortsOrch::removeAclTableGroup(const Port &p) return true; } -bool PortsOrch::setPortSerdesAttribute(sai_object_id_t port_id, +bool PortsOrch::setPortSerdesAttribute(sai_object_id_t port_id, sai_object_id_t switch_id, map> &serdes_attr) { SWSS_LOG_ENTER(); @@ -6792,7 +8037,7 @@ bool PortsOrch::setPortSerdesAttribute(sai_object_id_t port_id, port_serdes_attr.value.u32list.list = it->second.data(); attr_list.emplace_back(port_serdes_attr); } - status = sai_port_api->create_port_serdes(&port_serdes_id, gSwitchId, + status = sai_port_api->create_port_serdes(&port_serdes_id, switch_id, static_cast(serdes_attr.size()+1), attr_list.data()); @@ -6843,7 +8088,8 @@ void PortsOrch::removePortSerdesAttribute(sai_object_id_t port_id) } void 
PortsOrch::getPortSerdesVal(const std::string& val_str, - std::vector &lane_values) + std::vector &lane_values, + int base) { SWSS_LOG_ENTER(); @@ -6853,91 +8099,11 @@ void PortsOrch::getPortSerdesVal(const std::string& val_str, while (std::getline(iss, lane_str, ',')) { - lane_val = (uint32_t)std::stoul(lane_str, NULL, 16); + lane_val = (uint32_t)std::stoul(lane_str, NULL, base); lane_values.push_back(lane_val); } } -bool PortsOrch::getPortAdvSpeedsVal(const std::string &val_str, - std::vector &speed_values) -{ - SWSS_LOG_ENTER(); - - if (val_str == "all") - { - return true; - } - - uint32_t speed_val; - std::string speed_str; - std::istringstream iss(val_str); - - try - { - while (std::getline(iss, speed_str, ',')) - { - speed_val = (uint32_t)std::stoul(speed_str); - speed_values.push_back(speed_val); - } - } - catch (const std::invalid_argument &e) - { - SWSS_LOG_ERROR("Failed to parse adv_speeds value: %s", val_str.c_str()); - return false; - } - std::sort(speed_values.begin(), speed_values.end()); - return true; -} - -bool PortsOrch::getPortInterfaceTypeVal(const std::string &s, - sai_port_interface_type_t &interface_type) -{ - SWSS_LOG_ENTER(); - - auto iter = interface_type_map_for_an.find(s); - if (iter != interface_type_map_for_an.end()) - { - interface_type = interface_type_map_for_an[s]; - return true; - } - else - { - const std::string &validInterfaceTypes = getValidInterfaceTypes(); - SWSS_LOG_ERROR("Failed to parse interface_type value %s, valid interface type includes: %s", - s.c_str(), validInterfaceTypes.c_str()); - return false; - } -} - -bool PortsOrch::getPortAdvInterfaceTypesVal(const std::string &val_str, - std::vector &type_values) -{ - SWSS_LOG_ENTER(); - if (val_str == "all") - { - return true; - } - - sai_port_interface_type_t interface_type ; - std::string type_str; - std::istringstream iss(val_str); - bool valid; - - while (std::getline(iss, type_str, ',')) - { - valid = getPortInterfaceTypeVal(type_str, interface_type); - if (!valid) { 
- const std::string &validInterfaceTypes = getValidInterfaceTypes(); - SWSS_LOG_ERROR("Failed to parse adv_interface_types value %s, valid interface type includes: %s", - val_str.c_str(), validInterfaceTypes.c_str()); - return false; - } - type_values.push_back(static_cast(interface_type)); - } - std::sort(type_values.begin(), type_values.end()); - return true; -} - /* Bring up/down Vlan interface associated with L3 VNI*/ bool PortsOrch::updateL3VniStatus(uint16_t vlan_id, bool isUp) { @@ -7028,6 +8194,7 @@ bool PortsOrch::initGearboxPort(Port &port) sai_status_t status; string phyOidStr; int phy_id; + sai_port_fec_mode_t sai_fec; SWSS_LOG_ENTER(); @@ -7081,9 +8248,22 @@ bool PortsOrch::initGearboxPort(Port &port) attrs.push_back(attr); attr.id = SAI_PORT_ATTR_FEC_MODE; - attr.value.s32 = fec_mode_map[m_gearboxPortMap[port.m_index].system_fec]; + if (!m_portHlpr.fecToSaiFecMode(m_gearboxPortMap[port.m_index].system_fec, sai_fec)) + { + SWSS_LOG_ERROR("Invalid system FEC mode %s", m_gearboxPortMap[port.m_index].system_fec.c_str()); + return false; + } + attr.value.s32 = sai_fec; attrs.push_back(attr); + if (fec_override_sup) + { + attr.id = SAI_PORT_ATTR_AUTO_NEG_FEC_MODE_OVERRIDE; + + attr.value.booldata = m_portHlpr.fecIsOverrideRequired(m_gearboxPortMap[port.m_index].system_fec); + attrs.push_back(attr); + } + attr.id = SAI_PORT_ATTR_INTERNAL_LOOPBACK_MODE; attr.value.u32 = loopback_mode_map[m_gearboxPortMap[port.m_index].system_loopback]; attrs.push_back(attr); @@ -7092,6 +8272,13 @@ bool PortsOrch::initGearboxPort(Port &port) attr.value.booldata = m_gearboxPortMap[port.m_index].system_training; attrs.push_back(attr); + if (m_cmisModuleAsicSyncSupported) + { + attr.id = SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE; + attr.value.booldata = false; + attrs.push_back(attr); + } + status = sai_port_api->create_port(&systemPort, phyOid, static_cast(attrs.size()), attrs.data()); if (status != SAI_STATUS_SUCCESS) { @@ -7137,9 +8324,22 @@ bool PortsOrch::initGearboxPort(Port 
&port) attrs.push_back(attr); attr.id = SAI_PORT_ATTR_FEC_MODE; - attr.value.s32 = fec_mode_map[m_gearboxPortMap[port.m_index].line_fec]; + if (!m_portHlpr.fecToSaiFecMode(m_gearboxPortMap[port.m_index].line_fec, sai_fec)) + { + SWSS_LOG_ERROR("Invalid line FEC mode %s", m_gearboxPortMap[port.m_index].line_fec.c_str()); + return false; + } + attr.value.s32 = sai_fec; attrs.push_back(attr); + // FEC override will take effect only when autoneg is enabled + if (fec_override_sup) + { + attr.id = SAI_PORT_ATTR_AUTO_NEG_FEC_MODE_OVERRIDE; + attr.value.booldata = m_portHlpr.fecIsOverrideRequired(m_gearboxPortMap[port.m_index].line_fec); + attrs.push_back(attr); + } + attr.id = SAI_PORT_ATTR_MEDIA_TYPE; attr.value.u32 = media_type_map[m_gearboxPortMap[port.m_index].line_media_type]; attrs.push_back(attr); @@ -7180,6 +8380,13 @@ bool PortsOrch::initGearboxPort(Port &port) attr.value.u32 = media_type_map[m_gearboxPortMap[port.m_index].line_adver_media_type]; attrs.push_back(attr); + if (m_cmisModuleAsicSyncSupported) + { + attr.id = SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE; + attr.value.booldata = false; + attrs.push_back(attr); + } + status = sai_port_api->create_port(&linePort, phyOid, static_cast(attrs.size()), attrs.data()); if (status != SAI_STATUS_SUCCESS) { @@ -7229,6 +8436,50 @@ bool PortsOrch::initGearboxPort(Port &port) fields[0] = FieldValueTuple(port.m_alias + "_line", sai_serialize_object_id(linePort)); m_gbcounterTable->set("", fields); + + /* Set serdes tx taps on system and line side */ + map> serdes_attr; + typedef pair> serdes_attr_pair; + vector attr_val; + for (auto pair: tx_fir_strings_system_side) { + if (m_gearboxInterfaceMap[port.m_index].tx_firs.find(pair.first) != m_gearboxInterfaceMap[port.m_index].tx_firs.end() ) { + attr_val.clear(); + getPortSerdesVal(m_gearboxInterfaceMap[port.m_index].tx_firs[pair.first], attr_val, 10); + serdes_attr.insert(serdes_attr_pair(pair.second, attr_val)); + } + } + if (serdes_attr.size() != 0) + { + if 
(setPortSerdesAttribute(systemPort, phyOid, serdes_attr)) + { + SWSS_LOG_NOTICE("Set port %s system side preemphasis is success", port.m_alias.c_str()); + } + else + { + SWSS_LOG_ERROR("Failed to set port %s system side pre-emphasis", port.m_alias.c_str()); + return false; + } + } + serdes_attr.clear(); + for (auto pair: tx_fir_strings_line_side) { + if (m_gearboxInterfaceMap[port.m_index].tx_firs.find(pair.first) != m_gearboxInterfaceMap[port.m_index].tx_firs.end() ) { + attr_val.clear(); + getPortSerdesVal(m_gearboxInterfaceMap[port.m_index].tx_firs[pair.first], attr_val, 10); + serdes_attr.insert(serdes_attr_pair(pair.second, attr_val)); + } + } + if (serdes_attr.size() != 0) + { + if (setPortSerdesAttribute(linePort, phyOid, serdes_attr)) + { + SWSS_LOG_NOTICE("Set port %s line side preemphasis is success", port.m_alias.c_str()); + } + else + { + SWSS_LOG_ERROR("Failed to set port %s line side pre-emphasis", port.m_alias.c_str()); + return false; + } + } } } @@ -7372,7 +8623,7 @@ bool PortsOrch::getSystemPorts() return true; } -bool PortsOrch::getRecircPort(Port &port, string role) +bool PortsOrch::getRecircPort(Port &port, Port::Role role) { for (auto it = m_recircPortRole.begin(); it != m_recircPortRole.end(); it++) { @@ -7381,7 +8632,12 @@ bool PortsOrch::getRecircPort(Port &port, string role) return getPort(it->first, port); } } - SWSS_LOG_ERROR("Failed to find recirc port with role %s", role.c_str()); + + SWSS_LOG_ERROR( + "Failed to find recirc port %s with role %d", + port.m_alias.c_str(), static_cast(role) + ); + return false; } @@ -7509,7 +8765,14 @@ bool PortsOrch::addSystemPorts() port.m_system_port_info.speed = attrs[1].value.sysportconfig.speed; port.m_system_port_info.num_voq = attrs[1].value.sysportconfig.num_voq; + initializeVoqs( port ); setPort(port.m_alias, port); + /* Add system port name map to counter table */ + FieldValueTuple tuple(port.m_system_port_info.alias, + sai_serialize_object_id(system_port_oid)); + vector fields; + 
fields.push_back(tuple); + m_counterSysPortTable->set("", fields); if(m_port_ref_count.find(port.m_alias) == m_port_ref_count.end()) { m_port_ref_count[port.m_alias] = 0; @@ -7616,7 +8879,7 @@ void PortsOrch::voqSyncDelLag(Port &lag) m_tableVoqSystemLagTable->del(key); } -void PortsOrch::voqSyncAddLagMember(Port &lag, Port &port) +void PortsOrch::voqSyncAddLagMember(Port &lag, Port &port, string status) { // Sync only local lag's member add to CHASSIS_APP_DB if (lag.m_system_lag_info.switch_id != gVoqMySwitchId) @@ -7625,8 +8888,8 @@ void PortsOrch::voqSyncAddLagMember(Port &lag, Port &port) } vector attrs; - FieldValueTuple nullFv ("NULL", "NULL"); - attrs.push_back(nullFv); + FieldValueTuple statusFv ("status", status); + attrs.push_back(statusFv); string key = lag.m_system_lag_info.alias + ":" + port.m_system_port_info.alias; m_tableVoqSystemLagMemberTable->set(key, attrs); @@ -7755,6 +9018,13 @@ bool PortsOrch::isMACsecPort(sai_object_id_t port_id) const return m_macsecEnabledPorts.find(port_id) != m_macsecEnabledPorts.end(); } +vector PortsOrch::getPortVoQIds(Port& port) +{ + SWSS_LOG_ENTER(); + + return m_port_voq_ids[port.m_alias]; +} + /* Refresh the per-port Auto-Negotiation operational states */ void PortsOrch::refreshPortStateAutoNeg(const Port &port) { @@ -7870,3 +9140,4 @@ void PortsOrch::doTask(swss::SelectableTimer &timer) m_port_state_poller->stop(); } } + diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h index f481c95d43..21ed299681 100755 --- a/orchagent/portsorch.h +++ b/orchagent/portsorch.h @@ -2,6 +2,7 @@ #define SWSS_PORTSORCH_H #include +#include #include "acltable.h" #include "orch.h" @@ -14,7 +15,11 @@ #include "saihelper.h" #include "lagid.h" #include "flexcounterorch.h" +#include "events.h" +#include "port/port_capabilities.h" +#include "port/porthlpr.h" +#include "port/portschema.h" #define FCS_LEN 4 #define VLAN_TAG_LEN 4 @@ -28,7 +33,7 @@ #define PG_DROP_STAT_COUNTER_FLEX_COUNTER_GROUP "PG_DROP_STAT_COUNTER" typedef 
std::vector PortSupportedSpeeds; -typedef std::set PortSupportedFecModes; +typedef std::set PortSupportedFecModes; static const map oper_status_strings = { @@ -48,6 +53,28 @@ static const unordered_map string_oper_status = { "not present", SAI_PORT_OPER_STATUS_NOT_PRESENT } }; +static const std::map tx_fir_strings_system_side = +{ + {"system_tx_fir_pre1", SAI_PORT_SERDES_ATTR_TX_FIR_PRE1}, + {"system_tx_fir_pre2", SAI_PORT_SERDES_ATTR_TX_FIR_PRE2}, + {"system_tx_fir_pre3", SAI_PORT_SERDES_ATTR_TX_FIR_PRE3}, + {"system_tx_fir_post1", SAI_PORT_SERDES_ATTR_TX_FIR_POST1}, + {"system_tx_fir_post2", SAI_PORT_SERDES_ATTR_TX_FIR_POST2}, + {"system_tx_fir_post3", SAI_PORT_SERDES_ATTR_TX_FIR_POST3}, + {"system_tx_fir_main", SAI_PORT_SERDES_ATTR_TX_FIR_MAIN} +}; + +static const std::map tx_fir_strings_line_side = +{ + {"line_tx_fir_pre1", SAI_PORT_SERDES_ATTR_TX_FIR_PRE1}, + {"line_tx_fir_pre2", SAI_PORT_SERDES_ATTR_TX_FIR_PRE2}, + {"line_tx_fir_pre3", SAI_PORT_SERDES_ATTR_TX_FIR_PRE3}, + {"line_tx_fir_post1", SAI_PORT_SERDES_ATTR_TX_FIR_POST1}, + {"line_tx_fir_post2", SAI_PORT_SERDES_ATTR_TX_FIR_POST2}, + {"line_tx_fir_post3", SAI_PORT_SERDES_ATTR_TX_FIR_POST3}, + {"line_tx_fir_main", SAI_PORT_SERDES_ATTR_TX_FIR_MAIN} +}; + struct PortUpdate { Port port; @@ -74,6 +101,23 @@ struct VlanMemberUpdate bool add; }; +struct queueInfo +{ + // SAI_QUEUE_ATTR_TYPE + sai_queue_type_t type; + // SAI_QUEUE_ATTR_INDEX + sai_uint8_t index; +}; + +template +struct PortCapability +{ + bool supported = false; + T data; +}; + +typedef PortCapability PortFecModeCapability_t; + class PortsOrch : public Orch, public Subject { public: @@ -103,10 +147,13 @@ class PortsOrch : public Orch, public Subject bool setHostIntfsOperStatus(const Port& port, bool up) const; void updateDbPortOperStatus(const Port& port, sai_port_oper_status_t status) const; + void updateDbPortFlapCount(Port& port, sai_port_oper_status_t pstatus); bool createVlanHostIntf(Port& vl, string hostif_name); bool 
removeVlanHostIntf(Port vl); + unordered_set& getAllVlans(); + bool createBindAclTableGroup(sai_object_id_t port_oid, sai_object_id_t acl_table_oid, sai_object_id_t &group_oid, @@ -130,16 +177,20 @@ class PortsOrch : public Orch, public Subject bool setPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t pfc_bitmask); bool getPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t *pfc_bitmask); - + void generateQueueMap(map queuesStateVector); uint32_t getNumberOfPortSupportedQueueCounters(string port); void createPortBufferQueueCounters(const Port &port, string queues); void removePortBufferQueueCounters(const Port &port, string queues); + void addQueueFlexCounters(map queuesStateVector); + void addQueueWatermarkFlexCounters(map queuesStateVector); void generatePriorityGroupMap(map pgsStateVector); uint32_t getNumberOfPortSupportedPgCounters(string port); void createPortBufferPgCounters(const Port &port, string pgs); void removePortBufferPgCounters(const Port& port, string pgs); + void addPriorityGroupFlexCounters(map pgsStateVector); + void addPriorityGroupWatermarkFlexCounters(map pgsStateVector); void generatePortCounterMap(); void generatePortBufferDropCounterMap(); @@ -171,7 +222,7 @@ class PortsOrch : public Orch, public Subject bool setVoqInbandIntf(string &alias, string &type); bool getPortVlanMembers(Port &port, vlan_members_t &vlan_members); - bool getRecircPort(Port &p, string role); + bool getRecircPort(Port &p, Port::Role role); const gearbox_phy_t* getGearboxPhy(const Port &port); @@ -186,13 +237,17 @@ class PortsOrch : public Orch, public Subject void setMACsecEnabledState(sai_object_id_t port_id, bool enabled); bool isMACsecPort(sai_object_id_t port_id) const; + vector getPortVoQIds(Port& port); private: unique_ptr
m_counterTable; + unique_ptr
m_counterSysPortTable; unique_ptr
m_counterLagTable; unique_ptr
m_portTable; + unique_ptr
m_sendToIngressPortTable; unique_ptr
m_gearboxTable; unique_ptr
m_queueTable; + unique_ptr
m_voqTable; unique_ptr
m_queuePortTable; unique_ptr
m_queueIndexTable; unique_ptr
m_queueTypeTable; @@ -212,6 +267,7 @@ class PortsOrch : public Orch, public Subject shared_ptr m_counter_db; shared_ptr m_flex_db; shared_ptr m_state_db; + shared_ptr m_notificationsDb; FlexCounterManager port_stat_manager; FlexCounterManager port_buffer_drop_stat_manager; @@ -221,11 +277,13 @@ class PortsOrch : public Orch, public Subject shared_ptr m_gb_counter_db; unique_ptr
m_gbcounterTable; + // Supported speeds on the system side. std::map m_portSupportedSpeeds; // Supported FEC modes on the system side. - std::map m_portSupportedFecModes; + std::map m_portSupportedFecModes; bool m_initDone = false; + bool m_isSendToIngressPortConfigured = false; Port m_cpuPort; // TODO: Add Bridge/Vlan class sai_object_id_t m_default1QBridge; @@ -252,18 +310,21 @@ class PortsOrch : public Orch, public Subject map m_gearboxPortMap; map> m_gearboxPortListLaneMap; + unordered_set m_vlanPorts; port_config_state_t m_portConfigState = PORT_CONFIG_MISSING; sai_uint32_t m_portCount; - map, sai_object_id_t> m_portListLaneMap; - map, tuple> m_lanesAliasSpeedMap; + map, sai_object_id_t> m_portListLaneMap; + map, PortConfig> m_lanesAliasSpeedMap; map m_portList; + map m_pluggedModulesPort; map m_portVlanMember; + map> m_port_voq_ids; /* mapping from SAI object ID to Name for faster * retrieval of Port/VLAN from object ID for events * coming from SAI */ unordered_map saiOidToAlias; - unordered_map m_portOidToIndex; + unordered_map m_portOidToIndex; map m_port_ref_count; unordered_set m_pendingPortSet; const uint32_t max_flood_control_types = 4; @@ -272,16 +333,24 @@ class PortsOrch : public Orch, public Subject map m_bridge_port_ref_count; NotificationConsumer* m_portStatusNotificationConsumer; + NotificationConsumer* m_portHostTxReadyNotificationConsumer; + + bool fec_override_sup = false; + bool oper_fec_sup = false; swss::SelectableTimer *m_port_state_poller = nullptr; + bool m_cmisModuleAsicSyncSupported = false; + void doTask() override; void doTask(Consumer &consumer); void doPortTask(Consumer &consumer); + void doSendToIngressPortTask(Consumer &consumer); void doVlanTask(Consumer &consumer); void doVlanMemberTask(Consumer &consumer); void doLagTask(Consumer &consumer); void doLagMemberTask(Consumer &consumer); + void doTransceiverPresenceCheck(Consumer &consumer); void doTask(NotificationConsumer &consumer); void doTask(swss::SelectableTimer &timer); @@ 
-295,11 +364,13 @@ class PortsOrch : public Orch, public Subject void initializePriorityGroups(Port &port); void initializePortBufferMaximumParameters(Port &port); void initializeQueues(Port &port); + void initializeSchedulerGroups(Port &port); + void initializeVoqs(Port &port); bool addHostIntfs(Port &port, string alias, sai_object_id_t &host_intfs_id); bool setHostIntfsStripTag(Port &port, sai_hostif_vlan_tag_t strip); - bool setBridgePortLearnMode(Port &port, string learn_mode); + bool setBridgePortLearnMode(Port &port, sai_bridge_port_fdb_learning_mode_t learn_mode); bool addVlan(string vlan); bool removeVlan(Port vlan); @@ -307,14 +378,13 @@ class PortsOrch : public Orch, public Subject bool addLag(string lag, uint32_t spa_id, int32_t switch_id); bool removeLag(Port lag); bool setLagTpid(sai_object_id_t id, sai_uint16_t tpid); - bool addLagMember(Port &lag, Port &port, bool enableForwarding); + bool addLagMember(Port &lag, Port &port, string status); bool removeLagMember(Port &lag, Port &port); bool setCollectionOnLagMember(Port &lagMember, bool enableCollection); bool setDistributionOnLagMember(Port &lagMember, bool enableDistribution); - bool addPort(const set &lane_set, uint32_t speed, int an=0, string fec=""); sai_status_t removePort(sai_object_id_t port_id); - bool initPort(const string &alias, const string &role, const int index, const set &lane_set); + bool initPort(const PortConfig &port); void deInitPort(string alias, sai_object_id_t port_id); void initPortCapAutoNeg(Port &port); @@ -323,44 +393,67 @@ class PortsOrch : public Orch, public Subject bool setPortAdminStatus(Port &port, bool up); bool getPortAdminStatus(sai_object_id_t id, bool& up); bool getPortMtu(const Port& port, sai_uint32_t &mtu); + bool getPortHostTxReady(const Port& port, bool &hostTxReadyVal); bool setPortMtu(const Port& port, sai_uint32_t mtu); - bool setPortTpid(sai_object_id_t id, sai_uint16_t tpid); + bool setPortTpid(Port &port, sai_uint16_t tpid); bool setPortPvid (Port 
&port, sai_uint32_t pvid); bool getPortPvid(Port &port, sai_uint32_t &pvid); - bool setPortFec(Port &port, std::string &mode); - bool setPortPfcAsym(Port &port, string pfc_asym); + bool setPortFec(Port &port, sai_port_fec_mode_t fec_mode, bool override_fec); + bool setPortFecOverride(sai_object_id_t port_obj, bool override_fec); + bool setPortPfcAsym(Port &port, sai_port_priority_flow_control_mode_t pfc_asym); bool getDestPortId(sai_object_id_t src_port_id, dest_port_type_t port_type, sai_object_id_t &des_port_id); bool setBridgePortAdminStatus(sai_object_id_t id, bool up); + bool setSaiHostTxSignal(const Port &port, bool enable); + + void setHostTxReady(sai_object_id_t portId, const std::string &status); + // Get supported speeds on system side bool isSpeedSupported(const std::string& alias, sai_object_id_t port_id, sai_uint32_t speed); void getPortSupportedSpeeds(const std::string& alias, sai_object_id_t port_id, PortSupportedSpeeds &supported_speeds); void initPortSupportedSpeeds(const std::string& alias, sai_object_id_t port_id); // Get supported FEC modes on system side - void getPortSupportedFecModes(const std::string& alias, sai_object_id_t port_id, PortSupportedFecModes &supported_fecmodes); + bool isFecModeSupported(const Port &port, sai_port_fec_mode_t fec_mode); + sai_status_t getPortSupportedFecModes(PortSupportedFecModes &supported_fecmodes, sai_object_id_t port_id); void initPortSupportedFecModes(const std::string& alias, sai_object_id_t port_id); task_process_status setPortSpeed(Port &port, sai_uint32_t speed); bool getPortSpeed(sai_object_id_t id, sai_uint32_t &speed); - bool setGearboxPortsAttr(const Port &port, sai_port_attr_t id, void *value); - bool setGearboxPortAttr(const Port &port, dest_port_type_t port_type, sai_port_attr_t id, void *value); + bool setGearboxPortsAttr(const Port &port, sai_port_attr_t id, void *value, bool override_fec=true); + bool setGearboxPortAttr(const Port &port, dest_port_type_t port_type, sai_port_attr_t id, void 
*value, bool override_fec); bool getPortAdvSpeeds(const Port& port, bool remote, std::vector& speed_list); bool getPortAdvSpeeds(const Port& port, bool remote, string& adv_speeds); - task_process_status setPortAdvSpeeds(sai_object_id_t port_id, std::vector& speed_list); + task_process_status setPortAdvSpeeds(Port &port, std::set &speed_list); bool getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uint8_t &index); bool m_isQueueMapGenerated = false; - void generateQueueMapPerPort(const Port& port, FlexCounterQueueStates& queuesState); + void generateQueueMapPerPort(const Port& port, FlexCounterQueueStates& queuesState, bool voq); + bool m_isQueueFlexCountersAdded = false; + void addQueueFlexCountersPerPort(const Port& port, FlexCounterQueueStates& queuesState); + void addQueueFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex, bool voq); + + bool m_isQueueWatermarkFlexCountersAdded = false; + void addQueueWatermarkFlexCountersPerPort(const Port& port, FlexCounterQueueStates& queuesState); + void addQueueWatermarkFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex); + bool m_isPriorityGroupMapGenerated = false; void generatePriorityGroupMapPerPort(const Port& port, FlexCounterPgStates& pgsState); + bool m_isPriorityGroupFlexCountersAdded = false; + void addPriorityGroupFlexCountersPerPort(const Port& port, FlexCounterPgStates& pgsState); + void addPriorityGroupFlexCountersPerPortPerPgIndex(const Port& port, size_t pgIndex); + + bool m_isPriorityGroupWatermarkFlexCountersAdded = false; + void addPriorityGroupWatermarkFlexCountersPerPort(const Port& port, FlexCounterPgStates& pgsState); + void addPriorityGroupWatermarkFlexCountersPerPortPerPgIndex(const Port& port, size_t pgIndex); + bool m_isPortCounterMapGenerated = false; bool m_isPortBufferDropCounterMapGenerated = false; - task_process_status setPortAutoNeg(sai_object_id_t id, int an); - bool setPortFecMode(sai_object_id_t id, int fec); - task_process_status 
setPortInterfaceType(sai_object_id_t id, sai_port_interface_type_t interface_type); - task_process_status setPortAdvInterfaceTypes(sai_object_id_t id, std::vector &interface_types); + bool isAutoNegEnabled(sai_object_id_t id); + task_process_status setPortAutoNeg(Port &port, bool autoneg); + task_process_status setPortInterfaceType(Port &port, sai_port_interface_type_t interface_type); + task_process_status setPortAdvInterfaceTypes(Port &port, std::set &interface_types); task_process_status setPortLinkTraining(const Port& port, bool state); void updatePortOperStatus(Port &port, sai_port_oper_status_t status); @@ -382,12 +475,8 @@ class PortsOrch : public Orch, public Subject void refreshPortStateAutoNeg(const Port &port); void refreshPortStateLinkTraining(const Port &port); - void getPortSerdesVal(const std::string& s, std::vector &lane_values); - bool getPortAdvSpeedsVal(const std::string &s, std::vector &speed_values); - bool getPortInterfaceTypeVal(const std::string &s, sai_port_interface_type_t &interface_type); - bool getPortAdvInterfaceTypesVal(const std::string &s, std::vector &type_values); - - bool setPortSerdesAttribute(sai_object_id_t port_id, + void getPortSerdesVal(const std::string& s, std::vector &lane_values, int base = 16); + bool setPortSerdesAttribute(sai_object_id_t port_id, sai_object_id_t switch_id, std::map> &serdes_attr); @@ -395,10 +484,15 @@ class PortsOrch : public Orch, public Subject bool getSaiAclBindPointType(Port::Type type, sai_acl_bind_point_type_t &sai_acl_bind_type); + + ReturnCode addSendToIngressHostIf(const std::string &send_to_ingress_name); + ReturnCode removeSendToIngressHostIf(); void initGearbox(); bool initGearboxPort(Port &port); + bool getPortOperFec(const Port& port, sai_port_fec_mode_t &fec_mode) const; + void updateDbPortOperFec(Port &port, string fec_str); - map m_recircPortRole; + map m_recircPortRole; //map key is tuple of map, sai_object_id_t> m_systemPortOidMap; @@ -409,11 +503,32 @@ class PortsOrch : public 
Orch, public Subject unique_ptr
m_tableVoqSystemLagMemberTable; void voqSyncAddLag(Port &lag); void voqSyncDelLag(Port &lag); - void voqSyncAddLagMember(Port &lag, Port &port); + void voqSyncAddLagMember(Port &lag, Port &port, string status); void voqSyncDelLagMember(Port &lag, Port &port); unique_ptr m_lagIdAllocator; set m_macsecEnabledPorts; std::unordered_set generateCounterStats(const string& type, bool gearbox = false); + map m_queueInfo; + +private: + void initializeCpuPort(); + void initializePorts(); + + auto getPortConfigState() const -> port_config_state_t; + void setPortConfigState(port_config_state_t value); + + bool addPortBulk(const std::vector &portList); + bool removePortBulk(const std::vector &portList); + +private: + // Port config aggregator + std::unordered_map> m_portConfigMap; + + // Port OA capabilities + PortCapabilities m_portCap; + + // Port OA helper + PortHelper m_portHlpr; }; #endif /* SWSS_PORTSORCH_H */ diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index c6d7bff842..90fc6fc766 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -28,6 +28,9 @@ extern PortsOrch *gPortsOrch; extern QosOrch *gQosOrch; extern sai_object_id_t gSwitchId; extern CrmOrch *gCrmOrch; +extern string gMySwitchType; +extern string gMyHostName; +extern string gMyAsicName; map ecn_map = { {"ecn_none", SAI_ECN_MARK_MODE_NONE}, @@ -46,12 +49,20 @@ enum { RED_DROP_PROBABILITY_SET = (1U << 2) }; +enum { + GREEN_WRED_ENABLED = (1U << 0), + YELLOW_WRED_ENABLED = (1U << 1), + RED_WRED_ENABLED = (1U << 2) +}; + // field_name is what is expected in CONFIG_DB PORT_QOS_MAP table map qos_to_attr_map = { {dscp_to_tc_field_name, SAI_PORT_ATTR_QOS_DSCP_TO_TC_MAP}, {mpls_tc_to_tc_field_name, SAI_PORT_ATTR_QOS_MPLS_EXP_TO_TC_MAP}, {dot1p_to_tc_field_name, SAI_PORT_ATTR_QOS_DOT1P_TO_TC_MAP}, {tc_to_queue_field_name, SAI_PORT_ATTR_QOS_TC_TO_QUEUE_MAP}, + {tc_to_dot1p_field_name, SAI_PORT_ATTR_QOS_TC_AND_COLOR_TO_DOT1P_MAP}, + {tc_to_dscp_field_name, 
SAI_PORT_ATTR_QOS_TC_AND_COLOR_TO_DSCP_MAP}, {tc_to_pg_map_field_name, SAI_PORT_ATTR_QOS_TC_TO_PRIORITY_GROUP_MAP}, {pfc_to_pg_map_name, SAI_PORT_ATTR_QOS_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP}, {pfc_to_queue_map_name, SAI_PORT_ATTR_QOS_PFC_PRIORITY_TO_QUEUE_MAP}, @@ -66,21 +77,22 @@ map scheduler_meter_map = { }; type_map QosOrch::m_qos_maps = { - {CFG_DSCP_TO_TC_MAP_TABLE_NAME, new object_reference_map()}, - {CFG_MPLS_TC_TO_TC_MAP_TABLE_NAME, new object_reference_map()}, - {CFG_DOT1P_TO_TC_MAP_TABLE_NAME, new object_reference_map()}, - {CFG_TC_TO_QUEUE_MAP_TABLE_NAME, new object_reference_map()}, - {CFG_SCHEDULER_TABLE_NAME, new object_reference_map()}, - {CFG_WRED_PROFILE_TABLE_NAME, new object_reference_map()}, - {CFG_PORT_QOS_MAP_TABLE_NAME, new object_reference_map()}, - {CFG_QUEUE_TABLE_NAME, new object_reference_map()}, - {CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, new object_reference_map()}, - {CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, new object_reference_map()}, - {CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, new object_reference_map()}, - {CFG_DSCP_TO_FC_MAP_TABLE_NAME, new object_reference_map()}, - {CFG_EXP_TO_FC_MAP_TABLE_NAME, new object_reference_map()}, - {CFG_TC_TO_DSCP_MAP_TABLE_NAME, new object_reference_map()}, - {APP_TUNNEL_DECAP_TABLE_NAME, new object_reference_map()} + {CFG_DSCP_TO_TC_MAP_TABLE_NAME, make_shared()}, + {CFG_MPLS_TC_TO_TC_MAP_TABLE_NAME, make_shared()}, + {CFG_DOT1P_TO_TC_MAP_TABLE_NAME, make_shared()}, + {CFG_TC_TO_QUEUE_MAP_TABLE_NAME, make_shared()}, + {CFG_SCHEDULER_TABLE_NAME, make_shared()}, + {CFG_WRED_PROFILE_TABLE_NAME, make_shared()}, + {CFG_PORT_QOS_MAP_TABLE_NAME, make_shared()}, + {CFG_QUEUE_TABLE_NAME, make_shared()}, + {CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, make_shared()}, + {CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, make_shared()}, + {CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, make_shared()}, + {CFG_DSCP_TO_FC_MAP_TABLE_NAME, make_shared()}, + {CFG_EXP_TO_FC_MAP_TABLE_NAME, make_shared()}, + 
{CFG_TC_TO_DOT1P_MAP_TABLE_NAME, make_shared()}, + {CFG_TC_TO_DSCP_MAP_TABLE_NAME, make_shared()}, + {APP_TUNNEL_DECAP_TABLE_NAME, make_shared()} }; map qos_to_ref_table_map = { @@ -88,6 +100,8 @@ map qos_to_ref_table_map = { {mpls_tc_to_tc_field_name, CFG_MPLS_TC_TO_TC_MAP_TABLE_NAME}, {dot1p_to_tc_field_name, CFG_DOT1P_TO_TC_MAP_TABLE_NAME}, {tc_to_queue_field_name, CFG_TC_TO_QUEUE_MAP_TABLE_NAME}, + {tc_to_dot1p_field_name, CFG_TC_TO_DOT1P_MAP_TABLE_NAME}, + {tc_to_dscp_field_name, CFG_TC_TO_DSCP_MAP_TABLE_NAME}, {tc_to_pg_map_field_name, CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME}, {pfc_to_pg_map_name, CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME}, {pfc_to_queue_map_name, CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME}, @@ -172,7 +186,7 @@ task_process_status QosMapHandler::processWorkItem(Consumer& consumer, KeyOpFiel } if (!removeQosItem(sai_object)) { - SWSS_LOG_ERROR("Failed to remove dscp_to_tc map. db name:%s sai object:%" PRIx64, qos_object_name.c_str(), sai_object); + SWSS_LOG_ERROR("Failed to remove QoS map. 
db name:%s sai object:%" PRIx64, qos_object_name.c_str(), sai_object); return task_process_status::task_failed; } auto it_to_delete = (QosOrch::getTypeMap()[qos_map_type_name])->find(qos_object_name); @@ -464,6 +478,60 @@ task_process_status QosOrch::handleTcToQueueTable(Consumer& consumer, KeyOpField return tc_queue_handler.processWorkItem(consumer, tuple); } +//Functions for TC-to-DOT1P qos map handling +bool TcToDot1pMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes) +{ + SWSS_LOG_ENTER(); + sai_attribute_t list_attr; + sai_qos_map_list_t tc_map_list; + tc_map_list.count = (uint32_t)kfvFieldsValues(tuple).size(); + tc_map_list.list = new sai_qos_map_t[tc_map_list.count](); + uint32_t ind = 0; + for (auto i = kfvFieldsValues(tuple).begin(); i != kfvFieldsValues(tuple).end(); i++, ind++) + { + tc_map_list.list[ind].key.tc = (uint8_t)stoi(fvField(*i)); + tc_map_list.list[ind].value.dot1p = (uint8_t)stoi(fvValue(*i)); + } + list_attr.id = SAI_QOS_MAP_ATTR_MAP_TO_VALUE_LIST; + list_attr.value.qosmap.count = tc_map_list.count; + list_attr.value.qosmap.list = tc_map_list.list; + attributes.push_back(list_attr); + return true; +} + +sai_object_id_t TcToDot1pMapHandler::addQosItem(const vector &attributes) +{ + SWSS_LOG_ENTER(); + sai_status_t sai_status; + sai_object_id_t sai_object; + vector qos_map_attrs; + sai_attribute_t qos_map_attr; + + qos_map_attr.id = SAI_QOS_MAP_ATTR_TYPE; + qos_map_attr.value.s32 = SAI_QOS_MAP_TYPE_TC_AND_COLOR_TO_DOT1P; + qos_map_attrs.push_back(qos_map_attr); + + qos_map_attr.id = SAI_QOS_MAP_ATTR_MAP_TO_VALUE_LIST; + qos_map_attr.value.qosmap.count = attributes[0].value.qosmap.count; + qos_map_attr.value.qosmap.list = attributes[0].value.qosmap.list; + qos_map_attrs.push_back(qos_map_attr); + + sai_status = sai_qos_map_api->create_qos_map(&sai_object, gSwitchId, (uint32_t)qos_map_attrs.size(), qos_map_attrs.data()); + if (SAI_STATUS_SUCCESS != sai_status) + { + SWSS_LOG_ERROR("Failed to create 
tc_to_dot1p qos map. status:%d", sai_status); + return SAI_NULL_OBJECT_ID; + } + return sai_object; +} + +task_process_status QosOrch::handleTcToDot1pTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple) +{ + SWSS_LOG_ENTER(); + TcToDot1pMapHandler tc_dot1p_handler; + return tc_dot1p_handler.processWorkItem(consumer, tuple); +} + void WredMapHandler::freeAttribResources(vector &attributes) { SWSS_LOG_ENTER(); @@ -720,6 +788,7 @@ sai_object_id_t WredMapHandler::addQosItem(const vector &attrib sai_attribute_t attr; vector attrs; uint8_t drop_prob_set = 0; + uint8_t wred_enable_set = 0; attr.id = SAI_WRED_ATTR_WEIGHT; attr.value.s32 = 0; @@ -729,32 +798,53 @@ sai_object_id_t WredMapHandler::addQosItem(const vector &attrib { attrs.push_back(attrib); - if (attrib.id == SAI_WRED_ATTR_GREEN_DROP_PROBABILITY) + switch (attrib.id) { + case SAI_WRED_ATTR_GREEN_ENABLE: + if (attrib.value.booldata) + { + wred_enable_set |= GREEN_WRED_ENABLED; + } + break; + case SAI_WRED_ATTR_YELLOW_ENABLE: + if (attrib.value.booldata) + { + wred_enable_set |= YELLOW_WRED_ENABLED; + } + break; + case SAI_WRED_ATTR_RED_ENABLE: + if (attrib.value.booldata) + { + wred_enable_set |= RED_WRED_ENABLED; + } + break; + case SAI_WRED_ATTR_GREEN_DROP_PROBABILITY: drop_prob_set |= GREEN_DROP_PROBABILITY_SET; - } - else if (attrib.id == SAI_WRED_ATTR_YELLOW_DROP_PROBABILITY) - { + break; + case SAI_WRED_ATTR_YELLOW_DROP_PROBABILITY: drop_prob_set |= YELLOW_DROP_PROBABILITY_SET; - } - else if (attrib.id == SAI_WRED_ATTR_RED_DROP_PROBABILITY) - { + break; + case SAI_WRED_ATTR_RED_DROP_PROBABILITY: drop_prob_set |= RED_DROP_PROBABILITY_SET; + break; + default: + break; } } - if (!(drop_prob_set & GREEN_DROP_PROBABILITY_SET)) + + if (!(drop_prob_set & GREEN_DROP_PROBABILITY_SET) && (wred_enable_set & GREEN_WRED_ENABLED)) { attr.id = SAI_WRED_ATTR_GREEN_DROP_PROBABILITY; attr.value.s32 = 100; attrs.push_back(attr); } - if (!(drop_prob_set & YELLOW_DROP_PROBABILITY_SET)) + if (!(drop_prob_set & 
YELLOW_DROP_PROBABILITY_SET) && (wred_enable_set & YELLOW_WRED_ENABLED)) { attr.id = SAI_WRED_ATTR_YELLOW_DROP_PROBABILITY; attr.value.s32 = 100; attrs.push_back(attr); } - if (!(drop_prob_set & RED_DROP_PROBABILITY_SET)) + if (!(drop_prob_set & RED_DROP_PROBABILITY_SET) && (wred_enable_set & RED_WRED_ENABLED)) { attr.id = SAI_WRED_ATTR_RED_DROP_PROBABILITY; attr.value.s32 = 100; @@ -829,7 +919,7 @@ sai_object_id_t TcToPgHandler::addQosItem(const vector &attribu sai_status = sai_qos_map_api->create_qos_map(&sai_object, gSwitchId, (uint32_t)qos_map_attrs.size(), qos_map_attrs.data()); if (SAI_STATUS_SUCCESS != sai_status) { - SWSS_LOG_ERROR("Failed to create tc_to_queue map. status:%d", sai_status); + SWSS_LOG_ERROR("Failed to create tc_to_pg map. status:%d", sai_status); return SAI_NULL_OBJECT_ID; } return sai_object; @@ -883,7 +973,7 @@ sai_object_id_t PfcPrioToPgHandler::addQosItem(const vector &at sai_status = sai_qos_map_api->create_qos_map(&sai_object, gSwitchId, (uint32_t)qos_map_attrs.size(), qos_map_attrs.data()); if (SAI_STATUS_SUCCESS != sai_status) { - SWSS_LOG_ERROR("Failed to create tc_to_queue map. status:%d", sai_status); + SWSS_LOG_ERROR("Failed to create pfc_priority_to_queue map. status:%d", sai_status); return SAI_NULL_OBJECT_ID; } return sai_object; @@ -938,7 +1028,7 @@ sai_object_id_t PfcToQueueHandler::addQosItem(const vector &att sai_status = sai_qos_map_api->create_qos_map(&sai_object, gSwitchId, (uint32_t)qos_map_attrs.size(), qos_map_attrs.data()); if (SAI_STATUS_SUCCESS != sai_status) { - SWSS_LOG_ERROR("Failed to create tc_to_queue map. status:%d", sai_status); + SWSS_LOG_ERROR("Failed to create pfc_priority_to_queue map. 
status:%d", sai_status); return SAI_NULL_OBJECT_ID; } return sai_object; @@ -1246,6 +1336,7 @@ void QosOrch::initTableHandlers() m_qos_handler_map.insert(qos_handler_pair(CFG_DSCP_TO_FC_MAP_TABLE_NAME, &QosOrch::handleDscpToFcTable)); m_qos_handler_map.insert(qos_handler_pair(CFG_EXP_TO_FC_MAP_TABLE_NAME, &QosOrch::handleExpToFcTable)); m_qos_handler_map.insert(qos_handler_pair(CFG_TC_TO_DSCP_MAP_TABLE_NAME, &QosOrch::handleTcToDscpTable)); + m_qos_handler_map.insert(qos_handler_pair(CFG_TC_TO_DOT1P_MAP_TABLE_NAME, &QosOrch::handleTcToDot1pTable)); m_qos_handler_map.insert(qos_handler_pair(CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, &QosOrch::handleTcToPgTable)); m_qos_handler_map.insert(qos_handler_pair(CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, &QosOrch::handlePfcPrioToPgTable)); @@ -1310,11 +1401,6 @@ task_process_status QosOrch::handleSchedulerTable(Consumer& consumer, KeyOpField attr.value.u8 = (uint8_t)stoi(fvValue(*i)); sai_attr_list.push_back(attr); } - else if (fvField(*i) == scheduler_priority_field_name) - { - // TODO: The meaning is to be able to adjust priority of the given scheduler group. - // However currently SAI model does not provide such ability. - } else if (fvField(*i) == scheduler_meter_type_field_name) { sai_meter_type_t meter_value = scheduler_meter_map.at(fvValue(*i)); @@ -1543,22 +1629,58 @@ sai_object_id_t QosOrch::getSchedulerGroup(const Port &port, const sai_object_id bool QosOrch::applySchedulerToQueueSchedulerGroup(Port &port, size_t queue_ind, sai_object_id_t scheduler_profile_id) { SWSS_LOG_ENTER(); + sai_object_id_t queue_id; + Port input_port = port; + sai_object_id_t group_id = 0; - if (port.m_queue_ids.size() <= queue_ind) + if (gMySwitchType == "voq") { - SWSS_LOG_ERROR("Invalid queue index specified:%zd", queue_ind); - return false; - } + if(port.m_system_port_info.type == SAI_SYSTEM_PORT_TYPE_REMOTE) + { + return true; + } + + // Get local port from system port. 
port is pointing to local port now + if (!gPortsOrch->getPort(port.m_system_port_info.local_port_oid, port)) + { + SWSS_LOG_ERROR("Port with alias:%s not found", port.m_alias.c_str()); + return task_process_status::task_invalid_entry; + } - const sai_object_id_t queue_id = port.m_queue_ids[queue_ind]; + if (port.m_queue_ids.size() <= queue_ind) + { + SWSS_LOG_ERROR("Invalid queue index specified:%zd", queue_ind); + return false; + } + queue_id = port.m_queue_ids[queue_ind]; + + group_id = getSchedulerGroup(port, queue_id); + if(group_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_ERROR("Failed to find a scheduler group for port: %s queue: %zu", port.m_alias.c_str(), queue_ind); + return false; + } - const sai_object_id_t group_id = getSchedulerGroup(port, queue_id); - if(group_id == SAI_NULL_OBJECT_ID) + // port is set back to system port + port = input_port; + } + else { - SWSS_LOG_ERROR("Failed to find a scheduler group for port: %s queue: %zu", port.m_alias.c_str(), queue_ind); - return false; + if (port.m_queue_ids.size() <= queue_ind) + { + SWSS_LOG_ERROR("Invalid queue index specified:%zd", queue_ind); + return false; + } + queue_id = port.m_queue_ids[queue_ind]; + + group_id = getSchedulerGroup(port, queue_id); + if(group_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_ERROR("Failed to find a scheduler group for port: %s queue: %zu", port.m_alias.c_str(), queue_ind); + return false; + } } - + /* Apply scheduler profile to all port groups */ sai_attribute_t attr; sai_status_t sai_status; @@ -1589,12 +1711,25 @@ bool QosOrch::applyWredProfileToQueue(Port &port, size_t queue_ind, sai_object_i sai_status_t sai_status; sai_object_id_t queue_id; - if (port.m_queue_ids.size() <= queue_ind) + if (gMySwitchType == "voq") { - SWSS_LOG_ERROR("Invalid queue index specified:%zd", queue_ind); - return false; + std :: vector queue_ids = gPortsOrch->getPortVoQIds(port); + if (queue_ids.size() <= queue_ind) + { + SWSS_LOG_ERROR("Invalid voq index specified:%zd", queue_ind); + return 
task_process_status::task_invalid_entry; + } + queue_id = queue_ids[queue_ind]; + } + else + { + if (port.m_queue_ids.size() <= queue_ind) + { + SWSS_LOG_ERROR("Invalid queue index specified:%zd", queue_ind); + return false; + } + queue_id = port.m_queue_ids[queue_ind]; } - queue_id = port.m_queue_ids[queue_ind]; attr.id = SAI_QUEUE_ATTR_WRED_PROFILE_ID; attr.value.oid = sai_wred_profile; @@ -1620,23 +1755,54 @@ task_process_status QosOrch::handleQueueTable(Consumer& consumer, KeyOpFieldsVal string op = kfvOp(tuple); size_t queue_ind = 0; vector tokens; + bool local_port = false; + string local_port_name; sai_uint32_t range_low, range_high; vector port_names; ref_resolve_status resolve_result; - // sample "QUEUE: {Ethernet4|0-1}" + /* + Input sample "QUEUE : {Ethernet4|0-1}" or + "QUEUE : {STG01-0101-0400-01T2-LC6|ASIC0|Ethernet4|0-1}" + */ tokens = tokenize(key, config_db_key_delimiter); - if (tokens.size() != 2) + + if (gMySwitchType == "voq") { - SWSS_LOG_ERROR("malformed key:%s. Must contain 2 tokens", key.c_str()); - return task_process_status::task_invalid_entry; + if (tokens.size() != 4) + { + SWSS_LOG_ERROR("malformed key:%s. 
Must contain 4 tokens", key.c_str()); + return task_process_status::task_invalid_entry; + } + + port_names = tokenize(tokens[0] + config_db_key_delimiter + tokens[1] + config_db_key_delimiter + tokens[2], list_item_delimiter); + if (!parseIndexRange(tokens[3], range_low, range_high)) + { + SWSS_LOG_ERROR("Failed to parse range:%s", tokens[3].c_str()); + return task_process_status::task_invalid_entry; + } + + if((tokens[0] == gMyHostName) && (tokens[1] == gMyAsicName)) + { + local_port = true; + local_port_name = tokens[2]; + SWSS_LOG_INFO("System port %s is local port %d local port name %s", port_names[0].c_str(), local_port, local_port_name.c_str()); + } } - port_names = tokenize(tokens[0], list_item_delimiter); - if (!parseIndexRange(tokens[1], range_low, range_high)) + else { - SWSS_LOG_ERROR("Failed to parse range:%s", tokens[1].c_str()); - return task_process_status::task_invalid_entry; + if (tokens.size() != 2) + { + SWSS_LOG_ERROR("malformed key:%s. Must contain 2 tokens", key.c_str()); + return task_process_status::task_invalid_entry; + } + port_names = tokenize(tokens[0], list_item_delimiter); + if (!parseIndexRange(tokens[1], range_low, range_high)) + { + SWSS_LOG_ERROR("Failed to parse range:%s", tokens[1].c_str()); + return task_process_status::task_invalid_entry; + } } bool donotChangeScheduler = false; @@ -1730,6 +1896,12 @@ task_process_status QosOrch::handleQueueTable(Consumer& consumer, KeyOpFieldsVal { Port port; SWSS_LOG_DEBUG("processing port:%s", port_name.c_str()); + + if(local_port == true) + { + port_name = local_port_name; + } + if (!gPortsOrch->getPort(port_name, port)) { SWSS_LOG_ERROR("Port with alias:%s not found", port_name.c_str()); @@ -2054,12 +2226,13 @@ void QosOrch::doTask() SWSS_LOG_ENTER(); auto *port_qos_map_cfg_exec = getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME); + auto *queue_exec = getExecutor(CFG_QUEUE_TABLE_NAME); for (const auto &it : m_consumerMap) { auto *exec = it.second.get(); - if (exec == port_qos_map_cfg_exec) + if 
(exec == port_qos_map_cfg_exec || exec == queue_exec) { continue; } @@ -2068,6 +2241,7 @@ void QosOrch::doTask() } port_qos_map_cfg_exec->drain(); + queue_exec->drain(); } void QosOrch::doTask(Consumer &consumer) diff --git a/orchagent/qosorch.h b/orchagent/qosorch.h index b5e2e1ad86..8079e45bc0 100644 --- a/orchagent/qosorch.h +++ b/orchagent/qosorch.h @@ -17,6 +17,8 @@ const string pfc_enable_name = "pfc_enable"; const string pfcwd_sw_enable_name = "pfcwd_sw_enable"; const string tc_to_pg_map_field_name = "tc_to_pg_map"; const string tc_to_queue_field_name = "tc_to_queue_map"; +const string tc_to_dot1p_field_name = "tc_to_dot1p_map"; +const string tc_to_dscp_field_name = "tc_to_dscp_map"; const string scheduler_field_name = "scheduler"; const string red_max_threshold_field_name = "red_max_threshold"; const string red_min_threshold_field_name = "red_min_threshold"; @@ -44,7 +46,6 @@ const string scheduler_algo_DWRR = "DWRR"; const string scheduler_algo_WRR = "WRR"; const string scheduler_algo_STRICT = "STRICT"; const string scheduler_weight_field_name = "weight"; -const string scheduler_priority_field_name = "priority"; const string scheduler_meter_type_field_name = "meter_type"; const string scheduler_min_bandwidth_rate_field_name = "cir";//Committed Information Rate const string scheduler_min_bandwidth_burst_rate_field_name = "cbs";//Committed Burst Size @@ -176,6 +177,13 @@ class TcToDscpMapHandler : public QosMapHandler sai_object_id_t addQosItem(const vector &attributes) override; }; +class TcToDot1pMapHandler : public QosMapHandler +{ +public: + bool convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes); + sai_object_id_t addQosItem(const vector &attributes); +}; + class QosOrch : public Orch { public: @@ -210,6 +218,7 @@ class QosOrch : public Orch task_process_status handleDscpToFcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple); task_process_status handleExpToFcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple); 
task_process_status handleTcToDscpTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple); + task_process_status handleTcToDot1pTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple); task_process_status handleGlobalQosMap(const string &op, KeyOpFieldsValuesTuple &tuple); diff --git a/orchagent/response_publisher.cpp b/orchagent/response_publisher.cpp index 5d0490167c..d5b94a586d 100644 --- a/orchagent/response_publisher.cpp +++ b/orchagent/response_publisher.cpp @@ -5,13 +5,6 @@ #include #include -#include "timestamp.h" - -extern bool gResponsePublisherRecord; -extern bool gResponsePublisherLogRotate; -extern std::ofstream gResponsePublisherRecordOfs; -extern std::string gResponsePublisherRecordFile; - namespace { @@ -35,27 +28,10 @@ std::string PrependedComponent(const ReturnCode &status) return kOrchagentComponent; } -void PerformLogRotate() -{ - if (!gResponsePublisherLogRotate) - { - return; - } - gResponsePublisherLogRotate = false; - - gResponsePublisherRecordOfs.close(); - gResponsePublisherRecordOfs.open(gResponsePublisherRecordFile); - if (!gResponsePublisherRecordOfs.is_open()) - { - SWSS_LOG_ERROR("Failed to reopen Response Publisher record file %s: %s", gResponsePublisherRecordFile.c_str(), - strerror(errno)); - } -} - void RecordDBWrite(const std::string &table, const std::string &key, const std::vector &attrs, const std::string &op) { - if (!gResponsePublisherRecord) + if (!swss::Recorder::Instance().respub.isRecord()) { return; } @@ -66,14 +42,13 @@ void RecordDBWrite(const std::string &table, const std::string &key, const std:: s += "|" + fvField(attr) + ":" + fvValue(attr); } - PerformLogRotate(); - gResponsePublisherRecordOfs << swss::getTimestamp() << "|" << s << std::endl; + swss::Recorder::Instance().respub.record(s); } void RecordResponse(const std::string &response_channel, const std::string &key, const std::vector &attrs, const std::string &status) { - if (!gResponsePublisherRecord) + if (!swss::Recorder::Instance().respub.isRecord()) { 
return; } @@ -84,13 +59,14 @@ void RecordResponse(const std::string &response_channel, const std::string &key, s += "|" + fvField(attr) + ":" + fvValue(attr); } - PerformLogRotate(); - gResponsePublisherRecordOfs << swss::getTimestamp() << "|" << s << std::endl; + swss::Recorder::Instance().respub.record(s); } } // namespace -ResponsePublisher::ResponsePublisher() : m_db("APPL_STATE_DB", 0) +ResponsePublisher::ResponsePublisher(bool buffered) + : m_db(std::make_unique("APPL_STATE_DB", 0)), + m_pipe(std::make_unique(m_db.get())), m_buffered(buffered) { } @@ -107,17 +83,14 @@ void ResponsePublisher::publish(const std::string &table, const std::string &key } std::string response_channel = "APPL_DB_" + table + "_RESPONSE_CHANNEL"; - if (m_notifiers.find(table) == m_notifiers.end()) - { - m_notifiers[table] = std::make_unique(&m_db, response_channel); - } + swss::NotificationProducer notificationProducer{m_pipe.get(), response_channel, m_buffered}; auto intent_attrs_copy = intent_attrs; // Add error message as the first field-value-pair. swss::FieldValueTuple err_str("err_str", PrependedComponent(status) + status.message()); intent_attrs_copy.insert(intent_attrs_copy.begin(), err_str); // Sends the response to the notification channel. 
- m_notifiers[table]->send(status.codeStr(), key, intent_attrs_copy); + notificationProducer.send(status.codeStr(), key, intent_attrs_copy); RecordResponse(response_channel, key, intent_attrs_copy, status.codeStr()); } @@ -140,17 +113,14 @@ void ResponsePublisher::publish(const std::string &table, const std::string &key void ResponsePublisher::writeToDB(const std::string &table, const std::string &key, const std::vector &values, const std::string &op, bool replace) { - if (m_tables.find(table) == m_tables.end()) - { - m_tables[table] = std::make_unique(&m_db, table); - } + swss::Table applStateTable{m_pipe.get(), table, m_buffered}; auto attrs = values; if (op == SET_COMMAND) { if (replace) { - m_tables[table]->del(key); + applStateTable.del(key); } if (!values.size()) { @@ -160,9 +130,9 @@ void ResponsePublisher::writeToDB(const std::string &table, const std::string &k // Write to DB only if the key does not exist or non-NULL attributes are // being written to the entry. std::vector fv; - if (!m_tables[table]->get(key, fv)) + if (!applStateTable.get(key, fv)) { - m_tables[table]->set(key, attrs); + applStateTable.set(key, attrs); RecordDBWrite(table, key, attrs, op); return; } @@ -179,13 +149,23 @@ void ResponsePublisher::writeToDB(const std::string &table, const std::string &k } if (attrs.size()) { - m_tables[table]->set(key, attrs); + applStateTable.set(key, attrs); RecordDBWrite(table, key, attrs, op); } } else if (op == DEL_COMMAND) { - m_tables[table]->del(key); + applStateTable.del(key); RecordDBWrite(table, key, {}, op); } } + +void ResponsePublisher::flush() +{ + m_pipe->flush(); +} + +void ResponsePublisher::setBuffered(bool buffered) +{ + m_buffered = buffered; +} diff --git a/orchagent/response_publisher.h b/orchagent/response_publisher.h index cd688112e8..985532e827 100644 --- a/orchagent/response_publisher.h +++ b/orchagent/response_publisher.h @@ -7,6 +7,7 @@ #include "dbconnector.h" #include "notificationproducer.h" +#include "recorder.h" #include 
"response_publisher_interface.h" #include "table.h" @@ -16,7 +17,8 @@ class ResponsePublisher : public ResponsePublisherInterface { public: - explicit ResponsePublisher(); + explicit ResponsePublisher(bool buffered = false); + virtual ~ResponsePublisher() = default; // Intent attributes are the attributes sent in the notification into the @@ -42,10 +44,21 @@ class ResponsePublisher : public ResponsePublisherInterface void writeToDB(const std::string &table, const std::string &key, const std::vector &values, const std::string &op, bool replace = false) override; + /** + * @brief Flush pending responses + */ + void flush(); + + /** + * @brief Set buffering mode + * + * @param buffered Flag whether responses are buffered + */ + void setBuffered(bool buffered); + private: - swss::DBConnector m_db; - // Maps table names to tables. - std::unordered_map> m_tables; - // Maps table names to notifiers. - std::unordered_map> m_notifiers; + std::unique_ptr m_db; + std::unique_ptr m_pipe; + + bool m_buffered{false}; }; diff --git a/orchagent/response_publisher_interface.h b/orchagent/response_publisher_interface.h index 92d364a500..094238b826 100644 --- a/orchagent/response_publisher_interface.h +++ b/orchagent/response_publisher_interface.h @@ -5,32 +5,31 @@ #include "return_code.h" #include "table.h" -class ResponsePublisherInterface { - public: - virtual ~ResponsePublisherInterface() = default; +class ResponsePublisherInterface +{ + public: + virtual ~ResponsePublisherInterface() = default; - // Publishes the response status. - // If intent attributes are empty, it is a delete operation. - // What "publish" needs to do is completely up to implementation. - // This API does not include redis DB namespace. So if implementation chooses - // to write to a redis DB, it will need to use a fixed namespace. - // The replace flag indicates the state attributes will replace the old ones. 
- virtual void publish(const std::string& table, const std::string& key, - const std::vector& intent_attrs, - const ReturnCode& status, - const std::vector& state_attrs, - bool replace = false) = 0; + // Publishes the response status. + // If intent attributes are empty, it is a delete operation. + // What "publish" needs to do is completely up to implementation. + // This API does not include redis DB namespace. So if implementation chooses + // to write to a redis DB, it will need to use a fixed namespace. + // The replace flag indicates the state attributes will replace the old ones. + virtual void publish(const std::string &table, const std::string &key, + const std::vector &intent_attrs, const ReturnCode &status, + const std::vector &state_attrs, bool replace = false) = 0; - // Publishes response status. If response status is OK then also writes the - // intent attributes into the DB. - // The replace flag indicates a replace operation. - virtual void publish(const std::string& table, const std::string& key, - const std::vector& intent_attrs, - const ReturnCode& status, bool replace = false) = 0; + // Publishes response status. If response status is OK then also writes the + // intent attributes into the DB. + // The replace flag indicates a replace operation. + virtual void publish(const std::string &table, const std::string &key, + const std::vector &intent_attrs, const ReturnCode &status, + bool replace = false) = 0; - // Write to DB only. This API does not send notification. - // The replace flag indicates the new attributes will replace the old ones. - virtual void writeToDB(const std::string& table, const std::string& key, - const std::vector& values, - const std::string& op, bool replace = false) = 0; + // Write to DB only. This API does not send notification. + // The replace flag indicates the new attributes will replace the old ones. 
+ virtual void writeToDB(const std::string &table, const std::string &key, + const std::vector &values, const std::string &op, + bool replace = false) = 0; }; diff --git a/orchagent/routeorch.cpp b/orchagent/routeorch.cpp index a793ab8dcc..dea3d10262 100644 --- a/orchagent/routeorch.cpp +++ b/orchagent/routeorch.cpp @@ -6,6 +6,7 @@ #include "cbf/cbfnhgorch.h" #include "logger.h" #include "flowcounterrouteorch.h" +#include "muxorch.h" #include "swssnet.h" #include "crmorch.h" #include "directory.h" @@ -47,6 +48,8 @@ RouteOrch::RouteOrch(DBConnector *db, vector &tableNames, { SWSS_LOG_ENTER(); + m_publisher.setBuffered(true); + sai_attribute_t attr; attr.id = SAI_SWITCH_ATTR_NUMBER_OF_ECMP_GROUPS; @@ -499,7 +502,7 @@ void RouteOrch::doTask(Consumer& consumer) auto rc = toBulk.emplace(std::piecewise_construct, std::forward_as_tuple(key, op), - std::forward_as_tuple()); + std::forward_as_tuple(key, (op == SET_COMMAND))); bool inserted = rc.second; auto& ctx = rc.first->second; @@ -630,6 +633,11 @@ void RouteOrch::doTask(Consumer& consumer) if (fvField(i) == "seg_src") srv6_source = fvValue(i); + + if (fvField(i) == "protocol") + { + ctx.protocol = fvValue(i); + } } /* @@ -658,6 +666,8 @@ void RouteOrch::doTask(Consumer& consumer) NextHopGroupKey& nhg = ctx.nhg; vector srv6_segv; vector srv6_src; + bool l3Vni = true; + uint32_t vni = 0; /* Check if the next hop group is owned by the NhgOrch. */ if (nhg_index.empty()) @@ -689,6 +699,23 @@ void RouteOrch::doTask(Consumer& consumer) ipv.resize(alsv.size()); } + for (auto &vni_str: vni_labelv) + { + vni = static_cast(std::stoul(vni_str)); + if (!m_vrfOrch->isL3VniVlan(vni)) + { + SWSS_LOG_WARN("Route %s is received on non L3 VNI %s", key.c_str(), vni_str.c_str()); + l3Vni = false; + break; + } + } + + if (!l3Vni) + { + it++; + continue; + } + /* Set the empty ip(s) to zero * as IpAddress("") will construct a incorrect ip. 
*/ for (auto &ip : ipv) @@ -724,6 +751,9 @@ void RouteOrch::doTask(Consumer& consumer) it = consumer.m_toSync.erase(it); else it++; + + /* Publish route state to advertise routes to Loopback interface */ + publishRouteState(ctx); continue; } @@ -777,6 +807,18 @@ void RouteOrch::doTask(Consumer& consumer) } else { + if(ipv.size() != rmacv.size()){ + SWSS_LOG_ERROR("Skip route %s, it has an invalid router mac field %s", key.c_str(), remote_macs.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + + if(ipv.size() != vni_labelv.size()){ + SWSS_LOG_ERROR("Skip route %s, it has an invalid vni label field %s", key.c_str(), vni_labels.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + for (uint32_t i = 0; i < ipv.size(); i++) { if (i) nhg_str += NHG_DELIMITER; @@ -831,6 +873,10 @@ void RouteOrch::doTask(Consumer& consumer) /* fullmask subnet route is same as ip2me route */ else if (ip_prefix.isFullMask() && m_intfsOrch->isPrefixSubnet(ip_prefix, alsv[0])) { + /* The prefix is full mask (/32 or /128) and it is an interface subnet route, so IntfOrch has already + * created an IP2ME route for it and we skip programming such route here as it already exists. + * However, to keep APPL_DB and APPL_STATE_DB consistent we have to publish it. */ + publishRouteState(ctx); it = consumer.m_toSync.erase(it); } /* subnet route, vrf leaked route, etc */ @@ -860,7 +906,9 @@ void RouteOrch::doTask(Consumer& consumer) } else { - /* Duplicate entry */ + /* Duplicate entry. Publish route state anyway since there could be multiple DEL, SET operations + * consolidated by ConsumerStateTable leading to orchagent receiving only the last SET update. 
*/ + publishRouteState(ctx); it = consumer.m_toSync.erase(it); } @@ -1552,6 +1600,20 @@ bool RouteOrch::updateNextHopRoutes(const NextHopKey& nextHop, uint32_t& numRout auto rt = it->second.begin(); while(rt != it->second.end()) { + /* Check if route points to nexthop group and skip */ + NextHopGroupKey nhg_key = gRouteOrch->getSyncdRouteNhgKey(gVirtualRouterId, (*rt).prefix); + if (nhg_key.getSize() > 1) + { + /* multiple mux nexthop case: + * skip for now, muxOrch::updateRoute() will handle route + */ + SWSS_LOG_INFO("Route %s is mux multi nexthop route, skipping.", + (*rt).prefix.to_string().c_str()); + + ++rt; + continue; + } + SWSS_LOG_INFO("Updating route %s", (*rt).prefix.to_string().c_str()); next_hop_id = m_neighOrch->getNextHopId(nextHop); @@ -1580,6 +1642,24 @@ bool RouteOrch::updateNextHopRoutes(const NextHopKey& nextHop, uint32_t& numRout return true; } +/** + * @brief returns a route prefix associated with nexthopkey + * @param routeKeys empty set of routekeys to populate + * @param nexthopKey nexthop key to lookup + * @return true if found, false if not found. 
+ */ +bool RouteOrch::getRoutesForNexthop(std::set& routeKeys, const NextHopKey& nexthopKey) +{ + auto it = m_nextHops.find(nexthopKey); + + if (it != m_nextHops.end()) + { + routeKeys = it->second; + } + + return it != m_nextHops.end(); +} + void RouteOrch::addTempRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextHops) { SWSS_LOG_ENTER(); @@ -2016,6 +2096,7 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey auto it_status = object_statuses.begin(); auto it_route = m_syncdRoutes.at(vrf_id).find(ipPrefix); + MuxOrch* mux_orch = gDirectory.get(); if (isFineGrained) { if (it_route == m_syncdRoutes.at(vrf_id).end()) @@ -2145,15 +2226,35 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey { decreaseNextHopRefCount(it_route->second.nhg_key); auto ol_nextHops = it_route->second.nhg_key; - if (ol_nextHops.getSize() > 1 - && m_syncdNextHopGroups[ol_nextHops].ref_count == 0) + if (ol_nextHops.getSize() > 1) { - m_bulkNhgReducedRefCnt.emplace(ol_nextHops, 0); + if (m_syncdNextHopGroups[ol_nextHops].ref_count == 0) + { + SWSS_LOG_NOTICE("Update Nexthop Group %s", ol_nextHops.to_string().c_str()); + m_bulkNhgReducedRefCnt.emplace(ol_nextHops, 0); + } + if (mux_orch->isMuxNexthops(ol_nextHops)) + { + SWSS_LOG_NOTICE("Remove mux Nexthop %s", ol_nextHops.to_string().c_str()); + RouteKey routekey = { vrf_id, ipPrefix }; + auto nexthop_list = ol_nextHops.getNextHops(); + for (auto nh = nexthop_list.begin(); nh != nexthop_list.end(); nh++) + { + if (!nh->ip_address.isZero()) + { + removeNextHopRoute(*nh, routekey); + } + } + } } else if (ol_nextHops.is_overlay_nexthop()) { - SWSS_LOG_NOTICE("Update overlay Nexthop %s", ol_nextHops.to_string().c_str()); - m_bulkNhgReducedRefCnt.emplace(ol_nextHops, vrf_id); + const NextHopKey& nexthop = *it_route->second.nhg_key.getNextHops().begin(); + if (m_neighOrch->getNextHopRefCount(nexthop) == 0) + { + SWSS_LOG_NOTICE("Update overlay Nexthop %s", 
ol_nextHops.to_string().c_str()); + m_bulkNhgReducedRefCnt.emplace(ol_nextHops, vrf_id); + } } else if (ol_nextHops.is_srv6_nexthop()) { @@ -2211,6 +2312,18 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey addNextHopRoute(nexthop, r_key); } } + else if (mux_orch->isMuxNexthops(nextHops)) + { + RouteKey routekey = { vrf_id, ipPrefix }; + auto nexthop_list = nextHops.getNextHops(); + for (auto nh = nexthop_list.begin(); nh != nexthop_list.end(); nh++) + { + if (!nh->ip_address.isZero()) + { + addNextHopRoute(*nh, routekey); + } + } + } if (ipPrefix.isDefaultRoute()) { @@ -2224,8 +2337,17 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey m_syncdRoutes[vrf_id][ipPrefix] = RouteNhg(nextHops, ctx.nhg_index); + // update routes to reflect mux state + if (mux_orch->isMuxNexthops(nextHops)) + { + mux_orch->updateRoute(ipPrefix, true); + } + notifyNextHopChangeObservers(vrf_id, ipPrefix, nextHops, true); + /* Publish and update APPL STATE DB route entry programming status */ + publishRouteState(ctx); + /* * If the route uses a temporary synced NHG owned by NhgOrch, return false * in order to keep trying to update the route in case the NHG is updated, @@ -2383,16 +2505,38 @@ bool RouteOrch::removeRoutePost(const RouteBulkContext& ctx) decreaseNextHopRefCount(it_route->second.nhg_key); auto ol_nextHops = it_route->second.nhg_key; - - if (it_route->second.nhg_key.getSize() > 1 - && m_syncdNextHopGroups[it_route->second.nhg_key].ref_count == 0) + MuxOrch* mux_orch = gDirectory.get(); + if (it_route->second.nhg_key.getSize() > 1) { - m_bulkNhgReducedRefCnt.emplace(it_route->second.nhg_key, 0); + if (m_syncdNextHopGroups[it_route->second.nhg_key].ref_count == 0) + { + SWSS_LOG_NOTICE("Remove Nexthop Group %s", ol_nextHops.to_string().c_str()); + m_bulkNhgReducedRefCnt.emplace(it_route->second.nhg_key, 0); + } + if (mux_orch->isMuxNexthops(ol_nextHops)) + { + SWSS_LOG_NOTICE("Remove mux Nexthop %s", 
ol_nextHops.to_string().c_str()); + RouteKey routekey = { vrf_id, ipPrefix }; + auto nexthop_list = ol_nextHops.getNextHops(); + for (auto nh = nexthop_list.begin(); nh != nexthop_list.end(); nh++) + { + if (!nh->ip_address.isZero()) + { + SWSS_LOG_NOTICE("removeNextHopRoute"); + removeNextHopRoute(*nh, routekey); + } + } + mux_orch->updateRoute(ipPrefix, false); + } } else if (ol_nextHops.is_overlay_nexthop()) { - SWSS_LOG_NOTICE("Remove overlay Nexthop %s", ol_nextHops.to_string().c_str()); - m_bulkNhgReducedRefCnt.emplace(ol_nextHops, vrf_id); + const NextHopKey& nexthop = *it_route->second.nhg_key.getNextHops().begin(); + if (m_neighOrch->getNextHopRefCount(nexthop) == 0) + { + SWSS_LOG_NOTICE("Remove overlay Nexthop %s", ol_nextHops.to_string().c_str()); + m_bulkNhgReducedRefCnt.emplace(ol_nextHops, vrf_id); + } } /* * Additionally check if the NH has label and its ref count == 0, then @@ -2420,6 +2564,9 @@ bool RouteOrch::removeRoutePost(const RouteBulkContext& ctx) SWSS_LOG_INFO("Remove route %s with next hop(s) %s", ipPrefix.to_string().c_str(), it_route->second.nhg_key.to_string().c_str()); + /* Publish removal status, removes route entry from APPL STATE DB */ + publishRouteState(ctx); + if (ipPrefix.isDefaultRoute() && vrf_id == gVirtualRouterId) { it_route_table->second[ipPrefix] = RouteNhg(); @@ -2429,7 +2576,6 @@ bool RouteOrch::removeRoutePost(const RouteBulkContext& ctx) } else { - gFlowCounterRouteOrch->handleRouteRemove(vrf_id, ipPrefix); it_route_table->second.erase(ipPrefix); /* Notify about the route next hop removal */ @@ -2440,6 +2586,8 @@ bool RouteOrch::removeRoutePost(const RouteBulkContext& ctx) m_syncdRoutes.erase(vrf_id); m_vrfOrch->decreaseVrfRefCount(vrf_id); } + + gFlowCounterRouteOrch->handleRouteRemove(vrf_id, ipPrefix); } return true; @@ -2574,3 +2722,22 @@ void RouteOrch::decNhgRefCount(const std::string &nhg_index) gCbfNhgOrch->decNhgRefCount(nhg_index); } } + +void RouteOrch::publishRouteState(const RouteBulkContext& ctx, const 
ReturnCode& status) +{ + SWSS_LOG_ENTER(); + + std::vector fvs; + + /* Leave the fvs empty if the operation type is "DEL". + * An empty fvs makes ResponsePublisher::publish() remove the state entry from APPL_STATE_DB + */ + if (ctx.is_set) + { + fvs.emplace_back("protocol", ctx.protocol); + } + + const bool replace = false; + + m_publisher.publish(APP_ROUTE_TABLE_NAME, ctx.key, fvs, status, replace); +} diff --git a/orchagent/routeorch.h b/orchagent/routeorch.h index 5f297c6a0e..b232137766 100644 --- a/orchagent/routeorch.h +++ b/orchagent/routeorch.h @@ -122,8 +122,12 @@ struct RouteBulkContext // using_temp_nhg will track if the NhgOrch's owned NHG is temporary or not bool using_temp_nhg; - RouteBulkContext() - : excp_intfs_flag(false), using_temp_nhg(false) + std::string key; // Key in database table + std::string protocol; // Protocol string + bool is_set; // True if set operation + + RouteBulkContext(const std::string& key, bool is_set) + : key(key), excp_intfs_flag(false), using_temp_nhg(false), is_set(is_set) { } @@ -139,6 +143,8 @@ struct RouteBulkContext excp_intfs_flag = false; vrf_id = SAI_NULL_OBJECT_ID; using_temp_nhg = false; + key.clear(); + protocol.clear(); } }; @@ -195,6 +201,7 @@ class RouteOrch : public Orch, public Subject void addNextHopRoute(const NextHopKey&, const RouteKey&); void removeNextHopRoute(const NextHopKey&, const RouteKey&); bool updateNextHopRoutes(const NextHopKey&, uint32_t&); + bool getRoutesForNexthop(std::set&, const NextHopKey&); bool validnexthopinNextHopGroup(const NextHopKey&, uint32_t&); bool invalidnexthopinNextHopGroup(const NextHopKey&, uint32_t&); @@ -269,6 +276,8 @@ class RouteOrch : public Orch, public Subject const NhgBase &getNhg(const std::string& nhg_index); void incNhgRefCount(const std::string& nhg_index); void decNhgRefCount(const std::string& nhg_index); + + void publishRouteState(const RouteBulkContext& ctx, const ReturnCode& status = ReturnCode(SAI_STATUS_SUCCESS)); }; #endif /* SWSS_ROUTEORCH_H */ diff 
--git a/orchagent/saihelper.cpp b/orchagent/saihelper.cpp index b216e0dccd..d731b7b8ac 100644 --- a/orchagent/saihelper.cpp +++ b/orchagent/saihelper.cpp @@ -72,12 +72,19 @@ sai_l2mc_group_api_t* sai_l2mc_group_api; sai_counter_api_t* sai_counter_api; sai_bfd_api_t* sai_bfd_api; sai_my_mac_api_t* sai_my_mac_api; +sai_generic_programmable_api_t* sai_generic_programmable_api; +sai_dash_acl_api_t* sai_dash_acl_api; +sai_dash_vnet_api_t sai_dash_vnet_api; +sai_dash_outbound_ca_to_pa_api_t* sai_dash_outbound_ca_to_pa_api; +sai_dash_pa_validation_api_t * sai_dash_pa_validation_api; +sai_dash_outbound_routing_api_t* sai_dash_outbound_routing_api; +sai_dash_inbound_routing_api_t* sai_dash_inbound_routing_api; +sai_dash_eni_api_t* sai_dash_eni_api; +sai_dash_vip_api_t* sai_dash_vip_api; +sai_dash_direction_lookup_api_t* sai_dash_direction_lookup_api; +sai_twamp_api_t* sai_twamp_api; extern sai_object_id_t gSwitchId; -extern bool gSairedisRecord; -extern bool gSwssRecord; -extern ofstream gRecordOfs; -extern string gRecordFile; static map hardware_access_map = { @@ -201,6 +208,17 @@ void initSaiApi() sai_api_query(SAI_API_COUNTER, (void **)&sai_counter_api); sai_api_query(SAI_API_BFD, (void **)&sai_bfd_api); sai_api_query(SAI_API_MY_MAC, (void **)&sai_my_mac_api); + sai_api_query(SAI_API_GENERIC_PROGRAMMABLE, (void **)&sai_generic_programmable_api); + sai_api_query((sai_api_t)SAI_API_DASH_ACL, (void**)&sai_dash_acl_api); + sai_api_query((sai_api_t)SAI_API_DASH_VNET, (void**)&sai_dash_vnet_api); + sai_api_query((sai_api_t)SAI_API_DASH_OUTBOUND_CA_TO_PA, (void**)&sai_dash_outbound_ca_to_pa_api); + sai_api_query((sai_api_t)SAI_API_DASH_PA_VALIDATION, (void**)&sai_dash_pa_validation_api); + sai_api_query((sai_api_t)SAI_API_DASH_OUTBOUND_ROUTING, (void**)&sai_dash_outbound_routing_api); + sai_api_query((sai_api_t)SAI_API_DASH_INBOUND_ROUTING, (void**)&sai_dash_inbound_routing_api); + sai_api_query((sai_api_t)SAI_API_DASH_ENI, (void**)&sai_dash_eni_api); + 
sai_api_query((sai_api_t)SAI_API_DASH_VIP, (void**)&sai_dash_vip_api); + sai_api_query((sai_api_t)SAI_API_DASH_DIRECTION_LOOKUP, (void**)&sai_dash_direction_lookup_api); + sai_api_query(SAI_API_TWAMP, (void **)&sai_twamp_api); sai_log_set(SAI_API_SWITCH, SAI_LOG_LEVEL_NOTICE); sai_log_set(SAI_API_BRIDGE, SAI_LOG_LEVEL_NOTICE); @@ -239,9 +257,11 @@ void initSaiApi() sai_log_set(SAI_API_COUNTER, SAI_LOG_LEVEL_NOTICE); sai_log_set(SAI_API_BFD, SAI_LOG_LEVEL_NOTICE); sai_log_set(SAI_API_MY_MAC, SAI_LOG_LEVEL_NOTICE); + sai_log_set(SAI_API_GENERIC_PROGRAMMABLE, SAI_LOG_LEVEL_NOTICE); + sai_log_set(SAI_API_TWAMP, SAI_LOG_LEVEL_NOTICE); } -void initSaiRedis(const string &record_location, const std::string &record_filename) +void initSaiRedis() { /** * NOTE: Notice that all Redis attributes here are using SAI_NULL_OBJECT_ID @@ -252,9 +272,11 @@ void initSaiRedis(const string &record_location, const std::string &record_filen sai_attribute_t attr; sai_status_t status; - /* set recording dir before enable recording */ + auto record_filename = Recorder::Instance().sairedis.getFile(); + auto record_location = Recorder::Instance().sairedis.getLoc(); - if (gSairedisRecord) + /* set recording dir before enable recording */ + if (Recorder::Instance().sairedis.isRecord()) { attr.id = SAI_REDIS_SWITCH_ATTR_RECORDING_OUTPUT_DIR; attr.value.s8list.count = (uint32_t)record_location.size(); @@ -283,15 +305,14 @@ void initSaiRedis(const string &record_location, const std::string &record_filen } /* Disable/enable SAI Redis recording */ - attr.id = SAI_REDIS_SWITCH_ATTR_RECORD; - attr.value.booldata = gSairedisRecord; + attr.value.booldata = Recorder::Instance().sairedis.isRecord(); status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to %s SAI Redis recording, rv:%d", - gSairedisRecord ? "enable" : "disable", status); + Recorder::Instance().sairedis.isRecord() ? 
"enable" : "disable", status); exit(EXIT_FAILURE); } @@ -462,3 +483,323 @@ sai_status_t initSaiPhyApi(swss::gearbox_phy_t *phy) return status; } +task_process_status handleSaiCreateStatus(sai_api_t api, sai_status_t status, void *context) +{ + /* + * This function aims to provide coarse handling of failures in sairedis create + * operation (i.e., notify users by throwing excepions when failures happen). + * Return value: task_success - Handled the status successfully. No need to retry this SAI operation. + * task_need_retry - Cannot handle the status. Need to retry the SAI operation. + * task_failed - Failed to handle the status but another attempt is unlikely to resolve the failure. + * TODO: 1. Add general handling logic for specific statuses (e.g., SAI_STATUS_ITEM_ALREADY_EXISTS) + * 2. Develop fine-grain failure handling mechanisms and replace this coarse handling + * in each orch. + * 3. Take the type of sai api into consideration. + */ + switch (api) + { + case SAI_API_FDB: + switch (status) + { + case SAI_STATUS_SUCCESS: + SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiCreateStatus"); + return task_success; + case SAI_STATUS_ITEM_ALREADY_EXISTS: + /* + * In FDB creation, there are scenarios where the hardware learns an FDB entry before orchagent. + * In such cases, the FDB SAI creation would report the status of SAI_STATUS_ITEM_ALREADY_EXISTS, + * and orchagent should ignore the error and treat it as entry was explicitly created. + */ + return task_success; + default: + SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + break; + case SAI_API_HOSTIF: + switch (status) + { + case SAI_STATUS_SUCCESS: + return task_success; + case SAI_STATUS_FAILURE: + /* + * Host interface maybe failed due to lane not available. 
+ * In some scenarios, like SONiC virtual machine, the invalid lane may be not enabled by VM configuration, + * So just ignore the failure and report an error log. + */ + return task_ignore; + default: + SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + break; + case SAI_API_ROUTE: + switch (status) + { + case SAI_STATUS_SUCCESS: + SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiCreateStatus"); + return task_success; + case SAI_STATUS_ITEM_ALREADY_EXISTS: + case SAI_STATUS_NOT_EXECUTED: + /* With VNET routes, the same route can be learned via multiple + sources, like via BGP. Handle this gracefully */ + return task_success; + case SAI_STATUS_TABLE_FULL: + return task_need_retry; + default: + SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + break; + case SAI_API_NEIGHBOR: + case SAI_API_NEXT_HOP: + case SAI_API_NEXT_HOP_GROUP: + switch(status) + { + case SAI_STATUS_SUCCESS: + SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiCreateStatus"); + return task_success; + case SAI_STATUS_ITEM_ALREADY_EXISTS: + return task_success; + case SAI_STATUS_TABLE_FULL: + return task_need_retry; + default: + SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + break; + default: + switch (status) + { + case SAI_STATUS_SUCCESS: + SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiCreateStatus"); + return task_success; + default: + SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", + 
sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str());
+            handleSaiFailure(true);
+            break;
+        }
+    }
+    return task_need_retry;
+}
+
+task_process_status handleSaiSetStatus(sai_api_t api, sai_status_t status, void *context)
+{
+    /*
+     * This function aims to provide coarse handling of failures in sairedis set
+     * operation (i.e., notify users by throwing exceptions when failures happen).
+     * Return value: task_success - Handled the status successfully. No need to retry this SAI operation.
+     *               task_need_retry - Cannot handle the status. Need to retry the SAI operation.
+     *               task_failed - Failed to handle the status but another attempt is unlikely to resolve the failure.
+     * TODO: 1. Add general handling logic for specific statuses
+     *       2. Develop fine-grain failure handling mechanisms and replace this coarse handling
+     *          in each orch.
+     *       3. Take the type of sai api into consideration.
+     */
+    if (status == SAI_STATUS_SUCCESS)
+    {
+        SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiSetStatus");
+        return task_success;
+    }
+
+    switch (api)
+    {
+        case SAI_API_PORT:
+            switch (status)
+            {
+                case SAI_STATUS_INVALID_ATTR_VALUE_0:
+                    /*
+                     * If user gives an invalid attribute value, no need to retry or exit orchagent, just fail the current task
+                     * and let user correct the configuration. 
+ */ + SWSS_LOG_ERROR("Encountered SAI_STATUS_INVALID_ATTR_VALUE_0 in set operation, task failed, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + return task_failed; + default: + SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + break; + case SAI_API_TUNNEL: + switch (status) + { + case SAI_STATUS_ATTR_NOT_SUPPORTED_0: + SWSS_LOG_ERROR("Encountered SAI_STATUS_ATTR_NOT_SUPPORTED_0 in set operation, task failed, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + return task_failed; + default: + SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + break; + case SAI_API_BUFFER: + switch (status) + { + case SAI_STATUS_INSUFFICIENT_RESOURCES: + SWSS_LOG_ERROR("Encountered SAI_STATUS_INSUFFICIENT_RESOURCES in set operation, task failed, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + return task_failed; + default: + SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + break; + default: + SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + + return task_need_retry; +} + +task_process_status handleSaiRemoveStatus(sai_api_t api, sai_status_t status, void *context) +{ + /* + * This function aims to provide coarse handling of failures in sairedis remove + * operation (i.e., notify users by 
throwing exceptions when failures happen).
+     * Return value: task_success - Handled the status successfully. No need to retry this SAI operation.
+     *               task_need_retry - Cannot handle the status. Need to retry the SAI operation.
+     *               task_failed - Failed to handle the status but another attempt is unlikely to resolve the failure.
+     * TODO: 1. Add general handling logic for specific statuses (e.g., SAI_STATUS_OBJECT_IN_USE,
+     *          SAI_STATUS_ITEM_NOT_FOUND)
+     *       2. Develop fine-grain failure handling mechanisms and replace this coarse handling
+     *          in each orch.
+     *       3. Take the type of sai api into consideration.
+     */
+    switch (api)
+    {
+        case SAI_API_ROUTE:
+            switch (status)
+            {
+                case SAI_STATUS_SUCCESS:
+                    SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiRemoveStatus");
+                    return task_success;
+                case SAI_STATUS_ITEM_NOT_FOUND:
+                case SAI_STATUS_NOT_EXECUTED:
+                    /* When the same route is learned via multiple sources,
+                       there can be a duplicate remove operation. Handle this gracefully */
+                    return task_success;
+                default:
+                    SWSS_LOG_ERROR("Encountered failure in remove operation, exiting orchagent, SAI API: %s, status: %s",
+                                   sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str());
+                    handleSaiFailure(true);
+                    break;
+            }
+            break;
+        case SAI_API_NEIGHBOR:
+        case SAI_API_NEXT_HOP:
+        case SAI_API_NEXT_HOP_GROUP:
+            switch (status)
+            {
+                case SAI_STATUS_SUCCESS:
+                    SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiRemoveStatus");
+                    return task_success;
+                case SAI_STATUS_ITEM_NOT_FOUND:
+                    return task_success;
+                default:
+                    SWSS_LOG_ERROR("Encountered failure in remove operation, exiting orchagent, SAI API: %s, status: %s",
+                                   sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str());
+                    handleSaiFailure(true);
+                    break;
+            }
+            break;
+        default:
+            switch (status)
+            {
+                case SAI_STATUS_SUCCESS:
+                    SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiRemoveStatus");
+                    return task_success;
+                default:
+                    SWSS_LOG_ERROR("Encountered 
failure in remove operation, exiting orchagent, SAI API: %s, status: %s",
+                                   sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str());
+                    handleSaiFailure(true);
+                    break;
+            }
+    }
+    return task_need_retry;
+}
+
+task_process_status handleSaiGetStatus(sai_api_t api, sai_status_t status, void *context)
+{
+    /*
+     * This function aims to provide coarse handling of failures in sairedis get
+     * operation (i.e., notify users by throwing exceptions when failures happen).
+     * Return value: task_success - Handled the status successfully. No need to retry this SAI operation.
+     *               task_need_retry - Cannot handle the status. Need to retry the SAI operation.
+     *               task_failed - Failed to handle the status but another attempt is unlikely to resolve the failure.
+     * TODO: 1. Add general handling logic for specific statuses
+     *       2. Develop fine-grain failure handling mechanisms and replace this coarse handling
+     *          in each orch.
+     *       3. Take the type of sai api into consideration.
+     */
+    switch (status)
+    {
+        case SAI_STATUS_SUCCESS:
+            SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiGetStatus");
+            return task_success;
+        case SAI_STATUS_NOT_IMPLEMENTED:
+            SWSS_LOG_ERROR("Encountered failure in get operation due to the function is not implemented, exiting orchagent, SAI API: %s",
+                           sai_serialize_api(api).c_str());
+            throw std::logic_error("SAI get function not implemented");
+        default:
+            SWSS_LOG_ERROR("Encountered failure in get operation, SAI API: %s, status: %s",
+                           sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str());
+    }
+    return task_failed;
+}
+
+bool parseHandleSaiStatusFailure(task_process_status status)
+{
+    /*
+     * This function parses task process status from SAI failure handling function to whether a retry is needed.
+     * Return value: true - no retry is needed.
+     *               false - retry is needed. 
+ */ + switch (status) + { + case task_need_retry: + return false; + case task_failed: + return true; + default: + SWSS_LOG_WARN("task_process_status %d is not expected in parseHandleSaiStatusFailure", status); + } + return true; +} + +/* Handling SAI failure. Request redis to invoke SAI failure dump and abort if set*/ +void handleSaiFailure(bool abort_on_failure) +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + + attr.id = SAI_REDIS_SWITCH_ATTR_NOTIFY_SYNCD; + attr.value.s32 = SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP; + sai_status_t status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to take sai failure dump %d", status); + } + if (abort_on_failure) + { + abort(); + } +} diff --git a/orchagent/saihelper.h b/orchagent/saihelper.h index a0b2aa2fac..b83f894c2e 100644 --- a/orchagent/saihelper.h +++ b/orchagent/saihelper.h @@ -3,10 +3,19 @@ #include "gearboxutils.h" #include +#include "orch.h" #define IS_ATTR_ID_IN_RANGE(attrId, objectType, attrPrefix) \ ((attrId) >= SAI_ ## objectType ## _ATTR_ ## attrPrefix ## _START && (attrId) <= SAI_ ## objectType ## _ATTR_ ## attrPrefix ## _END) void initSaiApi(); -void initSaiRedis(const std::string &record_location, const std::string &record_filename); +void initSaiRedis(); sai_status_t initSaiPhyApi(swss::gearbox_phy_t *phy); + +/* Handling SAI status*/ +task_process_status handleSaiCreateStatus(sai_api_t api, sai_status_t status, void *context = nullptr); +task_process_status handleSaiSetStatus(sai_api_t api, sai_status_t status, void *context = nullptr); +task_process_status handleSaiRemoveStatus(sai_api_t api, sai_status_t status, void *context = nullptr); +task_process_status handleSaiGetStatus(sai_api_t api, sai_status_t status, void *context = nullptr); +bool parseHandleSaiStatusFailure(task_process_status status); +void handleSaiFailure(bool abort_on_failure); diff --git a/orchagent/sfloworch.cpp b/orchagent/sfloworch.cpp index 
ac76d23004..2ec367b412 100644 --- a/orchagent/sfloworch.cpp +++ b/orchagent/sfloworch.cpp @@ -83,7 +83,7 @@ bool SflowOrch::sflowUpdateRate(sai_object_id_t port_id, uint32_t rate) if (port_info->second.admin_state) { - if (!sflowAddPort(new_session.m_sample_id, port_id)) + if (!sflowAddPort(new_session.m_sample_id, port_id, port_info->second.m_sample_dir)) { return false; } @@ -107,49 +107,155 @@ bool SflowOrch::sflowUpdateRate(sai_object_id_t port_id, uint32_t rate) return true; } -bool SflowOrch::sflowAddPort(sai_object_id_t sample_id, sai_object_id_t port_id) +bool SflowOrch::sflowAddPort(sai_object_id_t sample_id, sai_object_id_t port_id, string direction) { sai_attribute_t attr; sai_status_t sai_rc; - attr.id = SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE; - attr.value.oid = sample_id; - sai_rc = sai_port_api->set_port_attribute(port_id, &attr); + SWSS_LOG_DEBUG("sflowAddPort %" PRIx64 " portOid %" PRIx64 " dir %s", + sample_id, port_id, direction.c_str()); - if (sai_rc != SAI_STATUS_SUCCESS) + if (direction == "both" || direction == "rx") { - SWSS_LOG_ERROR("Failed to set session %" PRIx64 " on port %" PRIx64 , sample_id, port_id); - task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, sai_rc); - if (handle_status != task_success) + attr.id = SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE; + attr.value.oid = sample_id; + sai_rc = sai_port_api->set_port_attribute(port_id, &attr); + + if (sai_rc != SAI_STATUS_SUCCESS) { - return parseHandleSaiStatusFailure(handle_status); + SWSS_LOG_ERROR("Failed to set session %" PRIx64 " on port %" PRIx64, sample_id, port_id); + task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, sai_rc); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + } + + if (direction == "both" || direction == "tx") + { + attr.id = SAI_PORT_ATTR_EGRESS_SAMPLEPACKET_ENABLE; + attr.value.oid = sample_id; + sai_rc = sai_port_api->set_port_attribute(port_id, &attr); + + if 
(sai_rc != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set session %" PRIx64 " on port %" PRIx64, sample_id, port_id); + task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, sai_rc); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + } + return true; +} + +bool SflowOrch::sflowDelPort(sai_object_id_t port_id, string direction) +{ + sai_attribute_t attr; + sai_status_t sai_rc; + + SWSS_LOG_DEBUG("sflowDelPort portOid %" PRIx64 " dir %s", + port_id, direction.c_str()); + + if (direction == "both" || direction == "rx") + { + attr.id = SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE; + attr.value.oid = SAI_NULL_OBJECT_ID; + sai_rc = sai_port_api->set_port_attribute(port_id, &attr); + + if (sai_rc != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to delete session on port %" PRIx64, port_id); + task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, sai_rc); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + } + + if (direction == "both" || direction == "tx") + { + attr.id = SAI_PORT_ATTR_EGRESS_SAMPLEPACKET_ENABLE; + attr.value.oid = SAI_NULL_OBJECT_ID; + sai_rc = sai_port_api->set_port_attribute(port_id, &attr); + + if (sai_rc != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to delete session on port %" PRIx64, port_id); + task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, sai_rc); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } } } return true; } -bool SflowOrch::sflowDelPort(sai_object_id_t port_id) +bool SflowOrch::sflowUpdateSampleDirection(sai_object_id_t port_id, string old_dir, string new_dir) { + sai_object_id_t ing_sample_oid = SAI_NULL_OBJECT_ID; + sai_object_id_t egr_sample_oid = SAI_NULL_OBJECT_ID; sai_attribute_t attr; sai_status_t sai_rc; + auto port_info = m_sflowPortInfoMap.find(port_id); + + SWSS_LOG_DEBUG("sflowUpdateSampleDirection portOid %" 
PRIx64 " old dir %s new dir %s", + port_id, old_dir.c_str(), new_dir.c_str()); + + if ((new_dir == "tx") && (old_dir == "rx" || old_dir == "both")) + { + ing_sample_oid = SAI_NULL_OBJECT_ID; + egr_sample_oid = port_info->second.m_sample_id; + } + + if ((new_dir == "rx") && (old_dir == "tx" || old_dir == "both")) + { + ing_sample_oid = port_info->second.m_sample_id; + egr_sample_oid = SAI_NULL_OBJECT_ID; + } + + if ((new_dir == "both") && (old_dir == "tx" || old_dir == "rx")) + { + ing_sample_oid = port_info->second.m_sample_id; + egr_sample_oid = port_info->second.m_sample_id; + } attr.id = SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE; - attr.value.oid = SAI_NULL_OBJECT_ID; + attr.value.oid = ing_sample_oid; sai_rc = sai_port_api->set_port_attribute(port_id, &attr); if (sai_rc != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to delete session on port %" PRIx64 , port_id); + SWSS_LOG_ERROR("Failed to Ingress session on port %" PRIx64, port_id); task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, sai_rc); if (handle_status != task_success) { return parseHandleSaiStatusFailure(handle_status); } } + + attr.id = SAI_PORT_ATTR_EGRESS_SAMPLEPACKET_ENABLE; + attr.value.oid = egr_sample_oid; + sai_rc = sai_port_api->set_port_attribute(port_id, &attr); + + if (sai_rc != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to Update Egress session on port %" PRIx64, port_id); + task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, sai_rc); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + return true; } -void SflowOrch::sflowExtractInfo(vector &fvs, bool &admin, uint32_t &rate) +void SflowOrch::sflowExtractInfo(vector &fvs, bool &admin, uint32_t &rate, string &dir) { for (auto i : fvs) { @@ -175,6 +281,13 @@ void SflowOrch::sflowExtractInfo(vector &fvs, bool &admin, uint rate = 0; } } + else if (fvField(i) == "sample_direction") + { + if (fvValue(i) != "error") + { + dir = fvValue(i); + } + } } } 
@@ -187,10 +300,11 @@ void SflowOrch::sflowStatusSet(Consumer &consumer) auto tuple = it->second; string op = kfvOp(tuple); uint32_t rate = 0; + string dir = ""; if (op == SET_COMMAND) { - sflowExtractInfo(kfvFieldsValues(tuple), m_sflowStatus, rate); + sflowExtractInfo(kfvFieldsValues(tuple), m_sflowStatus, rate, dir); } else if (op == DEL_COMMAND) { @@ -221,7 +335,7 @@ bool SflowOrch::handleSflowSessionDel(sai_object_id_t port_id) uint32_t rate = sflowSessionGetRate(sflowInfo->second.m_sample_id); if (sflowInfo->second.admin_state) { - if (!sflowDelPort(port_id)) + if (!sflowDelPort(port_id, sflowInfo->second.m_sample_dir)) { return false; } @@ -270,6 +384,7 @@ void SflowOrch::doTask(Consumer &consumer) { bool admin_state = m_sflowStatus; uint32_t rate = 0; + string dir = "rx"; if (!m_sflowStatus) { @@ -282,7 +397,15 @@ void SflowOrch::doTask(Consumer &consumer) admin_state = sflowInfo->second.admin_state; } - sflowExtractInfo(kfvFieldsValues(tuple), admin_state, rate); + SWSS_LOG_DEBUG(" Existing Cfg portOid %" PRIx64 " admin %d rate %d dir %s", + port.m_port_id, (unsigned int)admin_state, rate, + sflowInfo->second.m_sample_dir.c_str()); + + sflowExtractInfo(kfvFieldsValues(tuple), admin_state, rate, dir); + + SWSS_LOG_DEBUG("New Cfg portOid %" PRIx64 " admin %d rate %d dir %s", + port.m_port_id, (unsigned int)admin_state, rate, dir.c_str()); + if (sflowInfo == m_sflowPortInfoMap.end()) { if (rate == 0) @@ -308,9 +431,11 @@ void SflowOrch::doTask(Consumer &consumer) m_sflowRateSampleMap[rate] = session; port_info.m_sample_id = session.m_sample_id; } + port_info.m_sample_dir = dir; + if (admin_state) { - if (!sflowAddPort(port_info.m_sample_id, port.m_port_id)) + if (!sflowAddPort(port_info.m_sample_id, port.m_port_id, port_info.m_sample_dir)) { it++; continue; @@ -335,11 +460,12 @@ void SflowOrch::doTask(Consumer &consumer) bool ret = false; if (admin_state) { - ret = sflowAddPort(sflowInfo->second.m_sample_id, port.m_port_id); + ret = 
sflowAddPort(sflowInfo->second.m_sample_id, port.m_port_id, + sflowInfo->second.m_sample_dir); } else { - ret = sflowDelPort(port.m_port_id); + ret = sflowDelPort(port.m_port_id, sflowInfo->second.m_sample_dir); } if (!ret) { @@ -348,6 +474,17 @@ void SflowOrch::doTask(Consumer &consumer) } sflowInfo->second.admin_state = admin_state; } + + if (dir != sflowInfo->second.m_sample_dir) + { + string old_dir = sflowInfo->second.m_sample_dir; + if (!sflowUpdateSampleDirection(port.m_port_id, old_dir, dir)) + { + it++; + continue; + } + sflowInfo->second.m_sample_dir = dir; + } } } else if (op == DEL_COMMAND) diff --git a/orchagent/sfloworch.h b/orchagent/sfloworch.h index 04a5c9d650..508b22c0aa 100644 --- a/orchagent/sfloworch.h +++ b/orchagent/sfloworch.h @@ -10,6 +10,7 @@ struct SflowPortInfo { bool admin_state; + string m_sample_dir; sai_object_id_t m_sample_id; }; @@ -38,11 +39,12 @@ class SflowOrch : public Orch virtual void doTask(Consumer& consumer); bool sflowCreateSession(uint32_t rate, SflowSession &session); bool sflowDestroySession(SflowSession &session); - bool sflowAddPort(sai_object_id_t sample_id, sai_object_id_t port_id); - bool sflowDelPort(sai_object_id_t port_id); + bool sflowAddPort(sai_object_id_t sample_id, sai_object_id_t port_id, string direction); + bool sflowDelPort(sai_object_id_t port_id, string direction); void sflowStatusSet(Consumer &consumer); bool sflowUpdateRate(sai_object_id_t port_id, uint32_t rate); + bool sflowUpdateSampleDirection(sai_object_id_t port_id, string old_dir, string new_dir); uint32_t sflowSessionGetRate(sai_object_id_t sample_id); bool handleSflowSessionDel(sai_object_id_t port_id); - void sflowExtractInfo(std::vector &fvs, bool &admin, uint32_t &rate); + void sflowExtractInfo(std::vector &fvs, bool &admin, uint32_t &rate, string &dir); }; diff --git a/orchagent/srv6orch.cpp b/orchagent/srv6orch.cpp index 3d81163b2a..d1177cddc2 100644 --- a/orchagent/srv6orch.cpp +++ b/orchagent/srv6orch.cpp @@ -10,6 +10,8 @@ using 
namespace std; using namespace swss; +#define ADJ_DELIMITER ',' + extern sai_object_id_t gSwitchId; extern sai_object_id_t gVirtualRouterId; extern sai_object_id_t gUnderlayIfId; @@ -52,6 +54,14 @@ const map end_flavor_map = {"ua", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD} }; +const map sidlist_type_map = +{ + {"insert", SAI_SRV6_SIDLIST_TYPE_INSERT}, + {"insert.red", SAI_SRV6_SIDLIST_TYPE_INSERT_RED}, + {"encaps", SAI_SRV6_SIDLIST_TYPE_ENCAPS}, + {"encaps.red", SAI_SRV6_SIDLIST_TYPE_ENCAPS_RED} +}; + void Srv6Orch::srv6TunnelUpdateNexthops(const string srv6_source, const NextHopKey nhkey, bool insert) { if (insert) @@ -267,7 +277,7 @@ bool Srv6Orch::srv6Nexthops(const NextHopGroupKey &nhgKey, sai_object_id_t &next return true; } -bool Srv6Orch::createUpdateSidList(const string sid_name, const string sid_list) +bool Srv6Orch::createUpdateSidList(const string sid_name, const string sid_list, const string sidlist_type) { SWSS_LOG_ENTER(); bool exists = (sid_table_.find(sid_name) != sid_table_.end()); @@ -303,7 +313,16 @@ bool Srv6Orch::createUpdateSidList(const string sid_name, const string sid_list) attributes.push_back(attr); attr.id = SAI_SRV6_SIDLIST_ATTR_TYPE; - attr.value.s32 = SAI_SRV6_SIDLIST_TYPE_ENCAPS_RED; + if (sidlist_type_map.find(sidlist_type) == sidlist_type_map.end()) + { + SWSS_LOG_INFO("Use default sidlist type: ENCAPS_RED"); + attr.value.s32 = SAI_SRV6_SIDLIST_TYPE_ENCAPS_RED; + } + else + { + SWSS_LOG_INFO("sidlist type: %s", sidlist_type.c_str()); + attr.value.s32 = sidlist_type_map.at(sidlist_type); + } attributes.push_back(attr); status = sai_srv6_api->create_srv6_sidlist(&segment_oid, gSwitchId, (uint32_t) attributes.size(), attributes.data()); if (status != SAI_STATUS_SUCCESS) @@ -365,7 +384,7 @@ void Srv6Orch::doTaskSidTable(const KeyOpFieldsValuesTuple & tuple) SWSS_LOG_ENTER(); string sid_name = kfvKey(tuple); string op = kfvOp(tuple); - string sid_list; + string sid_list, sidlist_type; for (auto i : kfvFieldsValues(tuple)) { 
@@ -373,10 +392,14 @@ void Srv6Orch::doTaskSidTable(const KeyOpFieldsValuesTuple & tuple) { sid_list = fvValue(i); } + if (fvField(i) == "type") + { + sidlist_type = fvValue(i); + } } if (op == SET_COMMAND) { - if (!createUpdateSidList(sid_name, sid_list)) + if (!createUpdateSidList(sid_name, sid_list, sidlist_type)) { SWSS_LOG_ERROR("Failed to process sid %s", sid_name.c_str()); } @@ -401,6 +424,175 @@ bool Srv6Orch::mySidExists(string my_sid_string) return false; } +/* + * Neighbor change notification to be processed for the SRv6 MySID entries + * + * In summary, this function handles both add and delete neighbor notifications + * + * When a neighbor ADD notification is received, we do the following steps: + * - We walk through the list of pending SRv6 MySID entries that are waiting for this neighbor to be ready + * - For each SID, we install the SID into the ASIC + * - We remove the SID from the pending MySID entries list + * + * When a neighbor DELETE notification is received, we do the following steps: + * - We walk through the list of pending SRv6 MySID entries installed in the ASIC + * - For each SID, we remove the SID from the ASIC + * - We add the SID to the pending MySID entries list + */ +void Srv6Orch::updateNeighbor(const NeighborUpdate& update) +{ + SWSS_LOG_ENTER(); + + /* Check if the received notification is a neighbor add or a neighbor delete */ + if (update.add) + { + /* + * It's a neighbor add notification, let's walk through the list of SRv6 MySID entries + * that are waiting for that neighbor to be ready, and install them into the ASIC. + */ + + SWSS_LOG_INFO("Neighbor ADD event: %s alias '%s', installing pending SRv6 SIDs", + update.entry.ip_address.to_string().c_str(), update.entry.alias.c_str()); + + auto it = m_pendingSRv6MySIDEntries.find(NextHopKey(update.entry.ip_address.to_string(), update.entry.alias)); + if (it == m_pendingSRv6MySIDEntries.end()) + { + /* No SID is waiting for this neighbor. 
Nothing to do */ + return; + } + auto &nexthop_key = it->first; + auto &pending_my_sid_entries = it->second; + + for (auto iter = pending_my_sid_entries.begin(); iter != pending_my_sid_entries.end();) + { + string my_sid_string = get<0>(*iter); + const string dt_vrf = get<1>(*iter); + const string adj = get<2>(*iter); + const string end_action = get<3>(*iter); + + SWSS_LOG_INFO("Creating SID %s, action %s, vrf %s, adj %s", my_sid_string.c_str(), end_action.c_str(), dt_vrf.c_str(), adj.c_str()); + + if(!createUpdateMysidEntry(my_sid_string, dt_vrf, adj, end_action)) + { + SWSS_LOG_ERROR("Failed to create/update my_sid entry for sid %s", my_sid_string.c_str()); + ++iter; + continue; + } + + SWSS_LOG_INFO("SID %s created successfully", my_sid_string.c_str()); + + iter = pending_my_sid_entries.erase(iter); + } + + if (pending_my_sid_entries.size() == 0) + { + m_pendingSRv6MySIDEntries.erase(nexthop_key); + } + } + else + { + /* + * It's a neighbor delete notification, let's uninstall the SRv6 MySID entries associated with that + * nexthop from the ASIC, and add them to the SRv6 MySID entries pending set. + */ + + SWSS_LOG_INFO("Neighbor DELETE event: %s alias '%s', removing associated SRv6 SIDs", + update.entry.ip_address.to_string().c_str(), update.entry.alias.c_str()); + + for (auto it = srv6_my_sid_table_.begin(); it != srv6_my_sid_table_.end();) + { + /* Skip SIDs that are not associated with a L3 Adjacency */ + if (it->second.endAdjString.empty()) + { + ++it; + continue; + } + + try + { + /* Skip SIDs that are not associated with this neighbor */ + if (IpAddress(it->second.endAdjString) != update.entry.ip_address) + { + ++it; + continue; + } + } + catch (const std::invalid_argument &e) + { + /* SRv6 SID is associated with an invalid L3 Adjacency IP address, skipping */ + ++it; + continue; + } + + /* + * Save SID entry information to temp variables, before removing the SID. + * This information will be consumed used later. 
+ */ + string my_sid_string = it->first; + const string dt_vrf = it->second.endVrfString; + const string adj = it->second.endAdjString; + string end_action; + for (auto iter = end_behavior_map.begin(); iter != end_behavior_map.end(); iter++) + { + if (iter->second == it->second.endBehavior) + { + end_action = iter->first; + break; + } + } + + /* Skip SIDs with unknown SRv6 behavior */ + if (end_action.empty()) + { + ++it; + continue; + } + + SWSS_LOG_INFO("Removing SID %s, action %s, vrf %s, adj %s", my_sid_string.c_str(), dt_vrf.c_str(), adj.c_str(), end_action.c_str()); + + /* Let's delete the SID from the ASIC */ + unordered_map::iterator tmp = it; + ++tmp; + if(!deleteMysidEntry(it->first)) + { + SWSS_LOG_ERROR("Failed to delete my_sid entry for sid %s", it->first.c_str()); + ++it; + continue; + } + it = tmp; + + SWSS_LOG_INFO("SID %s removed successfully", my_sid_string.c_str()); + + /* + * Finally, add the SID to the pending MySID entries set, so that we can re-install it + * when the neighbor comes back + */ + auto pending_mysid_entry = make_tuple(my_sid_string, dt_vrf, adj, end_action); + m_pendingSRv6MySIDEntries[NextHopKey(update.entry.ip_address.to_string(), update.entry.alias)].insert(pending_mysid_entry); + } + } +} + +void Srv6Orch::update(SubjectType type, void *cntx) +{ + SWSS_LOG_ENTER(); + + assert(cntx); + + switch(type) { + case SUBJECT_TYPE_NEIGH_CHANGE: + { + NeighborUpdate *update = static_cast(cntx); + updateNeighbor(*update); + break; + } + default: + // Received update in which we are not interested + // Ignore it + return; + } +} + bool Srv6Orch::sidEntryEndpointBehavior(string action, sai_my_sid_entry_endpoint_behavior_t &end_behavior, sai_my_sid_entry_endpoint_behavior_flavor_t &end_flavor) { @@ -431,7 +623,23 @@ bool Srv6Orch::mySidVrfRequired(const sai_my_sid_entry_endpoint_behavior_t end_b return false; } -bool Srv6Orch::createUpdateMysidEntry(string my_sid_string, const string dt_vrf, const string end_action) +bool 
Srv6Orch::mySidNextHopRequired(const sai_my_sid_entry_endpoint_behavior_t end_behavior) +{ + if (end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_X || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX4 || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX6 || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_ENCAPS || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_ENCAPS_RED || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_INSERT || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_INSERT_RED || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UA) + { + return true; + } + return false; +} + +bool Srv6Orch::createUpdateMysidEntry(string my_sid_string, const string dt_vrf, const string adj, const string end_action) { SWSS_LOG_ENTER(); vector attributes; @@ -469,9 +677,9 @@ bool Srv6Orch::createUpdateMysidEntry(string my_sid_string, const string dt_vrf, my_sid_entry = srv6_my_sid_table_[key_string].entry; } - SWSS_LOG_INFO("MySid: sid %s, action %s, vrf %s, block %d, node %d, func %d, arg %d dt_vrf %s", + SWSS_LOG_INFO("MySid: sid %s, action %s, vrf %s, block %d, node %d, func %d, arg %d dt_vrf %s, adj %s", my_sid_string.c_str(), end_action.c_str(), dt_vrf.c_str(),my_sid_entry.locator_block_len, my_sid_entry.locator_node_len, - my_sid_entry.function_len, my_sid_entry.args_len, dt_vrf.c_str()); + my_sid_entry.function_len, my_sid_entry.args_len, dt_vrf.c_str(), adj.c_str()); if (sidEntryEndpointBehavior(end_action, end_behavior, end_flavor) != true) { @@ -484,7 +692,11 @@ bool Srv6Orch::createUpdateMysidEntry(string my_sid_string, const string dt_vrf, { sai_object_id_t dt_vrf_id; SWSS_LOG_INFO("DT VRF name %s", dt_vrf.c_str()); - if (m_vrfOrch->isVRFexists(dt_vrf)) + if (dt_vrf == "default") + { + dt_vrf_id = gVirtualRouterId; + } + else if (m_vrfOrch->isVRFexists(dt_vrf)) { SWSS_LOG_INFO("VRF %s exists in DB", dt_vrf.c_str()); dt_vrf_id = m_vrfOrch->getVRFid(dt_vrf); @@ -504,6 +716,47 @@ bool 
Srv6Orch::createUpdateMysidEntry(string my_sid_string, const string dt_vrf, attributes.push_back(vrf_attr); vrf_update = true; } + sai_attribute_t nh_attr; + NextHopKey nexthop; + bool nh_update = false; + if (mySidNextHopRequired(end_behavior)) + { + sai_object_id_t next_hop_id; + + vector adjv = tokenize(adj, ADJ_DELIMITER); + if (adjv.size() > 1) + { + SWSS_LOG_ERROR("Failed to create my_sid entry %s adj %s: ECMP adjacency not yet supported", key_string.c_str(), adj.c_str()); + return false; + } + + nexthop = NextHopKey(adj); + SWSS_LOG_INFO("Adjacency %s", adj.c_str()); + if (m_neighOrch->hasNextHop(nexthop)) + { + SWSS_LOG_INFO("Nexthop for adjacency %s exists in DB", adj.c_str()); + next_hop_id = m_neighOrch->getNextHopId(nexthop); + if(next_hop_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_INFO("Failed to get nexthop for adjacency %s", adj.c_str()); + SWSS_LOG_INFO("Nexthop for adjacency %s doesn't exist in DB yet", adj.c_str()); + auto pending_mysid_entry = make_tuple(key_string, dt_vrf, adj, end_action); + m_pendingSRv6MySIDEntries[nexthop].insert(pending_mysid_entry); + return false; + } + } + else + { + SWSS_LOG_INFO("Nexthop for adjacency %s doesn't exist in DB yet", adj.c_str()); + auto pending_mysid_entry = make_tuple(key_string, dt_vrf, adj, end_action); + m_pendingSRv6MySIDEntries[nexthop].insert(pending_mysid_entry); + return false; + } + nh_attr.id = SAI_MY_SID_ENTRY_ATTR_NEXT_HOP_ID; + nh_attr.value.oid = next_hop_id; + attributes.push_back(nh_attr); + nh_update = true; + } attr.id = SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR; attr.value.s32 = end_behavior; attributes.push_back(attr); @@ -534,6 +787,15 @@ bool Srv6Orch::createUpdateMysidEntry(string my_sid_string, const string dt_vrf, return false; } } + if (nh_update) + { + status = sai_srv6_api->set_my_sid_entry_attribute(&my_sid_entry, &nh_attr); + if(status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to update nexthop to my_sid_entry %s, rv %d", key_string.c_str(), status); + return false; + 
} + } } SWSS_LOG_INFO("Store keystring %s in cache", key_string.c_str()); if(vrf_update) @@ -541,6 +803,15 @@ bool Srv6Orch::createUpdateMysidEntry(string my_sid_string, const string dt_vrf, m_vrfOrch->increaseVrfRefCount(dt_vrf); srv6_my_sid_table_[key_string].endVrfString = dt_vrf; } + if(nh_update) + { + m_neighOrch->increaseNextHopRefCount(nexthop, 1); + + SWSS_LOG_INFO("Increasing refcount to %d for Nexthop %s", + m_neighOrch->getNextHopRefCount(nexthop), nexthop.to_string(false,true).c_str()); + + srv6_my_sid_table_[key_string].endAdjString = adj; + } srv6_my_sid_table_[key_string].endBehavior = end_behavior; srv6_my_sid_table_[key_string].entry = my_sid_entry; @@ -571,6 +842,15 @@ bool Srv6Orch::deleteMysidEntry(const string my_sid_string) { m_vrfOrch->decreaseVrfRefCount(srv6_my_sid_table_[my_sid_string].endVrfString); } + /* Decrease NextHop refcount */ + if (mySidNextHopRequired(srv6_my_sid_table_[my_sid_string].endBehavior)) + { + NextHopKey nexthop = NextHopKey(srv6_my_sid_table_[my_sid_string].endAdjString); + m_neighOrch->decreaseNextHopRefCount(nexthop, 1); + + SWSS_LOG_INFO("Decreasing refcount to %d for Nexthop %s", + m_neighOrch->getNextHopRefCount(nexthop), nexthop.to_string(false,true).c_str()); + } srv6_my_sid_table_.erase(my_sid_string); return true; } @@ -579,7 +859,7 @@ void Srv6Orch::doTaskMySidTable(const KeyOpFieldsValuesTuple & tuple) { SWSS_LOG_ENTER(); string op = kfvOp(tuple); - string end_action, dt_vrf; + string end_action, dt_vrf, adj; /* Key for mySid : block_len:node_len:function_len:args_len:sid-ip */ string keyString = kfvKey(tuple); @@ -594,10 +874,14 @@ void Srv6Orch::doTaskMySidTable(const KeyOpFieldsValuesTuple & tuple) { dt_vrf = fvValue(i); } + if(fvField(i) == "adj") + { + adj = fvValue(i); + } } if (op == SET_COMMAND) { - if(!createUpdateMysidEntry(keyString, dt_vrf, end_action)) + if(!createUpdateMysidEntry(keyString, dt_vrf, adj, end_action)) { SWSS_LOG_ERROR("Failed to create/update my_sid entry for sid %s", 
keyString.c_str()); return; diff --git a/orchagent/srv6orch.h b/orchagent/srv6orch.h index 989737a998..a3e39b5632 100644 --- a/orchagent/srv6orch.h +++ b/orchagent/srv6orch.h @@ -43,6 +43,7 @@ struct MySidEntry sai_my_sid_entry_t entry; sai_my_sid_entry_endpoint_behavior_t endBehavior; string endVrfString; // Used for END.T, END.DT4, END.DT6 and END.DT46, + string endAdjString; // Used for END.X, END.DX4, END.DX6 }; typedef unordered_map SidTable; @@ -52,7 +53,7 @@ typedef unordered_map Srv6MySidTable; #define SID_LIST_DELIMITER ',' #define MY_SID_KEY_DELIMITER ':' -class Srv6Orch : public Orch +class Srv6Orch : public Orch, public Observer { public: Srv6Orch(DBConnector *applDb, vector &tableNames, SwitchOrch *switchOrch, VRFOrch *vrfOrch, NeighOrch *neighOrch): @@ -63,10 +64,11 @@ class Srv6Orch : public Orch m_sidTable(applDb, APP_SRV6_SID_LIST_TABLE_NAME), m_mysidTable(applDb, APP_SRV6_MY_SID_TABLE_NAME) { + m_neighOrch->attach(this); } ~Srv6Orch() { - + m_neighOrch->detach(this); } bool srv6Nexthops(const NextHopGroupKey &nextHops, sai_object_id_t &next_hop_id); bool removeSrv6Nexthops(const NextHopGroupKey &nhg); @@ -76,20 +78,22 @@ class Srv6Orch : public Orch void doTask(Consumer &consumer); void doTaskSidTable(const KeyOpFieldsValuesTuple &tuple); void doTaskMySidTable(const KeyOpFieldsValuesTuple &tuple); - bool createUpdateSidList(const string seg_name, const string ips); + bool createUpdateSidList(const string seg_name, const string ips, const string sidlist_type); bool deleteSidList(const string seg_name); bool createSrv6Tunnel(const string srv6_source); bool createSrv6Nexthop(const NextHopKey &nh); bool srv6NexthopExists(const NextHopKey &nh); - bool createUpdateMysidEntry(string my_sid_string, const string vrf, const string end_action); + bool createUpdateMysidEntry(string my_sid_string, const string vrf, const string adj, const string end_action); bool deleteMysidEntry(const string my_sid_string); bool sidEntryEndpointBehavior(const string action, 
sai_my_sid_entry_endpoint_behavior_t &end_behavior, sai_my_sid_entry_endpoint_behavior_flavor_t &end_flavor); bool mySidExists(const string mysid_string); bool mySidVrfRequired(const sai_my_sid_entry_endpoint_behavior_t end_behavior); + bool mySidNextHopRequired(const sai_my_sid_entry_endpoint_behavior_t end_behavior); void srv6TunnelUpdateNexthops(const string srv6_source, const NextHopKey nhkey, bool insert); size_t srv6TunnelNexthopSize(const string srv6_source); + void updateNeighbor(const NeighborUpdate& update); ProducerStateTable m_sidTable; ProducerStateTable m_mysidTable; @@ -100,6 +104,15 @@ class Srv6Orch : public Orch VRFOrch *m_vrfOrch; SwitchOrch *m_switchOrch; NeighOrch *m_neighOrch; + + /* + * Map to store the SRv6 MySID entries not yet configured in ASIC because associated to a non-ready nexthop + * + * Key: nexthop + * Value: a set of SID entries that are waiting for the nexthop to be ready + * each SID entry is encoded as a tuple + */ + map>> m_pendingSRv6MySIDEntries; }; #endif // SWSS_SRV6ORCH_H diff --git a/orchagent/switch/switch_capabilities.cpp b/orchagent/switch/switch_capabilities.cpp new file mode 100644 index 0000000000..d1f191bf39 --- /dev/null +++ b/orchagent/switch/switch_capabilities.cpp @@ -0,0 +1,613 @@ +// includes ----------------------------------------------------------------------------------------------------------- + +extern "C" { +#include +#include +#include +#include +#include +} + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "switch_schema.h" +#include "switch_capabilities.h" + +using namespace swss; + +// defines ------------------------------------------------------------------------------------------------------------ + +#define SWITCH_CAPABILITY_HASH_NATIVE_HASH_FIELD_LIST_FIELD "HASH|NATIVE_HASH_FIELD_LIST" + +#define SWITCH_CAPABILITY_ECMP_HASH_CAPABLE_FIELD "ECMP_HASH_CAPABLE" +#define SWITCH_CAPABILITY_LAG_HASH_CAPABLE_FIELD "LAG_HASH_CAPABLE" 
+ +#define SWITCH_CAPABILITY_ECMP_HASH_ALGORITHM_FIELD "ECMP_HASH_ALGORITHM" +#define SWITCH_CAPABILITY_ECMP_HASH_ALGORITHM_CAPABLE_FIELD "ECMP_HASH_ALGORITHM_CAPABLE" +#define SWITCH_CAPABILITY_LAG_HASH_ALGORITHM_FIELD "LAG_HASH_ALGORITHM" +#define SWITCH_CAPABILITY_LAG_HASH_ALGORITHM_CAPABLE_FIELD "LAG_HASH_ALGORITHM_CAPABLE" + +#define SWITCH_CAPABILITY_KEY "switch" + +#define SWITCH_STATE_DB_NAME "STATE_DB" +#define SWITCH_STATE_DB_TIMEOUT 0 + +// constants ---------------------------------------------------------------------------------------------------------- + +static const std::unordered_map swHashHashFieldMap = +{ + { SAI_NATIVE_HASH_FIELD_IN_PORT, SWITCH_HASH_FIELD_IN_PORT }, + { SAI_NATIVE_HASH_FIELD_DST_MAC, SWITCH_HASH_FIELD_DST_MAC }, + { SAI_NATIVE_HASH_FIELD_SRC_MAC, SWITCH_HASH_FIELD_SRC_MAC }, + { SAI_NATIVE_HASH_FIELD_ETHERTYPE, SWITCH_HASH_FIELD_ETHERTYPE }, + { SAI_NATIVE_HASH_FIELD_VLAN_ID, SWITCH_HASH_FIELD_VLAN_ID }, + { SAI_NATIVE_HASH_FIELD_IP_PROTOCOL, SWITCH_HASH_FIELD_IP_PROTOCOL }, + { SAI_NATIVE_HASH_FIELD_DST_IP, SWITCH_HASH_FIELD_DST_IP }, + { SAI_NATIVE_HASH_FIELD_SRC_IP, SWITCH_HASH_FIELD_SRC_IP }, + { SAI_NATIVE_HASH_FIELD_L4_DST_PORT, SWITCH_HASH_FIELD_L4_DST_PORT }, + { SAI_NATIVE_HASH_FIELD_L4_SRC_PORT, SWITCH_HASH_FIELD_L4_SRC_PORT }, + { SAI_NATIVE_HASH_FIELD_INNER_DST_MAC, SWITCH_HASH_FIELD_INNER_DST_MAC }, + { SAI_NATIVE_HASH_FIELD_INNER_SRC_MAC, SWITCH_HASH_FIELD_INNER_SRC_MAC }, + { SAI_NATIVE_HASH_FIELD_INNER_ETHERTYPE, SWITCH_HASH_FIELD_INNER_ETHERTYPE }, + { SAI_NATIVE_HASH_FIELD_INNER_IP_PROTOCOL, SWITCH_HASH_FIELD_INNER_IP_PROTOCOL }, + { SAI_NATIVE_HASH_FIELD_INNER_DST_IP, SWITCH_HASH_FIELD_INNER_DST_IP }, + { SAI_NATIVE_HASH_FIELD_INNER_SRC_IP, SWITCH_HASH_FIELD_INNER_SRC_IP }, + { SAI_NATIVE_HASH_FIELD_INNER_L4_DST_PORT, SWITCH_HASH_FIELD_INNER_L4_DST_PORT }, + { SAI_NATIVE_HASH_FIELD_INNER_L4_SRC_PORT, SWITCH_HASH_FIELD_INNER_L4_SRC_PORT } +}; + +static const std::unordered_map swHashAlgorithmMap = +{ + { 
SAI_HASH_ALGORITHM_CRC, SWITCH_HASH_ALGORITHM_CRC }, + { SAI_HASH_ALGORITHM_XOR, SWITCH_HASH_ALGORITHM_XOR }, + { SAI_HASH_ALGORITHM_RANDOM, SWITCH_HASH_ALGORITHM_RANDOM }, + { SAI_HASH_ALGORITHM_CRC_32LO, SWITCH_HASH_ALGORITHM_CRC_32LO }, + { SAI_HASH_ALGORITHM_CRC_32HI, SWITCH_HASH_ALGORITHM_CRC_32HI }, + { SAI_HASH_ALGORITHM_CRC_CCITT, SWITCH_HASH_ALGORITHM_CRC_CCITT }, + { SAI_HASH_ALGORITHM_CRC_XOR, SWITCH_HASH_ALGORITHM_CRC_XOR } +}; + +// variables ---------------------------------------------------------------------------------------------------------- + +extern sai_object_id_t gSwitchId; + +// functions ---------------------------------------------------------------------------------------------------------- + +static std::string toStr(sai_object_type_t objType, sai_attr_id_t attrId) +{ + const auto *meta = sai_metadata_get_attr_metadata(objType, attrId); + + return meta != nullptr ? meta->attridname : "UNKNOWN"; +} + +static std::string toStr(sai_native_hash_field_t value) +{ + const auto *name = sai_metadata_get_native_hash_field_name(value); + + return name != nullptr ? name : "UNKNOWN"; +} + +static std::string toStr(sai_hash_algorithm_t value) +{ + const auto *name = sai_metadata_get_hash_algorithm_name(value); + + return name != nullptr ? name : "UNKNOWN"; +} + +static std::string toStr(const std::set &value) +{ + std::vector strList; + + for (const auto &cit1 : value) + { + const auto &cit2 = swHashHashFieldMap.find(cit1); + if (cit2 != swHashHashFieldMap.cend()) + { + strList.push_back(cit2->second); + } + } + + return join(",", strList.cbegin(), strList.cend()); +} + +static std::string toStr(const std::set &value) +{ + std::vector strList; + + for (const auto &cit1 : value) + { + const auto &cit2 = swHashAlgorithmMap.find(cit1); + if (cit2 != swHashAlgorithmMap.cend()) + { + strList.push_back(cit2->second); + } + } + + return join(",", strList.cbegin(), strList.cend()); +} + +static std::string toStr(bool value) +{ + return value ? 
"true" : "false"; +} + +template +static void insertBack(T1 &out, const T2 &in) +{ + out.insert(out.end(), in.cbegin(), in.cend()); +} + +// Switch capabilities ------------------------------------------------------------------------------------------------ + +DBConnector SwitchCapabilities::stateDb(SWITCH_STATE_DB_NAME, SWITCH_STATE_DB_TIMEOUT); +Table SwitchCapabilities::capTable(&stateDb, STATE_SWITCH_CAPABILITY_TABLE_NAME); + +SwitchCapabilities::SwitchCapabilities() +{ + queryHashCapabilities(); + querySwitchCapabilities(); + + writeHashCapabilitiesToDb(); + writeSwitchCapabilitiesToDb(); +} + +bool SwitchCapabilities::isSwitchEcmpHashSupported() const +{ + const auto &nativeHashFieldList = hashCapabilities.nativeHashFieldList; + const auto &ecmpHash = switchCapabilities.ecmpHash; + + return nativeHashFieldList.isAttrSupported && ecmpHash.isAttrSupported; +} + +bool SwitchCapabilities::isSwitchLagHashSupported() const +{ + const auto &nativeHashFieldList = hashCapabilities.nativeHashFieldList; + const auto &lagHash = switchCapabilities.lagHash; + + return nativeHashFieldList.isAttrSupported && lagHash.isAttrSupported; +} + +bool SwitchCapabilities::isSwitchEcmpHashAlgorithmSupported() const +{ + return switchCapabilities.ecmpHashAlgorithm.isAttrSupported; +} + +bool SwitchCapabilities::isSwitchLagHashAlgorithmSupported() const +{ + return switchCapabilities.lagHashAlgorithm.isAttrSupported; +} + +bool SwitchCapabilities::validateSwitchHashFieldCap(const std::set &hfSet) const +{ + SWSS_LOG_ENTER(); + + if (!hashCapabilities.nativeHashFieldList.isEnumSupported) + { + return true; + } + + if (hashCapabilities.nativeHashFieldList.hfSet.empty()) + { + SWSS_LOG_ERROR("Failed to validate hash field: no hash field capabilities"); + return false; + } + + for (const auto &cit : hfSet) + { + if (hashCapabilities.nativeHashFieldList.hfSet.count(cit) == 0) + { + SWSS_LOG_ERROR("Failed to validate hash field: value(%s) is not supported", toStr(cit).c_str()); + return 
false; + } + } + + return true; +} + +bool SwitchCapabilities::validateSwitchEcmpHashAlgorithmCap(sai_hash_algorithm_t haValue) const +{ + return validateSwitchHashAlgorithmCap(switchCapabilities.ecmpHashAlgorithm, haValue); +} + +bool SwitchCapabilities::validateSwitchLagHashAlgorithmCap(sai_hash_algorithm_t haValue) const +{ + return validateSwitchHashAlgorithmCap(switchCapabilities.lagHashAlgorithm, haValue); +} + +template +bool SwitchCapabilities::validateSwitchHashAlgorithmCap(const T &obj, sai_hash_algorithm_t haValue) const +{ + SWSS_LOG_ENTER(); + + if (!obj.isEnumSupported) + { + return true; + } + + if (obj.haSet.empty()) + { + SWSS_LOG_ERROR("Failed to validate hash algorithm: no hash algorithm capabilities"); + return false; + } + + if (obj.haSet.count(haValue) == 0) + { + SWSS_LOG_ERROR("Failed to validate hash algorithm: value(%s) is not supported", toStr(haValue).c_str()); + return false; + } + + return true; +} + +FieldValueTuple SwitchCapabilities::makeHashFieldCapDbEntry() const +{ + const auto &nativeHashFieldList = hashCapabilities.nativeHashFieldList; + + auto field = SWITCH_CAPABILITY_HASH_NATIVE_HASH_FIELD_LIST_FIELD; + auto value = nativeHashFieldList.isEnumSupported ? 
toStr(nativeHashFieldList.hfSet) : "N/A"; + + return FieldValueTuple(field, value); +} + +FieldValueTuple SwitchCapabilities::makeEcmpHashCapDbEntry() const +{ + auto field = SWITCH_CAPABILITY_ECMP_HASH_CAPABLE_FIELD; + auto value = toStr(isSwitchEcmpHashSupported()); + + return FieldValueTuple(field, value); +} + +FieldValueTuple SwitchCapabilities::makeLagHashCapDbEntry() const +{ + auto field = SWITCH_CAPABILITY_LAG_HASH_CAPABLE_FIELD; + auto value = toStr(isSwitchLagHashSupported()); + + return FieldValueTuple(field, value); +} + +std::vector SwitchCapabilities::makeEcmpHashAlgorithmCapDbEntry() const +{ + const auto &ecmpHashAlgorithm = switchCapabilities.ecmpHashAlgorithm; + + std::vector fvList; + + fvList.emplace_back( + SWITCH_CAPABILITY_ECMP_HASH_ALGORITHM_FIELD, + ecmpHashAlgorithm.isEnumSupported ? toStr(ecmpHashAlgorithm.haSet) : "N/A" + ); + fvList.emplace_back( + SWITCH_CAPABILITY_ECMP_HASH_ALGORITHM_CAPABLE_FIELD, + toStr(isSwitchEcmpHashAlgorithmSupported()) + ); + + return fvList; +} + +std::vector SwitchCapabilities::makeLagHashAlgorithmCapDbEntry() const +{ + const auto &lagHashAlgorithm = switchCapabilities.lagHashAlgorithm; + + std::vector fvList; + + fvList.emplace_back( + SWITCH_CAPABILITY_LAG_HASH_ALGORITHM_FIELD, + lagHashAlgorithm.isEnumSupported ? 
toStr(lagHashAlgorithm.haSet) : "N/A" + ); + fvList.emplace_back( + SWITCH_CAPABILITY_LAG_HASH_ALGORITHM_CAPABLE_FIELD, + toStr(isSwitchLagHashAlgorithmSupported()) + ); + + return fvList; +} + +sai_status_t SwitchCapabilities::queryEnumCapabilitiesSai(std::vector &capList, sai_object_type_t objType, sai_attr_id_t attrId) const +{ + sai_s32_list_t enumList = { .count = 0, .list = nullptr }; + + auto status = sai_query_attribute_enum_values_capability(gSwitchId, objType, attrId, &enumList); + if ((status != SAI_STATUS_SUCCESS) && (status != SAI_STATUS_BUFFER_OVERFLOW)) + { + return status; + } + + capList.resize(enumList.count); + enumList.list = capList.data(); + + return sai_query_attribute_enum_values_capability(gSwitchId, objType, attrId, &enumList); +} + +sai_status_t SwitchCapabilities::queryAttrCapabilitiesSai(sai_attr_capability_t &attrCap, sai_object_type_t objType, sai_attr_id_t attrId) const +{ + return sai_query_attribute_capability(gSwitchId, objType, attrId, &attrCap); +} + +void SwitchCapabilities::queryHashNativeHashFieldListEnumCapabilities() +{ + SWSS_LOG_ENTER(); + + std::vector hfList; + auto status = queryEnumCapabilitiesSai( + hfList, SAI_OBJECT_TYPE_HASH, SAI_HASH_ATTR_NATIVE_HASH_FIELD_LIST + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to get attribute(%s) enum value capabilities", + toStr(SAI_OBJECT_TYPE_HASH, SAI_HASH_ATTR_NATIVE_HASH_FIELD_LIST).c_str() + ); + return; + } + + auto &hfSet = hashCapabilities.nativeHashFieldList.hfSet; + std::transform( + hfList.cbegin(), hfList.cend(), std::inserter(hfSet, hfSet.begin()), + [](sai_int32_t value) { return static_cast(value); } + ); + + hashCapabilities.nativeHashFieldList.isEnumSupported = true; +} + +void SwitchCapabilities::queryHashNativeHashFieldListAttrCapabilities() +{ + SWSS_LOG_ENTER(); + + sai_attr_capability_t attrCap; + + auto status = queryAttrCapabilitiesSai( + attrCap, SAI_OBJECT_TYPE_HASH, SAI_HASH_ATTR_NATIVE_HASH_FIELD_LIST + ); + if (status != 
SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to get attribute(%s) capabilities", + toStr(SAI_OBJECT_TYPE_HASH, SAI_HASH_ATTR_NATIVE_HASH_FIELD_LIST).c_str() + ); + return; + } + + if (!attrCap.set_implemented) + { + SWSS_LOG_WARN( + "Attribute(%s) SET is not implemented in SAI", + toStr(SAI_OBJECT_TYPE_HASH, SAI_HASH_ATTR_NATIVE_HASH_FIELD_LIST).c_str() + ); + return; + } + + hashCapabilities.nativeHashFieldList.isAttrSupported = true; +} + +void SwitchCapabilities::queryHashCapabilities() +{ + queryHashNativeHashFieldListEnumCapabilities(); + queryHashNativeHashFieldListAttrCapabilities(); +} + +void SwitchCapabilities::querySwitchEcmpHashAttrCapabilities() +{ + SWSS_LOG_ENTER(); + + sai_attr_capability_t attrCap; + + auto status = queryAttrCapabilitiesSai( + attrCap, SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_ECMP_HASH + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to get attribute(%s) capabilities", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_ECMP_HASH).c_str() + ); + return; + } + + if (!attrCap.get_implemented) + { + SWSS_LOG_WARN( + "Attribute(%s) GET is not implemented in SAI", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_ECMP_HASH).c_str() + ); + return; + } + + switchCapabilities.ecmpHash.isAttrSupported = true; +} + +void SwitchCapabilities::querySwitchLagHashAttrCapabilities() +{ + SWSS_LOG_ENTER(); + + sai_attr_capability_t attrCap; + + auto status = queryAttrCapabilitiesSai( + attrCap, SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_LAG_HASH + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to get attribute(%s) capabilities", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_LAG_HASH).c_str() + ); + return; + } + + if (!attrCap.get_implemented) + { + SWSS_LOG_WARN( + "Attribute(%s) GET is not implemented in SAI", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_LAG_HASH).c_str() + ); + return; + } + + switchCapabilities.lagHash.isAttrSupported = true; +} + +void 
SwitchCapabilities::querySwitchEcmpHashAlgorithmEnumCapabilities() +{ + SWSS_LOG_ENTER(); + + std::vector haList; + auto status = queryEnumCapabilitiesSai( + haList, SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_ALGORITHM + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to get attribute(%s) enum value capabilities", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_ALGORITHM).c_str() + ); + return; + } + + auto &haSet = switchCapabilities.ecmpHashAlgorithm.haSet; + std::transform( + haList.cbegin(), haList.cend(), std::inserter(haSet, haSet.begin()), + [](sai_int32_t value) { return static_cast(value); } + ); + + switchCapabilities.ecmpHashAlgorithm.isEnumSupported = true; +} + +void SwitchCapabilities::querySwitchEcmpHashAlgorithmAttrCapabilities() +{ + SWSS_LOG_ENTER(); + + sai_attr_capability_t attrCap; + + auto status = queryAttrCapabilitiesSai( + attrCap, SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_ALGORITHM + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to get attribute(%s) capabilities", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_ALGORITHM).c_str() + ); + return; + } + + if (!attrCap.set_implemented) + { + SWSS_LOG_WARN( + "Attribute(%s) SET is not implemented in SAI", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_ALGORITHM).c_str() + ); + return; + } + + switchCapabilities.ecmpHashAlgorithm.isAttrSupported = true; +} + +void SwitchCapabilities::querySwitchLagHashAlgorithmEnumCapabilities() +{ + SWSS_LOG_ENTER(); + + std::vector haList; + auto status = queryEnumCapabilitiesSai( + haList, SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_ALGORITHM + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to get attribute(%s) enum value capabilities", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_ALGORITHM).c_str() + ); + return; + } + + auto &haSet = 
switchCapabilities.lagHashAlgorithm.haSet; + std::transform( + haList.cbegin(), haList.cend(), std::inserter(haSet, haSet.begin()), + [](sai_int32_t value) { return static_cast(value); } + ); + + switchCapabilities.lagHashAlgorithm.isEnumSupported = true; +} + +void SwitchCapabilities::querySwitchLagHashAlgorithmAttrCapabilities() +{ + SWSS_LOG_ENTER(); + + sai_attr_capability_t attrCap; + + auto status = queryAttrCapabilitiesSai( + attrCap, SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_ALGORITHM + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to get attribute(%s) capabilities", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_ALGORITHM).c_str() + ); + return; + } + + if (!attrCap.set_implemented) + { + SWSS_LOG_WARN( + "Attribute(%s) SET is not implemented in SAI", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_ALGORITHM).c_str() + ); + return; + } + + switchCapabilities.lagHashAlgorithm.isAttrSupported = true; +} + +void SwitchCapabilities::querySwitchCapabilities() +{ + querySwitchEcmpHashAttrCapabilities(); + querySwitchLagHashAttrCapabilities(); + + querySwitchEcmpHashAlgorithmEnumCapabilities(); + querySwitchEcmpHashAlgorithmAttrCapabilities(); + querySwitchLagHashAlgorithmEnumCapabilities(); + querySwitchLagHashAlgorithmAttrCapabilities(); +} + +void SwitchCapabilities::writeHashCapabilitiesToDb() +{ + SWSS_LOG_ENTER(); + + std::vector fvList = { + makeHashFieldCapDbEntry() + }; + + SwitchCapabilities::capTable.set(SWITCH_CAPABILITY_KEY, fvList); + + SWSS_LOG_NOTICE( + "Wrote hash capabilities to State DB: %s key", + SwitchCapabilities::capTable.getKeyName(SWITCH_CAPABILITY_KEY).c_str() + ); +} + +void SwitchCapabilities::writeSwitchCapabilitiesToDb() +{ + SWSS_LOG_ENTER(); + + std::vector fvList = { + makeEcmpHashCapDbEntry(), + makeLagHashCapDbEntry() + }; + insertBack(fvList, makeEcmpHashAlgorithmCapDbEntry()); + insertBack(fvList, makeLagHashAlgorithmCapDbEntry()); + + 
SwitchCapabilities::capTable.set(SWITCH_CAPABILITY_KEY, fvList); + + SWSS_LOG_NOTICE( + "Wrote switch capabilities to State DB: %s key", + SwitchCapabilities::capTable.getKeyName(SWITCH_CAPABILITY_KEY).c_str() + ); +} diff --git a/orchagent/switch/switch_capabilities.h b/orchagent/switch/switch_capabilities.h new file mode 100644 index 0000000000..9bff1df497 --- /dev/null +++ b/orchagent/switch/switch_capabilities.h @@ -0,0 +1,99 @@ +#pragma once + +extern "C" { +#include +#include +#include +#include +} + +#include +#include + +#include +#include + +class SwitchCapabilities final +{ +public: + SwitchCapabilities(); + ~SwitchCapabilities() = default; + + bool isSwitchEcmpHashSupported() const; + bool isSwitchLagHashSupported() const; + + bool isSwitchEcmpHashAlgorithmSupported() const; + bool isSwitchLagHashAlgorithmSupported() const; + + bool validateSwitchHashFieldCap(const std::set &hfSet) const; + + bool validateSwitchEcmpHashAlgorithmCap(sai_hash_algorithm_t haValue) const; + bool validateSwitchLagHashAlgorithmCap(sai_hash_algorithm_t haValue) const; + +private: + template + bool validateSwitchHashAlgorithmCap(const T &obj, sai_hash_algorithm_t haValue) const; + + swss::FieldValueTuple makeHashFieldCapDbEntry() const; + + swss::FieldValueTuple makeEcmpHashCapDbEntry() const; + swss::FieldValueTuple makeLagHashCapDbEntry() const; + + std::vector makeEcmpHashAlgorithmCapDbEntry() const; + std::vector makeLagHashAlgorithmCapDbEntry() const; + + sai_status_t queryEnumCapabilitiesSai(std::vector &capList, sai_object_type_t objType, sai_attr_id_t attrId) const; + sai_status_t queryAttrCapabilitiesSai(sai_attr_capability_t &attrCap, sai_object_type_t objType, sai_attr_id_t attrId) const; + + void queryHashNativeHashFieldListEnumCapabilities(); + void queryHashNativeHashFieldListAttrCapabilities(); + + void querySwitchEcmpHashAttrCapabilities(); + void querySwitchLagHashAttrCapabilities(); + + void querySwitchEcmpHashAlgorithmEnumCapabilities(); + void 
querySwitchEcmpHashAlgorithmAttrCapabilities(); + void querySwitchLagHashAlgorithmEnumCapabilities(); + void querySwitchLagHashAlgorithmAttrCapabilities(); + + void queryHashCapabilities(); + void querySwitchCapabilities(); + + void writeHashCapabilitiesToDb(); + void writeSwitchCapabilitiesToDb(); + + // Hash SAI capabilities + struct { + struct { + std::set hfSet; + bool isEnumSupported = false; + bool isAttrSupported = false; + } nativeHashFieldList; + } hashCapabilities; + + // Switch SAI capabilities + struct { + struct { + bool isAttrSupported = false; + } ecmpHash; + + struct { + bool isAttrSupported = false; + } lagHash; + + struct { + std::set haSet; + bool isEnumSupported = false; + bool isAttrSupported = false; + } ecmpHashAlgorithm; + + struct { + std::set haSet; + bool isEnumSupported = false; + bool isAttrSupported = false; + } lagHashAlgorithm; + } switchCapabilities; + + static swss::DBConnector stateDb; + static swss::Table capTable; +}; diff --git a/orchagent/switch/switch_container.h b/orchagent/switch/switch_container.h new file mode 100644 index 0000000000..a51b551427 --- /dev/null +++ b/orchagent/switch/switch_container.h @@ -0,0 +1,39 @@ +#pragma once + +extern "C" { +#include +#include +} + +#include +#include +#include + +class SwitchHash final +{ +public: + SwitchHash() = default; + ~SwitchHash() = default; + + struct { + std::set value; + bool is_set = false; + } ecmp_hash; + + struct { + std::set value; + bool is_set = false; + } lag_hash; + + struct { + sai_hash_algorithm_t value; + bool is_set = false; + } ecmp_hash_algorithm; + + struct { + sai_hash_algorithm_t value; + bool is_set = false; + } lag_hash_algorithm; + + std::unordered_map fieldValueMap; +}; diff --git a/orchagent/switch/switch_helper.cpp b/orchagent/switch/switch_helper.cpp new file mode 100644 index 0000000000..23a7c3fd5a --- /dev/null +++ b/orchagent/switch/switch_helper.cpp @@ -0,0 +1,209 @@ +// includes 
----------------------------------------------------------------------------------------------------------- + +extern "C" { +#include +#include +} + +#include +#include +#include + +#include +#include + +#include "switch_schema.h" +#include "switch_helper.h" + +using namespace swss; + +// constants ---------------------------------------------------------------------------------------------------------- + +static const std::unordered_map swHashHashFieldMap = +{ + { SWITCH_HASH_FIELD_IN_PORT, SAI_NATIVE_HASH_FIELD_IN_PORT }, + { SWITCH_HASH_FIELD_DST_MAC, SAI_NATIVE_HASH_FIELD_DST_MAC }, + { SWITCH_HASH_FIELD_SRC_MAC, SAI_NATIVE_HASH_FIELD_SRC_MAC }, + { SWITCH_HASH_FIELD_ETHERTYPE, SAI_NATIVE_HASH_FIELD_ETHERTYPE }, + { SWITCH_HASH_FIELD_VLAN_ID, SAI_NATIVE_HASH_FIELD_VLAN_ID }, + { SWITCH_HASH_FIELD_IP_PROTOCOL, SAI_NATIVE_HASH_FIELD_IP_PROTOCOL }, + { SWITCH_HASH_FIELD_DST_IP, SAI_NATIVE_HASH_FIELD_DST_IP }, + { SWITCH_HASH_FIELD_SRC_IP, SAI_NATIVE_HASH_FIELD_SRC_IP }, + { SWITCH_HASH_FIELD_L4_DST_PORT, SAI_NATIVE_HASH_FIELD_L4_DST_PORT }, + { SWITCH_HASH_FIELD_L4_SRC_PORT, SAI_NATIVE_HASH_FIELD_L4_SRC_PORT }, + { SWITCH_HASH_FIELD_INNER_DST_MAC, SAI_NATIVE_HASH_FIELD_INNER_DST_MAC }, + { SWITCH_HASH_FIELD_INNER_SRC_MAC, SAI_NATIVE_HASH_FIELD_INNER_SRC_MAC }, + { SWITCH_HASH_FIELD_INNER_ETHERTYPE, SAI_NATIVE_HASH_FIELD_INNER_ETHERTYPE }, + { SWITCH_HASH_FIELD_INNER_IP_PROTOCOL, SAI_NATIVE_HASH_FIELD_INNER_IP_PROTOCOL }, + { SWITCH_HASH_FIELD_INNER_DST_IP, SAI_NATIVE_HASH_FIELD_INNER_DST_IP }, + { SWITCH_HASH_FIELD_INNER_SRC_IP, SAI_NATIVE_HASH_FIELD_INNER_SRC_IP }, + { SWITCH_HASH_FIELD_INNER_L4_DST_PORT, SAI_NATIVE_HASH_FIELD_INNER_L4_DST_PORT }, + { SWITCH_HASH_FIELD_INNER_L4_SRC_PORT, SAI_NATIVE_HASH_FIELD_INNER_L4_SRC_PORT } +}; + +static const std::unordered_map swHashAlgorithmMap = +{ + { SWITCH_HASH_ALGORITHM_CRC, SAI_HASH_ALGORITHM_CRC }, + { SWITCH_HASH_ALGORITHM_XOR, SAI_HASH_ALGORITHM_XOR }, + { SWITCH_HASH_ALGORITHM_RANDOM, SAI_HASH_ALGORITHM_RANDOM 
}, + { SWITCH_HASH_ALGORITHM_CRC_32LO, SAI_HASH_ALGORITHM_CRC_32LO }, + { SWITCH_HASH_ALGORITHM_CRC_32HI, SAI_HASH_ALGORITHM_CRC_32HI }, + { SWITCH_HASH_ALGORITHM_CRC_CCITT, SAI_HASH_ALGORITHM_CRC_CCITT }, + { SWITCH_HASH_ALGORITHM_CRC_XOR, SAI_HASH_ALGORITHM_CRC_XOR } +}; + +// switch helper ------------------------------------------------------------------------------------------------------ + +const SwitchHash& SwitchHelper::getSwHash() const +{ + return swHash; +} + +void SwitchHelper::setSwHash(const SwitchHash &hash) +{ + swHash = hash; +} + +template +bool SwitchHelper::parseSwHashFieldList(T &obj, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + const auto &hfList = tokenize(value, ','); + + if (hfList.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty list is prohibited", field.c_str()); + return false; + } + + const auto &hfSet = std::unordered_set(hfList.cbegin(), hfList.cend()); + + if (hfSet.size() != hfList.size()) + { + SWSS_LOG_ERROR("Duplicate hash fields in field(%s): unexpected value(%s)", field.c_str(), value.c_str()); + return false; + } + + for (const auto &cit1 : hfSet) + { + const auto &cit2 = swHashHashFieldMap.find(cit1); + if (cit2 == swHashHashFieldMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + obj.value.insert(cit2->second); + } + + obj.is_set = true; + + return true; +} + +template +bool SwitchHelper::parseSwHashAlgorithm(T &obj, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + const auto &cit = swHashAlgorithmMap.find(value); + if (cit == swHashAlgorithmMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + obj.value = cit->second; + obj.is_set = 
true; + + return true; +} + +bool SwitchHelper::parseSwHashEcmpHash(SwitchHash &hash, const std::string &field, const std::string &value) const +{ + return parseSwHashFieldList(hash.ecmp_hash, field, value); +} + +bool SwitchHelper::parseSwHashLagHash(SwitchHash &hash, const std::string &field, const std::string &value) const +{ + return parseSwHashFieldList(hash.lag_hash, field, value); +} + +bool SwitchHelper::parseSwHashEcmpHashAlgorithm(SwitchHash &hash, const std::string &field, const std::string &value) const +{ + return parseSwHashAlgorithm(hash.ecmp_hash_algorithm, field, value); +} + +bool SwitchHelper::parseSwHashLagHashAlgorithm(SwitchHash &hash, const std::string &field, const std::string &value) const +{ + return parseSwHashAlgorithm(hash.lag_hash_algorithm, field, value); +} + +bool SwitchHelper::parseSwHash(SwitchHash &hash) const +{ + SWSS_LOG_ENTER(); + + for (const auto &cit : hash.fieldValueMap) + { + const auto &field = cit.first; + const auto &value = cit.second; + + if (field == SWITCH_HASH_ECMP_HASH) + { + if (!parseSwHashEcmpHash(hash, field, value)) + { + return false; + } + } + else if (field == SWITCH_HASH_LAG_HASH) + { + if (!parseSwHashLagHash(hash, field, value)) + { + return false; + } + } + else if (field == SWITCH_HASH_ECMP_HASH_ALGORITHM) + { + if (!parseSwHashEcmpHashAlgorithm(hash, field, value)) + { + return false; + } + } + else if (field == SWITCH_HASH_LAG_HASH_ALGORITHM) + { + if (!parseSwHashLagHashAlgorithm(hash, field, value)) + { + return false; + } + } + else + { + SWSS_LOG_WARN("Unknown field(%s): skipping ...", field.c_str()); + } + } + + return validateSwHash(hash); +} + +bool SwitchHelper::validateSwHash(SwitchHash &hash) const +{ + SWSS_LOG_ENTER(); + + auto cond = hash.ecmp_hash.is_set || hash.lag_hash.is_set; + cond = cond || hash.ecmp_hash_algorithm.is_set || hash.lag_hash_algorithm.is_set; + + if (!cond) + { + SWSS_LOG_ERROR("Validation error: missing valid fields"); + return false; + } + + return true; +} diff 
--git a/orchagent/switch/switch_helper.h b/orchagent/switch/switch_helper.h new file mode 100644 index 0000000000..d7be11981f --- /dev/null +++ b/orchagent/switch/switch_helper.h @@ -0,0 +1,33 @@ +#pragma once + +#include + +#include "switch_container.h" + +class SwitchHelper final +{ +public: + SwitchHelper() = default; + ~SwitchHelper() = default; + + const SwitchHash& getSwHash() const; + void setSwHash(const SwitchHash &hash); + + bool parseSwHash(SwitchHash &hash) const; + +private: + template + bool parseSwHashFieldList(T &obj, const std::string &field, const std::string &value) const; + template + bool parseSwHashAlgorithm(T &obj, const std::string &field, const std::string &value) const; + + bool parseSwHashEcmpHash(SwitchHash &hash, const std::string &field, const std::string &value) const; + bool parseSwHashLagHash(SwitchHash &hash, const std::string &field, const std::string &value) const; + bool parseSwHashEcmpHashAlgorithm(SwitchHash &hash, const std::string &field, const std::string &value) const; + bool parseSwHashLagHashAlgorithm(SwitchHash &hash, const std::string &field, const std::string &value) const; + + bool validateSwHash(SwitchHash &hash) const; + +private: + SwitchHash swHash; +}; diff --git a/orchagent/switch/switch_schema.h b/orchagent/switch/switch_schema.h new file mode 100644 index 0000000000..16a17f179c --- /dev/null +++ b/orchagent/switch/switch_schema.h @@ -0,0 +1,36 @@ +#pragma once + +// defines ------------------------------------------------------------------------------------------------------------ + +#define SWITCH_HASH_FIELD_IN_PORT "IN_PORT" +#define SWITCH_HASH_FIELD_DST_MAC "DST_MAC" +#define SWITCH_HASH_FIELD_SRC_MAC "SRC_MAC" +#define SWITCH_HASH_FIELD_ETHERTYPE "ETHERTYPE" +#define SWITCH_HASH_FIELD_VLAN_ID "VLAN_ID" +#define SWITCH_HASH_FIELD_IP_PROTOCOL "IP_PROTOCOL" +#define SWITCH_HASH_FIELD_DST_IP "DST_IP" +#define SWITCH_HASH_FIELD_SRC_IP "SRC_IP" +#define SWITCH_HASH_FIELD_L4_DST_PORT "L4_DST_PORT" +#define 
SWITCH_HASH_FIELD_L4_SRC_PORT "L4_SRC_PORT" +#define SWITCH_HASH_FIELD_INNER_DST_MAC "INNER_DST_MAC" +#define SWITCH_HASH_FIELD_INNER_SRC_MAC "INNER_SRC_MAC" +#define SWITCH_HASH_FIELD_INNER_ETHERTYPE "INNER_ETHERTYPE" +#define SWITCH_HASH_FIELD_INNER_IP_PROTOCOL "INNER_IP_PROTOCOL" +#define SWITCH_HASH_FIELD_INNER_DST_IP "INNER_DST_IP" +#define SWITCH_HASH_FIELD_INNER_SRC_IP "INNER_SRC_IP" +#define SWITCH_HASH_FIELD_INNER_L4_DST_PORT "INNER_L4_DST_PORT" +#define SWITCH_HASH_FIELD_INNER_L4_SRC_PORT "INNER_L4_SRC_PORT" + +#define SWITCH_HASH_ECMP_HASH "ecmp_hash" +#define SWITCH_HASH_LAG_HASH "lag_hash" + +#define SWITCH_HASH_ALGORITHM_CRC "CRC" +#define SWITCH_HASH_ALGORITHM_XOR "XOR" +#define SWITCH_HASH_ALGORITHM_RANDOM "RANDOM" +#define SWITCH_HASH_ALGORITHM_CRC_32LO "CRC_32LO" +#define SWITCH_HASH_ALGORITHM_CRC_32HI "CRC_32HI" +#define SWITCH_HASH_ALGORITHM_CRC_CCITT "CRC_CCITT" +#define SWITCH_HASH_ALGORITHM_CRC_XOR "CRC_XOR" + +#define SWITCH_HASH_ECMP_HASH_ALGORITHM "ecmp_hash_algorithm" +#define SWITCH_HASH_LAG_HASH_ALGORITHM "lag_hash_algorithm" diff --git a/orchagent/switchorch.cpp b/orchagent/switchorch.cpp index 951358774f..06dc36e472 100644 --- a/orchagent/switchorch.cpp +++ b/orchagent/switchorch.cpp @@ -9,6 +9,7 @@ #include "notificationproducer.h" #include "macaddress.h" #include "return_code.h" +#include "saihelper.h" using namespace std; using namespace swss; @@ -16,6 +17,7 @@ using namespace swss; extern sai_object_id_t gSwitchId; extern sai_switch_api_t *sai_switch_api; extern sai_acl_api_t *sai_acl_api; +extern sai_hash_api_t *sai_hash_api; extern MacAddress gVxlanMacAddress; extern CrmOrch *gCrmOrch; @@ -84,6 +86,9 @@ SwitchOrch::SwitchOrch(DBConnector *db, vector& connectors, Tabl set_switch_pfc_dlr_init_capability(); initSensorsTable(); querySwitchTpidCapability(); + querySwitchPortEgressSampleCapability(); + querySwitchHashDefaults(); + auto executorT = new ExecutableTimer(m_sensorsPollerTimer, this, "ASIC_SENSORS_POLL_TIMER"); 
Orch::addExecutor(executorT); } @@ -471,24 +476,278 @@ void SwitchOrch::doAppSwitchTableTask(Consumer &consumer) } } +bool SwitchOrch::setSwitchHashFieldListSai(const SwitchHash &hash, bool isEcmpHash) const +{ + const auto &oid = isEcmpHash ? m_switchHashDefaults.ecmpHash.oid : m_switchHashDefaults.lagHash.oid; + const auto &hfSet = isEcmpHash ? hash.ecmp_hash.value : hash.lag_hash.value; + + std::vector hfList; + std::transform( + hfSet.cbegin(), hfSet.cend(), std::back_inserter(hfList), + [](sai_native_hash_field_t value) { return static_cast(value); } + ); + + sai_attribute_t attr; + + attr.id = SAI_HASH_ATTR_NATIVE_HASH_FIELD_LIST; + attr.value.s32list.list = hfList.data(); + attr.value.s32list.count = static_cast(hfList.size()); + + auto status = sai_hash_api->set_hash_attribute(oid, &attr); + return status == SAI_STATUS_SUCCESS; +} + +bool SwitchOrch::setSwitchHashAlgorithmSai(const SwitchHash &hash, bool isEcmpHash) const +{ + sai_attribute_t attr; + + attr.id = isEcmpHash ? SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_ALGORITHM : SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_ALGORITHM; + attr.value.s32 = static_cast(isEcmpHash ? 
hash.ecmp_hash_algorithm.value : hash.lag_hash_algorithm.value); + + auto status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + return status == SAI_STATUS_SUCCESS; +} + +bool SwitchOrch::setSwitchHash(const SwitchHash &hash) +{ + SWSS_LOG_ENTER(); + + auto hObj = swHlpr.getSwHash(); + auto cfgUpd = false; + + if (hash.ecmp_hash.is_set) + { + if (hObj.ecmp_hash.value != hash.ecmp_hash.value) + { + if (swCap.isSwitchEcmpHashSupported()) + { + if (!swCap.validateSwitchHashFieldCap(hash.ecmp_hash.value)) + { + SWSS_LOG_ERROR("Failed to validate switch ECMP hash: capability is not supported"); + return false; + } + + if (!setSwitchHashFieldListSai(hash, true)) + { + SWSS_LOG_ERROR("Failed to set switch ECMP hash in SAI"); + return false; + } + + cfgUpd = true; + } + else + { + SWSS_LOG_WARN("Switch ECMP hash configuration is not supported: skipping ..."); + } + } + } + else + { + if (hObj.ecmp_hash.is_set) + { + SWSS_LOG_ERROR("Failed to remove switch ECMP hash configuration: operation is not supported"); + return false; + } + } + + if (hash.lag_hash.is_set) + { + if (hObj.lag_hash.value != hash.lag_hash.value) + { + if (swCap.isSwitchLagHashSupported()) + { + if (!swCap.validateSwitchHashFieldCap(hash.lag_hash.value)) + { + SWSS_LOG_ERROR("Failed to validate switch LAG hash: capability is not supported"); + return false; + } + + if (!setSwitchHashFieldListSai(hash, false)) + { + SWSS_LOG_ERROR("Failed to set switch LAG hash in SAI"); + return false; + } + + cfgUpd = true; + } + else + { + SWSS_LOG_WARN("Switch LAG hash configuration is not supported: skipping ..."); + } + } + } + else + { + if (hObj.lag_hash.is_set) + { + SWSS_LOG_ERROR("Failed to remove switch LAG hash configuration: operation is not supported"); + return false; + } + } + + if (hash.ecmp_hash_algorithm.is_set) + { + if (!hObj.ecmp_hash_algorithm.is_set || (hObj.ecmp_hash_algorithm.value != hash.ecmp_hash_algorithm.value)) + { + if (swCap.isSwitchEcmpHashAlgorithmSupported()) + { + if 
(!swCap.validateSwitchEcmpHashAlgorithmCap(hash.ecmp_hash_algorithm.value)) + { + SWSS_LOG_ERROR("Failed to validate switch ECMP hash algorithm: capability is not supported"); + return false; + } + + if (!setSwitchHashAlgorithmSai(hash, true)) + { + SWSS_LOG_ERROR("Failed to set switch ECMP hash algorithm in SAI"); + return false; + } + + cfgUpd = true; + } + else + { + SWSS_LOG_WARN("Switch ECMP hash algorithm configuration is not supported: skipping ..."); + } + } + } + else + { + if (hObj.ecmp_hash_algorithm.is_set) + { + SWSS_LOG_ERROR("Failed to remove switch ECMP hash algorithm configuration: operation is not supported"); + return false; + } + } + + if (hash.lag_hash_algorithm.is_set) + { + if (!hObj.lag_hash_algorithm.is_set || (hObj.lag_hash_algorithm.value != hash.lag_hash_algorithm.value)) + { + if (swCap.isSwitchLagHashAlgorithmSupported()) + { + if (!swCap.validateSwitchLagHashAlgorithmCap(hash.lag_hash_algorithm.value)) + { + SWSS_LOG_ERROR("Failed to validate switch LAG hash algorithm: capability is not supported"); + return false; + } + + if (!setSwitchHashAlgorithmSai(hash, false)) + { + SWSS_LOG_ERROR("Failed to set switch LAG hash algorithm in SAI"); + return false; + } + + cfgUpd = true; + } + else + { + SWSS_LOG_WARN("Switch LAG hash algorithm configuration is not supported: skipping ..."); + } + } + } + else + { + if (hObj.lag_hash_algorithm.is_set) + { + SWSS_LOG_ERROR("Failed to remove switch LAG hash algorithm configuration: operation is not supported"); + return false; + } + } + + // Don't update internal cache when config remains unchanged + if (!cfgUpd) + { + SWSS_LOG_NOTICE("Switch hash in SAI is up-to-date"); + return true; + } + + swHlpr.setSwHash(hash); + + SWSS_LOG_NOTICE("Set switch hash in SAI"); + + return true; +} + +void SwitchOrch::doCfgSwitchHashTableTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + auto &map = consumer.m_toSync; + auto it = map.begin(); + + while (it != map.end()) + { + auto keyOpFieldsValues = it->second; 
+ auto key = kfvKey(keyOpFieldsValues); + auto op = kfvOp(keyOpFieldsValues); + + SWSS_LOG_INFO("KEY: %s, OP: %s", key.c_str(), op.c_str()); + + if (key.empty()) + { + SWSS_LOG_ERROR("Failed to parse switch hash key: empty string"); + it = map.erase(it); + continue; + } + + SwitchHash hash; + + if (op == SET_COMMAND) + { + for (const auto &cit : kfvFieldsValues(keyOpFieldsValues)) + { + auto fieldName = fvField(cit); + auto fieldValue = fvValue(cit); + + SWSS_LOG_INFO("FIELD: %s, VALUE: %s", fieldName.c_str(), fieldValue.c_str()); + + hash.fieldValueMap[fieldName] = fieldValue; + } + + if (swHlpr.parseSwHash(hash)) + { + if (!setSwitchHash(hash)) + { + SWSS_LOG_ERROR("Failed to set switch hash: ASIC and CONFIG DB are diverged"); + } + } + } + else if (op == DEL_COMMAND) + { + SWSS_LOG_ERROR("Failed to remove switch hash: operation is not supported: ASIC and CONFIG DB are diverged"); + } + else + { + SWSS_LOG_ERROR("Unknown operation(%s)", op.c_str()); + } + + it = map.erase(it); + } +} + void SwitchOrch::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); - const string & table_name = consumer.getTableName(); - if (table_name == APP_SWITCH_TABLE_NAME) + const auto &tableName = consumer.getTableName(); + + if (tableName == APP_SWITCH_TABLE_NAME) { doAppSwitchTableTask(consumer); } - else if (table_name == CFG_ASIC_SENSORS_TABLE_NAME) + else if (tableName == CFG_ASIC_SENSORS_TABLE_NAME) { doCfgSensorsTableTask(consumer); } + else if (tableName == CFG_SWITCH_HASH_TABLE_NAME) + { + doCfgSwitchHashTableTask(consumer); + } else { - SWSS_LOG_ERROR("Unknown table : %s", table_name.c_str()); + SWSS_LOG_ERROR("Unknown table : %s", tableName.c_str()); } - } void SwitchOrch::doTask(NotificationConsumer& consumer) @@ -730,6 +989,35 @@ void SwitchOrch::set_switch_capability(const std::vector& value m_switchTable.set("switch", values); } +void SwitchOrch::querySwitchPortEgressSampleCapability() +{ + vector fvVector; + sai_status_t status = SAI_STATUS_SUCCESS; + sai_attr_capability_t 
capability; + + // Check if SAI is capable of handling Port egress sample. + status = sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_PORT, + SAI_PORT_ATTR_EGRESS_SAMPLEPACKET_ENABLE, &capability); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("Could not query port egress Sample capability %d", status); + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_PORT_EGRESS_SAMPLE_CAPABLE, "false"); + } + else + { + if (capability.set_implemented) + { + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_PORT_EGRESS_SAMPLE_CAPABLE, "true"); + } + else + { + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_PORT_EGRESS_SAMPLE_CAPABLE, "false"); + } + SWSS_LOG_NOTICE("port egress Sample capability %d", capability.set_implemented); + } + set_switch_capability(fvVector); +} + void SwitchOrch::querySwitchTpidCapability() { SWSS_LOG_ENTER(); @@ -783,6 +1071,38 @@ void SwitchOrch::querySwitchTpidCapability() } } +bool SwitchOrch::getSwitchHashOidSai(sai_object_id_t &oid, bool isEcmpHash) const +{ + sai_attribute_t attr; + attr.id = isEcmpHash ? 
SAI_SWITCH_ATTR_ECMP_HASH : SAI_SWITCH_ATTR_LAG_HASH; + attr.value.oid = SAI_NULL_OBJECT_ID; + + auto status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + return false; + } + + oid = attr.value.oid; + + return true; +} + +void SwitchOrch::querySwitchHashDefaults() +{ + SWSS_LOG_ENTER(); + + if (!getSwitchHashOidSai(m_switchHashDefaults.ecmpHash.oid, true)) + { + SWSS_LOG_WARN("Failed to get switch ECMP hash OID"); + } + + if (!getSwitchHashOidSai(m_switchHashDefaults.lagHash.oid, false)) + { + SWSS_LOG_WARN("Failed to get switch LAG hash OID"); + } +} + bool SwitchOrch::querySwitchCapability(sai_object_type_t sai_object, sai_attr_id_t attr_id) { SWSS_LOG_ENTER(); diff --git a/orchagent/switchorch.h b/orchagent/switchorch.h index 87e6b1a309..7135bcdc39 100644 --- a/orchagent/switchorch.h +++ b/orchagent/switchorch.h @@ -3,6 +3,8 @@ #include "acltable.h" #include "orch.h" #include "timer.h" +#include "switch/switch_capabilities.h" +#include "switch/switch_helper.h" #define DEFAULT_ASIC_SENSORS_POLLER_INTERVAL 60 #define ASIC_SENSORS_POLLER_STATUS "ASIC_SENSORS_POLLER_STATUS" @@ -12,6 +14,7 @@ #define SWITCH_CAPABILITY_TABLE_LAG_TPID_CAPABLE "LAG_TPID_CAPABLE" #define SWITCH_CAPABILITY_TABLE_ORDERED_ECMP_CAPABLE "ORDERED_ECMP_CAPABLE" #define SWITCH_CAPABILITY_TABLE_PFC_DLR_INIT_CAPABLE "PFC_DLR_INIT_CAPABLE" +#define SWITCH_CAPABILITY_TABLE_PORT_EGRESS_SAMPLE_CAPABLE "PORT_EGRESS_SAMPLE_CAPABLE" struct WarmRestartCheck { @@ -46,10 +49,21 @@ class SwitchOrch : public Orch private: void doTask(Consumer &consumer); void doTask(swss::SelectableTimer &timer); + void doCfgSwitchHashTableTask(Consumer &consumer); void doCfgSensorsTableTask(Consumer &consumer); void doAppSwitchTableTask(Consumer &consumer); void initSensorsTable(); void querySwitchTpidCapability(); + void querySwitchPortEgressSampleCapability(); + + // Switch hash + bool setSwitchHashFieldListSai(const SwitchHash &hash, bool isEcmpHash) const; + bool 
setSwitchHashAlgorithmSai(const SwitchHash &hash, bool isEcmpHash) const; + bool setSwitchHash(const SwitchHash &hash); + + bool getSwitchHashOidSai(sai_object_id_t &oid, bool isEcmpHash) const; + void querySwitchHashDefaults(); + sai_status_t setSwitchTunnelVxlanParams(swss::FieldValueTuple &val); void setSwitchNonSaiAttributes(swss::FieldValueTuple &val); @@ -85,7 +99,23 @@ class SwitchOrch : public Orch bool m_orderedEcmpEnable = false; bool m_PfcDlrInitEnable = false; + // Switch hash SAI defaults + struct { + struct { + sai_object_id_t oid = SAI_NULL_OBJECT_ID; + } ecmpHash; + struct { + sai_object_id_t oid = SAI_NULL_OBJECT_ID; + } lagHash; + } m_switchHashDefaults; + // Information contained in the request from // external program for orchagent pre-shutdown state check WarmRestartCheck m_warmRestartCheck = {false, false, false}; + + // Switch OA capabilities + SwitchCapabilities swCap; + + // Switch OA helper + SwitchHelper swHlpr; }; diff --git a/orchagent/tunneldecaporch.cpp b/orchagent/tunneldecaporch.cpp index e84ba315c4..065e78a0c0 100644 --- a/orchagent/tunneldecaporch.cpp +++ b/orchagent/tunneldecaporch.cpp @@ -144,7 +144,9 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } if (exists) { - setTunnelAttribute(fvField(i), ecn_mode, tunnel_id); + SWSS_LOG_NOTICE("Skip setting ecn_mode since the SAI attribute SAI_TUNNEL_ATTR_DECAP_ECN_MODE is create only"); + valid = false; + break; } } else if (fvField(i) == "encap_ecn_mode") @@ -158,7 +160,9 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } if (exists) { - setTunnelAttribute(fvField(i), encap_ecn_mode, tunnel_id); + SWSS_LOG_NOTICE("Skip setting encap_ecn_mode since the SAI attribute SAI_TUNNEL_ATTR_ENCAP_ECN_MODE is create only"); + valid = false; + break; } } else if (fvField(i) == "ttl_mode") @@ -582,30 +586,6 @@ bool TunnelDecapOrch::setTunnelAttribute(string field, string value, sai_object_ sai_attribute_t attr; - if (field == "ecn_mode") - { - // decap ecn mode (copy from outer/standard) - 
attr.id = SAI_TUNNEL_ATTR_DECAP_ECN_MODE; - if (value == "copy_from_outer") - { - attr.value.s32 = SAI_TUNNEL_DECAP_ECN_MODE_COPY_FROM_OUTER; - } - else if (value == "standard") - { - attr.value.s32 = SAI_TUNNEL_DECAP_ECN_MODE_STANDARD; - } - } - - if (field == "encap_ecn_mode") - { - // encap ecn mode (only standard is supported) - attr.id = SAI_TUNNEL_ATTR_ENCAP_ECN_MODE; - if (value == "standard") - { - attr.value.s32 = SAI_TUNNEL_ENCAP_ECN_MODE_STANDARD; - } - } - if (field == "ttl_mode") { // ttl mode (uniform/pipe) diff --git a/orchagent/twamporch.cpp b/orchagent/twamporch.cpp new file mode 100644 index 0000000000..58f7b5921f --- /dev/null +++ b/orchagent/twamporch.cpp @@ -0,0 +1,1052 @@ +#include "twamporch.h" +#include "vrforch.h" +#include "crmorch.h" +#include "logger.h" +#include "swssnet.h" +#include "converter.h" +#include "sai_serialize.h" +#include "tokenize.h" +#include "notifier.h" +#include "notifications.h" + +#include + +using namespace std; +using namespace swss; + +/* TWAMP infor */ +#define TWAMP_SESSION_MODE "MODE" +#define TWAMP_SESSION_ROLE "ROLE" +#define TWAMP_SESSION_VRF_NAME "VRF_NAME" +#define TWAMP_SESSION_HW_LOOKUP "HW_LOOKUP" + +/* TWAMP-test packet */ +#define TWAMP_SESSION_SRC_IP "SRC_IP" +#define TWAMP_SESSION_SRC_UDP_PORT "SRC_UDP_PORT" +#define TWAMP_SESSION_DST_IP "DST_IP" +#define TWAMP_SESSION_DST_UDP_PORT "DST_UDP_PORT" +#define TWAMP_SESSION_DSCP "DSCP" +#define TWAMP_SESSION_TTL "TTL" +#define TWAMP_SESSION_PACKET_TIMESTAMP_FORMAT "TIMESTAMP_FORMAT" +#define TWAMP_SESSION_PACKET_PADDING_SIZE "PADDING_SIZE" + +/* Session-Sender */ +#define TWAMP_SESSION_TX_PACKET_COUNT "PACKET_COUNT" +#define TWAMP_SESSION_TX_MONITOR_TIME "MONITOR_TIME" +#define TWAMP_SESSION_TX_INTERVAL "TX_INTERVAL" +#define TWAMP_SESSION_TIMEOUT "TIMEOUT" +#define TWAMP_SESSION_STATISTICS_INTERVAL "STATISTICS_INTERVAL" +#define TWAMP_SESSION_ADMIN_STATE "ADMIN_STATE" + +/* TWAMP session status */ +#define TWAMP_SESSION_STATUS "status" +#define 
TWAMP_SESSION_STATUS_ACTIVE "active" +#define TWAMP_SESSION_STATUS_INACTIVE "inactive" + +#define TWAMP_SESSION_TX_MODE_PACKET_NUM "packet_num" +#define TWAMP_SESSION_TX_MODE_CONTINUOUS "continuous" + +#define TWAMP_SESSION_DSCP_MIN 0 +#define TWAMP_SESSION_DSCP_MAX 63 + +#define TWAMP_SESSION_TIMEOUT_MIN 1 +#define TWAMP_SESSION_TIMEOUT_MAX 10 + +static map twamp_role_map = +{ + { "SENDER", SAI_TWAMP_SESSION_ROLE_SENDER }, + { "REFLECTOR", SAI_TWAMP_SESSION_ROLE_REFLECTOR } +}; + +static map twamp_mode_map = +{ + { "FULL", SAI_TWAMP_MODE_FULL }, + { "LIGHT", SAI_TWAMP_MODE_LIGHT } +}; + +static map timestamp_format_map = +{ + { "NTP", SAI_TWAMP_TIMESTAMP_FORMAT_NTP }, + { "PTP", SAI_TWAMP_TIMESTAMP_FORMAT_PTP } +}; + +static map session_admin_state_map = +{ + { "ENABLED", true }, + { "DISABLED", false } +}; + +static map hw_lookup_map = +{ + { "TRUE", true }, + { "FALSE", false } +}; + +/* Global variables */ +extern sai_object_id_t gSwitchId; +extern sai_object_id_t gVirtualRouterId; +extern sai_switch_api_t *sai_switch_api; +extern sai_twamp_api_t *sai_twamp_api; +extern CrmOrch *gCrmOrch; + +const vector twamp_session_stat_ids = +{ + SAI_TWAMP_SESSION_STAT_RX_PACKETS, + SAI_TWAMP_SESSION_STAT_RX_BYTE, + SAI_TWAMP_SESSION_STAT_TX_PACKETS, + SAI_TWAMP_SESSION_STAT_TX_BYTE, + SAI_TWAMP_SESSION_STAT_DROP_PACKETS, + SAI_TWAMP_SESSION_STAT_MAX_LATENCY, + SAI_TWAMP_SESSION_STAT_MIN_LATENCY, + SAI_TWAMP_SESSION_STAT_AVG_LATENCY, + SAI_TWAMP_SESSION_STAT_MAX_JITTER, + SAI_TWAMP_SESSION_STAT_MIN_JITTER, + SAI_TWAMP_SESSION_STAT_AVG_JITTER +}; + + + +TwampOrch::TwampOrch(TableConnector confDbConnector, TableConnector stateDbConnector, SwitchOrch *switchOrch, PortsOrch *portOrch, VRFOrch *vrfOrch) : + Orch(confDbConnector.first, confDbConnector.second), + m_stateDbTwampTable(stateDbConnector.first, stateDbConnector.second), + m_switchOrch(switchOrch), + m_portsOrch(portOrch), + m_vrfOrch(vrfOrch) +{ + /* Set entries count to 0 */ + m_maxTwampSessionCount = 
m_twampSessionCount = 0; + + /* Get the Maximum supported TWAMP sessions */ + SWSS_LOG_INFO("Get the Maximum supported TWAMP sessions"); + sai_attribute_t attr; + attr.id = SAI_SWITCH_ATTR_MAX_TWAMP_SESSION; + sai_status_t status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_NOTICE("Twamp session resource availability is not supported. Skipping ..."); + return; + } + else + { + m_maxTwampSessionCount = attr.value.u32; + } + + /* Set MAX entries to state DB */ + if (m_maxTwampSessionCount) + { + vector fvTuple; + fvTuple.emplace_back("MAX_TWAMP_SESSION_COUNT", to_string(m_maxTwampSessionCount)); + m_switchOrch->set_switch_capability(fvTuple); + } + else + { + SWSS_LOG_NOTICE("Twamp session resource availability is not supported. Skipping ..."); + return; + } + + /* Add TWAMP session event notification support */ + DBConnector *notificationsDb = new DBConnector("ASIC_DB", 0); + m_twampNotificationConsumer = new swss::NotificationConsumer(notificationsDb, "NOTIFICATIONS"); + auto twampNotifier = new Notifier(m_twampNotificationConsumer, this, "TWAMP_NOTIFICATIONS"); + Orch::addExecutor(twampNotifier); + register_event_notif = false; + + /* Initialize DB connectors */ + m_asicDb = shared_ptr(new DBConnector("ASIC_DB", 0)); + m_countersDb = shared_ptr(new DBConnector("COUNTERS_DB", 0)); + + /* Initialize VIDTORID table */ + m_vidToRidTable = unique_ptr
(new Table(m_asicDb.get(), "VIDTORID")); + + /* Initialize counter tables */ + m_counterTwampSessionNameMapTable = unique_ptr
(new Table(m_countersDb.get(), COUNTERS_TWAMP_SESSION_NAME_MAP)); + m_countersTable = unique_ptr
(new Table(m_countersDb.get(), COUNTERS_TABLE)); +} + +bool TwampOrch::isSessionExists(const string& name) +{ + SWSS_LOG_ENTER(); + + return m_twampEntries.find(name) != m_twampEntries.end(); +} + +bool TwampOrch::getSessionName(const sai_object_id_t oid, string& name) +{ + SWSS_LOG_ENTER(); + + for (const auto& it: m_twampEntries) + { + if (it.second.session_id == oid) + { + name = it.first; + return true; + } + } + + return false; +} + +bool TwampOrch::validateUdpPort(uint16_t udp_port) +{ + if (udp_port == 862) + { + return true; + } + if (udp_port == 863) + { + return true; + } + if (udp_port >= 1025) + { + return true; + } + return false; +} + +void TwampOrch::increaseTwampSessionCount(void) +{ + m_twampSessionCount++; +} + +void TwampOrch::decreaseTwampSessionCount(void) +{ + m_twampSessionCount--; +} + +bool TwampOrch::checkTwampSessionCount(void) +{ + return m_twampSessionCount < m_maxTwampSessionCount; +} + +void TwampOrch::setSessionStatus(const string& name, const string& status) +{ + SWSS_LOG_ENTER(); + + vector fvVector; + fvVector.emplace_back(TWAMP_SESSION_STATUS, status); + m_stateDbTwampTable.set(name, fvVector); +} + +bool TwampOrch::getSessionStatus(const string &name, string& status) +{ + SWSS_LOG_ENTER(); + + if (m_stateDbTwampTable.hget(name, TWAMP_SESSION_STATUS, status)) + { + return true; + } + return false; +} + +void TwampOrch::removeSessionStatus(const string& name) +{ + SWSS_LOG_ENTER(); + + m_stateDbTwampTable.del(name); +} + +void TwampOrch::removeSessionCounter(const sai_object_id_t session_id) +{ + SWSS_LOG_ENTER(); + + string key_pattern = "COUNTERS:" + sai_serialize_object_id(session_id) + "*"; + auto keys = m_countersDb->keys(key_pattern); + for (auto& k : keys) + { + m_countersDb->del(k); + } +} + +void TwampOrch::initSessionStats(const string& name) +{ + SWSS_LOG_ENTER(); + + auto it = m_twampStatistics.find(name); + if (it == m_twampStatistics.end()) + { + SWSS_LOG_ERROR("Failed to init non-existent twamp session %s stat 
info", name.c_str()); + return; + } + + TwampStats& total_stats = it->second; + + total_stats.rx_packets = 0; + total_stats.rx_bytes = 0; + total_stats.tx_packets = 0; + total_stats.tx_bytes = 0; + total_stats.drop_packets = 0; + total_stats.max_latency = 0; + total_stats.min_latency = 0; + total_stats.avg_latency = 0; + total_stats.max_jitter = 0; + total_stats.min_jitter = 0; + total_stats.avg_jitter = 0; + total_stats.avg_latency_total = 0; + total_stats.avg_jitter_total = 0; +} + +bool TwampOrch::registerTwampEventNotification(void) +{ + sai_attribute_t attr; + sai_status_t status; + sai_attr_capability_t capability; + + status = sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_SWITCH, + SAI_SWITCH_ATTR_TWAMP_SESSION_EVENT_NOTIFY, + &capability); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_NOTICE("Unable to query the TWAMP event notification capability"); + return false; + } + + if (!capability.set_implemented) + { + SWSS_LOG_NOTICE("TWAMP register event notification not supported"); + return false; + } + + attr.id = SAI_SWITCH_ATTR_TWAMP_SESSION_EVENT_NOTIFY; + attr.value.ptr = (void *)on_twamp_session_event; + + status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to register TWAMP notification handler"); + return false; + } + + return true; +} + +bool TwampOrch::activateSession(const string& name, TwampEntry& entry) +{ + SWSS_LOG_ENTER(); + + sai_status_t status; + sai_attribute_t attr; + vector attrs; + + attr.id = SAI_TWAMP_SESSION_ATTR_TWAMP_MODE; + attr.value.s32 = entry.mode; + attrs.emplace_back(attr); + + attr.id = SAI_TWAMP_SESSION_ATTR_SESSION_ROLE; + attr.value.s32 = entry.role; + attrs.emplace_back(attr); + + attr.id = SAI_TWAMP_SESSION_ATTR_HW_LOOKUP_VALID; + attr.value.booldata = entry.hw_lookup; + attrs.emplace_back(attr); + + if (entry.vrf_id) + { + attr.id = SAI_TWAMP_SESSION_ATTR_VIRTUAL_ROUTER; + attr.value.oid = entry.vrf_id; + 
attrs.emplace_back(attr); + } + + attr.id = SAI_TWAMP_SESSION_ATTR_SRC_IP; + copy(attr.value.ipaddr, entry.src_ip); + attrs.emplace_back(attr); + + attr.id = SAI_TWAMP_SESSION_ATTR_DST_IP; + copy(attr.value.ipaddr, entry.dst_ip); + attrs.emplace_back(attr); + + attr.id = SAI_TWAMP_SESSION_ATTR_UDP_SRC_PORT; + attr.value.u32 = entry.src_udp_port; + attrs.emplace_back(attr); + + attr.id = SAI_TWAMP_SESSION_ATTR_UDP_DST_PORT; + attr.value.u32 = entry.dst_udp_port; + attrs.emplace_back(attr); + + if (entry.role == SAI_TWAMP_SESSION_ROLE_SENDER) + { + if (entry.tx_mode == TWAMP_SESSION_TX_MODE_PACKET_NUM) + { + attr.id = SAI_TWAMP_SESSION_ATTR_TWAMP_PKT_TX_MODE; + attr.value.s32 = SAI_TWAMP_PKT_TX_MODE_PACKET_COUNT; + attrs.emplace_back(attr); + + attr.id = SAI_TWAMP_SESSION_ATTR_TX_PKT_CNT; + attr.value.u32 = entry.packet_count; + attrs.emplace_back(attr); + } + else if (entry.tx_mode == TWAMP_SESSION_TX_MODE_CONTINUOUS) + { + if (entry.monitor_time) + { + attr.id = SAI_TWAMP_SESSION_ATTR_TWAMP_PKT_TX_MODE; + attr.value.u32 = SAI_TWAMP_PKT_TX_MODE_PERIOD; + attrs.emplace_back(attr); + + attr.id = SAI_TWAMP_SESSION_ATTR_TX_PKT_PERIOD; + attr.value.u32 = entry.monitor_time; + attrs.emplace_back(attr); + } + else + { + attr.id = SAI_TWAMP_SESSION_ATTR_TWAMP_PKT_TX_MODE; + attr.value.u32 = SAI_TWAMP_PKT_TX_MODE_CONTINUOUS; + attrs.emplace_back(attr); + } + } + + attr.id = SAI_TWAMP_SESSION_ATTR_TX_INTERVAL; + attr.value.u32 = entry.tx_interval; + attrs.emplace_back(attr); + + attr.id = SAI_TWAMP_SESSION_ATTR_TIMEOUT; + attr.value.u32 = entry.timeout; + attrs.emplace_back(attr); + + attr.id = SAI_TWAMP_SESSION_ATTR_STATISTICS_INTERVAL; + attr.value.u32 = entry.statistics_interval; + attrs.emplace_back(attr); + + attr.id = SAI_TWAMP_SESSION_ATTR_SESSION_ENABLE_TRANSMIT; + attr.value.booldata = entry.admin_state; + attrs.emplace_back(attr); + } + + setSessionStatus(name, TWAMP_SESSION_STATUS_INACTIVE); + + status = sai_twamp_api->create_twamp_session(&entry.session_id, 
gSwitchId, (uint32_t)attrs.size(), attrs.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create twamp session %s, status %d", name.c_str(), status); + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_TWAMP, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + /* increase VRF reference count */ + m_vrfOrch->increaseVrfRefCount(entry.vrf_id); + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_TWAMP_ENTRY); + + increaseTwampSessionCount(); + + if (entry.role == SAI_TWAMP_SESSION_ROLE_REFLECTOR) + { + setSessionStatus(name, TWAMP_SESSION_STATUS_ACTIVE); + } + + return true; +} + +bool TwampOrch::deactivateSession(const string& name, TwampEntry& entry) +{ + SWSS_LOG_ENTER(); + sai_status_t status; + + status = sai_twamp_api->remove_twamp_session(entry.session_id); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove twamp session %s, status %d", name.c_str(), status); + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_TWAMP, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + /* decrease VRF reference count */ + m_vrfOrch->decreaseVrfRefCount(entry.vrf_id); + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_TWAMP_ENTRY); + + decreaseTwampSessionCount(); + + setSessionStatus(name, TWAMP_SESSION_STATUS_INACTIVE); + + return true; +} + +bool TwampOrch::setSessionTransmitEn(TwampEntry& entry, string admin_state) +{ + SWSS_LOG_ENTER(); + + if (entry.role != SAI_TWAMP_SESSION_ROLE_SENDER) + { + return false; + } + + auto found = session_admin_state_map.find(admin_state); + if (found == session_admin_state_map.end()) + { + SWSS_LOG_ERROR("Incorrect transmit value: %s", admin_state.c_str()); + return false; + } + + sai_attribute_t attr; + attr.id = SAI_TWAMP_SESSION_ATTR_SESSION_ENABLE_TRANSMIT; + attr.value.booldata = found->second; + sai_status_t status = 
sai_twamp_api->set_twamp_session_attribute(entry.session_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set twamp session %" PRIx64 " %s transmit, status %d", + entry.session_id, admin_state.c_str(), status); + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_TWAMP, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + return true; +} + +task_process_status TwampOrch::createEntry(const string& key, const vector& data) +{ + SWSS_LOG_ENTER(); + + if (!register_event_notif) + { + if (!registerTwampEventNotification()) + { + SWSS_LOG_ERROR("TWAMP session for %s cannot be created", key.c_str()); + return task_process_status::task_failed; + } + register_event_notif = true; + } + + if (!checkTwampSessionCount()) + { + SWSS_LOG_NOTICE("Failed to create twamp session %s: resources are not available", key.c_str()); + return task_process_status::task_failed; + } + + TwampEntry entry; + for (auto i : data) + { + try { + string attr_name = to_upper(fvField(i)); + string attr_value = fvValue(i); + + if (attr_name == TWAMP_SESSION_MODE) + { + string value = to_upper(attr_value); + if (twamp_mode_map.find(value) == twamp_mode_map.end()) + { + SWSS_LOG_ERROR("Failed to parse valid mode %s", attr_value.c_str()); + return task_process_status::task_invalid_entry; + } + entry.mode = twamp_mode_map[value]; + } + else if (attr_name == TWAMP_SESSION_ROLE) + { + string value = to_upper(attr_value); + if (twamp_role_map.find(value) == twamp_role_map.end()) + { + SWSS_LOG_ERROR("Failed to parse valid role %s", attr_value.c_str()); + return task_process_status::task_invalid_entry; + } + entry.role = twamp_role_map[value]; + } + else if (attr_name == TWAMP_SESSION_SRC_IP) + { + entry.src_ip = attr_value; + } + else if (attr_name == TWAMP_SESSION_DST_IP) + { + entry.dst_ip = attr_value; + } + else if (attr_name == TWAMP_SESSION_SRC_UDP_PORT) + { + uint16_t value = to_uint(attr_value); 
+ if (!validateUdpPort(value)) + { + SWSS_LOG_ERROR("Failed to parse valid souce udp port %d", value); + return task_process_status::task_invalid_entry; + } + entry.src_udp_port = value; + } + else if (attr_name == TWAMP_SESSION_DST_UDP_PORT) + { + uint16_t value = to_uint(attr_value); + if (!validateUdpPort(value)) + { + SWSS_LOG_ERROR("Failed to parse valid destination udp port %d", value); + return task_process_status::task_invalid_entry; + } + entry.dst_udp_port = to_uint(attr_value); + } + else if (attr_name == TWAMP_SESSION_VRF_NAME) + { + if (attr_value == "default") + { + entry.vrf_id = gVirtualRouterId; + } + else + { + if (!m_vrfOrch->isVRFexists(attr_value)) + { + SWSS_LOG_WARN("Vrf '%s' hasn't been created yet", attr_value.c_str()); + return task_process_status::task_invalid_entry; + } + entry.vrf_id = m_vrfOrch->getVRFid(attr_value); + } + } + else if (attr_name == TWAMP_SESSION_DSCP) + { + entry.dscp = to_uint(attr_value, TWAMP_SESSION_DSCP_MIN, TWAMP_SESSION_DSCP_MAX); + } + else if (attr_name == TWAMP_SESSION_TTL) + { + entry.ttl = to_uint(attr_value); + } + else if (attr_name == TWAMP_SESSION_PACKET_TIMESTAMP_FORMAT) + { + string value = to_upper(attr_value); + if (timestamp_format_map.find(value) == timestamp_format_map.end()) + { + SWSS_LOG_ERROR("Failed to parse timestamp format value: %s", attr_value.c_str()); + return task_process_status::task_invalid_entry; + } + entry.timestamp_format = timestamp_format_map[value]; + } + else if (attr_name == TWAMP_SESSION_PACKET_PADDING_SIZE) + { + entry.padding_size = to_uint(attr_value); + } + else if (attr_name == TWAMP_SESSION_TX_PACKET_COUNT) + { + if (entry.tx_mode == TWAMP_SESSION_TX_MODE_CONTINUOUS) + { + SWSS_LOG_ERROR("Configured packet count %s is conflict with monitor time", attr_value.c_str()); + return task_process_status::task_invalid_entry; + } + + entry.packet_count = to_uint(attr_value); + entry.tx_mode = TWAMP_SESSION_TX_MODE_PACKET_NUM; + } + else if (attr_name == 
TWAMP_SESSION_TX_MONITOR_TIME) + { + if (entry.tx_mode == TWAMP_SESSION_TX_MODE_PACKET_NUM) + { + SWSS_LOG_ERROR("Configured monitor time %s is conflict with packet count", attr_value.c_str()); + return task_process_status::task_invalid_entry; + } + + entry.monitor_time = to_uint(attr_value); + entry.tx_mode = TWAMP_SESSION_TX_MODE_CONTINUOUS; + } + else if (attr_name == TWAMP_SESSION_TX_INTERVAL) + { + entry.tx_interval = to_uint(attr_value); + } + else if (attr_name == TWAMP_SESSION_STATISTICS_INTERVAL) + { + entry.statistics_interval = to_uint(attr_value); + } + else if (attr_name == TWAMP_SESSION_TIMEOUT) + { + entry.timeout = to_uint(attr_value, TWAMP_SESSION_TIMEOUT_MIN, TWAMP_SESSION_TIMEOUT_MAX); + } + else if (attr_name == TWAMP_SESSION_ADMIN_STATE) + { + string value = to_upper(attr_value); + if (session_admin_state_map.find(value) == session_admin_state_map.end()) + { + SWSS_LOG_ERROR("Failed to parse transmit mode value: %s", attr_value.c_str()); + return task_process_status::task_invalid_entry; + } + entry.admin_state = session_admin_state_map[value]; + } + else if (attr_name == TWAMP_SESSION_HW_LOOKUP) + { + string value = to_upper(attr_value); + if (hw_lookup_map.find(value) == hw_lookup_map.end()) + { + SWSS_LOG_ERROR("Failed to parse hw lookup value: %s", attr_value.c_str()); + return task_process_status::task_invalid_entry; + } + entry.hw_lookup = hw_lookup_map[value]; + } + else + { + SWSS_LOG_ERROR("Failed to parse session %s configuration. Unknown attribute %s", key.c_str(), attr_name.c_str()); + return task_process_status::task_invalid_entry; + } + } + catch (const exception& e) + { + SWSS_LOG_ERROR("Failed to parse session %s attribute %s error: %s.", key.c_str(), fvField(i).c_str(), e.what()); + return task_process_status::task_invalid_entry; + } + catch (...) + { + SWSS_LOG_ERROR("Failed to parse session %s attribute %s. 
Unknown error has been occurred", key.c_str(), fvField(i).c_str()); + return task_process_status::task_failed; + } + } + + m_twampEntries.emplace(key, entry); + + if (entry.role == SAI_TWAMP_SESSION_ROLE_SENDER) + { + TwampStats hw_stats; + m_twampStatistics.emplace(key, hw_stats); + initSessionStats(key); + } + + auto &session = m_twampEntries.find(key)->second; + if (!activateSession(key, session)) + { + SWSS_LOG_ERROR("Failed to create twamp session %s", key.c_str()); + return task_process_status::task_failed; + } + + return task_process_status::task_success; +} + +task_process_status TwampOrch::updateEntry(const string& key, const vector& data) +{ + SWSS_LOG_ENTER(); + + auto it = m_twampEntries.find(key); + if (it == m_twampEntries.end()) + { + SWSS_LOG_NOTICE("Failed to set twamp session, session %s not exists", key.c_str()); + return task_process_status::task_invalid_entry; + } + TwampEntry& entry = it->second; + + for (auto i : data) + { + try { + const auto &attr_field = to_upper(fvField(i)); + const auto &attr_value = fvValue(i); + + if (attr_field == TWAMP_SESSION_ADMIN_STATE) + { + string value = to_upper(attr_value); + if (setSessionTransmitEn(entry, value)) + { + entry.admin_state = session_admin_state_map[value]; + if (entry.admin_state) + { + string running_status; + getSessionStatus(key, running_status); + if (running_status == TWAMP_SESSION_STATUS_INACTIVE) + { + removeSessionCounter(entry.session_id); + initSessionStats(key); + } + setSessionStatus(key, TWAMP_SESSION_STATUS_ACTIVE); + SWSS_LOG_NOTICE("Activated twamp session %s", key.c_str()); + } + else + { + setSessionStatus(key, TWAMP_SESSION_STATUS_INACTIVE); + SWSS_LOG_NOTICE("Deactivated twamp session %s", key.c_str()); + } + } + else + { + SWSS_LOG_ERROR("Failed to set twamp session %s transmit %s", key.c_str(), attr_value.c_str()); + } + } + else + { + SWSS_LOG_DEBUG("Ignore to parse session %s configuration attribute %s", key.c_str(), fvField(i).c_str()); + } + } + catch (const 
exception& e) + { + SWSS_LOG_ERROR("Failed to parse session %s attribute %s error: %s.", key.c_str(), fvField(i).c_str(), e.what()); + return task_process_status::task_invalid_entry; + } + catch (...) + { + SWSS_LOG_ERROR("Failed to parse session %s attribute %s. Unknown error has been occurred", key.c_str(), fvField(i).c_str()); + return task_process_status::task_failed; + } + } + + return task_process_status::task_success; +} + +task_process_status TwampOrch::deleteEntry(const string& key) +{ + SWSS_LOG_ENTER(); + + auto it = m_twampEntries.find(key); + if (it == m_twampEntries.end()) + { + SWSS_LOG_ERROR("Failed to remove non-existent twamp session %s", key.c_str()); + return task_process_status::task_invalid_entry; + } + + TwampEntry& entry = it->second; + + if (!deactivateSession(key, entry)) + { + SWSS_LOG_ERROR("Failed to remove twamp session %s", key.c_str()); + return task_process_status::task_failed; + } + + /* remove TWAMP session in STATE_DB */ + removeSessionStatus(key); + + /* remove TWAMP session maps in COUNTERS_DB */ + m_counterTwampSessionNameMapTable->hdel("", key); + + /* remove TWAMP session in COUNTER_DB */ + removeSessionCounter(entry.session_id); + + /* remove soft table in orchagent */ + m_twampEntries.erase(key); + m_twampStatistics.erase(key); + + SWSS_LOG_NOTICE("Removed twamp session %s", key.c_str()); + + return task_process_status::task_success; +} + +void TwampOrch::doTask(Consumer& consumer) +{ + SWSS_LOG_ENTER(); + + if (!m_portsOrch->allPortsReady()) + { + return; + } + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + + string key = kfvKey(t); + string op = kfvOp(t); + auto data = kfvFieldsValues(t); + task_process_status task_status = task_process_status::task_failed; + + if (op == SET_COMMAND) + { + if (!isSessionExists(key)) + { + task_status = createEntry(key, data); + } + else + { + task_status = updateEntry(key, data); + } + } + else if (op == 
DEL_COMMAND) + { + task_status = deleteEntry(key); + } + else + { + SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); + } + + /* Specifically retry the task when asked */ + if (task_status == task_process_status::task_need_retry) + { + it++; + } + else + { + it = consumer.m_toSync.erase(it); + } + } +} + +bool TwampOrch::addCounterNameMap(const string& name, const sai_object_id_t session_id) +{ + SWSS_LOG_ENTER(); + + string value; + const auto id = sai_serialize_object_id(session_id); + + if (m_vidToRidTable->hget("", id, value)) + { + vector fields; + fields.emplace_back(name, id); + m_counterTwampSessionNameMapTable->set("", fields); + + return true; + } + else + { + SWSS_LOG_NOTICE("TWAMP session counter %s already exists.", name.c_str()); + return true; + } + + return false; +} + +void TwampOrch::saveSessionStatsLatest(const sai_object_id_t session_id, const uint32_t index, const vector& stats) +{ + SWSS_LOG_ENTER(); + + vector values; + + for (const auto& it: twamp_session_stat_ids) + { + values.emplace_back(sai_serialize_twamp_session_stat(it), to_string(stats[it])); + } + + m_countersTable->set(sai_serialize_object_id(session_id) + ":INDEX:" + to_string(index), values); + + return; +} + +void TwampOrch::calculateCounters(const string& name, const uint32_t index, const vector& stats) +{ + SWSS_LOG_ENTER(); + + auto it = m_twampStatistics.find(name); + if (it == m_twampStatistics.end()) + { + SWSS_LOG_ERROR("Failed to caculate non-existent twamp session %s", name.c_str()); + return; + } + + TwampStats& total_stats = it->second; + /* packets */ + total_stats.rx_packets += stats[SAI_TWAMP_SESSION_STAT_RX_PACKETS]; + total_stats.rx_bytes += stats[SAI_TWAMP_SESSION_STAT_RX_BYTE]; + total_stats.tx_packets += stats[SAI_TWAMP_SESSION_STAT_TX_PACKETS]; + total_stats.tx_bytes += stats[SAI_TWAMP_SESSION_STAT_TX_BYTE]; + total_stats.drop_packets += stats[SAI_TWAMP_SESSION_STAT_DROP_PACKETS]; + + /* latency */ + total_stats.max_latency = 
(stats[SAI_TWAMP_SESSION_STAT_MAX_LATENCY] > total_stats.max_latency) ? + stats[SAI_TWAMP_SESSION_STAT_MAX_LATENCY] : total_stats.max_latency; + total_stats.min_latency = (index == 1) ? stats[SAI_TWAMP_SESSION_STAT_MIN_LATENCY] : + ((stats[SAI_TWAMP_SESSION_STAT_MIN_LATENCY] < total_stats.min_latency) ? + stats[SAI_TWAMP_SESSION_STAT_MIN_LATENCY] : total_stats.min_latency); + total_stats.avg_latency_total += stats[SAI_TWAMP_SESSION_STAT_AVG_LATENCY]; + total_stats.avg_latency = total_stats.avg_latency_total / index; + + /* jitter */ + total_stats.max_jitter = (stats[SAI_TWAMP_SESSION_STAT_MAX_JITTER] > total_stats.max_jitter) ? + stats[SAI_TWAMP_SESSION_STAT_MAX_JITTER] : total_stats.max_jitter; + total_stats.min_jitter = (index == 1) ? stats[SAI_TWAMP_SESSION_STAT_MIN_JITTER] : + ((stats[SAI_TWAMP_SESSION_STAT_MIN_JITTER] < total_stats.min_jitter) ? + stats[SAI_TWAMP_SESSION_STAT_MIN_JITTER] : total_stats.min_jitter); + total_stats.avg_jitter_total += stats[SAI_TWAMP_SESSION_STAT_AVG_JITTER]; + total_stats.avg_jitter = total_stats.avg_jitter_total / index; +} + +void TwampOrch::saveCountersTotal(const string& name, const sai_object_id_t session_id) +{ + SWSS_LOG_ENTER(); + + vector values; + + auto it = m_twampStatistics.find(name); + if (it == m_twampStatistics.end()) + { + SWSS_LOG_ERROR("Failed to caculate non-existent twamp session %s", + name.c_str()); + return; + } + + TwampStats& total_stats = it->second; + + values.emplace_back(sai_serialize_twamp_session_stat(SAI_TWAMP_SESSION_STAT_RX_PACKETS), to_string(total_stats.rx_packets)); + values.emplace_back(sai_serialize_twamp_session_stat(SAI_TWAMP_SESSION_STAT_RX_BYTE), to_string(total_stats.rx_bytes)); + values.emplace_back(sai_serialize_twamp_session_stat(SAI_TWAMP_SESSION_STAT_TX_PACKETS), to_string(total_stats.tx_packets)); + values.emplace_back(sai_serialize_twamp_session_stat(SAI_TWAMP_SESSION_STAT_TX_BYTE), to_string(total_stats.tx_bytes)); + 
values.emplace_back(sai_serialize_twamp_session_stat(SAI_TWAMP_SESSION_STAT_DROP_PACKETS), to_string(total_stats.drop_packets)); + values.emplace_back(sai_serialize_twamp_session_stat(SAI_TWAMP_SESSION_STAT_MAX_LATENCY), to_string(total_stats.max_latency)); + values.emplace_back(sai_serialize_twamp_session_stat(SAI_TWAMP_SESSION_STAT_MIN_LATENCY), to_string(total_stats.min_latency)); + values.emplace_back(sai_serialize_twamp_session_stat(SAI_TWAMP_SESSION_STAT_AVG_LATENCY), to_string(total_stats.avg_latency)); + values.emplace_back(sai_serialize_twamp_session_stat(SAI_TWAMP_SESSION_STAT_MAX_JITTER), to_string(total_stats.max_jitter)); + values.emplace_back(sai_serialize_twamp_session_stat(SAI_TWAMP_SESSION_STAT_MIN_JITTER), to_string(total_stats.min_jitter)); + values.emplace_back(sai_serialize_twamp_session_stat(SAI_TWAMP_SESSION_STAT_AVG_JITTER), to_string(total_stats.avg_jitter)); + + m_countersTable->set(sai_serialize_object_id(session_id), values); +} + +void TwampOrch::doTask(NotificationConsumer& consumer) +{ + SWSS_LOG_ENTER(); + + if (!m_portsOrch->allPortsReady()) + { + return; + } + + std::string op; + std::string data; + std::vector values; + + consumer.pop(op, data, values); + + if (&consumer != m_twampNotificationConsumer) + { + return; + } + + if (op == "twamp_session_event") + { + uint32_t count = 0; + sai_twamp_session_event_notification_data_t *twamp_session = nullptr; + + sai_deserialize_twamp_session_event_ntf(data, count, &twamp_session); + + for (uint32_t i = 0; i < count; i++) + { + string name; + sai_object_id_t session_id = twamp_session[i].twamp_session_id; + sai_twamp_session_state_t session_state = twamp_session[i].session_state; + uint32_t stats_index = twamp_session[i].session_stats.index; + + if (!getSessionName(session_id, name)) + { + continue; + } + + /* update state db */ + if (session_state == SAI_TWAMP_SESSION_STATE_ACTIVE) + { + setSessionStatus(name, TWAMP_SESSION_STATUS_ACTIVE); + } + else + { + setSessionStatus(name, 
TWAMP_SESSION_STATUS_INACTIVE); + } + + /* save counter db */ + if (twamp_session[i].session_stats.number_of_counters) + { + if (0 == stats_index) + { + continue; + } + else if (1 == stats_index) + { + addCounterNameMap(name, session_id); + } + + vector hw_stats; + hw_stats.resize(twamp_session_stat_ids.size()); + for (uint32_t j = 0; j < twamp_session[i].session_stats.number_of_counters; j++) + { + uint32_t counters_id = twamp_session[i].session_stats.counters_ids[j]; + auto it = find(twamp_session_stat_ids.begin(), twamp_session_stat_ids.end(), counters_id); + if (it != twamp_session_stat_ids.end()) + { + hw_stats[counters_id] = twamp_session[i].session_stats.counters[j]; + } + } + + saveSessionStatsLatest(session_id, stats_index, hw_stats); + calculateCounters(name, stats_index, hw_stats); + saveCountersTotal(name, session_id); + } + } + + sai_deserialize_free_twamp_session_event_ntf(count, twamp_session); + } +} diff --git a/orchagent/twamporch.h b/orchagent/twamporch.h new file mode 100644 index 0000000000..09134f6be4 --- /dev/null +++ b/orchagent/twamporch.h @@ -0,0 +1,136 @@ +#ifndef SWSS_TWAMPORCH_H +#define SWSS_TWAMPORCH_H + +#include "orch.h" +#include "observer.h" +#include "switchorch.h" +#include "portsorch.h" +#include "vrforch.h" +#include "ipaddress.h" +#include "table.h" +#include + +struct TwampStats +{ + uint64_t rx_packets; + uint64_t rx_bytes; + uint64_t tx_packets; + uint64_t tx_bytes; + uint64_t drop_packets; + uint64_t max_latency; + uint64_t min_latency; + uint64_t avg_latency; + uint64_t max_jitter; + uint64_t min_jitter; + uint64_t avg_jitter; + uint64_t avg_latency_total; + uint64_t avg_jitter_total; +}; + +struct TwampEntry +{ + uint8_t mode; /* twamp mode: full, light */ + uint8_t role; /* sender, reflector */ + bool admin_state; /* test packet state. 
enabled, disabled */ + bool hw_lookup; + + sai_object_id_t vrf_id; + IpAddress src_ip; + IpAddress dst_ip; + uint16_t src_udp_port; + uint16_t dst_udp_port; + uint16_t padding_size; + uint8_t dscp; + uint8_t ttl; + uint8_t timestamp_format; + + /* sender attr */ + string tx_mode; + uint32_t packet_count; + uint32_t monitor_time; /* second */ + uint32_t tx_interval; /* millisecond */ + uint32_t statistics_interval; /* millisecond */ + uint8_t timeout; /* second */ + + sai_object_id_t session_id; + + TwampEntry() + { + session_id = 0; + admin_state = false; + hw_lookup = true; + vrf_id = 0; + packet_count = 0; + monitor_time = 0; + tx_interval = 0; + statistics_interval = 0; + timeout = 0; + }; +}; + +typedef map TwampEntryTable; +typedef map TwampStatsTable; + +class TwampOrch : public Orch +{ +public: + TwampOrch(TableConnector confDbConnector, TableConnector stateDbConnector, + SwitchOrch *switchOrch, PortsOrch *portOrch, VRFOrch *vrfOrch); + + ~TwampOrch() + { + // do nothing + } + + bool isSessionExists(const string&); + bool getSessionName(const sai_object_id_t oid, string& name); + +private: + SwitchOrch *m_switchOrch; + PortsOrch *m_portsOrch; + VRFOrch *m_vrfOrch; + NotificationConsumer* m_twampNotificationConsumer; + bool register_event_notif; + + unsigned int m_twampSessionCount; + unsigned int m_maxTwampSessionCount; + + TwampEntryTable m_twampEntries; + TwampStatsTable m_twampStatistics; + + shared_ptr m_asicDb; + shared_ptr m_countersDb; + unique_ptr
m_counterTwampSessionNameMapTable; + unique_ptr
m_countersTable; + unique_ptr
m_vidToRidTable; + Table m_stateDbTwampTable; + + bool validateUdpPort(uint16_t udp_port); + void increaseTwampSessionCount(void); + void decreaseTwampSessionCount(void); + bool checkTwampSessionCount(void); + + void setSessionStatus(const string&, const string&); + bool getSessionStatus(const string&, string&); + void removeSessionStatus(const string&); + void removeSessionCounter(const sai_object_id_t); + void initSessionStats(const string&); + + bool registerTwampEventNotification(void); + bool activateSession(const string&, TwampEntry&); + bool deactivateSession(const string&, TwampEntry&); + bool setSessionTransmitEn(TwampEntry&, string test_start); + + task_process_status createEntry(const string&, const vector&); + task_process_status updateEntry(const string&, const vector&); + task_process_status deleteEntry(const string&); + void doTask(Consumer& consumer); + + bool addCounterNameMap(const string&, const sai_object_id_t session_id); + void saveSessionStatsLatest(const sai_object_id_t session_id, const uint32_t index, const vector& stats); + void calculateCounters(const string&, const uint32_t index, const vector& stats); + void saveCountersTotal(const string&, const sai_object_id_t session_id); + void doTask(NotificationConsumer& consumer); +}; + +#endif /* SWSS_TWAMPORCH_H */ diff --git a/orchagent/vnetorch.cpp b/orchagent/vnetorch.cpp index a3acf10e0e..b976c728a7 100644 --- a/orchagent/vnetorch.cpp +++ b/orchagent/vnetorch.cpp @@ -429,6 +429,7 @@ bool VNetOrch::addOperation(const Request& request) uint32_t vni=0; string tunnel; string scope; + swss::MacAddress overlay_dmac; for (const auto& name: request.getAttrFieldNames()) { @@ -460,6 +461,10 @@ bool VNetOrch::addOperation(const Request& request) { advertise_prefix = request.getAttrBool("advertise_prefix"); } + else if (name == "overlay_dmac") + { + overlay_dmac = request.getAttrMacAddress("overlay_dmac"); + } else { SWSS_LOG_INFO("Unknown attribute: %s", name.c_str()); @@ -486,7 +491,7 @@ bool 
VNetOrch::addOperation(const Request& request) if (it == std::end(vnet_table_)) { - VNetInfo vnet_info = { tunnel, vni, peer_list, scope, advertise_prefix }; + VNetInfo vnet_info = { tunnel, vni, peer_list, scope, advertise_prefix, overlay_dmac }; obj = createObject(vnet_name, vnet_info, attrs); create = true; @@ -504,9 +509,14 @@ bool VNetOrch::addOperation(const Request& request) else { SWSS_LOG_NOTICE("VNET '%s' already exists ", vnet_name.c_str()); + if (!!overlay_dmac && overlay_dmac != it->second->getOverlayDMac()) + { + it->second->setOverlayDMac(overlay_dmac); + VNetRouteOrch* vnet_route_orch = gDirectory.get(); + vnet_route_orch->updateAllMonitoringSession(vnet_name); + } } } - if (create) { vnet_table_[vnet_name] = std::move(obj); @@ -715,8 +725,11 @@ VNetRouteOrch::VNetRouteOrch(DBConnector *db, vector &tableNames, VNetOr handler_map_.insert(handler_pair(APP_VNET_RT_TUNNEL_TABLE_NAME, &VNetRouteOrch::handleTunnel)); state_db_ = shared_ptr(new DBConnector("STATE_DB", 0)); + app_db_ = shared_ptr(new DBConnector("APPL_DB", 0)); + state_vnet_rt_tunnel_table_ = unique_ptr
(new Table(state_db_.get(), STATE_VNET_RT_TUNNEL_TABLE_NAME)); state_vnet_rt_adv_table_ = unique_ptr
(new Table(state_db_.get(), STATE_ADVERTISE_NETWORK_TABLE_NAME)); + monitor_session_producer_ = unique_ptr
(new Table(app_db_.get(), APP_VNET_MONITOR_TABLE_NAME)); gBfdOrch->attach(this); } @@ -732,7 +745,7 @@ sai_object_id_t VNetRouteOrch::getNextHopGroupId(const string& vnet, const NextH return syncd_nexthop_groups_[vnet][nexthops].next_hop_group_id; } -bool VNetRouteOrch::addNextHopGroup(const string& vnet, const NextHopGroupKey &nexthops, VNetVrfObject *vrf_obj) +bool VNetRouteOrch::addNextHopGroup(const string& vnet, const NextHopGroupKey &nexthops, VNetVrfObject *vrf_obj, const string& monitoring) { SWSS_LOG_ENTER(); @@ -753,7 +766,7 @@ bool VNetRouteOrch::addNextHopGroup(const string& vnet, const NextHopGroupKey &n for (auto it : next_hop_set) { nh_seq_id_in_nhgrp[it] = ++seq_id; - if (nexthop_info_[vnet].find(it.ip_address) != nexthop_info_[vnet].end() && nexthop_info_[vnet][it.ip_address].bfd_state != SAI_BFD_SESSION_STATE_UP) + if (monitoring != "custom" && nexthop_info_[vnet].find(it.ip_address) != nexthop_info_[vnet].end() && nexthop_info_[vnet][it.ip_address].bfd_state != SAI_BFD_SESSION_STATE_UP) { continue; } @@ -892,9 +905,178 @@ bool VNetRouteOrch::removeNextHopGroup(const string& vnet, const NextHopGroupKey return true; } +bool VNetRouteOrch::createNextHopGroup(const string& vnet, + NextHopGroupKey& nexthops, + VNetVrfObject *vrf_obj, + const string& monitoring) +{ + + if (nexthops.getSize() == 0) + { + return true; + } + else if (nexthops.getSize() == 1) + { + NextHopKey nexthop(nexthops.to_string(), true); + NextHopGroupInfo next_hop_group_entry; + next_hop_group_entry.next_hop_group_id = vrf_obj->getTunnelNextHop(nexthop); + next_hop_group_entry.ref_count = 0; + if (monitoring == "custom" || nexthop_info_[vnet].find(nexthop.ip_address) == nexthop_info_[vnet].end() || nexthop_info_[vnet][nexthop.ip_address].bfd_state == SAI_BFD_SESSION_STATE_UP) + { + next_hop_group_entry.active_members[nexthop] = SAI_NULL_OBJECT_ID; + } + syncd_nexthop_groups_[vnet][nexthops] = next_hop_group_entry; + } + else + { + if (!addNextHopGroup(vnet, nexthops, vrf_obj, 
monitoring)) + { + SWSS_LOG_ERROR("Failed to create next hop group %s", nexthops.to_string().c_str()); + return false; + } + } + return true; +} + +NextHopGroupKey VNetRouteOrch::getActiveNHSet(const string& vnet, + NextHopGroupKey& nexthops, + const IpPrefix& ipPrefix) +{ + // This function takes a nexthop group key and iterates over the nexthops in that group + // to identify the ones which are active based on their monitor session state. + // These next hops are collected into another next hop group key called nhg_custom and returned. + NextHopGroupKey nhg_custom("", true); + set next_hop_set = nexthops.getNextHops(); + for (auto it : next_hop_set) + { + if(monitor_info_.find(vnet) != monitor_info_.end() && + monitor_info_[vnet].find(ipPrefix) != monitor_info_[vnet].end()) + { + for (auto monitor : monitor_info_[vnet][ipPrefix]) + { + if (monitor.second.endpoint == it) + { + if (monitor.second.state == MONITOR_SESSION_STATE_UP) + { + // monitor session exists and is up + nhg_custom.add(it); + + } + continue; + } + } + } + } + return nhg_custom; +} + +bool VNetRouteOrch::selectNextHopGroup(const string& vnet, + NextHopGroupKey& nexthops_primary, + NextHopGroupKey& nexthops_secondary, + const string& monitoring, + IpPrefix& ipPrefix, + VNetVrfObject *vrf_obj, + NextHopGroupKey& nexthops_selected, + const map& monitors) +{ + // This function returns the next hop group which is to be used to in the hardware. + // for non priority tunnel routes, this would return nexthops_primary or its subset if + // BFD sessions for the endpoits in the NHG are up. + // For priority tunnel scenario, it sets up endpoint monitors for both primary and secondary. + // This is followed by an attempt to create a NHG which can be subset of nexthops_primary + // depending on the endpoint monitor state. If no NHG from primary is created, we attempt + // the same for secondary. 
+ if(nexthops_secondary.getSize() != 0 && monitoring == "custom") + { + auto it_route = syncd_tunnel_routes_[vnet].find(ipPrefix); + if (it_route == syncd_tunnel_routes_[vnet].end()) + { + setEndpointMonitor(vnet, monitors, nexthops_primary, monitoring, ipPrefix); + setEndpointMonitor(vnet, monitors, nexthops_secondary, monitoring, ipPrefix); + } + else + { + if (it_route->second.primary != nexthops_primary) + { + setEndpointMonitor(vnet, monitors, nexthops_primary, monitoring, ipPrefix); + } + if (it_route->second.secondary != nexthops_secondary) + { + setEndpointMonitor(vnet, monitors, nexthops_secondary, monitoring, ipPrefix); + } + nexthops_selected = it_route->second.nhg_key; + return true; + } + + NextHopGroupKey nhg_custom = getActiveNHSet( vnet, nexthops_primary, ipPrefix); + if (!hasNextHopGroup(vnet, nhg_custom)) + { + if (!createNextHopGroup(vnet, nhg_custom, vrf_obj, monitoring)) + { + SWSS_LOG_WARN("Failed to create Primary based custom next hop group. Cannot proceed."); + delEndpointMonitor(vnet, nexthops_primary, ipPrefix); + delEndpointMonitor(vnet, nexthops_secondary, ipPrefix); + monitor_info_[vnet].erase(ipPrefix); + + return false; + } + } + if (nhg_custom.getSize() > 0 ) + { + SWSS_LOG_INFO(" Created Primary based custom next hop group.%s", nhg_custom.to_string().c_str() ); + nexthops_selected = nhg_custom; + return true; + } + NextHopGroupKey nhg_custom_sec = getActiveNHSet( vnet, nexthops_secondary, ipPrefix); + + if (!hasNextHopGroup(vnet, nhg_custom_sec)) + { + if (!createNextHopGroup(vnet, nhg_custom_sec, vrf_obj, monitoring)) + { + SWSS_LOG_WARN("Failed to create secondary based custom next hop group. 
Cannot proceed."); + delEndpointMonitor(vnet, nexthops_primary, ipPrefix); + delEndpointMonitor(vnet, nexthops_secondary, ipPrefix); + monitor_info_[vnet].erase(ipPrefix); + + return false; + } + } + if (nhg_custom_sec.getSize() > 0 ) + { + SWSS_LOG_INFO(" Created Secondary based custom next hop group.(%s).", nhg_custom_sec.to_string().c_str() ); + nexthops_selected = nhg_custom_sec; + return true; + } + // nhg_custom is empty. we shall create a dummy enpty NHG for book keeping. + if (!hasNextHopGroup(vnet, nhg_custom) && !hasNextHopGroup(vnet, nhg_custom_sec) ) + { + NextHopGroupInfo next_hop_group_entry; + next_hop_group_entry.next_hop_group_id = SAI_NULL_OBJECT_ID; + next_hop_group_entry.ref_count = 0; + syncd_nexthop_groups_[vnet][nhg_custom] = next_hop_group_entry; + } + nexthops_selected = nhg_custom; + return true; + } + else if (!hasNextHopGroup(vnet, nexthops_primary)) + { + SWSS_LOG_INFO("Creating next hop group %s", nexthops_primary.to_string().c_str()); + setEndpointMonitor(vnet, monitors, nexthops_primary, monitoring, ipPrefix); + if (!createNextHopGroup(vnet, nexthops_primary, vrf_obj, monitoring)) + { + delEndpointMonitor(vnet, nexthops_primary, ipPrefix); + return false; + } + } + nexthops_selected = nexthops_primary; + return true; +} + template<> bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op, string& profile, + const string& monitoring, NextHopGroupKey& nexthops_secondary, + const IpPrefix& adv_prefix, const map& monitors) { SWSS_LOG_ENTER(); @@ -932,33 +1114,15 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP if (op == SET_COMMAND) { - sai_object_id_t nh_id; - if (!hasNextHopGroup(vnet, nexthops)) + sai_object_id_t nh_id = SAI_NULL_OBJECT_ID; + NextHopGroupKey active_nhg("", true); + if (!selectNextHopGroup(vnet, nexthops, nexthops_secondary, monitoring, ipPrefix, vrf_obj, active_nhg, monitors)) { - setEndpointMonitor(vnet, monitors, nexthops); - if 
(nexthops.getSize() == 1) - { - NextHopKey nexthop(nexthops.to_string(), true); - NextHopGroupInfo next_hop_group_entry; - next_hop_group_entry.next_hop_group_id = vrf_obj->getTunnelNextHop(nexthop); - next_hop_group_entry.ref_count = 0; - if (nexthop_info_[vnet].find(nexthop.ip_address) == nexthop_info_[vnet].end() || nexthop_info_[vnet][nexthop.ip_address].bfd_state == SAI_BFD_SESSION_STATE_UP) - { - next_hop_group_entry.active_members[nexthop] = SAI_NULL_OBJECT_ID; - } - syncd_nexthop_groups_[vnet][nexthops] = next_hop_group_entry; - } - else - { - if (!addNextHopGroup(vnet, nexthops, vrf_obj)) - { - delEndpointMonitor(vnet, nexthops); - SWSS_LOG_ERROR("Failed to create next hop group %s", nexthops.to_string().c_str()); - return false; - } - } + return true; } - nh_id = syncd_nexthop_groups_[vnet][nexthops].next_hop_group_id; + + // note: nh_id can be SAI_NULL_OBJECT_ID when active_nhg is empty. + nh_id = syncd_nexthop_groups_[vnet][active_nhg].next_hop_group_id; auto it_route = syncd_tunnel_routes_[vnet].find(ipPrefix); for (auto vr_id : vr_set) @@ -966,11 +1130,11 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP bool route_status = true; // Remove route if the nexthop group has no active endpoint - if (syncd_nexthop_groups_[vnet][nexthops].active_members.empty()) + if (syncd_nexthop_groups_[vnet][active_nhg].active_members.empty()) { if (it_route != syncd_tunnel_routes_[vnet].end()) { - NextHopGroupKey nhg = it_route->second; + NextHopGroupKey nhg = it_route->second.nhg_key; // Remove route when updating from a nhg with active member to another nhg without if (!syncd_nexthop_groups_[vnet][nhg].active_members.empty()) { @@ -986,7 +1150,7 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP } else { - NextHopGroupKey nhg = it_route->second; + NextHopGroupKey nhg = it_route->second.nhg_key; if (syncd_nexthop_groups_[vnet][nhg].active_members.empty()) { route_status = add_route(vr_id, pfx, nh_id); @@ -1002,52 +1166,110 @@ bool 
VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP { SWSS_LOG_ERROR("Route add/update failed for %s, vr_id '0x%" PRIx64, ipPrefix.to_string().c_str(), vr_id); /* Clean up the newly created next hop group entry */ - if (nexthops.getSize() > 1) + if (active_nhg.getSize() > 1) { - removeNextHopGroup(vnet, nexthops, vrf_obj); + removeNextHopGroup(vnet, active_nhg, vrf_obj); } return false; } } - - if (it_route != syncd_tunnel_routes_[vnet].end()) + bool route_updated = false; + bool priority_route_updated = false; + if (it_route != syncd_tunnel_routes_[vnet].end() && + ((monitoring == "" && it_route->second.nhg_key != nexthops) || + (monitoring == "custom" && (it_route->second.primary != nexthops || it_route->second.secondary != nexthops_secondary)))) { - // In case of updating an existing route, decrease the reference count for the previous nexthop group - NextHopGroupKey nhg = it_route->second; - if(--syncd_nexthop_groups_[vnet][nhg].ref_count == 0) + route_updated = true; + NextHopGroupKey nhg = it_route->second.nhg_key; + if (monitoring == "custom") { - if (nhg.getSize() > 1) + // if the previously active NHG is same as the newly created active NHG.case of primary secondary swap or + //when primary is active and secondary is changed or vice versa. In these cases we dont remove the NHG + // but only remove the monitors for the set which has changed. 
+ if (it_route->second.primary != nexthops) { - removeNextHopGroup(vnet, nhg, vrf_obj); + delEndpointMonitor(vnet, it_route->second.primary, ipPrefix); } - else + if (it_route->second.secondary != nexthops_secondary) { - syncd_nexthop_groups_[vnet].erase(nhg); - NextHopKey nexthop(nhg.to_string(), true); - vrf_obj->removeTunnelNextHop(nexthop); + delEndpointMonitor(vnet, it_route->second.secondary, ipPrefix); + } + if (monitor_info_[vnet][ipPrefix].empty()) + { + monitor_info_[vnet].erase(ipPrefix); } - delEndpointMonitor(vnet, nhg); + priority_route_updated = true; } else { - syncd_nexthop_groups_[vnet][nhg].tunnel_routes.erase(ipPrefix); + // In case of updating an existing route, decrease the reference count for the previous nexthop group + if (--syncd_nexthop_groups_[vnet][nhg].ref_count == 0) + { + if (nhg.getSize() > 1) + { + removeNextHopGroup(vnet, nhg, vrf_obj); + } + else + { + syncd_nexthop_groups_[vnet].erase(nhg); + if(nhg.getSize() == 1) + { + NextHopKey nexthop(nhg.to_string(), true); + vrf_obj->removeTunnelNextHop(nexthop); + } + } + if (monitoring != "custom") + { + delEndpointMonitor(vnet, nhg, ipPrefix); + } + } + else + { + syncd_nexthop_groups_[vnet][nhg].tunnel_routes.erase(ipPrefix); + } + vrf_obj->removeRoute(ipPrefix); + vrf_obj->removeProfile(ipPrefix); } - vrf_obj->removeRoute(ipPrefix); - vrf_obj->removeProfile(ipPrefix); } - - syncd_nexthop_groups_[vnet][nexthops].tunnel_routes.insert(ipPrefix); - - syncd_tunnel_routes_[vnet][ipPrefix] = nexthops; - syncd_nexthop_groups_[vnet][nexthops].ref_count++; - vrf_obj->addRoute(ipPrefix, nexthops); - if (!profile.empty()) { vrf_obj->addProfile(ipPrefix, profile); } + if (it_route == syncd_tunnel_routes_[vnet].end() || route_updated) + { + syncd_nexthop_groups_[vnet][active_nhg].tunnel_routes.insert(ipPrefix); + VNetTunnelRouteEntry tunnel_route_entry; + tunnel_route_entry.nhg_key = active_nhg; + tunnel_route_entry.primary = nexthops; + tunnel_route_entry.secondary = nexthops_secondary; + 
syncd_tunnel_routes_[vnet][ipPrefix] = tunnel_route_entry; + syncd_nexthop_groups_[vnet][active_nhg].ref_count++; + + if (priority_route_updated) + { + MonitorUpdate update; + update.prefix = ipPrefix; + update.state = MONITOR_SESSION_STATE_UNKNOWN; + update.vnet = vnet; + updateVnetTunnelCustomMonitor(update); + return true; + } - postRouteState(vnet, ipPrefix, nexthops, profile); + if (adv_prefix.to_string() != ipPrefix.to_string() && prefix_to_adv_prefix_.find(ipPrefix) == prefix_to_adv_prefix_.end()) + { + prefix_to_adv_prefix_[ipPrefix] = adv_prefix; + if (adv_prefix_refcount_.find(adv_prefix) == adv_prefix_refcount_.end()) + { + adv_prefix_refcount_[adv_prefix] = 0; + } + if(active_nhg.getSize() > 0) + { + adv_prefix_refcount_[adv_prefix] += 1; + } + } + vrf_obj->addRoute(ipPrefix, active_nhg); + } + postRouteState(vnet, ipPrefix, active_nhg, profile); } else if (op == DEL_COMMAND) { @@ -1058,8 +1280,8 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP ipPrefix.to_string().c_str()); return true; } - NextHopGroupKey nhg = it_route->second; - + NextHopGroupKey nhg = it_route->second.nhg_key; + auto last_nhg_size = nhg.getSize(); for (auto vr_id : vr_set) { // If an nhg has no active member, the route should already be removed @@ -1082,15 +1304,29 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP else { syncd_nexthop_groups_[vnet].erase(nhg); - NextHopKey nexthop(nhg.to_string(), true); - vrf_obj->removeTunnelNextHop(nexthop); + // We need to check specifically if there is only one next hop active. + // In case of Priority routes we can end up in a situation where the active NHG has 0 nexthops. 
+ if(nhg.getSize() == 1) + { + NextHopKey nexthop(nhg.to_string(), true); + vrf_obj->removeTunnelNextHop(nexthop); + } + } + if (monitor_info_[vnet].find(ipPrefix) == monitor_info_[vnet].end()) + { + delEndpointMonitor(vnet, nhg, ipPrefix); } - delEndpointMonitor(vnet, nhg); } else { syncd_nexthop_groups_[vnet][nhg].tunnel_routes.erase(ipPrefix); } + if (monitor_info_[vnet].find(ipPrefix) != monitor_info_[vnet].end()) + { + delEndpointMonitor(vnet, it_route->second.primary, ipPrefix); + delEndpointMonitor(vnet, it_route->second.secondary, ipPrefix); + monitor_info_[vnet].erase(ipPrefix); + } syncd_tunnel_routes_[vnet].erase(ipPrefix); if (syncd_tunnel_routes_[vnet].empty()) @@ -1102,8 +1338,21 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP vrf_obj->removeProfile(ipPrefix); removeRouteState(vnet, ipPrefix); - } + if (prefix_to_adv_prefix_.find(ipPrefix) != prefix_to_adv_prefix_.end()) + { + auto adv_pfx = prefix_to_adv_prefix_[ipPrefix]; + prefix_to_adv_prefix_.erase(ipPrefix); + if (last_nhg_size > 0) + { + adv_prefix_refcount_[adv_pfx] -= 1; + if (adv_prefix_refcount_[adv_pfx] == 0) + { + adv_prefix_refcount_.erase(adv_pfx); + } + } + } + } return true; } @@ -1168,7 +1417,7 @@ bool VNetRouteOrch::updateTunnelRoute(const string& vnet, IpPrefix& ipPrefix, ipPrefix.to_string().c_str()); return true; } - NextHopGroupKey nhg = it_route->second; + NextHopGroupKey nhg = it_route->second.nhg_key; for (auto vr_id : vr_set) { @@ -1567,11 +1816,12 @@ void VNetRouteOrch::createBfdSession(const string& vnet, const NextHopKey& endpo FieldValueTuple fvTuple("local_addr", src_ip.to_string()); data.push_back(fvTuple); - - data.emplace_back("multihop", "true"); - + data.emplace_back("multihop", "true"); + // The BFD sessions established by the Vnet routes with monitoring need to be brought down + // when the device goes into TSA. 
The following parameter ensures that these session are + // brought down while transitioning to TSA and brought back up when transitioning to TSB. + data.emplace_back("shutdown_bfd_during_tsa", "true"); bfd_session_producer_.set(key, data); - bfd_sessions_[monitor_addr].bfd_state = SAI_BFD_SESSION_STATE_DOWN; } @@ -1603,7 +1853,78 @@ void VNetRouteOrch::removeBfdSession(const string& vnet, const NextHopKey& endpo bfd_sessions_.erase(monitor_addr); } -void VNetRouteOrch::setEndpointMonitor(const string& vnet, const map& monitors, NextHopGroupKey& nexthops) +void VNetRouteOrch::updateAllMonitoringSession(const string& vnet) +{ + SWSS_LOG_ENTER(); + vector data; + auto *vnet_obj = vnet_orch_->getTypePtr(vnet); + auto overlay_dmac = vnet_obj->getOverlayDMac(); + SWSS_LOG_INFO ("updating overlay dmac value to %s", overlay_dmac.to_string().c_str()); + + if (monitor_info_.find(vnet) != monitor_info_.end()) + { + for (auto prefix : monitor_info_[vnet]) + { + for (auto monitor_addr : monitor_info_[vnet][prefix.first]) + { + + string key = monitor_addr.first.to_string() + ":" + prefix.first.to_string(); + SWSS_LOG_INFO ("updating the overlay dmac of %s", key.c_str()); + + FieldValueTuple fvTuple1("packet_type", "vxlan"); + data.push_back(fvTuple1); + + FieldValueTuple fvTuple3("overlay_dmac", overlay_dmac.to_string()); + data.push_back(fvTuple3); + + monitor_session_producer_->set(key, data); + } + } + } +} + +void VNetRouteOrch::createMonitoringSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& monitor_addr, IpPrefix& ipPrefix) +{ + SWSS_LOG_ENTER(); + + vector data; + auto *vnet_obj = vnet_orch_->getTypePtr(vnet); + + auto overlay_dmac = vnet_obj->getOverlayDMac(); + string key = monitor_addr.to_string() + ":" + ipPrefix.to_string(); + FieldValueTuple fvTuple1("packet_type", "vxlan"); + data.push_back(fvTuple1); + + FieldValueTuple fvTuple3("overlay_dmac", overlay_dmac.to_string()); + data.push_back(fvTuple3); + + monitor_session_producer_->set(key, 
data); + + MonitorSessionInfo info = monitor_info_[vnet][ipPrefix][monitor_addr]; + info.endpoint = endpoint; + info.ref_count = 1; + info.state = MONITOR_SESSION_STATE_DOWN; + monitor_info_[vnet][ipPrefix][monitor_addr] = info; + +} + +void VNetRouteOrch::removeMonitoringSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& monitor_addr, IpPrefix& ipPrefix) +{ + SWSS_LOG_ENTER(); + + if (monitor_info_[vnet].find(ipPrefix) == monitor_info_[vnet].end() || + monitor_info_[vnet][ipPrefix].find(monitor_addr) == monitor_info_[vnet][ipPrefix].end()) + { + SWSS_LOG_NOTICE("Monitor session for prefix %s endpoint %s does not exist", ipPrefix.to_string().c_str(), endpoint.to_string().c_str()); + } + + string key = monitor_addr.to_string() + ":" + ipPrefix.to_string(); + + monitor_session_producer_->del(key); + monitor_info_[vnet][ipPrefix].erase(monitor_addr); +} + +void VNetRouteOrch::setEndpointMonitor(const string& vnet, const map& monitors, NextHopGroupKey& nexthops, const string& monitoring, IpPrefix& ipPrefix) { SWSS_LOG_ENTER(); @@ -1611,28 +1932,114 @@ void VNetRouteOrch::setEndpointMonitor(const string& vnet, const map next_hop_set = nexthops.getNextHops(); + if (next_hop_set.find(nh) != next_hop_set.end()) { - createBfdSession(vnet, nh, monitor_ip); + if (monitoring == "custom") + { + if (monitor_info_[vnet].find(ipPrefix) == monitor_info_[vnet].end() || + monitor_info_[vnet][ipPrefix].find(monitor_ip) == monitor_info_[vnet][ipPrefix].end()) + { + createMonitoringSession(vnet, nh, monitor_ip, ipPrefix); + } + else + { + SWSS_LOG_INFO("Monitoring session for prefix %s endpoint %s, monitor %s already exists", ipPrefix.to_string().c_str(), + nh.to_string().c_str(), monitor_ip.to_string().c_str()); + monitor_info_[vnet][ipPrefix][monitor_ip].ref_count += 1; + } + } + else + { + if (nexthop_info_[vnet].find(nh.ip_address) == nexthop_info_[vnet].end()) + { + createBfdSession(vnet, nh, monitor_ip); + } + nexthop_info_[vnet][nh.ip_address].ref_count++; + 
} } - - nexthop_info_[vnet][nh.ip_address].ref_count++; } } -void VNetRouteOrch::delEndpointMonitor(const string& vnet, NextHopGroupKey& nexthops) +void VNetRouteOrch::delEndpointMonitor(const string& vnet, NextHopGroupKey& nexthops, IpPrefix& ipPrefix) { SWSS_LOG_ENTER(); std::set nhks = nexthops.getNextHops(); + bool is_custom_monitoring = false; + if (monitor_info_[vnet].find(ipPrefix) != monitor_info_[vnet].end()) + { + is_custom_monitoring = true; + } for (auto nhk: nhks) { IpAddress ip = nhk.ip_address; - if (nexthop_info_[vnet].find(ip) != nexthop_info_[vnet].end()) { - if (--nexthop_info_[vnet][ip].ref_count == 0) + if (is_custom_monitoring) + { + for ( auto monitor : monitor_info_[vnet][ipPrefix]) { - IpAddress monitor_addr = nexthop_info_[vnet][ip].monitor_addr; - removeBfdSession(vnet, nhk, monitor_addr); + if (monitor.second.endpoint == nhk) + { + if (--monitor_info_[vnet][ipPrefix][monitor.first].ref_count == 0) + { + removeMonitoringSession(vnet, nhk, monitor.first, ipPrefix); + break; + } + } + } + } + else + { + if (nexthop_info_[vnet].find(ip) != nexthop_info_[vnet].end()) { + if (--nexthop_info_[vnet][ip].ref_count == 0) + { + IpAddress monitor_addr = nexthop_info_[vnet][ip].monitor_addr; + removeBfdSession(vnet, nhk, monitor_addr); + } + } + } + } +} + +void VNetRouteOrch::updateMonitorState(string& op, const IpPrefix& prefix, const IpAddress& monitor, string state) +{ + SWSS_LOG_ENTER(); + if( op == SET_COMMAND) + { + for (auto iter : monitor_info_) + { + std::string vnet = iter.first; + if (monitor_info_[vnet].find(prefix) != monitor_info_[vnet].end() && + monitor_info_[vnet][prefix].find(monitor) != monitor_info_[vnet][prefix].end()) + { + if (state =="up") + { + if (monitor_info_[vnet][prefix][monitor].state != MONITOR_SESSION_STATE_UP) + { + SWSS_LOG_NOTICE("Monitor session state for %s|%s (%s) changed from down to up", prefix.to_string().c_str(), + monitor.to_string().c_str(), 
monitor_info_[vnet][prefix][monitor].endpoint.ip_address.to_string().c_str()); + struct MonitorUpdate status_update; + status_update.state = MONITOR_SESSION_STATE_UP; + status_update.prefix = prefix; + status_update.monitor = monitor; + status_update.vnet = vnet; + updateVnetTunnelCustomMonitor(status_update); + } + } + else if (state =="down") + { + if (monitor_info_[vnet][prefix][monitor].state != MONITOR_SESSION_STATE_DOWN) + { + SWSS_LOG_NOTICE("Monitor session state for %s|%s (%s) changed from up to down", prefix.to_string().c_str(), + monitor.to_string().c_str(), monitor_info_[vnet][prefix][monitor].endpoint.ip_address.to_string().c_str()); + struct MonitorUpdate status_update; + status_update.state = MONITOR_SESSION_STATE_DOWN; + status_update.prefix = prefix; + status_update.monitor = monitor; + status_update.vnet = vnet; + updateVnetTunnelCustomMonitor(status_update); + } + } } } } @@ -1642,11 +2049,11 @@ void VNetRouteOrch::postRouteState(const string& vnet, IpPrefix& ipPrefix, NextH { const string state_db_key = vnet + state_db_key_delimiter + ipPrefix.to_string(); vector fvVector; - NextHopGroupInfo& nhg_info = syncd_nexthop_groups_[vnet][nexthops]; string route_state = nhg_info.active_members.empty() ? 
"inactive" : "active"; string ep_str = ""; int idx_ep = 0; + for (auto nh_pair : nhg_info.active_members) { NextHopKey nh = nh_pair.first; @@ -1659,15 +2066,26 @@ void VNetRouteOrch::postRouteState(const string& vnet, IpPrefix& ipPrefix, NextH state_vnet_rt_tunnel_table_->set(state_db_key, fvVector); + auto prefix_to_use = ipPrefix; + if (prefix_to_adv_prefix_.find(ipPrefix) != prefix_to_adv_prefix_.end()) + { + route_state = ""; + auto adv_pfx = prefix_to_adv_prefix_[ipPrefix]; + if (adv_prefix_refcount_[adv_pfx] == 1) + { + route_state = "active"; + prefix_to_use = adv_pfx; + } + } if (vnet_orch_->getAdvertisePrefix(vnet)) { if (route_state == "active") { - addRouteAdvertisement(ipPrefix, profile); + addRouteAdvertisement(prefix_to_use, profile); } - else + else if (route_state == "inactive") { - removeRouteAdvertisement(ipPrefix); + removeRouteAdvertisement(prefix_to_use); } } } @@ -1676,7 +2094,19 @@ void VNetRouteOrch::removeRouteState(const string& vnet, IpPrefix& ipPrefix) { const string state_db_key = vnet + state_db_key_delimiter + ipPrefix.to_string(); state_vnet_rt_tunnel_table_->del(state_db_key); - removeRouteAdvertisement(ipPrefix); + + if(prefix_to_adv_prefix_.find(ipPrefix) !=prefix_to_adv_prefix_.end()) + { + auto adv_pfx = prefix_to_adv_prefix_[ipPrefix]; + if(adv_prefix_refcount_[adv_pfx] == 1) + { + removeRouteAdvertisement(adv_pfx); + } + } + else + { + removeRouteAdvertisement(ipPrefix); + } } void VNetRouteOrch::addRouteAdvertisement(IpPrefix& ipPrefix, string& profile) @@ -1730,7 +2160,7 @@ void VNetRouteOrch::updateVnetTunnel(const BfdUpdate& update) size_t found_vrf = key.find(state_db_key_delimiter); if (found_vrf == string::npos) { - SWSS_LOG_ERROR("Failed to parse key %s, no vrf is given", key.c_str()); + SWSS_LOG_WARN("Failed to parse key %s, no vrf is given", key.c_str()); return; } @@ -1907,6 +2337,229 @@ void VNetRouteOrch::updateVnetTunnel(const BfdUpdate& update) } } +void VNetRouteOrch::updateVnetTunnelCustomMonitor(const 
MonitorUpdate& update) +{ + SWSS_LOG_ENTER(); +// This function recieves updates from the MonitorOrch for the endpoints state. +// Based on the state of the endpoints for a particular route, this function attempts +// to construct the primary next hop group. if it fails to do so,it attempts to create +// the secondary next hop group. After that it applies the next hop group and deletes +// the old next hop group. +// This function is also called in the case when the route configuration is updated to +// apply the new next hop group. In this case, the caller sets the state to +// MONITOR_SESSION_STATE_UNKNOWN and config_update and updateRoute are set to true. +// This function should never recieve MONITOR_SESSION_STATE_UNKNOWN from MonitorOrch. + + auto prefix = update.prefix; + auto state = update.state; + auto monitor = update.monitor; + auto vnet = update.vnet; + bool updateRoute = false; + bool config_update = false; + if (state != MONITOR_SESSION_STATE_UNKNOWN) + { + monitor_info_[vnet][prefix][monitor].state = state; + } + else + { + // we are coming here as a result of route config update. We need to repost the route if applicable. + updateRoute = true; + config_update = true; + } + + auto route = syncd_tunnel_routes_[vnet].find(prefix); + if (route == syncd_tunnel_routes_[vnet].end()) + { + SWSS_LOG_ERROR("Unexpected! 
Monitor Update for absent route."); + return; + + } + auto *vrf_obj = vnet_orch_->getTypePtr(vnet); + set vr_set; + + auto l_fn = [&] (const string& vnet) { + auto *vnet_obj = vnet_orch_->getTypePtr(vnet); + sai_object_id_t vr_id = vnet_obj->getVRidIngress(); + vr_set.insert(vr_id); + }; + + l_fn(vnet); + + auto primary = route->second.primary; + auto secondary = route->second.secondary; + auto active_nhg = route->second.nhg_key; + NextHopGroupKey nhg_custom("", true); + sai_ip_prefix_t pfx; + copy(pfx, prefix); + NextHopGroupKey nhg_custom_primary = getActiveNHSet( vnet, primary, prefix); + NextHopGroupKey nhg_custom_secondary = getActiveNHSet( vnet, secondary, prefix); + if (nhg_custom_primary.getSize() > 0) + { + if (nhg_custom_primary != active_nhg ) + { + if (!hasNextHopGroup(vnet, nhg_custom_primary)) + { + if (!createNextHopGroup(vnet, nhg_custom_primary, vrf_obj, "custom")) + { + SWSS_LOG_WARN("Failed to create primary based custom next hop group. Cannot proceed."); + return; + } + } + updateRoute = true; + } + if (updateRoute) + { + nhg_custom = nhg_custom_primary; + } + } + else if (nhg_custom_secondary.getSize() > 0) + { + if (nhg_custom_secondary != active_nhg ) + { + if (!hasNextHopGroup(vnet, nhg_custom_secondary)) + { + if (!createNextHopGroup(vnet, nhg_custom_secondary, vrf_obj, "custom")) + { + SWSS_LOG_WARN("Failed to create primary based custom next hop group. Cannot proceed."); + return; + } + } + updateRoute = true; + } + if (updateRoute) + { + nhg_custom = nhg_custom_secondary; + } + } + else + { + //both HHG's are inactive, need to remove the route. + updateRoute = true; + } + + if (nhg_custom.getSize() == 0) + { + // nhg_custom is empty. we shall create a dummy empty NHG for book keeping. 
+ SWSS_LOG_INFO(" Neither Primary or Secondary endpoints are up."); + if (!hasNextHopGroup(vnet, nhg_custom)) + { + NextHopGroupInfo next_hop_group_entry; + next_hop_group_entry.next_hop_group_id = SAI_NULL_OBJECT_ID; + next_hop_group_entry.ref_count = 0; + syncd_nexthop_groups_[vnet][nhg_custom] = next_hop_group_entry; + } + } + auto active_nhg_size = active_nhg.getSize(); + if (updateRoute) + { + for (auto vr_id : vr_set) + { + if (nhg_custom.getSize() == 0) + { + if (active_nhg_size > 0) + { + // we need to remove the route + del_route(vr_id, pfx); + } + } + else + { + bool route_status = true; + // note: nh_id can be SAI_NULL_OBJECT_ID when active_nhg is empty. + auto nh_id = syncd_nexthop_groups_[vnet][nhg_custom].next_hop_group_id; + if (active_nhg_size > 0) + { + // we need to replace the nhg in the route + route_status = update_route(vr_id, pfx, nh_id); + } + else + { + // we need to readd the route. + route_status = add_route(vr_id, pfx, nh_id); + } + if (!route_status) + { + SWSS_LOG_ERROR("Route add/update failed for %s, vr_id '0x%" PRIx64, prefix.to_string().c_str(), vr_id); + /* Clean up the newly created next hop group entry */ + if (nhg_custom.getSize() > 1) + { + removeNextHopGroup(vnet, nhg_custom, vrf_obj); + } + return; + } + vrf_obj->addRoute(prefix, nhg_custom); + } + } + if (config_update && nhg_custom != active_nhg) + { + // This convoluted logic has very good reason behind it. + // when a route configuration gets updated, if the new endpoints are same but primaries + // are changed, we must increase the ref count of active group to save it from premature + // deletion at this place. So, we increment the refcount of existing active_nhg in doRotueTask right + // before we call this function. Once here we need to undo this increment of refCount for the active_nhg + // which is no longer relevant. 
+ syncd_nexthop_groups_[vnet][active_nhg].ref_count--; + } + + if(--syncd_nexthop_groups_[vnet][active_nhg].ref_count == 0) + { + if (active_nhg_size > 1) + { + removeNextHopGroup(vnet, active_nhg, vrf_obj); + } + else + { + syncd_nexthop_groups_[vnet].erase(active_nhg); + if(active_nhg_size == 1) + { + NextHopKey nexthop(active_nhg.to_string(), true); + vrf_obj->removeTunnelNextHop(nexthop); + } + } + } + else + { + syncd_nexthop_groups_[vnet][active_nhg].tunnel_routes.erase(prefix); + } + syncd_nexthop_groups_[vnet][nhg_custom].tunnel_routes.insert(prefix); + syncd_tunnel_routes_[vnet][prefix].nhg_key = nhg_custom; + if (nhg_custom != active_nhg) + { + syncd_nexthop_groups_[vnet][nhg_custom].ref_count++; + } + if (nhg_custom.getSize() == 0 && active_nhg_size > 0) + { + vrf_obj->removeRoute(prefix); + removeRouteState(vnet, prefix); + if (prefix_to_adv_prefix_.find(prefix) != prefix_to_adv_prefix_.end()) + { + auto adv_pfx = prefix_to_adv_prefix_[prefix]; + adv_prefix_refcount_[adv_pfx] -=1; + if (adv_prefix_refcount_[adv_pfx] == 0) + { + adv_prefix_refcount_.erase(adv_pfx); + } + } + } + else if (nhg_custom.getSize() > 0 && active_nhg_size == 0) + { + auto adv_prefix = prefix_to_adv_prefix_[prefix]; + if (adv_prefix_refcount_.find(adv_prefix) == adv_prefix_refcount_.end()) + { + adv_prefix_refcount_[adv_prefix] = 0; + } + adv_prefix_refcount_[adv_prefix] += 1; + string profile = vrf_obj->getProfile(prefix); + postRouteState(vnet, prefix, nhg_custom, profile); + } + else + { + string profile = vrf_obj->getProfile(prefix); + postRouteState(vnet, prefix, nhg_custom, profile); + } + } +} + bool VNetRouteOrch::handleTunnel(const Request& request) { SWSS_LOG_ENTER(); @@ -1916,7 +2569,11 @@ bool VNetRouteOrch::handleTunnel(const Request& request) vector vni_list; vector monitor_list; string profile = ""; - + vector primary_list; + string monitoring; + swss::IpPrefix adv_prefix; + bool has_priority_ep = false; + bool has_adv_pfx = false; for (const auto& name: 
request.getAttrFieldNames()) { if (name == "endpoint") @@ -1941,6 +2598,19 @@ bool VNetRouteOrch::handleTunnel(const Request& request) { profile = request.getAttrString(name); } + else if (name == "primary") + { + primary_list = request.getAttrIPList(name); + } + else if (name == "monitoring") + { + monitoring = request.getAttrString(name); + } + else if (name == "adv_prefix") + { + adv_prefix = request.getAttrIpPrefix(name); + has_adv_pfx = true; + } else { SWSS_LOG_INFO("Unknown attribute: %s", name.c_str()); @@ -1965,6 +2635,11 @@ bool VNetRouteOrch::handleTunnel(const Request& request) SWSS_LOG_ERROR("Peer monitor size of %zu does not match endpoint size of %zu", monitor_list.size(), ip_list.size()); return false; } + if (!primary_list.empty() && monitor_list.empty()) + { + SWSS_LOG_ERROR("Primary/backup behaviour cannot function without endpoint monitoring."); + return true; + } const std::string& vnet_name = request.getKeyString(0); auto ip_pfx = request.getKeyIpPrefix(1); @@ -1973,6 +2648,14 @@ bool VNetRouteOrch::handleTunnel(const Request& request) SWSS_LOG_INFO("VNET-RT '%s' op '%s' for pfx %s", vnet_name.c_str(), op.c_str(), ip_pfx.to_string().c_str()); + if (!primary_list.empty()) + { + has_priority_ep = true; + SWSS_LOG_INFO("Handling Priority Tunnel with prefix %s", ip_pfx.to_string().c_str()); + } + + NextHopGroupKey nhg_primary("", true); + NextHopGroupKey nhg_secondary("", true); NextHopGroupKey nhg("", true); map monitors; for (size_t idx_ip = 0; idx_ip < ip_list.size(); idx_ip++) @@ -1995,16 +2678,31 @@ bool VNetRouteOrch::handleTunnel(const Request& request) } NextHopKey nh(ip, mac, vni, true); - nhg.add(nh); if (!monitor_list.empty()) { monitors[nh] = monitor_list[idx_ip]; } + if (has_priority_ep) + { + if (std::find(primary_list.begin(), primary_list.end(), ip) != primary_list.end()) + { + // only add the primary endpoint ips. 
+ nhg_primary.add(nh); + } + else + { + nhg_secondary.add(nh); + } + } + nhg.add(nh); + } + if (!has_adv_pfx) + { + adv_prefix = ip_pfx; } - if (vnet_orch_->isVnetExecVrf()) { - return doRouteTask(vnet_name, ip_pfx, nhg, op, profile, monitors); + return doRouteTask(vnet_name, ip_pfx, (has_priority_ep == true) ? nhg_primary : nhg, op, profile, monitoring, nhg_secondary, adv_prefix, monitors); } return true; @@ -2150,3 +2848,44 @@ bool VNetCfgRouteOrch::doVnetRouteTask(const KeyOpFieldsValuesTuple & t, const s return true; } + +MonitorOrch::MonitorOrch(DBConnector *db, string tableName): + Orch2(db, tableName, request_) +{ + SWSS_LOG_ENTER(); +} + +MonitorOrch::~MonitorOrch(void) +{ + SWSS_LOG_ENTER(); +} + +bool MonitorOrch::addOperation(const Request& request) +{ + SWSS_LOG_ENTER(); + auto monitor = request.getKeyIpAddress(0); + auto ip_Prefix = request.getKeyIpPrefix(1); + + auto session_state = request.getAttrString("state"); + SWSS_LOG_INFO("Added state table entry for monitor %s|%s", ip_Prefix.to_string().c_str(),monitor.to_string().c_str()); + + string op = SET_COMMAND; + VNetRouteOrch* vnet_route_orch = gDirectory.get(); + vnet_route_orch->updateMonitorState(op ,ip_Prefix, monitor, session_state ); + + return true; +} + +bool MonitorOrch::delOperation(const Request& request) +{ + SWSS_LOG_ENTER(); + auto monitor = request.getKeyIpAddress(0); + auto ip_Prefix = request.getKeyIpPrefix(1); + + SWSS_LOG_INFO("Deleting state table entry for monitor %s|%s", ip_Prefix.to_string().c_str(),monitor.to_string().c_str()); + VNetRouteOrch* vnet_route_orch = gDirectory.get(); + string op = DEL_COMMAND; + vnet_route_orch->updateMonitorState(op, ip_Prefix, monitor, "" ); + + return true; +} diff --git a/orchagent/vnetorch.h b/orchagent/vnetorch.h index 4f63764a0e..0cffa115fd 100644 --- a/orchagent/vnetorch.h +++ b/orchagent/vnetorch.h @@ -24,6 +24,14 @@ extern sai_object_id_t gVirtualRouterId; + +typedef enum +{ + MONITOR_SESSION_STATE_UNKNOWN, + MONITOR_SESSION_STATE_UP, + 
MONITOR_SESSION_STATE_DOWN, +} monitor_session_state_t; + const request_description_t vnet_request_description = { { REQ_T_STRING }, { @@ -34,6 +42,8 @@ const request_description_t vnet_request_description = { { "guid", REQ_T_STRING }, { "scope", REQ_T_STRING }, { "advertise_prefix", REQ_T_BOOL}, + { "overlay_dmac", REQ_T_MAC_ADDRESS}, + }, { "vxlan_tunnel", "vni" } // mandatory attributes }; @@ -59,6 +69,7 @@ struct VNetInfo set peers; string scope; bool advertise_prefix; + swss::MacAddress overlay_dmac; }; typedef map vrid_list_t; @@ -86,7 +97,8 @@ class VNetObject peer_list_(vnetInfo.peers), vni_(vnetInfo.vni), scope_(vnetInfo.scope), - advertise_prefix_(vnetInfo.advertise_prefix) + advertise_prefix_(vnetInfo.advertise_prefix), + overlay_dmac_(vnetInfo.overlay_dmac) { } virtual bool updateObj(vector&) = 0; @@ -121,6 +133,16 @@ class VNetObject return advertise_prefix_; } + swss::MacAddress getOverlayDMac() const + { + return overlay_dmac_; + } + + void setOverlayDMac(swss::MacAddress mac_addr) + { + overlay_dmac_ = mac_addr; + } + virtual ~VNetObject() noexcept(false) {}; private: @@ -129,6 +151,7 @@ class VNetObject uint32_t vni_; string scope_; bool advertise_prefix_; + swss::MacAddress overlay_dmac_; }; struct nextHop @@ -282,10 +305,40 @@ const request_description_t vnet_route_description = { { "mac_address", REQ_T_STRING }, { "endpoint_monitor", REQ_T_IP_LIST }, { "profile", REQ_T_STRING }, + { "primary", REQ_T_IP_LIST }, + { "monitoring", REQ_T_STRING }, + { "adv_prefix", REQ_T_IP_PREFIX }, }, { } }; +const request_description_t monitor_state_request_description = { + { REQ_T_IP, REQ_T_IP_PREFIX, }, + { + { "state", REQ_T_STRING }, + }, + { "state" } +}; + +class MonitorStateRequest : public Request +{ +public: + MonitorStateRequest() : Request(monitor_state_request_description, '|') { } +}; + +class MonitorOrch : public Orch2 +{ +public: + MonitorOrch(swss::DBConnector *db, std::string tableName); + virtual ~MonitorOrch(void); + +private: + virtual bool 
addOperation(const Request& request); + virtual bool delOperation(const Request& request); + + MonitorStateRequest request_; +}; + class VNetRouteRequest : public Request { public: @@ -326,9 +379,35 @@ struct BfdSessionInfo NextHopKey endpoint; }; +struct MonitorSessionInfo +{ + monitor_session_state_t state; + NextHopKey endpoint; + int ref_count; +}; + +struct MonitorUpdate +{ + monitor_session_state_t state; + IpAddress monitor; + IpPrefix prefix; + std::string vnet; +}; +struct VNetTunnelRouteEntry +{ + // The nhg_key is the key for the next hop group which is currently active in hardware. + // For priority routes, this can be a subset of either primary or secondary NHG or an empty NHG. + NextHopGroupKey nhg_key; + // For regular ECMP routes the primary and secondary fields will be empty. For priority + // routes they will contain the original primary and secondary NHGs. + NextHopGroupKey primary; + NextHopGroupKey secondary; +}; + typedef std::map VNetNextHopGroupInfoTable; -typedef std::map VNetTunnelRouteTable; +typedef std::map VNetTunnelRouteTable; typedef std::map BfdSessionTable; +typedef std::map> MonitorSessionTable; typedef std::map VNetEndpointInfoTable; class VNetRouteOrch : public Orch2, public Subject, public Observer @@ -343,6 +422,8 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer void detach(Observer* observer, const IpAddress& dstAddr); void update(SubjectType, void *); + void updateMonitorState(string& op, const IpPrefix& prefix , const IpAddress& endpoint, string state); + void updateAllMonitoringSession(const string& vnet); private: virtual bool addOperation(const Request& request); @@ -356,23 +437,36 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer bool hasNextHopGroup(const string&, const NextHopGroupKey&); sai_object_id_t getNextHopGroupId(const string&, const NextHopGroupKey&); - bool addNextHopGroup(const string&, const NextHopGroupKey&, VNetVrfObject *vrf_obj); + bool addNextHopGroup(const 
string&, const NextHopGroupKey&, VNetVrfObject *vrf_obj, + const string& monitoring); bool removeNextHopGroup(const string&, const NextHopGroupKey&, VNetVrfObject *vrf_obj); + bool createNextHopGroup(const string&, NextHopGroupKey&, VNetVrfObject *vrf_obj, + const string& monitoring); + NextHopGroupKey getActiveNHSet(const string&, NextHopGroupKey&, const IpPrefix& ); + + bool selectNextHopGroup(const string&, NextHopGroupKey&, NextHopGroupKey&, const string&, IpPrefix&, + VNetVrfObject *vrf_obj, NextHopGroupKey&, + const std::map& monitors=std::map()); void createBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr); void removeBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr); - void setEndpointMonitor(const string& vnet, const map& monitors, NextHopGroupKey& nexthops); - void delEndpointMonitor(const string& vnet, NextHopGroupKey& nexthops); + void createMonitoringSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr, IpPrefix& ipPrefix); + void removeMonitoringSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr, IpPrefix& ipPrefix); + void setEndpointMonitor(const string& vnet, const map& monitors, NextHopGroupKey& nexthops, + const string& monitoring, IpPrefix& ipPrefix); + void delEndpointMonitor(const string& vnet, NextHopGroupKey& nexthops, IpPrefix& ipPrefix); void postRouteState(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& profile); void removeRouteState(const string& vnet, IpPrefix& ipPrefix); void addRouteAdvertisement(IpPrefix& ipPrefix, string& profile); void removeRouteAdvertisement(IpPrefix& ipPrefix); void updateVnetTunnel(const BfdUpdate&); + void updateVnetTunnelCustomMonitor(const MonitorUpdate& update); bool updateTunnelRoute(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op); template bool doRouteTask(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, 
string& op, string& profile, + const string& monitoring, NextHopGroupKey& nexthops_secondary, const IpPrefix& adv_prefix, const std::map& monitors=std::map()); template @@ -387,9 +481,14 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer std::map syncd_nexthop_groups_; std::map syncd_tunnel_routes_; BfdSessionTable bfd_sessions_; + std::map monitor_info_; std::map nexthop_info_; + std::map prefix_to_adv_prefix_; + std::map adv_prefix_refcount_; ProducerStateTable bfd_session_producer_; + unique_ptr
monitor_session_producer_; shared_ptr state_db_; + shared_ptr app_db_; unique_ptr
state_vnet_rt_tunnel_table_; unique_ptr
state_vnet_rt_adv_table_; }; diff --git a/orchagent/vrforch.h b/orchagent/vrforch.h index 195015fa08..07e0df55ec 100644 --- a/orchagent/vrforch.h +++ b/orchagent/vrforch.h @@ -155,6 +155,19 @@ class VRFOrch : public Orch2 return (-1); } } + + bool isL3VniVlan(const uint32_t vni) const + { + if (l3vni_table_.find(vni) != std::end(l3vni_table_)) + { + return l3vni_table_.at(vni).l3_vni; + } + else + { + return false; + } + } + int updateL3VniVlan(uint32_t vni, uint16_t vlan_id); private: virtual bool addOperation(const Request& request); diff --git a/orchagent/vxlanorch.cpp b/orchagent/vxlanorch.cpp index 1995675fdd..1983cf7286 100644 --- a/orchagent/vxlanorch.cpp +++ b/orchagent/vxlanorch.cpp @@ -1903,6 +1903,7 @@ bool VxlanTunnelMapOrch::addOperation(const Request& request) sai_vlan_id_t vlan_id = (sai_vlan_id_t)request.getAttrVlan("vlan"); Port tempPort; + bool isL3Vni = false; const auto full_tunnel_map_entry_name = request.getFullKey(); SWSS_LOG_INFO("Full name = %s",full_tunnel_map_entry_name.c_str()); @@ -1974,11 +1975,21 @@ bool VxlanTunnelMapOrch::addOperation(const Request& request) tunnel_obj->vlan_vrf_vni_count++; SWSS_LOG_INFO("vni count increased to %d",tunnel_obj->vlan_vrf_vni_count); + VRFOrch* vrf_orch = gDirectory.get(); + isL3Vni = vrf_orch->isL3VniVlan(vni_id); + try { - auto tunnel_map_entry_id = create_tunnel_map_entry(MAP_T::VNI_TO_VLAN_ID, - tunnel_map_id, vni_id, vlan_id); - vxlan_tunnel_map_table_[full_tunnel_map_entry_name].map_entry_id = tunnel_map_entry_id; + if (isL3Vni == false) + { + auto tunnel_map_entry_id = create_tunnel_map_entry(MAP_T::VNI_TO_VLAN_ID, + tunnel_map_id, vni_id, vlan_id); + vxlan_tunnel_map_table_[full_tunnel_map_entry_name].map_entry_id = tunnel_map_entry_id; + } + else + { + vxlan_tunnel_map_table_[full_tunnel_map_entry_name].map_entry_id = SAI_NULL_OBJECT_ID; + } vxlan_tunnel_map_table_[full_tunnel_map_entry_name].vlan_id = vlan_id; vxlan_tunnel_map_table_[full_tunnel_map_entry_name].vni_id = vni_id; } @@ -2124,9 
+2135,13 @@ bool VxlanTunnelMapOrch::delOperation(const Request& request) bool VxlanVrfMapOrch::addOperation(const Request& request) { SWSS_LOG_ENTER(); + std::string vniVlanMapName; + uint32_t vlan_id = 0; + sai_object_id_t tnl_map_entry_id = SAI_NULL_OBJECT_ID; auto tunnel_name = request.getKeyString(0); VxlanTunnelOrch* tunnel_orch = gDirectory.get(); + VxlanTunnelMapOrch* vxlan_tun_map_orch = gDirectory.get(); if (!tunnel_orch->isTunnelExists(tunnel_name)) { SWSS_LOG_WARN("Vxlan tunnel '%s' doesn't exist", tunnel_name.c_str()); @@ -2188,6 +2203,15 @@ bool VxlanVrfMapOrch::addOperation(const Request& request) vrf_map_entry_t entry; try { + entry.isL2Vni = vxlan_tun_map_orch->isVniVlanMapExists(vni_id, vniVlanMapName, &tnl_map_entry_id, &vlan_id); + entry.vni_id = vni_id; + if (entry.isL2Vni) + { + entry.vniVlanMapName = vniVlanMapName; + entry.vlan_id = vlan_id; + remove_tunnel_map_entry(tnl_map_entry_id); + SWSS_LOG_DEBUG("remove_tunnel_map_entry name %s, vlan %d, vni %d\n", entry.vniVlanMapName.c_str(), entry.vlan_id, entry.vni_id); + } /* * Create encap and decap mapper */ @@ -2219,7 +2243,12 @@ bool VxlanVrfMapOrch::delOperation(const Request& request) SWSS_LOG_ENTER(); VRFOrch* vrf_orch = gDirectory.get(); + VxlanTunnelOrch* tunnel_orch = gDirectory.get(); + VxlanTunnelMapOrch* vxlan_tun_map_orch = gDirectory.get(); const auto full_map_entry_name = request.getFullKey(); + std::string vniVlanMapName; + uint32_t vlan_id = 0; + sai_object_id_t tnl_map_entry_id = SAI_NULL_OBJECT_ID; if (!isVrfMapExists(full_map_entry_name)) { @@ -2241,6 +2270,9 @@ bool VxlanVrfMapOrch::delOperation(const Request& request) return false; } SWSS_LOG_NOTICE("VxlanVrfMapOrch VRF VNI mapping '%s' remove vrf %s", full_map_entry_name.c_str(), vrf_name.c_str()); + auto tunnel_name = request.getKeyString(0); + auto tunnel_obj = tunnel_orch->getVxlanTunnel(tunnel_name); + vrf_map_entry_t entry; try { @@ -2256,6 +2288,32 @@ bool VxlanVrfMapOrch::delOperation(const Request& request) 
vrf_orch->decreaseVrfRefCount(vrf_name); remove_tunnel_map_entry(entry.decap_id); vrf_orch->decreaseVrfRefCount(vrf_name); + + if (!entry.isL2Vni) + { + entry.isL2Vni = vxlan_tun_map_orch->isVniVlanMapExists(entry.vni_id, vniVlanMapName, &tnl_map_entry_id, &vlan_id); + SWSS_LOG_NOTICE("VxlanVrfMapOrch vni %d, isL2Vni %d\n", entry.vni_id, entry.isL2Vni); + + if (entry.isL2Vni) + { + entry.vniVlanMapName = vniVlanMapName; + entry.vlan_id = vlan_id; + SWSS_LOG_DEBUG("add_tunnel_map_entry name %s, vlan %d, vni %d\n", entry.vniVlanMapName.c_str(), entry.vlan_id, entry.vni_id); + } + } + if(entry.isL2Vni) + { + const auto tunnel_map_id = tunnel_obj->getDecapMapId(TUNNEL_MAP_T_VLAN); + SWSS_LOG_NOTICE("Adding tunnel map entry. Tunnel: %s %s",tunnel_name.c_str(),entry.vniVlanMapName.c_str()); + + SWSS_LOG_DEBUG("create_tunnel_map_entry vni %d, vlan %d\n", entry.vni_id, entry.vlan_id); + auto tunnel_map_entry_id = create_tunnel_map_entry(MAP_T::VNI_TO_VLAN_ID, + tunnel_map_id, entry.vni_id, (uint16_t)entry.vlan_id); + SWSS_LOG_DEBUG("updateTnlMapId name %s\n", entry.vniVlanMapName.c_str()); + + vxlan_tun_map_orch->updateTnlMapId(entry.vniVlanMapName, tunnel_map_entry_id); + } + vxlan_vrf_table_.erase(full_map_entry_name); vxlan_vrf_tunnel_.erase(vrf_name); } @@ -2289,8 +2347,21 @@ bool EvpnRemoteVnip2pOrch::addOperation(const Request& request) return true; } + EvpnNvoOrch* evpn_orch = gDirectory.get(); + auto vtep_ptr = evpn_orch->getEVPNVtep(); + if (!vtep_ptr) + { + SWSS_LOG_WARN("Remote VNI add: Source VTEP not found. 
remote=%s vid=%d", + remote_vtep.c_str(), vlan_id); + return true; + } + VxlanTunnelOrch* tunnel_orch = gDirectory.get(); Port tunnelPort, vlanPort; + VxlanTunnelMapOrch* vxlan_tun_map_orch = gDirectory.get(); + std::string vniVlanMapName; + uint32_t tmp_vlan_id = 0; + sai_object_id_t tnl_map_entry_id = SAI_NULL_OBJECT_ID; if (!gPortsOrch->getVlanByVlanId(vlan_id, vlanPort)) { @@ -2298,22 +2369,28 @@ bool EvpnRemoteVnip2pOrch::addOperation(const Request& request) return false; } + /* Remote end point can be added only after local VLAN to VNI map gets created */ + if (!vxlan_tun_map_orch->isVniVlanMapExists(vni_id, vniVlanMapName, &tnl_map_entry_id, &tmp_vlan_id)) + { + SWSS_LOG_WARN("Vxlan tunnel map is not created for vni:%d", vni_id); + return false; + } + + VRFOrch* vrf_orch = gDirectory.get(); + if (vrf_orch->isL3VniVlan(vni_id)) + { + SWSS_LOG_WARN("Ignoring remote VNI add for L3 VNI:%d, remote:%s", vni_id, remote_vtep.c_str()); + return false; + } + if (tunnel_orch->getTunnelPort(remote_vtep,tunnelPort)) { SWSS_LOG_INFO("Vxlan tunnelPort exists: %s", remote_vtep.c_str()); if (gPortsOrch->isVlanMember(vlanPort, tunnelPort)) { - EvpnNvoOrch* evpn_orch = gDirectory.get(); - auto vtep_ptr = evpn_orch->getEVPNVtep(); - if (!vtep_ptr) - { - SWSS_LOG_WARN("Remote VNI add: VTEP not found. 
remote=%s vid=%d", - remote_vtep.c_str(),vlan_id); - return true; - } SWSS_LOG_WARN("tunnelPort %s already member of vid %d", - remote_vtep.c_str(),vlan_id); + remote_vtep.c_str(),vlan_id); vtep_ptr->increment_spurious_imr_add(remote_vtep); return true; } @@ -2434,6 +2511,11 @@ bool EvpnRemoteVnip2mpOrch::addOperation(const Request& request) } VxlanTunnelOrch* tunnel_orch = gDirectory.get(); + VxlanTunnelMapOrch* vxlan_tun_map_orch = gDirectory.get(); + std::string vniVlanMapName; + uint32_t tmp_vlan_id = 0; + sai_object_id_t tnl_map_entry_id = SAI_NULL_OBJECT_ID; + Port tunnelPort, vlanPort; auto vtep_ptr = evpn_orch->getEVPNVtep(); if (!vtep_ptr) @@ -2449,6 +2531,20 @@ bool EvpnRemoteVnip2mpOrch::addOperation(const Request& request) return false; } + /* Remote end point can be added only after local VLAN to VNI map gets created */ + if (!vxlan_tun_map_orch->isVniVlanMapExists(vni_id, vniVlanMapName, &tnl_map_entry_id, &tmp_vlan_id)) + { + SWSS_LOG_WARN("Vxlan tunnel map is not created for vni: %d", vni_id); + return false; + } + + VRFOrch* vrf_orch = gDirectory.get(); + if (vrf_orch->isL3VniVlan(vni_id)) + { + SWSS_LOG_WARN("Ignoring remote VNI add for L3 VNI:%d, remote:%s", vni_id, end_point_ip.c_str()); + return false; + } + auto src_vtep = vtep_ptr->getSrcIP().to_string(); if (tunnel_orch->getTunnelPort(src_vtep,tunnelPort, true)) { @@ -2599,3 +2695,35 @@ bool EvpnNvoOrch::delOperation(const Request& request) return true; } + +bool VxlanTunnelMapOrch::isVniVlanMapExists(uint32_t vni_id, std::string& vniVlanMapName, sai_object_id_t *tnl_map_entry_id, uint32_t *vlan_id) +{ + SWSS_LOG_ENTER(); + bool map_entry_exists = false; + std::map::iterator it; + for(it = vxlan_tunnel_map_table_.begin(); it != vxlan_tunnel_map_table_.end(); it++) + { + auto full_tunnel_map_entry_name = it->first; + tunnel_map_entry_t tunnel_map_entry = it->second; + + if (vni_id == tunnel_map_entry.vni_id) + { + vniVlanMapName = full_tunnel_map_entry_name; + *tnl_map_entry_id = 
tunnel_map_entry.map_entry_id; + *vlan_id = tunnel_map_entry.vlan_id; + map_entry_exists = true; + SWSS_LOG_NOTICE("vniVlanMapName %s, vlan %d\n", vniVlanMapName.c_str(), *vlan_id); + break; + } + } + + return map_entry_exists; +} + +void VxlanTunnelMapOrch::updateTnlMapId(std::string vniVlanMapName, sai_object_id_t tunnel_map_id) +{ + SWSS_LOG_ENTER(); + SWSS_LOG_NOTICE("name %s\n", vniVlanMapName.c_str()); + vxlan_tunnel_map_table_[vniVlanMapName].map_entry_id = tunnel_map_id; +} + diff --git a/orchagent/vxlanorch.h b/orchagent/vxlanorch.h index 9529a86ce7..695f7441e0 100644 --- a/orchagent/vxlanorch.h +++ b/orchagent/vxlanorch.h @@ -410,6 +410,10 @@ class VxlanTunnelMapOrch : public Orch2 { return vxlan_tunnel_map_table_.find(name) != std::end(vxlan_tunnel_map_table_); } + + bool isVniVlanMapExists(uint32_t vni_id, std::string& vniVlanMapName, sai_object_id_t *tnl_map_entry_id, uint32_t *vlan_id); + + void updateTnlMapId(std::string vniVlanMapName, sai_object_id_t tunnel_map_id); private: virtual bool addOperation(const Request& request); virtual bool delOperation(const Request& request); @@ -436,6 +440,10 @@ class VxlanVrfRequest : public Request struct vrf_map_entry_t { sai_object_id_t encap_id; sai_object_id_t decap_id; + bool isL2Vni; + std::string vniVlanMapName; + uint32_t vlan_id; + uint32_t vni_id; }; typedef std::map VxlanVrfTable; diff --git a/orchagent/zmqorch.cpp b/orchagent/zmqorch.cpp new file mode 100644 index 0000000000..a3aef7d81b --- /dev/null +++ b/orchagent/zmqorch.cpp @@ -0,0 +1,65 @@ +#include "zmqorch.h" + +using namespace swss; +using namespace std; + +extern int gBatchSize; + +void ZmqConsumer::execute() +{ + SWSS_LOG_ENTER(); + + size_t update_size = 0; + auto table = static_cast(getSelectable()); + do + { + std::deque entries; + table->pops(entries); + update_size = addToSync(entries); + } while (update_size != 0); + + drain(); +} + +void ZmqConsumer::drain() +{ + if (!m_toSync.empty()) + (static_cast(m_orch))->doTask(*this); +} + + 
+ZmqOrch::ZmqOrch(DBConnector *db, const vector &tableNames, ZmqServer *zmqServer) +: Orch() +{ + for (auto it : tableNames) + { + addConsumer(db, it, default_orch_pri, zmqServer); + } +} + +void ZmqOrch::addConsumer(DBConnector *db, string tableName, int pri, ZmqServer *zmqServer) +{ + if (db->getDbId() == APPL_DB) + { + if (zmqServer != nullptr) + { + SWSS_LOG_DEBUG("ZmqConsumer initialize for: %s", tableName.c_str()); + addExecutor(new ZmqConsumer(new ZmqConsumerStateTable(db, tableName, *zmqServer, gBatchSize, pri), this, tableName)); + } + else + { + SWSS_LOG_DEBUG("Consumer initialize for: %s", tableName.c_str()); + addExecutor(new Consumer(new ConsumerStateTable(db, tableName, gBatchSize, pri), this, tableName)); + } + } + else + { + SWSS_LOG_WARN("ZmqOrch does not support create consumer for db: %d, table: %s", db->getDbId(), tableName.c_str()); + } +} + +void ZmqOrch::doTask(Consumer &consumer) +{ + // When ZMQ disabled, forward data from Consumer + doTask((ConsumerBase &)consumer); +} \ No newline at end of file diff --git a/orchagent/zmqorch.h b/orchagent/zmqorch.h new file mode 100644 index 0000000000..1541996932 --- /dev/null +++ b/orchagent/zmqorch.h @@ -0,0 +1,35 @@ +#pragma once + +#include +#include +#include +#include "zmqserver.h" + +class ZmqConsumer : public ConsumerBase { +public: + ZmqConsumer(swss::ZmqConsumerStateTable *select, Orch *orch, const std::string &name) + : ConsumerBase(select, orch, name) + { + } + + swss::TableBase *getConsumerTable() const override + { + // ZmqConsumerStateTable is a subclass of TableBase + return static_cast(getSelectable()); + } + + void execute() override; + void drain() override; +}; + +class ZmqOrch : public Orch +{ +public: + ZmqOrch(swss::DBConnector *db, const std::vector &tableNames, swss::ZmqServer *zmqServer); + + virtual void doTask(ConsumerBase &consumer) { }; + void doTask(Consumer &consumer) override; + +private: + void addConsumer(swss::DBConnector *db, std::string tableName, int pri, 
swss::ZmqServer *zmqServer); +}; \ No newline at end of file diff --git a/portsyncd/Makefile.am b/portsyncd/Makefile.am index 3db6187059..b65e3b4a4f 100644 --- a/portsyncd/Makefile.am +++ b/portsyncd/Makefile.am @@ -15,7 +15,7 @@ portsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) portsyncd_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lswsscommon if GCOV_ENABLED -portsyncd_LDADD += -lgcovpreload +portsyncd_SOURCES += ../gcovpreload/gcovpreload.cpp endif if ASAN_ENABLED diff --git a/portsyncd/linksync.cpp b/portsyncd/linksync.cpp index fc28411613..66cdc4df5f 100644 --- a/portsyncd/linksync.cpp +++ b/portsyncd/linksync.cpp @@ -33,6 +33,7 @@ const string LAG_PREFIX = "PortChannel"; extern set g_portSet; extern bool g_init; +extern string g_switchType; LinkSync::LinkSync(DBConnector *appl_db, DBConnector *state_db) : m_portTableProducer(appl_db, APP_PORT_TABLE_NAME), @@ -114,6 +115,14 @@ LinkSync::LinkSync(DBConnector *appl_db, DBConnector *state_db) : } } + /* In DPU SONiC netdevs in Kernel are created in the early stage of the syncd service start, + * when the driver is loading. And exist while the driver remains loaded. + *The comparison logic to distinguish "old" interfaces is not needed. 
*/ + if (g_switchType == "dpu") + { + return; + } + for (idx_p = if_ni.get(); idx_p != NULL && idx_p->if_index != 0 && idx_p->if_name != NULL; idx_p++) diff --git a/portsyncd/portsyncd.cpp b/portsyncd/portsyncd.cpp index c12e44e4f6..d9f69ba754 100644 --- a/portsyncd/portsyncd.cpp +++ b/portsyncd/portsyncd.cpp @@ -34,6 +34,7 @@ using namespace swss; */ set g_portSet; bool g_init = false; +string g_switchType; void usage() { @@ -46,33 +47,40 @@ void handlePortConfigFromConfigDB(ProducerStateTable &p, DBConnector &cfgDb, boo int main(int argc, char **argv) { - Logger::linkToDbNative("portsyncd"); - int opt; - - while ((opt = getopt(argc, argv, "v:h")) != -1 ) + try { - switch (opt) + Logger::linkToDbNative("portsyncd"); + int opt; + + while ((opt = getopt(argc, argv, "v:h")) != -1 ) { - case 'h': - usage(); - return 1; - default: /* '?' */ - usage(); - return EXIT_FAILURE; + switch (opt) + { + case 'h': + usage(); + return 1; + default: /* '?' */ + usage(); + return EXIT_FAILURE; + } } - } - DBConnector cfgDb("CONFIG_DB", 0); - DBConnector appl_db("APPL_DB", 0); - DBConnector state_db("STATE_DB", 0); - ProducerStateTable p(&appl_db, APP_PORT_TABLE_NAME); + DBConnector cfgDb("CONFIG_DB", 0); + DBConnector appl_db("APPL_DB", 0); + DBConnector state_db("STATE_DB", 0); + ProducerStateTable p(&appl_db, APP_PORT_TABLE_NAME); - WarmStart::initialize("portsyncd", "swss"); - WarmStart::checkWarmStart("portsyncd", "swss"); - const bool warm = WarmStart::isWarmStart(); + Table cfgDeviceMetaDataTable(&cfgDb, CFG_DEVICE_METADATA_TABLE_NAME); + if (!cfgDeviceMetaDataTable.hget("localhost", "switch_type", g_switchType)) + { + //Switch type is not configured. 
Consider it default = "switch" (regular switch) + g_switchType = "switch"; + } + + WarmStart::initialize("portsyncd", "swss"); + WarmStart::checkWarmStart("portsyncd", "swss"); + const bool warm = WarmStart::isWarmStart(); - try - { NetLink netlink; Select s; @@ -136,6 +144,16 @@ int main(int argc, char **argv) } } } + catch (const swss::RedisError& e) + { + cerr << "Exception \"" << e.what() << "\" was thrown in daemon" << endl; + return EXIT_FAILURE; + } + catch (const std::out_of_range& e) + { + cerr << "Exception \"" << e.what() << "\" was thrown in daemon" << endl; + return EXIT_FAILURE; + } catch (const std::exception& e) { cerr << "Exception \"" << e.what() << "\" was thrown in daemon" << endl; diff --git a/swssconfig/Makefile.am b/swssconfig/Makefile.am index 3cfc0b9629..bd8bcc226a 100644 --- a/swssconfig/Makefile.am +++ b/swssconfig/Makefile.am @@ -21,8 +21,8 @@ swssplayer_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) swssplayer_LDADD = $(LDFLAGS_ASAN) -lswsscommon if GCOV_ENABLED -swssconfig_LDADD += -lgcovpreload -swssplayer_LDADD += -lgcovpreload +swssconfig_SOURCES += ../gcovpreload/gcovpreload.cpp +swssplayer_SOURCES += ../gcovpreload/gcovpreload.cpp endif if ASAN_ENABLED diff --git a/swssconfig/swssconfig.cpp b/swssconfig/swssconfig.cpp index e61d038381..8029f3a3d8 100644 --- a/swssconfig/swssconfig.cpp +++ b/swssconfig/swssconfig.cpp @@ -9,7 +9,7 @@ #include "logger.h" #include "dbconnector.h" #include "producerstatetable.h" -#include "json.hpp" +#include using namespace std; using namespace swss; diff --git a/teamsyncd/Makefile.am b/teamsyncd/Makefile.am index a13573bf25..c72498d9e3 100644 --- a/teamsyncd/Makefile.am +++ b/teamsyncd/Makefile.am @@ -15,7 +15,7 @@ teamsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) teamsyncd_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lhiredis -lswsscommon -lteam if GCOV_ENABLED -teamsyncd_LDADD += -lgcovpreload +teamsyncd_SOURCES += ../gcovpreload/gcovpreload.cpp 
endif if ASAN_ENABLED diff --git a/tests/Makefile.am b/tests/Makefile.am index 0b6831be97..8f2aa131c4 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -1,3 +1,5 @@ +INCLUDES = -I $(top_srcdir)/lib + CFLAGS_SAI = -I /usr/include/sai TESTS = tests @@ -18,7 +20,7 @@ CFLAGS_GTEST = LDADD_GTEST = -L/usr/src/gtest tests_SOURCES = swssnet_ut.cpp request_parser_ut.cpp ../orchagent/request_parser.cpp \ - quoted_ut.cpp + quoted_ut.cpp ../lib/recorder.cpp tests_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) tests_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) -I../orchagent diff --git a/tests/README.md b/tests/README.md index 33c9c3479a..bf93a84279 100644 --- a/tests/README.md +++ b/tests/README.md @@ -35,7 +35,7 @@ SWSS, Redis, and all the other required components run inside a virtual switch D ``` sudo modprobe team - sudo apt install python3-pip net-tools ethtool vlan libnl-nf-3-200 libnl-cli-3-200 + sudo apt install python3-pip net-tools bridge-utils ethtool vlan libnl-nf-3-200 libnl-cli-3-200 sudo pip3 install docker pytest flaky redis distro dataclasses fstring ``` @@ -43,11 +43,13 @@ SWSS, Redis, and all the other required components run inside a virtual switch D ``` sudo apt install libhiredis0.13 ``` + ****Dash testcases aren't supported in Ubuntu 18.04**** If you are running **Ubuntu 20.04** you will need to install this package: ``` sudo apt install libhiredis0.14 ``` + If you want to run DASH testcases, please download and install the latest ubuntu20.04 [dependencies](https://dev.azure.com/mssonic/build/_build?definitionId=1055&_a=summary&repositoryFilter=158&branchFilter=11237%2C11237%2C11237%2C11237%2C11237) of DASH from Azp. 4. Install `swsscommon`. 
@@ -56,15 +58,16 @@ SWSS, Redis, and all the other required components run inside a virtual switch D ``` You can get these two packages by: - - [Building it from scratch](https://github.com/Azure/sonic-swss-common) - - Downloading the latest build from Jenkins: - - [Ubuntu 18.04](https://sonic-jenkins.westus2.cloudapp.azure.com/job/common/job/sonic-swss-common-build-ubuntu/lastSuccessfulBuild/artifact/target/) - - [Ubuntu 20.04](https://sonic-jenkins.westus2.cloudapp.azure.com/job/common/job/sonic-swss-common-build-ubuntu-20_04/lastSuccessfulBuild/artifact/target/) + - [Building it from scratch](https://github.com/sonic-net/sonic-swss-common) + - Downloading the latest build from Azure: + - [Ubuntu 20.04](https://sonic-build.azurewebsites.net/api/sonic/artifacts?branchName=master&definitionId=9&artifactName=sonic-swss-common.amd64.ubuntu20_04) 5. Load the `docker-sonic-vs.gz` file into docker. You can get the image by: - - [Building it from scratch](https://github.com/Azure/sonic-buildimage) - - [Downloading the latest build from Jenkins](https://sonic-jenkins.westus2.cloudapp.azure.com/job/vs/job/buildimage-vs-all/lastSuccessfulBuild/artifact/target/) - + - [Building it from scratch](https://github.com/sonic-net/sonic-buildimage) + - Downloading the latest build from Azure: + - [docker-sonic-vs-asan.gz](https://sonic-build.azurewebsites.net/api/sonic/artifacts?branchName=master&platform=vs&target=target/docker-sonic-vs-asan.gz) + - [docker-sonic-vs.gz](https://sonic-build.azurewebsites.net/api/sonic/artifacts?branchName=master&platform=vs&target=target/docker-sonic-vs.gz) + Once you have the file, you can load it into docker by running `docker load < docker-sonic-vs.gz`. ## Running the tests @@ -76,7 +79,7 @@ sudo pytest ## Setting up a persistent testbed For those developing new features for SWSS or the DVS framework, you might find it helpful to setup a persistent DVS container that you can inspect and make modifications to (e.g. 
using `dpkg -i` to install a new version of SWSS to test a new feature). -1. [Download `create_vnet.sh`](https://github.com/Azure/sonic-buildimage/blob/master/platform/vs/create_vnet.sh). +1. [Download `create_vnet.sh`](https://github.com/sonic-net/sonic-buildimage/blob/master/platform/vs/create_vnet.sh). 2. Setup a virtual server and network. **Note**: It is _highly_ recommended you include the `-n 32` option or you may run into problems running the tests later. diff --git a/tests/_test_dash_crm.py b/tests/_test_dash_crm.py new file mode 100644 index 0000000000..8064946cfd --- /dev/null +++ b/tests/_test_dash_crm.py @@ -0,0 +1,484 @@ +import os +import re +import time +import json +import pytest + +from swsscommon import swsscommon + + +DVS_ENV = ["HWSKU=DPU-2P"] +NUM_PORTS = 2 + + +def crm_update(dvs, field, value): + cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(cfg_db, "CRM") + fvs = swsscommon.FieldValuePairs([(field, value)]) + tbl.set("Config", fvs) + + +@pytest.fixture(scope="module") +def dpu_only(dvs): + config_db = dvs.get_config_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + if metatbl.get("switch_type") != "dpu": + pytest.skip("The test can be run only on the DPU") + + +def to_string(value): + if isinstance(value, bool): + return "true" if value else "false" + return str(value) + + +class DashTable: + + def __init__(self, dvs, name) -> None: + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + self.table = swsscommon.ProducerStateTable(app_db, name) + self.keys = set() + + def add(self, key, value) -> None: + key = to_string(key) + assert key not in self.keys + + fvs = swsscommon.FieldValuePairs([(to_string(k), to_string(v)) for k, v in value.items()]) + self.table.set(key, fvs) + self.keys.add(key) + + def remove(self, key) -> None: + key = str(key) + assert key in self.keys + self.table.delete(str(key)) + self.keys.remove(key) + + def remove_all(self): + 
for key in list(self.keys): + self.remove(key) + + +class Resource: + + counters = None + + def __init__(self, dvs, low_th=0, high_th=2) -> None: + self.dvs = dvs + self.low_th = low_th + self.high_th = high_th + self.polling_interval = 1 + self.marker = self.dvs.add_log_marker() + + def set_tresholds(self) -> None: + for counter in self.counters: + crm_update(self.dvs, "polling_interval", str(self.polling_interval)) + crm_update(self.dvs, f"dash_{counter}_threshold_type", "used") + crm_update(self.dvs, f"dash_{counter}_low_threshold", str(self.low_th)) + crm_update(self.dvs, f"dash_{counter}_high_threshold", str(self.high_th)) + + def check_used_counters(self, used): + for counter, value in self.counters.items(): + entry_used_counter = self.dvs.getCrmCounterValue('STATS', f'crm_stats_dash_{counter}_used') + assert entry_used_counter == used, f"crm_stats_dash_{counter}_used is not equal to expected {used} value" + + def check_threshold_exceeded_message(self): + for counter, value in self.counters.items(): + self.check_syslog(f"{value['th_name']} THRESHOLD_EXCEEDED for TH_USED", 1) + self.check_syslog(f"{value['th_name']} THRESHOLD_CLEAR for TH_USED", 0) + + def check_threshold_cleared_message(self): + for counter, value in self.counters.items(): + self.check_syslog(f"{value['th_name']} THRESHOLD_CLEAR for TH_USED", 1) + + def check_treshold_exceeded(self) -> None: + # Wait for CrmOrch to update counters + time.sleep(self.polling_interval + 1) + + self.check_used_counters(self.high_th) + self.check_threshold_exceeded_message() + + def check_treshold_cleared(self) -> None: + # Wait for CrmOrch to update counters + time.sleep(self.polling_interval + 1) + + self.check_used_counters(self.low_th) + self.check_threshold_cleared_message() + + def configure(self): + raise NotImplementedError() + + def clear(self): + raise NotImplementedError() + + def check_syslog(self, err_log, expected_cnt): + (exitcode, num) = self.dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' 
/var/log/syslog | grep \"%s\" | wc -l" % (self.marker, err_log)]) + assert int(num.strip()) >= expected_cnt, f"Expexted message is not found: '{err_log}'" + + +class Vnet(Resource): + + counters = { + 'vnet': {'th_name': 'VNET'} + } + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + + self.appliance_table = DashTable(self.dvs, swsscommon.APP_DASH_APPLIANCE_TABLE_NAME) + self.vnet_table = DashTable(self.dvs, swsscommon.APP_DASH_VNET_TABLE_NAME) + + def configure(self): + self.appliance_table.add("123", {'sip': '10.1.0.32', 'vm_vni': '123'}) + + for i in range(1, self.high_th + 1): + self.vnet_table.add(f'vnet{i}', {'vni': i, 'guid': i}) + + def clear(self): + self.vnet_table.remove_all() + self.appliance_table.remove_all() + + +class Eni(Resource): + + counters = { + 'eni': {'th_name': 'ENI'}, + 'eni_ether_address_map': {'th_name': 'ENI_ETHER_ADDRESS_MAP'} + } + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + + self.appliance_table = DashTable(self.dvs, swsscommon.APP_DASH_APPLIANCE_TABLE_NAME) + self.vnet_table = DashTable(self.dvs, swsscommon.APP_DASH_VNET_TABLE_NAME) + self.eni_table = DashTable(self.dvs, swsscommon.APP_DASH_ENI_TABLE_NAME) + + def configure(self): + self.appliance_table.add('123', {'sip': '10.1.0.32', 'vm_vni': 123}) + self.vnet_table.add('vnet1', {'vni': 1, 'guid': 1}) + + for i in range(1, self.high_th + 1): + self.eni_table.add(f'eni{i}', {"eni_id":f"eni{i}", + "mac_address":f"00:00:00:00:00:{i:02x}", + "underlay_ip":"10.0.1.1", + "admin_state":"enabled", + "vnet":"vnet1", + "qos":"qos100"}) + + def clear(self): + self.eni_table.remove_all() + self.vnet_table.remove_all() + self.appliance_table.remove_all() + + +class VnetMapping(Resource): + + addr_family = None + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + + self.appliance_table = DashTable(self.dvs, swsscommon.APP_DASH_APPLIANCE_TABLE_NAME) + self.vnet_table = 
DashTable(self.dvs, swsscommon.APP_DASH_VNET_TABLE_NAME) + self.eni_table = DashTable(self.dvs, swsscommon.APP_DASH_ENI_TABLE_NAME) + self.route_table = DashTable(self.dvs, swsscommon.APP_DASH_ROUTE_TABLE_NAME) + self.vnet_mapping_table = DashTable(self.dvs, swsscommon.APP_DASH_VNET_MAPPING_TABLE_NAME) + + self.counters = { + f'{self.addr_family}_outbound_ca_to_pa': {'th_name': 'OUTBOUND_CA_TO_PA'}, + f'{self.addr_family}_pa_validation': {'th_name': 'PA_VALIDATION'}, + } + + def configure(self): + self.appliance_table.add('123', {'sip': '10.1.0.32', 'vm_vni': 123}) + self.vnet_table.add('vnet1', {'vni': 1, 'guid': 1}) + + src_pa_ip = "10.0.1.1" + if self.addr_family == 'ipv6': + src_pa_ip = "2001::1011" + + self.eni_table.add(f'eni1', { + "eni_id":f"eni1", + "mac_address":f"00:00:00:00:00:01", + "underlay_ip": src_pa_ip, + "admin_state":"enabled", + "vnet":"vnet1", + "qos":"qos100" + }) + + for i in range(1, self.high_th + 1): + dst_ca_ip = f'20.2.{i}.1' + dst_pa_ip = f"10.0.{i}.2" + if self.addr_family == 'ipv6': + dst_ca_ip = f'2001::{i}:1011' + dst_pa_ip = f'2002::{i}:1011' + self.vnet_mapping_table.add(f'vnet1:{dst_ca_ip}', { + "routing_type":"vnet_encap", + "underlay_ip":f"{dst_pa_ip}", + "mac_address":"F9:22:83:99:22:A2", + "use_dst_vni":"true" + }) + + def clear(self): + self.vnet_mapping_table.remove_all() + self.eni_table.remove_all() + self.vnet_table.remove_all() + self.appliance_table.remove_all() + + +class Ipv4VnetMapping(VnetMapping): + + addr_family = 'ipv4' + + +class Ipv6VnetMapping(VnetMapping): + + addr_family = 'ipv6' + + +class OutboundRouting(Resource): + + addr_family = None + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + + self.appliance_table = DashTable(self.dvs, swsscommon.APP_DASH_APPLIANCE_TABLE_NAME) + self.vnet_table = DashTable(self.dvs, swsscommon.APP_DASH_VNET_TABLE_NAME) + self.eni_table = DashTable(self.dvs, swsscommon.APP_DASH_ENI_TABLE_NAME) + self.route_table = DashTable(self.dvs, 
swsscommon.APP_DASH_ROUTE_TABLE_NAME) + + self.counters = { + f'{self.addr_family}_outbound_routing': {'th_name': 'OUTBOUND_ROUTING'}, + } + + def configure(self): + self.appliance_table.add('123', {'sip': '10.1.0.32', 'vm_vni': 123}) + self.vnet_table.add('vnet1', {'vni': 1, 'guid': 1}) + + src_pa_ip = "10.0.1.1" + if self.addr_family == 'ipv6': + src_pa_ip = "2001::1011" + + self.eni_table.add(f'eni1', { + "eni_id":f"eni1", + "mac_address":f"00:00:00:00:00:01", + "underlay_ip": src_pa_ip, + "admin_state":"enabled", + "vnet":"vnet1", + "qos":"qos100" + }) + + for i in range(1, self.high_th + 1): + prefix = f"20.2.{i}.0/24" + if self.addr_family == 'ipv6': + prefix = f'2002::{i}:1011/126' + self.route_table.add(f"eni1:{prefix}", {"action_type":"vnet", "vnet":"vnet1"}) + + def clear(self): + self.route_table.remove_all() + self.eni_table.remove_all() + self.vnet_table.remove_all() + self.appliance_table.remove_all() + + +class Ipv4OutboundRouting(OutboundRouting): + + addr_family = 'ipv4' + + +class Ipv6OutboundRouting(OutboundRouting): + + addr_family = 'ipv6' + + +class InboundRouting(Resource): + + addr_family = None + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + + self.appliance_table = DashTable(self.dvs, swsscommon.APP_DASH_APPLIANCE_TABLE_NAME) + self.vnet_table = DashTable(self.dvs, swsscommon.APP_DASH_VNET_TABLE_NAME) + self.eni_table = DashTable(self.dvs, swsscommon.APP_DASH_ENI_TABLE_NAME) + self.route_rule_table = DashTable(self.dvs, swsscommon.APP_DASH_ROUTE_RULE_TABLE_NAME) + + self.counters = { + f'{self.addr_family}_inbound_routing': {'th_name': 'INBOUND_ROUTING'}, + } + + def configure(self): + self.appliance_table.add('123', {'sip': '10.1.0.32', 'vm_vni': 123}) + self.vnet_table.add('vnet1', {'vni': 1, 'guid': 1}) + + src_pa_ip = "10.0.1.1" + if self.addr_family == 'ipv6': + src_pa_ip = "2001::1011" + + self.eni_table.add(f'eni1', { + "eni_id":f"eni1", + "mac_address":f"00:00:00:00:00:01", + "underlay_ip": 
src_pa_ip, + "admin_state":"enabled", + "vnet":"vnet1", + "qos":"qos100" + }) + + for i in range(1, self.high_th + 1): + dst_pa_prefix = f"11.2.{i}.0/24" + if self.addr_family == 'ipv6': + dst_pa_prefix = f'2003::{i}:1011/126' + + self.route_rule_table.add(f"eni1:1:{dst_pa_prefix}", { + "action_type":"decap", + "priority":"1", + "pa_validation":"true", + "vnet":"vnet1" + }) + + def clear(self): + self.route_rule_table.remove_all() + self.eni_table.remove_all() + self.vnet_table.remove_all() + self.appliance_table.remove_all() + + +class Ipv4InboundRouting(InboundRouting): + + addr_family = 'ipv4' + + +class Ipv6InboundRouting(InboundRouting): + + addr_family = 'ipv6' + + +class AclGroup(Resource): + + addr_family = None + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + + self.acl_group_table = DashTable(self.dvs, swsscommon.APP_DASH_ACL_GROUP_TABLE_NAME) + + self.counters = { + f'{self.addr_family}_acl_group': {'th_name': 'ACL_GROUP'}, + } + + def configure(self): + for i in range(1, self.high_th + 1): + self.acl_group_table.add(f"group{i}", {"ip_version": self.addr_family, "guid": f"dash-group-{i}"}) + + def clear(self): + self.acl_group_table.remove_all() + + +class Ipv4AclGroup(AclGroup): + + addr_family = 'ipv4' + + +class Ipv6AclGroup(AclGroup): + + addr_family = 'ipv6' + + +class AclRule(Resource): + + addr_family = None + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + + self.acl_group_table = DashTable(self.dvs, swsscommon.APP_DASH_ACL_GROUP_TABLE_NAME) + self.acl_rule_table = DashTable(self.dvs, swsscommon.APP_DASH_ACL_RULE_TABLE_NAME) + + self.counters = { + f'{self.addr_family}_acl_rule': {'th_name': 'ACL_RULE'}, + } + + self.pubsub = self.dvs.SubscribeAsicDbObject("SAI_OBJECT_TYPE_DASH_ACL_GROUP") + self.acl_group_id = None + + def get_acl_group_counters(self, oid, counter): + return self.dvs.getCrmCounterValue(f"DASH_ACL_GROUP_STATS:{oid}", counter) + + def 
check_used_counters(self, used): + for counter, value in self.counters.items(): + entry_used_counter = self.get_acl_group_counters(self.acl_group_id, f'crm_stats_dash_{counter}_used') + if used: + assert entry_used_counter == used, f"crm_stats_dash_{counter}_used is not equal to expected {used} value" + else: + # Verify that counter is removed from the DB + assert entry_used_counter == None, f"crm_stats_dash_{counter}_used is not removed from DB" + + def configure(self): + self.acl_group_table.add(f"group1", {"ip_version": self.addr_family, "guid": f"dash-group-1"}) + + (added, deleted) = self.dvs.GetSubscribedAsicDbObjects(self.pubsub) + assert len(added) == 1 + assert len(deleted) == 0 + + oid = added[0]['key'].replace("oid:", "") + self.acl_group_id = (oid) + + for i in range(1, self.high_th + 1): + self.acl_rule_table.add(f"group1:rule{i}", { + "priority": i, + "action": "allow", + "terminating": "true", + "src_addr": f"{i}.0.0.0/0", + "dst_addr": f"{i}.0.0.0/0", + "src_port": "0-65535", + "dst_port": "0-65535" + }) + + def clear(self): + self.acl_rule_table.remove_all() + # Wait for the counters update by CrmOrch before removing the ACL group. + # If the ACL group will be removed immediately after removing its rules the CrmOrch + # won't send a "threshold cleared" message to Syslog. 
+ time.sleep(self.polling_interval + 1) + + # Verify that used counter is 0 after removal of all ACL rules + for counter, value in self.counters.items(): + entry_used_counter = self.get_acl_group_counters(self.acl_group_id, f'crm_stats_dash_{counter}_used') + assert entry_used_counter == 0 + + self.acl_group_table.remove_all() + + +class Ipv4AclRule(AclRule): + + addr_family = 'ipv4' + + +class Ipv6AclRule(AclRule): + + addr_family = 'ipv6' + + +class TestCrmDash: + + @pytest.mark.parametrize('dash_entry', [ + Vnet, + Eni, + Ipv4VnetMapping, + Ipv6VnetMapping, + Ipv4OutboundRouting, + Ipv6OutboundRouting, + Ipv4InboundRouting, + Ipv6InboundRouting, + Ipv4AclGroup, + Ipv6AclGroup, + Ipv4AclRule, + Ipv6AclRule + ]) + def test_crm_dash_entry(self, dash_entry, dvs, testlog, dpu_only): + entry = dash_entry(dvs) + entry.set_tresholds() + entry.configure() + entry.check_treshold_exceeded() + entry.clear() + entry.check_treshold_cleared() diff --git a/tests/conftest.py b/tests/conftest.py index 9a7abb1f06..93f54c824e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -11,6 +11,7 @@ import sys import tarfile import io +import traceback from typing import Dict, Tuple from datetime import datetime @@ -26,6 +27,9 @@ from dvslib import dvs_lag from dvslib import dvs_mirror from dvslib import dvs_policer +from dvslib import dvs_hash +from dvslib import dvs_switch +from dvslib import dvs_twamp from buffer_model import enable_dynamic_buffer @@ -92,7 +96,18 @@ def pytest_addoption(parser): parser.addoption("--graceful-stop", action="store_true", default=False, - help="Stop swss before stopping a conatainer") + help="Stop swss and syncd before stopping a conatainer") + + parser.addoption("--num-ports", + action="store", + default=NUM_PORTS, + type=int, + help="number of ports") + + parser.addoption("--enable-coverage", + action="store_true", + default=False, + help="Collect the test coverage information") def random_string(size=4, chars=string.ascii_uppercase + 
string.digits): @@ -100,11 +115,12 @@ def random_string(size=4, chars=string.ascii_uppercase + string.digits): class AsicDbValidator(DVSDatabase): - def __init__(self, db_id: int, connector: str): + def __init__(self, db_id: int, connector: str, switch_type: str): DVSDatabase.__init__(self, db_id, connector) - self._wait_for_asic_db_to_initialize() - self._populate_default_asic_db_values() - self._generate_oid_to_interface_mapping() + if switch_type not in ['fabric']: + self._wait_for_asic_db_to_initialize() + self._populate_default_asic_db_values() + self._generate_oid_to_interface_mapping() def _wait_for_asic_db_to_initialize(self) -> None: """Wait up to 30 seconds for the default fields to appear in ASIC DB.""" @@ -150,6 +166,10 @@ def _populate_default_asic_db_values(self) -> None: self.default_acl_tables = self.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE") self.default_acl_entries = self.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY") + self.default_hash_keys = self.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_HASH") + + self.default_switch_keys = self.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_SWITCH") + self.default_copp_policers = self.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_POLICER") @@ -270,6 +290,7 @@ def __init__( newctnname: str = None, ctnmounts: Dict[str, str] = None, buffer_model: str = None, + enable_coverage: bool = False ): self.basicd = ["redis-server", "rsyslogd"] self.swssd = [ @@ -291,6 +312,7 @@ def __init__( self.dvsname = name self.vct = vct self.ctn = None + self.enable_coverage = enable_coverage self.cleanup = not keeptb @@ -427,10 +449,37 @@ def del_appl_db(self): if getattr(self, 'appldb', False): del self.appldb + def collect_coverage(self): + if not self.enable_coverage: + return + try: + # Generate the gcda files + self.runcmd('killall5 -15') + time.sleep(1) + + # Stop the services to reduce the CPU comsuption + if self.cleanup: + self.runcmd('supervisorctl stop all') + + # Generate the converage info by lcov and copy to the host + cmd = 
f"docker exec {self.ctn.short_id} sh -c 'cd $BUILD_DIR; rm -rf **/.libs ./lib/libSaiRedis*; lcov -c --directory . --no-external --exclude tests --ignore-errors gcov,unused --output-file /tmp/coverage.info; sed -i \"s#SF:$BUILD_DIR/#SF:#\" /tmp/coverage.info; lcov_cobertura /tmp/coverage.info -o /tmp/coverage.xml'" + subprocess.getstatusoutput(cmd) + cmd = f"docker exec {self.ctn.short_id} sh -c 'cd $BUILD_DIR; find . -name *.gcda -type f -exec tar -rf /tmp/gcda.tar {{}} \\;'" + subprocess.getstatusoutput(cmd) + cmd = f"docker cp {self.ctn.short_id}:/tmp/gcda.tar {self.ctn.short_id}.gcda.tar" + subprocess.getstatusoutput(cmd) + cmd = f"docker cp {self.ctn.short_id}:/tmp/coverage.info {self.ctn.short_id}.coverage.info" + subprocess.getstatusoutput(cmd) + cmd = f"docker cp {self.ctn.short_id}:/tmp/coverage.xml {self.ctn.short_id}.coverage.xml" + subprocess.getstatusoutput(cmd) + except: + traceback.print_exc() def destroy(self) -> None: self.del_appl_db() + self.collect_coverage() + # In case persistent dvs was used removed all the extra server link # that were created if self.persistent: @@ -438,10 +487,13 @@ def destroy(self) -> None: # persistent and clean-up flag are mutually exclusive elif self.cleanup: - self.ctn.remove(force=True) - self.ctn_sw.remove(force=True) - os.system(f"rm -rf {self.mount}") - self.destroy_servers() + try: + self.ctn.remove(force=True) + self.ctn_sw.remove(force=True) + os.system(f"rm -rf {self.mount}") + self.destroy_servers() + except docker.errors.NotFound: + print("Skipped the container not found error, the container has already removed.") def destroy_servers(self): for s in self.servers: @@ -497,7 +549,9 @@ def _polling_function(): wait_for_result(_polling_function, service_polling_config) def init_asic_db_validator(self) -> None: - self.asicdb = AsicDbValidator(self.ASIC_DB_ID, self.redis_sock) + self.get_config_db() + metadata = self.config_db.get_entry('DEVICE_METADATA|localhost', '') + self.asicdb = 
AsicDbValidator(self.ASIC_DB_ID, self.redis_sock, metadata.get("switch_type")) def init_appl_db_validator(self) -> None: self.appldb = ApplDbValidator(self.APPL_DB_ID, self.redis_sock) @@ -526,11 +580,13 @@ def _polling_function(): port_table_keys = app_db.get_keys("PORT_TABLE") return ("PortInitDone" in port_table_keys and "PortConfigDone" in port_table_keys, None) - wait_for_result(_polling_function, startup_polling_config) + if metadata.get('switch_type') not in ['fabric']: + wait_for_result(_polling_function, startup_polling_config) # Verify that all ports have been created - asic_db = self.get_asic_db() - asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", num_ports + 1) # +1 CPU Port + if metadata.get('switch_type') not in ['fabric']: + asic_db = self.get_asic_db() + asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", num_ports + 1) # +1 CPU Port # Verify that fabric ports are monitored in STATE_DB if metadata.get('switch_type', 'npu') in ['voq', 'fabric']: @@ -595,8 +651,8 @@ def restart(self) -> None: self.ctn_restart() self.check_ready_status_and_init_db() - def runcmd(self, cmd: str) -> Tuple[int, str]: - res = self.ctn.exec_run(cmd) + def runcmd(self, cmd: str, include_stderr=True) -> Tuple[int, str]: + res = self.ctn.exec_run(cmd, stdout=True, stderr=include_stderr) exitcode = res.exit_code out = res.output.decode("utf-8") @@ -665,6 +721,10 @@ def stop_swss(self): self.runcmd(['sh', '-c', cmd]) time.sleep(5) + def stop_syncd(self): + self.runcmd(['sh', '-c', 'supervisorctl stop syncd']) + time.sleep(5) + # deps: warm_reboot def start_zebra(self): self.runcmd(['sh', '-c', 'supervisorctl start zebra']) @@ -1132,10 +1192,10 @@ def remove_fdb(self, vlan, mac): # deps: acl, fdb_update, fdb, intf_mac, mirror_port_erspan, mirror_port_span, # policer, port_dpb_vlan, vlan def setup_db(self): - self.pdb = swsscommon.DBConnector(0, self.redis_sock, 0) - self.adb = swsscommon.DBConnector(1, self.redis_sock, 0) - self.cdb = swsscommon.DBConnector(4, 
self.redis_sock, 0) - self.sdb = swsscommon.DBConnector(6, self.redis_sock, 0) + self.pdb = swsscommon.DBConnector(swsscommon.APPL_DB, self.redis_sock, 0) + self.adb = swsscommon.DBConnector(swsscommon.ASIC_DB, self.redis_sock, 0) + self.cdb = swsscommon.DBConnector(swsscommon.CONFIG_DB, self.redis_sock, 0) + self.sdb = swsscommon.DBConnector(swsscommon.STATE_DB, self.redis_sock, 0) def getSwitchOid(self): tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH") @@ -1329,6 +1389,8 @@ def get_asic_db(self) -> AsicDbValidator: db = DVSDatabase(self.ASIC_DB_ID, self.redis_sock) db.default_acl_tables = self.asicdb.default_acl_tables db.default_acl_entries = self.asicdb.default_acl_entries + db.default_hash_keys = self.asicdb.default_hash_keys + db.default_switch_keys = self.asicdb.default_switch_keys db.default_copp_policers = self.asicdb.default_copp_policers db.port_name_map = self.asicdb.portnamemap db.default_vlan_id = self.asicdb.default_vlan_id @@ -1377,7 +1439,8 @@ def __init__( log_path=None, max_cpu=2, forcedvs=None, - topoFile=None + topoFile=None, + enable_coverage=False, ): self.ns = namespace self.chassbr = "br4chs" @@ -1391,6 +1454,7 @@ def __init__( self.log_path = log_path self.max_cpu = max_cpu self.forcedvs = forcedvs + self.enable_coverage = enable_coverage if self.ns is None: self.ns = random_string() @@ -1443,7 +1507,7 @@ def find_all_ctns(self): self.dvss[ctn.name] = DockerVirtualSwitch(ctn.name, self.imgname, self.keeptb, self.env, log_path=ctn.name, max_cpu=self.max_cpu, forcedvs=self.forcedvs, - vct=self) + vct=self, enable_coverage=self.enable_coverage) if self.chassbr is None and len(self.dvss) > 0: ret, res = self.ctn_runcmd(self.dvss.values()[0].ctn, "sonic-cfggen --print-data -j /usr/share/sonic/virtual_chassis/vct_connections.json") @@ -1514,6 +1578,8 @@ def handle_request(self): def destroy(self): self.verify_vct() + for dv in self.dvss.values(): + dv.collect_coverage() if self.keeptb: return self.oper = "delete" @@ -1564,7 
+1630,8 @@ def create_vct_ctn(self, ctndir): max_cpu=self.max_cpu, forcedvs=self.forcedvs, vct=self,newctnname=ctnname, - ctnmounts=vol) + ctnmounts=vol, + enable_coverage=self.enable_coverage) self.set_ctninfo(ctndir, ctnname, self.dvss[ctnname].pid) return @@ -1736,6 +1803,7 @@ def manage_dvs(request) -> str: buffer_model = request.config.getoption("--buffer_model") force_recreate = request.config.getoption("--force-recreate-dvs") graceful_stop = request.config.getoption("--graceful-stop") + enable_coverage = request.config.getoption("--enable-coverage") dvs = None curr_dvs_env = [] # lgtm[py/unused-local-variable] @@ -1767,7 +1835,7 @@ def update_dvs(log_path, new_dvs_env=[]): dvs.get_logs() dvs.destroy() - dvs = DockerVirtualSwitch(name, imgname, keeptb, new_dvs_env, log_path, max_cpu, forcedvs, buffer_model = buffer_model) + dvs = DockerVirtualSwitch(name, imgname, keeptb, new_dvs_env, log_path, max_cpu, forcedvs, buffer_model = buffer_model, enable_coverage=enable_coverage) curr_dvs_env = new_dvs_env @@ -1787,6 +1855,8 @@ def update_dvs(log_path, new_dvs_env=[]): if graceful_stop: dvs.stop_swss() + dvs.stop_syncd() + dvs.get_logs() dvs.destroy() @@ -1797,11 +1867,36 @@ def update_dvs(log_path, new_dvs_env=[]): @pytest.fixture(scope="module") def dvs(request, manage_dvs) -> DockerVirtualSwitch: dvs_env = getattr(request.module, "DVS_ENV", []) + global NUM_PORTS + if getattr(request.module, "NUM_PORTS", None): + NUM_PORTS = getattr(request.module, "NUM_PORTS") + else: + NUM_PORTS = request.config.getoption("--num-ports") name = request.config.getoption("--dvsname") log_path = name if name else request.module.__name__ return manage_dvs(log_path, dvs_env) +@pytest.yield_fixture(scope="module") +def vst(request): + vctns = request.config.getoption("--vctns") + topo = request.config.getoption("--topo") + forcedvs = request.config.getoption("--forcedvs") + keeptb = request.config.getoption("--keeptb") + imgname = request.config.getoption("--imgname") + max_cpu = 
request.config.getoption("--max_cpu") + enable_coverage = request.config.getoption("--enable-coverage") + log_path = vctns if vctns else request.module.__name__ + dvs_env = getattr(request.module, "DVS_ENV", []) + if not topo: + # use ecmp topology as default + topo = "virtual_chassis/chassis_supervisor.json" + vct = DockerVirtualChassisTopology(vctns, imgname, keeptb, dvs_env, log_path, max_cpu, + forcedvs, topo, enable_coverage) + yield vct + vct.get_logs(request.module.__name__) + vct.destroy() + @pytest.fixture(scope="module") def vct(request): vctns = request.config.getoption("--vctns") @@ -1810,13 +1905,14 @@ def vct(request): keeptb = request.config.getoption("--keeptb") imgname = request.config.getoption("--imgname") max_cpu = request.config.getoption("--max_cpu") + enable_coverage = request.config.getoption("--enable-coverage") log_path = vctns if vctns else request.module.__name__ dvs_env = getattr(request.module, "DVS_ENV", []) if not topo: # use ecmp topology as default topo = "virtual_chassis/chassis_with_ecmp_neighbors.json" vct = DockerVirtualChassisTopology(vctns, imgname, keeptb, dvs_env, log_path, max_cpu, - forcedvs, topo) + forcedvs, topo, enable_coverage) yield vct vct.get_logs(request.module.__name__) vct.destroy() @@ -1825,7 +1921,7 @@ def vct(request): @pytest.fixture def testlog(request, dvs): dvs.runcmd(f"logger -t pytest === start test {request.node.nodeid} ===") - yield testlog + yield dvs.runcmd(f"logger -t pytest === finish test {request.node.nodeid} ===") ################# DVSLIB module manager fixtures ############################# @@ -1870,6 +1966,7 @@ def dvs_vlan_manager(request, dvs): @pytest.fixture(scope="class") def dvs_port_manager(request, dvs): request.cls.dvs_port = dvs_port.DVSPort(dvs.get_asic_db(), + dvs.get_app_db(), dvs.get_config_db()) @@ -1887,6 +1984,23 @@ def dvs_policer_manager(request, dvs): request.cls.dvs_policer = dvs_policer.DVSPolicer(dvs.get_asic_db(), dvs.get_config_db()) +@pytest.fixture(scope="class") 
+def dvs_hash_manager(request, dvs): + request.cls.dvs_hash = dvs_hash.DVSHash(dvs.get_asic_db(), + dvs.get_config_db()) + +@pytest.fixture(scope="class") +def dvs_switch_manager(request, dvs): + request.cls.dvs_switch = dvs_switch.DVSSwitch(dvs.get_asic_db()) + +@pytest.fixture(scope="class") +def dvs_twamp_manager(request, dvs): + request.cls.dvs_twamp = dvs_twamp.DVSTwamp(dvs.get_asic_db(), + dvs.get_config_db(), + dvs.get_state_db(), + dvs.get_counters_db(), + dvs.get_app_db()) + ##################### DPB fixtures ########################################### def create_dpb_config_file(dvs): cmd = "sonic-cfggen -j /etc/sonic/init_cfg.json -j /tmp/ports.json --print-data > /tmp/dpb_config_db.json" diff --git a/tests/create_appliance.py b/tests/create_appliance.py new file mode 100644 index 0000000000..199a8ec636 --- /dev/null +++ b/tests/create_appliance.py @@ -0,0 +1,42 @@ +#!/usr/bin/python3 + +""" + Connect to Dash orch with ZMQ and send create appliance request. + usage: + python3 create_appliance.py [appliance ID] + Example: + python3 create_appliance.py 1234 +""" + +from swsscommon import swsscommon +from dash_api.appliance_pb2 import * +import typing +import ipaddress +import socket +import sys + +def to_string(value): + if isinstance(value, bool): + return "true" if value else "false" + elif isinstance(value, bytes): + return value + return str(value) + +# connect to Dash ZMQ endpoint +db_connection = swsscommon.DBConnector("APPL_DB", 0) +zmq_client = swsscommon.ZmqClient("tcp://127.0.0.1:8100") +app_dash_appliance_table = swsscommon.ZmqProducerStateTable( + db_connection, + "DASH_APPLIANCE_TABLE", + zmq_client, + True) + +# prepare create appliance request +pairs_str = [] +pb = Appliance() +pb.sip.ipv4 = socket.htonl(int(ipaddress.ip_address("10.0.0.1"))) +pb.vm_vni = int(sys.argv[1]) +pairs_str.append(("pb", pb.SerializeToString())) + +# send create appliance request via ZMQ +app_dash_appliance_table.set("100", pairs_str) diff --git 
a/tests/dvslib/dvs_acl.py b/tests/dvslib/dvs_acl.py index 266761c568..4315da3798 100644 --- a/tests/dvslib/dvs_acl.py +++ b/tests/dvslib/dvs_acl.py @@ -1,6 +1,6 @@ """Utilities for interacting with ACLs when writing VS tests.""" from typing import Callable, Dict, List - +from swsscommon import swsscommon class DVSAcl: """Manage ACL tables and rules on the virtual switch.""" @@ -18,6 +18,9 @@ class DVSAcl: ADB_ACL_GROUP_MEMBER_TABLE_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER" ADB_ACL_COUNTER_TABLE_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_ACL_COUNTER" + STATE_DB_ACL_TABLE_TABLE_NAME = "ACL_TABLE_TABLE" + STATE_DB_ACL_RULE_TABLE_NAME = "ACL_RULE_TABLE" + ADB_ACL_STAGE_LOOKUP = { "ingress": "SAI_ACL_STAGE_INGRESS", "egress": "SAI_ACL_STAGE_EGRESS" @@ -54,7 +57,8 @@ def create_acl_table_type( self, name: str, matches: List[str], - bpoint_types: List[str] + bpoint_types: List[str], + actions: List[str] ) -> None: """Create a new ACL table type in Config DB. @@ -62,10 +66,12 @@ def create_acl_table_type( name: The name for the new ACL table type. matches: A list of matches to use in ACL table. bpoint_types: A list of bind point types to use in ACL table. + actions: A list of actions to use in ACL table """ table_type_attrs = { "matches@": ",".join(matches), - "bind_points@": ",".join(bpoint_types) + "bind_points@": ",".join(bpoint_types), + "actions@": ",".join(actions) } self.config_db.create_entry(self.CDB_ACL_TABLE_TYPE_NAME, name, table_type_attrs) @@ -306,6 +312,26 @@ def verify_acl_table_port_binding( self.verify_acl_table_group_members(acl_table_id, acl_table_group_ids, num_tables) + + def verify_acl_table_action_list( + self, + acl_table_id: str, + expected_action_list: List[str], + ) -> None: + """Verify that the ACL table has specified action list. + Args: + acl_table_id: The ACL table that is being checked. + expected_action_list: The expected action list set to the given ACL table. 
+ """ + fvs = self.asic_db.wait_for_entry(self.ADB_ACL_TABLE_NAME, acl_table_id) + action_list_str = fvs.get('SAI_ACL_TABLE_ATTR_ACL_ACTION_TYPE_LIST') + action_count, actions = action_list_str.split(':') + action_list = actions.split(',') + assert (int(action_count) == len(action_list)) + for action in expected_action_list: + assert action in action_list + + def create_acl_rule( self, table_name: str, @@ -717,3 +743,43 @@ def _check_acl_entry_counters_map(self, acl_entry_oid: str): rule_to_counter_map = self.counters_db.get_entry("ACL_COUNTER_RULE_MAP", "") counter_to_rule_map = {v: k for k, v in rule_to_counter_map.items()} assert counter_oid in counter_to_rule_map + + def verify_acl_table_status( + self, + acl_table_name, + expected_status + ) -> None: + """Verify that the STATE_DB status of ACL table is as expected. + + Args: + acl_table_name: The name of ACL table to check + expected_status: The expected status in STATE_DB + """ + if expected_status: + fvs = self.state_db.wait_for_entry(self.STATE_DB_ACL_TABLE_TABLE_NAME, acl_table_name) + assert len(fvs) > 0 + assert (fvs['status'] == expected_status) + else: + self.state_db.wait_for_deleted_entry(self.STATE_DB_ACL_TABLE_TABLE_NAME, acl_table_name) + + def verify_acl_rule_status( + self, + acl_table_name, + acl_rule_name, + expected_status + ) -> None: + """Verify that the STATE_DB status of ACL rule is as expected. 
+ + Args: + acl_table_name: The name of ACL table to check + acl_rule_name: The name of ACL rule to check + expected_status: The expected status in STATE_DB + """ + key = acl_table_name + "|" + acl_rule_name + if expected_status: + fvs = self.state_db.wait_for_entry(self.STATE_DB_ACL_RULE_TABLE_NAME, key) + assert len(fvs) > 0 + assert (fvs['status'] == expected_status) + else: + self.state_db.wait_for_deleted_entry(self.STATE_DB_ACL_RULE_TABLE_NAME, key) + diff --git a/tests/dvslib/dvs_database.py b/tests/dvslib/dvs_database.py index 371b7f61e9..553c0d7710 100644 --- a/tests/dvslib/dvs_database.py +++ b/tests/dvslib/dvs_database.py @@ -6,6 +6,7 @@ """ from typing import Dict, List from swsscommon import swsscommon +from swsscommon.swsscommon import SonicDBConfig from dvslib.dvs_common import wait_for_result, PollingConfig @@ -21,6 +22,12 @@ def __init__(self, db_id: int, connector: str): redis (e.g. UNIX socket, TCP socket, etc.). """ self.db_connection = swsscommon.DBConnector(db_id, connector, 0) + self._separator = SonicDBConfig.getSeparator(self.db_connection) + + @property + def separator(self) -> str: + """Get DB separator.""" + return self._separator def create_entry(self, table_name: str, key: str, entry: Dict[str, str]) -> None: """Add the mapping {`key` -> `entry`} to the specified table. 
diff --git a/tests/dvslib/dvs_hash.py b/tests/dvslib/dvs_hash.py new file mode 100644 index 0000000000..5ac896962c --- /dev/null +++ b/tests/dvslib/dvs_hash.py @@ -0,0 +1,80 @@ +"""Utilities for interacting with HASH objects when writing VS tests.""" +from typing import Dict, List + + +class DVSHash: + """Manage hash objects on the virtual switch.""" + + CDB_SWITCH_HASH = "SWITCH_HASH" + KEY_SWITCH_HASH_GLOBAL = "GLOBAL" + + ADB_HASH = "ASIC_STATE:SAI_OBJECT_TYPE_HASH" + + def __init__(self, asic_db, config_db): + """Create a new DVS hash manager.""" + self.asic_db = asic_db + self.config_db = config_db + + def update_switch_hash( + self, + qualifiers: Dict[str, str] + ) -> None: + """Update switch hash global in Config DB.""" + self.config_db.update_entry(self.CDB_SWITCH_HASH, self.KEY_SWITCH_HASH_GLOBAL, qualifiers) + + def get_hash_ids( + self, + expected: int = None + ) -> List[str]: + """Get all of the hash ids in ASIC DB. + + Args: + expected: The number of hash ids that are expected to be present in ASIC DB. + + Returns: + The list of hash ids in ASIC DB. + """ + if expected is None: + return self.asic_db.get_keys(self.ADB_HASH) + + num_keys = len(self.asic_db.default_hash_keys) + expected + keys = self.asic_db.wait_for_n_keys(self.ADB_HASH, num_keys) + + for k in self.asic_db.default_hash_keys: + assert k in keys + + return [k for k in keys if k not in self.asic_db.default_hash_keys] + + def verify_hash_count( + self, + expected: int + ) -> None: + """Verify that there are N hash objects in ASIC DB. + + Args: + expected: The number of hash ids that are expected to be present in ASIC DB. + """ + self.get_hash_ids(expected) + + def verify_hash_generic( + self, + sai_hash_id: str, + sai_qualifiers: Dict[str, str] + ) -> None: + """Verify that hash object has correct ASIC DB representation. + + Args: + sai_hash_id: The specific hash id to check in ASIC DB. + sai_qualifiers: The expected set of SAI qualifiers to be found in ASIC DB. 
+ """ + entry = self.asic_db.wait_for_entry(self.ADB_HASH, sai_hash_id) + + for k, v in entry.items(): + if k == "NULL": + continue + elif k in sai_qualifiers: + if k == "SAI_HASH_ATTR_NATIVE_HASH_FIELD_LIST": + hfList = v[v.index(":")+1:].split(",") + assert set(sai_qualifiers[k]) == set(hfList) + else: + assert False, "Unknown SAI qualifier: key={}, value={}".format(k, v) diff --git a/tests/dvslib/dvs_pbh.py b/tests/dvslib/dvs_pbh.py index df612638ea..2caf059adc 100644 --- a/tests/dvslib/dvs_pbh.py +++ b/tests/dvslib/dvs_pbh.py @@ -11,6 +11,7 @@ class DVSPbh: CDB_PBH_HASH_FIELD = "PBH_HASH_FIELD" ADB_PBH_HASH = "ASIC_STATE:SAI_OBJECT_TYPE_HASH" + ADB_PBH_HASH_FIELD = "ASIC_STATE:SAI_OBJECT_TYPE_FINE_GRAINED_HASH_FIELD" def __init__(self, asic_db, config_db): """Create a new DVS PBH Manager.""" @@ -110,13 +111,6 @@ def remove_pbh_hash( """Remove PBH hash from Config DB.""" self.config_db.delete_entry(self.CDB_PBH_HASH, hash_name) - def verify_pbh_hash_count( - self, - expected: int - ) -> None: - """Verify that there are N hash objects in ASIC DB.""" - self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_HASH", expected) - def create_pbh_hash_field( self, hash_field_name: str, @@ -147,11 +141,4 @@ def verify_pbh_hash_field_count( expected: int ) -> None: """Verify that there are N hash field objects in ASIC DB.""" - self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_FINE_GRAINED_HASH_FIELD", expected) - - def get_pbh_hash_ids( - self, - expected: int - ) -> List[str]: - """Get all of the PBH hash IDs in ASIC DB.""" - return self.asic_db.wait_for_n_keys(self.ADB_PBH_HASH, expected) + self.asic_db.wait_for_n_keys(self.ADB_PBH_HASH_FIELD, expected) diff --git a/tests/dvslib/dvs_port.py b/tests/dvslib/dvs_port.py index 8c53994242..330245099c 100644 --- a/tests/dvslib/dvs_port.py +++ b/tests/dvslib/dvs_port.py @@ -1,8 +1,44 @@ +"""Utilities for interacting with PORT objects when writing VS tests.""" +from typing import Dict, List +from swsscommon import 
swsscommon + class DVSPort(object): - def __init__(self, adb, cdb): - self.asic_db = adb - self.config_db = cdb + """Manage PORT objects on the virtual switch.""" + ASIC_DB = swsscommon.ASIC_DB + APPL_DB = swsscommon.APPL_DB + + CFGDB_PORT = "PORT" + APPDB_PORT = "PORT_TABLE" + ASICDB_PORT = "ASIC_STATE:SAI_OBJECT_TYPE_PORT" + + def __init__(self, asicdb, appdb, cfgdb): + self.asic_db = asicdb + self.app_db = appdb + self.config_db = cfgdb + + def create_port_generic( + self, + port_name: str, + lanes: str, + speed: str, + qualifiers: Dict[str, str] = {} + ) -> None: + """Create PORT in Config DB.""" + attr_dict = { + "lanes": lanes, + "speed": speed, + **qualifiers + } + + self.config_db.create_entry(self.CFGDB_PORT, port_name, attr_dict) + + def remove_port_generic( + self, + port_name: str + )-> None: + """Remove PORT from Config DB.""" + self.config_db.delete_entry(self.CFGDB_PORT, port_name) def remove_port(self, port_name): self.config_db.delete_field("CABLE_LENGTH", "AZURE", port_name) @@ -18,3 +54,42 @@ def remove_port(self, port_name): self.config_db.delete_entry("BREAKOUT_CFG|%s" % port_name, "") self.config_db.delete_entry("INTERFACE|%s" % port_name, "") self.config_db.delete_entry("PORT", port_name) + + def update_port( + self, + port_name: str, + attr_dict: Dict[str, str] + ) -> None: + """Update PORT in Config DB.""" + self.config_db.update_entry(self.CFGDB_PORT, port_name, attr_dict) + + def get_port_ids( + self, + expected: int = None, + dbid: int = swsscommon.ASIC_DB + ) -> List[str]: + """Get all of the PORT objects in ASIC/APP DB.""" + conn = None + table = None + + if dbid == swsscommon.ASIC_DB: + conn = self.asic_db + table = self.ASICDB_PORT + elif dbid == swsscommon.APPL_DB: + conn = self.app_db + table = self.APPDB_PORT + else: + raise RuntimeError("Interface not implemented") + + if expected is None: + return conn.get_keys(table) + + return conn.wait_for_n_keys(table, expected) + + def verify_port_count( + self, + expected: int, + dbid: int 
= swsscommon.ASIC_DB + ) -> None: + """Verify that there are N PORT objects in ASIC/APP DB.""" + self.get_port_ids(expected, dbid) diff --git a/tests/dvslib/dvs_switch.py b/tests/dvslib/dvs_switch.py new file mode 100644 index 0000000000..b57dc7082f --- /dev/null +++ b/tests/dvslib/dvs_switch.py @@ -0,0 +1,96 @@ +"""Utilities for interacting with SWITCH objects when writing VS tests.""" +from typing import Dict, List + + +class DVSSwitch: + """Manage switch objects on the virtual switch.""" + + ADB_SWITCH = "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH" + + def __init__(self, asic_db): + """Create a new DVS switch manager.""" + self.asic_db = asic_db + + def get_switch_ids( + self, + expected: int = None + ) -> List[str]: + """Get all of the switch ids in ASIC DB. + + Args: + expected: The number of switch ids that are expected to be present in ASIC DB. + + Returns: + The list of switch ids in ASIC DB. + """ + if expected is None: + return self.asic_db.get_keys(self.ADB_SWITCH) + + num_keys = len(self.asic_db.default_switch_keys) + expected + keys = self.asic_db.wait_for_n_keys(self.ADB_SWITCH, num_keys) + + for k in self.asic_db.default_switch_keys: + assert k in keys + + return [k for k in keys if k not in self.asic_db.default_switch_keys] + + def verify_switch_count( + self, + expected: int + ) -> None: + """Verify that there are N switch objects in ASIC DB. + + Args: + expected: The number of switch ids that are expected to be present in ASIC DB. + """ + self.get_switch_ids(expected) + + def verify_switch_generic( + self, + sai_switch_id: str, + sai_qualifiers: Dict[str, str] + ) -> None: + """Verify that switch object has correct ASIC DB representation. + + Args: + sai_switch_id: The specific switch id to check in ASIC DB. + sai_qualifiers: The expected set of SAI qualifiers to be found in ASIC DB. 
+ """ + entry = self.asic_db.wait_for_entry(self.ADB_SWITCH, sai_switch_id) + + for k, v in entry.items(): + if k == "NULL": + continue + elif k in sai_qualifiers: + if k == "SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_ALGORITHM": + assert sai_qualifiers[k] == v + elif k == "SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_ALGORITHM": + assert sai_qualifiers[k] == v + else: + assert False, "Unknown SAI qualifier: key={}, value={}".format(k, v) + + def verify_switch( + self, + sai_switch_id: str, + sai_qualifiers: Dict[str, str], + strict: bool = False + ) -> None: + """Verify that switch object has correct ASIC DB representation. + + Args: + sai_switch_id: The specific switch id to check in ASIC DB. + sai_qualifiers: The expected set of SAI qualifiers to be found in ASIC DB. + strict: Specifies whether verification should be strict + """ + if strict: + self.verify_switch_generic(sai_switch_id, sai_qualifiers) + return + + entry = self.asic_db.wait_for_entry(self.ADB_SWITCH, sai_switch_id) + + attr_dict = { + **entry, + **sai_qualifiers + } + + self.verify_switch_generic(sai_switch_id, attr_dict) diff --git a/tests/dvslib/dvs_twamp.py b/tests/dvslib/dvs_twamp.py new file mode 100644 index 0000000000..864b072bd6 --- /dev/null +++ b/tests/dvslib/dvs_twamp.py @@ -0,0 +1,98 @@ +"""Utilities for interacting with TWAMP Light objects when writing VS tests.""" + +class DVSTwamp(object): + def __init__(self, adb, cdb, sdb, cntrdb, appdb): + self.asic_db = adb + self.config_db = cdb + self.state_db = sdb + self.counters_db = cntrdb + self.app_db = appdb + + def create_twamp_light_session_sender_packet_count(self, name, sip, sport, dip, dport, packet_count=100, tx_interval=100, timeout=5, stats_interval=None): + twamp_light_entry = {"mode": "LIGHT", + "role": "SENDER", + "src_ip": sip, + "src_udp_port": sport, + "dst_ip": dip, + "dst_udp_port": dport, + "packet_count": packet_count, + "tx_interval": tx_interval, + "timeout": timeout + } + if stats_interval: + twamp_light_entry["statistics_interval"] = 
str(stats_interval) + else: + twamp_light_entry["statistics_interval"] = str(int(packet_count) * int(tx_interval) + int(timeout)*1000) + self.config_db.create_entry("TWAMP_SESSION", name, twamp_light_entry) + + def create_twamp_light_session_sender_continuous(self, name, sip, sport, dip, dport, monitor_time=0, tx_interval=100, timeout=5, stats_interval=None): + twamp_light_entry = {"mode": "LIGHT", + "role": "SENDER", + "src_ip": sip, + "src_udp_port": sport, + "dst_ip": dip, + "dst_udp_port": dport, + "monitor_time": monitor_time, + "tx_interval": tx_interval, + "timeout": timeout + } + if stats_interval: + twamp_light_entry["statistics_interval"] = str(stats_interval) + else: + twamp_light_entry["statistics_interval"] = str(int(monitor_time)*1000) + self.config_db.create_entry("TWAMP_SESSION", name, twamp_light_entry) + + def create_twamp_light_session_reflector(self, name, sip, sport, dip, dport): + twamp_light_entry = {"mode": "LIGHT", + "role": "REFLECTOR", + "src_ip": sip, + "src_udp_port": sport, + "dst_ip": dip, + "dst_udp_port": dport + } + self.config_db.create_entry("TWAMP_SESSION", name, twamp_light_entry) + + def start_twamp_light_sender(self, name): + twamp_light_entry = {"admin_state": "enabled"} + self.config_db.create_entry("TWAMP_SESSION", name, twamp_light_entry) + + def stop_twamp_light_sender(self, name): + twamp_light_entry = {"admin_state": "disabled"} + self.config_db.create_entry("TWAMP_SESSION", name, twamp_light_entry) + + def remove_twamp_light_session(self, name): + self.config_db.delete_entry("TWAMP_SESSION", name) + + def get_twamp_light_session_status(self, name): + return self.get_twamp_light_session_state(name)["status"] + + def get_twamp_light_session_state(self, name): + state_table = "TWAMP_SESSION_TABLE" + fvs = self.state_db.get_entry(state_table, name) + assert fvs is not None + assert len(fvs) > 0 + return dict(fvs) + + def verify_session_status(self, name, status="active", expected=1): + 
self.state_db.wait_for_n_keys("TWAMP_SESSION_TABLE", expected) + if expected: + self.state_db.wait_for_field_match("TWAMP_SESSION_TABLE", name, {"status": status}) + + def verify_no_session(self): + self.config_db.wait_for_n_keys("TWAMP_SESSION", 0) + self.state_db.wait_for_n_keys("TWAMP_SESSION_TABLE", 0) + + def verify_session_asic_db(self, dvs, name, asic_table=None, expected=1): + session_oids = self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_TWAMP_SESSION", expected) + session_oid = session_oids[0] + dvs.asic_db.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_TWAMP_SESSION", session_oid, asic_table) + + def verify_session_counter_db(self, dvs, name, counter_table=None, expected=1, expected_item=1): + fvs = dvs.counters_db.get_entry("COUNTERS_TWAMP_SESSION_NAME_MAP", "") + fvs = dict(fvs) + total_key = self.counters_db.db_connection.keys("COUNTERS:{}".format(fvs[name])) + assert len(total_key) == expected, "TWAMP Light counter entries are not available in counter db" + dvs.counters_db.wait_for_field_match("COUNTERS", fvs[name], counter_table) + item_keys = self.counters_db.db_connection.keys("COUNTERS:{}:INDEX:*".format(fvs[name])) + assert len(item_keys) == expected_item, "TWAMP Light counter entries are not available in counter db" + diff --git a/tests/dvslib/dvs_vlan.py b/tests/dvslib/dvs_vlan.py index 5ebbf51d45..418f3be666 100644 --- a/tests/dvslib/dvs_vlan.py +++ b/tests/dvslib/dvs_vlan.py @@ -13,6 +13,17 @@ def create_vlan(self, vlanID): vlan_entry = {"vlanid": vlanID} self.config_db.create_entry("VLAN", vlan, vlan_entry) + def create_vlan_interface(self, vlanID): + vlan = "Vlan{}".format(vlanID) + vlan_intf_entry = {} + self.config_db.create_entry("VLAN_INTERFACE", vlan, vlan_intf_entry) + + def set_vlan_intf_property(self, vlanID, property, value): + vlan_key = "Vlan{}".format(vlanID) + vlan_entry = self.config_db.get_entry("VLAN_INTERFACE", vlan_key) + vlan_entry[property] = value + self.config_db.update_entry("VLAN_INTERFACE", vlan_key, 
vlan_entry) + def create_vlan_hostif(self, vlan, hostif_name): vlan = "Vlan{}".format(vlan) vlan_entry = {"vlanid": vlan, "host_ifname": hostif_name} @@ -35,6 +46,10 @@ def remove_vlan_member(self, vlanID, interface): member = "Vlan{}|{}".format(vlanID, interface) self.config_db.delete_entry("VLAN_MEMBER", member) + def remove_vlan_interface(self, vlanID): + vlan = "Vlan{}".format(vlanID) + self.config_db.delete_entry("VLAN_INTERFACE", vlan) + def check_app_db_vlan_fields(self, fvs, admin_status="up", mtu="9100"): assert fvs.get("admin_status") == admin_status assert fvs.get("mtu") == mtu @@ -57,7 +72,7 @@ def get_and_verify_vlan_ids(self, polling_config=PollingConfig()): vlan_entries = self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_VLAN", expected_num + 1, - polling_config) + polling_config=polling_config) return [v for v in vlan_entries if v != self.asic_db.default_vlan_id] @@ -91,10 +106,11 @@ def verify_vlan_hostif(self, hostif_name, hostifs_oid, vlan_oid): assert hostif.get("SAI_HOSTIF_ATTR_TYPE") == "SAI_HOSTIF_TYPE_NETDEV" assert hostif.get("SAI_HOSTIF_ATTR_OBJ_ID") == vlan_oid assert hostif.get("SAI_HOSTIF_ATTR_NAME") == hostif_name + assert hostif.get("SAI_HOSTIF_ATTR_QUEUE") == "7" def get_and_verify_vlan_hostif_ids(self, expected_num, polling_config=PollingConfig()): hostif_entries = self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF", expected_num + 1, - polling_config) + polling_config=polling_config) return hostif_entries diff --git a/tests/evpn_tunnel.py b/tests/evpn_tunnel.py index 14c9928ce8..346064e004 100644 --- a/tests/evpn_tunnel.py +++ b/tests/evpn_tunnel.py @@ -485,6 +485,22 @@ def check_vxlan_tunnel_map_entry(self, dvs, tunnel_name, vidlist, vnidlist): (exitcode, out) = dvs.runcmd(iplinkcmd) assert exitcode == 0, "Kernel device not created" + def check_vxlan_tunnel_map_entry_removed(self, dvs, tunnel_name, vidlist, vnidlist): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + 
expected_attributes_1 = { + 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE': 'SAI_TUNNEL_MAP_TYPE_VNI_TO_VLAN_ID', + 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP': self.tunnel_map_map[tunnel_name][0], + 'SAI_TUNNEL_MAP_ENTRY_ATTR_VLAN_ID_VALUE': vidlist[0], + 'SAI_TUNNEL_MAP_ENTRY_ATTR_VNI_ID_KEY': vnidlist[0], + } + + for x in range(len(vidlist)): + expected_attributes_1['SAI_TUNNEL_MAP_ENTRY_ATTR_VLAN_ID_VALUE'] = vidlist[x] + expected_attributes_1['SAI_TUNNEL_MAP_ENTRY_ATTR_VNI_ID_KEY'] = vnidlist[x] + ret = self.helper.get_key_with_attr(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, expected_attributes_1) + assert len(ret) == 0, "SIP TunnelMap entry not removed" + def check_vxlan_sip_tunnel_delete(self, dvs, tunnel_name, sip, ignore_bp = True): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) @@ -517,7 +533,8 @@ def check_vxlan_sip_tunnel_delete(self, dvs, tunnel_name, sip, ignore_bp = True) assert status == False, "Tunnel bridgeport entry not deleted" def check_vxlan_sip_tunnel(self, dvs, tunnel_name, src_ip, vidlist, vnidlist, - dst_ip = '0.0.0.0', skip_dst_ip = 'True', ignore_bp = True): + dst_ip = '0.0.0.0', skip_dst_ip = 'True', ignore_bp = True, + tunnel_map_entry_count = 3): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) @@ -527,7 +544,7 @@ def check_vxlan_sip_tunnel(self, dvs, tunnel_name, src_ip, vidlist, vnidlist, # check that the vxlan tunnel termination are there assert self.helper.how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP) == (len(self.tunnel_map_ids) + 4), "The TUNNEL_MAP wasn't created" - assert self.helper.how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP_ENTRY) == (len(self.tunnel_map_entry_ids) + 3), "The TUNNEL_MAP_ENTRY is created" + assert self.helper.how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP_ENTRY) == (len(self.tunnel_map_entry_ids) + 
tunnel_map_entry_count), "The TUNNEL_MAP_ENTRY is created" assert self.helper.how_many_entries_exist(asic_db, self.ASIC_TUNNEL_TABLE) == (len(self.tunnel_ids) + 1), "The TUNNEL wasn't created" assert self.helper.how_many_entries_exist(asic_db, self.ASIC_TUNNEL_TERM_ENTRY) == (len(self.tunnel_term_ids) + 1), "The TUNNEL_TERM_TABLE_ENTRY wasm't created" @@ -680,6 +697,18 @@ def check_vxlan_dip_tunnel(self, dvs, vtep_name, src_ip, dip): self.bridgeport_map[dip] = ret[0] + def check_vxlan_dip_tunnel_not_created(self, dvs, vtep_name, src_ip, dip): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + + expected_state_attributes = { + 'src_ip': src_ip, + 'dst_ip': dip, + 'tnl_src': 'EVPN', + } + + ret = self.helper.get_key_with_attr(state_db, 'VXLAN_TUNNEL_TABLE', expected_state_attributes) + assert len(ret) == 0, "Tunnel Statetable entry created" + def check_vlan_extension_delete(self, dvs, vlan_name, dip): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) @@ -694,6 +723,17 @@ def check_vlan_extension_delete_p2mp(self, dvs, vlan_name, sip, dip): status, fvs = tbl.get(self.l2mcgroup_member_map[dip+vlan_name]) assert status == False, "L2MC Group Member entry not deleted" + def check_vlan_obj(self, dvs, vlan_name): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + expected_vlan_attributes = { + 'SAI_VLAN_ATTR_VLAN_ID': vlan_name, + } + ret = self.helper.get_key_with_attr(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_VLAN', expected_vlan_attributes) + assert len(ret) > 0, "VLAN entry not created" + assert len(ret) == 1, "More than 1 VLAN entry created" + + self.vlan_id_map[vlan_name] = ret[0] + def check_vlan_extension(self, dvs, vlan_name, dip): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) expected_vlan_attributes = { @@ -714,6 +754,25 @@ def check_vlan_extension(self, dvs, vlan_name, dip): assert len(ret) == 1, "More than 1 VLAN member created" self.vlan_member_map[dip+vlan_name] = 
ret[0] + def check_vlan_extension_not_created(self, dvs, vlan_name, dip): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + expected_vlan_attributes = { + 'SAI_VLAN_ATTR_VLAN_ID': vlan_name, + } + ret = self.helper.get_key_with_attr(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_VLAN', expected_vlan_attributes) + assert len(ret) > 0, "VLAN entry not created" + assert len(ret) == 1, "More than 1 VLAN entry created" + + self.vlan_id_map[vlan_name] = ret[0] + + if dip in self.bridgeport_map: + expected_vlan_member_attributes = { + 'SAI_VLAN_MEMBER_ATTR_VLAN_ID': self.vlan_id_map[vlan_name], + 'SAI_VLAN_MEMBER_ATTR_BRIDGE_PORT_ID': self.bridgeport_map[dip], + } + ret = self.helper.get_key_with_attr(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER', expected_vlan_member_attributes) + assert len(ret) == 0, "VLAN member created" + def check_vlan_extension_p2mp(self, dvs, vlan_name, sip, dip): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) tbl = swsscommon.Table(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_VLAN') @@ -760,6 +819,32 @@ def check_vlan_extension_p2mp(self, dvs, vlan_name, sip, dip): assert len(ret) == 1, "More than 1 L2MC group member created" self.l2mcgroup_member_map[dip+vlan_name] = ret[0] + def check_vlan_extension_not_created_p2mp(self, dvs, vlan_name, sip, dip): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_VLAN') + expected_vlan_attributes = { + 'SAI_VLAN_ATTR_VLAN_ID': vlan_name, + } + ret = self.helper.get_key_with_attr(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_VLAN', expected_vlan_attributes) + assert len(ret) > 0, "VLAN entry not created" + assert len(ret) == 1, "More than 1 VLAN entry created" + + self.vlan_id_map[vlan_name] = ret[0] + status, fvs = tbl.get(self.vlan_id_map[vlan_name]) + + print(fvs) + + uuc_flood_type = None + bc_flood_type = None + uuc_flood_group = None + bc_flood_group = None + + for attr,value in fvs: + assert 
attr != 'SAI_VLAN_ATTR_UNKNOWN_UNICAST_FLOOD_CONTROL_TYPE', "Unknown unicast flood control type is set" + assert attr != 'SAI_VLAN_ATTR_BROADCAST_FLOOD_CONTROL_TYPE', "Broadcast flood control type is set" + assert attr != 'SAI_VLAN_ATTR_UNKNOWN_UNICAST_FLOOD_GROUP', "Unknown unicast flood group is set" + assert attr != 'SAI_VLAN_ATTR_BROADCAST_FLOOD_GROUP', "Broadcast flood group is set" + def check_vxlan_tunnel_entry(self, dvs, tunnel_name, vnet_name, vni_id): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) @@ -798,10 +883,10 @@ def check_vxlan_tunnel_entry(self, dvs, tunnel_name, vnet_name, vni_id): def check_vxlan_tunnel_vrf_map_entry(self, dvs, tunnel_name, vrf_name, vni_id): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - tunnel_map_entry_id = self.helper.get_created_entries(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, self.tunnel_map_entry_ids, 3) + tunnel_map_entry_id = self.helper.get_created_entries(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, self.tunnel_map_entry_ids, 2) # check that the vxlan tunnel termination are there - assert self.helper.how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP_ENTRY) == (len(self.tunnel_map_entry_ids) + 3), "The TUNNEL_MAP_ENTRY is created too early" + assert self.helper.how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP_ENTRY) == (len(self.tunnel_map_entry_ids) + 2), "The TUNNEL_MAP_ENTRY is created too early" ret = self.helper.get_key_with_attr(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, { @@ -931,6 +1016,27 @@ def check_vrf_routes(self, dvs, prefix, vrf_name, endpoint, tunnel, mac="", vni= return True + def check_vrf_routes_absence(self, dvs, prefix, vrf_name, endpoint, tunnel, mac="", vni=0, no_update=0): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + vr_ids = self.vrf_route_ids(dvs, vrf_name) + count = len(vr_ids) + + # Check routes in ingress VRF + expected_attr = { + "SAI_NEXT_HOP_ATTR_TYPE": "SAI_NEXT_HOP_TYPE_TUNNEL_ENCAP", + 
"SAI_NEXT_HOP_ATTR_IP": endpoint, + "SAI_NEXT_HOP_ATTR_TUNNEL_ID": self.tunnel[tunnel], + } + + if vni: + expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_VNI': vni}) + + if mac: + expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_MAC': mac}) + + self.helper.get_created_entries(asic_db, self.ASIC_NEXT_HOP, self.nhops, 0) + def check_vrf_routes_ecmp(self, dvs, prefix, vrf_name, tunnel, nh_count, no_update=0): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) @@ -1050,7 +1156,7 @@ def check_del_vrf_routes(self, dvs, prefix, vrf_name): assert found_route self.helper.check_deleted_object(asic_db, self.ASIC_ROUTE_ENTRY, self.route_id[vrf_name + ":" + prefix]) - self.route_id.clear() + del self.route_id[vrf_name + ":" + prefix] return True diff --git a/tests/gcov_support.sh b/tests/gcov_support.sh index c7ddddb961..8582d5899d 100755 --- a/tests/gcov_support.sh +++ b/tests/gcov_support.sh @@ -125,17 +125,15 @@ lcov_merge_all() fi done < infolist - lcov --extract total.info '*sonic-gcov/*' -o total.info - # Remove unit test files. - lcov -o total.info -r total.info "*sonic-gcov/common_work/gcov/orchagent/p4orch/tests/*" - lcov -o total.info -r total.info "*sonic-gcov/common_work/gcov/tests/*" + lcov -o total.info -r total.info "*tests/*" + lcov -o total.info -r total.info "/usr/*" cp $1/lcov_cobertura.py $1/common_work/gcov/ python $1/common_work/gcov/lcov_cobertura.py total.info -o coverage.xml - sed -i "s#common_work/gcov/##" coverage.xml - sed -i "s#common_work.gcov.##" coverage.xml + sed -i "s#\.\./s/##" coverage.xml + sed -i "s#\.\.\.s\.##" coverage.xml cd gcov_output/ if [ ! 
-d ${ALLMERGE_DIR} ]; then @@ -385,9 +383,7 @@ main() echo "Usage:" echo " collect collect .gcno files based on module" echo " collect_gcda collect .gcda files" - echo " collect_gcda_files collect .gcda files in a docker" echo " generate generate gcov report in html form (all or submodule_name)" - echo " tar_output tar gcov_output forder" echo " merge_container_info merge homonymic info files from different container" echo " set_environment set environment ready for report generating in containers" esac diff --git a/tests/mock_tests/Makefile.am b/tests/mock_tests/Makefile.am index 6ee6faef46..96c95b121b 100644 --- a/tests/mock_tests/Makefile.am +++ b/tests/mock_tests/Makefile.am @@ -1,12 +1,13 @@ FLEX_CTR_DIR = $(top_srcdir)/orchagent/flex_counter DEBUG_CTR_DIR = $(top_srcdir)/orchagent/debug_counter P4_ORCH_DIR = $(top_srcdir)/orchagent/p4orch +DASH_PROTO_DIR = $(top_srcdir)/orchagent/dash/proto CFLAGS_SAI = -I /usr/include/sai -TESTS = tests tests_intfmgrd tests_portsyncd +TESTS = tests tests_intfmgrd tests_teammgrd tests_portsyncd tests_fpmsyncd tests_response_publisher -noinst_PROGRAMS = tests tests_intfmgrd tests_portsyncd +noinst_PROGRAMS = tests tests_intfmgrd tests_teammgrd tests_portsyncd tests_fpmsyncd tests_response_publisher LDADD_SAI = -lsaimeta -lsaimetadata -lsaivs -lsairedis @@ -21,7 +22,7 @@ LDADD_GTEST = -L/usr/src/gtest ## Orchagent Unit Tests -tests_INCLUDES = -I $(FLEX_CTR_DIR) -I $(DEBUG_CTR_DIR) -I $(top_srcdir)/lib -I$(top_srcdir)/cfgmgr -I$(top_srcdir)/orchagent +tests_INCLUDES = -I $(FLEX_CTR_DIR) -I $(DEBUG_CTR_DIR) -I $(top_srcdir)/lib -I$(top_srcdir)/cfgmgr -I$(top_srcdir)/orchagent -I$(P4_ORCH_DIR)/tests -I$(top_srcdir)/warmrestart tests_SOURCES = aclorch_ut.cpp \ portsorch_ut.cpp \ @@ -30,6 +31,7 @@ tests_SOURCES = aclorch_ut.cpp \ bufferorch_ut.cpp \ buffermgrdyn_ut.cpp \ fdborch/flush_syncd_notif_ut.cpp \ + copp_ut.cpp \ copporch_ut.cpp \ saispy_ut.cpp \ consumer_ut.cpp \ @@ -38,17 +40,30 @@ tests_SOURCES = aclorch_ut.cpp \ 
mock_orchagent_main.cpp \ mock_dbconnector.cpp \ mock_consumerstatetable.cpp \ + mock_subscriberstatetable.cpp \ common/mock_shell_command.cpp \ mock_table.cpp \ mock_hiredis.cpp \ mock_redisreply.cpp \ + mock_sai_api.cpp \ bulker_ut.cpp \ portmgr_ut.cpp \ + sflowmgrd_ut.cpp \ fake_response_publisher.cpp \ swssnet_ut.cpp \ flowcounterrouteorch_ut.cpp \ + orchdaemon_ut.cpp \ + intfsorch_ut.cpp \ + mux_rollback_ut.cpp \ + warmrestartassist_ut.cpp \ + test_failure_handling.cpp \ + warmrestarthelper_ut.cpp \ + neighorch_ut.cpp \ + twamporch_ut.cpp \ + $(top_srcdir)/warmrestart/warmRestartHelper.cpp \ $(top_srcdir)/lib/gearboxutils.cpp \ $(top_srcdir)/lib/subintf.cpp \ + $(top_srcdir)/lib/recorder.cpp \ $(top_srcdir)/orchagent/orchdaemon.cpp \ $(top_srcdir)/orchagent/orch.cpp \ $(top_srcdir)/orchagent/notifications.cpp \ @@ -61,6 +76,8 @@ tests_SOURCES = aclorch_ut.cpp \ $(top_srcdir)/orchagent/cbf/nhgmaporch.cpp \ $(top_srcdir)/orchagent/neighorch.cpp \ $(top_srcdir)/orchagent/intfsorch.cpp \ + $(top_srcdir)/orchagent/port/port_capabilities.cpp \ + $(top_srcdir)/orchagent/port/porthlpr.cpp \ $(top_srcdir)/orchagent/portsorch.cpp \ $(top_srcdir)/orchagent/fabricportsorch.cpp \ $(top_srcdir)/orchagent/copporch.cpp \ @@ -77,6 +94,8 @@ tests_SOURCES = aclorch_ut.cpp \ $(top_srcdir)/orchagent/pbhorch.cpp \ $(top_srcdir)/orchagent/saihelper.cpp \ $(top_srcdir)/orchagent/saiattr.cpp \ + $(top_srcdir)/orchagent/switch/switch_capabilities.cpp \ + $(top_srcdir)/orchagent/switch/switch_helper.cpp \ $(top_srcdir)/orchagent/switchorch.cpp \ $(top_srcdir)/orchagent/pfcwdorch.cpp \ $(top_srcdir)/orchagent/pfcactionhandler.cpp \ @@ -103,13 +122,26 @@ tests_SOURCES = aclorch_ut.cpp \ $(top_srcdir)/orchagent/srv6orch.cpp \ $(top_srcdir)/orchagent/nvgreorch.cpp \ $(top_srcdir)/cfgmgr/portmgr.cpp \ - $(top_srcdir)/cfgmgr/buffermgrdyn.cpp + $(top_srcdir)/cfgmgr/sflowmgr.cpp \ + $(top_srcdir)/orchagent/zmqorch.cpp \ + $(top_srcdir)/orchagent/dash/dashaclorch.cpp \ + 
$(top_srcdir)/orchagent/dash/dashorch.cpp \ + $(top_srcdir)/orchagent/dash/dashaclgroupmgr.cpp \ + $(top_srcdir)/orchagent/dash/dashtagmgr.cpp \ + $(top_srcdir)/orchagent/dash/dashrouteorch.cpp \ + $(top_srcdir)/orchagent/dash/dashvnetorch.cpp \ + $(top_srcdir)/cfgmgr/buffermgrdyn.cpp \ + $(top_srcdir)/warmrestart/warmRestartAssist.cpp \ + $(top_srcdir)/orchagent/dash/pbutils.cpp \ + $(top_srcdir)/cfgmgr/coppmgr.cpp \ + $(top_srcdir)/orchagent/twamporch.cpp tests_SOURCES += $(FLEX_CTR_DIR)/flex_counter_manager.cpp $(FLEX_CTR_DIR)/flex_counter_stat_manager.cpp $(FLEX_CTR_DIR)/flow_counter_handler.cpp $(FLEX_CTR_DIR)/flowcounterrouteorch.cpp tests_SOURCES += $(DEBUG_CTR_DIR)/debug_counter.cpp $(DEBUG_CTR_DIR)/drop_counter.cpp tests_SOURCES += $(P4_ORCH_DIR)/p4orch.cpp \ $(P4_ORCH_DIR)/p4orch_util.cpp \ $(P4_ORCH_DIR)/p4oidmapper.cpp \ + $(P4_ORCH_DIR)/tables_definition_manager.cpp \ $(P4_ORCH_DIR)/router_interface_manager.cpp \ $(P4_ORCH_DIR)/neighbor_manager.cpp \ $(P4_ORCH_DIR)/next_hop_manager.cpp \ @@ -120,16 +152,19 @@ tests_SOURCES += $(P4_ORCH_DIR)/p4orch.cpp \ $(P4_ORCH_DIR)/wcmp_manager.cpp \ $(P4_ORCH_DIR)/mirror_session_manager.cpp \ $(P4_ORCH_DIR)/gre_tunnel_manager.cpp \ - $(P4_ORCH_DIR)/l3_admit_manager.cpp + $(P4_ORCH_DIR)/l3_admit_manager.cpp \ + $(P4_ORCH_DIR)/ext_tables_manager.cpp \ + $(P4_ORCH_DIR)/tests/mock_sai_switch.cpp -tests_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) +tests_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) tests_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) $(tests_INCLUDES) tests_LDADD = $(LDADD_GTEST) $(LDADD_SAI) -lnl-genl-3 -lhiredis -lhiredis -lpthread \ - -lswsscommon -lswsscommon -lgtest -lgtest_main -lzmq -lnl-3 -lnl-route-3 + -lswsscommon -lswsscommon -lgtest -lgtest_main -lzmq -lnl-3 -lnl-route-3 -lgmock -lgmock_main -lprotobuf -ldashapi ## portsyncd unit tests tests_portsyncd_SOURCES = 
portsyncd/portsyncd_ut.cpp \ + $(top_srcdir)/lib/recorder.cpp \ $(top_srcdir)/portsyncd/linksync.cpp \ mock_dbconnector.cpp \ common/mock_shell_command.cpp \ @@ -137,7 +172,7 @@ tests_portsyncd_SOURCES = portsyncd/portsyncd_ut.cpp \ mock_hiredis.cpp \ mock_redisreply.cpp -tests_portsyncd_INCLUDES = -I $(top_srcdir)/portsyncd -I $(top_srcdir)/cfgmgr +tests_portsyncd_INCLUDES = -I $(top_srcdir)/portsyncd -I $(top_srcdir)/cfgmgr -I $(top_srcdir)/lib tests_portsyncd_CXXFLAGS = -Wl,-wrap,if_nameindex -Wl,-wrap,if_freenameindex tests_portsyncd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) tests_portsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(tests_portsyncd_INCLUDES) @@ -146,9 +181,10 @@ tests_portsyncd_LDADD = $(LDADD_GTEST) -lnl-genl-3 -lhiredis -lhiredis \ ## intfmgrd unit tests -tests_intfmgrd_SOURCES = intfmgrd/add_ipv6_prefix_ut.cpp \ +tests_intfmgrd_SOURCES = intfmgrd/intfmgr_ut.cpp \ $(top_srcdir)/cfgmgr/intfmgr.cpp \ $(top_srcdir)/lib/subintf.cpp \ + $(top_srcdir)/lib/recorder.cpp \ $(top_srcdir)/orchagent/orch.cpp \ $(top_srcdir)/orchagent/request_parser.cpp \ mock_orchagent_main.cpp \ @@ -163,4 +199,64 @@ tests_intfmgrd_INCLUDES = $(tests_INCLUDES) -I$(top_srcdir)/cfgmgr -I$(top_srcdi tests_intfmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) tests_intfmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) $(tests_intfmgrd_INCLUDES) tests_intfmgrd_LDADD = $(LDADD_GTEST) $(LDADD_SAI) -lnl-genl-3 -lhiredis -lhiredis \ + -lswsscommon -lswsscommon -lgtest -lgtest_main -lzmq -lnl-3 -lnl-route-3 -lpthread -lgmock -lgmock_main + +## teammgrd unit tests + +tests_teammgrd_SOURCES = teammgrd/teammgr_ut.cpp \ + $(top_srcdir)/cfgmgr/teammgr.cpp \ + $(top_srcdir)/lib/subintf.cpp \ + $(top_srcdir)/lib/recorder.cpp \ + $(top_srcdir)/orchagent/orch.cpp \ + $(top_srcdir)/orchagent/request_parser.cpp \ + mock_orchagent_main.cpp \ + mock_dbconnector.cpp \ + 
mock_table.cpp \ + mock_hiredis.cpp \ + fake_response_publisher.cpp \ + mock_redisreply.cpp \ + common/mock_shell_command.cpp + +tests_teammgrd_INCLUDES = $(tests_INCLUDES) -I$(top_srcdir)/cfgmgr -I$(top_srcdir)/lib +tests_teammgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) +tests_teammgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) $(tests_teammgrd_INCLUDES) +tests_teammgrd_LDADD = $(LDADD_GTEST) $(LDADD_SAI) -lnl-genl-3 -lhiredis -lhiredis \ + -lswsscommon -lswsscommon -lgtest -lgtest_main -lzmq -lnl-3 -lnl-route-3 -lpthread -lgmock -lgmock_main + +## fpmsyncd unit tests + +tests_fpmsyncd_SOURCES = fpmsyncd/test_fpmlink.cpp \ + fpmsyncd/test_routesync.cpp \ + fake_netlink.cpp \ + fake_warmstarthelper.cpp \ + fake_producerstatetable.cpp \ + mock_dbconnector.cpp \ + mock_table.cpp \ + mock_hiredis.cpp \ + $(top_srcdir)/warmrestart/ \ + $(top_srcdir)/fpmsyncd/fpmlink.cpp \ + $(top_srcdir)/fpmsyncd/routesync.cpp + +tests_fpmsyncd_INCLUDES = $(tests_INCLUDES) -I$(top_srcdir)/tests_fpmsyncd -I$(top_srcdir)/lib -I$(top_srcdir)/warmrestart +tests_fpmsyncd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) +tests_fpmsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) $(tests_fpmsyncd_INCLUDES) +tests_fpmsyncd_LDADD = $(LDADD_GTEST) $(LDADD_SAI) -lnl-genl-3 -lhiredis -lhiredis \ + -lswsscommon -lswsscommon -lgtest -lgtest_main -lzmq -lnl-3 -lnl-route-3 -lpthread -lgmock -lgmock_main + +## response publisher unit tests + +tests_response_publisher_SOURCES = response_publisher/response_publisher_ut.cpp \ + $(top_srcdir)/orchagent/response_publisher.cpp \ + $(top_srcdir)/lib/recorder.cpp \ + mock_orchagent_main.cpp \ + mock_dbconnector.cpp \ + mock_table.cpp \ + mock_hiredis.cpp \ + mock_redisreply.cpp + +tests_response_publisher_INCLUDES = $(tests_INCLUDES) +tests_response_publisher_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) 
$(CFLAGS_GTEST) $(CFLAGS_SAI) +tests_response_publisher_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) $(tests_response_publisher_INCLUDES) +tests_response_publisher_LDADD = $(LDADD_GTEST) $(LDADD_SAI) -lnl-genl-3 -lhiredis -lhiredis \ -lswsscommon -lswsscommon -lgtest -lgtest_main -lzmq -lnl-3 -lnl-route-3 -lpthread + diff --git a/tests/mock_tests/aclorch_ut.cpp b/tests/mock_tests/aclorch_ut.cpp index 9886e5d8ff..8005199935 100644 --- a/tests/mock_tests/aclorch_ut.cpp +++ b/tests/mock_tests/aclorch_ut.cpp @@ -19,6 +19,7 @@ extern VRFOrch *gVrfOrch; extern sai_acl_api_t *sai_acl_api; extern sai_switch_api_t *sai_switch_api; +extern sai_hash_api_t *sai_hash_api; extern sai_port_api_t *sai_port_api; extern sai_vlan_api_t *sai_vlan_api; extern sai_bridge_api_t *sai_bridge_api; @@ -312,6 +313,7 @@ namespace aclorch_test ASSERT_EQ(status, SAI_STATUS_SUCCESS); sai_api_query(SAI_API_SWITCH, (void **)&sai_switch_api); + sai_api_query(SAI_API_HASH, (void **)&sai_hash_api); sai_api_query(SAI_API_BRIDGE, (void **)&sai_bridge_api); sai_api_query(SAI_API_PORT, (void **)&sai_port_api); sai_api_query(SAI_API_VLAN, (void **)&sai_vlan_api); @@ -457,6 +459,10 @@ namespace aclorch_test gMirrorOrch = nullptr; delete gRouteOrch; gRouteOrch = nullptr; + delete gFlowCounterRouteOrch; + gFlowCounterRouteOrch = nullptr; + delete gSrv6Orch; + gSrv6Orch = nullptr; delete gNeighOrch; gNeighOrch = nullptr; delete gFdbOrch; @@ -471,8 +477,6 @@ namespace aclorch_test gPortsOrch = nullptr; delete gFgNhgOrch; gFgNhgOrch = nullptr; - delete gSrv6Orch; - gSrv6Orch = nullptr; auto status = sai_switch_api->remove_switch(gSwitchId); ASSERT_EQ(status, SAI_STATUS_SUCCESS); @@ -587,8 +591,7 @@ namespace aclorch_test return false; } - sai_attribute_t new_attr; - memset(&new_attr, 0, sizeof(new_attr)); + sai_attribute_t new_attr = {}; new_attr.id = attr.id; @@ -648,8 +651,7 @@ namespace aclorch_test return false; } - sai_attribute_t new_attr; - memset(&new_attr, 0, 
sizeof(new_attr)); + sai_attribute_t new_attr = {}; new_attr.id = attr.id; @@ -1411,7 +1413,7 @@ namespace aclorch_test { { ACL_TABLE_TYPE_MATCHES, - string(MATCH_SRC_IP) + comma + MATCH_ETHER_TYPE + comma + MATCH_L4_SRC_PORT_RANGE + string(MATCH_SRC_IP) + comma + MATCH_ETHER_TYPE + comma + MATCH_L4_SRC_PORT_RANGE + comma + MATCH_BTH_OPCODE + comma + MATCH_AETH_SYNDROME }, { ACL_TABLE_TYPE_BPOINT_TYPES, @@ -1433,6 +1435,8 @@ namespace aclorch_test { "SAI_ACL_TABLE_ATTR_FIELD_SRC_IP", "true" }, { "SAI_ACL_TABLE_ATTR_FIELD_ETHER_TYPE", "true" }, { "SAI_ACL_TABLE_ATTR_FIELD_ACL_RANGE_TYPE", "1:SAI_ACL_RANGE_TYPE_L4_SRC_PORT_RANGE" }, + { "SAI_ACL_TABLE_ATTR_FIELD_BTH_OPCODE", "true" }, + { "SAI_ACL_TABLE_ATTR_FIELD_AETH_SYNDROME", "true" }, }; ASSERT_TRUE(validateAclTable( @@ -1479,6 +1483,42 @@ namespace aclorch_test // DST_IP is not in the table type ASSERT_FALSE(orch->getAclRule(aclTableName, aclRuleName)); + orch->doAclRuleTask( + deque( + { + { + aclTableName + "|" + aclRuleName, + SET_COMMAND, + { + { ACTION_PACKET_ACTION, PACKET_ACTION_DROP }, + { MATCH_BTH_OPCODE, "0x60" }, + } + } + } + ) + ); + + // MATCH_BTH_OPCODE invalid format + ASSERT_FALSE(orch->getAclRule(aclTableName, aclRuleName)); + + orch->doAclRuleTask( + deque( + { + { + aclTableName + "|" + aclRuleName, + SET_COMMAND, + { + { ACTION_PACKET_ACTION, PACKET_ACTION_DROP }, + { MATCH_AETH_SYNDROME, "0x60" }, + } + } + } + ) + ); + + // MATCH_AETH_SYNDROME invalid format + ASSERT_FALSE(orch->getAclRule(aclTableName, aclRuleName)); + orch->doAclRuleTask( deque( { @@ -1488,6 +1528,8 @@ namespace aclorch_test { { MATCH_SRC_IP, "1.1.1.1/32" }, { ACTION_PACKET_ACTION, PACKET_ACTION_DROP }, + { MATCH_BTH_OPCODE, "0x60/0xff" }, + { MATCH_AETH_SYNDROME, "0x60/0x60" }, } } } diff --git a/tests/mock_tests/buffermgrdyn_ut.cpp b/tests/mock_tests/buffermgrdyn_ut.cpp index 9dd17a5da8..1c23a17410 100644 --- a/tests/mock_tests/buffermgrdyn_ut.cpp +++ b/tests/mock_tests/buffermgrdyn_ut.cpp @@ -22,6 +22,7 @@ namespace 
buffermgrdyn_test shared_ptr m_app_db = make_shared("APPL_DB", 0); shared_ptr m_config_db = make_shared("CONFIG_DB", 0); shared_ptr m_state_db = make_shared("STATE_DB", 0); + shared_ptr m_app_state_db = make_shared("APPL_STATE_DB", 0); BufferMgrDynamic *m_dynamicBuffer; SelectableTimer m_selectableTable(timespec({ .tv_sec = BUFFERMGR_TIMER_PERIOD, .tv_nsec = 0 }), 0); @@ -180,7 +181,7 @@ namespace buffermgrdyn_test TableConnector(m_state_db.get(), STATE_PORT_TABLE_NAME) }; - m_dynamicBuffer = new BufferMgrDynamic(m_config_db.get(), m_state_db.get(), m_app_db.get(), buffer_table_connectors, nullptr, zero_profile); + m_dynamicBuffer = new BufferMgrDynamic(m_config_db.get(), m_state_db.get(), m_app_db.get(), m_app_state_db.get(), buffer_table_connectors, nullptr, zero_profile); } void InitPort(const string &port="Ethernet0", const string &admin_status="up") @@ -667,6 +668,13 @@ namespace buffermgrdyn_test CheckProfileList("Ethernet0", true, "ingress_lossless_profile", false); CheckProfileList("Ethernet0", false, "egress_lossless_profile,egress_lossy_profile", false); + // Initialize a port with all profiles undefined + InitPort("Ethernet8"); + InitBufferPg("Ethernet8|0", "ingress_not_defined_profile"); + InitBufferQueue("Ethernet8|0", "egress_not_defined_profile"); + InitBufferProfileList("Ethernet8", "egress_not_defined_profile", bufferEgrProfileListTable); + InitBufferProfileList("Ethernet8", "ingress_not_defined_profile", bufferIngProfileListTable); + // All default buffer profiles should be generated and pushed into BUFFER_PROFILE_TABLE static_cast(m_dynamicBuffer)->doTask(); @@ -686,6 +694,36 @@ namespace buffermgrdyn_test CheckProfileList("Ethernet0", true, "ingress_lossless_profile", true); CheckProfileList("Ethernet0", false, "egress_lossless_profile,egress_lossy_profile", true); + // Check no items applied on port Ethernet8 + ASSERT_EQ(appBufferPgTable.get("Ethernet8:0", fieldValues), false); + CheckQueue("Ethernet8", "Ethernet8:0", "", false); + 
CheckProfileList("Ethernet8", true, "", false); + CheckProfileList("Ethernet8", false, "", false); + + // Configure the missing buffer profiles + bufferProfileTable.set("ingress_not_defined_profile", + { + {"pool", "ingress_lossless_pool"}, + {"dynamic_th", "0"}, + {"size", "0"} + }); + bufferProfileTable.set("egress_not_defined_profile", + { + {"pool", "egress_lossless_pool"}, + {"dynamic_th", "0"}, + {"size", "0"} + }); + m_dynamicBuffer->addExistingData(&bufferProfileTable); + // For buffer profile + static_cast(m_dynamicBuffer)->doTask(); + // For all other items + static_cast(m_dynamicBuffer)->doTask(); + ASSERT_EQ(appBufferPgTable.get("Ethernet8:0", fieldValues), true); + ASSERT_EQ(fvValue(fieldValues[0]), "ingress_not_defined_profile"); + CheckQueue("Ethernet8", "Ethernet8:0", "egress_not_defined_profile", true); + CheckProfileList("Ethernet8", true, "ingress_not_defined_profile", true); + CheckProfileList("Ethernet8", false, "egress_not_defined_profile", true); + InitPort("Ethernet4"); InitPort("Ethernet6"); InitBufferQueue("Ethernet6|0-2", "egress_lossy_profile"); diff --git a/tests/mock_tests/bufferorch_ut.cpp b/tests/mock_tests/bufferorch_ut.cpp index 86e3ef8aa7..2cd15ee549 100644 --- a/tests/mock_tests/bufferorch_ut.cpp +++ b/tests/mock_tests/bufferorch_ut.cpp @@ -7,9 +7,11 @@ #include "ut_helper.h" #include "mock_orchagent_main.h" #include "mock_table.h" +#include "mock_response_publisher.h" extern string gMySwitchType; +extern std::unique_ptr gMockResponsePublisher; namespace bufferorch_test { @@ -17,8 +19,13 @@ namespace bufferorch_test sai_port_api_t ut_sai_port_api; sai_port_api_t *pold_sai_port_api; + sai_buffer_api_t ut_sai_buffer_api; + sai_buffer_api_t *pold_sai_buffer_api; + sai_queue_api_t ut_sai_queue_api; + sai_queue_api_t *pold_sai_queue_api; shared_ptr m_app_db; + shared_ptr m_app_state_db; shared_ptr m_config_db; shared_ptr m_state_db; shared_ptr m_chassis_app_db; @@ -48,17 +55,95 @@ namespace bufferorch_test return 
pold_sai_port_api->set_port_attribute(port_id, attr); } - void _hook_sai_port_api() + uint32_t _ut_stub_set_pg_count; + sai_status_t _ut_stub_sai_set_ingress_priority_group_attribute( + _In_ sai_object_id_t ingress_priority_group_id, + _In_ const sai_attribute_t *attr) + { + _ut_stub_set_pg_count++; + return pold_sai_buffer_api->set_ingress_priority_group_attribute(ingress_priority_group_id, attr); + } + + sai_uint64_t _ut_stub_buffer_profile_size; + sai_uint64_t _ut_stub_buffer_profile_xon; + sai_uint64_t _ut_stub_buffer_profile_xoff; + bool _ut_stub_buffer_profile_sanity_check = false; + sai_status_t _ut_stub_sai_set_buffer_profile_attribute( + _In_ sai_object_id_t buffer_profile_id, + _In_ const sai_attribute_t *attr) + { + if (_ut_stub_buffer_profile_sanity_check) + { + if (SAI_BUFFER_PROFILE_ATTR_BUFFER_SIZE == attr[0].id) + { + if (attr[0].value.u64 < _ut_stub_buffer_profile_xon + _ut_stub_buffer_profile_xoff) + { + return SAI_STATUS_INVALID_PARAMETER; + } + else + { + _ut_stub_buffer_profile_size = attr[0].value.u64; + } + } + if (SAI_BUFFER_PROFILE_ATTR_XOFF_TH == attr[0].id) + { + if (_ut_stub_buffer_profile_size < _ut_stub_buffer_profile_xon + attr[0].value.u64) + { + return SAI_STATUS_INVALID_PARAMETER; + } + else + { + _ut_stub_buffer_profile_xoff = attr[0].value.u64; + } + } + if (SAI_BUFFER_PROFILE_ATTR_XON_TH == attr[0].id) + { + if (_ut_stub_buffer_profile_size < _ut_stub_buffer_profile_xoff + attr[0].value.u64) + { + return SAI_STATUS_INVALID_PARAMETER; + } + else + { + _ut_stub_buffer_profile_xon = attr[0].value.u64; + } + } + } + return pold_sai_buffer_api->set_buffer_profile_attribute(buffer_profile_id, attr); + } + + uint32_t _ut_stub_set_queue_count; + sai_status_t _ut_stub_sai_set_queue_attribute( + _In_ sai_object_id_t queue_id, + _In_ const sai_attribute_t *attr) + { + _ut_stub_set_queue_count++; + return pold_sai_queue_api->set_queue_attribute(queue_id, attr); + } + + void _hook_sai_apis() { ut_sai_port_api = *sai_port_api; 
pold_sai_port_api = sai_port_api; ut_sai_port_api.set_port_attribute = _ut_stub_sai_set_port_attribute; sai_port_api = &ut_sai_port_api; + + ut_sai_buffer_api = *sai_buffer_api; + pold_sai_buffer_api = sai_buffer_api; + ut_sai_buffer_api.set_ingress_priority_group_attribute = _ut_stub_sai_set_ingress_priority_group_attribute; + ut_sai_buffer_api.set_buffer_profile_attribute = _ut_stub_sai_set_buffer_profile_attribute; + sai_buffer_api = &ut_sai_buffer_api; + + ut_sai_queue_api = *sai_queue_api; + pold_sai_queue_api = sai_queue_api; + ut_sai_queue_api.set_queue_attribute = _ut_stub_sai_set_queue_attribute; + sai_queue_api = &ut_sai_queue_api; } - void _unhook_sai_port_api() + void _unhook_sai_apis() { sai_port_api = pold_sai_port_api; + sai_buffer_api = pold_sai_buffer_api; + sai_queue_api = pold_sai_queue_api; } struct BufferOrchTest : public ::testing::Test @@ -113,6 +198,7 @@ namespace bufferorch_test m_app_db = make_shared("APPL_DB", 0); m_config_db = make_shared("CONFIG_DB", 0); m_state_db = make_shared("STATE_DB", 0); + m_app_state_db = make_shared("APPL_STATE_DB", 0); if(gMySwitchType == "voq") m_chassis_app_db = make_shared("CHASSIS_APP_DB", 0); @@ -317,8 +403,27 @@ namespace bufferorch_test } }; + TEST_F(BufferOrchTest, BufferOrchTestSharedHeadroomPool) + { + gMockResponsePublisher = std::make_unique(); + + Table bufferPoolTable = Table(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME); + Table bufferPoolStateTable = Table(m_app_state_db.get(), APP_BUFFER_POOL_TABLE_NAME); + + bufferPoolTable.set("ingress_lossless_pool", + { + {"xoff", "10240"} + }); + gBufferOrch->addExistingData(&bufferPoolTable); + EXPECT_CALL(*gMockResponsePublisher, publish(APP_BUFFER_POOL_TABLE_NAME, "ingress_lossless_pool", std::vector{{"xoff", "10240"}}, ReturnCode(SAI_STATUS_SUCCESS), true)).Times(1); + static_cast(gBufferOrch)->doTask(); + + gMockResponsePublisher.reset(); + } + TEST_F(BufferOrchTest, BufferOrchTestBufferPgReferencingObjRemoveThenAdd) { + _hook_sai_apis(); vector ts; 
std::deque entries; Table bufferPgTable = Table(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME); @@ -376,18 +481,34 @@ namespace bufferorch_test bufferProfileConsumer->addToSync(entries); entries.clear(); // Drain BUFFER_PROFILE_TABLE table + auto sai_pg_attr_set_count = _ut_stub_set_pg_count; static_cast(gBufferOrch)->doTask(); // Make sure the dependency recovers CheckDependency(APP_BUFFER_PG_TABLE_NAME, "Ethernet0:0", "profile", APP_BUFFER_PROFILE_TABLE_NAME, "ingress_lossy_profile"); + ASSERT_EQ(++sai_pg_attr_set_count, _ut_stub_set_pg_count); // All items have been drained static_cast(gBufferOrch)->dumpPendingTasks(ts); ASSERT_TRUE(ts.empty()); + + // Try applying the same profile, which should not call SAI API + entries.push_back({"Ethernet0:0", "SET", + { + {"profile", "ingress_lossy_profile"} + }}); + bufferPgConsumer->addToSync(entries); + entries.clear(); + sai_pg_attr_set_count = _ut_stub_set_pg_count; + static_cast(gBufferOrch)->doTask(); + ASSERT_EQ(sai_pg_attr_set_count, _ut_stub_set_pg_count); + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + _unhook_sai_apis(); } TEST_F(BufferOrchTest, BufferOrchTestReferencingObjRemoveThenAdd) { - _hook_sai_port_api(); + _hook_sai_apis(); vector ts; std::deque entries; Table bufferProfileListTable = Table(m_app_db.get(), APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME); @@ -472,6 +593,29 @@ namespace bufferorch_test // As an side-effect, all pending notifications should be drained ASSERT_TRUE(ts.empty()); + // Apply a buffer item only if it is changed + _ut_stub_expected_profile_list_type = SAI_PORT_ATTR_QOS_INGRESS_BUFFER_PROFILE_LIST; + _ut_stub_expected_profile_count = 1; + entries.push_back({"Ethernet0", "SET", + { + {"profile_list", "ingress_lossy_profile"} + }}); + consumer = dynamic_cast(gBufferOrch->getExecutor(APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME)); + consumer->addToSync(entries); + sai_port_profile_list_create_count = _ut_stub_port_profile_list_add_count; + // Drain 
BUFFER_PORT_INGRESS_PROFILE_LIST_TABLE table + static_cast(gBufferOrch)->doTask(); + ASSERT_EQ(++sai_port_profile_list_create_count, _ut_stub_port_profile_list_add_count); + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + // Try applying it for the second time, which should not call SAI API + consumer->addToSync(entries); + static_cast(gBufferOrch)->doTask(); + ASSERT_EQ(sai_port_profile_list_create_count, _ut_stub_port_profile_list_add_count); + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + // To satisfy the coverage requirement bufferProfileListTable.set("Ethernet0", { @@ -483,12 +627,12 @@ namespace bufferorch_test ASSERT_EQ(ts[0], "BUFFER_PORT_INGRESS_PROFILE_LIST_TABLE:Ethernet0|SET|profile_list:ingress_no_exist_profile"); ts.clear(); - _unhook_sai_port_api(); + _unhook_sai_apis(); } TEST_F(BufferOrchTest, BufferOrchTestCreateAndRemoveEgressProfileList) { - _hook_sai_port_api(); + _hook_sai_apis(); vector ts; std::deque entries; Table bufferPoolTable = Table(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME); @@ -531,9 +675,21 @@ namespace bufferorch_test CheckDependency(APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, "Ethernet0", "profile_list", APP_BUFFER_PROFILE_TABLE_NAME, "egress_lossless_profile"); + // Try applying it for the second time, which should not call SAI API + entries.push_back({"Ethernet0", "SET", + { + {"profile_list", "egress_lossless_profile"} + }}); + auto consumer = dynamic_cast(gBufferOrch->getExecutor(APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME)); + consumer->addToSync(entries); + entries.clear(); + static_cast(gBufferOrch)->doTask(); + ASSERT_EQ(sai_port_profile_list_create_count, _ut_stub_port_profile_list_add_count); + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + // Remove egress port profile list entries.push_back({"Ethernet0", "DEL", {}}); - auto consumer = dynamic_cast(gBufferOrch->getExecutor(APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME)); 
consumer->addToSync(entries); entries.clear(); // Drain BUFFER_PORT_EGRESS_PROFILE_LIST_TABLE table @@ -545,6 +701,95 @@ namespace bufferorch_test static_cast(gBufferOrch)->dumpPendingTasks(ts); ASSERT_TRUE(ts.empty()); - _unhook_sai_port_api(); + // Queue table + entries.push_back({"Ethernet0:0", "SET", + { + {"profile", "egress_lossless_profile"} + }}); + consumer = dynamic_cast(gBufferOrch->getExecutor(APP_BUFFER_QUEUE_TABLE_NAME)); + consumer->addToSync(entries); + auto sai_queue_set_count = _ut_stub_set_queue_count; + static_cast(gBufferOrch)->doTask(); + ASSERT_EQ(++sai_queue_set_count, _ut_stub_set_queue_count); + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + consumer->addToSync(entries); + static_cast(gBufferOrch)->doTask(); + ASSERT_EQ(sai_queue_set_count, _ut_stub_set_queue_count); + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_apis(); + } + + TEST_F(BufferOrchTest, BufferOrchTestSetBufferProfile) + { + _hook_sai_apis(); + vector ts; + std::deque entries; + Table bufferPoolTable = Table(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME); + Table bufferProfileTable = Table(m_app_db.get(), APP_BUFFER_PROFILE_TABLE_NAME); + + bufferPoolTable.set("ingress_lossless_pool", + { + {"size", "1024000"}, + {"mode", "dynamic"}, + {"type", "ingress"} + }); + bufferProfileTable.set("test_lossless_profile", + { + {"pool", "ingress_lossless_pool"}, + {"dynamic_th", "0"}, + {"size", "39936"}, + {"xon", "19456"}, + {"xoff", "20480"} + }); + + gBufferOrch->addExistingData(&bufferPoolTable); + gBufferOrch->addExistingData(&bufferProfileTable); + + static_cast(gBufferOrch)->doTask(); + + _ut_stub_buffer_profile_size = 39936; + _ut_stub_buffer_profile_xon = 19456; + _ut_stub_buffer_profile_xoff = 20480; + _ut_stub_buffer_profile_sanity_check = true; + + // Decrease xoff, size + entries.push_back({"test_lossless_profile", "SET", + { + {"size", "29936"}, + {"xon", "19456"}, + {"xoff", "10480"} + }}); + auto 
consumer = dynamic_cast(gBufferOrch->getExecutor(APP_BUFFER_PROFILE_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + static_cast(gBufferOrch)->doTask(); + ASSERT_EQ(_ut_stub_buffer_profile_size, 29936); + ASSERT_EQ(_ut_stub_buffer_profile_xoff, 10480); + ASSERT_EQ(_ut_stub_buffer_profile_xon, 19456); + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + // Increase xoff, size + entries.push_back({"test_lossless_profile", "SET", + { + {"xoff", "20480"}, + {"size", "39936"}, + {"xon", "19456"} + }}); + consumer->addToSync(entries); + entries.clear(); + static_cast(gBufferOrch)->doTask(); + ASSERT_EQ(_ut_stub_buffer_profile_size, 39936); + ASSERT_EQ(_ut_stub_buffer_profile_xoff, 20480); + ASSERT_EQ(_ut_stub_buffer_profile_xon, 19456); + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _ut_stub_buffer_profile_sanity_check = false; + _unhook_sai_apis(); } } diff --git a/tests/mock_tests/copp_cfg.json b/tests/mock_tests/copp_cfg.json new file mode 100644 index 0000000000..46d921b827 --- /dev/null +++ b/tests/mock_tests/copp_cfg.json @@ -0,0 +1,111 @@ +{ + "COPP_GROUP": { + "default": { + "queue": "0", + "meter_type":"packets", + "mode":"sr_tcm", + "cir":"600", + "cbs":"600", + "red_action":"drop" + }, + "queue4_group1": { + "trap_action":"trap", + "trap_priority":"4", + "queue": "4" + }, + "queue4_group2": { + "trap_action":"copy", + "trap_priority":"4", + "queue": "4", + "meter_type":"packets", + "mode":"sr_tcm", + "cir":"600", + "cbs":"600", + "red_action":"drop" + }, + "queue4_group3": { + "trap_action":"trap", + "trap_priority":"4", + "queue": "4" + }, + "queue1_group1": { + "trap_action":"trap", + "trap_priority":"1", + "queue": "1", + "meter_type":"packets", + "mode":"sr_tcm", + "cir":"6000", + "cbs":"6000", + "red_action":"drop" + }, + "queue1_group2": { + "trap_action":"trap", + "trap_priority":"1", + "queue": "1", + "meter_type":"packets", + "mode":"sr_tcm", + "cir":"600", + "cbs":"600", 
+ "red_action":"drop" + }, + "queue2_group1": { + "cbs": "1000", + "cir": "1000", + "genetlink_mcgrp_name": "packets", + "genetlink_name": "psample", + "meter_type": "packets", + "mode": "sr_tcm", + "queue": "2", + "red_action": "drop", + "trap_action": "trap", + "trap_priority": "1" + + } + }, + "COPP_TRAP": { + "bgp": { + "trap_ids": "bgp,bgpv6", + "trap_group": "queue4_group1" + }, + "lacp": { + "trap_ids": "lacp", + "trap_group": "queue4_group1", + "always_enabled": "true" + }, + "arp": { + "trap_ids": "arp_req,arp_resp,neigh_discovery", + "trap_group": "queue4_group2", + "always_enabled": "true" + }, + "lldp": { + "trap_ids": "lldp", + "trap_group": "queue4_group3" + }, + "dhcp_relay": { + "trap_ids": "dhcp,dhcpv6", + "trap_group": "queue4_group3" + }, + "udld": { + "trap_ids": "udld", + "trap_group": "queue4_group3", + "always_enabled": "true" + }, + "ip2me": { + "trap_ids": "ip2me", + "trap_group": "queue1_group1", + "always_enabled": "true" + }, + "macsec": { + "trap_ids": "eapol", + "trap_group": "queue4_group3" + }, + "nat": { + "trap_ids": "src_nat_miss,dest_nat_miss", + "trap_group": "queue1_group2" + }, + "sflow": { + "trap_group": "queue2_group1", + "trap_ids": "sample_packet" + } + } +} diff --git a/tests/mock_tests/copp_ut.cpp b/tests/mock_tests/copp_ut.cpp new file mode 100644 index 0000000000..f5d0b85cf5 --- /dev/null +++ b/tests/mock_tests/copp_ut.cpp @@ -0,0 +1,54 @@ +#include "gtest/gtest.h" +#include +#include "schema.h" +#include "warm_restart.h" +#include "ut_helper.h" +#include "coppmgr.h" +#include +#include + +using namespace std; +using namespace swss; + +TEST(CoppMgrTest, CoppTest) +{ + const vector cfg_copp_tables = { + CFG_COPP_TRAP_TABLE_NAME, + CFG_COPP_GROUP_TABLE_NAME, + CFG_FEATURE_TABLE_NAME, + }; + + WarmStart::initialize("coppmgrd", "swss"); + WarmStart::checkWarmStart("coppmgrd", "swss"); + + DBConnector cfgDb("CONFIG_DB", 0); + DBConnector appDb("APPL_DB", 0); + DBConnector stateDb("STATE_DB", 0); + + /* The test will set an 
entry with queue1_group1|cbs value which differs from the init value + * found in the copp_cfg.json file. Then coppmgr constructor will be called and it will detect + * that there is already an entry for queue1_group1|cbs with different value and it should be + * overwritten with the init value. + * hget will verify that this indeed happened. + */ + Table coppTable = Table(&appDb, APP_COPP_TABLE_NAME); + coppTable.set("queue1_group1", + { + {"cbs", "6100"}, + {"cir", "6000"}, + {"meter_type", "packets"}, + {"mode", "sr_tcm"}, + {"queue", "1"}, + {"red_action", "drop"}, + {"trap_action", "trap"}, + {"trap_priority", "1"}, + {"trap_ids", "ip2me"} + }); + + CoppMgr coppmgr(&cfgDb, &appDb, &stateDb, cfg_copp_tables, "./copp_cfg.json"); + + string overide_val; + coppTable.hget("queue1_group1", "cbs",overide_val); + EXPECT_EQ( overide_val, "6000"); +} + diff --git a/tests/mock_tests/copporch_ut.cpp b/tests/mock_tests/copporch_ut.cpp index fa7c360f01..9f94e58634 100644 --- a/tests/mock_tests/copporch_ut.cpp +++ b/tests/mock_tests/copporch_ut.cpp @@ -33,6 +33,18 @@ namespace copporch_test static_cast(this->coppOrch.get())->doTask(*consumer); } + task_process_status doProcessCoppRule(const std::deque &entries) + { + // ConsumerStateTable is used for APP DB + auto consumer = std::unique_ptr(new Consumer( + new ConsumerStateTable(this->appDb.get(), APP_COPP_TABLE_NAME, 1, 1), + this->coppOrch.get(), APP_COPP_TABLE_NAME + )); + + consumer->addToSync(entries); + return Portal::CoppOrchInternal::processCoppRule(*coppOrch, *consumer); + } + CoppOrch& get() { return *coppOrch; @@ -322,7 +334,7 @@ namespace copporch_test } } - TEST_F(CoppOrchTest, TrapGroupWithPolicer_AddRemove) + TEST_F(CoppOrchTest, TrapGroupWithPolicer_AddUpdateRemove) { const std::string trapGroupName = "queue4_group2"; @@ -341,6 +353,7 @@ namespace copporch_test { copp_queue_field, "4" }, { copp_policer_meter_type_field, "packets" }, { copp_policer_mode_field, "sr_tcm" }, + { copp_policer_color_field, "aware" 
}, { copp_policer_cir_field, "600" }, { copp_policer_cbs_field, "600" }, { copp_policer_action_red_field, "drop" } @@ -358,8 +371,27 @@ namespace copporch_test const auto &trapGroupOid = cit1->second; const auto &cit2 = trapGroupPolicerMap.find(trapGroupOid); EXPECT_TRUE(cit2 != trapGroupPolicerMap.end()); + EXPECT_TRUE(cit2->second.meter == SAI_METER_TYPE_PACKETS); + EXPECT_TRUE(cit2->second.mode == SAI_POLICER_MODE_SR_TCM); + + /* Update the non create only attributes */ + auto tableKofvt2 = std::deque( + { + { + trapGroupName, + SET_COMMAND, + { + { copp_policer_cir_field, "1000" }, + { copp_policer_cbs_field, "1000" }, + { copp_policer_action_red_field, "drop" } + } + } + } + ); + ASSERT_EQ(coppOrch.doProcessCoppRule(tableKofvt2), task_process_status::task_success); } + // Delete CoPP Trap Group { auto tableKofvt = std::deque( @@ -376,6 +408,52 @@ namespace copporch_test } } + TEST_F(CoppOrchTest, TrapGroupWithPolicer_nothrowExec) + { + const std::string trapGroupName = "queue4_group2"; + + MockCoppOrch coppOrch; + + { + // Create CoPP Trap Group + auto tableKofvt = std::deque( + { + { + trapGroupName, + SET_COMMAND, + { + { copp_trap_action_field, "copy" }, + { copp_trap_priority_field, "4" }, + { copp_queue_field, "4" }, + { copp_policer_meter_type_field, "packets" }, + { copp_policer_mode_field, "sr_tcm" }, + { copp_policer_cir_field, "600" }, + { copp_policer_cbs_field, "600" }, + { copp_policer_action_red_field, "drop" } + } + } + } + ); + coppOrch.doCoppTableTask(tableKofvt); + + // Update create-only Policer Attributes + auto tableKofvt2 = std::deque( + { + { + trapGroupName, + SET_COMMAND, + { + { copp_policer_meter_type_field, "bytes" }, + { copp_policer_mode_field, "tr_tcm" }, + { copp_policer_color_field, "blind" }, + } + } + } + ); + EXPECT_NO_THROW(coppOrch.doProcessCoppRule(tableKofvt2)); + } + } + TEST_F(CoppOrchTest, Trap_AddRemove) { const std::string trapGroupName = "queue4_group1"; diff --git a/tests/mock_tests/database_config.json 
b/tests/mock_tests/database_config.json index 68f850481d..baf705ea23 100644 --- a/tests/mock_tests/database_config.json +++ b/tests/mock_tests/database_config.json @@ -27,11 +27,6 @@ "separator": ":", "instance" : "redis" }, - "LOGLEVEL_DB" : { - "id" : 3, - "separator": ":", - "instance" : "redis" - }, "CONFIG_DB" : { "id" : 4, "separator": "|", diff --git a/tests/mock_tests/fake_netlink.cpp b/tests/mock_tests/fake_netlink.cpp new file mode 100644 index 0000000000..2370e13129 --- /dev/null +++ b/tests/mock_tests/fake_netlink.cpp @@ -0,0 +1,18 @@ +#include +#include + +static rtnl_link* g_fakeLink = [](){ + auto fakeLink = rtnl_link_alloc(); + rtnl_link_set_ifindex(fakeLink, 42); + return fakeLink; +}(); + +extern "C" +{ + +struct rtnl_link* rtnl_link_get_by_name(struct nl_cache *cache, const char *name) +{ + return g_fakeLink; +} + +} diff --git a/tests/mock_tests/fake_producerstatetable.cpp b/tests/mock_tests/fake_producerstatetable.cpp new file mode 100644 index 0000000000..33fab17ecf --- /dev/null +++ b/tests/mock_tests/fake_producerstatetable.cpp @@ -0,0 +1,16 @@ +#include "producerstatetable.h" + +using namespace std; + +namespace swss +{ + +ProducerStateTable::ProducerStateTable(RedisPipeline *pipeline, const string &tableName, bool buffered) + : TableBase(tableName, SonicDBConfig::getSeparator(pipeline->getDBConnector())), TableName_KeySet(tableName), m_buffered(buffered) + , m_pipeowned(false) + , m_tempViewActive(false) + , m_pipe(pipeline) {} + +ProducerStateTable::~ProducerStateTable() {} + +} diff --git a/tests/mock_tests/fake_response_publisher.cpp b/tests/mock_tests/fake_response_publisher.cpp index 94480913d5..29a28d2360 100644 --- a/tests/mock_tests/fake_response_publisher.cpp +++ b/tests/mock_tests/fake_response_publisher.cpp @@ -2,21 +2,42 @@ #include #include "response_publisher.h" +#include "mock_response_publisher.h" -ResponsePublisher::ResponsePublisher() : m_db("APPL_STATE_DB", 0) {} +/* This mock plugs into this fake response publisher 
implementation + * when needed to test code that uses response publisher. */ +std::unique_ptr gMockResponsePublisher; + +ResponsePublisher::ResponsePublisher(bool buffered) : m_db(std::make_unique("APPL_STATE_DB", 0)), m_buffered(buffered) {} void ResponsePublisher::publish( const std::string& table, const std::string& key, const std::vector& intent_attrs, const ReturnCode& status, - const std::vector& state_attrs, bool replace) {} + const std::vector& state_attrs, bool replace) +{ + if (gMockResponsePublisher) + { + gMockResponsePublisher->publish(table, key, intent_attrs, status, state_attrs, replace); + } +} void ResponsePublisher::publish( const std::string& table, const std::string& key, const std::vector& intent_attrs, - const ReturnCode& status, bool replace) {} + const ReturnCode& status, bool replace) +{ + if (gMockResponsePublisher) + { + gMockResponsePublisher->publish(table, key, intent_attrs, status, replace); + } +} void ResponsePublisher::writeToDB( const std::string& table, const std::string& key, const std::vector& values, const std::string& op, bool replace) {} + +void ResponsePublisher::flush() {} + +void ResponsePublisher::setBuffered(bool buffered) {} diff --git a/tests/mock_tests/fake_warmstarthelper.cpp b/tests/mock_tests/fake_warmstarthelper.cpp new file mode 100644 index 0000000000..147227df15 --- /dev/null +++ b/tests/mock_tests/fake_warmstarthelper.cpp @@ -0,0 +1,79 @@ +#include "warmRestartHelper.h" + +static swss::DBConnector gDb("APPL_DB", 0); + +namespace swss { + +WarmStartHelper::WarmStartHelper(RedisPipeline *pipeline, + ProducerStateTable *syncTable, + const std::string &syncTableName, + const std::string &dockerName, + const std::string &appName) : + m_restorationTable(&gDb, "") +{ +} + +WarmStartHelper::~WarmStartHelper() +{ +} + +void WarmStartHelper::setState(WarmStart::WarmStartState state) +{ +} + +WarmStart::WarmStartState WarmStartHelper::getState() const +{ + return WarmStart::WarmStartState::INITIALIZED; +} + +bool 
WarmStartHelper::checkAndStart() +{ + return false; +} + +bool WarmStartHelper::isReconciled() const +{ + return false; +} + +bool WarmStartHelper::inProgress() const +{ + return false; +} + +uint32_t WarmStartHelper::getRestartTimer() const +{ + return 0; +} + +bool WarmStartHelper::runRestoration() +{ + return false; +} + +void WarmStartHelper::insertRefreshMap(const KeyOpFieldsValuesTuple &kfv) +{ +} + +void WarmStartHelper::reconcile() +{ +} + +const std::string WarmStartHelper::printKFV(const std::string &key, + const std::vector &fv) +{ + return ""; +} + +bool WarmStartHelper::compareAllFV(const std::vector &left, + const std::vector &right) +{ + return false; +} + +bool WarmStartHelper::compareOneFV(const std::string &v1, const std::string &v2) +{ + return false; +} + +} diff --git a/tests/mock_tests/fdborch/flush_syncd_notif_ut.cpp b/tests/mock_tests/fdborch/flush_syncd_notif_ut.cpp index d0f8954fd8..e6bd8bea1c 100644 --- a/tests/mock_tests/fdborch/flush_syncd_notif_ut.cpp +++ b/tests/mock_tests/fdborch/flush_syncd_notif_ut.cpp @@ -10,6 +10,7 @@ #define ETH0 "Ethernet0" #define VLAN40 "Vlan40" +#define VXLAN_REMOTE "Vxlan_1.1.1.1" extern redisReply *mockReply; extern CrmOrch* gCrmOrch; @@ -19,6 +20,28 @@ Test Fixture */ namespace fdb_syncd_flush_test { + + sai_fdb_api_t ut_sai_fdb_api; + sai_fdb_api_t *pold_sai_fdb_api; + + sai_status_t _ut_stub_sai_create_fdb_entry ( + _In_ const sai_fdb_entry_t *fdb_entry, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list) + { + return SAI_STATUS_SUCCESS; + } + void _hook_sai_fdb_api() + { + ut_sai_fdb_api = *sai_fdb_api; + pold_sai_fdb_api = sai_fdb_api; + ut_sai_fdb_api.create_fdb_entry = _ut_stub_sai_create_fdb_entry; + sai_fdb_api = &ut_sai_fdb_api; + } + void _unhook_sai_fdb_api() + { + sai_fdb_api = pold_sai_fdb_api; + } struct FdbOrchTest : public ::testing::Test { std::shared_ptr m_config_db; @@ -40,7 +63,7 @@ namespace fdb_syncd_flush_test }; ut_helper::initSaiApi(profile); - + /* Create Switch 
*/ sai_attribute_t attr; attr.id = SAI_SWITCH_ATTR_INIT_SWITCH; @@ -70,6 +93,8 @@ namespace fdb_syncd_flush_test // 2) Crmorch ASSERT_EQ(gCrmOrch, nullptr); gCrmOrch = new CrmOrch(m_config_db.get(), CFG_CRM_TABLE_NAME); + VxlanTunnelOrch *vxlan_tunnel_orch_1 = new VxlanTunnelOrch(m_state_db.get(), m_app_db.get(), APP_VXLAN_TUNNEL_TABLE_NAME); + gDirectory.set(vxlan_tunnel_orch_1); // Construct fdborch vector app_fdb_tables = { @@ -91,7 +116,7 @@ namespace fdb_syncd_flush_test virtual void TearDown() override { delete gCrmOrch; gCrmOrch = nullptr; - + gDirectory.m_values.clear(); ut_helper::uninitSaiApi(); } }; @@ -126,6 +151,17 @@ namespace fdb_syncd_flush_test m_portsOrch->saiOidToAlias[oid] = alias; } + void setUpVxlanPort(PortsOrch* m_portsOrch){ + /* Updates portsOrch internal cache for Ethernet0 */ + std::string alias = VXLAN_REMOTE; + sai_object_id_t oid = 0x10000000004a5; + + Port port(alias, Port::PHY); + m_portsOrch->m_portList[alias] = port; + m_portsOrch->saiOidToAlias[oid] = alias; + } + + void setUpVlanMember(PortsOrch* m_portsOrch){ /* Updates portsOrch internal cache for adding Ethernet0 into Vlan40 */ sai_object_id_t bridge_port_id = 0x3a000000002c33; @@ -136,6 +172,16 @@ namespace fdb_syncd_flush_test m_portsOrch->m_portList[VLAN40].m_members.insert(ETH0); } + void setUpVxlanMember(PortsOrch* m_portsOrch){ + /* Updates portsOrch internal cache for adding Ethernet0 into Vlan40 */ + sai_object_id_t bridge_port_id = 0x3a000000002c34; + + /* Add Bridge Port */ + m_portsOrch->m_portList[VXLAN_REMOTE].m_bridge_port_id = bridge_port_id; + m_portsOrch->saiOidToAlias[bridge_port_id] = VXLAN_REMOTE; + m_portsOrch->m_portList[VLAN40].m_members.insert(VXLAN_REMOTE); + } + void triggerUpdate(FdbOrch* m_fdborch, sai_fdb_event_t type, vector mac_addr, @@ -146,7 +192,7 @@ namespace fdb_syncd_flush_test *(entry.mac_address+i) = mac_addr[i]; } entry.bv_id = bv_id; - m_fdborch->update(type, &entry, bridge_port_id); + m_fdborch->update(type, &entry, bridge_port_id, 
SAI_FDB_ENTRY_TYPE_DYNAMIC); } } @@ -445,4 +491,46 @@ namespace fdb_syncd_flush_test ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", "port", port), false); ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", "type", entry_type), false); } + + /* Test Consolidated Flush with origin VXLAN */ + TEST_F(FdbOrchTest, ConsolidatedFlushAllVxLAN) + { + _hook_sai_fdb_api(); + ASSERT_NE(m_portsOrch, nullptr); + setUpVlan(m_portsOrch.get()); + setUpVxlanPort(m_portsOrch.get()); + ASSERT_NE(m_portsOrch->m_portList.find(VLAN40), m_portsOrch->m_portList.end()); + ASSERT_NE(m_portsOrch->m_portList.find(VXLAN_REMOTE), m_portsOrch->m_portList.end()); + setUpVxlanMember(m_portsOrch.get()); + + FdbData fdbData; + fdbData.bridge_port_id = SAI_NULL_OBJECT_ID; + fdbData.type = "dynamic"; + fdbData.origin = FDB_ORIGIN_VXLAN_ADVERTIZED; + fdbData.remote_ip = "1.1.1.1"; + fdbData.esi = ""; + fdbData.vni = 100; + FdbEntry entry; + + MacAddress mac1 = MacAddress("52:54:00:ac:3a:99"); + entry.mac = mac1; + entry.port_name = VXLAN_REMOTE; + + entry.bv_id = m_portsOrch->m_portList[VLAN40].m_vlan_info.vlan_oid; + m_fdborch->addFdbEntry(entry, VXLAN_REMOTE, fdbData); + + /* Make sure fdb_count is incremented as expected */ + ASSERT_EQ(m_portsOrch->m_portList[VLAN40].m_fdb_count, 1); + ASSERT_EQ(m_portsOrch->m_portList[VXLAN_REMOTE].m_fdb_count, 1); + + /* Event2: Send a Consolidated Flush response from syncd */ + vector flush_mac_addr = {0, 0, 0, 0, 0, 0}; + triggerUpdate(m_fdborch.get(), SAI_FDB_EVENT_FLUSHED, flush_mac_addr, SAI_NULL_OBJECT_ID, + SAI_NULL_OBJECT_ID); + + /* make sure fdb_counters are decremented */ + ASSERT_EQ(m_portsOrch->m_portList[VLAN40].m_fdb_count, 1); + ASSERT_EQ(m_portsOrch->m_portList[VXLAN_REMOTE].m_fdb_count, 1); + _unhook_sai_fdb_api(); + } } diff --git a/tests/mock_tests/flowcounterrouteorch_ut.cpp b/tests/mock_tests/flowcounterrouteorch_ut.cpp index 25ed95cb1e..42c96c4c63 100644 --- 
a/tests/mock_tests/flowcounterrouteorch_ut.cpp +++ b/tests/mock_tests/flowcounterrouteorch_ut.cpp @@ -25,9 +25,9 @@ namespace flowcounterrouteorch_test sai_remove_counter_fn old_remove_counter; sai_status_t _ut_stub_create_counter( - _Out_ sai_object_id_t *counter_id, - _In_ sai_object_id_t switch_id, - _In_ uint32_t attr_count, + _Out_ sai_object_id_t *counter_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list) { num_created_counter ++; @@ -98,7 +98,7 @@ namespace flowcounterrouteorch_test gVirtualRouterId = attr.value.oid; - + ASSERT_EQ(gCrmOrch, nullptr); gCrmOrch = new CrmOrch(m_config_db.get(), CFG_CRM_TABLE_NAME); @@ -135,6 +135,7 @@ namespace flowcounterrouteorch_test ASSERT_EQ(gPortsOrch, nullptr); gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + gDirectory.set(gPortsOrch); vector vnet_tables = { APP_VNET_RT_TABLE_NAME, @@ -200,6 +201,12 @@ namespace flowcounterrouteorch_test }; gSrv6Orch = new Srv6Orch(m_app_db.get(), srv6_tables, gSwitchOrch, gVrfOrch, gNeighOrch); + // Start FlowCounterRouteOrch + static const vector route_pattern_tables = { + CFG_FLOW_COUNTER_ROUTE_PATTERN_TABLE_NAME, + }; + gFlowCounterRouteOrch = new FlowCounterRouteOrch(m_config_db.get(), route_pattern_tables); + ASSERT_EQ(gRouteOrch, nullptr); const int routeorch_pri = 5; vector route_tables = { @@ -276,14 +283,7 @@ namespace flowcounterrouteorch_test consumer->addToSync(entries); static_cast(flexCounterOrch)->doTask(); - // Start FlowCounterRouteOrch - static const vector route_pattern_tables = { - CFG_FLOW_COUNTER_ROUTE_PATTERN_TABLE_NAME, - }; - gFlowCounterRouteOrch = new FlowCounterRouteOrch(m_config_db.get(), route_pattern_tables); - static_cast(gFlowCounterRouteOrch)->doTask(); - return; } @@ -300,6 +300,9 @@ namespace flowcounterrouteorch_test delete gBfdOrch; gBfdOrch = nullptr; + delete gSrv6Orch; + gSrv6Orch = nullptr; + delete gNeighOrch; gNeighOrch = nullptr; @@ 
-311,13 +314,10 @@ namespace flowcounterrouteorch_test delete gIntfsOrch; gIntfsOrch = nullptr; - + delete gFgNhgOrch; gFgNhgOrch = nullptr; - delete gSrv6Orch; - gSrv6Orch = nullptr; - delete gRouteOrch; gRouteOrch = nullptr; @@ -358,4 +358,44 @@ namespace flowcounterrouteorch_test ASSERT_TRUE(current_counter_num - num_created_counter == 1); } + + TEST_F(FlowcounterRouteOrchTest, DelayAddVRF) + { + std::deque entries; + // Setting route pattern with VRF does not exist + auto current_counter_num = num_created_counter; + entries.push_back({"Vrf1|1.1.1.0/24", "SET", { {"max_match_count", "10"}}}); + auto consumer = dynamic_cast(gFlowCounterRouteOrch->getExecutor(CFG_FLOW_COUNTER_ROUTE_PATTERN_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gFlowCounterRouteOrch)->doTask(); + ASSERT_TRUE(num_created_counter - current_counter_num == 0); + + // Create VRF + entries.push_back({"Vrf1", "SET", { {"v4", "true"} }}); + auto vrf_consumer = dynamic_cast(gVrfOrch->getExecutor(APP_VRF_TABLE_NAME)); + vrf_consumer->addToSync(entries); + static_cast(gVrfOrch)->doTask(); + ASSERT_TRUE(num_created_counter - current_counter_num == 0); + + // Add route to VRF + Table routeTable = Table(m_app_db.get(), APP_ROUTE_TABLE_NAME); + routeTable.set("Vrf1:1.1.1.1/32", { {"ifname", "Ethernet0" }, + {"nexthop", "10.0.0.2" }}); + gRouteOrch->addExistingData(&routeTable); + static_cast(gRouteOrch)->doTask(); + ASSERT_TRUE(num_created_counter - current_counter_num == 1); + + // Deleting route pattern + current_counter_num = num_created_counter; + entries.clear(); + entries.push_back({"Vrf1|1.1.1.0/24", "DEL", { {"max_match_count", "10"}}}); + consumer->addToSync(entries); + static_cast(gFlowCounterRouteOrch)->doTask(); + ASSERT_TRUE(current_counter_num - num_created_counter == 1); + + // Deleting VRF + entries.push_back({"Vrf1", "DEL", { {"v4", "true"} }}); + vrf_consumer->addToSync(entries); + static_cast(gVrfOrch)->doTask(); + } } \ No newline at end of file diff --git 
a/tests/mock_tests/fpmsyncd/test_fpmlink.cpp b/tests/mock_tests/fpmsyncd/test_fpmlink.cpp new file mode 100644 index 0000000000..258ba669a8 --- /dev/null +++ b/tests/mock_tests/fpmsyncd/test_fpmlink.cpp @@ -0,0 +1,71 @@ +#include "fpmsyncd/fpmlink.h" + +#include + +#include +#include + +using namespace swss; + +using ::testing::_; + +class MockMsgHandler : public NetMsg +{ +public: + MOCK_METHOD2(onMsg, void(int, nl_object*)); +}; + +class FpmLinkTest : public ::testing::Test +{ +public: + void SetUp() override + { + NetDispatcher::getInstance().registerMessageHandler(RTM_NEWROUTE, &m_mock); + NetDispatcher::getInstance().registerMessageHandler(RTM_DELROUTE, &m_mock); + } + + void TearDown() override + { + NetDispatcher::getInstance().unregisterMessageHandler(RTM_NEWROUTE); + NetDispatcher::getInstance().unregisterMessageHandler(RTM_DELROUTE); + } + + DBConnector m_db{"APPL_DB", 0}; + RedisPipeline m_pipeline{&m_db, 1}; + RouteSync m_routeSync{&m_pipeline}; + FpmLink m_fpm{&m_routeSync}; + MockMsgHandler m_mock; +}; + +TEST_F(FpmLinkTest, SingleNlMessageInFpmMessage) +{ + // Single FPM message containing single RTM_NEWROUTE + alignas(fpm_msg_hdr_t) unsigned char fpmMsgBuffer[] = { + 0x01, 0x01, 0x00, 0x40, 0x3C, 0x00, 0x00, 0x00, 0x18, 0x00, 0x01, 0x05, 0x00, 0x00, 0x00, 0x00, 0xE0, + 0x12, 0x6F, 0xC4, 0x02, 0x18, 0x00, 0x00, 0xFE, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, + 0x01, 0x00, 0x01, 0x01, 0x01, 0x00, 0x08, 0x00, 0x06, 0x00, 0x14, 0x00, 0x00, 0x00, 0x08, 0x00, 0x05, + 0x00, 0xAC, 0x1E, 0x38, 0xA6, 0x08, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00 + }; + + EXPECT_CALL(m_mock, onMsg(_, _)).Times(1); + + m_fpm.processFpmMessage(reinterpret_cast(static_cast(fpmMsgBuffer))); +} + +TEST_F(FpmLinkTest, TwoNlMessagesInFpmMessage) +{ + // Single FPM message containing RTM_DELROUTE and RTM_NEWROUTE + alignas(fpm_msg_hdr_t) unsigned char fpmMsgBuffer[] = { + 0x01, 0x01, 0x00, 0x6C, 0x2C, 0x00, 0x00, 0x00, 0x19, 0x00, 0x01, 0x04, 0x00, 0x00, 0x00, 0x00, 0xE0, 
0x12, + 0x6F, 0xC4, 0x02, 0x18, 0x00, 0x00, 0xFE, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x01, 0x00, + 0x01, 0x01, 0x01, 0x00, 0x08, 0x00, 0x06, 0x00, 0x14, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x18, 0x00, + 0x01, 0x05, 0x00, 0x00, 0x00, 0x00, 0xE0, 0x12, 0x6F, 0xC4, 0x02, 0x18, 0x00, 0x00, 0xFE, 0x02, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x01, 0x00, 0x01, 0x01, 0x01, 0x00, 0x08, 0x00, 0x06, 0x00, 0x14, 0x00, + 0x00, 0x00, 0x08, 0x00, 0x05, 0x00, 0xAC, 0x1E, 0x38, 0xA7, 0x08, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00 + }; + + EXPECT_CALL(m_mock, onMsg(_, _)).Times(2); + + m_fpm.processFpmMessage(reinterpret_cast(static_cast(fpmMsgBuffer))); +} + diff --git a/tests/mock_tests/fpmsyncd/test_routesync.cpp b/tests/mock_tests/fpmsyncd/test_routesync.cpp new file mode 100644 index 0000000000..a8de78859f --- /dev/null +++ b/tests/mock_tests/fpmsyncd/test_routesync.cpp @@ -0,0 +1,234 @@ +#include "redisutility.h" + +#include +#include +#include "mock_table.h" +#define private public +#include "fpmsyncd/routesync.h" +#undef private + +using namespace swss; +#define MAX_PAYLOAD 1024 + +using ::testing::_; + +class MockRouteSync : public RouteSync +{ +public: + MockRouteSync(RedisPipeline *m_pipeline) : RouteSync(m_pipeline) + { + } + + ~MockRouteSync() + { + } + MOCK_METHOD(bool, getEvpnNextHop, (nlmsghdr *, int, + rtattr *[], std::string&, + std::string& , std::string&, + std::string&), (override)); +}; +class MockFpm : public FpmInterface +{ +public: + MockFpm(RouteSync* routeSync) : + m_routeSync(routeSync) + { + m_routeSync->onFpmConnected(*this); + } + + ~MockFpm() override + { + m_routeSync->onFpmDisconnected(); + } + + MOCK_METHOD1(send, bool(nlmsghdr*)); + MOCK_METHOD0(getFd, int()); + MOCK_METHOD0(readData, uint64_t()); + +private: + RouteSync* m_routeSync{}; +}; + +class FpmSyncdResponseTest : public ::testing::Test +{ +public: + void SetUp() override + { + EXPECT_EQ(rtnl_route_read_protocol_names(DefaultRtProtoPath), 0); + 
m_routeSync.setSuppressionEnabled(true); + } + + void TearDown() override + { + } + + shared_ptr m_db = make_shared("APPL_DB", 0); + shared_ptr m_pipeline = make_shared(m_db.get()); + RouteSync m_routeSync{m_pipeline.get()}; + MockFpm m_mockFpm{&m_routeSync}; + MockRouteSync m_mockRouteSync{m_pipeline.get()}; +}; + +TEST_F(FpmSyncdResponseTest, RouteResponseFeedbackV4) +{ + // Expect the message to zebra is sent + EXPECT_CALL(m_mockFpm, send(_)).WillOnce([&](nlmsghdr* hdr) -> bool { + rtnl_route* routeObject{}; + + rtnl_route_parse(hdr, &routeObject); + + // table is 0 when no in default VRF + EXPECT_EQ(rtnl_route_get_table(routeObject), 0); + EXPECT_EQ(rtnl_route_get_protocol(routeObject), RTPROT_KERNEL); + + // Offload flag is set + EXPECT_EQ(rtnl_route_get_flags(routeObject) & RTM_F_OFFLOAD, RTM_F_OFFLOAD); + + return true; + }); + + m_routeSync.onRouteResponse("1.0.0.0/24", { + {"err_str", "SWSS_RC_SUCCESS"}, + {"protocol", "kernel"}, + }); +} + +TEST_F(FpmSyncdResponseTest, RouteResponseFeedbackV4Vrf) +{ + // Expect the message to zebra is sent + EXPECT_CALL(m_mockFpm, send(_)).WillOnce([&](nlmsghdr* hdr) -> bool { + rtnl_route* routeObject{}; + + rtnl_route_parse(hdr, &routeObject); + + // table is 42 (returned by fake link cache) when in non default VRF + EXPECT_EQ(rtnl_route_get_table(routeObject), 42); + EXPECT_EQ(rtnl_route_get_protocol(routeObject), 200); + + // Offload flag is set + EXPECT_EQ(rtnl_route_get_flags(routeObject) & RTM_F_OFFLOAD, RTM_F_OFFLOAD); + + return true; + }); + + m_routeSync.onRouteResponse("Vrf0:1.0.0.0/24", { + {"err_str", "SWSS_RC_SUCCESS"}, + {"protocol", "200"}, + }); +} + +TEST_F(FpmSyncdResponseTest, RouteResponseFeedbackV6) +{ + // Expect the message to zebra is sent + EXPECT_CALL(m_mockFpm, send(_)).WillOnce([&](nlmsghdr* hdr) -> bool { + rtnl_route* routeObject{}; + + rtnl_route_parse(hdr, &routeObject); + + // table is 0 when no in default VRF + EXPECT_EQ(rtnl_route_get_table(routeObject), 0); + 
EXPECT_EQ(rtnl_route_get_protocol(routeObject), RTPROT_KERNEL); + + // Offload flag is set + EXPECT_EQ(rtnl_route_get_flags(routeObject) & RTM_F_OFFLOAD, RTM_F_OFFLOAD); + + return true; + }); + + m_routeSync.onRouteResponse("1::/64", { + {"err_str", "SWSS_RC_SUCCESS"}, + {"protocol", "kernel"}, + }); +} + +TEST_F(FpmSyncdResponseTest, RouteResponseFeedbackV6Vrf) +{ + // Expect the message to zebra is sent + EXPECT_CALL(m_mockFpm, send(_)).WillOnce([&](nlmsghdr* hdr) -> bool { + rtnl_route* routeObject{}; + + rtnl_route_parse(hdr, &routeObject); + + // table is 42 (returned by fake link cache) when in non default VRF + EXPECT_EQ(rtnl_route_get_table(routeObject), 42); + EXPECT_EQ(rtnl_route_get_protocol(routeObject), 200); + + // Offload flag is set + EXPECT_EQ(rtnl_route_get_flags(routeObject) & RTM_F_OFFLOAD, RTM_F_OFFLOAD); + + return true; + }); + + m_routeSync.onRouteResponse("Vrf0:1::/64", { + {"err_str", "SWSS_RC_SUCCESS"}, + {"protocol", "200"}, + }); +} + +TEST_F(FpmSyncdResponseTest, WarmRestart) +{ + std::vector fieldValues = { + {"protocol", "kernel"}, + }; + + DBConnector applStateDb{"APPL_STATE_DB", 0}; + Table routeStateTable{&applStateDb, APP_ROUTE_TABLE_NAME}; + + routeStateTable.set("1.0.0.0/24", fieldValues); + routeStateTable.set("2.0.0.0/24", fieldValues); + routeStateTable.set("Vrf0:3.0.0.0/24", fieldValues); + + EXPECT_CALL(m_mockFpm, send(_)).Times(3).WillRepeatedly([&](nlmsghdr* hdr) -> bool { + rtnl_route* routeObject{}; + + rtnl_route_parse(hdr, &routeObject); + + // Offload flag is set + EXPECT_EQ(rtnl_route_get_flags(routeObject) & RTM_F_OFFLOAD, RTM_F_OFFLOAD); + + return true; + }); + + m_routeSync.onWarmStartEnd(applStateDb); +} + +TEST_F(FpmSyncdResponseTest, testEvpn) +{ + struct nlmsghdr *nlh = (struct nlmsghdr *) malloc(NLMSG_SPACE(MAX_PAYLOAD)); + shared_ptr m_app_db; + m_app_db = make_shared("APPL_DB", 0); + Table app_route_table(m_app_db.get(), APP_ROUTE_TABLE_NAME); + + memset(nlh, 0, NLMSG_SPACE(MAX_PAYLOAD)); + 
nlh->nlmsg_type = RTM_NEWROUTE; + struct rtmsg rtm; + rtm.rtm_family = AF_INET; + rtm.rtm_protocol = 200; + rtm.rtm_type = RTN_UNICAST; + rtm.rtm_table = 0; + rtm.rtm_dst_len = 32; + nlh->nlmsg_len = NLMSG_SPACE(MAX_PAYLOAD); + memcpy(NLMSG_DATA(nlh), &rtm, sizeof(rtm)); + + EXPECT_CALL(m_mockRouteSync, getEvpnNextHop(_, _, _, _, _, _, _)).Times(testing::AtLeast(1)).WillOnce([&]( + struct nlmsghdr *h, int received_bytes, + struct rtattr *tb[], std::string& nexthops, + std::string& vni_list, std::string& mac_list, + std::string& intf_list)-> bool { + vni_list="100"; + mac_list="aa:aa:aa:aa:aa:aa"; + intf_list="Ethernet0"; + nexthops = "1.1.1.1"; + return true; + }); + m_mockRouteSync.onMsgRaw(nlh); + vector keys; + vector fieldValues; + app_route_table.getKeys(keys); + ASSERT_EQ(keys.size(), 1); + + app_route_table.get(keys[0], fieldValues); + auto value = swss::fvsGetValue(fieldValues, "protocol", true); + ASSERT_EQ(value.get(), "0xc8"); + +} diff --git a/tests/mock_tests/intfmgrd/add_ipv6_prefix_ut.cpp b/tests/mock_tests/intfmgrd/intfmgr_ut.cpp similarity index 76% rename from tests/mock_tests/intfmgrd/add_ipv6_prefix_ut.cpp rename to tests/mock_tests/intfmgrd/intfmgr_ut.cpp index 2ed1a1b6af..ef43cdeb6b 100644 --- a/tests/mock_tests/intfmgrd/add_ipv6_prefix_ut.cpp +++ b/tests/mock_tests/intfmgrd/intfmgr_ut.cpp @@ -1,6 +1,6 @@ #include "gtest/gtest.h" #include -#include +#include #include #include #include "../mock_table.h" @@ -20,6 +20,9 @@ int cb(const std::string &cmd, std::string &stdout){ else if (cmd.find("/sbin/ip -6 address \"add\"") == 0) { return Ethernet0IPv6Set ? 
0 : 2; } + else if (cmd == "/sbin/ip link set \"Ethernet64.10\" \"up\""){ + return 1; + } else { return 0; } @@ -27,7 +30,7 @@ int cb(const std::string &cmd, std::string &stdout){ } // Test Fixture -namespace add_ipv6_prefix_ut +namespace intfmgr_ut { struct IntfMgrTest : public ::testing::Test { @@ -35,14 +38,14 @@ namespace add_ipv6_prefix_ut std::shared_ptr m_app_db; std::shared_ptr m_state_db; std::vector cfg_intf_tables; - + virtual void SetUp() override - { + { testing_db::reset(); m_config_db = std::make_shared("CONFIG_DB", 0); m_app_db = std::make_shared("APPL_DB", 0); m_state_db = std::make_shared("STATE_DB", 0); - + swss::WarmStart::initialize("intfmgrd", "swss"); std::vector tables = { @@ -106,4 +109,22 @@ namespace add_ipv6_prefix_ut } ASSERT_EQ(ip_cmd_called, 1); } + + //This test except no runtime error when the set admin status command failed + //and the subinterface has not ok status (for example not existing subinterface) + TEST_F(IntfMgrTest, testSetAdminStatusFailToNotOkSubInt){ + swss::IntfMgr intfmgr(m_config_db.get(), m_app_db.get(), m_state_db.get(), cfg_intf_tables); + intfmgr.setHostSubIntfAdminStatus("Ethernet64.10", "up", "up"); + } + + //This test except runtime error when the set admin status command failed + //and the subinterface has ok status + TEST_F(IntfMgrTest, testSetAdminStatusFailToOkSubInt){ + swss::IntfMgr intfmgr(m_config_db.get(), m_app_db.get(), m_state_db.get(), cfg_intf_tables); + /* Set portStateTable */ + std::vector values; + values.emplace_back("state", "ok"); + intfmgr.m_statePortTable.set("Ethernet64.10", values, "SET", ""); + EXPECT_THROW(intfmgr.setHostSubIntfAdminStatus("Ethernet64.10", "up", "up"), std::runtime_error); + } } diff --git a/tests/mock_tests/intfsorch_ut.cpp b/tests/mock_tests/intfsorch_ut.cpp new file mode 100644 index 0000000000..ffbf348ed4 --- /dev/null +++ b/tests/mock_tests/intfsorch_ut.cpp @@ -0,0 +1,330 @@ +#define private public // make Directory::m_values available to clean it. 
+#include "directory.h" +#undef private +#include "gtest/gtest.h" +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_table.h" +#include +#include + + + +namespace intfsorch_test +{ + using namespace std; + + int create_rif_count = 0; + int remove_rif_count = 0; + sai_router_interface_api_t *pold_sai_rif_api; + sai_router_interface_api_t ut_sai_rif_api; + + sai_status_t _ut_create_router_interface( + _Out_ sai_object_id_t *router_interface_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list) + { + ++create_rif_count; + return SAI_STATUS_SUCCESS; + } + + sai_status_t _ut_remove_router_interface( + _In_ sai_object_id_t router_interface_id) + { + ++remove_rif_count; + return SAI_STATUS_SUCCESS; + } + + struct IntfsOrchTest : public ::testing::Test + { + shared_ptr m_app_db; + shared_ptr m_config_db; + shared_ptr m_state_db; + shared_ptr m_chassis_app_db; + + //sai_router_interface_api_t *old_sai_rif_api_ptr; + + //sai_create_router_interface_fn old_create_rif; + //sai_remove_router_interface_fn old_remove_rif; + void SetUp() override + { + map profile = { + { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, + { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } + }; + + ut_helper::initSaiApi(profile); + pold_sai_rif_api = sai_router_intfs_api; + ut_sai_rif_api = *sai_router_intfs_api; + sai_router_intfs_api = &ut_sai_rif_api; + + sai_router_intfs_api->create_router_interface = _ut_create_router_interface; + sai_router_intfs_api->remove_router_interface = _ut_remove_router_interface; + + m_app_db = make_shared("APPL_DB", 0); + m_config_db = make_shared("CONFIG_DB", 0); + m_state_db = make_shared("STATE_DB", 0); + m_chassis_app_db = make_shared("CHASSIS_APP_DB", 0); + + sai_attribute_t attr; + + attr.id = SAI_SWITCH_ATTR_INIT_SWITCH; + attr.value.booldata = true; + + auto status = sai_switch_api->create_switch(&gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + // Get switch 
source MAC address + attr.id = SAI_SWITCH_ATTR_SRC_MAC_ADDRESS; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gMacAddress = attr.value.mac; + + attr.id = SAI_SWITCH_ATTR_DEFAULT_VIRTUAL_ROUTER_ID; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gVirtualRouterId = attr.value.oid; + + + ASSERT_EQ(gCrmOrch, nullptr); + gCrmOrch = new CrmOrch(m_config_db.get(), CFG_CRM_TABLE_NAME); + + TableConnector stateDbSwitchTable(m_state_db.get(), "SWITCH_CAPABILITY"); + TableConnector conf_asic_sensors(m_config_db.get(), CFG_ASIC_SENSORS_TABLE_NAME); + TableConnector app_switch_table(m_app_db.get(), APP_SWITCH_TABLE_NAME); + + vector switch_tables = { + conf_asic_sensors, + app_switch_table + }; + + ASSERT_EQ(gSwitchOrch, nullptr); + gSwitchOrch = new SwitchOrch(m_app_db.get(), switch_tables, stateDbSwitchTable); + + // Create dependencies ... + TableConnector stateDbBfdSessionTable(m_state_db.get(), STATE_BFD_SESSION_TABLE_NAME); + gBfdOrch = new BfdOrch(m_app_db.get(), APP_BFD_SESSION_TABLE_NAME, stateDbBfdSessionTable); + + const int portsorch_base_pri = 40; + vector ports_tables = { + { APP_PORT_TABLE_NAME, portsorch_base_pri + 5 }, + { APP_VLAN_TABLE_NAME, portsorch_base_pri + 2 }, + { APP_VLAN_MEMBER_TABLE_NAME, portsorch_base_pri }, + { APP_LAG_TABLE_NAME, portsorch_base_pri + 4 }, + { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } + }; + + vector flex_counter_tables = { + CFG_FLEX_COUNTER_TABLE_NAME + }; + auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); + gDirectory.set(flexCounterOrch); + + ASSERT_EQ(gPortsOrch, nullptr); + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + + vector vnet_tables = { + APP_VNET_RT_TABLE_NAME, + APP_VNET_RT_TUNNEL_TABLE_NAME + }; + + vector cfg_vnet_tables = { + CFG_VNET_RT_TABLE_NAME, + 
CFG_VNET_RT_TUNNEL_TABLE_NAME + }; + + auto* vnet_orch = new VNetOrch(m_app_db.get(), APP_VNET_TABLE_NAME); + gDirectory.set(vnet_orch); + auto* cfg_vnet_rt_orch = new VNetCfgRouteOrch(m_config_db.get(), m_app_db.get(), cfg_vnet_tables); + gDirectory.set(cfg_vnet_rt_orch); + auto* vnet_rt_orch = new VNetRouteOrch(m_app_db.get(), vnet_tables, vnet_orch); + gDirectory.set(vnet_rt_orch); + ASSERT_EQ(gVrfOrch, nullptr); + gVrfOrch = new VRFOrch(m_app_db.get(), APP_VRF_TABLE_NAME, m_state_db.get(), STATE_VRF_OBJECT_TABLE_NAME); + gDirectory.set(gVrfOrch); + + ASSERT_EQ(gIntfsOrch, nullptr); + gIntfsOrch = new IntfsOrch(m_app_db.get(), APP_INTF_TABLE_NAME, gVrfOrch, m_chassis_app_db.get()); + + const int fdborch_pri = 20; + + vector app_fdb_tables = { + { APP_FDB_TABLE_NAME, FdbOrch::fdborch_pri}, + { APP_VXLAN_FDB_TABLE_NAME, FdbOrch::fdborch_pri}, + { APP_MCLAG_FDB_TABLE_NAME, fdborch_pri} + }; + + TableConnector stateDbFdb(m_state_db.get(), STATE_FDB_TABLE_NAME); + TableConnector stateMclagDbFdb(m_state_db.get(), STATE_MCLAG_REMOTE_FDB_TABLE_NAME); + ASSERT_EQ(gFdbOrch, nullptr); + gFdbOrch = new FdbOrch(m_app_db.get(), app_fdb_tables, stateDbFdb, stateMclagDbFdb, gPortsOrch); + + ASSERT_EQ(gNeighOrch, nullptr); + gNeighOrch = new NeighOrch(m_app_db.get(), APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassis_app_db.get()); + + auto* tunnel_decap_orch = new TunnelDecapOrch(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); + vector mux_tables = { + CFG_MUX_CABLE_TABLE_NAME, + CFG_PEER_SWITCH_TABLE_NAME + }; + auto* mux_orch = new MuxOrch(m_config_db.get(), mux_tables, tunnel_decap_orch, gNeighOrch, gFdbOrch); + gDirectory.set(mux_orch); + + ASSERT_EQ(gFgNhgOrch, nullptr); + const int fgnhgorch_pri = 15; + + vector fgnhg_tables = { + { CFG_FG_NHG, fgnhgorch_pri }, + { CFG_FG_NHG_PREFIX, fgnhgorch_pri }, + { CFG_FG_NHG_MEMBER, fgnhgorch_pri } + }; + gFgNhgOrch = new FgNhgOrch(m_config_db.get(), m_app_db.get(), m_state_db.get(), fgnhg_tables, gNeighOrch, 
gIntfsOrch, gVrfOrch); + + ASSERT_EQ(gSrv6Orch, nullptr); + vector srv6_tables = { + APP_SRV6_SID_LIST_TABLE_NAME, + APP_SRV6_MY_SID_TABLE_NAME + }; + gSrv6Orch = new Srv6Orch(m_app_db.get(), srv6_tables, gSwitchOrch, gVrfOrch, gNeighOrch); + + // Start FlowCounterRouteOrch + static const vector route_pattern_tables = { + CFG_FLOW_COUNTER_ROUTE_PATTERN_TABLE_NAME, + }; + gFlowCounterRouteOrch = new FlowCounterRouteOrch(m_config_db.get(), route_pattern_tables); + + ASSERT_EQ(gRouteOrch, nullptr); + const int routeorch_pri = 5; + vector route_tables = { + { APP_ROUTE_TABLE_NAME, routeorch_pri }, + { APP_LABEL_ROUTE_TABLE_NAME, routeorch_pri } + }; + gRouteOrch = new RouteOrch(m_app_db.get(), route_tables, gSwitchOrch, gNeighOrch, gIntfsOrch, gVrfOrch, gFgNhgOrch, gSrv6Orch); + gNhgOrch = new NhgOrch(m_app_db.get(), APP_NEXTHOP_GROUP_TABLE_NAME); + + // Recreate buffer orch to read populated data + vector buffer_tables = { APP_BUFFER_POOL_TABLE_NAME, + APP_BUFFER_PROFILE_TABLE_NAME, + APP_BUFFER_QUEUE_TABLE_NAME, + APP_BUFFER_PG_TABLE_NAME, + APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, + APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME }; + + gBufferOrch = new BufferOrch(m_app_db.get(), m_config_db.get(), m_state_db.get(), buffer_tables); + + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate pot table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + portTable.set("PortInitDone", { { "lanes", "0" } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + } + + void TearDown() override + { + gDirectory.m_values.clear(); + + delete gCrmOrch; + gCrmOrch = nullptr; + + delete gSwitchOrch; + gSwitchOrch = nullptr; 
+ + delete gBfdOrch; + gBfdOrch = nullptr; + + delete gSrv6Orch; + gSrv6Orch = nullptr; + + delete gNeighOrch; + gNeighOrch = nullptr; + + delete gFdbOrch; + gFdbOrch = nullptr; + + delete gPortsOrch; + gPortsOrch = nullptr; + + delete gIntfsOrch; + gIntfsOrch = nullptr; + + delete gFgNhgOrch; + gFgNhgOrch = nullptr; + + delete gRouteOrch; + gRouteOrch = nullptr; + + delete gNhgOrch; + gNhgOrch = nullptr; + + delete gBufferOrch; + gBufferOrch = nullptr; + + delete gVrfOrch; + gVrfOrch = nullptr; + + delete gFlowCounterRouteOrch; + gFlowCounterRouteOrch = nullptr; + + sai_router_intfs_api = pold_sai_rif_api; + ut_helper::uninitSaiApi(); + } + }; + + TEST_F(IntfsOrchTest, IntfsOrchDeleteCreateRetry) + { + // create a interface + std::deque entries; + entries.push_back({"Ethernet0", "SET", { {"mtu", "9100"}}}); + auto consumer = dynamic_cast(gIntfsOrch->getExecutor(APP_INTF_TABLE_NAME)); + consumer->addToSync(entries); + auto current_create_count = create_rif_count; + static_cast(gIntfsOrch)->doTask(); + ASSERT_EQ(current_create_count + 1, create_rif_count); + + // create dependency to the interface + gIntfsOrch->increaseRouterIntfsRefCount("Ethernet0"); + + // delete the interface, expect retry because dependency exists + entries.clear(); + entries.push_back({"Ethernet0", "DEL", { {} }}); + consumer = dynamic_cast(gIntfsOrch->getExecutor(APP_INTF_TABLE_NAME)); + consumer->addToSync(entries); + auto current_remove_count = remove_rif_count; + static_cast(gIntfsOrch)->doTask(); + ASSERT_EQ(current_remove_count, remove_rif_count); + + // create the interface again, expect retry because interface is in removing + entries.clear(); + entries.push_back({"Ethernet0", "SET", { {"mtu", "9100"}}}); + consumer = dynamic_cast(gIntfsOrch->getExecutor(APP_INTF_TABLE_NAME)); + consumer->addToSync(entries); + current_create_count = create_rif_count; + static_cast(gIntfsOrch)->doTask(); + ASSERT_EQ(current_create_count, create_rif_count); + + // remove the dependency, expect delete and 
create a new one + gIntfsOrch->decreaseRouterIntfsRefCount("Ethernet0"); + current_create_count = create_rif_count; + current_remove_count = remove_rif_count; + static_cast(gIntfsOrch)->doTask(); + ASSERT_EQ(current_create_count + 1, create_rif_count); + ASSERT_EQ(current_remove_count + 1, remove_rif_count); + } +} \ No newline at end of file diff --git a/tests/mock_tests/mock_orch_test.h b/tests/mock_tests/mock_orch_test.h new file mode 100644 index 0000000000..eefda42057 --- /dev/null +++ b/tests/mock_tests/mock_orch_test.h @@ -0,0 +1,305 @@ +#define private public +#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "gtest/gtest.h" +#include + +using namespace std; +using ::testing::Return; +using ::testing::Throw; + +namespace mock_orch_test +{ + static const string PEER_SWITCH_HOSTNAME = "peer_hostname"; + static const string PEER_IPV4_ADDRESS = "1.1.1.1"; + static const string ACTIVE_INTERFACE = "Ethernet4"; + static const string STANDBY_INTERFACE = "Ethernet8"; + static const string ACTIVE_STATE = "active"; + static const string STANDBY_STATE = "standby"; + static const string STATE = "state"; + static const string VLAN_1000 = "Vlan1000"; + static const string VLAN_2000 = "Vlan2000"; + static const string SERVER_IP1 = "192.168.0.2"; + static const string SERVER_IP2 = "192.168.0.3"; + static const string MAC1 = "62:f9:65:10:2f:01"; + static const string MAC2 = "62:f9:65:10:2f:02"; + static const string MAC3 = "62:f9:65:10:2f:03"; + + class MockOrchTest: public ::testing::Test + { + protected: + std::vector ut_orch_list; + shared_ptr m_app_db; + shared_ptr m_config_db; + shared_ptr m_state_db; + shared_ptr m_chassis_app_db; + MuxOrch *m_MuxOrch; + MuxCableOrch *m_MuxCableOrch; + MuxCable *m_MuxCable; + TunnelDecapOrch *m_TunnelDecapOrch; + MuxStateOrch *m_MuxStateOrch; + FlexCounterOrch *m_FlexCounterOrch; + VxlanTunnelOrch *m_VxlanTunnelOrch; 
+ + virtual void ApplyInitialConfigs() {} + + void PrepareSai() + { + sai_attribute_t attr; + + attr.id = SAI_SWITCH_ATTR_INIT_SWITCH; + attr.value.booldata = true; + + sai_status_t status = sai_switch_api->create_switch(&gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + // Get switch source MAC address + attr.id = SAI_SWITCH_ATTR_SRC_MAC_ADDRESS; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gMacAddress = attr.value.mac; + + attr.id = SAI_SWITCH_ATTR_DEFAULT_VIRTUAL_ROUTER_ID; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gVirtualRouterId = attr.value.oid; + + /* Create a loopback underlay router interface */ + vector underlay_intf_attrs; + + sai_attribute_t underlay_intf_attr; + underlay_intf_attr.id = SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID; + underlay_intf_attr.value.oid = gVirtualRouterId; + underlay_intf_attrs.push_back(underlay_intf_attr); + + underlay_intf_attr.id = SAI_ROUTER_INTERFACE_ATTR_TYPE; + underlay_intf_attr.value.s32 = SAI_ROUTER_INTERFACE_TYPE_LOOPBACK; + underlay_intf_attrs.push_back(underlay_intf_attr); + + underlay_intf_attr.id = SAI_ROUTER_INTERFACE_ATTR_MTU; + underlay_intf_attr.value.u32 = 9100; + underlay_intf_attrs.push_back(underlay_intf_attr); + + status = sai_router_intfs_api->create_router_interface(&gUnderlayIfId, gSwitchId, (uint32_t)underlay_intf_attrs.size(), underlay_intf_attrs.data()); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + } + + virtual void PostSetUp() {}; + + void SetUp() override + { + map profile = { + { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, + { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } + }; + + ut_helper::initSaiApi(profile); + m_app_db = make_shared("APPL_DB", 0); + m_config_db = make_shared("CONFIG_DB", 0); + m_state_db = make_shared("STATE_DB", 0); + m_chassis_app_db = make_shared("CHASSIS_APP_DB", 0); + + PrepareSai(); + + const int 
portsorch_base_pri = 40; + vector ports_tables = { + { APP_PORT_TABLE_NAME, portsorch_base_pri + 5 }, + { APP_VLAN_TABLE_NAME, portsorch_base_pri + 2 }, + { APP_VLAN_MEMBER_TABLE_NAME, portsorch_base_pri }, + { APP_LAG_TABLE_NAME, portsorch_base_pri + 4 }, + { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } + }; + + vector flex_counter_tables = { + CFG_FLEX_COUNTER_TABLE_NAME + }; + + m_FlexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); + gDirectory.set(m_FlexCounterOrch); + ut_orch_list.push_back((Orch **)&m_FlexCounterOrch); + + static const vector route_pattern_tables = { + CFG_FLOW_COUNTER_ROUTE_PATTERN_TABLE_NAME, + }; + gFlowCounterRouteOrch = new FlowCounterRouteOrch(m_config_db.get(), route_pattern_tables); + gDirectory.set(gFlowCounterRouteOrch); + ut_orch_list.push_back((Orch **)&gFlowCounterRouteOrch); + + gVrfOrch = new VRFOrch(m_app_db.get(), APP_VRF_TABLE_NAME, m_state_db.get(), STATE_VRF_OBJECT_TABLE_NAME); + gDirectory.set(gVrfOrch); + ut_orch_list.push_back((Orch **)&gVrfOrch); + + gIntfsOrch = new IntfsOrch(m_app_db.get(), APP_INTF_TABLE_NAME, gVrfOrch, m_chassis_app_db.get()); + gDirectory.set(gIntfsOrch); + ut_orch_list.push_back((Orch **)&gIntfsOrch); + + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + gDirectory.set(gPortsOrch); + ut_orch_list.push_back((Orch **)&gPortsOrch); + + const int fgnhgorch_pri = 15; + + vector fgnhg_tables = { + { CFG_FG_NHG, fgnhgorch_pri }, + { CFG_FG_NHG_PREFIX, fgnhgorch_pri }, + { CFG_FG_NHG_MEMBER, fgnhgorch_pri } + }; + + gFgNhgOrch = new FgNhgOrch(m_config_db.get(), m_app_db.get(), m_state_db.get(), fgnhg_tables, gNeighOrch, gIntfsOrch, gVrfOrch); + gDirectory.set(gFgNhgOrch); + ut_orch_list.push_back((Orch **)&gFgNhgOrch); + + const int fdborch_pri = 20; + + vector app_fdb_tables = { + { APP_FDB_TABLE_NAME, FdbOrch::fdborch_pri }, + { APP_VXLAN_FDB_TABLE_NAME, FdbOrch::fdborch_pri }, + { APP_MCLAG_FDB_TABLE_NAME, fdborch_pri } 
+ }; + + TableConnector stateDbFdb(m_state_db.get(), STATE_FDB_TABLE_NAME); + TableConnector stateMclagDbFdb(m_state_db.get(), STATE_MCLAG_REMOTE_FDB_TABLE_NAME); + gFdbOrch = new FdbOrch(m_app_db.get(), app_fdb_tables, stateDbFdb, stateMclagDbFdb, gPortsOrch); + gDirectory.set(gFdbOrch); + ut_orch_list.push_back((Orch **)&gFdbOrch); + + gNeighOrch = new NeighOrch(m_app_db.get(), APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassis_app_db.get()); + gDirectory.set(gNeighOrch); + ut_orch_list.push_back((Orch **)&gNeighOrch); + + m_TunnelDecapOrch = new TunnelDecapOrch(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); + gDirectory.set(m_TunnelDecapOrch); + ut_orch_list.push_back((Orch **)&m_TunnelDecapOrch); + vector mux_tables = { + CFG_MUX_CABLE_TABLE_NAME, + CFG_PEER_SWITCH_TABLE_NAME + }; + + vector buffer_tables = { + APP_BUFFER_POOL_TABLE_NAME, + APP_BUFFER_PROFILE_TABLE_NAME, + APP_BUFFER_QUEUE_TABLE_NAME, + APP_BUFFER_PG_TABLE_NAME, + APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, + APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME + }; + gBufferOrch = new BufferOrch(m_app_db.get(), m_config_db.get(), m_state_db.get(), buffer_tables); + + TableConnector stateDbSwitchTable(m_state_db.get(), STATE_SWITCH_CAPABILITY_TABLE_NAME); + TableConnector app_switch_table(m_app_db.get(), APP_SWITCH_TABLE_NAME); + TableConnector conf_asic_sensors(m_config_db.get(), CFG_ASIC_SENSORS_TABLE_NAME); + + vector switch_tables = { + conf_asic_sensors, + app_switch_table + }; + vector policer_tables = { + TableConnector(m_config_db.get(), CFG_POLICER_TABLE_NAME), + TableConnector(m_config_db.get(), CFG_PORT_STORM_CONTROL_TABLE_NAME) + }; + + TableConnector stateDbStorm(m_state_db.get(), STATE_BUM_STORM_CAPABILITY_TABLE_NAME); + gPolicerOrch = new PolicerOrch(policer_tables, gPortsOrch); + gDirectory.set(gPolicerOrch); + ut_orch_list.push_back((Orch **)&gPolicerOrch); + + gSwitchOrch = new SwitchOrch(m_app_db.get(), switch_tables, stateDbSwitchTable); + gDirectory.set(gSwitchOrch); + 
ut_orch_list.push_back((Orch **)&gSwitchOrch); + + gNhgOrch = new NhgOrch(m_app_db.get(), APP_NEXTHOP_GROUP_TABLE_NAME); + gDirectory.set(gNhgOrch); + ut_orch_list.push_back((Orch **)&gNhgOrch); + + vector srv6_tables = { + APP_SRV6_SID_LIST_TABLE_NAME, + APP_SRV6_MY_SID_TABLE_NAME + }; + gSrv6Orch = new Srv6Orch(m_app_db.get(), srv6_tables, gSwitchOrch, gVrfOrch, gNeighOrch); + gDirectory.set(gSrv6Orch); + ut_orch_list.push_back((Orch **)&gSrv6Orch); + gCrmOrch = new CrmOrch(m_config_db.get(), CFG_CRM_TABLE_NAME); + gDirectory.set(gCrmOrch); + ut_orch_list.push_back((Orch **)&gCrmOrch); + + const int routeorch_pri = 5; + vector route_tables = { + { APP_ROUTE_TABLE_NAME, routeorch_pri }, + { APP_LABEL_ROUTE_TABLE_NAME, routeorch_pri } + }; + gRouteOrch = new RouteOrch(m_app_db.get(), route_tables, gSwitchOrch, gNeighOrch, gIntfsOrch, gVrfOrch, gFgNhgOrch, gSrv6Orch); + gDirectory.set(gRouteOrch); + ut_orch_list.push_back((Orch **)&gRouteOrch); + TableConnector stateDbMirrorSession(m_state_db.get(), STATE_MIRROR_SESSION_TABLE_NAME); + TableConnector confDbMirrorSession(m_config_db.get(), CFG_MIRROR_SESSION_TABLE_NAME); + gMirrorOrch = new MirrorOrch(stateDbMirrorSession, confDbMirrorSession, gPortsOrch, gRouteOrch, gNeighOrch, gFdbOrch, gPolicerOrch); + gDirectory.set(gMirrorOrch); + ut_orch_list.push_back((Orch **)&gMirrorOrch); + + TableConnector confDbAclTable(m_config_db.get(), CFG_ACL_TABLE_TABLE_NAME); + TableConnector confDbAclTableType(m_config_db.get(), CFG_ACL_TABLE_TYPE_TABLE_NAME); + TableConnector confDbAclRuleTable(m_config_db.get(), CFG_ACL_RULE_TABLE_NAME); + TableConnector appDbAclTable(m_app_db.get(), APP_ACL_TABLE_TABLE_NAME); + TableConnector appDbAclTableType(m_app_db.get(), APP_ACL_TABLE_TYPE_TABLE_NAME); + TableConnector appDbAclRuleTable(m_app_db.get(), APP_ACL_RULE_TABLE_NAME); + + vector acl_table_connectors = { + confDbAclTableType, + confDbAclTable, + confDbAclRuleTable, + appDbAclTable, + appDbAclRuleTable, + appDbAclTableType, + }; + 
gAclOrch = new AclOrch(acl_table_connectors, m_state_db.get(), + gSwitchOrch, gPortsOrch, gMirrorOrch, gNeighOrch, gRouteOrch, NULL); + gDirectory.set(gAclOrch); + ut_orch_list.push_back((Orch **)&gAclOrch); + + m_MuxOrch = new MuxOrch(m_config_db.get(), mux_tables, m_TunnelDecapOrch, gNeighOrch, gFdbOrch); + gDirectory.set(m_MuxOrch); + ut_orch_list.push_back((Orch **)&m_MuxOrch); + + m_MuxCableOrch = new MuxCableOrch(m_app_db.get(), m_state_db.get(), APP_MUX_CABLE_TABLE_NAME); + gDirectory.set(m_MuxCableOrch); + ut_orch_list.push_back((Orch **)&m_MuxCableOrch); + + m_MuxStateOrch = new MuxStateOrch(m_state_db.get(), STATE_HW_MUX_CABLE_TABLE_NAME); + gDirectory.set(m_MuxStateOrch); + ut_orch_list.push_back((Orch **)&m_MuxStateOrch); + + m_VxlanTunnelOrch = new VxlanTunnelOrch(m_state_db.get(), m_app_db.get(), APP_VXLAN_TUNNEL_TABLE_NAME); + gDirectory.set(m_VxlanTunnelOrch); + ut_orch_list.push_back((Orch **)&m_VxlanTunnelOrch); + + ApplyInitialConfigs(); + PostSetUp(); + } + + virtual void PreTearDown() {}; + + void TearDown() override + { + PreTearDown(); + for (std::vector::reverse_iterator rit = ut_orch_list.rbegin(); rit != ut_orch_list.rend(); ++rit) + { + Orch **orch = *rit; + delete *orch; + *orch = nullptr; + } + + gDirectory.m_values.clear(); + + ut_helper::uninitSaiApi(); + } + }; +} \ No newline at end of file diff --git a/tests/mock_tests/mock_orchagent_main.cpp b/tests/mock_tests/mock_orchagent_main.cpp index 62a03dc770..e709824707 100644 --- a/tests/mock_tests/mock_orchagent_main.cpp +++ b/tests/mock_tests/mock_orchagent_main.cpp @@ -1,6 +1,6 @@ extern "C" { -#include "sai.h" -#include "saistatus.h" +#include +#include } #include "orchdaemon.h" @@ -12,15 +12,6 @@ sai_object_id_t gSwitchId = SAI_NULL_OBJECT_ID; MacAddress gMacAddress; MacAddress gVxlanMacAddress; -#define DEFAULT_BATCH_SIZE 128 -int gBatchSize = DEFAULT_BATCH_SIZE; - -bool gSairedisRecord = true; -bool gSwssRecord = true; -bool gLogRotate = false; -bool gSaiRedisLogRotate = false; 
-ofstream gRecordOfs; -string gRecordFile; string gMySwitchType = "switch"; int32_t gVoqMySwitchId = 0; string gMyHostName = "Linecard1"; diff --git a/tests/mock_tests/mock_orchagent_main.h b/tests/mock_tests/mock_orchagent_main.h index 0acba4ef1c..850bcb7ed2 100644 --- a/tests/mock_tests/mock_orchagent_main.h +++ b/tests/mock_tests/mock_orchagent_main.h @@ -13,6 +13,9 @@ #define private public #include "bufferorch.h" #include "qosorch.h" +#define protected public +#include "pfcwdorch.h" +#undef protected #undef private #include "vrforch.h" #include "vnetorch.h" @@ -24,15 +27,10 @@ #include "muxorch.h" #include "nhgorch.h" #include "copporch.h" +#include "twamporch.h" #include "directory.h" extern int gBatchSize; -extern bool gSwssRecord; -extern bool gSairedisRecord; -extern bool gLogRotate; -extern bool gSaiRedisLogRotate; -extern ofstream gRecordOfs; -extern string gRecordFile; extern MacAddress gMacAddress; extern MacAddress gVxlanMacAddress; @@ -53,14 +51,18 @@ extern FdbOrch *gFdbOrch; extern MirrorOrch *gMirrorOrch; extern BufferOrch *gBufferOrch; extern QosOrch *gQosOrch; +template PfcWdSwOrch *gPfcwdOrch; extern VRFOrch *gVrfOrch; extern NhgOrch *gNhgOrch; extern Srv6Orch *gSrv6Orch; extern BfdOrch *gBfdOrch; +extern AclOrch *gAclOrch; +extern PolicerOrch *gPolicerOrch; extern Directory gDirectory; extern sai_acl_api_t *sai_acl_api; extern sai_switch_api_t *sai_switch_api; +extern sai_hash_api_t *sai_hash_api; extern sai_virtual_router_api_t *sai_virtual_router_api; extern sai_port_api_t *sai_port_api; extern sai_lag_api_t *sai_lag_api; @@ -71,6 +73,7 @@ extern sai_route_api_t *sai_route_api; extern sai_neighbor_api_t *sai_neighbor_api; extern sai_tunnel_api_t *sai_tunnel_api; extern sai_next_hop_api_t *sai_next_hop_api; +extern sai_next_hop_group_api_t *sai_next_hop_group_api; extern sai_hostif_api_t *sai_hostif_api; extern sai_policer_api_t *sai_policer_api; extern sai_buffer_api_t *sai_buffer_api; @@ -83,3 +86,5 @@ extern sai_udf_api_t* sai_udf_api; 
extern sai_mpls_api_t* sai_mpls_api; extern sai_counter_api_t* sai_counter_api; extern sai_samplepacket_api_t *sai_samplepacket_api; +extern sai_fdb_api_t* sai_fdb_api; +extern sai_twamp_api_t* sai_twamp_api; diff --git a/tests/mock_tests/mock_sai_api.cpp b/tests/mock_tests/mock_sai_api.cpp new file mode 100644 index 0000000000..1f7e7e63ef --- /dev/null +++ b/tests/mock_tests/mock_sai_api.cpp @@ -0,0 +1,25 @@ +#include "mock_sai_api.h" + +std::set apply_mock_fns; +std::set remove_mock_fns; + +void MockSaiApis() +{ + if (apply_mock_fns.empty()) + { + EXPECT_TRUE(false) << "No mock application functions found. Did you call DEFINE_SAI_API_MOCK and INIT_SAI_API_MOCK for the necessary SAI object type?"; + } + + for (auto apply_fn : apply_mock_fns) + { + (*apply_fn)(); + } +} + +void RestoreSaiApis() +{ + for (auto remove_fn : remove_mock_fns) + { + (*remove_fn)(); + } +} \ No newline at end of file diff --git a/tests/mock_tests/mock_sai_api.h b/tests/mock_tests/mock_sai_api.h new file mode 100644 index 0000000000..7819b5b126 --- /dev/null +++ b/tests/mock_tests/mock_sai_api.h @@ -0,0 +1,147 @@ +#ifndef MOCK_SAI_API_H +#define MOCK_SAI_API_H +#include "mock_orchagent_main.h" +#include + +/* +To mock a particular SAI API: +1. At the top of the test CPP file using the mock, call DEFINE_SAI_API_MOCK or DEFINE_SAI_GENERIC_API_MOCK + for each SAI API you want to mock. +2. At the top of the test CPP file using the mock, call EXTERN_MOCK_FNS. +3. In the SetUp method of the test class, call INIT_SAI_API_MOCK for each SAI API you want to mock. +4. In the SetUp method of the test class, call MockSaiApis. +5. In the TearDown method of the test class, call RestoreSaiApis. 
+*/ + +using ::testing::Return; +using ::testing::NiceMock; + +#define EXTERN_MOCK_FNS \ + extern std::set apply_mock_fns; \ + extern std::set remove_mock_fns; + +EXTERN_MOCK_FNS + +#define CREATE_PARAMS(sai_object_type) _In_ const sai_##sai_object_type##_entry_t *sai_object_type##_entry, _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list +#define REMOVE_PARAMS(sai_object_type) _In_ const sai_##sai_object_type##_entry_t *sai_object_type##_entry +#define CREATE_ARGS(sai_object_type) sai_object_type##_entry, attr_count, attr_list +#define REMOVE_ARGS(sai_object_type) sai_object_type##_entry +#define GENERIC_CREATE_PARAMS(sai_object_type) _Out_ sai_object_id_t *sai_object_type##_id, _In_ sai_object_id_t switch_id, _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list +#define GENERIC_REMOVE_PARAMS(sai_object_type) _In_ sai_object_id_t sai_object_type##_id +#define GENERIC_CREATE_ARGS(sai_object_type) sai_object_type##_id, switch_id, attr_count, attr_list +#define GENERIC_REMOVE_ARGS(sai_object_type) sai_object_type##_id + +/* +The macro DEFINE_SAI_API_MOCK will perform the steps to mock the SAI API for the sai_object_type it is called on: +1. Create a pointer to store the original API +2. Create a new SAI_API where we can safely mock without affecting the original API +3. Define a class with mocked methods to create and remove the object type (to be used with gMock) +4. Create a pointer of the above class +5. Define two wrapper functions to create and remove the object type that has the same signature as the original SAI API function +6. Define a method to apply the mock +7. 
Define a method to remove the mock +*/ +#define DEFINE_SAI_API_MOCK(sai_object_type) \ + static sai_##sai_object_type##_api_t *old_sai_##sai_object_type##_api; \ + static sai_##sai_object_type##_api_t ut_sai_##sai_object_type##_api; \ + class mock_sai_##sai_object_type##_api_t \ + { \ + public: \ + mock_sai_##sai_object_type##_api_t() \ + { \ + ON_CALL(*this, create_##sai_object_type##_entry) \ + .WillByDefault( \ + [this](CREATE_PARAMS(sai_object_type)) { \ + return old_sai_##sai_object_type##_api->create_##sai_object_type##_entry(CREATE_ARGS(sai_object_type)); \ + }); \ + ON_CALL(*this, remove_##sai_object_type##_entry) \ + .WillByDefault( \ + [this](REMOVE_PARAMS(sai_object_type)) { \ + return old_sai_##sai_object_type##_api->remove_##sai_object_type##_entry(REMOVE_ARGS(sai_object_type)); \ + }); \ + } \ + MOCK_METHOD3(create_##sai_object_type##_entry, sai_status_t(CREATE_PARAMS(sai_object_type))); \ + MOCK_METHOD1(remove_##sai_object_type##_entry, sai_status_t(REMOVE_PARAMS(sai_object_type))); \ + }; \ + static mock_sai_##sai_object_type##_api_t *mock_sai_##sai_object_type##_api; \ + inline sai_status_t mock_create_##sai_object_type##_entry(CREATE_PARAMS(sai_object_type)) \ + { \ + return mock_sai_##sai_object_type##_api->create_##sai_object_type##_entry(CREATE_ARGS(sai_object_type)); \ + } \ + inline sai_status_t mock_remove_##sai_object_type##_entry(REMOVE_PARAMS(sai_object_type)) \ + { \ + return mock_sai_##sai_object_type##_api->remove_##sai_object_type##_entry(REMOVE_ARGS(sai_object_type)); \ + } \ + inline void apply_sai_##sai_object_type##_api_mock() \ + { \ + mock_sai_##sai_object_type##_api = new NiceMock(); \ + \ + old_sai_##sai_object_type##_api = sai_##sai_object_type##_api; \ + ut_sai_##sai_object_type##_api = *sai_##sai_object_type##_api; \ + sai_##sai_object_type##_api = &ut_sai_##sai_object_type##_api; \ + \ + sai_##sai_object_type##_api->create_##sai_object_type##_entry = mock_create_##sai_object_type##_entry; \ + 
sai_##sai_object_type##_api->remove_##sai_object_type##_entry = mock_remove_##sai_object_type##_entry; \ + } \ + inline void remove_sai_##sai_object_type##_api_mock() \ + { \ + sai_##sai_object_type##_api = old_sai_##sai_object_type##_api; \ + delete mock_sai_##sai_object_type##_api; \ + } + +#define DEFINE_SAI_GENERIC_API_MOCK(sai_api_name, sai_object_type) \ + static sai_##sai_api_name##_api_t *old_sai_##sai_api_name##_api; \ + static sai_##sai_api_name##_api_t ut_sai_##sai_api_name##_api; \ + class mock_sai_##sai_api_name##_api_t \ + { \ + public: \ + mock_sai_##sai_api_name##_api_t() \ + { \ + ON_CALL(*this, create_##sai_object_type) \ + .WillByDefault( \ + [this](GENERIC_CREATE_PARAMS(sai_object_type)) { \ + return old_sai_##sai_api_name##_api->create_##sai_object_type(GENERIC_CREATE_ARGS(sai_object_type)); \ + }); \ + ON_CALL(*this, remove_##sai_object_type) \ + .WillByDefault( \ + [this](GENERIC_REMOVE_PARAMS(sai_object_type)) { \ + return old_sai_##sai_api_name##_api->remove_##sai_object_type(GENERIC_REMOVE_ARGS(sai_object_type)); \ + }); \ + } \ + MOCK_METHOD4(create_##sai_object_type, sai_status_t(GENERIC_CREATE_PARAMS(sai_object_type))); \ + MOCK_METHOD1(remove_##sai_object_type, sai_status_t(GENERIC_REMOVE_PARAMS(sai_object_type))); \ + }; \ + static mock_sai_##sai_api_name##_api_t *mock_sai_##sai_api_name##_api; \ + inline sai_status_t mock_create_##sai_object_type(GENERIC_CREATE_PARAMS(sai_object_type)) \ + { \ + return mock_sai_##sai_api_name##_api->create_##sai_object_type(GENERIC_CREATE_ARGS(sai_object_type)); \ + } \ + inline sai_status_t mock_remove_##sai_object_type(GENERIC_REMOVE_PARAMS(sai_object_type)) \ + { \ + return mock_sai_##sai_api_name##_api->remove_##sai_object_type(GENERIC_REMOVE_ARGS(sai_object_type)); \ + } \ + inline void apply_sai_##sai_api_name##_api_mock() \ + { \ + mock_sai_##sai_api_name##_api = new NiceMock(); \ + \ + old_sai_##sai_api_name##_api = sai_##sai_api_name##_api; \ + ut_sai_##sai_api_name##_api = 
*sai_##sai_api_name##_api; \ + sai_##sai_api_name##_api = &ut_sai_##sai_api_name##_api; \ + \ + sai_##sai_api_name##_api->create_##sai_object_type = mock_create_##sai_object_type; \ + sai_##sai_api_name##_api->remove_##sai_object_type = mock_remove_##sai_object_type; \ + } \ + inline void remove_sai_##sai_api_name##_api_mock() \ + { \ + sai_##sai_api_name##_api = old_sai_##sai_api_name##_api; \ + delete mock_sai_##sai_api_name##_api; \ + } + +// Stores pointers to mock apply/remove functions to avoid needing to manually call each function +#define INIT_SAI_API_MOCK(sai_object_type) \ + apply_mock_fns.insert(&apply_sai_##sai_object_type##_api_mock); \ + remove_mock_fns.insert(&remove_sai_##sai_object_type##_api_mock); + +void MockSaiApis(); +void RestoreSaiApis(); +#endif \ No newline at end of file diff --git a/tests/mock_tests/mock_sai_bridge.h b/tests/mock_tests/mock_sai_bridge.h new file mode 100644 index 0000000000..8141ca66bb --- /dev/null +++ b/tests/mock_tests/mock_sai_bridge.h @@ -0,0 +1,34 @@ +// Define classes and functions to mock SAI bridge functions. +#pragma once + +#include + +extern "C" +{ +#include "sai.h" +} + +// Mock class including mock functions mapping to SAI bridge functions. +class MockSaiBridge +{ + public: + MOCK_METHOD4(create_bridge_port, sai_status_t(sai_object_id_t *bridge_port_id, + sai_object_id_t switch_id, + uint32_t attr_count, + const sai_attribute_t *attr_list)); +}; + +// Note that before mock functions below are used, mock_sai_bridge must be +// initialized to point to an instance of MockSaiBridge. 
+MockSaiBridge *mock_sai_bridge; + +sai_status_t mock_create_bridge_port(sai_object_id_t *bridge_port_id, + sai_object_id_t switch_id, + uint32_t attr_count, + const sai_attribute_t *attr_list) +{ + return mock_sai_bridge->create_bridge_port(bridge_port_id, switch_id, attr_count, attr_list); +} + + + diff --git a/tests/mock_tests/mock_subscriberstatetable.cpp b/tests/mock_tests/mock_subscriberstatetable.cpp new file mode 100644 index 0000000000..5548191940 --- /dev/null +++ b/tests/mock_tests/mock_subscriberstatetable.cpp @@ -0,0 +1,30 @@ +#include "subscriberstatetable.h" + +namespace swss +{ + SubscriberStateTable::SubscriberStateTable(DBConnector *db, const std::string &tableName, int popBatchSize, int pri) : + ConsumerTableBase(db, tableName, popBatchSize, pri), + m_table(db, tableName) + { + } + + void SubscriberStateTable::pops(std::deque &vkco, const std::string& /*prefix*/) + { + std::vector keys; + m_table.getKeys(keys); + for (const auto &key: keys) + { + KeyOpFieldsValuesTuple kco; + + kfvKey(kco) = key; + kfvOp(kco) = SET_COMMAND; + + if (!m_table.get(key, kfvFieldsValues(kco))) + { + continue; + } + m_table.del(key); + vkco.push_back(kco); + } + } +} diff --git a/tests/mock_tests/mux_rollback_ut.cpp b/tests/mock_tests/mux_rollback_ut.cpp new file mode 100644 index 0000000000..933c27aca2 --- /dev/null +++ b/tests/mock_tests/mux_rollback_ut.cpp @@ -0,0 +1,246 @@ +#define private public +#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_sai_api.h" +#include "mock_orch_test.h" +#include "gtest/gtest.h" +#include + +EXTERN_MOCK_FNS + +namespace mux_rollback_test +{ + DEFINE_SAI_API_MOCK(neighbor); + DEFINE_SAI_API_MOCK(route); + DEFINE_SAI_GENERIC_API_MOCK(acl, acl_entry); + DEFINE_SAI_GENERIC_API_MOCK(next_hop, next_hop); + using namespace std; + using namespace mock_orch_test; + using ::testing::Return; + using ::testing::Throw; + 
+ static const string TEST_INTERFACE = "Ethernet4"; + + class MuxRollbackTest : public MockOrchTest + { + protected: + void SetMuxStateFromAppDb(std::string state) + { + Table mux_cable_table = Table(m_app_db.get(), APP_MUX_CABLE_TABLE_NAME); + mux_cable_table.set(TEST_INTERFACE, { { STATE, state } }); + m_MuxCableOrch->addExistingData(&mux_cable_table); + static_cast(m_MuxCableOrch)->doTask(); + } + + void SetAndAssertMuxState(std::string state) + { + m_MuxCable->setState(state); + EXPECT_EQ(state, m_MuxCable->getState()); + } + + void ApplyInitialConfigs() + { + Table peer_switch_table = Table(m_config_db.get(), CFG_PEER_SWITCH_TABLE_NAME); + Table tunnel_table = Table(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); + Table mux_cable_table = Table(m_config_db.get(), CFG_MUX_CABLE_TABLE_NAME); + Table port_table = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table vlan_table = Table(m_app_db.get(), APP_VLAN_TABLE_NAME); + Table vlan_member_table = Table(m_app_db.get(), APP_VLAN_MEMBER_TABLE_NAME); + Table neigh_table = Table(m_app_db.get(), APP_NEIGH_TABLE_NAME); + Table intf_table = Table(m_app_db.get(), APP_INTF_TABLE_NAME); + + auto ports = ut_helper::getInitialSaiPorts(); + port_table.set(TEST_INTERFACE, ports[TEST_INTERFACE]); + port_table.set("PortConfigDone", { { "count", to_string(1) } }); + port_table.set("PortInitDone", { {} }); + + neigh_table.set( + VLAN_1000 + neigh_table.getTableNameSeparator() + SERVER_IP1, { { "neigh", "62:f9:65:10:2f:04" }, + { "family", "IPv4" } }); + + vlan_table.set(VLAN_1000, { { "admin_status", "up" }, + { "mtu", "9100" }, + { "mac", "00:aa:bb:cc:dd:ee" } }); + vlan_member_table.set( + VLAN_1000 + vlan_member_table.getTableNameSeparator() + TEST_INTERFACE, + { { "tagging_mode", "untagged" } }); + + intf_table.set(VLAN_1000, { { "grat_arp", "enabled" }, + { "proxy_arp", "enabled" }, + { "mac_addr", "00:00:00:00:00:00" } }); + intf_table.set( + VLAN_1000 + neigh_table.getTableNameSeparator() + "192.168.0.1/21", { + { "scope", 
"global" }, + { "family", "IPv4" }, + }); + + tunnel_table.set(MUX_TUNNEL, { { "dscp_mode", "uniform" }, + { "dst_ip", "2.2.2.2" }, + { "ecn_mode", "copy_from_outer" }, + { "encap_ecn_mode", "standard" }, + { "ttl_mode", "pipe" }, + { "tunnel_type", "IPINIP" } }); + + peer_switch_table.set(PEER_SWITCH_HOSTNAME, { { "address_ipv4", PEER_IPV4_ADDRESS } }); + + mux_cable_table.set(TEST_INTERFACE, { { "server_ipv4", SERVER_IP1 + "/32" }, + { "server_ipv6", "a::a/128" }, + { "state", "auto" } }); + + gPortsOrch->addExistingData(&port_table); + gPortsOrch->addExistingData(&vlan_table); + gPortsOrch->addExistingData(&vlan_member_table); + static_cast(gPortsOrch)->doTask(); + + gIntfsOrch->addExistingData(&intf_table); + static_cast(gIntfsOrch)->doTask(); + + m_TunnelDecapOrch->addExistingData(&tunnel_table); + static_cast(m_TunnelDecapOrch)->doTask(); + + m_MuxOrch->addExistingData(&peer_switch_table); + static_cast(m_MuxOrch)->doTask(); + + m_MuxOrch->addExistingData(&mux_cable_table); + static_cast(m_MuxOrch)->doTask(); + + gNeighOrch->addExistingData(&neigh_table); + static_cast(gNeighOrch)->doTask(); + + m_MuxCable = m_MuxOrch->getMuxCable(TEST_INTERFACE); + + // We always expect the mux to be initialized to standby + EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); + } + + void PostSetUp() override + { + INIT_SAI_API_MOCK(neighbor); + INIT_SAI_API_MOCK(route); + INIT_SAI_API_MOCK(acl); + INIT_SAI_API_MOCK(next_hop); + MockSaiApis(); + } + + void PreTearDown() override + { + RestoreSaiApis(); + } + }; + + TEST_F(MuxRollbackTest, StandbyToActiveNeighborAlreadyExists) + { + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry) + .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); + SetAndAssertMuxState(ACTIVE_STATE); + } + + TEST_F(MuxRollbackTest, ActiveToStandbyNeighborNotFound) + { + SetAndAssertMuxState(ACTIVE_STATE); + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry) + .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); + 
SetAndAssertMuxState(STANDBY_STATE); + } + + TEST_F(MuxRollbackTest, StandbyToActiveRouteNotFound) + { + EXPECT_CALL(*mock_sai_route_api, remove_route_entry) + .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); + SetAndAssertMuxState(ACTIVE_STATE); + } + + TEST_F(MuxRollbackTest, ActiveToStandbyRouteAlreadyExists) + { + SetAndAssertMuxState(ACTIVE_STATE); + EXPECT_CALL(*mock_sai_route_api, create_route_entry) + .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); + SetAndAssertMuxState(STANDBY_STATE); + } + + TEST_F(MuxRollbackTest, StandbyToActiveAclNotFound) + { + EXPECT_CALL(*mock_sai_acl_api, remove_acl_entry) + .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); + SetAndAssertMuxState(ACTIVE_STATE); + } + + TEST_F(MuxRollbackTest, ActiveToStandbyAclAlreadyExists) + { + SetAndAssertMuxState(ACTIVE_STATE); + EXPECT_CALL(*mock_sai_acl_api, create_acl_entry) + .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); + SetAndAssertMuxState(STANDBY_STATE); + } + + TEST_F(MuxRollbackTest, StandbyToActiveNextHopAlreadyExists) + { + EXPECT_CALL(*mock_sai_next_hop_api, create_next_hop) + .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); + SetAndAssertMuxState(ACTIVE_STATE); + } + + TEST_F(MuxRollbackTest, ActiveToStandbyNextHopNotFound) + { + SetAndAssertMuxState(ACTIVE_STATE); + EXPECT_CALL(*mock_sai_next_hop_api, remove_next_hop) + .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); + SetAndAssertMuxState(STANDBY_STATE); + } + + TEST_F(MuxRollbackTest, StandbyToActiveRuntimeErrorRollbackToStandby) + { + EXPECT_CALL(*mock_sai_route_api, remove_route_entry) + .WillOnce(Throw(runtime_error("Mock runtime error"))); + SetMuxStateFromAppDb(ACTIVE_STATE); + EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); + } + + TEST_F(MuxRollbackTest, ActiveToStandbyRuntimeErrorRollbackToActive) + { + SetAndAssertMuxState(ACTIVE_STATE); + EXPECT_CALL(*mock_sai_route_api, create_route_entry) + .WillOnce(Throw(runtime_error("Mock runtime error"))); + SetMuxStateFromAppDb(STANDBY_STATE); + EXPECT_EQ(ACTIVE_STATE, 
m_MuxCable->getState()); + } + + TEST_F(MuxRollbackTest, StandbyToActiveLogicErrorRollbackToStandby) + { + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry) + .WillOnce(Throw(logic_error("Mock logic error"))); + SetMuxStateFromAppDb(ACTIVE_STATE); + EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); + } + + TEST_F(MuxRollbackTest, ActiveToStandbyLogicErrorRollbackToActive) + { + SetAndAssertMuxState(ACTIVE_STATE); + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry) + .WillOnce(Throw(logic_error("Mock logic error"))); + SetMuxStateFromAppDb(STANDBY_STATE); + EXPECT_EQ(ACTIVE_STATE, m_MuxCable->getState()); + } + + TEST_F(MuxRollbackTest, StandbyToActiveExceptionRollbackToStandby) + { + EXPECT_CALL(*mock_sai_next_hop_api, create_next_hop) + .WillOnce(Throw(exception())); + SetMuxStateFromAppDb(ACTIVE_STATE); + EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); + } + + TEST_F(MuxRollbackTest, ActiveToStandbyExceptionRollbackToActive) + { + SetAndAssertMuxState(ACTIVE_STATE); + EXPECT_CALL(*mock_sai_next_hop_api, remove_next_hop) + .WillOnce(Throw(exception())); + SetMuxStateFromAppDb(STANDBY_STATE); + EXPECT_EQ(ACTIVE_STATE, m_MuxCable->getState()); + } +} diff --git a/tests/mock_tests/neighorch_ut.cpp b/tests/mock_tests/neighorch_ut.cpp new file mode 100644 index 0000000000..03957436a6 --- /dev/null +++ b/tests/mock_tests/neighorch_ut.cpp @@ -0,0 +1,198 @@ +#define private public +#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_sai_api.h" +#include "mock_orch_test.h" + + +EXTERN_MOCK_FNS + +namespace neighorch_test +{ + DEFINE_SAI_API_MOCK(neighbor); + using namespace std; + using namespace mock_orch_test; + using ::testing::Return; + using ::testing::Throw; + + static const string TEST_IP = "10.10.10.10"; + static const NeighborEntry VLAN1000_NEIGH = NeighborEntry(TEST_IP, VLAN_1000); + static const NeighborEntry 
VLAN2000_NEIGH = NeighborEntry(TEST_IP, VLAN_2000); + + class NeighOrchTest: public MockOrchTest + { + protected: + void SetAndAssertMuxState(std::string interface, std::string state) + { + MuxCable* muxCable = m_MuxOrch->getMuxCable(interface); + muxCable->setState(state); + EXPECT_EQ(state, muxCable->getState()); + } + + void LearnNeighbor(std::string vlan, std::string ip, std::string mac) + { + Table neigh_table = Table(m_app_db.get(), APP_NEIGH_TABLE_NAME); + string key = vlan + neigh_table.getTableNameSeparator() + ip; + neigh_table.set(key, { { "neigh", mac }, { "family", "IPv4" } }); + gNeighOrch->addExistingData(&neigh_table); + static_cast(gNeighOrch)->doTask(); + neigh_table.del(key); + } + + void ApplyInitialConfigs() + { + Table peer_switch_table = Table(m_config_db.get(), CFG_PEER_SWITCH_TABLE_NAME); + Table tunnel_table = Table(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); + Table mux_cable_table = Table(m_config_db.get(), CFG_MUX_CABLE_TABLE_NAME); + Table port_table = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table vlan_table = Table(m_app_db.get(), APP_VLAN_TABLE_NAME); + Table vlan_member_table = Table(m_app_db.get(), APP_VLAN_MEMBER_TABLE_NAME); + Table neigh_table = Table(m_app_db.get(), APP_NEIGH_TABLE_NAME); + Table intf_table = Table(m_app_db.get(), APP_INTF_TABLE_NAME); + Table fdb_table = Table(m_app_db.get(), APP_FDB_TABLE_NAME); + + auto ports = ut_helper::getInitialSaiPorts(); + port_table.set(ACTIVE_INTERFACE, ports[ACTIVE_INTERFACE]); + port_table.set(STANDBY_INTERFACE, ports[STANDBY_INTERFACE]); + port_table.set("PortConfigDone", { { "count", to_string(1) } }); + port_table.set("PortInitDone", { {} }); + + vlan_table.set(VLAN_1000, { { "admin_status", "up" }, + { "mtu", "9100" }, + { "mac", "00:aa:bb:cc:dd:ee" } }); + vlan_table.set(VLAN_2000, { { "admin_status", "up"}, + { "mtu", "9100" }, + { "mac", "aa:11:bb:22:cc:33" } }); + vlan_member_table.set( + VLAN_1000 + vlan_member_table.getTableNameSeparator() + ACTIVE_INTERFACE, + { 
{ "tagging_mode", "untagged" } }); + + vlan_member_table.set( + VLAN_2000 + vlan_member_table.getTableNameSeparator() + STANDBY_INTERFACE, + { { "tagging_mode", "untagged" } }); + + intf_table.set(VLAN_1000, { { "grat_arp", "enabled" }, + { "proxy_arp", "enabled" }, + { "mac_addr", "00:00:00:00:00:00" } }); + + intf_table.set(VLAN_2000, { { "grat_arp", "enabled" }, + { "proxy_arp", "enabled" }, + { "mac_addr", "00:00:00:00:00:00" } }); + + intf_table.set( + VLAN_1000 + neigh_table.getTableNameSeparator() + "192.168.0.1/24", { + { "scope", "global" }, + { "family", "IPv4" }, + }); + + intf_table.set( + VLAN_2000 + neigh_table.getTableNameSeparator() + "192.168.2.1/24", { + { "scope", "global" }, + { "family", "IPv4" }, + }); + tunnel_table.set(MUX_TUNNEL, { { "dscp_mode", "uniform" }, + { "dst_ip", "2.2.2.2" }, + { "ecn_mode", "copy_from_outer" }, + { "encap_ecn_mode", "standard" }, + { "ttl_mode", "pipe" }, + { "tunnel_type", "IPINIP" } }); + + peer_switch_table.set(PEER_SWITCH_HOSTNAME, { { "address_ipv4", PEER_IPV4_ADDRESS } }); + + mux_cable_table.set(ACTIVE_INTERFACE, { { "server_ipv4", SERVER_IP1 + "/32" }, + { "server_ipv6", "a::a/128" }, + { "state", "auto" } }); + + mux_cable_table.set(STANDBY_INTERFACE, { { "server_ipv4", SERVER_IP2+ "/32" }, + { "server_ipv6", "a::b/128" }, + { "state", "auto" } }); + + gPortsOrch->addExistingData(&port_table); + gPortsOrch->addExistingData(&vlan_table); + gPortsOrch->addExistingData(&vlan_member_table); + static_cast(gPortsOrch)->doTask(); + + gIntfsOrch->addExistingData(&intf_table); + static_cast(gIntfsOrch)->doTask(); + + m_TunnelDecapOrch->addExistingData(&tunnel_table); + static_cast(m_TunnelDecapOrch)->doTask(); + + m_MuxOrch->addExistingData(&peer_switch_table); + static_cast(m_MuxOrch)->doTask(); + + m_MuxOrch->addExistingData(&mux_cable_table); + static_cast(m_MuxOrch)->doTask(); + + fdb_table.set( + VLAN_1000 + fdb_table.getTableNameSeparator() + MAC1, + { { "type", "dynamic" }, + { "port", ACTIVE_INTERFACE } 
}); + + fdb_table.set( + VLAN_2000 + fdb_table.getTableNameSeparator() + MAC2, + { { "type", "dynamic" }, + { "port", STANDBY_INTERFACE} }); + + fdb_table.set( + VLAN_1000 + fdb_table.getTableNameSeparator() + MAC3, + { { "type", "dynamic" }, + { "port", ACTIVE_INTERFACE} }); + + gFdbOrch->addExistingData(&fdb_table); + static_cast(gFdbOrch)->doTask(); + + SetAndAssertMuxState(ACTIVE_INTERFACE, ACTIVE_STATE); + SetAndAssertMuxState(STANDBY_INTERFACE, STANDBY_STATE); + } + + void PostSetUp() override + { + INIT_SAI_API_MOCK(neighbor); + MockSaiApis(); + } + + void PreTearDown() override + { + RestoreSaiApis(); + } + }; + + TEST_F(NeighOrchTest, MultiVlanIpLearning) + { + + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); + LearnNeighbor(VLAN_1000, TEST_IP, MAC1); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN1000_NEIGH), 1); + + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry); + LearnNeighbor(VLAN_2000, TEST_IP, MAC2); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN1000_NEIGH), 0); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN2000_NEIGH), 1); + + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); + LearnNeighbor(VLAN_1000, TEST_IP, MAC3); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN1000_NEIGH), 1); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN2000_NEIGH), 0); + } + + TEST_F(NeighOrchTest, MultiVlanUnableToRemoveNeighbor) + { + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); + LearnNeighbor(VLAN_1000, TEST_IP, MAC1); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN1000_NEIGH), 1); + NextHopKey nexthop = { TEST_IP, VLAN_1000 }; + gNeighOrch->m_syncdNextHops[nexthop].ref_count = 1; + + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry).Times(0); + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry).Times(0); + LearnNeighbor(VLAN_2000, TEST_IP, MAC2); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN1000_NEIGH), 1); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN2000_NEIGH), 
0); + } +} diff --git a/tests/mock_tests/orchdaemon_ut.cpp b/tests/mock_tests/orchdaemon_ut.cpp new file mode 100644 index 0000000000..a107b6ba6d --- /dev/null +++ b/tests/mock_tests/orchdaemon_ut.cpp @@ -0,0 +1,52 @@ +#include "orchdaemon.h" +#include "dbconnector.h" +#include +#include +#include "mock_sai_switch.h" + +extern sai_switch_api_t* sai_switch_api; +sai_switch_api_t test_sai_switch; + +namespace orchdaemon_test +{ + + using ::testing::_; + using ::testing::Return; + using ::testing::StrictMock; + + DBConnector appl_db("APPL_DB", 0); + DBConnector state_db("STATE_DB", 0); + DBConnector config_db("CONFIG_DB", 0); + DBConnector counters_db("COUNTERS_DB", 0); + + class OrchDaemonTest : public ::testing::Test + { + public: + StrictMock mock_sai_switch_; + + OrchDaemon* orchd; + + OrchDaemonTest() + { + mock_sai_switch = &mock_sai_switch_; + sai_switch_api = &test_sai_switch; + sai_switch_api->get_switch_attribute = &mock_get_switch_attribute; + sai_switch_api->set_switch_attribute = &mock_set_switch_attribute; + + orchd = new OrchDaemon(&appl_db, &config_db, &state_db, &counters_db, nullptr); + + }; + + ~OrchDaemonTest() + { + sai_switch_api = nullptr; + }; + }; + + TEST_F(OrchDaemonTest, logRotate) + { + EXPECT_CALL(mock_sai_switch_, set_switch_attribute( _, _)).WillOnce(Return(SAI_STATUS_SUCCESS)); + + orchd->logRotate(); + } +} diff --git a/tests/mock_tests/portal.h b/tests/mock_tests/portal.h index 8f0c4ab2db..31fa4ac4b7 100644 --- a/tests/mock_tests/portal.h +++ b/tests/mock_tests/portal.h @@ -7,6 +7,7 @@ #include "crmorch.h" #include "copporch.h" #include "sfloworch.h" +#include "twamporch.h" #include "directory.h" #undef protected @@ -81,6 +82,11 @@ struct Portal obj.getTrapIdsFromTrapGroup(trapGroupOid, trapIdList); return trapIdList; } + + static task_process_status processCoppRule(CoppOrch &obj, Consumer& processCoppRule) + { + return obj.processCoppRule(processCoppRule); + } }; struct SflowOrchInternal @@ -101,6 +107,19 @@ struct Portal } }; + 
struct TwampOrchInternal + { + static bool getTwampSessionStatus(TwampOrch &obj, const string &name, string& status) + { + return obj.getSessionStatus(name, status); + } + + static TwampStatsTable getTwampSessionStatistics(TwampOrch &obj) + { + return obj.m_twampStatistics; + } + }; + struct DirectoryInternal { template diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp index 012203c749..968a578d44 100644 --- a/tests/mock_tests/portsorch_ut.cpp +++ b/tests/mock_tests/portsorch_ut.cpp @@ -7,22 +7,37 @@ #include "mock_orchagent_main.h" #include "mock_table.h" #include "notifier.h" +#include "mock_sai_bridge.h" #define private public #include "pfcactionhandler.h" +#include "switchorch.h" +#include +#undef private +#define private public +#include "warm_restart.h" #undef private #include extern redisReply *mockReply; +using ::testing::_; +using ::testing::StrictMock; namespace portsorch_test { using namespace std; + // SAI default ports + std::map> defaultPortList; + sai_port_api_t ut_sai_port_api; sai_port_api_t *pold_sai_port_api; + sai_switch_api_t ut_sai_switch_api; + sai_switch_api_t *pold_sai_switch_api; bool not_support_fetching_fec; + uint32_t _sai_set_port_fec_count; + int32_t _sai_port_fec_mode; vector mock_port_fec_modes = {SAI_PORT_FEC_MODE_RS, SAI_PORT_FEC_MODE_FC}; sai_status_t _ut_stub_sai_get_port_attribute( @@ -48,6 +63,16 @@ namespace portsorch_test status = SAI_STATUS_SUCCESS; } } + else if (attr_count == 1 && attr_list[0].id == SAI_PORT_ATTR_OPER_PORT_FEC_MODE) + { + attr_list[0].value.s32 = _sai_port_fec_mode; + status = SAI_STATUS_SUCCESS; + } + else if (attr_count== 1 && attr_list[0].id == SAI_PORT_ATTR_OPER_STATUS) + { + attr_list[0].value.u32 = (uint32_t)SAI_PORT_OPER_STATUS_UP; + status = SAI_STATUS_SUCCESS; + } else { status = pold_sai_port_api->get_port_attribute(port_id, attr_count, attr_list); @@ -55,8 +80,9 @@ namespace portsorch_test return status; } - uint32_t _sai_set_port_fec_count; - int32_t 
_sai_port_fec_mode; + uint32_t _sai_set_pfc_mode_count; + uint32_t _sai_set_admin_state_up_count; + uint32_t _sai_set_admin_state_down_count; sai_status_t _ut_stub_sai_set_port_attribute( _In_ sai_object_id_t port_id, _In_ const sai_attribute_t *attr) { @@ -66,9 +92,47 @@ namespace portsorch_test _sai_set_port_fec_count++; _sai_port_fec_mode = attr[0].value.s32; } + else if (attr[0].id == SAI_PORT_ATTR_AUTO_NEG_MODE) + { + /* Simulating failure case */ + return SAI_STATUS_FAILURE; + } + else if (attr[0].id == SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_COMBINED) + { + _sai_set_pfc_mode_count++; + } + else if (attr[0].id == SAI_PORT_ATTR_ADMIN_STATE) + { + if (attr[0].value.booldata) { + _sai_set_admin_state_up_count++; + } else { + _sai_set_admin_state_down_count++; + } + } return pold_sai_port_api->set_port_attribute(port_id, attr); } + uint32_t *_sai_syncd_notifications_count; + int32_t *_sai_syncd_notification_event; + uint32_t _sai_switch_dlr_packet_action_count; + uint32_t _sai_switch_dlr_packet_action; + sai_status_t _ut_stub_sai_set_switch_attribute( + _In_ sai_object_id_t switch_id, + _In_ const sai_attribute_t *attr) + { + if (attr[0].id == SAI_REDIS_SWITCH_ATTR_NOTIFY_SYNCD) + { + *_sai_syncd_notifications_count += 1; + *_sai_syncd_notification_event = attr[0].value.s32; + } + else if (attr[0].id == SAI_SWITCH_ATTR_PFC_DLR_PACKET_ACTION) + { + _sai_switch_dlr_packet_action_count++; + _sai_switch_dlr_packet_action = attr[0].value.s32; + } + return pold_sai_switch_api->set_switch_attribute(switch_id, attr); + } + void _hook_sai_port_api() { ut_sai_port_api = *sai_port_api; @@ -82,16 +146,29 @@ namespace portsorch_test { sai_port_api = pold_sai_port_api; } - + + void _hook_sai_switch_api() + { + ut_sai_switch_api = *sai_switch_api; + pold_sai_switch_api = sai_switch_api; + ut_sai_switch_api.set_switch_attribute = _ut_stub_sai_set_switch_attribute; + sai_switch_api = &ut_sai_switch_api; + } + + void _unhook_sai_switch_api() + { + sai_switch_api = pold_sai_switch_api; + 
} + sai_queue_api_t ut_sai_queue_api; sai_queue_api_t *pold_sai_queue_api; int _sai_set_queue_attr_count = 0; - + sai_status_t _ut_stub_sai_set_queue_attribute(sai_object_id_t queue_id, const sai_attribute_t *attr) { if(attr->id == SAI_QUEUE_ATTR_PFC_DLR_INIT) { - if(attr->value.booldata == true) + if(attr->value.booldata == true) { _sai_set_queue_attr_count++; } @@ -103,17 +180,96 @@ namespace portsorch_test return SAI_STATUS_SUCCESS; } + uint32_t _sai_get_queue_attr_count; + bool _sai_mock_queue_attr = false; + sai_status_t _ut_stub_sai_get_queue_attribute( + _In_ sai_object_id_t queue_id, + _In_ uint32_t attr_count, + _Inout_ sai_attribute_t *attr_list) + { + if (_sai_mock_queue_attr) + { + _sai_get_queue_attr_count++; + for (auto i = 0u; i < attr_count; i++) + { + if (attr_list[i].id == SAI_QUEUE_ATTR_TYPE) + { + attr_list[i].value.s32 = static_cast(SAI_QUEUE_TYPE_UNICAST); + } + else if (attr_list[i].id == SAI_QUEUE_ATTR_INDEX) + { + attr_list[i].value.u8 = 0; + } + else + { + pold_sai_queue_api->get_queue_attribute(queue_id, 1, &attr_list[i]); + } + } + } + + return SAI_STATUS_SUCCESS; + } + void _hook_sai_queue_api() { + _sai_mock_queue_attr = true; ut_sai_queue_api = *sai_queue_api; pold_sai_queue_api = sai_queue_api; ut_sai_queue_api.set_queue_attribute = _ut_stub_sai_set_queue_attribute; + ut_sai_queue_api.get_queue_attribute = _ut_stub_sai_get_queue_attribute; sai_queue_api = &ut_sai_queue_api; } void _unhook_sai_queue_api() { sai_queue_api = pold_sai_queue_api; + _sai_mock_queue_attr = false; + } + + sai_bridge_api_t ut_sai_bridge_api; + sai_bridge_api_t *org_sai_bridge_api; + + void _hook_sai_bridge_api() + { + ut_sai_bridge_api = *sai_bridge_api; + org_sai_bridge_api = sai_bridge_api; + sai_bridge_api = &ut_sai_bridge_api; + } + + void _unhook_sai_bridge_api() + { + sai_bridge_api = org_sai_bridge_api; + } + + void cleanupPorts(PortsOrch *obj) + { + // Get CPU port + Port p; + obj->getCpuPort(p); + + // Get port list + auto portList = 
obj->getAllPorts(); + portList.erase(p.m_alias); + + // Generate port config + std::deque kfvList; + + for (const auto &cit : portList) + { + kfvList.push_back({ cit.first, DEL_COMMAND, { } }); + } + + // Refill consumer + auto consumer = dynamic_cast(obj->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(kfvList); + + // Apply configuration + static_cast(obj)->doTask(); + + // Dump pending tasks + std::vector taskList; + obj->dumpPendingTasks(taskList); + ASSERT_TRUE(taskList.empty()); } struct PortsOrchTest : public ::testing::Test @@ -147,11 +303,23 @@ namespace portsorch_test ::testing_db::reset(); // Create dependencies ... + TableConnector stateDbSwitchTable(m_state_db.get(), "SWITCH_CAPABILITY"); + TableConnector app_switch_table(m_app_db.get(), APP_SWITCH_TABLE_NAME); + TableConnector conf_asic_sensors(m_config_db.get(), CFG_ASIC_SENSORS_TABLE_NAME); + + vector switch_tables = { + conf_asic_sensors, + app_switch_table + }; + + ASSERT_EQ(gSwitchOrch, nullptr); + gSwitchOrch = new SwitchOrch(m_app_db.get(), switch_tables, stateDbSwitchTable); const int portsorch_base_pri = 40; vector ports_tables = { { APP_PORT_TABLE_NAME, portsorch_base_pri + 5 }, + { APP_SEND_TO_INGRESS_PORT_TABLE_NAME, portsorch_base_pri + 5 }, { APP_VLAN_TABLE_NAME, portsorch_base_pri + 2 }, { APP_VLAN_MEMBER_TABLE_NAME, portsorch_base_pri }, { APP_LAG_TABLE_NAME, portsorch_base_pri + 4 }, @@ -196,6 +364,62 @@ namespace portsorch_test ASSERT_EQ(gNeighOrch, nullptr); gNeighOrch = new NeighOrch(m_app_db.get(), APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassis_app_db.get()); + + vector qos_tables = { + CFG_TC_TO_QUEUE_MAP_TABLE_NAME, + CFG_SCHEDULER_TABLE_NAME, + CFG_DSCP_TO_TC_MAP_TABLE_NAME, + CFG_MPLS_TC_TO_TC_MAP_TABLE_NAME, + CFG_DOT1P_TO_TC_MAP_TABLE_NAME, + CFG_QUEUE_TABLE_NAME, + CFG_PORT_QOS_MAP_TABLE_NAME, + CFG_WRED_PROFILE_TABLE_NAME, + CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, + CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, + 
CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, + CFG_DSCP_TO_FC_MAP_TABLE_NAME, + CFG_EXP_TO_FC_MAP_TABLE_NAME, + CFG_TC_TO_DSCP_MAP_TABLE_NAME + }; + gQosOrch = new QosOrch(m_config_db.get(), qos_tables); + + vector pfc_wd_tables = { + CFG_PFC_WD_TABLE_NAME + }; + + static const vector portStatIds = + { + SAI_PORT_STAT_PFC_0_RX_PKTS, + SAI_PORT_STAT_PFC_1_RX_PKTS, + SAI_PORT_STAT_PFC_2_RX_PKTS, + SAI_PORT_STAT_PFC_3_RX_PKTS, + SAI_PORT_STAT_PFC_4_RX_PKTS, + SAI_PORT_STAT_PFC_5_RX_PKTS, + SAI_PORT_STAT_PFC_6_RX_PKTS, + SAI_PORT_STAT_PFC_7_RX_PKTS, + SAI_PORT_STAT_PFC_0_ON2OFF_RX_PKTS, + SAI_PORT_STAT_PFC_1_ON2OFF_RX_PKTS, + SAI_PORT_STAT_PFC_2_ON2OFF_RX_PKTS, + SAI_PORT_STAT_PFC_3_ON2OFF_RX_PKTS, + SAI_PORT_STAT_PFC_4_ON2OFF_RX_PKTS, + SAI_PORT_STAT_PFC_5_ON2OFF_RX_PKTS, + SAI_PORT_STAT_PFC_6_ON2OFF_RX_PKTS, + SAI_PORT_STAT_PFC_7_ON2OFF_RX_PKTS, + }; + + static const vector queueStatIds = + { + SAI_QUEUE_STAT_PACKETS, + SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES, + }; + + static const vector queueAttrIds = + { + SAI_QUEUE_ATTR_PAUSE_STATUS, + }; + ASSERT_EQ((gPfcwdOrch), nullptr); + gPfcwdOrch = new PfcWdSwOrch(m_config_db.get(), pfc_wd_tables, portStatIds, queueStatIds, queueAttrIds, 100); + } virtual void TearDown() override @@ -218,10 +442,17 @@ namespace portsorch_test gPortsOrch = nullptr; delete gBufferOrch; gBufferOrch = nullptr; + delete gPfcwdOrch; + gPfcwdOrch = nullptr; + delete gQosOrch; + gQosOrch = nullptr; + delete gSwitchOrch; + gSwitchOrch = nullptr; // clear orchs saved in directory gDirectory.m_values.clear(); } + static void SetUpTestCase() { // Init switch and create dependencies @@ -257,6 +488,10 @@ namespace portsorch_test ASSERT_EQ(status, SAI_STATUS_SUCCESS); gVirtualRouterId = attr.value.oid; + + // Get SAI default ports + defaultPortList = ut_helper::getInitialSaiPorts(); + ASSERT_TRUE(!defaultPortList.empty()); } static void TearDownTestCase() @@ -270,6 +505,597 @@ namespace portsorch_test }; + /* + * Test port flap count + */ + 
TEST_F(PortsOrchTest, PortFlapCount) + { + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate port table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone, PortInitDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + portTable.set("PortInitDone", { { "lanes", "0" } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + // Apply configuration : create ports + static_cast(gPortsOrch)->doTask(); + + // Get first port, expect the oper status is not UP + Port port; + gPortsOrch->getPort("Ethernet0", port); + ASSERT_TRUE(port.m_oper_status != SAI_PORT_OPER_STATUS_UP); + ASSERT_TRUE(port.m_flap_count == 0); + + auto exec = static_cast(gPortsOrch->getExecutor("PORT_STATUS_NOTIFICATIONS")); + auto consumer = exec->getNotificationConsumer(); + + // mock a redis reply for notification, it notifies that Ethernet0 is going up + for (uint32_t count=0; count < 5; count++) { + sai_port_oper_status_t oper_status = (count % 2 == 0) ? 
SAI_PORT_OPER_STATUS_UP : SAI_PORT_OPER_STATUS_DOWN; + mockReply = (redisReply *)calloc(sizeof(redisReply), 1); + mockReply->type = REDIS_REPLY_ARRAY; + mockReply->elements = 3; // REDIS_PUBLISH_MESSAGE_ELEMNTS + mockReply->element = (redisReply **)calloc(sizeof(redisReply *), mockReply->elements); + mockReply->element[2] = (redisReply *)calloc(sizeof(redisReply), 1); + mockReply->element[2]->type = REDIS_REPLY_STRING; + sai_port_oper_status_notification_t port_oper_status; + port_oper_status.port_state = oper_status; + port_oper_status.port_id = port.m_port_id; + std::string data = sai_serialize_port_oper_status_ntf(1, &port_oper_status); + std::vector notifyValues; + FieldValueTuple opdata("port_state_change", data); + notifyValues.push_back(opdata); + std::string msg = swss::JSon::buildJson(notifyValues); + mockReply->element[2]->str = (char*)calloc(1, msg.length() + 1); + memcpy(mockReply->element[2]->str, msg.c_str(), msg.length()); + + // trigger the notification + consumer->readData(); + gPortsOrch->doTask(*consumer); + mockReply = nullptr; + + gPortsOrch->getPort("Ethernet0", port); + ASSERT_TRUE(port.m_oper_status == oper_status); + ASSERT_TRUE(port.m_flap_count == count+1); + } + + cleanupPorts(gPortsOrch); + } + + TEST_F(PortsOrchTest, PortBulkCreateRemove) + { + auto portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports + auto &ports = defaultPortList; + ASSERT_TRUE(!ports.empty()); + + // Generate port config + for (std::uint32_t idx1 = 0, idx2 = 1; idx1 < ports.size() * 4; idx1 += 4, idx2++) + { + std::stringstream key; + key << FRONT_PANEL_PORT_PREFIX << idx1; + + std::stringstream alias; + alias << "etp" << idx2; + + std::stringstream index; + index << idx2; + + std::stringstream lanes; + lanes << idx1 << "," << idx1 + 1 << "," << idx1 + 2 << "," << idx1 + 3; + + std::vector fvList = { + { "alias", alias.str() }, + { "index", index.str() }, + { "lanes", lanes.str() }, + { "speed", "100000" }, + { "autoneg", "off" }, + 
{ "adv_speeds", "all" }, + { "interface_type", "none" }, + { "adv_interface_types", "all" }, + { "fec", "rs" }, + { "mtu", "9100" }, + { "tpid", "0x8100" }, + { "pfc_asym", "off" }, + { "admin_status", "up" }, + { "description", "FP port" } + }; + + portTable.set(key.str(), fvList); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", std::to_string(ports.size()) } }); + + // Refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration + static_cast(gPortsOrch)->doTask(); + + // Port count: 32 Data + 1 CPU + ASSERT_EQ(gPortsOrch->getAllPorts().size(), ports.size() + 1); + + // Dump pending tasks + std::vector taskList; + gPortsOrch->dumpPendingTasks(taskList); + ASSERT_TRUE(taskList.empty()); + + // Cleanup ports + cleanupPorts(gPortsOrch); + } + + TEST_F(PortsOrchTest, PortBasicConfig) + { + auto portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports + auto &ports = defaultPortList; + ASSERT_TRUE(!ports.empty()); + + // Generate port config + for (const auto &cit : ports) + { + portTable.set(cit.first, cit.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", std::to_string(ports.size()) } }); + + // Refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration + static_cast(gPortsOrch)->doTask(); + + // Port count: 32 Data + 1 CPU + ASSERT_EQ(gPortsOrch->getAllPorts().size(), ports.size() + 1); + + // Generate port config + std::deque kfvList = {{ + "Ethernet0", + SET_COMMAND, { + { "speed", "100000" }, + { "autoneg", "on" }, + { "adv_speeds", "1000,10000,100000" }, + { "interface_type", "CR" }, + { "adv_interface_types", "CR,CR2,CR4,CR8" }, + { "fec", "fc" }, + { "mtu", "9100" }, + { "tpid", "0x9100" }, + { "pfc_asym", "on" }, + { "link_training", "on" }, + { "admin_status", "up" } + } + }}; + + // Refill consumer + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(kfvList); + + // 
Apply configuration + static_cast(gPortsOrch)->doTask(); + + // Get port + Port p; + ASSERT_TRUE(gPortsOrch->getPort("Ethernet0", p)); + + // Verify speed + ASSERT_EQ(p.m_speed, 100000); + + // Verify auto-negotiation + ASSERT_TRUE(p.m_autoneg); + + // Verify advertised speed + std::set adv_speeds = { 1000, 10000, 100000 }; + ASSERT_EQ(p.m_adv_speeds, adv_speeds); + + // Verify interface type + ASSERT_EQ(p.m_interface_type, SAI_PORT_INTERFACE_TYPE_CR); + + // Verify advertised interface type + std::set adv_interface_types = { + SAI_PORT_INTERFACE_TYPE_CR, + SAI_PORT_INTERFACE_TYPE_CR2, + SAI_PORT_INTERFACE_TYPE_CR4, + SAI_PORT_INTERFACE_TYPE_CR8 + }; + ASSERT_EQ(p.m_adv_interface_types, adv_interface_types); + + // Verify FEC + ASSERT_EQ(p.m_fec_mode, SAI_PORT_FEC_MODE_FC); + + // Verify MTU + ASSERT_EQ(p.m_mtu, 9100); + + // Verify TPID + ASSERT_EQ(p.m_tpid, 0x9100); + + // Verify asymmetric PFC + ASSERT_EQ(p.m_pfc_asym, SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_SEPARATE); + + // Verify link training + ASSERT_TRUE(p.m_link_training); + + // Verify admin status + ASSERT_TRUE(p.m_admin_state_up); + + // Dump pending tasks + std::vector taskList; + gPortsOrch->dumpPendingTasks(taskList); + ASSERT_TRUE(taskList.empty()); + + // Cleanup ports + cleanupPorts(gPortsOrch); + } + + TEST_F(PortsOrchTest, PortAdvancedConfig) + { + auto portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports + auto &ports = defaultPortList; + ASSERT_TRUE(!ports.empty()); + + // Generate port config + for (const auto &cit : ports) + { + portTable.set(cit.first, cit.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", std::to_string(ports.size()) } }); + + // Refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration + static_cast(gPortsOrch)->doTask(); + + // Port count: 32 Data + 1 CPU + ASSERT_EQ(gPortsOrch->getAllPorts().size(), ports.size() + 1); + + // Generate port serdes config + std::deque kfvList = {{ + 
"Ethernet0", + SET_COMMAND, { + { "preemphasis", "0xcad0,0xc6e0,0xc6e0,0xd2b0" }, + { "idriver", "0x5,0x3,0x4,0x1" }, + { "ipredriver", "0x1,0x4,0x3,0x5" }, + { "pre1", "0xfff0,0xfff2,0xfff1,0xfff3" }, + { "pre2", "0xfff0,0xfff2,0xfff1,0xfff3" }, + { "pre3", "0xfff0,0xfff2,0xfff1,0xfff3" }, + { "main", "0x90,0x92,0x91,0x93" }, + { "post1", "0x10,0x12,0x11,0x13" }, + { "post2", "0x10,0x12,0x11,0x13" }, + { "post3", "0x10,0x12,0x11,0x13" }, + { "attn", "0x80,0x82,0x81,0x83" }, + { "ob_m2lp", "0x4,0x6,0x5,0x7" }, + { "ob_alev_out", "0xf,0x11,0x10,0x12" }, + { "obplev", "0x69,0x6b,0x6a,0x6c" }, + { "obnlev", "0x5f,0x61,0x60,0x62" }, + { "regn_bfm1p", "0x1e,0x20,0x1f,0x21" }, + { "regn_bfm1n", "0xaa,0xac,0xab,0xad" } + } + }}; + + // Refill consumer + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(kfvList); + + // Apply configuration + static_cast(gPortsOrch)->doTask(); + + // Get port + Port p; + ASSERT_TRUE(gPortsOrch->getPort("Ethernet0", p)); + + // Verify preemphasis + std::vector preemphasis = { 0xcad0, 0xc6e0, 0xc6e0, 0xd2b0 }; + ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_PREEMPHASIS), preemphasis); + + // Verify idriver + std::vector idriver = { 0x5, 0x3, 0x4, 0x1 }; + ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_IDRIVER), idriver); + + // Verify ipredriver + std::vector ipredriver = { 0x1, 0x4, 0x3, 0x5 }; + ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_IPREDRIVER), ipredriver); + + // Verify pre1 + std::vector pre1 = { 0xfff0, 0xfff2, 0xfff1, 0xfff3 }; + ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_PRE1), pre1); + + // Verify pre2 + std::vector pre2 = { 0xfff0, 0xfff2, 0xfff1, 0xfff3 }; + ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_PRE2), pre2); + + // Verify pre3 + std::vector pre3 = { 0xfff0, 0xfff2, 0xfff1, 0xfff3 }; + ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_PRE3), pre3); + + // Verify main + std::vector main = { 0x90, 0x92, 0x91, 0x93 }; + 
ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_MAIN), main); + + // Verify post1 + std::vector post1 = { 0x10, 0x12, 0x11, 0x13 }; + ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_POST1), post1); + + // Verify post2 + std::vector post2 = { 0x10, 0x12, 0x11, 0x13 }; + ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_POST2), post2); + + // Verify post3 + std::vector post3 = { 0x10, 0x12, 0x11, 0x13 }; + ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_POST3), post3); + + // Verify attn + std::vector attn = { 0x80, 0x82, 0x81, 0x83 }; + ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_ATTN), attn); + + // Verify ob_m2lp + std::vector ob_m2lp = { 0x4, 0x6, 0x5, 0x7 }; + ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_PAM4_RATIO), ob_m2lp); + + // Verify ob_alev_out + std::vector ob_alev_out = { 0xf, 0x11, 0x10, 0x12 }; + ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_OUT_COMMON_MODE), ob_alev_out); + + // Verify obplev + std::vector obplev = { 0x69, 0x6b, 0x6a, 0x6c }; + ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_PMOS_COMMON_MODE), obplev); + + // Verify obnlev + std::vector obnlev = { 0x5f, 0x61, 0x60, 0x62 }; + ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_NMOS_COMMON_MODE), obnlev); + + // Verify regn_bfm1p + std::vector regn_bfm1p = { 0x1e, 0x20, 0x1f, 0x21 }; + ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_PMOS_VLTG_REG), regn_bfm1p); + + // Verify regn_bfm1n + std::vector regn_bfm1n = { 0xaa, 0xac, 0xab, 0xad }; + ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_NMOS_VLTG_REG), regn_bfm1n); + + // Dump pending tasks + std::vector taskList; + gPortsOrch->dumpPendingTasks(taskList); + ASSERT_TRUE(taskList.empty()); + + // Cleanup ports + cleanupPorts(gPortsOrch); + } + + /** + * Test that verifies admin-disable then admin-enable during setPortSerdesAttribute() + */ + TEST_F(PortsOrchTest, PortSerdesConfig) + { + auto portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // Get 
SAI default ports + auto &ports = defaultPortList; + ASSERT_TRUE(!ports.empty()); + + // Generate port config + for (const auto &cit : ports) + { + portTable.set(cit.first, cit.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", std::to_string(ports.size()) } }); + + // Refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration + static_cast(gPortsOrch)->doTask(); + + // Generate basic port config + std::deque kfvBasic = {{ + "Ethernet0", + SET_COMMAND, { + { "speed", "100000" }, + { "fec", "rs" }, + { "mtu", "9100" }, + { "admin_status", "up" } + } + }}; + + // Refill consumer + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(kfvBasic); + + // Apply configuration + static_cast(gPortsOrch)->doTask(); + + // Get port and verify admin status + Port p; + ASSERT_TRUE(gPortsOrch->getPort("Ethernet0", p)); + ASSERT_TRUE(p.m_admin_state_up); + + // Generate port serdes config + std::deque kfvSerdes = {{ + "Ethernet0", + SET_COMMAND, { + { "idriver" , "0x6,0x6,0x6,0x6" } + } + }}; + + // Refill consumer + consumer->addToSync(kfvSerdes); + + _hook_sai_port_api(); + uint32_t current_sai_api_call_count = _sai_set_admin_state_down_count; + + // Apply configuration + static_cast(gPortsOrch)->doTask(); + + _unhook_sai_port_api(); + + ASSERT_TRUE(gPortsOrch->getPort("Ethernet0", p)); + ASSERT_TRUE(p.m_admin_state_up); + + // Verify idriver + std::vector idriver = { 0x6, 0x6, 0x6, 0x6 }; + ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_IDRIVER), idriver); + + // Verify admin-disable then admin-enable + ASSERT_EQ(_sai_set_admin_state_down_count, ++current_sai_api_call_count); + ASSERT_EQ(_sai_set_admin_state_up_count, current_sai_api_call_count); + + // Configure non-serdes attribute that does not trigger admin state change + std::deque kfvMtu = {{ + "Ethernet0", + SET_COMMAND, { + { "mtu", "1234" }, + } + }}; + + // Refill consumer + consumer->addToSync(kfvMtu); + + 
_hook_sai_port_api(); + current_sai_api_call_count = _sai_set_admin_state_down_count; + + // Apply configuration + static_cast(gPortsOrch)->doTask(); + + _unhook_sai_port_api(); + + ASSERT_TRUE(gPortsOrch->getPort("Ethernet0", p)); + ASSERT_TRUE(p.m_admin_state_up); + + // Verify mtu is set + ASSERT_EQ(p.m_mtu, 1234); + + // Verify no admin-disable then admin-enable + ASSERT_EQ(_sai_set_admin_state_down_count, current_sai_api_call_count); + ASSERT_EQ(_sai_set_admin_state_up_count, current_sai_api_call_count); + + // Dump pending tasks + std::vector taskList; + gPortsOrch->dumpPendingTasks(taskList); + ASSERT_TRUE(taskList.empty()); + + // Cleanup ports + cleanupPorts(gPortsOrch); + } + + /** + * Test that verifies PortsOrch::getPort() on a port that has been deleted + */ + TEST_F(PortsOrchTest, GetPortTest) + { + _hook_sai_queue_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + // Get SAI default ports to populate DB + auto &ports = defaultPortList; + ASSERT_TRUE(!ports.empty()); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + Port port; + ASSERT_TRUE(gPortsOrch->getPort("Ethernet0", port)); + ASSERT_NE(port.m_port_id, SAI_NULL_OBJECT_ID); + + // Get queue info + string type; + uint8_t index; + auto queue_id = port.m_queue_ids[0]; + auto ut_sai_get_queue_attr_count = _sai_get_queue_attr_count; + gPortsOrch->getQueueTypeAndIndex(queue_id, type, index); + ASSERT_EQ(type, "SAI_QUEUE_TYPE_UNICAST"); + ASSERT_EQ(index, 0); + type = ""; + index = 255; + gPortsOrch->getQueueTypeAndIndex(queue_id, type, index); + ASSERT_EQ(type, "SAI_QUEUE_TYPE_UNICAST"); + ASSERT_EQ(index, 0); + ASSERT_EQ(++ut_sai_get_queue_attr_count, 
_sai_get_queue_attr_count); + + // Delete port + entries.push_back({"Ethernet0", "DEL", {}}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + ASSERT_FALSE(gPortsOrch->getPort(port.m_port_id, port)); + ASSERT_EQ(gPortsOrch->m_queueInfo.find(queue_id), gPortsOrch->m_queueInfo.end()); + _unhook_sai_queue_api(); + } + + /** + * Test case: PortsOrch::addBridgePort() does not add router port to .1Q bridge + */ + TEST_F(PortsOrchTest, addBridgePortOnRouterPort) + { + _hook_sai_bridge_api(); + + StrictMock mock_sai_bridge_; + mock_sai_bridge = &mock_sai_bridge_; + sai_bridge_api->create_bridge_port = mock_create_bridge_port; + + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate port table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone, PortInitDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + portTable.set("PortInitDone", { { "lanes", "0" } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + // Apply configuration : create ports + static_cast(gPortsOrch)->doTask(); + + // Get first port and set its rif id to simulate it is router port + Port port; + gPortsOrch->getPort("Ethernet0", port); + port.m_rif_id = 1; + + ASSERT_FALSE(gPortsOrch->addBridgePort(port)); + EXPECT_CALL(mock_sai_bridge_, create_bridge_port(_, _, _, _)).Times(0); + + _unhook_sai_bridge_api(); + } + TEST_F(PortsOrchTest, PortSupportedFecModes) { _hook_sai_port_api(); @@ -433,9 +1259,143 @@ namespace portsorch_test _unhook_sai_port_api(); } + /* + * Test case: Fetching SAI_PORT_ATTR_OPER_PORT_FEC_MODE + **/ + TEST_F(PortsOrchTest, PortVerifyOperFec) + { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + 
Table statePortTable = Table(m_state_db.get(), STATE_PORT_TABLE_NAME); + std::deque entries; + + not_support_fetching_fec = false; + auto old_mock_port_fec_modes = mock_port_fec_modes; + mock_port_fec_modes.clear(); + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_port_fec_count; + gPortsOrch->oper_fec_sup = true; + + entries.push_back({"Ethernet0", "SET", + { + {"fec", "rs"} + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + ASSERT_EQ(_sai_set_port_fec_count, current_sai_api_call_count); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + Port port; + gPortsOrch->getPort("Ethernet0", port); + + sai_port_fec_mode_t fec_mode; + gPortsOrch->getPortOperFec(port, fec_mode); + + ASSERT_EQ(fec_mode, SAI_PORT_FEC_MODE_RS); + + gPortsOrch->refreshPortStatus(); + std::vector values; + statePortTable.get("Ethernet0", values); + bool fec_found = false; + for (auto &valueTuple : values) + { + if (fvField(valueTuple) == "fec") + { + fec_found = true; + ASSERT_TRUE(fvValue(valueTuple) == "rs"); + } + } + ASSERT_TRUE(fec_found == true); + + /*Mock an invalid fec mode with high value*/ + _sai_port_fec_mode = 100; + gPortsOrch->refreshPortStatus(); + statePortTable.get("Ethernet0", values); + fec_found = false; + for (auto &valueTuple : values) + { + if (fvField(valueTuple) == "fec") + { + fec_found = true; + ASSERT_TRUE(fvValue(valueTuple) == "N/A"); + } + } + mock_port_fec_modes = old_mock_port_fec_modes; + 
_unhook_sai_port_api(); + } + TEST_F(PortsOrchTest, PortTestSAIFailureHandling) + { + _hook_sai_port_api(); + _hook_sai_switch_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + not_support_fetching_fec = false; + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + _sai_syncd_notifications_count = (uint32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + _sai_syncd_notification_event = (int32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + *_sai_syncd_notifications_count = 0; + + entries.push_back({"Ethernet0", "SET", + { + {"autoneg", "on"} + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + ASSERT_DEATH({static_cast(gPortsOrch)->doTask();}, ""); + + ASSERT_EQ(*_sai_syncd_notifications_count, 1); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + _unhook_sai_port_api(); + _unhook_sai_switch_api(); + } + TEST_F(PortsOrchTest, PortReadinessColdBoot) { Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table sendToIngressPortTable = Table(m_app_db.get(), APP_SEND_TO_INGRESS_PORT_TABLE_NAME); Table pgTable = Table(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME); Table pgTableCfg = Table(m_config_db.get(), CFG_BUFFER_PG_TABLE_NAME); Table profileTable = Table(m_app_db.get(), APP_BUFFER_PROFILE_TABLE_NAME); @@ -489,6 +1449,8 @@ namespace portsorch_test // Set PortConfigDone portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + // Populate send to 
ingresss port table + sendToIngressPortTable.set("SEND_TO_INGRESS", {{"NULL", "NULL"}}); // refill consumer gPortsOrch->addExistingData(&portTable); @@ -500,7 +1462,6 @@ namespace portsorch_test // create ports static_cast(gBufferOrch)->doTask(); - static_cast(gPortsOrch)->doTask(); // Ports are not ready yet @@ -545,6 +1506,7 @@ namespace portsorch_test Table pgTable = Table(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME); Table profileTable = Table(m_app_db.get(), APP_BUFFER_PROFILE_TABLE_NAME); Table poolTable = Table(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME); + Table transceieverInfoTable = Table(m_state_db.get(), STATE_TRANSCEIVER_INFO_TABLE_NAME); // Get SAI default ports to populate DB @@ -578,6 +1540,7 @@ namespace portsorch_test for (const auto &it : ports) { portTable.set(it.first, it.second); + transceieverInfoTable.set(it.first, {}); } // Set PortConfigDone, PortInitDone @@ -585,6 +1548,12 @@ namespace portsorch_test portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); portTable.set("PortInitDone", { { "lanes", "0" } }); + // warm start, initialize ports ready list + + WarmStart::getInstance().m_enabled = true; + gBufferOrch->initBufferReadyLists(m_app_db.get(), m_config_db.get()); + WarmStart::getInstance().m_enabled = false; + // warm start, bake fill refill consumer gBufferOrch->bake(); @@ -619,10 +1588,30 @@ namespace portsorch_test gBufferOrch->dumpPendingTasks(ts); ASSERT_TRUE(ts.empty()); + + // Verify port configuration + vector port_list; + port_list.resize(ports.size()); + sai_attribute_t attr; + sai_status_t status; + attr.id = SAI_SWITCH_ATTR_PORT_LIST; + attr.value.objlist.count = static_cast(port_list.size()); + attr.value.objlist.list = port_list.data(); + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + for (uint32_t i = 0; i < port_list.size(); i++) + { + attr.id = SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE; + status = 
sai_port_api->get_port_attribute(port_list[i], 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + ASSERT_TRUE(attr.value.booldata); + } } TEST_F(PortsOrchTest, PfcDlrHandlerCallingDlrInitAttribute) { + _hook_sai_port_api(); _hook_sai_queue_api(); Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); Table pgTable = Table(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME); @@ -666,14 +1655,161 @@ namespace portsorch_test // Simulate storm drop handler started on Ethernet0 TC 3 Port port; gPortsOrch->getPort("Ethernet0", port); + auto current_pfc_mode_count = _sai_set_pfc_mode_count; auto countersTable = make_shared
(m_counters_db.get(), COUNTERS_TABLE); auto dropHandler = make_unique(port.m_port_id, port.m_queue_ids[3], 3, countersTable); + ASSERT_EQ(current_pfc_mode_count, _sai_set_pfc_mode_count); ASSERT_TRUE(_sai_set_queue_attr_count == 1); dropHandler.reset(); + ASSERT_EQ(current_pfc_mode_count, _sai_set_pfc_mode_count); ASSERT_FALSE(_sai_set_queue_attr_count == 1); _unhook_sai_queue_api(); + _unhook_sai_port_api(); + } + + TEST_F(PortsOrchTest, PfcDlrPacketAction) + { + _hook_sai_switch_api(); + std::deque entries; + sai_packet_action_t dlr_packet_action; + gSwitchOrch->m_PfcDlrInitEnable = true; + gPfcwdOrch->m_platform = BRCM_PLATFORM_SUBSTRING; + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table cfgPfcwdTable = Table(m_config_db.get(), CFG_PFC_WD_TABLE_NAME); + Table cfgPortQosMapTable = Table(m_config_db.get(), CFG_PORT_QOS_MAP_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate port table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone, PortInitDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + portTable.set("PortInitDone", { { "lanes", "0" } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + + static_cast(gPortsOrch)->doTask(); + + // Apply configuration + // ports + static_cast(gPortsOrch)->doTask(); + + ASSERT_TRUE(gPortsOrch->allPortsReady()); + + // No more tasks + vector ts; + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + ts.clear(); + + entries.clear(); + entries.push_back({"Ethernet0", "SET", + { + {"pfc_enable", "3,4"}, + {"pfcwd_sw_enable", "3,4"} + }}); + entries.push_back({"Ethernet8", "SET", + { + {"pfc_enable", "3,4"}, + {"pfcwd_sw_enable", "3,4"} + }}); + auto portQosMapConsumer = dynamic_cast(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME)); + 
portQosMapConsumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + + // create pfcwd entry for first port with drop action + dlr_packet_action = SAI_PACKET_ACTION_DROP; + entries.push_back({"GLOBAL", "SET", + { + {"POLL_INTERVAL", "200"}, + }}); + entries.push_back({"Ethernet0", "SET", + { + {"action", "drop"}, + {"detection_time", "200"}, + {"restoration_time", "200"} + }}); + + auto PfcwdConsumer = dynamic_cast(gPfcwdOrch->getExecutor(CFG_PFC_WD_TABLE_NAME)); + PfcwdConsumer->addToSync(entries); + entries.clear(); + + auto current_switch_dlr_packet_action_count = _sai_switch_dlr_packet_action_count; + static_cast(gPfcwdOrch)->doTask(); + ASSERT_EQ(++current_switch_dlr_packet_action_count, _sai_switch_dlr_packet_action_count); + ASSERT_EQ(_sai_switch_dlr_packet_action, dlr_packet_action); + ASSERT_EQ((gPfcwdOrch->m_pfcwd_ports.size()), 1); + + // create pfcwd entry for second port with drop action + entries.push_back({"Ethernet8", "SET", + { + {"action", "drop"}, + {"detection_time", "200"}, + {"restoration_time", "200"} + }}); + PfcwdConsumer->addToSync(entries); + entries.clear(); + current_switch_dlr_packet_action_count = _sai_switch_dlr_packet_action_count; + static_cast(gPfcwdOrch)->doTask(); + // verify no change in count + ASSERT_EQ(current_switch_dlr_packet_action_count, _sai_switch_dlr_packet_action_count); + + // remove both the entries + entries.push_back({"Ethernet0", "DEL", + {{}} + }); + PfcwdConsumer->addToSync(entries); + entries.clear(); + static_cast(gPfcwdOrch)->doTask(); + ASSERT_EQ((gPfcwdOrch->m_pfcwd_ports.size()), 1); + + entries.push_back({"Ethernet8", "DEL", + {{}} + }); + PfcwdConsumer->addToSync(entries); + entries.clear(); + static_cast(gPfcwdOrch)->doTask(); + + // create pfcwd entry for first port with forward action + dlr_packet_action = SAI_PACKET_ACTION_FORWARD; + entries.push_back({"Ethernet0", "SET", + { + {"action", "forward"}, + {"detection_time", "200"}, + {"restoration_time", "200"} + }}); + + 
PfcwdConsumer->addToSync(entries); + entries.clear(); + + current_switch_dlr_packet_action_count = _sai_switch_dlr_packet_action_count; + static_cast(gPfcwdOrch)->doTask(); + ASSERT_EQ(++current_switch_dlr_packet_action_count, _sai_switch_dlr_packet_action_count); + ASSERT_EQ(_sai_switch_dlr_packet_action, dlr_packet_action); + ASSERT_EQ((gPfcwdOrch->m_pfcwd_ports.size()), 1); + + // remove the entry + entries.push_back({"Ethernet0", "DEL", + {{}} + }); + PfcwdConsumer->addToSync(entries); + entries.clear(); + static_cast(gPfcwdOrch)->doTask(); + ASSERT_EQ((gPfcwdOrch->m_pfcwd_ports.size()), 0); + + _unhook_sai_switch_api(); } TEST_F(PortsOrchTest, PfcZeroBufferHandler) @@ -969,6 +2105,7 @@ namespace portsorch_test gPortsOrch->getPort("Ethernet0", port); ASSERT_TRUE(port.m_oper_status == SAI_PORT_OPER_STATUS_UP); + ASSERT_TRUE(port.m_flap_count == 1); std::vector values; portTable.get("Ethernet0", values); diff --git a/tests/mock_tests/portsyncd/portsyncd_ut.cpp b/tests/mock_tests/portsyncd/portsyncd_ut.cpp index a7aaf0f9f8..f97a80e3d6 100644 --- a/tests/mock_tests/portsyncd/portsyncd_ut.cpp +++ b/tests/mock_tests/portsyncd/portsyncd_ut.cpp @@ -28,6 +28,7 @@ extern std::string mockCmdStdcout; extern std::vector mockCallArgs; std::set g_portSet; bool g_init = false; +std::string g_switchType; void writeToApplDB(swss::ProducerStateTable &p, swss::DBConnector &cfgDb) { diff --git a/tests/mock_tests/qosorch_ut.cpp b/tests/mock_tests/qosorch_ut.cpp index 13454cee56..713238e9cd 100644 --- a/tests/mock_tests/qosorch_ut.cpp +++ b/tests/mock_tests/qosorch_ut.cpp @@ -25,6 +25,7 @@ namespace qosorch_test int sai_remove_scheduler_count; int sai_set_wred_attribute_count; sai_object_id_t switch_dscp_to_tc_map_id; + TunnelDecapOrch *tunnel_decap_orch; sai_remove_scheduler_fn old_remove_scheduler; sai_scheduler_api_t ut_sai_scheduler_api, *pold_sai_scheduler_api; @@ -36,6 +37,14 @@ namespace qosorch_test sai_qos_map_api_t ut_sai_qos_map_api, *pold_sai_qos_map_api; 
sai_set_switch_attribute_fn old_set_switch_attribute_fn; sai_switch_api_t ut_sai_switch_api, *pold_sai_switch_api; + sai_tunnel_api_t ut_sai_tunnel_api, *pold_sai_tunnel_api; + + typedef struct + { + sai_uint32_t green_max_drop_probability; + sai_uint32_t yellow_max_drop_probability; + sai_uint32_t red_max_drop_probability; + } qos_wred_max_drop_probability_t; sai_status_t _ut_stub_sai_set_switch_attribute(sai_object_id_t switch_id, const sai_attribute_t *attr) { @@ -55,6 +64,7 @@ namespace qosorch_test bool testing_wred_thresholds; WredMapHandler::qos_wred_thresholds_t saiThresholds; + qos_wred_max_drop_probability_t saiMaxDropProbabilities; void _ut_stub_sai_check_wred_attributes(const sai_attribute_t &attr) { if (!testing_wred_thresholds) @@ -88,6 +98,15 @@ namespace qosorch_test ASSERT_TRUE(!saiThresholds.red_max_threshold || saiThresholds.red_max_threshold > attr.value.u32); saiThresholds.red_min_threshold = attr.value.u32; break; + case SAI_WRED_ATTR_GREEN_DROP_PROBABILITY: + saiMaxDropProbabilities.green_max_drop_probability = attr.value.u32; + break; + case SAI_WRED_ATTR_YELLOW_DROP_PROBABILITY: + saiMaxDropProbabilities.yellow_max_drop_probability = attr.value.u32; + break; + case SAI_WRED_ATTR_RED_DROP_PROBABILITY: + saiMaxDropProbabilities.red_max_drop_probability = attr.value.u32; + break; default: break; } @@ -132,6 +151,23 @@ namespace qosorch_test ASSERT_TRUE(ts.empty()); } + void updateMaxDropProbabilityAndCheck(string name, vector &maxDropProbabilityVector, qos_wred_max_drop_probability_t &maxDropProbabilities) + { + std::deque entries; + vector ts; + entries.push_back({name, "SET", maxDropProbabilityVector}); + auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_WRED_PROFILE_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + saiMaxDropProbabilities.green_max_drop_probability = 0; + saiMaxDropProbabilities.yellow_max_drop_probability = 0; + saiMaxDropProbabilities.red_max_drop_probability = 0; + static_cast(gQosOrch)->doTask(); + 
ASSERT_EQ(saiMaxDropProbabilities.green_max_drop_probability, maxDropProbabilities.green_max_drop_probability); + ASSERT_EQ(saiMaxDropProbabilities.yellow_max_drop_probability, maxDropProbabilities.yellow_max_drop_probability); + ASSERT_EQ(saiMaxDropProbabilities.red_max_drop_probability, maxDropProbabilities.red_max_drop_probability); + } + sai_status_t _ut_stub_sai_create_wred( _Out_ sai_object_id_t *wred_id, _In_ sai_object_id_t switch_id, @@ -178,6 +214,40 @@ namespace qosorch_test return rc; } + sai_status_t _ut_stub_sai_create_tunnel( + _Out_ sai_object_id_t *tunnel_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list) + { + *tunnel_id = (sai_object_id_t)(0x1); + return SAI_STATUS_SUCCESS; + } + + sai_status_t _ut_stub_sai_create_tunnel_term_table_entry( + _Out_ sai_object_id_t *tunnel_term_table_entry_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list) + { + *tunnel_term_table_entry_id = (sai_object_id_t)(0x1); + return SAI_STATUS_SUCCESS; + } + + void checkTunnelAttribute(sai_attr_id_t attr) + { + ASSERT_TRUE(attr != SAI_TUNNEL_ATTR_ENCAP_ECN_MODE); + ASSERT_TRUE(attr != SAI_TUNNEL_ATTR_DECAP_ECN_MODE); + } + + sai_status_t _ut_stub_sai_set_tunnel_attribute( + _In_ sai_object_id_t tunnel_id, + _In_ const sai_attribute_t *attr) + { + checkTunnelAttribute(attr->id); + return SAI_STATUS_ATTR_NOT_SUPPORTED_0; + } + struct QosOrchTest : public ::testing::Test { QosOrchTest() @@ -258,6 +328,14 @@ namespace qosorch_test sai_switch_api = &ut_sai_switch_api; ut_sai_switch_api.set_switch_attribute = _ut_stub_sai_set_switch_attribute; + // Mock tunnel API + pold_sai_tunnel_api = sai_tunnel_api; + ut_sai_tunnel_api = *pold_sai_tunnel_api; + sai_tunnel_api = &ut_sai_tunnel_api; + ut_sai_tunnel_api.set_tunnel_attribute = _ut_stub_sai_set_tunnel_attribute; + ut_sai_tunnel_api.create_tunnel = _ut_stub_sai_create_tunnel; + 
ut_sai_tunnel_api.create_tunnel_term_table_entry = _ut_stub_sai_create_tunnel_term_table_entry; + // Init switch and create dependencies m_app_db = make_shared("APPL_DB", 0); m_config_db = make_shared("CONFIG_DB", 0); @@ -347,6 +425,9 @@ namespace qosorch_test ASSERT_EQ(gNeighOrch, nullptr); gNeighOrch = new NeighOrch(m_app_db.get(), APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassis_app_db.get()); + ASSERT_EQ(tunnel_decap_orch, nullptr); + tunnel_decap_orch = new TunnelDecapOrch(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); + vector qos_tables = { CFG_TC_TO_QUEUE_MAP_TABLE_NAME, CFG_SCHEDULER_TABLE_NAME, @@ -360,7 +441,8 @@ namespace qosorch_test CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, CFG_DSCP_TO_FC_MAP_TABLE_NAME, - CFG_EXP_TO_FC_MAP_TABLE_NAME + CFG_EXP_TO_FC_MAP_TABLE_NAME, + CFG_TC_TO_DSCP_MAP_TABLE_NAME }; gQosOrch = new QosOrch(m_config_db.get(), qos_tables); @@ -523,10 +605,14 @@ namespace qosorch_test delete gQosOrch; gQosOrch = nullptr; + delete tunnel_decap_orch; + tunnel_decap_orch = nullptr; + sai_qos_map_api = pold_sai_qos_map_api; sai_scheduler_api = pold_sai_scheduler_api; sai_wred_api = pold_sai_wred_api; sai_switch_api = pold_sai_switch_api; + sai_tunnel_api = pold_sai_tunnel_api; ut_helper::uninitSaiApi(); } }; @@ -1000,6 +1086,8 @@ namespace qosorch_test entries.clear(); // Drain QUEUE table static_cast(gQosOrch)->doTask(); + // Drain SCHEDULER table + static_cast(gQosOrch)->doTask(); // The dependency should be removed CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|0", "scheduler", CFG_SCHEDULER_TABLE_NAME); static_cast(gQosOrch)->dumpPendingTasks(ts); @@ -1337,4 +1425,167 @@ namespace qosorch_test testing_wred_thresholds = false; } + + TEST_F(QosOrchTest, QosOrchTestWredDropProbability) + { + testing_wred_thresholds = true; + + // The order of fields matters when the wred profile is updated from the upper set to the lower set + // It should be max, min for each color. 
In this order, the new max is less then the current min + // QoS orchagent should guarantee that the new min is configured first and then new max + vector greenProfile = { + {"wred_green_enable", "true"}, + {"wred_yellow_enable", "false"}, + }; + qos_wred_max_drop_probability_t greenProbabilities = { + 100, // green_max_drop_probability + 0, // yellow_max_drop_probability + 0 // red_max_drop_probability + }; + updateMaxDropProbabilityAndCheck("green_default", greenProfile, greenProbabilities); + + greenProfile.push_back({"green_drop_probability", "5"}); + greenProbabilities.green_max_drop_probability = 5; + updateMaxDropProbabilityAndCheck("green", greenProfile, greenProbabilities); + + vector yellowProfile = { + {"wred_yellow_enable", "true"}, + {"wred_red_enable", "false"}, + }; + qos_wred_max_drop_probability_t yellowProbabilities = { + 0, // green_max_drop_probability + 100, // yellow_max_drop_probability + 0 // red_max_drop_probability + }; + updateMaxDropProbabilityAndCheck("yellow_default", yellowProfile, yellowProbabilities); + + yellowProfile.push_back({"yellow_drop_probability", "5"}); + yellowProbabilities.yellow_max_drop_probability = 5; + updateMaxDropProbabilityAndCheck("yellow", yellowProfile, yellowProbabilities); + + vector redProfile = { + {"wred_green_enable", "false"}, + {"wred_red_enable", "true"}, + }; + qos_wred_max_drop_probability_t redProbabilities = { + 0, // green_max_drop_probability + 0, // yellow_max_drop_probability + 100 // red_max_drop_probability + }; + updateMaxDropProbabilityAndCheck("red_default", redProfile, redProbabilities); + + redProfile.push_back({"red_drop_probability", "5"}); + redProbabilities.red_max_drop_probability = 5; + updateMaxDropProbabilityAndCheck("red", redProfile, redProbabilities); + + testing_wred_thresholds = false; + } + + + /* + * Make sure empty fields won't cause orchagent crash + */ + TEST_F(QosOrchTest, QosOrchTestEmptyField) + { + // Create a new dscp to tc map + std::deque entries; + 
entries.push_back({"Ethernet0", "SET", + { + {"dscp_to_tc_map", ""} + }}); + auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + + entries.push_back({"Ethernet0|3", "SET", + { + {"scheduler", ""} + }}); + entries.push_back({"Ethernet0|4", "SET", + { + {"wred_profile", ""} + }}); + consumer = dynamic_cast(gQosOrch->getExecutor(CFG_QUEUE_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + + // Drain DSCP_TO_TC_MAP and PORT_QOS_MAP table + static_cast(gQosOrch)->doTask(); + } + + /* + * Set tunnel QoS attribute test - OA should skip settings + */ + TEST_F(QosOrchTest, QosOrchTestSetTunnelQoSAttribute) + { + // Create a new dscp to tc map + Table tcToDscpMapTable = Table(m_config_db.get(), CFG_TC_TO_DSCP_MAP_TABLE_NAME); + tcToDscpMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + gQosOrch->addExistingData(&tcToDscpMapTable); + static_cast(gQosOrch)->doTask(); + + std::deque entries; + entries.push_back({"MuxTunnel0", "SET", + { + {"decap_dscp_to_tc_map", "AZURE"}, + {"decap_tc_to_pg_map", "AZURE"}, + {"dscp_mode", "pipe"}, + {"dst_ip", "10.1.0.32"}, + {"encap_tc_to_dscp_map", "AZURE"}, + {"encap_tc_to_queue_map", "AZURE"}, + {"src_ip", "10.1.0.33"}, + {"ttl_mode", "pipe"}, + {"tunnel_type", "IPINIP"} + }}); + entries.push_back({"MuxTunnel1", "SET", + { + {"decap_dscp_to_tc_map", "AZURE"}, + {"dscp_mode", "pipe"}, + {"dst_ip", "10.1.0.32"}, + {"encap_tc_to_dscp_map", "AZURE"}, + {"encap_tc_to_queue_map", "AZURE"}, + {"src_ip", "10.1.0.33"}, + {"ttl_mode", "pipe"}, + {"tunnel_type", "IPINIP"} + }}); + auto consumer = dynamic_cast(tunnel_decap_orch->getExecutor(APP_TUNNEL_DECAP_TABLE_NAME)); + consumer->addToSync(entries); + // Drain TUNNEL_DECAP_TABLE table + static_cast(tunnel_decap_orch)->doTask(); + entries.clear(); + + // Set an attribute that is not supported by vendor + entries.push_back({"MuxTunnel1", "SET", + { + {"decap_tc_to_pg_map", "AZURE"} + }}); + 
consumer->addToSync(entries); + // Drain TUNNEL_DECAP_TABLE table + static_cast(tunnel_decap_orch)->doTask(); + entries.clear(); + + // Set attributes for the 2nd time + entries.push_back({"MuxTunnel0", "SET", + { + {"encap_ecn_mode", "standard"} + }}); + consumer->addToSync(entries); + // Drain TUNNEL_DECAP_TABLE table + static_cast(tunnel_decap_orch)->doTask(); + entries.clear(); + + // Set attributes for the 2nd time + entries.push_back({"MuxTunnel1", "SET", + { + {"ecn_mode", "copy_from_outer"} + }}); + consumer->addToSync(entries); + // Drain TUNNEL_DECAP_TABLE table + static_cast(tunnel_decap_orch)->doTask(); + entries.clear(); + } } diff --git a/tests/mock_tests/response_publisher/response_publisher_ut.cpp b/tests/mock_tests/response_publisher/response_publisher_ut.cpp new file mode 100644 index 0000000000..9e836bad04 --- /dev/null +++ b/tests/mock_tests/response_publisher/response_publisher_ut.cpp @@ -0,0 +1,32 @@ +#include "response_publisher.h" + +#include + +using namespace swss; + +TEST(ResponsePublisher, TestPublish) +{ + DBConnector conn{"APPL_STATE_DB", 0}; + Table stateTable{&conn, "SOME_TABLE"}; + std::string value; + ResponsePublisher publisher{}; + + publisher.publish("SOME_TABLE", "SOME_KEY", {{"field", "value"}}, ReturnCode(SAI_STATUS_SUCCESS)); + ASSERT_TRUE(stateTable.hget("SOME_KEY", "field", value)); + ASSERT_EQ(value, "value"); +} + +TEST(ResponsePublisher, TestPublishBuffered) +{ + DBConnector conn{"APPL_STATE_DB", 0}; + Table stateTable{&conn, "SOME_TABLE"}; + std::string value; + ResponsePublisher publisher{}; + + publisher.setBuffered(true); + + publisher.publish("SOME_TABLE", "SOME_KEY", {{"field", "value"}}, ReturnCode(SAI_STATUS_SUCCESS)); + publisher.flush(); + ASSERT_TRUE(stateTable.hget("SOME_KEY", "field", value)); + ASSERT_EQ(value, "value"); +} diff --git a/tests/mock_tests/routeorch_ut.cpp b/tests/mock_tests/routeorch_ut.cpp index 2c1c4b8535..bd2108a683 100644 --- a/tests/mock_tests/routeorch_ut.cpp +++ 
b/tests/mock_tests/routeorch_ut.cpp @@ -7,10 +7,14 @@ #include "ut_helper.h" #include "mock_orchagent_main.h" #include "mock_table.h" +#include "mock_response_publisher.h" #include "bulker.h" extern string gMySwitchType; +extern std::unique_ptr gMockResponsePublisher; + +using ::testing::_; namespace routeorch_test { @@ -178,6 +182,7 @@ namespace routeorch_test ASSERT_EQ(gPortsOrch, nullptr); gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + gDirectory.set(gPortsOrch); vector flex_counter_tables = { CFG_FLEX_COUNTER_TABLE_NAME @@ -193,6 +198,10 @@ namespace routeorch_test ASSERT_EQ(gVrfOrch, nullptr); gVrfOrch = new VRFOrch(m_app_db.get(), APP_VRF_TABLE_NAME, m_state_db.get(), STATE_VRF_OBJECT_TABLE_NAME); + gDirectory.set(gVrfOrch); + + EvpnNvoOrch *evpn_orch = new EvpnNvoOrch(m_app_db.get(), APP_VXLAN_EVPN_NVO_TABLE_NAME); + gDirectory.set(evpn_orch); ASSERT_EQ(gIntfsOrch, nullptr); gIntfsOrch = new IntfsOrch(m_app_db.get(), APP_INTF_TABLE_NAME, gVrfOrch, m_chassis_app_db.get()); @@ -278,10 +287,18 @@ namespace routeorch_test static_cast(gPortsOrch)->doTask(); Table intfTable = Table(m_app_db.get(), APP_INTF_TABLE_NAME); + intfTable.set("Loopback0", { {"NULL", "NULL" }, + {"mac_addr", "00:00:00:00:00:00" }}); + intfTable.set("Loopback0:10.1.0.32/32", { { "scope", "global" }, + { "family", "IPv4" }}); intfTable.set("Ethernet0", { {"NULL", "NULL" }, {"mac_addr", "00:00:00:00:00:00" }}); intfTable.set("Ethernet0:10.0.0.1/24", { { "scope", "global" }, { "family", "IPv4" }}); + intfTable.set("Ethernet4", { {"NULL", "NULL" }, + {"mac_addr", "00:00:00:00:00:00" }}); + intfTable.set("Ethernet4:11.0.0.1/32", { { "scope", "global" }, + { "family", "IPv4" }}); gIntfsOrch->addExistingData(&intfTable); static_cast(gIntfsOrch)->doTask(); @@ -321,6 +338,9 @@ namespace routeorch_test delete gIntfsOrch; gIntfsOrch = nullptr; + delete gSrv6Orch; + gSrv6Orch = nullptr; + delete gNeighOrch; gNeighOrch = nullptr; @@ -330,9 +350,6 @@ 
namespace routeorch_test delete gFgNhgOrch; gFgNhgOrch = nullptr; - delete gSrv6Orch; - gSrv6Orch = nullptr; - delete gRouteOrch; gRouteOrch = nullptr; @@ -422,4 +439,104 @@ namespace routeorch_test ASSERT_EQ(current_set_count + 1, set_route_count); ASSERT_EQ(sai_fail_count, 0); } + + TEST_F(RouteOrchTest, RouteOrchTestSetDelResponse) + { + gMockResponsePublisher = std::make_unique(); + + std::deque entries; + std::string key = "2.2.2.0/24"; + std::vector fvs{{"ifname", "Ethernet0,Ethernet0"}, {"nexthop", "10.0.0.2,10.0.0.3"}, {"protocol", "bgp"}}; + entries.push_back({key, "SET", fvs}); + + auto consumer = dynamic_cast(gRouteOrch->getExecutor(APP_ROUTE_TABLE_NAME)); + consumer->addToSync(entries); + + EXPECT_CALL(*gMockResponsePublisher, publish(APP_ROUTE_TABLE_NAME, key, std::vector{{"protocol", "bgp"}}, ReturnCode(SAI_STATUS_SUCCESS), false)).Times(1); + static_cast(gRouteOrch)->doTask(); + + // add entries again to the consumer queue (in case of rapid DEL/SET operations from fpmsyncd, routeorch just gets the last SET update) + consumer->addToSync(entries); + + EXPECT_CALL(*gMockResponsePublisher, publish(APP_ROUTE_TABLE_NAME, key, std::vector{{"protocol", "bgp"}}, ReturnCode(SAI_STATUS_SUCCESS), false)).Times(1); + static_cast(gRouteOrch)->doTask(); + + entries.clear(); + + // Route deletion + + entries.clear(); + entries.push_back({key, "DEL", {}}); + + consumer->addToSync(entries); + + EXPECT_CALL(*gMockResponsePublisher, publish(APP_ROUTE_TABLE_NAME, key, std::vector{}, ReturnCode(SAI_STATUS_SUCCESS), false)).Times(1); + static_cast(gRouteOrch)->doTask(); + + gMockResponsePublisher.reset(); + } + + TEST_F(RouteOrchTest, RouteOrchSetFullMaskSubnetPrefix) + { + gMockResponsePublisher = std::make_unique(); + + std::deque entries; + std::string key = "11.0.0.1/32"; + std::vector fvs{{"ifname", "Ethernet4"}, {"nexthop", "0.0.0.0"}, {"protocol", "bgp"}}; + entries.push_back({key, "SET", fvs}); + + auto consumer = 
dynamic_cast(gRouteOrch->getExecutor(APP_ROUTE_TABLE_NAME)); + consumer->addToSync(entries); + + EXPECT_CALL(*gMockResponsePublisher, publish(APP_ROUTE_TABLE_NAME, key, std::vector{{"protocol", "bgp"}}, ReturnCode(SAI_STATUS_SUCCESS), false)).Times(1); + static_cast(gRouteOrch)->doTask(); + + gMockResponsePublisher.reset(); + } + + TEST_F(RouteOrchTest, RouteOrchLoopbackRoute) + { + gMockResponsePublisher = std::make_unique(); + + std::deque entries; + std::string key = "fc00:1::/64"; + std::vector fvs{{"ifname", "Loopback"}, {"nexthop", "::"}, {"protocol", "static"}}; + entries.push_back({key, "SET", fvs}); + + auto consumer = dynamic_cast(gRouteOrch->getExecutor(APP_ROUTE_TABLE_NAME)); + consumer->addToSync(entries); + + EXPECT_CALL(*gMockResponsePublisher, publish(APP_ROUTE_TABLE_NAME, key, std::vector{{"protocol", "static"}}, ReturnCode(SAI_STATUS_SUCCESS), false)).Times(1); + static_cast(gRouteOrch)->doTask(); + + gMockResponsePublisher.reset(); + } + + TEST_F(RouteOrchTest, RouteOrchTestInvalidEvpnRoute) + { + std::deque entries; + entries.push_back({"Vrf1", "SET", { {"vni", "500100"}, {"v4", "true"}}}); + auto consumer = dynamic_cast(gVrfOrch->getExecutor(APP_VRF_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gVrfOrch)->doTask(); + + entries.clear(); + entries.push_back({"Vrf1:1.1.1.0/24", "SET", { {"ifname", "Ethernet0,Ethernet0"}, + {"nexthop", "10.0.0.2,10.0.0.3"}, + {"vni_label", "500100"}, + {"router_mac", "7e:f0:c0:e4:b2:5a,7e:f0:c0:e4:b2:5b"}}}); + entries.push_back({"Vrf1:2.1.1.0/24", "SET", { {"ifname", "Ethernet0,Ethernet0"}, + {"nexthop", "10.0.0.2,10.0.0.3"}, + {"vni_label", "500100,500100"}, + {"router_mac", "7e:f0:c0:e4:b2:5b"}}}); + consumer = dynamic_cast(gRouteOrch->getExecutor(APP_ROUTE_TABLE_NAME)); + consumer->addToSync(entries); + + auto current_create_count = create_route_count; + auto current_set_count = set_route_count; + + static_cast(gRouteOrch)->doTask(); + ASSERT_EQ(current_create_count, create_route_count); + 
ASSERT_EQ(current_set_count, set_route_count); + } } diff --git a/tests/mock_tests/sflowmgrd_ut.cpp b/tests/mock_tests/sflowmgrd_ut.cpp new file mode 100644 index 0000000000..7e47b162f2 --- /dev/null +++ b/tests/mock_tests/sflowmgrd_ut.cpp @@ -0,0 +1,320 @@ +#include "gtest/gtest.h" +#include "mock_table.h" +#include "redisutility.h" +#include "sflowmgr.h" + +namespace sflowmgr_ut +{ + using namespace swss; + using namespace std; + + struct SflowMgrTest : public ::testing::Test + { + shared_ptr m_app_db; + shared_ptr m_config_db; + shared_ptr m_state_db; + shared_ptr m_sflowMgr; + SflowMgrTest() + { + m_app_db = make_shared( + "APPL_DB", 0); + m_config_db = make_shared( + "CONFIG_DB", 0); + m_state_db = make_shared( + "STATE_DB", 0); + } + + virtual void SetUp() override + { + ::testing_db::reset(); + TableConnector conf_port_table(m_config_db.get(), CFG_PORT_TABLE_NAME); + TableConnector state_port_table(m_state_db.get(), STATE_PORT_TABLE_NAME); + TableConnector conf_sflow_table(m_config_db.get(), CFG_SFLOW_TABLE_NAME); + TableConnector conf_sflow_session_table(m_config_db.get(), CFG_SFLOW_SESSION_TABLE_NAME); + + vector sflow_tables = { + conf_port_table, + state_port_table, + conf_sflow_table, + conf_sflow_session_table + }; + m_sflowMgr.reset(new SflowMgr(m_app_db.get(), sflow_tables)); + } + + void enableSflow() + { + Table cfg_sflow(m_config_db.get(), CFG_SFLOW_TABLE_NAME); + cfg_sflow.set("global", { + {"admin_state", "up"} + }); + m_sflowMgr->addExistingData(&cfg_sflow); + m_sflowMgr->doTask(); + } + + void cfgSflowSession(string alias, bool status, string sample_rate, string direction = "") + { + Table cfg_sflow_table(m_config_db.get(), CFG_SFLOW_SESSION_TABLE_NAME); + vector values; + values.emplace_back("admin_state", status ? 
"up" : "down"); + if (!sample_rate.empty()) + { + values.emplace_back("sample_rate", sample_rate); + } + if (!direction.empty()) + { + values.emplace_back("sample_direction", direction); + } + cfg_sflow_table.set(alias, values); + m_sflowMgr->addExistingData(&cfg_sflow_table); + m_sflowMgr->doTask(); + } + + void cfgSflowSessionAll(bool status) + { + Table cfg_sflow_table(m_config_db.get(), CFG_SFLOW_SESSION_TABLE_NAME); + cfg_sflow_table.set("all", { + {"admin_state", status ? "up" : "down"}, + }); + m_sflowMgr->addExistingData(&cfg_sflow_table); + m_sflowMgr->doTask(); + } + + void cfgPortSpeed(string alias, string speed) + { + Table cfg_port_table(m_config_db.get(), CFG_PORT_TABLE_NAME); + cfg_port_table.set(alias, { + {"speed", speed} + }); + m_sflowMgr->addExistingData(&cfg_port_table); + m_sflowMgr->doTask(); + } + + void statePortSpeed(string alias, string speed) + { + Table state_port_table(m_config_db.get(), STATE_PORT_TABLE_NAME); + state_port_table.set(alias, { + {"speed", speed} + }); + m_sflowMgr->addExistingData(&state_port_table); + m_sflowMgr->doTask(); + } + + string getSflowSampleRate(string alias) + { + Table appl_sflow_table(m_app_db.get(), APP_SFLOW_SESSION_TABLE_NAME); + std::vector values; + appl_sflow_table.get(alias, values); + auto value_rate = swss::fvsGetValue(values, "sample_rate", true); + if (value_rate) + { + string ret = value_rate.get(); + return ret; + } + return ""; + } + + string getSflowSampleDir(string alias) + { + Table appl_sflow_table(m_app_db.get(), APP_SFLOW_SESSION_TABLE_NAME); + std::vector values; + appl_sflow_table.get(alias, values); + auto value_rate = swss::fvsGetValue(values, "sample_direction", true); + if (value_rate) + { + string ret = value_rate.get(); + return ret; + } + return ""; + } + + string getSflowAdminStatus(string alias) + { + Table appl_sflow_table(m_app_db.get(), APP_SFLOW_SESSION_TABLE_NAME); + std::vector values; + appl_sflow_table.get(alias, values); + auto value_rate = swss::fvsGetValue(values, 
"admin_state", true); + if (value_rate) + { + string ret = value_rate.get(); + return ret; + } + return "down"; + } + }; + + TEST_F(SflowMgrTest, test_RateConfiguration) + { + enableSflow(); + cfgPortSpeed("Ethernet0", "100000"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "100000"); + + /* Scenario: Operational Speed Changes to 25000 */ + statePortSpeed("Ethernet0", "25000"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "25000"); + ASSERT_TRUE(getSflowSampleDir("Ethernet0") == "rx"); + } + + TEST_F(SflowMgrTest, test_RateConfigurationCfgSpeed) + { + enableSflow(); + /* Configure the Speed to 100G */ + cfgPortSpeed("Ethernet0", "100000"); + + /* Scenario: Operational Speed Changes to 100G with autoneg */ + statePortSpeed("Ethernet0", "100000"); + + /* User changes the config speed to 10G */ + cfgPortSpeed("Ethernet0", "10000"); + + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "100000"); + + /* Scenario: Operational Speed Changes to 10G, with autoneg */ + statePortSpeed("Ethernet0", "10000"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "10000"); + + /* Configured speed is updated by user */ + cfgPortSpeed("Ethernet0", "200000"); + + /* Sampling Rate will not be updated */ + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "10000"); + } + + TEST_F(SflowMgrTest, test_OnlyStateDbNotif) + { + enableSflow(); + statePortSpeed("Ethernet0", "100000"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == ""); + } + + TEST_F(SflowMgrTest, test_LocalRateConfiguration) + { + enableSflow(); + cfgPortSpeed("Ethernet0", "100000"); + cfgSflowSession("Ethernet0", true, "12345"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "12345"); + } + + TEST_F(SflowMgrTest, test_LocalRateConfWithOperSpeed) + { + enableSflow(); + cfgPortSpeed("Ethernet0", "100000"); + + /* Scenario: Operational Speed Changes to 25000 */ + statePortSpeed("Ethernet0", "25000"); + + /* Set per interface sampling rate*/ + cfgSflowSession("Ethernet0", true, "12345"); + 
ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "12345"); + + /* Operational Speed Changes again to 50000 */ + statePortSpeed("Ethernet0", "50000"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "12345"); + } + + TEST_F(SflowMgrTest, test_newSpeed) + { + enableSflow(); + cfgPortSpeed("Ethernet0", "800000"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "800000"); + } + + TEST_F(SflowMgrTest, test_CfgSpeedAdminCfg) + { + enableSflow(); + cfgPortSpeed("Ethernet0", "100000"); + cfgSflowSessionAll(false); /* Disable sflow on all interfaces*/ + ASSERT_TRUE(getSflowAdminStatus("Ethernet0") == "down"); + cfgSflowSession("Ethernet0", true, ""); /* Set local admin up with no rate */ + ASSERT_TRUE(getSflowAdminStatus("Ethernet0") == "up"); + + /* Sampling rate should adhere to config speed*/ + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "100000"); + + cfgPortSpeed("Ethernet0", "25000"); /* Change cfg speed */ + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "25000"); + } + + TEST_F(SflowMgrTest, test_OperSpeedAdminCfg) + { + enableSflow(); + cfgPortSpeed("Ethernet0", "100000"); + cfgSflowSessionAll(false); /* Disable sflow on all interfaces*/ + cfgSflowSession("Ethernet0", true, ""); /* Set local admin up with no rate */ + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "100000"); + ASSERT_TRUE(getSflowAdminStatus("Ethernet0") == "up"); + + statePortSpeed("Ethernet0", "50000"); + /* Sampling rate should adhere to oper speed*/ + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "50000"); + ASSERT_TRUE(getSflowAdminStatus("Ethernet0") == "up"); + + /* Change cfg speed */ + cfgPortSpeed("Ethernet0", "25000"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "50000"); + + statePortSpeed("Ethernet0", "1000"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "1000"); + + cfgSflowSession("Ethernet0", true, "12345"); /* Set local sampling rate */ + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "12345"); + ASSERT_TRUE(getSflowAdminStatus("Ethernet0") == "up"); + + /* Change oper 
speed now */ + statePortSpeed("Ethernet0", "12345"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "12345"); + } + + TEST_F(SflowMgrTest, test_SflowCfgAfterPortCfg) + { + cfgPortSpeed("Ethernet0", "100000"); + /* Nothing is written yet since cfg is not enabled */ + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == ""); + ASSERT_TRUE(getSflowAdminStatus("Ethernet0") == "down"); + + /* State DB is updated with oper speed */ + statePortSpeed("Ethernet0", "100000"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == ""); + ASSERT_TRUE(getSflowAdminStatus("Ethernet0") == "down"); + + /* enable sflow */ + enableSflow(); + cfgSflowSessionAll(true); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "100000"); + ASSERT_TRUE(getSflowAdminStatus("Ethernet0") == "up"); + ASSERT_TRUE(getSflowSampleDir("Ethernet0") == "rx"); + } + + TEST_F(SflowMgrTest, test_SflowCfgAfterOperSpeed) + { + cfgPortSpeed("Ethernet0", "100000"); + /* Nothing is written yet since cfg is not enabled */ + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == ""); + ASSERT_TRUE(getSflowAdminStatus("Ethernet0") == "down"); + + /* State DB is updated with oper speed */ + statePortSpeed("Ethernet0", "50000"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == ""); + ASSERT_TRUE(getSflowAdminStatus("Ethernet0") == "down"); + + /* enable sflow */ + cfgSflowSessionAll(true); + enableSflow(); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "50000"); + ASSERT_TRUE(getSflowAdminStatus("Ethernet0") == "up"); + ASSERT_TRUE(getSflowSampleDir("Ethernet0") == "rx"); + } + + TEST_F(SflowMgrTest, test_RateConfigEgressDir) + { + enableSflow(); + cfgPortSpeed("Ethernet0", "100000"); + /* Set local admin up with no rate and no egress direction */ + cfgSflowSession("Ethernet0", true, "", "tx"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "100000"); + + /* Scenario: Operational Speed Changes to 25000 */ + statePortSpeed("Ethernet0", "25000"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "25000"); + 
ASSERT_TRUE(getSflowSampleDir("Ethernet0") == "tx"); + } +} diff --git a/tests/mock_tests/teammgrd/teammgr_ut.cpp b/tests/mock_tests/teammgrd/teammgr_ut.cpp new file mode 100644 index 0000000000..32f064f552 --- /dev/null +++ b/tests/mock_tests/teammgrd/teammgr_ut.cpp @@ -0,0 +1,78 @@ +#include "gtest/gtest.h" +#include "../mock_table.h" +#include "teammgr.h" + +extern int (*callback)(const std::string &cmd, std::string &stdout); +extern std::vector mockCallArgs; + +int cb(const std::string &cmd, std::string &stdout) +{ + mockCallArgs.push_back(cmd); + if (cmd.find("/usr/bin/teamd -r -t PortChannel1") != std::string::npos) + { + return 1; + } + else if (cmd.find("cat \"/var/run/teamd/PortChannel1.pid\"") != std::string::npos) + { + stdout = "1234"; + return 0; + } + return 0; +} + +namespace teammgr_ut +{ + struct TeamMgrTest : public ::testing::Test + { + std::shared_ptr m_config_db; + std::shared_ptr m_app_db; + std::shared_ptr m_state_db; + std::vector cfg_lag_tables; + + virtual void SetUp() override + { + testing_db::reset(); + m_config_db = std::make_shared("CONFIG_DB", 0); + m_app_db = std::make_shared("APPL_DB", 0); + m_state_db = std::make_shared("STATE_DB", 0); + + swss::Table metadata_table = swss::Table(m_config_db.get(), CFG_DEVICE_METADATA_TABLE_NAME); + std::vector vec; + vec.emplace_back("mac", "01:23:45:67:89:ab"); + metadata_table.set("localhost", vec); + + TableConnector conf_lag_table(m_config_db.get(), CFG_LAG_TABLE_NAME); + TableConnector conf_lag_member_table(m_config_db.get(), CFG_LAG_MEMBER_TABLE_NAME); + TableConnector state_port_table(m_state_db.get(), STATE_PORT_TABLE_NAME); + + std::vector tables = { + conf_lag_table, + conf_lag_member_table, + state_port_table + }; + + cfg_lag_tables = tables; + mockCallArgs.clear(); + callback = cb; + } + }; + + TEST_F(TeamMgrTest, testProcessKilledAfterAddLagFailure) + { + swss::TeamMgr teammgr(m_config_db.get(), m_app_db.get(), m_state_db.get(), cfg_lag_tables); + swss::Table cfg_lag_table = 
swss::Table(m_config_db.get(), CFG_LAG_TABLE_NAME); + cfg_lag_table.set("PortChannel1", { { "admin_status", "up" }, + { "mtu", "9100" }, + { "lacp_key", "auto" }, + { "min_links", "2" } }); + teammgr.addExistingData(&cfg_lag_table); + teammgr.doTask(); + int kill_cmd_called = 0; + for (auto cmd : mockCallArgs){ + if (cmd.find("kill -TERM 1234") != std::string::npos){ + kill_cmd_called++; + } + } + ASSERT_EQ(kill_cmd_called, 1); + } +} \ No newline at end of file diff --git a/tests/mock_tests/test_failure_handling.cpp b/tests/mock_tests/test_failure_handling.cpp new file mode 100644 index 0000000000..7381f4015e --- /dev/null +++ b/tests/mock_tests/test_failure_handling.cpp @@ -0,0 +1,91 @@ +#include "saihelper.h" +#include "ut_helper.h" +#include + +extern sai_switch_api_t *sai_switch_api; + +namespace saifailure_test +{ + struct SaiFailureTest : public ::testing::Test + { + }; + uint32_t *_sai_syncd_notifications_count; + int32_t *_sai_syncd_notification_event; + sai_switch_api_t *pold_sai_switch_api; + sai_switch_api_t ut_sai_switch_api; + + sai_status_t _ut_stub_sai_set_switch_attribute( + _In_ sai_object_id_t switch_id, + _In_ const sai_attribute_t *attr) + { + if (attr[0].id == SAI_REDIS_SWITCH_ATTR_NOTIFY_SYNCD) + { + *_sai_syncd_notifications_count = *_sai_syncd_notifications_count + 1; + *_sai_syncd_notification_event = attr[0].value.s32; + } + return pold_sai_switch_api->set_switch_attribute(switch_id, attr); + } + + void _hook_sai_switch_api() + { + map profile = { + { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, + { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } + }; + + ut_helper::initSaiApi(profile); + ut_sai_switch_api = *sai_switch_api; + pold_sai_switch_api = sai_switch_api; + + ut_sai_switch_api.set_switch_attribute = _ut_stub_sai_set_switch_attribute; + sai_switch_api = &ut_sai_switch_api; + } + + void _unhook_sai_switch_api() + { + sai_switch_api = pold_sai_switch_api; + ut_helper::uninitSaiApi(); + } + + TEST_F(SaiFailureTest, 
handleSaiFailure) + { + _hook_sai_switch_api(); + _sai_syncd_notifications_count = (uint32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + + _sai_syncd_notification_event = (int32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + *_sai_syncd_notifications_count = 0; + uint32_t notif_count = *_sai_syncd_notifications_count; + + ASSERT_DEATH({handleSaiCreateStatus(SAI_API_FDB, SAI_STATUS_FAILURE);}, ""); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + ASSERT_DEATH({handleSaiCreateStatus(SAI_API_HOSTIF, SAI_STATUS_INVALID_PARAMETER);}, ""); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + ASSERT_DEATH({handleSaiCreateStatus(SAI_API_PORT, SAI_STATUS_FAILURE);}, ""); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + ASSERT_DEATH({handleSaiSetStatus(SAI_API_HOSTIF, SAI_STATUS_FAILURE);}, ""); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + ASSERT_DEATH({handleSaiSetStatus(SAI_API_PORT, SAI_STATUS_FAILURE);}, ""); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + ASSERT_DEATH({handleSaiSetStatus(SAI_API_TUNNEL, SAI_STATUS_FAILURE);}, ""); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + ASSERT_DEATH({handleSaiRemoveStatus(SAI_API_LAG, SAI_STATUS_FAILURE);}, ""); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + 
_unhook_sai_switch_api(); + } +} diff --git a/tests/mock_tests/twamporch_ut.cpp b/tests/mock_tests/twamporch_ut.cpp new file mode 100644 index 0000000000..721950e74a --- /dev/null +++ b/tests/mock_tests/twamporch_ut.cpp @@ -0,0 +1,975 @@ +#define private public // make Directory::m_values available to clean it. +#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_table.h" +#include "notifier.h" + +extern string gMySwitchType; + +extern sai_object_id_t gSwitchId; + +extern redisReply *mockReply; + + +namespace twamporch_test +{ + using namespace std; + + int create_twamp_session_count; + int set_twamp_session_count; + int remove_twamp_session_count; + + sai_twamp_api_t ut_sai_twamp_api; + sai_twamp_api_t *pold_sai_twamp_api; + sai_switch_api_t ut_sai_switch_api; + sai_switch_api_t *pold_sai_switch_api; + + sai_create_twamp_session_fn old_create_twamp_session; + sai_remove_twamp_session_fn old_remove_twamp_session; + sai_set_twamp_session_attribute_fn old_set_twamp_session_attribute; + + sai_status_t _ut_stub_sai_create_twamp_session( + _Out_ sai_object_id_t *twamp_session_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list) + { + *twamp_session_id = (sai_object_id_t)(0x1); + create_twamp_session_count++; + return SAI_STATUS_SUCCESS; + } + + sai_status_t _ut_stub_sai_remove_twamp_session( + _In_ sai_object_id_t twamp_session_id) + { + remove_twamp_session_count++; + return SAI_STATUS_SUCCESS; + } + + sai_status_t _ut_stub_sai_set_twamp_session_attribute( + _In_ sai_object_id_t twamp_session_id, + _In_ const sai_attribute_t *attr) + { + set_twamp_session_count++; + if (attr->id == SAI_TWAMP_SESSION_ATTR_SESSION_ENABLE_TRANSMIT) + { + return SAI_STATUS_SUCCESS; + } + return old_set_twamp_session_attribute(twamp_session_id, attr); + } + + sai_status_t _ut_stub_sai_get_switch_attribute( + _In_ 
sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _Inout_ sai_attribute_t *attr_list) + { + if (attr_count == 1) + { + if (attr_list[0].id == SAI_SWITCH_ATTR_MAX_TWAMP_SESSION) + { + attr_list[0].value.u32 = 128; + return SAI_STATUS_SUCCESS; + } + } + return pold_sai_switch_api->get_switch_attribute(switch_id, attr_count, attr_list); + } + + sai_status_t _ut_stub_sai_set_switch_attribute( + _In_ sai_object_id_t switch_id, + _In_ const sai_attribute_t *attr) + { + if (attr[0].id == SAI_SWITCH_ATTR_TWAMP_SESSION_EVENT_NOTIFY) + { + return SAI_STATUS_SUCCESS; + } + return pold_sai_switch_api->set_switch_attribute(switch_id, attr); + } + + void _hook_sai_twamp_api() + { + ut_sai_twamp_api = *sai_twamp_api; + pold_sai_twamp_api = sai_twamp_api; + ut_sai_twamp_api.create_twamp_session = _ut_stub_sai_create_twamp_session; + ut_sai_twamp_api.remove_twamp_session = _ut_stub_sai_remove_twamp_session; + ut_sai_twamp_api.set_twamp_session_attribute = _ut_stub_sai_set_twamp_session_attribute; + sai_twamp_api = &ut_sai_twamp_api; + } + + void _unhook_sai_twamp_api() + { + sai_twamp_api = pold_sai_twamp_api; + } + + void _hook_sai_switch_api() + { + ut_sai_switch_api = *sai_switch_api; + pold_sai_switch_api = sai_switch_api; + ut_sai_switch_api.get_switch_attribute = _ut_stub_sai_get_switch_attribute; + ut_sai_switch_api.set_switch_attribute = _ut_stub_sai_set_switch_attribute; + sai_switch_api = &ut_sai_switch_api; + } + + void _unhook_sai_switch_api() + { + sai_switch_api = pold_sai_switch_api; + } + + class MockTwampOrch final + { + public: + MockTwampOrch() + { + this->confDb = std::make_shared("CONFIG_DB", 0); + TableConnector confDbTwampTable(this->confDb.get(), CFG_TWAMP_SESSION_TABLE_NAME); + TableConnector stateDbTwampTable(this->confDb.get(), STATE_TWAMP_SESSION_TABLE_NAME); + this->twampOrch = std::make_shared(confDbTwampTable, stateDbTwampTable, gSwitchOrch, gPortsOrch, gVrfOrch); + } + ~MockTwampOrch() = default; + + void doTwampTableTask(const std::deque 
&entries) + { + auto consumer = dynamic_cast((this->twampOrch.get())->getExecutor(CFG_TWAMP_SESSION_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(this->twampOrch.get())->doTask(*consumer); + } + + void doTwampNotificationTask() + { + auto exec = static_cast((this->twampOrch.get())->getExecutor("TWAMP_NOTIFICATIONS")); + auto consumer = exec->getNotificationConsumer(); + consumer->readData(); + static_cast(this->twampOrch.get())->doTask(*consumer); + } + + TwampOrch& get() + { + return *twampOrch; + } + + private: + std::shared_ptr confDb; + std::shared_ptr twampOrch; + }; + + class TwampOrchTest : public ::testing::Test + { + public: + TwampOrchTest() + { + this->initDb(); + } + virtual ~TwampOrchTest() = default; + + void SetUp() override + { + this->initSaiApi(); + this->initSwitch(); + this->initOrch(); + this->initPorts(); + _hook_sai_twamp_api(); + _hook_sai_switch_api(); + } + + void TearDown() override + { + this->deinitOrch(); + this->deinitSwitch(); + this->deinitSaiApi(); + _unhook_sai_twamp_api(); + _unhook_sai_switch_api(); + } + + private: + void initSaiApi() + { + std::map profileMap = { + { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, + { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } + }; + auto status = ut_helper::initSaiApi(profileMap); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + } + + void deinitSaiApi() + { + auto status = ut_helper::uninitSaiApi(); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + } + + void initSwitch() + { + sai_status_t status; + sai_attribute_t attr; + + // Create switch + attr.id = SAI_SWITCH_ATTR_INIT_SWITCH; + attr.value.booldata = true; + + status = sai_switch_api->create_switch(&gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + // Get switch source MAC address + attr.id = SAI_SWITCH_ATTR_SRC_MAC_ADDRESS; + + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gMacAddress = attr.value.mac; + + // Get switch default virtual 
router ID + attr.id = SAI_SWITCH_ATTR_DEFAULT_VIRTUAL_ROUTER_ID; + + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gVirtualRouterId = attr.value.oid; + } + + void deinitSwitch() + { + // Remove switch + auto status = sai_switch_api->remove_switch(gSwitchId); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gSwitchId = SAI_NULL_OBJECT_ID; + gVirtualRouterId = SAI_NULL_OBJECT_ID; + } + + void initOrch() + { + // + // SwitchOrch + // + TableConnector state_switch_table(this->stateDb.get(), "SWITCH_CAPABILITY"); + TableConnector app_switch_table(this->appDb.get(), APP_SWITCH_TABLE_NAME); + TableConnector conf_asic_sensors(this->configDb.get(), CFG_ASIC_SENSORS_TABLE_NAME); + + std::vector switchTableList = { + conf_asic_sensors, + app_switch_table + }; + + ASSERT_EQ(gSwitchOrch, nullptr); + gSwitchOrch = new SwitchOrch(this->appDb.get(), switchTableList, state_switch_table); + gDirectory.set(gSwitchOrch); + resourcesList.push_back(gSwitchOrch); + + // + // PortsOrch + // + const int portsorch_base_pri = 40; + + vector ports_tables = { + { APP_PORT_TABLE_NAME, portsorch_base_pri + 5 }, + { APP_VLAN_TABLE_NAME, portsorch_base_pri + 2 }, + { APP_VLAN_MEMBER_TABLE_NAME, portsorch_base_pri }, + { APP_LAG_TABLE_NAME, portsorch_base_pri + 4 }, + { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } + }; + + ASSERT_EQ(gPortsOrch, nullptr); + gPortsOrch = new PortsOrch(this->appDb.get(), this->stateDb.get(), ports_tables, this->chassisAppDb.get()); + gDirectory.set(gPortsOrch); + resourcesList.push_back(gPortsOrch); + + // + // VrfOrch + // + ASSERT_EQ(gVrfOrch, nullptr); + gVrfOrch = new VRFOrch(this->appDb.get(), APP_VRF_TABLE_NAME, this->stateDb.get(), STATE_VRF_OBJECT_TABLE_NAME); + resourcesList.push_back(gVrfOrch); + + + // + // BufferOrch + // + std::vector bufferTableList = { + APP_BUFFER_POOL_TABLE_NAME, + APP_BUFFER_PROFILE_TABLE_NAME, + APP_BUFFER_QUEUE_TABLE_NAME, + APP_BUFFER_PG_TABLE_NAME, + 
APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, + APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME + }; + gBufferOrch = new BufferOrch(this->appDb.get(), this->configDb.get(), this->stateDb.get(), bufferTableList); + gDirectory.set(gBufferOrch); + resourcesList.push_back(gBufferOrch); + + // + // FlexCounterOrch + // + std::vector flexCounterTableList = { + CFG_FLEX_COUNTER_TABLE_NAME + }; + + auto flexCounterOrch = new FlexCounterOrch(this->configDb.get(), flexCounterTableList); + gDirectory.set(flexCounterOrch); + resourcesList.push_back(flexCounterOrch); + + // + // CrmOrch + // + ASSERT_EQ(gCrmOrch, nullptr); + gCrmOrch = new CrmOrch(this->configDb.get(), CFG_CRM_TABLE_NAME); + gDirectory.set(gCrmOrch); + resourcesList.push_back(gCrmOrch); + } + + void deinitOrch() + { + std::reverse(resourcesList.begin(), resourcesList.end()); + for (auto &it : resourcesList) + { + delete it; + } + + gSwitchOrch = nullptr; + gPortsOrch = nullptr; + gVrfOrch = nullptr; + gBufferOrch = nullptr; + gCrmOrch = nullptr; + + Portal::DirectoryInternal::clear(gDirectory); + EXPECT_TRUE(Portal::DirectoryInternal::empty(gDirectory)); + } + + void initPorts() + { + auto portTable = Table(this->appDb.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate port table with SAI ports + for (const auto &cit : ports) + { + portTable.set(cit.first, cit.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + + // Set PortInitDone + portTable.set("PortInitDone", { { "lanes", "0" } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + } + + void initDb() + { + this->appDb = std::make_shared("APPL_DB", 0); + this->configDb = std::make_shared("CONFIG_DB", 0); + this->stateDb = std::make_shared("STATE_DB", 0); + this->countersDb = make_shared("COUNTERS_DB", 0); + 
this->chassisAppDb = make_shared("CHASSIS_APP_DB", 0); + this->asicDb = make_shared("ASIC_DB", 0); + } + + shared_ptr appDb; + shared_ptr configDb; + shared_ptr stateDb; + shared_ptr countersDb; + shared_ptr chassisAppDb; + shared_ptr asicDb; + + std::vector resourcesList; + }; + + TEST_F(TwampOrchTest, TwampOrchTestCreateDeleteSenderPacketCountSingle) + { + string twampSessionName = "TEST_SENDER1"; + + MockTwampOrch twampOrch; + + auto current_create_count = create_twamp_session_count; + auto current_remove_count = remove_twamp_session_count; + auto current_set_count = set_twamp_session_count; + + // Create TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + SET_COMMAND, + { + {"mode", "LIGHT" }, + {"role", "SENDER" }, + {"src_ip", "1.1.1.1" }, + {"src_udp_port", "862" }, + {"dst_ip", "2.2.2.2" }, + {"dst_udp_port", "863" }, + {"packet_count", "1000" }, + {"tx_interval", "10" }, + {"timeout", "10" }, + {"statistics_interval", "20000" }, + {"vrf_name", "default" }, + {"dscp", "0" }, + {"ttl", "10" }, + {"timestamp_format", "ntp" }, + {"padding_size", "100" }, + {"hw_lookup", "true" } + } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_TRUE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(session_status, "inactive"); + ASSERT_EQ(current_create_count + 1, create_twamp_session_count); + } + + // Start TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + SET_COMMAND, + { + {"admin_state", "enabled"} + } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_TRUE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(session_status, "active"); + ASSERT_EQ(current_set_count + 1, set_twamp_session_count); + } + + // Process Notification + { + // mock a redis reply for notification + mockReply = (redisReply *)calloc(sizeof(redisReply), 1); + 
mockReply->type = REDIS_REPLY_ARRAY; + mockReply->elements = 3; // REDIS_PUBLISH_MESSAGE_ELEMNTS + mockReply->element = (redisReply **)calloc(sizeof(redisReply *), mockReply->elements); + mockReply->element[2] = (redisReply *)calloc(sizeof(redisReply), 1); + mockReply->element[2]->type = REDIS_REPLY_STRING; + sai_twamp_session_event_notification_data_t twamp_session_data; + sai_twamp_session_stat_t counters_ids[SAI_TWAMP_SESSION_STAT_DURATION_TS]; + uint64_t counters[SAI_TWAMP_SESSION_STAT_DURATION_TS]; + twamp_session_data.session_state = SAI_TWAMP_SESSION_STATE_INACTIVE; + twamp_session_data.twamp_session_id = (sai_object_id_t)0x1; + twamp_session_data.session_stats.index = 1; + twamp_session_data.session_stats.number_of_counters = 11; + + counters_ids[0] = SAI_TWAMP_SESSION_STAT_RX_PACKETS; + counters_ids[1] = SAI_TWAMP_SESSION_STAT_RX_BYTE; + counters_ids[2] = SAI_TWAMP_SESSION_STAT_TX_PACKETS; + counters_ids[3] = SAI_TWAMP_SESSION_STAT_TX_BYTE; + counters_ids[4] = SAI_TWAMP_SESSION_STAT_DROP_PACKETS; + counters_ids[5] = SAI_TWAMP_SESSION_STAT_MAX_LATENCY; + counters_ids[6] = SAI_TWAMP_SESSION_STAT_MIN_LATENCY; + counters_ids[7] = SAI_TWAMP_SESSION_STAT_AVG_LATENCY; + counters_ids[8] = SAI_TWAMP_SESSION_STAT_MAX_JITTER; + counters_ids[9] = SAI_TWAMP_SESSION_STAT_MIN_JITTER; + counters_ids[10] = SAI_TWAMP_SESSION_STAT_AVG_JITTER; + counters[0] = 1000; + counters[1] = 100000; + counters[2] = 1000; + counters[3] = 100000; + counters[4] = 0; + counters[5] = 1987; + counters[6] = 1983; + counters[7] = 1984; + counters[8] = 2097; + counters[9] = 1896; + counters[10] = 1985; + twamp_session_data.session_stats.counters_ids = counters_ids; + twamp_session_data.session_stats.counters = counters; + + std::string data = sai_serialize_twamp_session_event_ntf(1, &twamp_session_data); + + std::vector notifyValues; + FieldValueTuple opdata("twamp_session_event", data); + notifyValues.push_back(opdata); + std::string msg = swss::JSon::buildJson(notifyValues); + 
mockReply->element[2]->str = (char*)calloc(1, msg.length() + 1); + memcpy(mockReply->element[2]->str, msg.c_str(), msg.length()); + + // trigger the notification + twampOrch.doTwampNotificationTask(); + mockReply = nullptr; + + TwampStatsTable twampStatistics = Portal::TwampOrchInternal::getTwampSessionStatistics(twampOrch.get()); + ASSERT_TRUE(twampStatistics.find(twampSessionName) != twampStatistics.end()); + ASSERT_EQ(twampStatistics[twampSessionName].rx_packets, 1000); + ASSERT_EQ(twampStatistics[twampSessionName].rx_bytes, 100000); + ASSERT_EQ(twampStatistics[twampSessionName].tx_packets, 1000); + ASSERT_EQ(twampStatistics[twampSessionName].tx_bytes, 100000); + ASSERT_EQ(twampStatistics[twampSessionName].drop_packets, 0); + ASSERT_EQ(twampStatistics[twampSessionName].max_latency, 1987); + ASSERT_EQ(twampStatistics[twampSessionName].min_latency, 1983); + ASSERT_EQ(twampStatistics[twampSessionName].avg_latency, 1984); + ASSERT_EQ(twampStatistics[twampSessionName].max_jitter, 2097); + ASSERT_EQ(twampStatistics[twampSessionName].min_jitter, 1896); + ASSERT_EQ(twampStatistics[twampSessionName].avg_jitter, 1985); + } + + // Delete TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + DEL_COMMAND, + { {} } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_FALSE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(current_remove_count + 1, remove_twamp_session_count); + } + + // Make sure both create and set has been called + ASSERT_EQ(current_create_count + 1, create_twamp_session_count); + ASSERT_EQ(current_remove_count + 1, remove_twamp_session_count); + ASSERT_EQ(current_set_count + 1, set_twamp_session_count); + } + + TEST_F(TwampOrchTest, TwampOrchTestCreateDeleteSenderPacketCountMulti) + { + string twampSessionName = "TEST_SENDER1"; + + MockTwampOrch twampOrch; + + auto current_create_count = create_twamp_session_count; + auto current_remove_count = 
remove_twamp_session_count; + auto current_set_count = set_twamp_session_count; + + // Create TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + SET_COMMAND, + { + {"mode", "LIGHT" }, + {"role", "SENDER" }, + {"src_ip", "1.1.1.1" }, + {"src_udp_port", "1862" }, + {"dst_ip", "2.2.2.2" }, + {"dst_udp_port", "1863" }, + {"packet_count", "1000" }, + {"tx_interval", "10" }, + {"timeout", "10" }, + {"statistics_interval", "11000" } + } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_TRUE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(session_status, "inactive"); + ASSERT_EQ(current_create_count + 1, create_twamp_session_count); + } + + // Start TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + SET_COMMAND, + { + {"admin_state", "enabled"} + } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_TRUE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(session_status, "active"); + ASSERT_EQ(current_set_count + 1, set_twamp_session_count); + } + + // Process Notification + { + sai_twamp_session_event_notification_data_t twamp_session_data; + sai_twamp_session_stat_t counters_ids[SAI_TWAMP_SESSION_STAT_DURATION_TS]; + uint64_t counters[SAI_TWAMP_SESSION_STAT_DURATION_TS]; + uint64_t latency_total = 0; + uint64_t jitter_total = 0; + twamp_session_data.twamp_session_id = (sai_object_id_t)0x1; + twamp_session_data.session_stats.number_of_counters = 11; + counters_ids[0] = SAI_TWAMP_SESSION_STAT_RX_PACKETS; + counters_ids[1] = SAI_TWAMP_SESSION_STAT_RX_BYTE; + counters_ids[2] = SAI_TWAMP_SESSION_STAT_TX_PACKETS; + counters_ids[3] = SAI_TWAMP_SESSION_STAT_TX_BYTE; + counters_ids[4] = SAI_TWAMP_SESSION_STAT_DROP_PACKETS; + counters_ids[5] = SAI_TWAMP_SESSION_STAT_MAX_LATENCY; + counters_ids[6] = SAI_TWAMP_SESSION_STAT_MIN_LATENCY; + counters_ids[7] 
= SAI_TWAMP_SESSION_STAT_AVG_LATENCY; + counters_ids[8] = SAI_TWAMP_SESSION_STAT_MAX_JITTER; + counters_ids[9] = SAI_TWAMP_SESSION_STAT_MIN_JITTER; + counters_ids[10] = SAI_TWAMP_SESSION_STAT_AVG_JITTER; + twamp_session_data.session_stats.counters_ids = counters_ids; + twamp_session_data.session_stats.counters = counters; + for (uint8_t i = 1; i <= 10; i++) + { + // mock a redis reply for notification + mockReply = (redisReply *)calloc(sizeof(redisReply), 1); + mockReply->type = REDIS_REPLY_ARRAY; + mockReply->elements = 3; // REDIS_PUBLISH_MESSAGE_ELEMNTS + mockReply->element = (redisReply **)calloc(sizeof(redisReply *), mockReply->elements); + mockReply->element[2] = (redisReply *)calloc(sizeof(redisReply), 1); + mockReply->element[2]->type = REDIS_REPLY_STRING; + + twamp_session_data.session_state = (i<10) ? SAI_TWAMP_SESSION_STATE_ACTIVE : SAI_TWAMP_SESSION_STATE_INACTIVE; + twamp_session_data.session_stats.index = i; + counters[0] = 100; + counters[1] = 10000; + counters[2] = 100; + counters[3] = 10000; + counters[4] = 0; + counters[5] = 1000+i; + counters[6] = 1000+i; + counters[7] = 1000+i; + counters[8] = 1100+i; + counters[9] = 1100+i; + counters[10] = 1100+i; + latency_total += counters[7]; + jitter_total += counters[10]; + + std::string data = sai_serialize_twamp_session_event_ntf(1, &twamp_session_data); + + std::vector notifyValues; + FieldValueTuple opdata("twamp_session_event", data); + notifyValues.push_back(opdata); + std::string msg = swss::JSon::buildJson(notifyValues); + mockReply->element[2]->str = (char*)calloc(1, msg.length() + 1); + memcpy(mockReply->element[2]->str, msg.c_str(), msg.length()); + + // trigger the notification + twampOrch.doTwampNotificationTask(); + mockReply = nullptr; + + string session_status; + ASSERT_TRUE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + if (i<10) + { + ASSERT_EQ(session_status, "active"); + } + else + { + ASSERT_EQ(session_status, "inactive"); + } + + TwampStatsTable 
twampStatistics = Portal::TwampOrchInternal::getTwampSessionStatistics(twampOrch.get()); + ASSERT_TRUE(twampStatistics.find(twampSessionName) != twampStatistics.end()); + ASSERT_EQ(twampStatistics[twampSessionName].rx_packets, 100*i); + ASSERT_EQ(twampStatistics[twampSessionName].rx_bytes, 10000*i); + ASSERT_EQ(twampStatistics[twampSessionName].tx_packets, 100*i); + ASSERT_EQ(twampStatistics[twampSessionName].tx_bytes, 10000*i); + ASSERT_EQ(twampStatistics[twampSessionName].drop_packets, 0); + ASSERT_EQ(twampStatistics[twampSessionName].max_latency, 1000+i); + ASSERT_EQ(twampStatistics[twampSessionName].min_latency, 1000+1); + ASSERT_EQ(twampStatistics[twampSessionName].avg_latency, latency_total/i); + ASSERT_EQ(twampStatistics[twampSessionName].max_jitter, 1100+i); + ASSERT_EQ(twampStatistics[twampSessionName].min_jitter, 1100+1); + ASSERT_EQ(twampStatistics[twampSessionName].avg_jitter, jitter_total/i); + } + } + + // Delete TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + DEL_COMMAND, + { {} } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_FALSE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(current_remove_count + 1, remove_twamp_session_count); + } + + // Make sure both create and set has been called + ASSERT_EQ(current_create_count + 1, create_twamp_session_count); + ASSERT_EQ(current_remove_count + 1, remove_twamp_session_count); + ASSERT_EQ(current_set_count + 1, set_twamp_session_count); + } + + TEST_F(TwampOrchTest, TwampOrchTestCreateDeleteSenderContinuousSingle) + { + string twampSessionName = "TEST_SENDER1"; + + MockTwampOrch twampOrch; + + auto current_create_count = create_twamp_session_count; + auto current_remove_count = remove_twamp_session_count; + auto current_set_count = set_twamp_session_count; + + // Create TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + SET_COMMAND, + 
{ + {"mode", "LIGHT" }, + {"role", "SENDER" }, + {"src_ip", "1.1.1.1" }, + {"src_udp_port", "862" }, + {"dst_ip", "2.2.2.2" }, + {"dst_udp_port", "863" }, + {"monitor_time", "60" }, + {"tx_interval", "100" }, + {"timeout", "10" }, + {"statistics_interval", "60000" }, + {"vrf_name", "default" }, + {"dscp", "0" }, + {"ttl", "10" }, + {"timestamp_format", "ntp" }, + {"padding_size", "100" }, + {"hw_lookup", "true" } + } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_TRUE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(session_status, "inactive"); + ASSERT_EQ(current_create_count + 1, create_twamp_session_count); + } + + // Start TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + SET_COMMAND, + { + {"admin_state", "enabled"} + } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_TRUE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(session_status, "active"); + ASSERT_EQ(current_set_count + 1, set_twamp_session_count); + } + + // Delete TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + DEL_COMMAND, + { {} } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_FALSE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(current_remove_count + 1, remove_twamp_session_count); + } + + // Make sure both create and set has been called + ASSERT_EQ(current_create_count + 1, create_twamp_session_count); + ASSERT_EQ(current_remove_count + 1, remove_twamp_session_count); + ASSERT_EQ(current_set_count + 1, set_twamp_session_count); + } + + TEST_F(TwampOrchTest, TwampOrchTestCreateDeleteSenderContinuousMulti) + { + string twampSessionName = "TEST_SENDER1"; + + MockTwampOrch twampOrch; + + auto current_create_count = create_twamp_session_count; + auto current_remove_count = 
remove_twamp_session_count; + auto current_set_count = set_twamp_session_count; + + // Create TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + SET_COMMAND, + { + {"mode", "LIGHT" }, + {"role", "SENDER" }, + {"src_ip", "1.1.1.1" }, + {"src_udp_port", "1862" }, + {"dst_ip", "2.2.2.2" }, + {"dst_udp_port", "1863" }, + {"monitor_time", "0" }, + {"tx_interval", "100" }, + {"timeout", "10" }, + {"statistics_interval", "20000" }, + } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_TRUE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(session_status, "inactive"); + ASSERT_EQ(current_create_count + 1, create_twamp_session_count); + } + + // Start TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + SET_COMMAND, + { + {"admin_state", "enabled"} + } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_TRUE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(session_status, "active"); + ASSERT_EQ(current_set_count + 1, set_twamp_session_count); + } + + // Stop TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + SET_COMMAND, + { + {"admin_state", "disabled"} + } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_TRUE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(session_status, "inactive"); + ASSERT_EQ(current_set_count + 2, set_twamp_session_count); + } + + // Delete TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + DEL_COMMAND, + { {} } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_FALSE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(current_remove_count + 1, remove_twamp_session_count); + } + + // Make 
sure both create and set has been called + ASSERT_EQ(current_create_count + 1, create_twamp_session_count); + ASSERT_EQ(current_remove_count + 1, remove_twamp_session_count); + ASSERT_EQ(current_set_count + 2, set_twamp_session_count); + } + + TEST_F(TwampOrchTest, TwampOrchTestCreateDeleteReflector) + { + string twampSessionName = "TEST_SENDER1"; + + MockTwampOrch twampOrch; + + auto current_create_count = create_twamp_session_count; + auto current_remove_count = remove_twamp_session_count; + auto current_set_count = set_twamp_session_count; + + // Create TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + SET_COMMAND, + { + {"mode", "LIGHT"}, + {"role", "REFLECTOR"}, + {"src_ip", "1.1.1.1"}, + {"src_udp_port", "862"}, + {"dst_ip", "2.2.2.2"}, + {"dst_udp_port", "863"} + } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_TRUE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(session_status, "active"); + ASSERT_EQ(current_create_count + 1, create_twamp_session_count); + } + + // Delete TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + DEL_COMMAND, + { {} } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_FALSE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(current_remove_count + 1, remove_twamp_session_count); + } + + // Make sure both create and set has been called + ASSERT_EQ(current_create_count + 1, create_twamp_session_count); + ASSERT_EQ(current_remove_count + 1, remove_twamp_session_count); + ASSERT_EQ(current_set_count, set_twamp_session_count); + } +} \ No newline at end of file diff --git a/tests/mock_tests/ut_saihelper.cpp b/tests/mock_tests/ut_saihelper.cpp index 40594cc32c..c9bed67691 100644 --- a/tests/mock_tests/ut_saihelper.cpp +++ b/tests/mock_tests/ut_saihelper.cpp @@ -64,6 +64,7 @@ namespace ut_helper } 
sai_api_query(SAI_API_SWITCH, (void **)&sai_switch_api); + sai_api_query(SAI_API_HASH, (void **)&sai_hash_api); sai_api_query(SAI_API_BRIDGE, (void **)&sai_bridge_api); sai_api_query(SAI_API_VIRTUAL_ROUTER, (void **)&sai_virtual_router_api); sai_api_query(SAI_API_SAMPLEPACKET, (void **)&sai_samplepacket_api); @@ -75,6 +76,7 @@ namespace ut_helper sai_api_query(SAI_API_NEIGHBOR, (void **)&sai_neighbor_api); sai_api_query(SAI_API_TUNNEL, (void **)&sai_tunnel_api); sai_api_query(SAI_API_NEXT_HOP, (void **)&sai_next_hop_api); + sai_api_query(SAI_API_NEXT_HOP_GROUP, (void **)&sai_next_hop_group_api); sai_api_query(SAI_API_ACL, (void **)&sai_acl_api); sai_api_query(SAI_API_HOSTIF, (void **)&sai_hostif_api); sai_api_query(SAI_API_POLICER, (void **)&sai_policer_api); @@ -86,6 +88,8 @@ namespace ut_helper sai_api_query(SAI_API_QUEUE, (void **)&sai_queue_api); sai_api_query(SAI_API_MPLS, (void**)&sai_mpls_api); sai_api_query(SAI_API_COUNTER, (void**)&sai_counter_api); + sai_api_query(SAI_API_FDB, (void**)&sai_fdb_api); + sai_api_query(SAI_API_TWAMP, (void**)&sai_twamp_api); return SAI_STATUS_SUCCESS; } @@ -115,6 +119,7 @@ namespace ut_helper sai_buffer_api = nullptr; sai_queue_api = nullptr; sai_counter_api = nullptr; + sai_twamp_api = nullptr; return SAI_STATUS_SUCCESS; } diff --git a/tests/mock_tests/warmrestartassist_ut.cpp b/tests/mock_tests/warmrestartassist_ut.cpp new file mode 100644 index 0000000000..6adcd08baf --- /dev/null +++ b/tests/mock_tests/warmrestartassist_ut.cpp @@ -0,0 +1,64 @@ +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +//#include "mock_orchagent_main.h" +#include "mock_table.h" +#include "warm_restart.h" +#define private public +#include "warmRestartAssist.h" +#undef private + +#define APP_WRA_TEST_TABLE_NAME "TEST_TABLE" + +namespace warmrestartassist_test +{ + using namespace std; + + shared_ptr m_app_db = make_shared("APPL_DB", 0); + shared_ptr m_app_db_pipeline = make_shared(m_app_db.get()); + shared_ptr 
m_wra_test_table = make_shared(m_app_db.get(), APP_WRA_TEST_TABLE_NAME); + + AppRestartAssist *appRestartAssist; + + struct WarmrestartassistTest : public ::testing::Test + { + WarmrestartassistTest() + { + appRestartAssist = new AppRestartAssist(m_app_db_pipeline.get(), "testsyncd", "swss", 0); + appRestartAssist->m_warmStartInProgress = true; + appRestartAssist->registerAppTable(APP_WRA_TEST_TABLE_NAME, m_wra_test_table.get()); + } + + void SetUp() override + { + testing_db::reset(); + + Table testTable = Table(m_app_db.get(), APP_WRA_TEST_TABLE_NAME); + testTable.set("key", + { + {"field", "value0"}, + }); + } + + void TearDown() override + { + } + }; + + TEST_F(WarmrestartassistTest, warmRestartAssistTest) + { + appRestartAssist->readTablesToMap(); + vector fvVector; + fvVector.emplace_back("field", "value1"); + appRestartAssist->insertToMap(APP_WRA_TEST_TABLE_NAME, "key", fvVector, false); + appRestartAssist->insertToMap(APP_WRA_TEST_TABLE_NAME, "key", fvVector, false); + appRestartAssist->reconcile(); + + fvVector.clear(); + Table testTable = Table(m_app_db.get(), APP_WRA_TEST_TABLE_NAME); + ASSERT_TRUE(testTable.get("key", fvVector)); + ASSERT_EQ(fvField(fvVector[0]), "field"); + ASSERT_EQ(fvValue(fvVector[0]), "value1"); + } +} diff --git a/tests/mock_tests/warmrestarthelper_ut.cpp b/tests/mock_tests/warmrestarthelper_ut.cpp new file mode 100644 index 0000000000..9aae03e88d --- /dev/null +++ b/tests/mock_tests/warmrestarthelper_ut.cpp @@ -0,0 +1,106 @@ +#include "warmRestartHelper.h" +#include "warm_restart.h" +#include "mock_table.h" +#include "ut_helper.h" + +using namespace testing_db; + +namespace wrhelper_test +{ + struct WRHelperTest : public ::testing::Test + { + std::shared_ptr m_app_db; + std::shared_ptr m_pipeline; + std::shared_ptr m_routeTable; + std::shared_ptr m_routeProducerTable; + std::shared_ptr wrHelper; + + void SetUp() override + { + m_app_db = std::make_shared("APPL_DB", 0); + m_pipeline = std::make_shared(m_app_db.get()); + 
m_routeTable = std::make_shared(m_app_db.get(), "ROUTE_TABLE"); + m_routeProducerTable = std::make_shared(m_app_db.get(), "ROUTE_TABLE"); + wrHelper = std::make_shared(m_pipeline.get(), m_routeProducerTable.get(), "ROUTE_TABLE", "bgp", "bgp"); + testing_db::reset(); + } + + void TearDown() override { + } + }; + + TEST_F(WRHelperTest, testReconciliation) + { + /* Initialize WR */ + wrHelper->setState(WarmStart::INITIALIZED); + ASSERT_EQ(wrHelper->getState(), WarmStart::INITIALIZED); + + /* Old-life entries */ + m_routeTable->set("1.0.0.0/24", + { + {"ifname", "eth1"}, + {"nexthop", "2.0.0.0"} + }); + m_routeTable->set("1.1.0.0/24", + { + {"ifname", "eth2"}, + {"nexthop", "2.1.0.0"}, + {"weight", "1"}, + }); + m_routeTable->set("1.2.0.0/24", + { + {"ifname", "eth2"}, + {"nexthop", "2.2.0.0"}, + {"weight", "1"}, + {"random_attrib", "random_val"}, + }); + wrHelper->runRestoration(); + ASSERT_EQ(wrHelper->getState(), WarmStart::RESTORED); + + /* Insert new life entries */ + wrHelper->insertRefreshMap({ + "1.0.0.0/24", + "SET", + { + {"ifname", "eth1"}, + {"nexthop", "2.0.0.0"}, + {"protocol", "kernel"} + } + }); + wrHelper->insertRefreshMap({ + "1.1.0.0/24", + "SET", + { + {"ifname", "eth2"}, + {"nexthop", "2.1.0.0,2.5.0.0"}, + {"weight", "4"}, + {"protocol", "kernel"} + } + }); + wrHelper->insertRefreshMap({ + "1.2.0.0/24", + "SET", + { + {"ifname", "eth2"}, + {"nexthop", "2.2.0.0"}, + {"weight", "1"}, + {"protocol", "kernel"} + } + }); + wrHelper->reconcile(); + ASSERT_EQ(wrHelper->getState(), WarmStart::RECONCILED); + + std::string val; + ASSERT_TRUE(m_routeTable->hget("1.0.0.0/24", "protocol", val)); + ASSERT_EQ(val, "kernel"); + + m_routeTable->hget("1.1.0.0/24", "protocol", val); + ASSERT_EQ(val, "kernel"); + + m_routeTable->hget("1.1.0.0/24", "weight", val); + ASSERT_EQ(val, "4"); + + m_routeTable->hget("1.2.0.0/24", "protocol", val); + ASSERT_EQ(val, "kernel"); + } +} diff --git a/tests/p4rt/l3.py b/tests/p4rt/l3.py index 31cd0b3b95..915228a9b5 100644 --- 
a/tests/p4rt/l3.py +++ b/tests/p4rt/l3.py @@ -95,8 +95,8 @@ class P4RtGreTunnelWrapper(util.DBInterface): DEFAULT_TUNNEL_ID = "tunnel-1" DEFAULT_ROUTER_INTERFACE_ID = "16" DEFAULT_ENCAP_SRC_IP = "1.2.3.4" - DEFAULT_ENCAP_DST_IP = "5.6.7.8" - DEFAULT_ACTION = "mark_for_tunnel_encap" + DEFAULT_ENCAP_DST_IP = "12.0.0.1" + DEFAULT_ACTION = "mark_for_p2p_tunnel_encap" def generate_app_db_key(self, tunnel_id): d = {} @@ -240,7 +240,7 @@ class P4RtNextHopWrapper(util.DBInterface): DEFAULT_IPV6_NEIGHBOR_ID = "fe80::21a:11ff:fe17:5f80" # tunnel nexthop attribute values - TUNNEL_ACTION = "set_tunnel_encap_nexthop" + TUNNEL_ACTION = "set_p2p_tunnel_encap_nexthop" DEFAULT_TUNNEL_ID = "tunnel-1" def generate_app_db_key(self, nexthop_id): @@ -266,12 +266,10 @@ def create_next_hop( else: neighbor_id = neighbor_id or self.DEFAULT_IPV6_NEIGHBOR_ID nexthop_id = nexthop_id or self.DEFAULT_NEXTHOP_ID - attr_list = [ - (util.prepend_param_field(self.NEIGHBOR_ID_FIELD), neighbor_id), - (self.ACTION_FIELD, action), - ] + attr_list = [(self.ACTION_FIELD, action)] if action == self.DEFAULT_ACTION: attr_list.append((util.prepend_param_field(self.RIF_FIELD), router_interface_id)) + attr_list.append((util.prepend_param_field(self.NEIGHBOR_ID_FIELD), neighbor_id)) if tunnel_id != None: attr_list.append((util.prepend_param_field(self.TUNNEL_ID_FIELD), tunnel_id)) nexthop_key = self.generate_app_db_key(nexthop_id) diff --git a/tests/p4rt/tables_definition.py b/tests/p4rt/tables_definition.py new file mode 100644 index 0000000000..fe3a077def --- /dev/null +++ b/tests/p4rt/tables_definition.py @@ -0,0 +1,35 @@ +from swsscommon import swsscommon + +import util +import json + + +class P4RtTableDefinitionWrapper(util.DBInterface): + """Interface to interact with APP DB for P4RT tables definition.""" + + # database constants + APP_DB_TBL_NAME = swsscommon.APP_P4RT_TABLE_NAME + TBL_NAME = swsscommon.APP_P4RT_TABLES_DEFINITION_TABLE_NAME + + # attribute fields for tables definition object + INFO_FIELD = 
"info" + + # tables definition object's attribute values + INFO_VALUE = "{\"tables\":[{\"actions\":[{\"alias\":\"drop\",\"id\":16777222,\"name\":\"ingress.routing.drop\",\"params\":null},{\"alias\":\"set_nexthop_id\",\"id\":16777221,\"name\":\"ingress.routing.set_nexthop_id\",\"params\":[{\"bitwidth\":0,\"format\":\"STRING\",\"id\":1,\"name\":\"nexthop_id\",\"references\":[{\"match\":\"nexthop_id\",\"table\":\"nexthop_table\"}]}]},{\"alias\":\"set_wcmp_group_id\",\"id\":16777220,\"name\":\"ingress.routing.set_wcmp_group_id\",\"params\":[{\"bitwidth\":0,\"format\":\"STRING\",\"id\":1,\"name\":\"wcmp_group_id\",\"references\":[{\"match\":\"wcmp_group_id\",\"table\":\"wcmp_group_table\"}]}]}],\"alias\":\"vipv4_table\",\"counter/unit\":\"BOTH\",\"id\":33554500,\"matchFields\":[{\"bitwidth\":32,\"format\":\"IPV4\",\"id\":1,\"name\":\"ipv4_dst\",\"references\":null}],\"name\":\"ingress.routing.vipv4_table\"}]}" + + + def generate_app_db_key(self, context): + d = {} + d["context"] = context + key = json.dumps(d, separators=(",", ":")) + return self.TBL_NAME + ":" + key + + + # create tables definition set + def create_tables_definition(self, info=None): + info = info or self.INFO_VALUE + attr_list = [(self.INFO_FIELD, info)] + tables_definition_key = self.generate_app_db_key("0") + self.set_app_db_entry(tables_definition_key, attr_list) + return tables_definition_key, attr_list + diff --git a/tests/p4rt/test_l3.py b/tests/p4rt/test_l3.py index 2816f3d4fa..a16c8d3f03 100644 --- a/tests/p4rt/test_l3.py +++ b/tests/p4rt/test_l3.py @@ -2731,7 +2731,24 @@ def test_RemovePrunedWcmpGroupMember(self, dvs, testlog): == self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() ) - # Delete the pruned wcmp group member. + # Attempt to delete the next hop. Expect failure as the pruned WCMP + # group member is still referencing it. + self._p4rt_nexthop_obj.remove_app_db_entry(nexthop_key) + + # Verify that the P4RT key to OID count is same as before in Redis DB. 
+ status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Verify that the next hop still exists in app state db. + (status, fvs) = util.get_key( + self._p4rt_nexthop_obj.appl_state_db, + self._p4rt_nexthop_obj.APP_DB_TBL_NAME, + nexthop_key, + ) + assert status == True + + # Delete the pruned wcmp group member and try again. self._p4rt_wcmp_group_obj.remove_app_db_entry(wcmp_group_key) # Verify that P4RT key to OID count decremented by 1 in Redis DB. diff --git a/tests/p4rt/test_viplb.py b/tests/p4rt/test_viplb.py new file mode 100644 index 0000000000..fbb51ea48d --- /dev/null +++ b/tests/p4rt/test_viplb.py @@ -0,0 +1,282 @@ +from swsscommon import swsscommon + +import pytest +import json +import util +import time +import l3 +import viplb +import tables_definition + +def getCrmCounterValue(dvs, key, counter): + + counters_db = swsscommon.DBConnector(swsscommon.COUNTERS_DB, dvs.redis_sock, 0) + crm_stats_table = swsscommon.Table(counters_db, 'CRM') + + for k in crm_stats_table.get(key)[1]: + if k[0] == counter: + return int(k[1]) + + return 0 + +def crm_update(dvs, field, value): + cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(cfg_db, "CRM") + fvs = swsscommon.FieldValuePairs([(field, value)]) + tbl.set("Config", fvs) + time.sleep(1) + + +class TestP4RTVIPLB(object): + + def _set_up(self, dvs): + self._p4rt_tables_definition_obj = tables_definition.P4RtTableDefinitionWrapper() + self._p4rt_router_intf_obj = l3.P4RtRouterInterfaceWrapper() + self._p4rt_neighbor_obj = l3.P4RtNeighborWrapper() + self._p4rt_nexthop_obj = l3.P4RtNextHopWrapper() + self._p4rt_viplb_obj = viplb.P4RtVIPLBWrapper() + + self._p4rt_tables_definition_obj.set_up_databases(dvs) + self._p4rt_router_intf_obj.set_up_databases(dvs) + self._p4rt_neighbor_obj.set_up_databases(dvs) + self._p4rt_nexthop_obj.set_up_databases(dvs) + self._p4rt_viplb_obj.set_up_databases(dvs) + 
self.response_consumer = swsscommon.NotificationConsumer( + self._p4rt_viplb_obj.appl_db, "APPL_DB_" + + swsscommon.APP_P4RT_TABLE_NAME + "_RESPONSE_CHANNEL" + ) + + def test_VIPv4LBWithGoodNexthopAddUpdateDeletePass(self, dvs, testlog): + # Initialize L3 objects and database connectors. + self._set_up(dvs) + crm_update(dvs, "polling_interval", "1") + + # Create tables definition AppDb entry + tables_definition_key, attr_list = ( + self._p4rt_tables_definition_obj.create_tables_definition() + ) + util.verify_response(self.response_consumer, tables_definition_key, + attr_list, "SWSS_RC_SUCCESS") + + # Set IP type for viplb object. + self._p4rt_viplb_obj.set_ip_type("IPV4") + + # Maintain list of original Application and ASIC DB entries before + # adding new entry. + db_list = ((self._p4rt_nexthop_obj.asic_db, + self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME),) + self._p4rt_nexthop_obj.get_original_redis_entries(db_list) + db_list = ((self._p4rt_viplb_obj.appl_db, + "%s:%s" % (self._p4rt_viplb_obj.APP_DB_TBL_NAME, + self._p4rt_viplb_obj.TBL_NAME)), + (self._p4rt_viplb_obj.appl_state_db, + "%s:%s" % (self._p4rt_viplb_obj.APP_DB_TBL_NAME, + self._p4rt_viplb_obj.TBL_NAME)), + (self._p4rt_viplb_obj.asic_db, + self._p4rt_viplb_obj.ASIC_DB_TBL_NAME)) + self._p4rt_viplb_obj.get_original_redis_entries(db_list) + + # Fetch the original key to oid information from Redis DB. + key_to_oid_helper = util.KeyToOidDBHelper(dvs) + _, original_key_oid_info = key_to_oid_helper.get_db_info() + + # Create router interface. + router_interface_id, router_intf_key, attr_list = ( + self._p4rt_router_intf_obj.create_router_interface() + ) + util.verify_response(self.response_consumer, router_intf_key, + attr_list, "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count = 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create neighbor. 
+ neighbor_id, neighbor_key, attr_list = ( + self._p4rt_neighbor_obj.create_neighbor() + ) + util.verify_response(self.response_consumer, neighbor_key, attr_list, + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create nexthop. + first_nexthop_id, first_nexthop_key, attr_list = ( + self._p4rt_nexthop_obj.create_next_hop() + ) + util.verify_response(self.response_consumer, first_nexthop_key, attr_list, + "SWSS_RC_SUCCESS") + # get nexthop_oid of newly created nexthop + first_nexthop_oid = self._p4rt_nexthop_obj.get_newly_created_nexthop_oid() + assert first_nexthop_oid is not None + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create viplb. + viplb_key, attr_list = ( + self._p4rt_viplb_obj.create_viplb(first_nexthop_id) + ) + util.verify_response(self.response_consumer, viplb_key, attr_list, + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Query application database for viplb entries. + viplb_entries = util.get_keys( + self._p4rt_viplb_obj.appl_db, + self._p4rt_viplb_obj.APP_DB_TBL_NAME + ":" + self._p4rt_viplb_obj.TBL_NAME) + assert len(viplb_entries) == ( + self._p4rt_viplb_obj.get_original_appl_db_entries_count() + 1 + ) + + # Query application database for newly created viplb key. + (status, fvs) = util.get_key(self._p4rt_viplb_obj.appl_db, + self._p4rt_viplb_obj.APP_DB_TBL_NAME, + viplb_key) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query application state database for viplb entries. 
+ state_viplb_entries = util.get_keys( + self._p4rt_viplb_obj.appl_state_db, + self._p4rt_viplb_obj.APP_DB_TBL_NAME + ":" + self._p4rt_viplb_obj.TBL_NAME) + assert len(state_viplb_entries) == ( + self._p4rt_viplb_obj.get_original_appl_state_db_entries_count() + 1 + ) + + # Query application state database for newly created viplb key. + (status, fvs) = util.get_key(self._p4rt_viplb_obj.appl_state_db, + self._p4rt_viplb_obj.APP_DB_TBL_NAME, + viplb_key) + assert status == True + util.verify_attr(fvs, attr_list) + + + # get programmable_object_oid of newly created viplb + viplb_oid = self._p4rt_viplb_obj.get_newly_created_programmable_object_oid() + assert viplb_oid is not None + + # get crm counters + time.sleep(1) + used_counter = getCrmCounterValue(dvs, "EXT_TABLE_STATS:"+self._p4rt_viplb_obj.TBL_NAME, 'crm_stats_extension_table_used') + avail_counter = getCrmCounterValue(dvs, "EXT_TABLE_STATS:"+self._p4rt_viplb_obj.TBL_NAME, 'crm_stats_extension_table_available') + assert used_counter is 1 + + # Create another router interface. + router_interface_id, router_intf_key, attr_list = ( + self._p4rt_router_intf_obj.create_router_interface(router_interace_id="20") + ) + util.verify_response(self.response_consumer, router_intf_key, + attr_list, "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create another neighbor. + neighbor_id, neighbor_key, attr_list = ( + self._p4rt_neighbor_obj.create_neighbor(router_interface_id="20", neighbor_id="10.0.0.1") + ) + util.verify_response(self.response_consumer, neighbor_key, attr_list, + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create another nexthop. 
+ second_nexthop_id, second_nexthop_key, attr_list = ( + self._p4rt_nexthop_obj.create_next_hop(router_interface_id="20", neighbor_id="10.0.0.1", nexthop_id="16") + ) + util.verify_response(self.response_consumer, second_nexthop_key, attr_list, + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Update viplb. + viplb_key, attr_list = ( + self._p4rt_viplb_obj.create_viplb(second_nexthop_id) + ) + util.verify_response(self.response_consumer, viplb_key, attr_list, + "SWSS_RC_SUCCESS") + + + # Remove nexthop. + self._p4rt_nexthop_obj.remove_app_db_entry(first_nexthop_key) + util.verify_response(self.response_consumer, first_nexthop_key, [], + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count decremented by 1 in Redis DB. + count -= 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # get crm counters + time.sleep(1) + used_counter = getCrmCounterValue(dvs, "EXT_TABLE_STATS:"+self._p4rt_viplb_obj.TBL_NAME, 'crm_stats_extension_table_used') + avail_counter = getCrmCounterValue(dvs, "EXT_TABLE_STATS:"+self._p4rt_viplb_obj.TBL_NAME, 'crm_stats_extension_table_available') + assert used_counter is 1 + + # Remove viplb entry. + self._p4rt_viplb_obj.remove_app_db_entry(viplb_key) + util.verify_response( + self.response_consumer, viplb_key, [], "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count decremented by 1 in Redis DB. 
+ count -= 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # get crm counters + time.sleep(1) + used_counter = getCrmCounterValue(dvs, "EXT_TABLE_STATS:"+self._p4rt_viplb_obj.TBL_NAME, 'crm_stats_extension_table_used') + avail_counter = getCrmCounterValue(dvs, "EXT_TABLE_STATS:"+self._p4rt_viplb_obj.TBL_NAME, 'crm_stats_extension_table_available') + assert used_counter is 0 + + + def test_VIPv4LBWithBadNexthopAddUpdateDeletePass(self, dvs, testlog): + # Initialize L3 objects and database connectors. + self._set_up(dvs) + return + + # Create tables definition AppDb entry + tables_definition_key, attr_list = ( + self._p4rt_tables_definition_obj.create_tables_definition() + ) + util.verify_response(self.response_consumer, tables_definition_key, + attr_list, "SWSS_RC_SUCCESS") + + # Set IP type for viplb object. + self._p4rt_viplb_obj.set_ip_type("IPV4") + + # Create viplb. + viplb_key, attr_list = ( + self._p4rt_viplb_obj.create_viplb() + ) + util.verify_response(self.response_consumer, viplb_key, attr_list, + "SWSS_RC_INVALID_PARAM", "[OrchAgent] Cross-table reference valdiation failed, no OID found") + diff --git a/tests/p4rt/viplb.py b/tests/p4rt/viplb.py new file mode 100644 index 0000000000..06e61443fa --- /dev/null +++ b/tests/p4rt/viplb.py @@ -0,0 +1,74 @@ +from swsscommon import swsscommon + +import util +import json + + +class P4RtVIPLBWrapper(util.DBInterface): + """Interface to interact with APP DB and ASIC DB tables for P4RT viplb object.""" + + # database and SAI constants + APP_DB_TBL_NAME = swsscommon.APP_P4RT_TABLE_NAME + ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_GENERIC_PROGRAMMABLE" + SAI_ATTR_TYPE = "SAI_GENERIC_PROGRAMMABLE_ATTR_TYPE" + SAI_ATTR_OBJECT_NAME = "SAI_GENERIC_PROGRAMMABLE_ATTR_OBJECT_NAME" + SAI_ATTR_ENTRY = "SAI_GENERIC_PROGRAMMABLE_ATTR_ENTRY" + + # default viplb attribute values + DEFAULT_ACTION = "set_nexthop_id" + DEFAULT_NEXTHOP_ID = "18" + 
DEFAULT_DST = "10.11.12.0/24" + + # attribute fields for viplb object + NEXTHOP_ID_FIELD = "nexthop_id" + + def generate_app_db_key(self, dst): + assert self.ip_type is not None + d = {} + if self.ip_type == "IPV4": + d[util.prepend_match_field("ipv4_dst")] = dst + else: + d[util.prepend_match_field("ipv6_dst")] = dst + key = json.dumps(d, separators=(",", ":")) + return self.TBL_NAME + ":" + key + + def set_ip_type(self, ip_type): + assert ip_type in ("IPV4", "IPV6") + self.ip_type = ip_type + self.TBL_NAME = "EXT_V" + ip_type + "_TABLE" + + # Create entry + def create_viplb(self, nexthop_id=None, action=None, dst=None): + action = action or self.DEFAULT_ACTION + dst = dst or self.DEFAULT_DST + if action == "set_nexthop_id": + nexthop_id = nexthop_id or self.DEFAULT_NEXTHOP_ID + attr_list = [(self.ACTION_FIELD, action), + (util.prepend_param_field(self.NEXTHOP_ID_FIELD), + nexthop_id)] + else: + attr_list = [(self.ACTION_FIELD, action)] + viplb_key = self.generate_app_db_key(dst) + self.set_app_db_entry(viplb_key, attr_list) + return viplb_key, attr_list + + def get_newly_created_programmable_object_oid(self): + viplb_oid = None + viplb_entries = util.get_keys(self.asic_db, self.ASIC_DB_TBL_NAME) + for key in viplb_entries: + if key not in self._original_entries["{}:{}".format(self.asic_db, + self.ASIC_DB_TBL_NAME)]: + viplb_oid = key + break + return viplb_oid + + def get_original_appl_db_entries_count(self): + return len(self._original_entries["%s:%s" % (self.appl_db, + (self.APP_DB_TBL_NAME + ":" + + self.TBL_NAME))]) + + def get_original_appl_state_db_entries_count(self): + return len(self._original_entries["%s:%s" % (self.appl_state_db, + (self.APP_DB_TBL_NAME + ":" + + self.TBL_NAME))]) + diff --git a/tests/run-tests.sh b/tests/run-tests.sh new file mode 100755 index 0000000000..b9cdadf783 --- /dev/null +++ b/tests/run-tests.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +IMAGE_NAME=$1 +PY_TEST_PARAMS="$2" +TESTS="$3" +RETRY=$4 +[ -z "$RETRY" ] && RETRY=1 
+JUNITXML=$(echo "$TESTS" | cut -d "." -f1)_tr.xml + +set -x +for ((i=1; i<=$RETRY; i++)); do + echo "Running the py test for tests: $TESTS, $i/$RETRY..." + py.test -v --force-flaky --junitxml="$JUNITXML" $PY_TEST_PARAMS --imgname="$IMAGE_NAME" $TESTS && break +done diff --git a/tests/test_acl.py b/tests/test_acl.py index ac7e7fda87..cf68d1516e 100644 --- a/tests/test_acl.py +++ b/tests/test_acl.py @@ -25,11 +25,11 @@ PFCWD_TABLE_NAME = "PFCWD_TEST" PFCWD_BIND_PORTS = ["Ethernet0", "Ethernet4", "Ethernet8", "Ethernet12"] class TestAcl: - @pytest.fixture - def l3_acl_table(self, dvs_acl): + @pytest.fixture(params=['ingress', 'egress']) + def l3_acl_table(self, dvs_acl, request): try: - dvs_acl.create_acl_table(L3_TABLE_NAME, L3_TABLE_TYPE, L3_BIND_PORTS) - yield dvs_acl.get_acl_table_ids(1)[0] + dvs_acl.create_acl_table(L3_TABLE_NAME, L3_TABLE_TYPE, L3_BIND_PORTS, stage=request.param) + yield dvs_acl.get_acl_table_ids(1)[0], request.param finally: dvs_acl.remove_acl_table(L3_TABLE_NAME) dvs_acl.verify_acl_table_count(0) @@ -100,9 +100,36 @@ def test_AclTableCreationDeletion(self, dvs_acl): dvs_acl.verify_acl_table_group_members(acl_table_id, acl_table_group_ids, 1) dvs_acl.verify_acl_table_port_binding(acl_table_id, L3_BIND_PORTS, 1) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_table_status(L3_TABLE_NAME, "Active") finally: dvs_acl.remove_acl_table(L3_TABLE_NAME) dvs_acl.verify_acl_table_count(0) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_table_status(L3_TABLE_NAME, None) + + def test_InvalidAclTableCreationDeletion(self, dvs_acl): + try: + dvs_acl.create_acl_table("INVALID_ACL_TABLE", L3_TABLE_TYPE, "dummy_port", "invalid_stage") + # Verify status is written into STATE_DB + dvs_acl.verify_acl_table_status("INVALID_ACL_TABLE", "Inactive") + finally: + dvs_acl.remove_acl_table("INVALID_ACL_TABLE") + dvs_acl.verify_acl_table_count(0) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_table_status("INVALID_ACL_TABLE", 
None) + + def test_InvalidAclRuleCreation(self, dvs_acl, l3_acl_table): + config_qualifiers = {"INVALID_QUALIFIER": "TEST"} + + dvs_acl.create_acl_rule(L3_TABLE_NAME, "INVALID_RULE", config_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, "INVALID_RULE", "Inactive") + + dvs_acl.remove_acl_rule(L3_TABLE_NAME, "INVALID_RULE") + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, "INVALID_RULE", None) + dvs_acl.verify_no_acl_rules() def test_AclRuleL4SrcPort(self, dvs_acl, l3_acl_table): config_qualifiers = {"L4_SRC_PORT": "65000"} @@ -112,8 +139,12 @@ def test_AclRuleL4SrcPort(self, dvs_acl, l3_acl_table): dvs_acl.create_acl_rule(L3_TABLE_NAME, L3_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3_TABLE_NAME, L3_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_AclRuleIpProtocol(self, dvs_acl, l3_acl_table): @@ -124,8 +155,12 @@ def test_AclRuleIpProtocol(self, dvs_acl, l3_acl_table): dvs_acl.create_acl_rule(L3_TABLE_NAME, L3_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3_TABLE_NAME, L3_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_AclRuleTCPProtocolAppendedForTCPFlags(self, dvs_acl, l3_acl_table): @@ -141,8 +176,12 @@ def test_AclRuleTCPProtocolAppendedForTCPFlags(self, dvs_acl, l3_acl_table): } dvs_acl.create_acl_rule(L3_TABLE_NAME, L3_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # 
Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3_TABLE_NAME, L3_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_AclRuleNextHeader(self, dvs_acl, l3_acl_table): @@ -150,9 +189,13 @@ def test_AclRuleNextHeader(self, dvs_acl, l3_acl_table): # Shouldn't allow NEXT_HEADER on vanilla L3 tables. dvs_acl.create_acl_rule(L3_TABLE_NAME, L3_RULE_NAME, config_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, "Inactive") dvs_acl.verify_no_acl_rules() dvs_acl.remove_acl_rule(L3_TABLE_NAME, L3_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_V6AclRuleNextHeaderAppendedForTCPFlags(self, dvs_acl, l3v6_acl_table): @@ -169,8 +212,12 @@ def test_V6AclRuleNextHeaderAppendedForTCPFlags(self, dvs_acl, l3v6_acl_table): dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_AclRuleInPorts(self, dvs_acl, mirror_acl_table): @@ -187,9 +234,13 @@ def test_AclRuleInPorts(self, dvs_acl, mirror_acl_table): } dvs_acl.create_acl_rule(MIRROR_TABLE_NAME, MIRROR_RULE_NAME, config_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(MIRROR_TABLE_NAME, MIRROR_RULE_NAME, "Active") dvs_acl.verify_acl_rule(expected_sai_qualifiers) dvs_acl.remove_acl_rule(MIRROR_TABLE_NAME, MIRROR_RULE_NAME) + # Verify the 
STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(MIRROR_TABLE_NAME, MIRROR_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_AclRuleOutPorts(self, dvs_acl, mclag_acl_table): @@ -207,8 +258,12 @@ def test_AclRuleOutPorts(self, dvs_acl, mclag_acl_table): dvs_acl.create_acl_rule(MCLAG_TABLE_NAME, MCLAG_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(MCLAG_TABLE_NAME, MCLAG_RULE_NAME, "Active") dvs_acl.remove_acl_rule(MCLAG_TABLE_NAME, MCLAG_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(MCLAG_TABLE_NAME, MCLAG_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_AclRuleInPortsNonExistingInterface(self, dvs_acl, mirror_acl_table): @@ -220,9 +275,12 @@ def test_AclRuleInPortsNonExistingInterface(self, dvs_acl, mirror_acl_table): } dvs_acl.create_acl_rule(MIRROR_TABLE_NAME, MIRROR_RULE_NAME, config_qualifiers) - + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(MIRROR_TABLE_NAME, MIRROR_RULE_NAME, "Inactive") dvs_acl.verify_no_acl_rules() dvs_acl.remove_acl_rule(MIRROR_TABLE_NAME, MIRROR_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(MIRROR_TABLE_NAME, MIRROR_RULE_NAME, None) def test_AclRuleOutPortsNonExistingInterface(self, dvs_acl, mclag_acl_table): """ @@ -233,9 +291,12 @@ def test_AclRuleOutPortsNonExistingInterface(self, dvs_acl, mclag_acl_table): } dvs_acl.create_acl_rule(MCLAG_TABLE_NAME, MCLAG_RULE_NAME, config_qualifiers) - + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(MCLAG_TABLE_NAME, MCLAG_RULE_NAME, "Inactive") dvs_acl.verify_no_acl_rules() dvs_acl.remove_acl_rule(MCLAG_TABLE_NAME, MCLAG_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(MCLAG_TABLE_NAME, MCLAG_RULE_NAME, None) def test_AclRuleVlanId(self, dvs_acl, l3_acl_table): config_qualifiers = {"VLAN_ID": "100"} @@ 
-244,9 +305,29 @@ def test_AclRuleVlanId(self, dvs_acl, l3_acl_table): } dvs_acl.create_acl_rule(L3_TABLE_NAME, L3_RULE_NAME, config_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, "Active") dvs_acl.verify_acl_rule(expected_sai_qualifiers) dvs_acl.remove_acl_rule(L3_TABLE_NAME, L3_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, None) + dvs_acl.verify_no_acl_rules() + + def test_AclRuleIPTypeNonIpv4(self, dvs_acl, l3_acl_table): + config_qualifiers = {"IP_TYPE": "NON_IPv4"} + expected_sai_qualifiers = { + "SAI_ACL_ENTRY_ATTR_FIELD_ACL_IP_TYPE": dvs_acl.get_simple_qualifier_comparator("SAI_ACL_IP_TYPE_NON_IPV4&mask:0xffffffffffffffff") + } + + dvs_acl.create_acl_rule(L3_TABLE_NAME, L3_RULE_NAME, config_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, "Active") + dvs_acl.verify_acl_rule(expected_sai_qualifiers) + + dvs_acl.remove_acl_rule(L3_TABLE_NAME, L3_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_V6AclTableCreationDeletion(self, dvs_acl): @@ -259,10 +340,30 @@ def test_V6AclTableCreationDeletion(self, dvs_acl): acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(L3V6_BIND_PORTS)) dvs_acl.verify_acl_table_group_members(acl_table_id, acl_table_group_ids, 1) dvs_acl.verify_acl_table_port_binding(acl_table_id, L3V6_BIND_PORTS, 1) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_table_status(L3V6_TABLE_NAME, "Active") finally: dvs_acl.remove_acl_table(L3V6_TABLE_NAME) + # Verify the STATE_DB entry is cleared + dvs_acl.verify_acl_table_status(L3V6_TABLE_NAME, None) dvs_acl.verify_acl_table_count(0) + def test_V6AclRuleIPTypeNonIpv6(self, dvs_acl, l3v6_acl_table): + config_qualifiers = {"IP_TYPE": "NON_IPv6"} + expected_sai_qualifiers = { 
+ "SAI_ACL_ENTRY_ATTR_FIELD_ACL_IP_TYPE": dvs_acl.get_simple_qualifier_comparator("SAI_ACL_IP_TYPE_NON_IPV6&mask:0xffffffffffffffff") + } + + dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") + dvs_acl.verify_acl_rule(expected_sai_qualifiers) + + dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) + dvs_acl.verify_no_acl_rules() + def test_V6AclRuleIPv6Any(self, dvs_acl, l3v6_acl_table): config_qualifiers = {"IP_TYPE": "IPv6ANY"} expected_sai_qualifiers = { @@ -270,9 +371,13 @@ def test_V6AclRuleIPv6Any(self, dvs_acl, l3v6_acl_table): } dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.verify_acl_rule(expected_sai_qualifiers) dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_V6AclRuleIPv6AnyDrop(self, dvs_acl, l3v6_acl_table): @@ -286,8 +391,12 @@ def test_V6AclRuleIPv6AnyDrop(self, dvs_acl, l3v6_acl_table): config_qualifiers, action="DROP") dvs_acl.verify_acl_rule(expected_sai_qualifiers, action="DROP") + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() # This test validates that backwards compatibility works as expected, it should @@ -300,8 +409,12 @@ def test_V6AclRuleIpProtocol(self, dvs_acl, l3v6_acl_table): 
dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_V6AclRuleNextHeader(self, dvs_acl, l3v6_acl_table): @@ -312,8 +425,12 @@ def test_V6AclRuleNextHeader(self, dvs_acl, l3v6_acl_table): dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_V6AclRuleSrcIPv6(self, dvs_acl, l3v6_acl_table): @@ -325,8 +442,12 @@ def test_V6AclRuleSrcIPv6(self, dvs_acl, l3v6_acl_table): dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_V6AclRuleDstIPv6(self, dvs_acl, l3v6_acl_table): @@ -337,8 +458,12 @@ def test_V6AclRuleDstIPv6(self, dvs_acl, l3v6_acl_table): dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") 
dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_V6AclRuleL4SrcPort(self, dvs_acl, l3v6_acl_table): @@ -349,8 +474,12 @@ def test_V6AclRuleL4SrcPort(self, dvs_acl, l3v6_acl_table): dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_V6AclRuleL4DstPort(self, dvs_acl, l3v6_acl_table): @@ -361,8 +490,12 @@ def test_V6AclRuleL4DstPort(self, dvs_acl, l3v6_acl_table): dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_V6AclRuleL4SrcPortRange(self, dvs_acl, l3v6_acl_table): @@ -373,8 +506,12 @@ def test_V6AclRuleL4SrcPortRange(self, dvs_acl, l3v6_acl_table): dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def 
test_V6AclRuleL4DstPortRange(self, dvs_acl, l3v6_acl_table): @@ -385,8 +522,12 @@ def test_V6AclRuleL4DstPortRange(self, dvs_acl, l3v6_acl_table): dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_V6AclRuleVlanId(self, dvs_acl, l3v6_acl_table): @@ -397,8 +538,12 @@ def test_V6AclRuleVlanId(self, dvs_acl, l3v6_acl_table): dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_InsertAclRuleBetweenPriorities(self, dvs_acl, l3_acl_table): @@ -430,6 +575,8 @@ def test_InsertAclRuleBetweenPriorities(self, dvs_acl, l3_acl_table): f"PRIORITY_TEST_RULE_{rule}", config_qualifiers[rule], action=config_actions[rule], priority=rule) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, f"PRIORITY_TEST_RULE_{rule}", "Active") dvs_acl.verify_acl_rule_set(rule_priorities, config_actions, expected_sai_qualifiers) @@ -447,9 +594,12 @@ def test_InsertAclRuleBetweenPriorities(self, dvs_acl, l3_acl_table): action="DROP", priority=odd_priority) dvs_acl.verify_acl_rule_set(rule_priorities, config_actions, expected_sai_qualifiers) - + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, f"PRIORITY_TEST_RULE_{odd_priority}", 
"Active") for rule in rule_priorities: dvs_acl.remove_acl_rule(L3_TABLE_NAME, f"PRIORITY_TEST_RULE_{rule}") + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, f"PRIORITY_TEST_RULE_{rule}", None) dvs_acl.verify_no_acl_rules() def test_RulesWithDiffMaskLengths(self, dvs_acl, l3_acl_table): @@ -488,10 +638,14 @@ def test_RulesWithDiffMaskLengths(self, dvs_acl, l3_acl_table): config_qualifiers[rule], action=config_actions[rule], priority=rule) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, f"MASK_TEST_RULE_{rule}", "Active") dvs_acl.verify_acl_rule_set(rule_priorities, config_actions, expected_sai_qualifiers) for rule in rule_priorities: dvs_acl.remove_acl_rule(L3_TABLE_NAME, f"MASK_TEST_RULE_{rule}") + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, f"MASK_TEST_RULE_{rule}", None) dvs_acl.verify_no_acl_rules() def test_AclRuleIcmp(self, dvs_acl, l3_acl_table): @@ -507,8 +661,12 @@ def test_AclRuleIcmp(self, dvs_acl, l3_acl_table): dvs_acl.create_acl_rule(L3_TABLE_NAME, L3_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3_TABLE_NAME, L3_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, None) dvs_acl.verify_no_acl_rules() dvs_acl.remove_acl_table(L3_TABLE_NAME) @@ -527,8 +685,12 @@ def test_AclRuleIcmpV6(self, dvs_acl, l3v6_acl_table): dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, 
L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_AclRuleRedirect(self, dvs, dvs_acl, l3_acl_table, setup_teardown_neighbor): @@ -546,8 +708,11 @@ def test_AclRuleRedirect(self, dvs, dvs_acl, l3_acl_table, setup_teardown_neighb next_hop_id = setup_teardown_neighbor dvs_acl.verify_redirect_acl_rule(expected_sai_qualifiers, next_hop_id, priority="20") - + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3_TABLE_NAME, L3_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, None) dvs_acl.verify_no_acl_rules() dvs_acl.create_redirect_acl_rule(L3_TABLE_NAME, @@ -558,8 +723,11 @@ def test_AclRuleRedirect(self, dvs, dvs_acl, l3_acl_table, setup_teardown_neighb intf_id = dvs.asic_db.port_name_map["Ethernet4"] dvs_acl.verify_redirect_acl_rule(expected_sai_qualifiers, intf_id, priority="20") - + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3_TABLE_NAME, L3_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_AclTableMandatoryMatchFields(self, dvs, pfcwd_acl_table): @@ -577,6 +745,20 @@ def test_AclTableMandatoryMatchFields(self, dvs, pfcwd_acl_table): assert match_in_ports else: assert not match_in_ports + + def test_AclTableMandatoryRangeFields(self, dvs, l3_acl_table): + """ + The test case is to verify range qualifier is not applied for egress ACL + """ + table_oid, stage = l3_acl_table + match_range_qualifier = False + entry = dvs.asic_db.wait_for_entry("ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE", table_oid) + for k, v in entry.items(): + if k == "SAI_ACL_TABLE_ATTR_FIELD_ACL_RANGE_TYPE" and v == "true": + match_range_qualifier = True + + assert not match_range_qualifier + class TestAclCrmUtilization: 
@pytest.fixture(scope="class", autouse=True) def configure_crm_polling_interval_for_test(self, dvs): diff --git a/tests/test_acl_egress_table.py b/tests/test_acl_egress_table.py index 0697dae6ee..c96af74644 100644 --- a/tests/test_acl_egress_table.py +++ b/tests/test_acl_egress_table.py @@ -14,6 +14,8 @@ "VLAN_ID" ] CUSTOM_TABLE_TYPE_BPOINT_TYPES = ["PORT","PORTCHANNEL"] +CUSTOM_TABLE_TYPE_ACTIONS = ["PACKET_ACTION,COUNTER"] +EXPECTED_ACTION_LIST = ['SAI_ACL_ACTION_TYPE_PACKET_ACTION','SAI_ACL_ACTION_TYPE_COUNTER'] TABLE_NAME = "EGRESS_TEST" BIND_PORTS = ["Ethernet0", "Ethernet4"] RULE_NAME = "EGRESS_TEST_RULE" @@ -23,7 +25,7 @@ class TestEgressAclTable: @pytest.fixture def egress_acl_table(self, dvs_acl): try: - dvs_acl.create_acl_table_type(TABLE_TYPE, CUSTOM_TABLE_TYPE_MATCHES, CUSTOM_TABLE_TYPE_BPOINT_TYPES) + dvs_acl.create_acl_table_type(TABLE_TYPE, CUSTOM_TABLE_TYPE_MATCHES, CUSTOM_TABLE_TYPE_BPOINT_TYPES, CUSTOM_TABLE_TYPE_ACTIONS) dvs_acl.create_acl_table(TABLE_NAME, TABLE_TYPE, BIND_PORTS, stage="egress") yield dvs_acl.get_acl_table_ids(1)[0] finally: @@ -33,7 +35,7 @@ def egress_acl_table(self, dvs_acl): def test_EgressAclTableCreationDeletion(self, dvs_acl): try: - dvs_acl.create_acl_table_type(TABLE_TYPE, CUSTOM_TABLE_TYPE_MATCHES, CUSTOM_TABLE_TYPE_BPOINT_TYPES) + dvs_acl.create_acl_table_type(TABLE_TYPE, CUSTOM_TABLE_TYPE_MATCHES, CUSTOM_TABLE_TYPE_BPOINT_TYPES, CUSTOM_TABLE_TYPE_ACTIONS) dvs_acl.create_acl_table(TABLE_NAME, TABLE_TYPE, BIND_PORTS, stage="egress") acl_table_id = dvs_acl.get_acl_table_ids(1)[0] @@ -41,6 +43,7 @@ def test_EgressAclTableCreationDeletion(self, dvs_acl): dvs_acl.verify_acl_table_group_members(acl_table_id, acl_table_group_ids, 1) dvs_acl.verify_acl_table_port_binding(acl_table_id, BIND_PORTS, 1, stage="egress") + dvs_acl.verify_acl_table_action_list(acl_table_id, EXPECTED_ACTION_LIST) finally: dvs_acl.remove_acl_table(TABLE_NAME) dvs_acl.remove_acl_table_type(TABLE_TYPE) diff --git a/tests/test_acl_l3v4v6.py 
b/tests/test_acl_l3v4v6.py new file mode 100644 index 0000000000..2a5e044f52 --- /dev/null +++ b/tests/test_acl_l3v4v6.py @@ -0,0 +1,99 @@ +import pytest +from requests import request + +L3V4V6_TABLE_TYPE = "L3V4V6" +L3V4V6_TABLE_NAME = "L3_V4V6_TEST" +L3V4V6_BIND_PORTS = ["Ethernet0", "Ethernet4", "Ethernet8"] +L3V4V6_RULE_NAME = "L3V4V6_TEST_RULE" + +class TestAcl: + @pytest.fixture + def l3v4v6_acl_table(self, dvs_acl): + try: + dvs_acl.create_acl_table(L3V4V6_TABLE_NAME, + L3V4V6_TABLE_TYPE, + L3V4V6_BIND_PORTS) + yield dvs_acl.get_acl_table_ids(1)[0] + finally: + dvs_acl.remove_acl_table(L3V4V6_TABLE_NAME) + dvs_acl.verify_acl_table_count(0) + + @pytest.fixture + def setup_teardown_neighbor(self, dvs): + try: + # NOTE: set_interface_status has a dependency on cdb within dvs, + # so we still need to setup the db. This should be refactored. + dvs.setup_db() + + # Bring up an IP interface with a neighbor + dvs.set_interface_status("Ethernet4", "up") + dvs.add_ip_address("Ethernet4", "10.0.0.1/24") + dvs.add_neighbor("Ethernet4", "10.0.0.2", "00:01:02:03:04:05") + + yield dvs.get_asic_db().wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", 1)[0] + finally: + # Clean up the IP interface and neighbor + dvs.remove_neighbor("Ethernet4", "10.0.0.2") + dvs.remove_ip_address("Ethernet4", "10.0.0.1/24") + dvs.set_interface_status("Ethernet4", "down") + + def test_L3V4V6AclTableCreationDeletion(self, dvs_acl): + try: + dvs_acl.create_acl_table(L3V4V6_TABLE_NAME, L3V4V6_TABLE_TYPE, L3V4V6_BIND_PORTS) + + acl_table_id = dvs_acl.get_acl_table_ids(1)[0] + acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(L3V4V6_BIND_PORTS)) + + dvs_acl.verify_acl_table_group_members(acl_table_id, acl_table_group_ids, 1) + dvs_acl.verify_acl_table_port_binding(acl_table_id, L3V4V6_BIND_PORTS, 1) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_table_status(L3V4V6_TABLE_NAME, "Active") + finally: + dvs_acl.remove_acl_table(L3V4V6_TABLE_NAME) + 
dvs_acl.verify_acl_table_count(0) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_table_status(L3V4V6_TABLE_NAME, None) + + def test_ValidAclRuleCreation_sip_dip(self, dvs_acl, l3v4v6_acl_table): + config_qualifiers = {"DST_IP": "20.0.0.1/32", + "SRC_IP": "10.0.0.0/32"}; + + dvs_acl.create_acl_rule(L3V4V6_TABLE_NAME, "VALID_RULE", config_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V4V6_TABLE_NAME, "VALID_RULE", "Active") + + dvs_acl.remove_acl_rule(L3V4V6_TABLE_NAME, "VALID_RULE") + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V4V6_TABLE_NAME, "VALID_RULE", None) + dvs_acl.verify_no_acl_rules() + + def test_InvalidAclRuleCreation_sip_sipv6(self, dvs_acl, l3v4v6_acl_table): + config_qualifiers = {"SRC_IPV6": "2777::0/64", + "SRC_IP": "10.0.0.0/32"}; + + dvs_acl.create_acl_rule(L3V4V6_TABLE_NAME, "INVALID_RULE", config_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V4V6_TABLE_NAME, "INVALID_RULE", "Inactive") + + dvs_acl.remove_acl_rule(L3V4V6_TABLE_NAME, "INVALID_RULE") + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V4V6_TABLE_NAME, "INVALID_RULE", None) + dvs_acl.verify_no_acl_rules() + + def test_InvalidAclRuleCreation_dip_sipv6(self, dvs_acl, l3v4v6_acl_table): + config_qualifiers = {"SRC_IPV6": "2777::0/64", + "DST_IP": "10.0.0.0/32"}; + + dvs_acl.create_acl_rule(L3V4V6_TABLE_NAME, "INVALID_RULE", config_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V4V6_TABLE_NAME, "INVALID_RULE", "Inactive") + + dvs_acl.remove_acl_rule(L3V4V6_TABLE_NAME, "INVALID_RULE") + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V4V6_TABLE_NAME, "INVALID_RULE", None) + dvs_acl.verify_no_acl_rules() + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def 
test_nonflaky_dummy(): + pass diff --git a/tests/test_admin_status.py b/tests/test_admin_status.py index 1b99bf37c7..6aac5cc691 100644 --- a/tests/test_admin_status.py +++ b/tests/test_admin_status.py @@ -8,6 +8,7 @@ class TestAdminStatus(object): def setup_db(self, dvs): self.pdb = swsscommon.DBConnector(0, dvs.redis_sock, 0) self.adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) + self.countdb = swsscommon.DBConnector(2, dvs.redis_sock, 0) self.cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) self.sdb = swsscommon.DBConnector(6, dvs.redis_sock, 0) @@ -43,6 +44,19 @@ def remove_port_channel_members(self, dvs, lag, members): tbl._del(lag + "|" + member) time.sleep(1) + def update_host_tx_ready_status(self, dvs, port_id, switch_id, admin_state): + host_tx_ready = "SAI_PORT_HOST_TX_READY_STATUS_READY" if admin_state == "up" else "SAI_PORT_HOST_TX_READY_STATUS_NOT_READY" + ntf = swsscommon.NotificationProducer(dvs.adb, "NOTIFICATIONS") + fvp = swsscommon.FieldValuePairs() + ntf_data = "[{\"host_tx_ready_status\":\""+host_tx_ready+"\",\"port_id\":\""+port_id+"\",\"switch_id\":\""+switch_id+"\"}]" + ntf.send("port_host_tx_ready", ntf_data, fvp) + + def get_port_id(self, dvs, port_name): + port_name_map = swsscommon.Table(self.countdb, "COUNTERS_PORT_NAME_MAP") + status, returned_value = port_name_map.hget("", port_name) + assert status == True + return returned_value + def check_admin_status(self, dvs, port, admin_status): assert admin_status == "up" or admin_status == "down" tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_PORT") @@ -91,8 +105,12 @@ def test_PortChannelMemberAdminStatus(self, dvs, testlog): self.remove_port_channel(dvs, "PortChannel6") def test_PortHostTxReadiness(self, dvs, testlog): + dvs.setup_db() self.setup_db(dvs) + #Find switch_id + switch_id = dvs.getSwitchOid() + # configure admin status to interface self.set_admin_status("Ethernet0", "up") self.set_admin_status("Ethernet4", "down") @@ -103,6 +121,11 @@ def 
test_PortHostTxReadiness(self, dvs, testlog): self.check_admin_status(dvs, "Ethernet4", "down") self.check_admin_status(dvs, "Ethernet8", "up") + self.update_host_tx_ready_status(dvs, self.get_port_id(dvs, "Ethernet0") , switch_id, "up") + self.update_host_tx_ready_status(dvs, self.get_port_id(dvs, "Ethernet4") , switch_id, "down") + self.update_host_tx_ready_status(dvs, self.get_port_id(dvs, "Ethernet8") , switch_id, "up") + time.sleep(3) + # check host readiness status in PORT TABLE of STATE-DB self.check_host_tx_ready_status(dvs, "Ethernet0", "up") self.check_host_tx_ready_status(dvs, "Ethernet4", "down") diff --git a/tests/test_bfd.py b/tests/test_bfd.py index 2feef60acb..5cd18bbe05 100644 --- a/tests/test_bfd.py +++ b/tests/test_bfd.py @@ -9,6 +9,7 @@ def setup_db(self, dvs): self.pdb = dvs.get_app_db() self.adb = dvs.get_asic_db() self.sdb = dvs.get_state_db() + self.cdb = dvs.get_config_db() def get_exist_bfd_session(self): return set(self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION")) @@ -43,13 +44,29 @@ def update_bfd_session_state(self, dvs, session, state): ntf_data = "[{\"bfd_session_id\":\""+session+"\",\"session_state\":\""+bfd_sai_state[state]+"\"}]" ntf.send("bfd_session_state_change", ntf_data, fvp) + def update_bgp_global_dev_state(self, state): + tbl = swsscommon.Table(self.cdb.db_connection, "BGP_DEVICE_GLOBAL") + fvs = swsscommon.FieldValuePairs(list(state.items())) + key = "STATE" + tbl.set(key, fvs) + time.sleep(1) + + def set_tsa(self): + state = {"tsa_enabled": "true"} + self.update_bgp_global_dev_state(state) + + def clear_tsa(self): + state = {"tsa_enabled": "false"} + self.update_bgp_global_dev_state(state) + + def test_addRemoveBfdSession(self, dvs): self.setup_db(dvs) bfdSessions = self.get_exist_bfd_session() # Create BFD session - fieldValues = {"local_addr": "10.0.0.1"} + fieldValues = {"local_addr": "10.0.0.1","tos":"64"} self.create_bfd_session("default:default:10.0.0.2", fieldValues) 
self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", len(bfdSessions) + 1) @@ -62,13 +79,14 @@ def test_addRemoveBfdSession(self, dvs): "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "64", "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4" } self.check_asic_bfd_session_value(session, expected_adb_values) # Check STATE_DB entry related to the BFD session expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "1000", "multiplier" : "10", "multihop": "false", "local_discriminator" : "1"} self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) # Send BFD session state notification to update BFD session state @@ -102,13 +120,14 @@ def test_addRemoveBfdSession_ipv6(self, dvs): "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "2000::1", "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "2000::2", "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "192", "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "6" } self.check_asic_bfd_session_value(session, expected_adb_values) # Check STATE_DB entry related to the BFD session expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "2000::1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "1000", "multiplier" : "10", "multihop": "false", "local_discriminator" : "2"} self.check_state_bfd_session_value("default|default|2000::2", expected_sdb_values) # Send BFD session state notification to update BFD session state @@ -142,6 +161,7 @@ def test_addRemoveBfdSession_interface(self, dvs): "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", "SAI_BFD_SESSION_ATTR_TYPE": 
"SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "192", "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4", "SAI_BFD_SESSION_ATTR_HW_LOOKUP_VALID": "false", "SAI_BFD_SESSION_ATTR_DST_MAC_ADDRESS": "00:02:03:04:05:06" @@ -150,7 +170,7 @@ def test_addRemoveBfdSession_interface(self, dvs): # Check STATE_DB entry related to the BFD session expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "1000", "multiplier" : "10", "multihop": "false", "local_discriminator" : "3"} self.check_state_bfd_session_value("default|Ethernet0|10.0.0.2", expected_sdb_values) # Send BFD session state notification to update BFD session state @@ -184,6 +204,7 @@ def test_addRemoveBfdSession_txrx_interval(self, dvs): "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "192", "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4", "SAI_BFD_SESSION_ATTR_MIN_TX": "300000", "SAI_BFD_SESSION_ATTR_MIN_RX": "500000", @@ -192,7 +213,7 @@ def test_addRemoveBfdSession_txrx_interval(self, dvs): # Check STATE_DB entry related to the BFD session expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"300", - "rx_interval" : "500", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "500", "multiplier" : "10", "multihop": "false", "local_discriminator" : "4"} self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) # Send BFD session state notification to update BFD session state @@ -226,6 +247,7 @@ def test_addRemoveBfdSession_multiplier(self, dvs): "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + 
"SAI_BFD_SESSION_ATTR_TOS": "192", "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4", "SAI_BFD_SESSION_ATTR_MULTIPLIER": "5" } @@ -233,7 +255,7 @@ def test_addRemoveBfdSession_multiplier(self, dvs): # Check STATE_DB entry related to the BFD session expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "5", "multihop": "false"} + "rx_interval" : "1000", "multiplier" : "5", "multihop": "false", "local_discriminator" : "5"} self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) # Send BFD session state notification to update BFD session state @@ -267,6 +289,7 @@ def test_addRemoveBfdSession_multihop(self, dvs): "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "192", "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4", "SAI_BFD_SESSION_ATTR_MULTIHOP": "true" } @@ -274,7 +297,7 @@ def test_addRemoveBfdSession_multihop(self, dvs): # Check STATE_DB entry related to the BFD session expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "3", "multihop": "true"} + "rx_interval" : "1000", "multiplier" : "10", "multihop": "true", "local_discriminator" : "6"} self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) # Send BFD session state notification to update BFD session state @@ -308,13 +331,14 @@ def test_addRemoveBfdSession_type(self, dvs): "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_DEMAND_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "192", "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4" } self.check_asic_bfd_session_value(session, expected_adb_values) # Check STATE_DB entry related to the BFD 
session expected_sdb_values = {"state": "Down", "type": "demand_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "1000", "multiplier" : "10", "multihop": "false", "local_discriminator" : "7"} self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) # Send BFD session state notification to update BFD session state @@ -350,6 +374,7 @@ def test_multipleBfdSessions(self, dvs): "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "192", "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4" } self.check_asic_bfd_session_value(session1, expected_adb_values) @@ -357,7 +382,7 @@ def test_multipleBfdSessions(self, dvs): # Check STATE_DB entry related to the BFD session 1 key_state_db1 = "default|default|10.0.0.2" expected_sdb_values1 = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "1000", "multiplier" : "10", "multihop": "false", "local_discriminator" : "8"} self.check_state_bfd_session_value(key_state_db1, expected_sdb_values1) # Create BFD session 2 @@ -376,6 +401,7 @@ def test_multipleBfdSessions(self, dvs): "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.1.2", "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "192", "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4", "SAI_BFD_SESSION_ATTR_MIN_TX": "300000", "SAI_BFD_SESSION_ATTR_MIN_RX": "500000", @@ -385,7 +411,7 @@ def test_multipleBfdSessions(self, dvs): # Check STATE_DB entry related to the BFD session 2 key_state_db2 = "default|default|10.0.1.2" expected_sdb_values2 = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" 
:"300", - "rx_interval" : "500", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "500", "multiplier" : "10", "multihop": "false", "local_discriminator" : "9"} self.check_state_bfd_session_value(key_state_db2, expected_sdb_values2) # Create BFD session 3 @@ -404,6 +430,7 @@ def test_multipleBfdSessions(self, dvs): "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "2000::1", "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "2000::2", "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_DEMAND_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "192", "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "6" } self.check_asic_bfd_session_value(session3, expected_adb_values) @@ -411,7 +438,7 @@ def test_multipleBfdSessions(self, dvs): # Check STATE_DB entry related to the BFD session 3 key_state_db3 = "default|default|2000::2" expected_sdb_values3 = {"state": "Down", "type": "demand_active", "local_addr" : "2000::1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "1000", "multiplier" : "10", "multihop": "false", "local_discriminator" : "10"} self.check_state_bfd_session_value(key_state_db3, expected_sdb_values3) # Create BFD session 4 @@ -430,6 +457,7 @@ def test_multipleBfdSessions(self, dvs): "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "3000::1", "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "3000::2", "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "192", "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "6" } self.check_asic_bfd_session_value(session4, expected_adb_values) @@ -437,7 +465,7 @@ def test_multipleBfdSessions(self, dvs): # Check STATE_DB entry related to the BFD session 4 key_state_db4 = "default|default|3000::2" expected_sdb_values4 = {"state": "Down", "type": "async_active", "local_addr" : "3000::1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "1000", "multiplier" : "10", "multihop": "false", "local_discriminator" : "11"} 
self.check_state_bfd_session_value(key_state_db4, expected_sdb_values4) # Update BFD session states @@ -465,6 +493,131 @@ def test_multipleBfdSessions(self, dvs): self.remove_bfd_session(key4) self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", session4) + def test_addRemoveBfdSession_with_tsa_case1(self, dvs): + # This is a test for BFD caching mechanism. + # This test sets up a BFD session with shutdown_bfd_during_tsa=true and checks state DB for session creation. + # Then TSA is applied and removal of the session is verified in app db. This is followed by TSB and finally the + # reinstated session is verified. + self.setup_db(dvs) + + bfdSessions = self.get_exist_bfd_session() + + # Create BFD session + fieldValues = {"local_addr": "10.0.0.1", "type": "demand_active", "shutdown_bfd_during_tsa": "true"} + self.create_bfd_session("default:default:10.0.0.2", fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", len(bfdSessions) + 1) + + # Checked created BFD session in ASIC_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + assert len(createdSessions) == 1 + + session = createdSessions.pop() + expected_adb_values = { + "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", + "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", + "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_DEMAND_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "192", + "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4" + } + self.check_asic_bfd_session_value(session, expected_adb_values) + + # Check STATE_DB entry related to the BFD session + expected_sdb_values = {"state": "Down", "type": "demand_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", + "rx_interval" : "1000", "multiplier" : "10", "multihop": "false", "local_discriminator" : "12"} + self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) + + # Send BFD session state notification to update BFD session state + self.update_bfd_session_state(dvs, session, 
"Up") + time.sleep(2) + bfdSessions = self.get_exist_bfd_session() + # Confirm BFD session state in STATE_DB is updated as expected. + expected_sdb_values["state"] = "Up" + self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) + + #set TSA + self.set_tsa() + time.sleep(2) + + #ensure the session is removed. + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", session) + + #set TSB + self.clear_tsa() + time.sleep(2) + createdSessions = self.get_exist_bfd_session() - bfdSessions + session = createdSessions.pop() + expected_sdb_values["local_discriminator"] = "13" + self.update_bfd_session_state(dvs, session, "Up") + time.sleep(2) + # bfd session should come back + expected_sdb_values["state"] = "Up" + self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) + + # Remove the BFD session + + self.remove_bfd_session("default:default:10.0.0.2") + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", session) + + + def test_addRemoveBfdSession_with_tsa_case2(self, dvs): + # This is a test for BFD caching mechanism. + # This test sets up a BFD session without shutdown_bfd_during_tsa and checks state DB for session creation. + # Then TSA is applied and the session is verified to remain present, since shutdown during TSA is not enabled. + # This is followed by TSB, after which the session is verified to still be present before it is removed + normally. 
+ self.setup_db(dvs) + + bfdSessions = self.get_exist_bfd_session() + + # Create BFD session + fieldValues = {"local_addr": "10.0.0.1", "type": "demand_active"} + self.create_bfd_session("default:default:10.0.0.2", fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", len(bfdSessions) + 1) + + # Checked created BFD session in ASIC_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + assert len(createdSessions) == 1 + + session = createdSessions.pop() + expected_adb_values = { + "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", + "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", + "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_DEMAND_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "192", + "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4" + } + self.check_asic_bfd_session_value(session, expected_adb_values) + + # Check STATE_DB entry related to the BFD session + expected_sdb_values = {"state": "Down", "type": "demand_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", + "rx_interval" : "1000", "multiplier" : "10", "multihop": "false", "local_discriminator" : "14"} + self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) + + # Send BFD session state notification to update BFD session state + self.update_bfd_session_state(dvs, session, "Up") + time.sleep(2) + # Confirm BFD session state in STATE_DB is updated as expected. + expected_sdb_values["state"] = "Up" + self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) + + #set TSA + self.set_tsa() + time.sleep(2) + + #ensure the session is still present. 
+ self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", len(bfdSessions) + 1) + self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) + + #set TSB + self.clear_tsa() + time.sleep(2) + + self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) + + # Remove the BFD session + self.remove_bfd_session("default:default:10.0.0.2") + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", session) + def test_bfd_state_db_clear(self, dvs): self.setup_db(dvs) diff --git a/tests/test_buffer_dynamic.py b/tests/test_buffer_dynamic.py index 0b4177b64c..1813ebf430 100644 --- a/tests/test_buffer_dynamic.py +++ b/tests/test_buffer_dynamic.py @@ -775,8 +775,131 @@ def test_removeBufferPool(self, dvs, testlog): def test_bufferPortMaxParameter(self, dvs, testlog): self.setup_db(dvs) + # Update log level so that we can analyze the log in case the test failed + logfvs = self.config_db.wait_for_entry("LOGGER", "buffermgrd") + old_log_level = logfvs.get("LOGLEVEL") + logfvs["LOGLEVEL"] = "INFO" + self.config_db.update_entry("LOGGER", "buffermgrd", logfvs) + # Check whether port's maximum parameter has been exposed to STATE_DB fvs = self.state_db.wait_for_entry("BUFFER_MAX_PARAM_TABLE", "Ethernet0") assert int(fvs["max_queues"]) and int(fvs["max_priority_groups"]) + _, oa_pid = dvs.runcmd("pgrep orchagent") + + try: + fvs["max_headroom_size"] = "122880" + self.state_db.update_entry("BUFFER_MAX_PARAM_TABLE", "Ethernet0", fvs) + + # Startup interface + dvs.port_admin_set('Ethernet0', 'up') + # Wait for the lossy profile to be handled + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:0", {"profile": "ingress_lossy_profile"}) + + # Stop orchagent to simulate the scenario that the system is during initialization + dvs.runcmd("kill -s SIGSTOP {}".format(oa_pid)) + + # Create a lossless profile + profile_fvs = {'xon': '19456', + 'xoff': '10240', + 'size': '29696', + 'dynamic_th': '0', + 'pool': 
'ingress_lossless_pool'} + self.config_db.update_entry('BUFFER_PROFILE', 'test', profile_fvs) + + self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'test'}) + + # Make sure the entry has been handled by buffermgrd and is pending on orchagent's queue + self.app_db.wait_for_field_match("_BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "test"}) + + # Should not be added due to the maximum headroom exceeded + self.config_db.update_entry('BUFFER_PG', 'Ethernet0|1', {'profile': 'ingress_lossy_profile'}) + # Should not be added due to the maximum headroom exceeded + self.config_db.update_entry('BUFFER_PG', 'Ethernet0|6', {'profile': 'test'}) + + # Resume orchagent + dvs.runcmd("kill -s SIGCONT {}".format(oa_pid)) + + # Check whether BUFFER_PG_TABLE is updated as expected + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "test"}) + + keys = self.app_db.get_keys('BUFFER_PG_TABLE') + + assert 'Ethernet0:1' not in keys + assert 'Ethernet0:6' not in keys + + # Update the profile + profile_fvs['size'] = '28672' + profile_fvs['xoff'] = '9216' + self.config_db.update_entry('BUFFER_PROFILE', 'test', profile_fvs) + self.app_db.wait_for_field_match('BUFFER_PROFILE_TABLE', 'test', profile_fvs) + + # Verify a pending remove PG is not counted into the accumulative headroom + dvs.runcmd("kill -s SIGSTOP {}".format(oa_pid)) + + self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') + # Should be added because PG 3-4 has been removed and there are sufficient headroom + self.config_db.update_entry('BUFFER_PG', 'Ethernet0|1', {'profile': 'ingress_lossy_profile'}) + + # Resume orchagent + dvs.runcmd("kill -s SIGCONT {}".format(oa_pid)) + + # Check whether BUFFER_PG_TABLE is updated as expected + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:1", {"profile": "ingress_lossy_profile"}) + finally: + dvs.runcmd("kill -s SIGCONT {}".format(oa_pid)) + + self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') + 
self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|1') + self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|6') + self.config_db.delete_entry('BUFFER_PROFILE', 'test') + + fvs.pop("max_headroom_size") + self.state_db.delete_entry("BUFFER_MAX_PARAM_TABLE", "Ethernet0") + self.state_db.update_entry("BUFFER_MAX_PARAM_TABLE", "Ethernet0", fvs) + + if old_log_level: + logfvs["LOGLEVEL"] = old_log_level + self.config_db.update_entry("LOGGER", "buffermgrd", logfvs) + + dvs.port_admin_set('Ethernet0', 'down') + self.cleanup_db(dvs) + + + def test_bufferPoolInitWithSHP(self, dvs, testlog): + self.setup_db(dvs) + + try: + # 1. Enable the shared headroom pool + default_lossless_buffer_parameter = self.config_db.get_entry('DEFAULT_LOSSLESS_BUFFER_PARAMETER', 'AZURE') + default_lossless_buffer_parameter['over_subscribe_ratio'] = '2' + self.config_db.update_entry('DEFAULT_LOSSLESS_BUFFER_PARAMETER', 'AZURE', default_lossless_buffer_parameter) + + # 2. Stop the orchagent + _, oa_pid = dvs.runcmd("pgrep orchagent") + dvs.runcmd("kill -s SIGSTOP {}".format(oa_pid)) + + # 3. Remove the size from CONFIG_DB|BUFFER_POOL.ingress_lossless_pool + original_ingress_lossless_pool = self.config_db.get_entry('BUFFER_POOL', 'ingress_lossless_pool') + try: + self.config_db.delete_field('BUFFER_POOL', 'ingress_lossless_pool', 'size') + self.config_db.delete_field('BUFFER_POOL', 'ingress_lossless_pool', 'xoff') + except Exception as e: + pass + + # 4. Remove the ingress_lossless_pool from the APPL_DB + self.app_db.delete_entry('BUFFER_POOL_TABLE', 'ingress_lossless_pool') + + # 5. Mock it by adding a "TABLE_SET" entry to trigger the fallback logic + self.app_db.update_entry("BUFFER_PG_TABLE_SET", "", {"NULL": "NULL"}) + + # 6. 
Invoke the lua plugin + _, output = dvs.runcmd("redis-cli --eval /usr/share/swss/buffer_pool_vs.lua") + assert "ingress_lossless_pool:2048:1024" in output + + finally: + self.config_db.update_entry('BUFFER_POOL', 'ingress_lossless_pool', original_ingress_lossless_pool) + self.config_db.delete_entry('DEFAULT_LOSSLESS_BUFFER_PARAMETER', 'AZURE') + self.app_db.delete_entry("BUFFER_PG_TABLE_SET", "") + dvs.runcmd("kill -s SIGCONT {}".format(oa_pid)) diff --git a/tests/test_buffer_traditional.py b/tests/test_buffer_traditional.py index 21371cb05a..5e1f26bd50 100644 --- a/tests/test_buffer_traditional.py +++ b/tests/test_buffer_traditional.py @@ -3,14 +3,18 @@ class TestBuffer(object): + from conftest import DockerVirtualSwitch lossless_pgs = [] INTF = "Ethernet0" def setup_db(self, dvs): - self.app_db = dvs.get_app_db() - self.asic_db = dvs.get_asic_db() - self.config_db = dvs.get_config_db() - self.counter_db = dvs.get_counters_db() + from conftest import ApplDbValidator, AsicDbValidator + from dvslib.dvs_database import DVSDatabase + + self.app_db: ApplDbValidator = dvs.get_app_db() + self.asic_db: AsicDbValidator = dvs.get_asic_db() + self.config_db: DVSDatabase = dvs.get_config_db() + self.counter_db: DVSDatabase = dvs.get_counters_db() # enable PG watermark self.set_pg_wm_status('enable') @@ -74,6 +78,10 @@ def get_pg_name_map(self): pg_name = "{}:{}".format(self.INTF, pg) pg_name_map[pg_name] = self.get_pg_oid(pg_name) return pg_name_map + + def check_syslog(self, dvs, marker, err_log, expected_cnt=1): + (exitcode, num) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \"%s\" | wc -l" % (marker, err_log)]) + assert num.strip() >= str(expected_cnt) @pytest.fixture def setup_teardown_test(self, dvs): @@ -246,3 +254,102 @@ def test_buffer_pg_update(self, dvs, setup_teardown_test): dvs.port_field_set(extra_port, "speed", orig_speed) dvs.port_admin_set(self.INTF, "down") dvs.port_admin_set(extra_port, "down") + + def 
test_no_pg_profile_for_speed_and_length(self, dvs: DockerVirtualSwitch, setup_teardown_test): + """ + Test to verify that buffermgrd correctly handles a scenario where no PG profile + is configured for a given speed (10000) and cable length (80m) for Ethernet0 (self.INTF). + """ + orig_cable_len = None + orig_port_speed = None + orig_port_status = None + orig_port_qos_map = None + + test_cable_len = "80m" # cable length must not exist for test_speed in + test_speed = "10000" + test_port_status ="down" # can be up or down, but it must exist in port configuration + test_port_pfc_enable = "3,4" # does not matter, but must exist + + try: + ################################## + ## Save original configurations ## + ################################## + + # Save original cable length + fvs_cable_len = self.config_db.get_entry("CABLE_LENGTH", "AZURE") + orig_cable_len = fvs_cable_len.get(self.INTF) if fvs_cable_len else None + + # Save original port speed and admin status + fvs_port = self.config_db.get_entry("PORT", self.INTF) + orig_port_speed = fvs_port.get("speed") if fvs_port else None + orig_port_status = fvs_port.get("admin_status") if fvs_port else None + + # Save original port qos map + fvs_qos_map = self.config_db.get_entry("PORT_QOS_MAP", self.INTF) + orig_cable_len = fvs_qos_map.get("pfc_enable") if fvs_qos_map else None + + ###################################### + ## Send configurations to CONFIG_DB ## + ###################################### + + # Configure cable length + self.change_cable_len(test_cable_len) + + # Configure port speed + dvs.port_field_set(self.INTF, "speed", test_speed) + + # Configure PFC enable + self.set_port_qos_table(self.INTF, test_port_pfc_enable) + + # Add marker to log to make syslog verification easier + # Set before setting admin status to not miss syslog + marker = dvs.add_log_marker() + + # Configure admin status + dvs.port_admin_set(self.INTF, test_port_status) + + # Wait for buffermgrd to process the changes + time.sleep(2) + + 
################## + ## Verification ## + ################## + + + # Check syslog if this error is present. This is expected. + self.check_syslog(dvs, marker, "Failed to process invalid entry, drop it") + + finally: + ############################### + ## Revert to original values ## + ############################### + + # Revert values to original values + # If there are none, then assume entry/field never existed and should be deleted + + # Revert cable length + if orig_cable_len: + self.change_cable_len(orig_cable_len) + else: + self.config_db.delete_entry("CABLE_LENGTH", "AZURE") + + # Revert port speed + if orig_port_speed: + dvs.port_field_set(self.INTF, "speed", orig_port_speed) + else: + self.config_db.delete_field("PORT", self.INTF, "speed") + + # Revert admin status + if orig_port_status: + dvs.port_admin_set(self.INTF, orig_port_status) + else: + self.config_db.delete_field("PORT", self.INTF, "admin_status") + + # Revert port qos map + if orig_port_qos_map: + self.config_db.update_entry("PORT_QOS_MAP", self.INTF, orig_port_qos_map) + else: + self.config_db.delete_entry("PORT_QOS_MAP", self.INTF) + + + diff --git a/tests/test_crm.py b/tests/test_crm.py index 31d0b57ae5..bee145c34f 100644 --- a/tests/test_crm.py +++ b/tests/test_crm.py @@ -697,12 +697,22 @@ def test_CrmAclGroup(self, dvs, testlog): entry_used_counter = getCrmCounterValue(dvs, 'ACL_STATS:INGRESS:PORT', 'crm_stats_acl_group_used') assert entry_used_counter == 3 - # remove ACL table - #tbl._del("test-aclv6") - #time.sleep(2) - #atbl = swsscommon.Table(adb, "ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE_GROUP") - #table_used_counter = getCrmCounterValue(dvs, 'ACL_STATS:INGRESS:PORT', 'crm_stats_acl_group_used') - #assert table_used_counter == 0 + marker = dvs.add_log_marker() + crm_update(dvs, "polling_interval", "1") + crm_update(dvs, "acl_group_threshold_type", "used") + crm_update(dvs, "acl_group_low_threshold", str(0)) + crm_update(dvs, "acl_group_high_threshold", str(2)) + + time.sleep(2) + 
check_syslog(dvs, marker, "ACL_GROUP THRESHOLD_EXCEEDED for TH_USED", 1) + check_syslog(dvs, marker, "ACL_GROUP THRESHOLD_CLEAR for TH_USED", 0) + + tbl._del("test-aclv6") + time.sleep(2) + check_syslog(dvs, marker, "ACL_GROUP THRESHOLD_CLEAR for TH_USED", 1) + + table_used_counter = getCrmCounterValue(dvs, 'ACL_STATS:INGRESS:PORT', 'crm_stats_acl_group_used') + assert table_used_counter == 0 def test_CrmSnatEntry(self, dvs, testlog): diff --git a/tests/test_dash_acl.py b/tests/test_dash_acl.py new file mode 100644 index 0000000000..c85fbf532b --- /dev/null +++ b/tests/test_dash_acl.py @@ -0,0 +1,854 @@ +from collections import namedtuple + +from swsscommon import swsscommon +from dvslib.dvs_database import DVSDatabase + +from dash_api.appliance_pb2 import * +from dash_api.vnet_pb2 import * +from dash_api.eni_pb2 import * +from dash_api.acl_group_pb2 import * +from dash_api.acl_rule_pb2 import * +from dash_api.acl_in_pb2 import * +from dash_api.acl_out_pb2 import * +from dash_api.prefix_tag_pb2 import * +from dash_api.types_pb2 import * + +from typing import Union +import time +import ipaddress +import socket +import binascii + +import pytest + +DVS_ENV = ["HWSKU=DPU-2P"] +NUM_PORTS = 2 + +ACL_GROUP_1 = "acl_group_1" +ACL_GROUP_2 = "acl_group_2" +ACL_GROUP_3 = "acl_group_3" +ACL_RULE_1 = "1" +ACL_RULE_2 = "2" +ACL_RULE_3 = "3" +ACL_STAGE_1 = "1" +ACL_STAGE_2 = "2" +ACL_STAGE_3 = "3" +TAG_1 = "tag_1" +TAG_2 = "tag_2" +TAG_3 = "tag_3" + +SAI_NULL_OID = "oid:0x0" + +PortRange = namedtuple('PortRange', ['min', 'max']) + +def to_string(value): + if isinstance(value, bool): + return "true" if value else "false" + elif isinstance(value, bytes): + return value + return str(value) + + +def get_sai_stage(outbound, v4, stage_num): + direction = "OUTBOUND" if outbound else "INBOUND" + ip_version = "V4" if v4 else "V6" + return "SAI_ENI_ATTR_{}_{}_STAGE{}_DASH_ACL_GROUP_ID".format(direction, ip_version, stage_num) + + +def prefix_list_to_set(prefix_list: str): + count, prefixes 
= prefix_list.split(":") + + ps = set(prefixes.split(",")) + assert len(ps) == int(count) + + return ps + + +def to_ip_prefix(prefix): + net = ipaddress.IPv4Network(prefix, False) + pfx = IpPrefix() + pfx.ip.ipv4 = socket.htonl(int(net.network_address)) + pfx.mask.ipv4 = socket.htonl(int(net.netmask)) + return pfx + + +class ProduceStateTable(object): + def __init__(self, database, table_name: str): + self.table = swsscommon.ProducerStateTable( + database.db_connection, + table_name) + self.keys = set() + + def __setitem__(self, key: str, pairs: Union[dict, list, tuple]): + pairs_str = [] + if isinstance(pairs, dict): + pairs = pairs.items() + for k, v in pairs: + pairs_str.append((to_string(k), to_string(v))) + self.table.set(key, pairs_str) + self.keys.add(key) + + def __delitem__(self, key: str): + self.table.delete(str(key)) + self.keys.discard(key) + + def get_keys(self): + return self.keys + + +class Table(object): + def __init__(self, database: DVSDatabase, table_name: str): + self.table_name = table_name + self.db = database + self.table = swsscommon.Table(database.db_connection, self.table_name) + + # Overload verification methods in DVSDatabase so we can use them per-table + # All methods from DVSDatabase that do not start with '_' are overloaded + # See the DVSDatabase class for info about the use of each method + # For each `func` in DVSDatabase, equivalent to: + # def func(self, **kwargs): + # return self.db.func(table_name=self.table_name, **kwargs) + # This means that we can call e.g. 
class Table(object):
    """Read-only wrapper around one ASIC_DB table.

    Forwards all public DVSDatabase helpers (wait_for_n_keys, get_keys,
    wait_for_field_match, ...) with table_name pre-bound, so callers can
    write e.g. table.wait_for_n_keys(num_keys=1) instead of passing the
    table name on every call.
    """

    def __init__(self, database: DVSDatabase, table_name: str):
        self.table_name = table_name
        self.db = database
        self.table = swsscommon.Table(database.db_connection, self.table_name)

        # Overload verification methods in DVSDatabase so we can use them
        # per-table. All methods from DVSDatabase that do not start with '_'
        # are overloaded; each forwards with table_name=self.table_name.
        overload_methods = [
            attr for attr in dir(DVSDatabase)
            if not attr.startswith('_') and callable(getattr(DVSDatabase, attr))
        ]
        for method_name in overload_methods:
            # Bind method_name as a default arg so each lambda captures its
            # own name rather than the loop variable's final value.
            setattr(
                self, method_name, lambda method_name=method_name,
                **kwargs: getattr(self.db, method_name)(table_name=self.table_name, **kwargs)
            )

    def __getitem__(self, key: str):
        """Return the entry as a dict, or None if the key is absent."""
        exists, result = self.table.get(str(key))
        if not exists:
            return None
        return dict(result)


APPL_DB_TABLE_LIST = [
    swsscommon.APP_DASH_PREFIX_TAG_TABLE_NAME,
    swsscommon.APP_DASH_ACL_IN_TABLE_NAME,
    swsscommon.APP_DASH_ACL_OUT_TABLE_NAME,
    swsscommon.APP_DASH_ACL_GROUP_TABLE_NAME,
    swsscommon.APP_DASH_ACL_RULE_TABLE_NAME,
    swsscommon.APP_DASH_ENI_TABLE_NAME,
    swsscommon.APP_DASH_ACL_OUT_TABLE_NAME and swsscommon.APP_DASH_VNET_TABLE_NAME,
    swsscommon.APP_DASH_APPLIANCE_TABLE_NAME
]


# TODO: At some point, orchagent will be updated to write to some DB to
# indicate that it's finished processing updates for a given table. Once that
# is implemented, we can remove all the `sleep` statements in these tests and
# instead proactively check for the finished signal from orchagent.
class DashAcl(object):
    """Test helper bundling the APPL_DB producer tables and ASIC_DB views
    used by the DASH ACL tests."""

    def __init__(self, dvs):
        self.dvs = dvs
        self.app_db_tables = []

        for table in APPL_DB_TABLE_LIST:
            pst = ProduceStateTable(
                self.dvs.get_app_db(), table
            )
            # Based on swsscommon convention for table names, assume e.g.
            # swsscommon.APP_DASH_ENI_TABLE_NAME == "DASH_ENI_TABLE"; the
            # ProducerStateTable for it is therefore accessible as
            # `self.app_dash_eni_table`.
            table_variable_name = "app_{}".format(table.lower())
            setattr(self, table_variable_name, pst)
            self.app_db_tables.append(pst)

        self.asic_dash_acl_rule_table = Table(
            self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_DASH_ACL_RULE")
        self.asic_dash_acl_group_table = Table(
            self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_DASH_ACL_GROUP")
        self.asic_eni_table = Table(
            self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_ENI")
        self.asic_vip_table = Table(
            self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_VIP_ENTRY")
        self.asic_vnet_table = Table(
            self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_VNET")

        self.asic_db_tables = [
            self.asic_dash_acl_group_table,
            self.asic_dash_acl_rule_table,
            self.asic_eni_table,
            self.asic_vip_table,
            self.asic_vnet_table
        ]

    def create_prefix_tag(self, name, ip_version, prefixes):
        pb = PrefixTag()
        pb.ip_version = ip_version
        for prefix in prefixes:
            pb.prefix_list.append(to_ip_prefix(prefix))
        self.app_dash_prefix_tag_table[str(name)] = {"pb": pb.SerializeToString()}

    def remove_prefix_tag(self, tag_id):
        del self.app_dash_prefix_tag_table[str(tag_id)]

    def create_acl_rule(self, group_id, rule_id, action, terminating, priority, protocol=None,
                        src_addr=None, dst_addr=None,
                        src_tag=None, dst_tag=None,
                        src_port=None, dst_port=None):
        """Write one DASH_ACL_RULE_TABLE entry keyed "<group>:<rule>"."""
        pb = AclRule()
        pb.priority = priority
        pb.action = action
        pb.terminating = terminating

        if protocol:
            # BUG FIX: the original used `map(pb.protocol.append, protocol)`.
            # In Python 3 map() is lazy and the iterator was never consumed,
            # so the protocol list was silently left empty.
            for proto in protocol:
                pb.protocol.append(proto)

        if src_addr:
            for addr in src_addr:
                pb.src_addr.append(to_ip_prefix(addr))

        if dst_addr:
            for addr in dst_addr:
                pb.dst_addr.append(to_ip_prefix(addr))

        if src_tag:
            for tag in src_tag:
                pb.src_tag.append(tag.encode())

        if dst_tag:
            for tag in dst_tag:
                pb.dst_tag.append(tag.encode())

        if src_port:
            for pr in src_port:
                vr = ValueOrRange()
                vr.range.min = pr.min
                vr.range.max = pr.max
                pb.src_port.append(vr)

        if dst_port:
            for pr in dst_port:
                vr = ValueOrRange()
                vr.range.min = pr.min
                vr.range.max = pr.max
                pb.dst_port.append(vr)

        self.app_dash_acl_rule_table[str(group_id) + ":" + str(rule_id)] = {"pb": pb.SerializeToString()}

    def remove_acl_rule(self, group_id, rule_id):
        del self.app_dash_acl_rule_table[str(group_id) + ":" + str(rule_id)]

    def create_acl_group(self, group_id, ip_version):
        # BUG FIX: the original ignored `ip_version` and always wrote
        # IP_VERSION_IPV4, so callers requesting an IPv6 group (see
        # test_acl_group) silently got an IPv4 one.
        pb = AclGroup()
        pb.ip_version = ip_version
        self.app_dash_acl_group_table[str(group_id)] = {"pb": pb.SerializeToString()}

    def remove_acl_group(self, group_id):
        del self.app_dash_acl_group_table[str(group_id)]

    def create_appliance(self, name, pb):
        self.app_dash_appliance_table[str(name)] = {"pb": pb.SerializeToString()}

    def remove_appliance(self, name):
        del self.app_dash_appliance_table[str(name)]

    def create_eni(self, eni, pb):
        self.app_dash_eni_table[str(eni)] = {"pb": pb.SerializeToString()}

    def remove_eni(self, eni):
        del self.app_dash_eni_table[str(eni)]

    def create_vnet(self, vnet, pb):
        self.app_dash_vnet_table[str(vnet)] = {"pb": pb.SerializeToString()}

    def remove_vnet(self, vnet):
        del self.app_dash_vnet_table[str(vnet)]

    def bind_acl_in(self, eni, stage, v4_group_id=None, v6_group_id=None):
        """Bind an ACL group to an inbound stage of an ENI."""
        pb = AclIn()
        if v4_group_id:
            pb.v4_acl_group_id = v4_group_id
        if v6_group_id:
            pb.v6_acl_group_id = v6_group_id
        self.app_dash_acl_in_table[str(
            eni) + ":" + str(stage)] = {"pb": pb.SerializeToString()}

    def unbind_acl_in(self, eni, stage):
        del self.app_dash_acl_in_table[str(eni) + ":" + str(stage)]

    def bind_acl_out(self, eni, stage, v4_group_id=None, v6_group_id=None):
        """Bind an ACL group to an outbound stage of an ENI."""
        # BUG FIX: the original built an AclIn() message here (copy-paste
        # from bind_acl_in); the outbound table carries AclOut messages.
        pb = AclOut()
        if v4_group_id:
            pb.v4_acl_group_id = v4_group_id
        if v6_group_id:
            pb.v6_acl_group_id = v6_group_id
        self.app_dash_acl_out_table[str(
            eni) + ":" + str(stage)] = {"pb": pb.SerializeToString()}

    def unbind_acl_out(self, eni, stage):
        del self.app_dash_acl_out_table[str(eni) + ":" + str(stage)]
class TestAcl(object):
    """End-to-end DASH ACL tests: APPL_DB writes in, ASIC_DB state out."""

    @pytest.fixture
    def ctx(self, dvs):
        """Provision one appliance/vnet/ENI and yield a DashAcl helper.

        Teardown deletes every APPL_DB key the tests wrote and waits for
        ASIC_DB to drain back to empty.
        """
        self.vnet_name = "vnet1"
        self.eni_name = "eth0"
        self.appliance_name = "default_app"
        self.vm_vni = "4321"
        self.appliance_sip = "10.20.30.40"
        self.vni = "1"
        self.mac_address = "01:23:45:67:89:ab"
        self.underlay_ip = "1.1.1.1"

        acl_context = DashAcl(dvs)
        pb = Appliance()
        pb.sip.ipv4 = socket.htonl(int(ipaddress.ip_address(self.appliance_sip)))
        pb.vm_vni = int(self.vm_vni)
        acl_context.create_appliance(self.appliance_name, pb)

        pb = Vnet()
        pb.vni = int(self.vni)
        acl_context.create_vnet(self.vnet_name, pb)

        pb = Eni()
        pb.vnet = self.vnet_name
        pb.mac_address = bytes.fromhex(self.mac_address.replace(":", ""))
        pb.underlay_ip.ipv4 = socket.htonl(int(ipaddress.ip_address(self.underlay_ip)))
        acl_context.create_eni(self.eni_name, pb)

        acl_context.asic_vip_table.wait_for_n_keys(num_keys=1)
        acl_context.asic_vnet_table.wait_for_n_keys(num_keys=1)
        acl_context.asic_eni_table.wait_for_n_keys(num_keys=1)

        yield acl_context

        # Manually cleanup by deleting all remaining APPL_DB keys
        for table in acl_context.app_db_tables:
            keys = table.get_keys()
            for key in list(keys):
                del table[key]

        for table in acl_context.asic_db_tables:
            table.wait_for_n_keys(num_keys=0)

    def bind_acl_group(self, ctx, stage_id, group_id, group_oid):
        """Bind a group inbound and verify the ENI points at group_oid."""
        ctx.bind_acl_in(self.eni_name, stage_id, group_id)
        self.verify_group_is_bound_to_eni(ctx, stage_id, group_oid)

    def verify_group_is_bound_to_eni(self, ctx, stage_id, group_oid):
        eni_key = ctx.asic_eni_table.get_keys()[0]
        sai_stage = get_sai_stage(outbound=False, v4=True, stage_num=stage_id)
        ctx.asic_eni_table.wait_for_field_match(key=eni_key, expected_fields={sai_stage: group_oid})
        assert sai_stage in ctx.asic_eni_table[eni_key]
        assert ctx.asic_eni_table[eni_key][sai_stage] == group_oid

    def test_acl_flow(self, ctx):
        ctx.create_acl_group(ACL_GROUP_1, IpVersion.IP_VERSION_IPV4)

        ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1,
                            priority=1, action=Action.ACTION_PERMIT, terminating=False,
                            src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"],
                            src_port=[PortRange(0, 1)], dst_port=[PortRange(0, 1)])

        rule1_id = ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0]
        group1_id = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0]
        rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id]
        assert rule1_attr["SAI_DASH_ACL_RULE_ATTR_PRIORITY"] == "1"
        assert rule1_attr["SAI_DASH_ACL_RULE_ATTR_ACTION"] == "SAI_DASH_ACL_RULE_ACTION_PERMIT_AND_CONTINUE"
        assert rule1_attr["SAI_DASH_ACL_RULE_ATTR_DASH_ACL_GROUP_ID"] == group1_id
        assert rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"] == "2:192.168.0.1/32,192.168.1.0/30"
        assert rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"] == "2:192.168.0.1/32,192.168.1.0/30"
        assert rule1_attr["SAI_DASH_ACL_RULE_ATTR_DST_PORT"] == "1:0,1"
        assert rule1_attr["SAI_DASH_ACL_RULE_ATTR_SRC_PORT"] == "1:0,1"
        assert rule1_attr["SAI_DASH_ACL_RULE_ATTR_PROTOCOL"].split(":")[0] == "256"
        group1_attr = ctx.asic_dash_acl_group_table[group1_id]
        assert group1_attr["SAI_DASH_ACL_GROUP_ATTR_IP_ADDR_FAMILY"] == "SAI_IP_ADDR_FAMILY_IPV4"

        # Create multiple rules.
        # FIX: the original issued the ACL_RULE_2 write twice back-to-back;
        # the duplicate was a redundant overwrite of the same key.
        ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_2,
                            priority=2, action=Action.ACTION_PERMIT, terminating=False,
                            src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"],
                            src_port=[PortRange(0, 1)], dst_port=[PortRange(0, 1)])
        ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_3,
                            priority=3, action=Action.ACTION_PERMIT, terminating=False,
                            src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"],
                            src_port=[PortRange(0, 1)], dst_port=[PortRange(0, 1)])
        ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=3)
        ctx.unbind_acl_in(self.eni_name, ACL_STAGE_1)
        ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_1)
        ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_2)
        ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_3)
        ctx.remove_acl_group(ACL_GROUP_1)
        ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=0)
        ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=0)

    def test_acl_group(self, ctx):
        ctx.create_acl_group(ACL_GROUP_1, IpVersion.IP_VERSION_IPV6)

        ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1,
                            priority=1, action=Action.ACTION_PERMIT, terminating=False,
                            src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"],
                            src_port=[PortRange(0, 1)], dst_port=[PortRange(0, 1)])

        ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)

        # Remove group before removing its rule
        ctx.remove_acl_group(ACL_GROUP_1)
        # Wait a few seconds to make sure no changes are made
        # since group still contains a rule
        time.sleep(3)
        ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)

        ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_1)
        ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=0)

    def test_empty_acl_group_binding(self, ctx):
        """
        Verifies behavior when binding ACL groups
        """
        eni_key = ctx.asic_eni_table.get_keys()[0]
        sai_stage = get_sai_stage(outbound=True, v4=True, stage_num=ACL_STAGE_1)

        ctx.create_acl_group(ACL_GROUP_1, IpVersion.IP_VERSION_IPV4)
        acl_group_key = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0]
        ctx.bind_acl_out(self.eni_name, ACL_STAGE_1, v4_group_id=ACL_GROUP_1)
        time.sleep(3)
        # Binding should not happen yet because the ACL group is empty
        assert sai_stage not in ctx.asic_eni_table[eni_key]

        ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1,
                            priority=1, action=Action.ACTION_PERMIT, terminating=False,
                            src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"],
                            src_port=[PortRange(0, 1)], dst_port=[PortRange(0, 1)])

        # Now that the group contains a rule, expect binding to occur
        ctx.bind_acl_out(self.eni_name, ACL_STAGE_1, v4_group_id=ACL_GROUP_1)
        ctx.asic_eni_table.wait_for_field_match(key=eni_key, expected_fields={sai_stage: acl_group_key})

        # Unbinding should occur immediately
        ctx.unbind_acl_out(self.eni_name, ACL_STAGE_1)
        ctx.asic_eni_table.wait_for_field_match(key=eni_key, expected_fields={sai_stage: SAI_NULL_OID})

    def test_acl_rule_after_group_bind(self, ctx):
        eni_key = ctx.asic_eni_table.get_keys()[0]
        sai_stage = get_sai_stage(outbound=False, v4=True, stage_num=ACL_STAGE_1)

        ctx.create_acl_group(ACL_GROUP_1, IpVersion.IP_VERSION_IPV4)
        acl_group_key = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0]
        ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1,
                            priority=1, action=Action.ACTION_PERMIT, terminating=False,
                            src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"],
                            src_port=[PortRange(0, 1)], dst_port=[PortRange(0, 1)])
        ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)

        self.bind_acl_group(ctx, ACL_STAGE_1, ACL_GROUP_1, acl_group_key)

        # The new rule should not be created since the group is bound
        ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_2,
                            priority=2, action=Action.ACTION_PERMIT, terminating=False,
                            src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"],
                            src_port=[PortRange(0, 1)], dst_port=[PortRange(0, 1)])
        time.sleep(3)
        ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)

        # Unbinding the group
        ctx.unbind_acl_in(self.eni_name, ACL_STAGE_1)
        ctx.asic_eni_table.wait_for_field_match(key=eni_key, expected_fields={sai_stage: SAI_NULL_OID})

        # Now the rule can be created since the group is no longer bound
        ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_2,
                            priority=2, action=Action.ACTION_PERMIT, terminating=False,
                            src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"],
                            src_port=[PortRange(0, 1)], dst_port=[PortRange(0, 1)])
        ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=2)

        # cleanup
        ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_1)
        ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_2)
        ctx.remove_acl_group(ACL_GROUP_1)

    def test_acl_group_binding(self, ctx):
        eni_key = ctx.asic_eni_table.get_keys()[0]
        sai_stage = get_sai_stage(outbound=False, v4=True, stage_num=ACL_STAGE_2)

        ctx.create_acl_group(ACL_GROUP_2, IpVersion.IP_VERSION_IPV4)
        acl_group_key = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0]

        ctx.create_acl_rule(ACL_GROUP_2, ACL_RULE_1,
                            priority=1, action=Action.ACTION_PERMIT, terminating=False,
                            src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"],
                            src_port=[PortRange(0, 1)], dst_port=[PortRange(0, 1)])

        ctx.bind_acl_in(self.eni_name, ACL_STAGE_2, v4_group_id=ACL_GROUP_2)
        # Binding should occur immediately since we added a rule to the group prior to binding
        ctx.asic_eni_table.wait_for_field_match(key=eni_key, expected_fields={sai_stage: acl_group_key})

        ctx.unbind_acl_in(self.eni_name, ACL_STAGE_2)
        ctx.asic_eni_table.wait_for_field_match(key=eni_key, expected_fields={sai_stage: SAI_NULL_OID})

    def test_acl_rule(self, ctx):
        # Create acl rule before acl group
        ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1,
                            priority=1, action=Action.ACTION_PERMIT, terminating=False,
                            src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"],
                            src_port=[PortRange(0, 1)], dst_port=[PortRange(0, 1)])
        time.sleep(3)
        ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=0)
        ctx.create_acl_group(ACL_GROUP_1, IpVersion.IP_VERSION_IPV4)

        ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)

        # Create acl rule with nonexistent acl group, which should never get programmed to ASIC_DB
        ctx.create_acl_rule("0", "0",
                            priority=1, action=Action.ACTION_PERMIT, terminating=False,
                            src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"],
                            src_port=[PortRange(0, 1)], dst_port=[PortRange(0, 1)])
        time.sleep(3)
        ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)

        ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_2,
                            priority=1, action=Action.ACTION_PERMIT, terminating=False,
                            src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"],
                            src_port=[PortRange(0, 1)], dst_port=[PortRange(0, 1)])
        ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=2)

        ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_1)
        ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_2)
        ctx.remove_acl_group(ACL_GROUP_1)
        ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=0)
        ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=0)

    @pytest.mark.parametrize("bind_group", [True, False])
    def test_prefix_single_tag(self, ctx, bind_group):
        tag1_prefixes = {"1.1.1.0/24", "2.2.0.0/16"}
        ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes)

        tag2_prefixes = {"192.168.1.0/30", "192.168.2.0/30", "192.168.3.0/30"}
        ctx.create_prefix_tag(TAG_2, IpVersion.IP_VERSION_IPV4, tag2_prefixes)

        ctx.create_acl_group(ACL_GROUP_1, IpVersion.IP_VERSION_IPV4)
        group1_id = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0]

        ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1,
                            priority=1, action=Action.ACTION_PERMIT, terminating=False,
                            src_tag=[TAG_1], dst_tag=[TAG_2],
                            src_port=[PortRange(0, 1)], dst_port=[PortRange(0, 1)])

        rule1_id = ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0]
        rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id]

        assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes
        assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == tag2_prefixes

        if bind_group:
            self.bind_acl_group(ctx, ACL_STAGE_1, ACL_GROUP_1, group1_id)

        tag1_prefixes = {"1.1.2.0/24", "2.3.0.0/16"}
        ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes)

        time.sleep(3)

        rule1_id = ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0]
        rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id]

        if bind_group:
            # Updating a tag on a bound group forces a new group object
            new_group1_id = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0]
            assert new_group1_id != group1_id
            self.verify_group_is_bound_to_eni(ctx, ACL_STAGE_1, new_group1_id)

        assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes
        assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == tag2_prefixes

        tag2_prefixes = {"192.168.2.0/30", "192.168.3.0/30"}
        ctx.create_prefix_tag(TAG_2, IpVersion.IP_VERSION_IPV4, tag2_prefixes)

        time.sleep(3)

        ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)
        rule1_id = ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0]
        rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id]

        assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes
        assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == tag2_prefixes

        if bind_group:
            ctx.unbind_acl_in(self.eni_name, ACL_STAGE_1)

        ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_1)
        ctx.remove_acl_group(ACL_GROUP_1)
        ctx.remove_prefix_tag(TAG_1)
        ctx.remove_prefix_tag(TAG_2)

    @pytest.mark.parametrize("bind_group", [True, False])
    def test_multiple_tags(self, ctx, bind_group):
        tag1_prefixes = {"1.1.1.0/24", "2.2.0.0/16"}
        ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes)

        # NOTE(review): the original set literal listed "192.168.1.0/30"
        # twice (likely a typo for a third distinct prefix); preserved as-is
        # to keep test data unchanged — the set simply has two members.
        tag2_prefixes = {"192.168.1.0/30", "192.168.2.0/30", "192.168.1.0/30"}
        ctx.create_prefix_tag(TAG_2, IpVersion.IP_VERSION_IPV4, tag2_prefixes)

        tag3_prefixes = {"3.3.0.0/16", "3.4.0.0/16", "4.4.4.0/24", "5.5.5.0/24"}
        ctx.create_prefix_tag(TAG_3, IpVersion.IP_VERSION_IPV4, tag3_prefixes)

        ctx.create_acl_group(ACL_GROUP_1, IpVersion.IP_VERSION_IPV4)
        group1_id = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0]

        ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1,
                            priority=1, action=Action.ACTION_PERMIT, terminating=False,
                            src_tag=[TAG_1, TAG_2], dst_tag=[TAG_2, TAG_3],
                            src_port=[PortRange(0, 1)], dst_port=[PortRange(0, 1)])

        rule1_id = ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0]
        rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id]

        assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes.union(tag2_prefixes)
        assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == tag2_prefixes.union(tag3_prefixes)

        if bind_group:
            self.bind_acl_group(ctx, ACL_STAGE_1, ACL_GROUP_1, group1_id)

        tag2_prefixes = {"192.168.10.0/30", "192.168.11.0/30", "192.168.12.0/30"}
        ctx.create_prefix_tag(TAG_2, IpVersion.IP_VERSION_IPV4, tag2_prefixes)

        tag3_prefixes = {"3.13.0.0/16", "3.14.0.0/16", "4.14.4.0/24", "5.15.5.0/24"}
        ctx.create_prefix_tag(TAG_3, IpVersion.IP_VERSION_IPV4, tag3_prefixes)

        time.sleep(3)

        if bind_group:
            new_group1_id = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0]
            assert new_group1_id != group1_id

            self.verify_group_is_bound_to_eni(ctx, ACL_STAGE_1, new_group1_id)

        rule1_id = ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0]
        rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id]

        assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes.union(tag2_prefixes)
        assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == tag2_prefixes.union(tag3_prefixes)

        if bind_group:
            ctx.unbind_acl_in(self.eni_name, ACL_STAGE_1)

        ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_1)
        ctx.remove_acl_group(ACL_GROUP_1)
        ctx.remove_prefix_tag(TAG_1)
        ctx.remove_prefix_tag(TAG_2)
        ctx.remove_prefix_tag(TAG_3)

    @pytest.mark.parametrize("bind_group", [True, False])
    def test_multiple_tags_and_prefixes(self, ctx, bind_group):
        tag1_prefixes = {"1.1.1.0/24", "2.2.0.0/16"}
        ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes)

        tag2_prefixes = {"192.168.1.0/30", "192.168.2.0/30", "192.168.3.0/30"}
        ctx.create_prefix_tag(TAG_2, IpVersion.IP_VERSION_IPV4, tag2_prefixes)

        tag3_prefixes = {"3.3.0.0/16", "3.4.0.0/16", "4.4.4.0/24", "5.5.5.0/24"}
        ctx.create_prefix_tag(TAG_3, IpVersion.IP_VERSION_IPV4, tag3_prefixes)

        ctx.create_acl_group(ACL_GROUP_1, IpVersion.IP_VERSION_IPV4)
        group1_id = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0]

        prefix_list = {"10.0.0.0/8", "11.1.1.0/24", "11.1.2.0/24"}

        ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1,
                            priority=1, action=Action.ACTION_PERMIT, terminating=False,
                            src_tag=[TAG_1, TAG_2, TAG_3], dst_addr=prefix_list,
                            src_port=[PortRange(0, 1)], dst_port=[PortRange(0, 1)])

        rule1_id = ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0]
        rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id]

        super_set = set()
        super_set.update(tag1_prefixes, tag2_prefixes, tag3_prefixes)

        assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == super_set
        assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == prefix_list

        if bind_group:
            self.bind_acl_group(ctx, ACL_STAGE_1, ACL_GROUP_1, group1_id)

        tag1_prefixes = {"1.1.1.0/24", "2.2.0.0/16"}
        ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes)

        # NOTE(review): "192.168.1.2/32" appears twice in the original
        # literal; preserved to keep test data unchanged.
        tag2_prefixes = {"192.168.1.2/32", "192.168.2.2/32", "192.168.1.2/32"}
        ctx.create_prefix_tag(TAG_2, IpVersion.IP_VERSION_IPV4, tag2_prefixes)

        tag3_prefixes = {"3.3.0.0/16", "3.4.0.0/16", "4.4.4.0/24", "5.5.5.0/24"}
        ctx.create_prefix_tag(TAG_3, IpVersion.IP_VERSION_IPV4, tag3_prefixes)

        time.sleep(3)

        if bind_group:
            new_group1_id = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0]
            assert new_group1_id != group1_id
            self.verify_group_is_bound_to_eni(ctx, ACL_STAGE_1, new_group1_id)

        rule1_id = ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0]
        rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id]

        super_set = set()
        super_set.update(tag1_prefixes, tag2_prefixes, tag3_prefixes)

        assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == super_set
        assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == prefix_list

        if bind_group:
            ctx.unbind_acl_in(self.eni_name, ACL_STAGE_1)

        ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_1)
        ctx.remove_acl_group(ACL_GROUP_1)
        ctx.remove_prefix_tag(TAG_1)
        ctx.remove_prefix_tag(TAG_2)
        ctx.remove_prefix_tag(TAG_3)

    @pytest.mark.parametrize("bind_group", [True, False])
    def test_multiple_groups_prefix_single_tag(self, ctx, bind_group):
        groups = [ACL_GROUP_1, ACL_GROUP_2, ACL_GROUP_3]
        stages = [ACL_STAGE_1, ACL_STAGE_2, ACL_STAGE_3]

        tag1_prefixes = {"1.1.1.0/24", "2.2.0.0/16"}
        ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes)

        for group in groups:
            ctx.create_acl_group(group, IpVersion.IP_VERSION_IPV4)
            ctx.create_acl_rule(group, ACL_RULE_1,
                                priority=1, action=Action.ACTION_PERMIT, terminating=False,
                                src_tag=[TAG_1], dst_addr=["192.168.1.2/30", "192.168.2.2/30", "192.168.3.2/30"],
                                src_port=[PortRange(0, 1)], dst_port=[PortRange(0, 1)])

        group_ids = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=3)
        rule_ids = ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=3)

        for rid in rule_ids:
            rule_attrs = ctx.asic_dash_acl_rule_table[rid]
            assert prefix_list_to_set(rule_attrs["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes

        if bind_group:
            eni_stages = []
            eni_key = ctx.asic_eni_table.get_keys()[0]
            for stage, group in zip(stages, groups):
                ctx.bind_acl_in(self.eni_name, stage, group)
                eni_stages.append(get_sai_stage(outbound=False, v4=True, stage_num=stage))

            ctx.asic_eni_table.wait_for_fields(key=eni_key, expected_fields=eni_stages)
            for stage in eni_stages:
                assert ctx.asic_eni_table[eni_key][stage] in group_ids

        tag1_prefixes = {"1.1.2.0/24", "2.3.0.0/16"}
        ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes)

        time.sleep(3)

        rule_ids = ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=3)

        for rid in rule_ids:
            rule_attrs = ctx.asic_dash_acl_rule_table[rid]
            assert prefix_list_to_set(rule_attrs["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes

        if bind_group:
            new_group_ids = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=3)

            ctx.asic_eni_table.wait_for_fields(key=eni_key, expected_fields=eni_stages)
            for stage in eni_stages:
                assert ctx.asic_eni_table[eni_key][stage] in new_group_ids

            # FIX: the unbind loop only makes sense when the stages were
            # bound above; guarded under bind_group to match the sibling
            # parametrized tests.
            for stage in stages:
                ctx.unbind_acl_in(self.eni_name, stage)

        for group in groups:
            ctx.remove_acl_rule(group, ACL_RULE_1)
            ctx.remove_acl_group(group)

        # FIX: the original also removed TAG_2 here, which this test never
        # creates; only TAG_1 needs cleanup.
        ctx.remove_prefix_tag(TAG_1)

    def test_tag_remove(self, ctx):
        tag1_prefixes = {"1.1.1.0/24", "2.2.0.0/16"}
        ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes)

        ctx.create_acl_group(ACL_GROUP_1, IpVersion.IP_VERSION_IPV4)
        ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0]

        # Create a rule that references the tag (FIX: the original comment
        # "Create acl rule before acl group" was copy-pasted and wrong here)
        ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1,
                            priority=1, action=Action.ACTION_PERMIT, terminating=False,
                            src_tag=[TAG_1], dst_addr=["192.168.1.2/30", "192.168.2.2/30", "192.168.3.2/30"],
                            src_port=[PortRange(0, 1)], dst_port=[PortRange(0, 1)])

        rule1_id = ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0]
        rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id]

        assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes

        ctx.remove_prefix_tag(TAG_1)
        time.sleep(1)

        # Re-write the same rule key after the tag was removed; the resolved
        # prefixes are expected to remain the last-known TAG_1 contents.
        ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1,
                            priority=2, action=Action.ACTION_DENY, terminating=False,
                            src_tag=[TAG_1], dst_addr=["192.168.1.2/30", "192.168.2.2/30", "192.168.3.2/30"],
                            src_port=[PortRange(0, 1)], dst_port=[PortRange(0, 1)])

        rule2_id = ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0]
        rule2_attr = ctx.asic_dash_acl_rule_table[rule2_id]

        assert prefix_list_to_set(rule2_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes

        # FIX: the original also removed ACL_RULE_2 and TAG_2, neither of
        # which this test ever creates.
        ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_1)
        ctx.remove_acl_group(ACL_GROUP_1)
        ctx.remove_prefix_tag(TAG_1)

    def test_tag_create_delay(self, ctx):
        ctx.create_acl_group(ACL_GROUP_1, IpVersion.IP_VERSION_IPV4)
        ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0]

        # Create acl rule before TAG_1, TAG_2
        ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1,
                            priority=1, action=Action.ACTION_PERMIT, terminating=False,
                            src_tag=[TAG_1], dst_tag=[TAG_2],
                            src_port=[PortRange(0, 1)], dst_port=[PortRange(0, 1)])

        # The rule should not be created since TAG_1, TAG_2 are not created yet
        time.sleep(3)
        ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=0)

        tagsrc_prefixes = {"1.2.3.4/32", "5.6.0.0/16"}
        ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tagsrc_prefixes)

        # The rule should not be created since TAG_2 is not created yet
        time.sleep(3)
        ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=0)

        tagdst_prefixes = {"10.20.30.40/32", "50.60.0.0/16"}
        ctx.create_prefix_tag(TAG_2, IpVersion.IP_VERSION_IPV4, tagdst_prefixes)

        rule_id = ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0]
        rule_attr = ctx.asic_dash_acl_rule_table[rule_id]

        assert prefix_list_to_set(rule_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tagsrc_prefixes
        assert prefix_list_to_set(rule_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == tagdst_prefixes

        ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_1)
        ctx.remove_acl_group(ACL_GROUP_1)
        ctx.remove_prefix_tag(TAG_1)
        ctx.remove_prefix_tag(TAG_2)
# Add dummy always-pass test at end as workaround for an issue where, when
# Flaky fails on the final test, it invokes module tear-down before retrying.
def test_nonflaky_dummy():
    pass


# ---------------------------------------------------------------------------
# tests/test_dash_vnet.py helpers
# NOTE(review): project-local imports (swsscommon, dash_api.*) from the
# original preamble are required at runtime; only stdlib imports restated.
# ---------------------------------------------------------------------------
import typing
import time
import binascii
import uuid
import ipaddress
import sys
import socket


DVS_ENV = ["HWSKU=DPU-2P"]
NUM_PORTS = 2


def to_string(value):
    """Render a field value for swsscommon: bools -> "true"/"false",
    bytes pass through, everything else str()-ified."""
    if isinstance(value, bool):
        return "true" if value else "false"
    elif isinstance(value, bytes):
        return value
    return str(value)


class ProduceStateTable(object):
    """Minimal write wrapper over a swsscommon ProducerStateTable."""

    def __init__(self, database, table_name: str):
        self.table = swsscommon.ProducerStateTable(
            database.db_connection,
            table_name)

    def __setitem__(self, key: str, pairs: typing.Union[dict, list, tuple]):
        pairs_str = []
        if isinstance(pairs, dict):
            pairs = pairs.items()
        for k, v in pairs:
            pairs_str.append((to_string(k), to_string(v)))
        self.table.set(key, pairs_str)

    def __delitem__(self, key: str):
        self.table.delete(str(key))


class Table(object):
    """Read-only wrapper over one swsscommon Table."""

    def __init__(self, database, table_name: str):
        self.table_name = table_name
        self.table = swsscommon.Table(database.db_connection, self.table_name)

    def __getitem__(self, key: str):
        exists, result = self.table.get(str(key))
        if not exists:
            return None
        else:
            return dict(result)

    def get_keys(self):
        return self.table.getKeys()

    def get_newly_created_oid(self, old_oids, timeout=5):
        """Return the OID present in this table but absent from old_oids.

        BUG FIX: the original referenced undefined names (`self.asic_db`
        and `table`) and raised NameError whenever it was called. Poll this
        table's own keys until exactly one new entry appears, up to
        `timeout` seconds (new, backward-compatible parameter).
        """
        deadline = time.time() + timeout
        while True:
            new_oids = [oid for oid in self.get_keys() if oid not in old_oids]
            if new_oids:
                return new_oids[0]
            if time.time() >= deadline:
                raise AssertionError(
                    "No new OID appeared in {} within {}s".format(self.table_name, timeout))
            time.sleep(0.1)


class Dash(object):
    """Bundles the APPL_DB producer tables and ASIC_DB views used by the
    DASH VNET tests."""

    def __init__(self, dvs):
        self.dvs = dvs
        self.app_dash_appliance_table = ProduceStateTable(
            self.dvs.get_app_db(), "DASH_APPLIANCE_TABLE")
        self.asic_direction_lookup_table = Table(
            self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_DIRECTION_LOOKUP_ENTRY")
        self.asic_vip_table = Table(
            self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_VIP_ENTRY")
        self.app_dash_vnet_table = ProduceStateTable(
            self.dvs.get_app_db(), "DASH_VNET_TABLE")
        self.asic_dash_vnet_table = Table(
            self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_VNET")
        self.app_dash_eni_table = ProduceStateTable(
            self.dvs.get_app_db(), "DASH_ENI_TABLE")
        self.asic_eni_table = Table(
            self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_ENI")
        self.asic_eni_ether_addr_map_table = Table(
            self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_ENI_ETHER_ADDRESS_MAP_ENTRY")
        self.app_dash_vnet_map_table = ProduceStateTable(
            self.dvs.get_app_db(), "DASH_VNET_MAPPING_TABLE")
        self.asic_dash_outbound_ca_to_pa_table = Table(
            self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_CA_TO_PA_ENTRY")
        self.asic_pa_validation_table = Table(
            self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_PA_VALIDATION_ENTRY")
        self.app_dash_route_table = ProduceStateTable(
            self.dvs.get_app_db(), "DASH_ROUTE_TABLE")
        self.app_dash_route_rule_table = ProduceStateTable(
            self.dvs.get_app_db(), "DASH_ROUTE_RULE_TABLE")
        self.asic_outbound_routing_table = Table(
            self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_ROUTING_ENTRY")
        self.asic_inbound_routing_rule_table = Table(
            self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_INBOUND_ROUTING_ENTRY")

    def create_appliance(self, appliance_id, attr_maps: dict):
        self.app_dash_appliance_table[str(appliance_id)] = attr_maps

    def remove_appliance(self, appliance_id):
        del self.app_dash_appliance_table[str(appliance_id)]

    def create_vnet(self, vnet, attr_maps: dict):
        self.app_dash_vnet_table[str(vnet)] = attr_maps

    def remove_vnet(self, vnet):
        del self.app_dash_vnet_table[str(vnet)]

    def create_eni(self, eni, attr_maps: dict):
        self.app_dash_eni_table[str(eni)] = attr_maps

    def remove_eni(self, eni):
        del self.app_dash_eni_table[str(eni)]

    def create_vnet_map(self, vnet, ip, attr_maps: dict):
        self.app_dash_vnet_map_table[str(vnet) + ":" + str(ip)] = attr_maps

    def remove_vnet_map(self, vnet, ip):
        del self.app_dash_vnet_map_table[str(vnet) + ":" + str(ip)]

    def create_outbound_routing(self, mac_string, ip, attr_maps: dict):
        self.app_dash_route_table[str(mac_string) + ":" + str(ip)] = attr_maps

    def remove_outbound_routing(self, mac_string, ip):
        del self.app_dash_route_table[str(mac_string) + ":" + str(ip)]

    def create_inbound_routing(self, mac_string, vni, ip, attr_maps: dict):
        self.app_dash_route_rule_table[str(mac_string) + ":" + str(vni) + ":" + str(ip)] = attr_maps

    def remove_inbound_routing(self, mac_string, vni, ip):
        del self.app_dash_route_rule_table[str(mac_string) + ":" + str(vni) + ":" + str(ip)]
test_vnet(self, dvs): + dashobj = Dash(dvs) + self.vnet = "Vnet1" + self.vni = "45654" + self.guid = "559c6ce8-26ab-4193-b946-ccc6e8f930b2" + pb = Vnet() + pb.vni = int(self.vni) + pb.guid.value = bytes.fromhex(uuid.UUID(self.guid).hex) + dashobj.create_vnet(self.vnet, {"pb": pb.SerializeToString()}) + time.sleep(3) + vnets = dashobj.asic_dash_vnet_table.get_keys() + assert vnets + self.vnet_oid = vnets[0] + vnet_attr = dashobj.asic_dash_vnet_table[self.vnet_oid] + assert vnet_attr["SAI_VNET_ATTR_VNI"] == "45654" + return dashobj + + def test_eni(self, dvs): + dashobj = Dash(dvs) + self.vnet = "Vnet1" + self.mac_string = "F4939FEFC47E" + self.mac_address = "F4:93:9F:EF:C4:7E" + self.eni_id = "497f23d7-f0ac-4c99-a98f-59b470e8c7bd" + self.underlay_ip = "25.1.1.1" + self.admin_state = "enabled" + pb = Eni() + pb.eni_id = self.eni_id + pb.mac_address = bytes.fromhex(self.mac_address.replace(":", "")) + pb.underlay_ip.ipv4 = socket.htonl(int(ipaddress.ip_address(self.underlay_ip))) + pb.admin_state = State.STATE_ENABLED + pb.vnet = self.vnet + dashobj.create_eni(self.mac_string, {"pb": pb.SerializeToString()}) + time.sleep(3) + vnets = dashobj.asic_dash_vnet_table.get_keys() + assert vnets + self.vnet_oid = vnets[0] + enis = dashobj.asic_eni_table.get_keys() + assert enis + self.eni_oid = enis[0]; + fvs = dashobj.asic_eni_table[enis[0]] + for fv in fvs.items(): + if fv[0] == "SAI_ENI_ATTR_VNET_ID": + assert fv[1] == str(self.vnet_oid) + if fv[0] == "SAI_ENI_ATTR_PPS": + assert fv[1] == 0 + if fv[0] == "SAI_ENI_ATTR_CPS": + assert fv[1] == 0 + if fv[0] == "SAI_ENI_ATTR_FLOWS": + assert fv[1] == 0 + if fv[0] == "SAI_ENI_ATTR_ADMIN_STATE": + assert fv[1] == "true" + + time.sleep(3) + eni_addr_maps = dashobj.asic_eni_ether_addr_map_table.get_keys() + assert eni_addr_maps + fvs = dashobj.asic_eni_ether_addr_map_table[eni_addr_maps[0]] + for fv in fvs.items(): + if fv[0] == "SAI_ENI_ETHER_ADDRESS_MAP_ENTRY_ATTR_ENI_ID": + assert fv[1] == str(self.eni_oid) + return dashobj + + 
def test_vnet_map(self, dvs): + dashobj = Dash(dvs) + self.vnet = "Vnet1" + self.ip1 = "10.1.1.1" + self.ip2 = "10.1.1.2" + self.mac_address = "F4:93:9F:EF:C4:7E" + self.routing_type = "vnet_encap" + self.underlay_ip = "101.1.2.3" + pb = VnetMapping() + pb.mac_address = bytes.fromhex(self.mac_address.replace(":", "")) + pb.action_type = RoutingType.ROUTING_TYPE_VNET_ENCAP + pb.underlay_ip.ipv4 = socket.htonl(int(ipaddress.ip_address(self.underlay_ip))) + + dashobj.create_vnet_map(self.vnet, self.ip1, {"pb": pb.SerializeToString()}) + dashobj.create_vnet_map(self.vnet, self.ip2, {"pb": pb.SerializeToString()}) + time.sleep(3) + + vnet_ca_to_pa_maps = dashobj.asic_dash_outbound_ca_to_pa_table.get_keys() + assert len(vnet_ca_to_pa_maps) >= 2 + fvs = dashobj.asic_dash_outbound_ca_to_pa_table[vnet_ca_to_pa_maps[0]] + for fv in fvs.items(): + if fv[0] == "SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_UNDERLAY_DIP": + assert fv[1] == "101.1.2.3" + if fv[0] == "SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_DMAC": + assert fv[1] == "F4:93:9F:EF:C4:7E" + + vnet_pa_validation_maps = dashobj.asic_pa_validation_table.get_keys() + assert vnet_pa_validation_maps + fvs = dashobj.asic_pa_validation_table[vnet_pa_validation_maps[0]] + for fv in fvs.items(): + if fv[0] == "SAI_PA_VALIDATION_ENTRY_ATTR_ACTION": + assert fv[1] == "SAI_PA_VALIDATION_ENTRY_ACTION_PERMIT" + return dashobj + + def test_outbound_routing(self, dvs): + dashobj = Dash(dvs) + self.vnet = "Vnet1" + self.mac_string = "F4939FEFC47E" + self.ip = "10.1.0.0/24" + self.action_type = "vnet_direct" + self.overlay_ip= "10.0.0.6" + pb = Route() + pb.action_type = RoutingType.ROUTING_TYPE_VNET_DIRECT + pb.vnet_direct.vnet = self.vnet + pb.vnet_direct.overlay_ip.ipv4 = socket.htonl(int(ipaddress.ip_address(self.overlay_ip))) + dashobj.create_outbound_routing(self.mac_string, self.ip, {"pb": pb.SerializeToString()}) + time.sleep(3) + + outbound_routing_entries = dashobj.asic_outbound_routing_table.get_keys() + assert outbound_routing_entries 
+ fvs = dashobj.asic_outbound_routing_table[outbound_routing_entries[0]] + for fv in fvs.items(): + if fv[0] == "SAI_OUTBOUND_ROUTING_ENTRY_ATTR_ACTION": + assert fv[1] == "SAI_OUTBOUND_ROUTING_ENTRY_ACTION_ROUTE_VNET_DIRECT" + if fv[0] == "SAI_OUTBOUND_ROUTING_ENTRY_ATTR_OVERLAY_IP": + assert fv[1] == "10.0.0.6" + assert "SAI_OUTBOUND_ROUTING_ENTRY_ATTR_DST_VNET_ID" in fvs + return dashobj + + def test_inbound_routing(self, dvs): + dashobj = Dash(dvs) + self.mac_string = "F4939FEFC47E" + self.vnet = "Vnet1" + self.vni = "3251" + self.ip = "10.1.1.1" + self.action_type = "decap" + self.pa_validation = "true" + self.priority = "1" + self.protocol = "0" + pb = RouteRule() +# pb.action_type = RoutingType.ROUTING_TYPE_DECAP + pb.pa_validation = True + pb.priority = int(self.priority) + pb.protocol = int(self.protocol) + pb.vnet = self.vnet + + dashobj.create_inbound_routing(self.mac_string, self.vni, self.ip, {"pb": pb.SerializeToString()}) + time.sleep(3) + + inbound_routing_entries = dashobj.asic_inbound_routing_rule_table.get_keys() + assert inbound_routing_entries + fvs = dashobj.asic_inbound_routing_rule_table[inbound_routing_entries[0]] + for fv in fvs.items(): + if fv[0] == "SAI_INBOUND_ROUTING_ENTRY_ATTR_ACTION": + assert fv[1] == "SAI_INBOUND_ROUTING_ENTRY_ACTION_VXLAN_DECAP_PA_VALIDATE" + return dashobj + + def test_cleanup(self, dvs): + dashobj = Dash(dvs) + self.vnet = "Vnet1" + self.mac_string = "F4939FEFC47E" + self.vni = "3251" + self.sip = "10.1.1.1" + self.dip = "10.1.0.0/24" + self.appliance_id = "100" + dashobj.remove_inbound_routing(self.mac_string, self.vni, self.sip) + dashobj.remove_outbound_routing(self.mac_string, self.dip) + dashobj.remove_eni(self.mac_string) + dashobj.remove_vnet_map(self.vnet, self.sip) + dashobj.remove_vnet(self.vnet) + dashobj.remove_appliance(self.appliance_id) + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down +# before retrying +def 
test_nonflaky_dummy(): + pass diff --git a/tests/test_evpn_l3_vxlan.py b/tests/test_evpn_l3_vxlan.py index 7bcabacb6d..3f424f3830 100644 --- a/tests/test_evpn_l3_vxlan.py +++ b/tests/test_evpn_l3_vxlan.py @@ -35,6 +35,7 @@ def test_sip_tunnel_vrf_vni_map(self, dvs, testlog): print ("\n\nTesting Create and Delete SIP Tunnel and VRF VNI Map entries") print ("\tCreate SIP Tunnel") vxlan_obj.create_vlan1(dvs,"Vlan100") + vxlan_obj.check_vlan_obj(dvs, "100") vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) @@ -68,10 +69,10 @@ def test_sip_tunnel_vrf_vni_map(self, dvs, testlog): helper.check_object(self.pdb, "VXLAN_VRF_TABLE", "%s:%s" % (tunnel_name, vrf_map_name), exp_attr1) print ("\tTesting SIP Tunnel Creation") - vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist) + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, tunnel_map_entry_count = 2) print ("\tTesting Tunnel Vlan VNI Map Entry") - vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + vxlan_obj.check_vxlan_tunnel_map_entry_removed(dvs, tunnel_name, vlanlist, vnilist) print ("\tTesting Tunnel VRF VNI Map Entry") vxlan_obj.check_vxlan_tunnel_vrf_map_entry(dvs, tunnel_name, 'Vrf-RED', '1000') @@ -82,6 +83,7 @@ def test_sip_tunnel_vrf_vni_map(self, dvs, testlog): vxlan_obj.check_vxlan_tunnel_vrf_map_entry_remove(dvs, tunnel_name, 'Vrf-RED', '1000') print ("\tTesting Tunnel Vlan VNI Map entry removal") + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) @@ -96,7 +98,7 @@ def test_sip_tunnel_vrf_vni_map(self, dvs, testlog): # Test 2 - Create and Delete DIP Tunnel on adding and removing prefix route # @pytest.mark.skip(reason="Starting Route Orch, VRF Orch to be merged") # 
@pytest.mark.dev_sanity - def test_prefix_route_create_dip_tunnel(self, dvs, testlog): + def test_prefix_route_create_tunnel(self, dvs, testlog): vxlan_obj = self.get_vxlan_obj() helper = self.get_vxlan_helper() @@ -143,10 +145,10 @@ def test_prefix_route_create_dip_tunnel(self, dvs, testlog): helper.check_object(self.pdb, "VXLAN_VRF_TABLE", "%s:%s" % (tunnel_name, vrf_map_name), exp_attr1) print ("\tTesting SIP Tunnel Creation") - vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist) + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, tunnel_map_entry_count = 2) print ("\tTesting Tunnel Vlan Map Entry") - vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + vxlan_obj.check_vxlan_tunnel_map_entry_removed(dvs, tunnel_name, vlanlist, vnilist) print ("\tTesting Tunnel Vrf Map Entry") vxlan_obj.check_vxlan_tunnel_vrf_map_entry(dvs, tunnel_name, 'Vrf-RED', '1000') @@ -160,17 +162,11 @@ def test_prefix_route_create_dip_tunnel(self, dvs, testlog): vxlan_obj.create_vrf_route(dvs, "80.80.1.0/24", 'Vrf-RED', '7.7.7.7', "Vlan100", "00:11:11:11:11:11", '1000') vxlan_obj.check_vrf_routes(dvs, "80.80.1.0/24", 'Vrf-RED', '7.7.7.7', tunnel_name, "00:11:11:11:11:11", '1000') - print ("\tTesting DIP tunnel 7.7.7.7 creation") - vxlan_obj.check_vxlan_dip_tunnel(dvs, tunnel_name, '6.6.6.6', '7.7.7.7') - print ("\tTest VRF IPv4 Route with Tunnel Nexthop Delete") vxlan_obj.delete_vrf_route(dvs, "80.80.1.0/24", 'Vrf-RED') vxlan_obj.check_del_tunnel_nexthop(dvs, 'Vrf-RED', '7.7.7.7', tunnel_name, "00:11:11:11:11:11", '1000') vxlan_obj.check_del_vrf_routes(dvs, "80.80.1.0/24", 'Vrf-RED') - print ("\tTesting DIP tunnel 7.7.7.7 deletion") - vxlan_obj.check_vxlan_dip_tunnel_delete(dvs, '7.7.7.7') - print ("\tTesting Tunnel Vrf Map Entry removal") vxlan_obj.remove_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED') vxlan_obj.check_vxlan_tunnel_vrf_map_entry_remove(dvs, tunnel_name, 'Vrf-RED', '1000') @@ -180,6 +176,7 @@ def 
test_prefix_route_create_dip_tunnel(self, dvs, testlog): vxlan_obj.check_del_router_interface(dvs, "Vlan100") print ("\tTesting Tunnel Map entry removal") + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) @@ -196,7 +193,7 @@ def test_prefix_route_create_dip_tunnel(self, dvs, testlog): # Test 3 - Create and Delete DIP Tunnel and Test IPv4 route and overlay nexthop add and delete # @pytest.mark.skip(reason="Starting Route Orch, VRF Orch to be merged") # @pytest.mark.dev_sanity - def test_dip_tunnel_ipv4_routes(self, dvs, testlog): + def test_tunnel_ipv4_routes(self, dvs, testlog): vxlan_obj = self.get_vxlan_obj() helper = self.get_vxlan_helper() @@ -209,6 +206,7 @@ def test_dip_tunnel_ipv4_routes(self, dvs, testlog): print ("\n\nTesting IPv4 Route and Overlay Nexthop Add and Delete") print ("\tCreate SIP Tunnel") vxlan_obj.create_vlan1(dvs,"Vlan100") + vxlan_obj.check_vlan_obj(dvs, "100") vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) @@ -242,29 +240,14 @@ def test_dip_tunnel_ipv4_routes(self, dvs, testlog): helper.check_object(self.pdb, "VXLAN_VRF_TABLE", "%s:%s" % (tunnel_name, vrf_map_name), exp_attr1) print ("\tTesting SIP Tunnel Creation") - vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist) + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, tunnel_map_entry_count = 2) print ("\tTesting Tunnel Vlan Map Entry") - vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + vxlan_obj.check_vxlan_tunnel_map_entry_removed(dvs, tunnel_name, vlanlist, vnilist) print ("\tTesting Tunnel Vrf Map Entry") vxlan_obj.check_vxlan_tunnel_vrf_map_entry(dvs, tunnel_name, 'Vrf-RED', '1000') - print ("\tTesting First DIP tunnel creation to 7.7.7.7") - 
vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7', '1000') - vxlan_obj.check_vxlan_dip_tunnel(dvs, tunnel_name, '6.6.6.6', '7.7.7.7') - - print ("\tTesting VLAN 100 extension") - vxlan_obj.check_vlan_extension(dvs, '100', '7.7.7.7') - - print ("\tTesting Second DIP tunnel creation to 8.8.8.8") - vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan100', '8.8.8.8', '1000') - vxlan_obj.check_vxlan_dip_tunnel(dvs, tunnel_name, '6.6.6.6', '8.8.8.8') - - print ("\tTesting VLAN 100 extension to 8.8.8.8 and 7.7.7.7") - vxlan_obj.check_vlan_extension(dvs, '100', '8.8.8.8') - vxlan_obj.check_vlan_extension(dvs, '100', '7.7.7.7') - print ("\tTesting VLAN 100 interface creation") vxlan_obj.create_vlan_interface(dvs, "Vlan100", "Ethernet24", "Vrf-RED", "100.100.3.1/24") vxlan_obj.check_router_interface(dvs, 'Vrf-RED', vxlan_obj.vlan_id_map['100'], 2) @@ -326,6 +309,44 @@ def test_dip_tunnel_ipv4_routes(self, dvs, testlog): vxlan_obj.check_vrf_routes_ecmp_nexthop_grp_del(dvs, 2) vxlan_obj.check_del_vrf_routes(dvs, "80.80.1.0/24", 'Vrf-RED') + print ("\n\nTest VRF IPv4 Multiple Route with ECMP Tunnel Nexthop Add and Delete") + vxlan_obj.fetch_exist_entries(dvs) + + ecmp_nexthop_attr = [ + ("nexthop", "7.7.7.7,8.8.8.8"), + ("ifname", "Vlan100,Vlan100"), + ("vni_label", "1000,1000"), + ("router_mac", "00:11:11:11:11:11,00:22:22:22:22:22"), + ] + + print ("\tTest VRF IPv4 Multiple Route with ECMP Tunnel Nexthop [7.7.7.7 , 8.8.8.8] Add") + vxlan_obj.create_vrf_route_ecmp(dvs, "80.80.1.0/24", 'Vrf-RED', ecmp_nexthop_attr) + + nh_count = 2 + ecmp_nhid_list = vxlan_obj.check_vrf_routes_ecmp(dvs, "80.80.1.0/24", 'Vrf-RED', tunnel_name, nh_count) + assert nh_count == len(ecmp_nhid_list) + vxlan_obj.check_add_tunnel_nexthop(dvs, ecmp_nhid_list[0], '7.7.7.7', tunnel_name, '00:11:11:11:11:11', '1000') + vxlan_obj.check_add_tunnel_nexthop(dvs, ecmp_nhid_list[1], '8.8.8.8', tunnel_name, '00:22:22:22:22:22', '1000') + + nh_count = 2 + vxlan_obj.create_vrf_route_ecmp(dvs, "90.90.1.0/24", 
'Vrf-RED', ecmp_nexthop_attr) + ecmp_nhid_list = vxlan_obj.check_vrf_routes_ecmp(dvs, "90.90.1.0/24", 'Vrf-RED', tunnel_name, nh_count) + assert nh_count == len(ecmp_nhid_list) + vxlan_obj.check_add_tunnel_nexthop(dvs, ecmp_nhid_list[0], '7.7.7.7', tunnel_name, '00:11:11:11:11:11', '1000') + vxlan_obj.check_add_tunnel_nexthop(dvs, ecmp_nhid_list[1], '8.8.8.8', tunnel_name, '00:22:22:22:22:22', '1000') + + print ("\tTest VRF IPv4 Multiple Route with ECMP Tunnel Nexthop [7.7.7.7 , 8.8.8.8] Delete") + vxlan_obj.fetch_exist_entries(dvs) + vxlan_obj.delete_vrf_route(dvs, "80.80.1.0/24", 'Vrf-RED') + vxlan_obj.check_del_vrf_routes(dvs, "80.80.1.0/24", 'Vrf-RED') + vxlan_obj.fetch_exist_entries(dvs) + vxlan_obj.delete_vrf_route(dvs, "90.90.1.0/24", 'Vrf-RED') + vxlan_obj.check_del_vrf_routes(dvs, "90.90.1.0/24", 'Vrf-RED') + helper.check_deleted_object(self.adb, vxlan_obj.ASIC_NEXT_HOP, ecmp_nhid_list[0]) + helper.check_deleted_object(self.adb, vxlan_obj.ASIC_NEXT_HOP, ecmp_nhid_list[1]) + + vxlan_obj.check_vrf_routes_ecmp_nexthop_grp_del(dvs, 2) + print ("\n\nTest VRF IPv4 Route with Tunnel Nexthop update from non-ECMP to ECMP") print ("\tTest VRF IPv4 Route with Tunnel Nexthop 7.7.7.7 Add") vxlan_obj.fetch_exist_entries(dvs) @@ -371,21 +392,12 @@ def test_dip_tunnel_ipv4_routes(self, dvs, testlog): vxlan_obj.remove_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED') vxlan_obj.check_vxlan_tunnel_vrf_map_entry_remove(dvs, tunnel_name, 'Vrf-RED', '1000') - print ("\tTesting LastVlan removal and DIP tunnel delete for 7.7.7.7") - vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7') - vxlan_obj.check_vlan_extension_delete(dvs, '100', '7.7.7.7') - vxlan_obj.check_vxlan_dip_tunnel_delete(dvs, '7.7.7.7') - - print ("\tTesting LastVlan removal and DIP tunnel delete for 8.8.8.8") - vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '8.8.8.8') - vxlan_obj.check_vlan_extension_delete(dvs, '100', '8.8.8.8') - vxlan_obj.check_vxlan_dip_tunnel_delete(dvs, '8.8.8.8') - print ("\tTesting Vlan 100 
interface delete") vxlan_obj.delete_vlan_interface(dvs, "Vlan100", "100.100.3.1/24") vxlan_obj.check_del_router_interface(dvs, "Vlan100") print ("\tTesting Tunnel Map entry removal") + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) @@ -402,7 +414,7 @@ def test_dip_tunnel_ipv4_routes(self, dvs, testlog): # Test 4 - Create and Delete DIP Tunnel and Test IPv6 route and overlay nexthop add and delete # @pytest.mark.skip(reason="Starting Route Orch, VRF Orch to be merged") # @pytest.mark.dev_sanity - def test_dip_tunnel_ipv6_routes(self, dvs, testlog): + def test_tunnel_ipv6_routes(self, dvs, testlog): vxlan_obj = self.get_vxlan_obj() helper = self.get_vxlan_helper() @@ -415,6 +427,7 @@ def test_dip_tunnel_ipv6_routes(self, dvs, testlog): print ("\n\nTesting IPv6 Route and Overlay Nexthop Add and Delete") print ("\tCreate SIP Tunnel") vxlan_obj.create_vlan1(dvs,"Vlan100") + vxlan_obj.check_vlan_obj(dvs, "100") vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) @@ -449,28 +462,14 @@ def test_dip_tunnel_ipv6_routes(self, dvs, testlog): helper.check_object(self.pdb, "VXLAN_VRF_TABLE", "%s:%s" % (tunnel_name, vrf_map_name), exp_attr1) print ("\tTesting SIP Tunnel Creation") - vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist) + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, tunnel_map_entry_count = 2) print ("\tTesting Tunnel Vlan Map Entry") - vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + vxlan_obj.check_vxlan_tunnel_map_entry_removed(dvs, tunnel_name, vlanlist, vnilist) print ("\tTesting Tunnel Vrf Map Entry") vxlan_obj.check_vxlan_tunnel_vrf_map_entry(dvs, tunnel_name, 'Vrf-RED', '1000') - print ("\tTesting First DIP tunnel 
creation to 7.7.7.7") - vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7', '1000') - vxlan_obj.check_vxlan_dip_tunnel(dvs, tunnel_name, '6.6.6.6', '7.7.7.7') - - print ("\tTesting VLAN 100 extension") - vxlan_obj.check_vlan_extension(dvs, '100', '7.7.7.7') - - print ("\tTesting Second DIP tunnel creation to 8.8.8.8") - vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan100', '8.8.8.8', '1000') - vxlan_obj.check_vxlan_dip_tunnel(dvs, tunnel_name, '6.6.6.6', '8.8.8.8') - - print ("\tTesting VLAN 100 extension to 8.8.8.8 and 7.7.7.7") - vxlan_obj.check_vlan_extension(dvs, '100', '8.8.8.8') - vxlan_obj.check_vlan_extension(dvs, '100', '7.7.7.7') vxlan_obj.fetch_exist_entries(dvs) print ("\tTesting VLAN 100 interface creation") @@ -579,21 +578,12 @@ def test_dip_tunnel_ipv6_routes(self, dvs, testlog): vxlan_obj.remove_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED') vxlan_obj.check_vxlan_tunnel_vrf_map_entry_remove(dvs, tunnel_name, 'Vrf-RED', '1000') - print ("\tTesting LastVlan removal and DIP tunnel delete for 7.7.7.7") - vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7') - vxlan_obj.check_vlan_extension_delete(dvs, '100', '7.7.7.7') - vxlan_obj.check_vxlan_dip_tunnel_delete(dvs, '7.7.7.7') - - print ("\tTesting LastVlan removal and DIP tunnel delete for 8.8.8.8") - vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '8.8.8.8') - vxlan_obj.check_vlan_extension_delete(dvs, '100', '8.8.8.8') - vxlan_obj.check_vxlan_dip_tunnel_delete(dvs, '8.8.8.8') - print ("\tTesting Vlan 100 interface delete") vxlan_obj.delete_vlan_interface(dvs, "Vlan100", "2001::8/64") vxlan_obj.check_del_router_interface(dvs, "Vlan100") print ("\tTesting Tunnel Map entry removal") + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) diff --git a/tests/test_evpn_l3_vxlan_p2mp.py b/tests/test_evpn_l3_vxlan_p2mp.py 
index 1bff4cce1e..f3041979eb 100644 --- a/tests/test_evpn_l3_vxlan_p2mp.py +++ b/tests/test_evpn_l3_vxlan_p2mp.py @@ -67,10 +67,10 @@ def test_sip_tunnel_vrf_vni_map(self, dvs, testlog): helper.check_object(self.pdb, "VXLAN_VRF_TABLE", "%s:%s" % (tunnel_name, vrf_map_name), exp_attr1) print ("\tTesting SIP Tunnel Creation") - vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False) + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False, tunnel_map_entry_count=2) print ("\tTesting Tunnel Vlan VNI Map Entry") - vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + vxlan_obj.check_vxlan_tunnel_map_entry_removed(dvs, tunnel_name, vlanlist, vnilist) print ("\tTesting Tunnel VRF VNI Map Entry") vxlan_obj.check_vxlan_tunnel_vrf_map_entry(dvs, tunnel_name, 'Vrf-RED', '1000') @@ -81,6 +81,7 @@ def test_sip_tunnel_vrf_vni_map(self, dvs, testlog): vxlan_obj.check_vxlan_tunnel_vrf_map_entry_remove(dvs, tunnel_name, 'Vrf-RED', '1000') print ("\tTesting Tunnel Vlan VNI Map entry removal") + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) @@ -141,10 +142,10 @@ def test_prefix_route_create_remote_endpoint(self, dvs, testlog): helper.check_object(self.pdb, "VXLAN_VRF_TABLE", "%s:%s" % (tunnel_name, vrf_map_name), exp_attr1) print ("\tTesting SIP Tunnel Creation") - vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False) + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False, tunnel_map_entry_count=2) print ("\tTesting Tunnel Vlan Map Entry") - vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + vxlan_obj.check_vxlan_tunnel_map_entry_removed(dvs, tunnel_name, vlanlist, vnilist) 
print ("\tTesting Tunnel Vrf Map Entry") vxlan_obj.check_vxlan_tunnel_vrf_map_entry(dvs, tunnel_name, 'Vrf-RED', '1000') @@ -172,6 +173,7 @@ def test_prefix_route_create_remote_endpoint(self, dvs, testlog): vxlan_obj.check_del_router_interface(dvs, "Vlan100") print ("\tTesting Tunnel Map entry removal") + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) @@ -200,6 +202,7 @@ def test_remote_ipv4_routes(self, dvs, testlog): print ("\n\nTesting IPv4 Route and Overlay Nexthop Add and Delete") print ("\tCreate SIP Tunnel") vxlan_obj.create_vlan1(dvs,"Vlan100") + vxlan_obj.check_vlan_obj(dvs, "100") vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) @@ -233,27 +236,14 @@ def test_remote_ipv4_routes(self, dvs, testlog): helper.check_object(self.pdb, "VXLAN_VRF_TABLE", "%s:%s" % (tunnel_name, vrf_map_name), exp_attr1) print ("\tTesting SIP Tunnel Creation") - vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False) + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False, tunnel_map_entry_count=2) print ("\tTesting Tunnel Vlan Map Entry") - vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + vxlan_obj.check_vxlan_tunnel_map_entry_removed(dvs, tunnel_name, vlanlist, vnilist) print ("\tTesting Tunnel Vrf Map Entry") vxlan_obj.check_vxlan_tunnel_vrf_map_entry(dvs, tunnel_name, 'Vrf-RED', '1000') - print ("\tTesting First Remote end point to 7.7.7.7") - vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7', '1000') - - print ("\tTesting VLAN 100 extension") - vxlan_obj.check_vlan_extension_p2mp(dvs, '100', '6.6.6.6', '7.7.7.7') - - print ("\tTesting Second remote end point to 8.8.8.8") - 
vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan100', '8.8.8.8', '1000') - - print ("\tTesting VLAN 100 extension to 8.8.8.8 and 7.7.7.7") - vxlan_obj.check_vlan_extension_p2mp(dvs, '100', '6.6.6.6', '8.8.8.8') - vxlan_obj.check_vlan_extension_p2mp(dvs, '100', '6.6.6.6', '7.7.7.7') - print ("\tTesting VLAN 100 interface creation") vxlan_obj.create_vlan_interface(dvs, "Vlan100", "Ethernet24", "Vrf-RED", "100.100.3.1/24") vxlan_obj.check_router_interface(dvs, 'Vrf-RED', vxlan_obj.vlan_id_map['100'], 2) @@ -360,19 +350,12 @@ def test_remote_ipv4_routes(self, dvs, testlog): vxlan_obj.remove_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED') vxlan_obj.check_vxlan_tunnel_vrf_map_entry_remove(dvs, tunnel_name, 'Vrf-RED', '1000') - print ("\tTesting LastVlan removal and remote end point delete for 7.7.7.7") - vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7') - vxlan_obj.check_vlan_extension_delete_p2mp(dvs, '100', '6.6.6.6', '7.7.7.7') - - print ("\tTesting LastVlan removal and remote end point delete for 8.8.8.8") - vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '8.8.8.8') - vxlan_obj.check_vlan_extension_delete_p2mp(dvs, '100', '6.6.6.6', '8.8.8.8') - print ("\tTesting Vlan 100 interface delete") vxlan_obj.delete_vlan_interface(dvs, "Vlan100", "100.100.3.1/24") vxlan_obj.check_del_router_interface(dvs, "Vlan100") print ("\tTesting Tunnel Map entry removal") + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) @@ -402,6 +385,7 @@ def test_remote_ipv6_routes(self, dvs, testlog): print ("\n\nTesting IPv6 Route and Overlay Nexthop Add and Delete") print ("\tCreate SIP Tunnel") vxlan_obj.create_vlan1(dvs,"Vlan100") + vxlan_obj.check_vlan_obj(dvs, "100") vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) @@ -436,27 +420,14 @@ def 
test_remote_ipv6_routes(self, dvs, testlog): helper.check_object(self.pdb, "VXLAN_VRF_TABLE", "%s:%s" % (tunnel_name, vrf_map_name), exp_attr1) print ("\tTesting SIP Tunnel Creation") - vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False) + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False, tunnel_map_entry_count=2) print ("\tTesting Tunnel Vlan Map Entry") - vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + vxlan_obj.check_vxlan_tunnel_map_entry_removed(dvs, tunnel_name, vlanlist, vnilist) print ("\tTesting Tunnel Vrf Map Entry") vxlan_obj.check_vxlan_tunnel_vrf_map_entry(dvs, tunnel_name, 'Vrf-RED', '1000') - print ("\tTesting First remote endpoint creation to 7.7.7.7") - vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7', '1000') - - print ("\tTesting VLAN 100 extension") - vxlan_obj.check_vlan_extension_p2mp(dvs, '100', '6.6.6.6', '7.7.7.7') - - print ("\tTesting Second remote endpoint creation to 8.8.8.8") - vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan100', '8.8.8.8', '1000') - - print ("\tTesting VLAN 100 extension to 8.8.8.8 and 7.7.7.7") - vxlan_obj.check_vlan_extension_p2mp(dvs, '100', '6.6.6.6', '8.8.8.8') - vxlan_obj.check_vlan_extension_p2mp(dvs, '100', '6.6.6.6', '7.7.7.7') - vxlan_obj.fetch_exist_entries(dvs) print ("\tTesting VLAN 100 interface creation") vxlan_obj.create_vlan_interface(dvs, "Vlan100", "Ethernet24", "Vrf-RED", "2001::8/64") @@ -564,19 +535,12 @@ def test_remote_ipv6_routes(self, dvs, testlog): vxlan_obj.remove_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED') vxlan_obj.check_vxlan_tunnel_vrf_map_entry_remove(dvs, tunnel_name, 'Vrf-RED', '1000') - print ("\tTesting LastVlan removal and remote endpoint delete for 7.7.7.7") - vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7') - vxlan_obj.check_vlan_extension_delete_p2mp(dvs, '100', '6.6.6.6', '7.7.7.7') - - print ("\tTesting LastVlan removal and remote endpoint 
delete for 8.8.8.8") - vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '8.8.8.8') - vxlan_obj.check_vlan_extension_delete_p2mp(dvs, '100', '6.6.6.6', '8.8.8.8') - print ("\tTesting Vlan 100 interface delete") vxlan_obj.delete_vlan_interface(dvs, "Vlan100", "2001::8/64") vxlan_obj.check_del_router_interface(dvs, "Vlan100") print ("\tTesting Tunnel Map entry removal") + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) @@ -589,6 +553,95 @@ def test_remote_ipv6_routes(self, dvs, testlog): vxlan_obj.remove_vlan_member(dvs, "100", "Ethernet24") vxlan_obj.remove_vlan(dvs, "100") + def test_prefix_route_create_on_l2_vni(self, dvs, testlog): + vxlan_obj = self.get_vxlan_obj() + helper = self.get_vxlan_helper() + + self.setup_db(dvs) + tunnel_name = 'tunnel_2' + map_name = 'map_1000_100' + vrf_map_name = 'evpn_map_1000_Vrf-RED' + vxlan_obj.fetch_exist_entries(dvs) + + print ("\tCreate SIP Tunnel") + vlan_ids = vxlan_obj.helper.get_exist_entries(dvs, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN") + vlan_oid = vxlan_obj.create_vlan(dvs,"Vlan100", vlan_ids) + vxlan_obj.check_vlan_obj(dvs, "100") + vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) + + print ("\tCreate Vlan-VNI map") + vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + + print ("\tTesting VRF-VNI map in APP DB") + vxlan_obj.create_vrf(dvs, "Vrf-RED") + + vlanlist = ['100'] + vnilist = ['1000'] + + print ("\tTesting SIP Tunnel Creation") + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False, tunnel_map_entry_count=1) + + print ("\tTesting VLAN 100 interface creation") + vxlan_obj.create_vlan_interface(dvs, "Vlan100", "Ethernet24", "Vrf-RED", "100.100.3.1/24") + vxlan_obj.check_router_interface(dvs, 
'Vrf-RED', vlan_oid, 2) + + print ("\tTest if IPv4 Route with Tunnel Nexthop Add is not created") + vxlan_obj.create_vrf_route(dvs, "80.80.1.0/24", 'Vrf-RED', '7.7.7.7', "Vlan100", "00:11:11:11:11:11", '1000') + vxlan_obj.check_vrf_routes_absence(dvs, "80.80.1.0/24", 'Vrf-RED', '7.7.7.7', tunnel_name, "00:11:11:11:11:11", '1000') + + print ("\tCreate Vlan-VNI map and VRF-VNI map") + vxlan_obj.create_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED', '1000') + + exp_attrs = [ + ("vni", "1000"), + ] + exp_attr = {} + for an in range(len(exp_attrs)): + exp_attr[exp_attrs[an][0]] = exp_attrs[an][1] + + helper.check_object(self.pdb, "VRF_TABLE", 'Vrf-RED', exp_attr) + + exp_attrs1 = [ + ("vni", "1000"), + ("vlan", "Vlan100"), + ] + exp_attr1 = {} + for an in range(len(exp_attrs1)): + exp_attr1[exp_attrs1[an][0]] = exp_attrs1[an][1] + + helper.check_object(self.pdb, "VXLAN_VRF_TABLE", "%s:%s" % (tunnel_name, vrf_map_name), exp_attr1) + vxlan_obj.check_vxlan_tunnel_map_entry_removed(dvs, tunnel_name, vlanlist, vnilist) + vxlan_obj.check_vxlan_tunnel_vrf_map_entry(dvs, tunnel_name, 'Vrf-RED', '1000') + print ("\tTest VRF IPv4 Route with Tunnel Nexthop Add") + vxlan_obj.check_vrf_routes(dvs, "80.80.1.0/24", 'Vrf-RED', '7.7.7.7', tunnel_name, "00:11:11:11:11:11", '1000') + + print ("\tTest VRF IPv4 Route with Tunnel Nexthop Delete") + vxlan_obj.delete_vrf_route(dvs, "80.80.1.0/24", 'Vrf-RED') + vxlan_obj.check_del_tunnel_nexthop(dvs, 'Vrf-RED', '7.7.7.7', tunnel_name, "00:11:11:11:11:11", '1000') + vxlan_obj.check_del_vrf_routes(dvs, "80.80.1.0/24", 'Vrf-RED') + + print ("\tTesting Tunnel Vrf Map Entry removal") + vxlan_obj.remove_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED') + vxlan_obj.check_vxlan_tunnel_vrf_map_entry_remove(dvs, tunnel_name, 'Vrf-RED', '1000') + + print ("\tTesting Vlan 100 interface delete") + vxlan_obj.delete_vlan_interface(dvs, "Vlan100", "100.100.3.1/24") + vxlan_obj.check_del_router_interface(dvs, "Vlan100") + + print ("\tTesting Tunnel Map entry removal") + 
vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) + + print ("\tTesting SIP Tunnel Deletion") + vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) + vxlan_obj.remove_evpn_nvo(dvs, 'nvo1') + time.sleep(2) + vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6', ignore_bp=False) + vxlan_obj.remove_vrf(dvs, "Vrf-RED") + vxlan_obj.remove_vlan_member(dvs, "100", "Ethernet24") + vxlan_obj.remove_vlan(dvs, "100") # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_evpn_tunnel.py b/tests/test_evpn_tunnel.py index b58944f7ce..86f5ad53f6 100644 --- a/tests/test_evpn_tunnel.py +++ b/tests/test_evpn_tunnel.py @@ -59,6 +59,9 @@ def test_p2p_tunnel(self, dvs, testlog): vnilist = ['1000', '1001', '1002'] vxlan_obj.fetch_exist_entries(dvs) + vxlan_obj.create_vlan1(dvs,"Vlan100") + vxlan_obj.create_vlan1(dvs,"Vlan101") + vxlan_obj.create_vlan1(dvs,"Vlan102") vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101') @@ -161,3 +164,95 @@ def test_p2mp_tunnel_with_dip(self, dvs, testlog): print("Testing SIP Tunnel Deletion") vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.remove_vlan(dvs, "100") + vxlan_obj.remove_vlan(dvs, "101") + vxlan_obj.remove_vlan(dvs, "102") + + def test_delayed_vlan_vni_map(self, dvs, testlog): + vxlan_obj = self.get_vxlan_obj() + + tunnel_name = 'tunnel_2' + map_name = 'map_1000_100' + map_name_1 = 'map_1001_101' + vlanlist = ['100'] + vnilist = ['1000'] + + vxlan_obj.fetch_exist_entries(dvs) + 
vxlan_obj.create_vlan1(dvs,"Vlan100") + vxlan_obj.create_vlan1(dvs,"Vlan101") + + vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, tunnel_map_entry_count = 1) + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + + vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) + + vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan101', '7.7.7.7', '1001') + vxlan_obj.check_vxlan_dip_tunnel_not_created(dvs, tunnel_name, '6.6.6.6', '7.7.7.7') + vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101') + + print("Testing VLAN 101 extension") + vxlan_obj.check_vxlan_dip_tunnel(dvs, tunnel_name, '6.6.6.6', '7.7.7.7') + vxlan_obj.check_vlan_extension(dvs, '101', '7.7.7.7') + + print("Testing Vlan Extension removal") + vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan101', '7.7.7.7') + vxlan_obj.check_vlan_extension_delete(dvs, '101', '7.7.7.7') + vxlan_obj.check_vxlan_dip_tunnel_delete(dvs, '7.7.7.7') + + vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101') + vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) + + print("Testing SIP Tunnel Deletion") + vxlan_obj.remove_evpn_nvo(dvs, 'nvo1') + vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) + vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.remove_vlan(dvs, "100") + vxlan_obj.remove_vlan(dvs, "101") + + def test_invalid_vlan_extension(self, dvs, testlog): + vxlan_obj = self.get_vxlan_obj() + + tunnel_name = 'tunnel_2' + map_name = 'map_1000_100' + map_name_1 = 'map_1001_101' + vlanlist = ['100'] + vnilist = ['1000'] + + vxlan_obj.fetch_exist_entries(dvs) + vxlan_obj.create_vlan1(dvs,"Vlan100") + + vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, 
'6.6.6.6') + vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, tunnel_map_entry_count = 1) + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + + vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) + + vxlan_obj.create_vrf(dvs, "Vrf-RED") + vxlan_obj.create_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED', '1000') + + vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7', '1000') + vxlan_obj.check_vlan_extension_not_created(dvs, '100', '7.7.7.7') + + vxlan_obj.remove_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED') + vxlan_obj.remove_vrf(dvs, "Vrf-RED") + vxlan_obj.check_vxlan_dip_tunnel(dvs, tunnel_name, '6.6.6.6', '7.7.7.7') + print("Testing VLAN 100 extension") + vxlan_obj.check_vlan_extension(dvs, '100', '7.7.7.7') + + print("Testing Vlan Extension removal") + vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7') + vxlan_obj.check_vlan_extension_delete(dvs, '100', '7.7.7.7') + + vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) + + print("Testing SIP Tunnel Deletion") + vxlan_obj.remove_evpn_nvo(dvs, 'nvo1') + vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) + vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6') diff --git a/tests/test_evpn_tunnel_p2mp.py b/tests/test_evpn_tunnel_p2mp.py index 22f12f0beb..bbbb786f9a 100644 --- a/tests/test_evpn_tunnel_p2mp.py +++ b/tests/test_evpn_tunnel_p2mp.py @@ -57,6 +57,9 @@ def test_vlan_extension(self, dvs, testlog): vnilist = ['1000', '1001', '1002'] vxlan_obj.fetch_exist_entries(dvs) + vxlan_obj.create_vlan1(dvs,"Vlan100") + vxlan_obj.create_vlan1(dvs,"Vlan101") + vxlan_obj.create_vlan1(dvs,"Vlan102") vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') 
vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101') @@ -122,3 +125,92 @@ def test_vlan_extension(self, dvs, testlog): vxlan_obj.remove_evpn_nvo(dvs, 'nvo1') vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6', ignore_bp=False) + vxlan_obj.remove_vlan(dvs, "100") + vxlan_obj.remove_vlan(dvs, "101") + vxlan_obj.remove_vlan(dvs, "102") + + def test_delayed_vlan_vni_map(self, dvs, testlog): + vxlan_obj = self.get_vxlan_obj() + + tunnel_name = 'tunnel_2' + map_name = 'map_1000_100' + map_name_1 = 'map_1001_101' + vlanlist = ['100'] + vnilist = ['1000'] + + vxlan_obj.fetch_exist_entries(dvs) + vxlan_obj.create_vlan1(dvs,"Vlan100") + vxlan_obj.create_vlan1(dvs,"Vlan101") + + vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False, tunnel_map_entry_count = 1) + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + + vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) + + vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan101', '7.7.7.7', '1001') + vxlan_obj.check_vlan_extension_not_created_p2mp(dvs, '101', '6.6.6.6', '7.7.7.7') + vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101') + + print("Testing VLAN 101 extension") + vxlan_obj.check_vlan_extension_p2mp(dvs, '101', '6.6.6.6', '7.7.7.7') + + print("Testing Vlan Extension removal") + vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan101', '7.7.7.7') + vxlan_obj.check_vlan_extension_delete_p2mp(dvs, '101', '6.6.6.6', '7.7.7.7') + + vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101') + vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) + + print("Testing SIP 
Tunnel Deletion") + vxlan_obj.remove_evpn_nvo(dvs, 'nvo1') + vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) + vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6', ignore_bp=False) + vxlan_obj.remove_vlan(dvs, "100") + vxlan_obj.remove_vlan(dvs, "101") + + def test_invalid_vlan_extension(self, dvs, testlog): + vxlan_obj = self.get_vxlan_obj() + + tunnel_name = 'tunnel_2' + map_name = 'map_1000_100' + map_name_1 = 'map_1001_101' + vlanlist = ['100'] + vnilist = ['1000'] + + vxlan_obj.fetch_exist_entries(dvs) + vxlan_obj.create_vlan1(dvs,"Vlan100") + + vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False, tunnel_map_entry_count = 1) + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + + vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) + + vxlan_obj.create_vrf(dvs, "Vrf-RED") + vxlan_obj.create_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED', '1000') + + vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7', '1000') + vxlan_obj.check_vlan_extension_not_created_p2mp(dvs, '100', '6.6.6.6', '7.7.7.7') + + vxlan_obj.remove_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED') + vxlan_obj.remove_vrf(dvs, "Vrf-RED") + print("Testing VLAN 100 extension") + vxlan_obj.check_vlan_extension_p2mp(dvs, '100', '6.6.6.6', '7.7.7.7') + + print("Testing Vlan Extension removal") + vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7') + vxlan_obj.check_vlan_extension_delete_p2mp(dvs, '100', '6.6.6.6', '7.7.7.7') + + vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) + + print("Testing SIP Tunnel Deletion") + vxlan_obj.remove_evpn_nvo(dvs, 'nvo1') + vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) + vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, 
'6.6.6.6', ignore_bp=False) diff --git a/tests/test_fabric.py b/tests/test_fabric.py new file mode 100644 index 0000000000..2d1ea8c293 --- /dev/null +++ b/tests/test_fabric.py @@ -0,0 +1,83 @@ +from swsscommon import swsscommon +from dvslib.dvs_database import DVSDatabase +import ast +import json + +# Fabric counters +NUMBER_OF_RETRIES = 10 + +counter_group_meta = { + 'fabric_port_counter': { + 'key': 'FABRIC_PORT', + 'group_name': 'FABRIC_PORT_STAT_COUNTER', + 'name_map': 'COUNTERS_FABRIC_PORT_NAME_MAP', + 'post_test': 'post_port_counter_test', + }, + 'fabric_queue_counter': { + 'key': 'FABRIC_QUEUE', + 'group_name': 'FABRIC_QUEUE_STAT_COUNTER', + 'name_map': 'COUNTERS_FABRIC_QUEUE_NAME_MAP', + }, +} + +class TestVirtualChassis(object): + + def wait_for_id_list(self, flex_db, stat, name, oid): + for retry in range(NUMBER_OF_RETRIES): + id_list = flex_db.db_connection.hgetall("FLEX_COUNTER_TABLE:" + stat + ":" + oid).items() + if len(id_list) > 0: + return + else: + time.sleep(1) + + assert False, "No ID list for counter " + str(name) + + def verify_flex_counters_populated(self, flex_db, counters_db, map, stat): + counters_keys = counters_db.db_connection.hgetall(map) + for counter_entry in counters_keys.items(): + name = counter_entry[0] + oid = counter_entry[1] + self.wait_for_id_list(flex_db, stat, name, oid) + + def test_voq_switch(self, vst): + """Test VOQ switch objects configuration. + + This test validates configuration of switch creation objects required for + VOQ switches. The switch_type, max_cores and switch_id attributes configuration + are verified. For the System port config list, it is verified that all the + configured system ports are avaiable in the asic db by checking the count. 
+ """ + + if vst is None: + return + + dvss = vst.dvss + for name in dvss.keys(): + dvs = dvss[name] + # Get the config info + config_db = dvs.get_config_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + + cfg_switch_type = metatbl.get("switch_type") + if cfg_switch_type == "fabric": + flex_db = dvs.get_flex_db() + counters_db = dvs.get_counters_db() + for ct in counter_group_meta.keys(): + meta_data = counter_group_meta[ct] + counter_key = meta_data['key'] + counter_stat = meta_data['group_name'] + counter_map = meta_data['name_map'] + self.verify_flex_counters_populated(flex_db, counters_db, counter_map, counter_stat) + + port_counters_keys = counters_db.db_connection.hgetall(meta_data['name_map']) + port_counters_stat_keys = flex_db.get_keys("FLEX_COUNTER_TABLE:" + meta_data['group_name']) + for port_stat in port_counters_stat_keys: + assert port_stat in dict(port_counters_keys.items()).values(), "Non port created on PORT_STAT_COUNTER group: {}".format(port_stat) + else: + print( "We do not check switch type:", cfg_switch_type ) + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass + diff --git a/tests/test_fabric_port.py b/tests/test_fabric_port.py new file mode 100644 index 0000000000..a7ad9958b0 --- /dev/null +++ b/tests/test_fabric_port.py @@ -0,0 +1,48 @@ +from swsscommon import swsscommon +from dvslib.dvs_database import DVSDatabase + + +class TestVirtualChassis(object): + def test_voq_switch_fabric_link(self, vst): + """Test fabric link manual isolation commands in VOQ switch. + + By issuing config fabric port isolation command, the value + of isolateStatus field in config_db get changed. This test validates appl_db + updates of a fabric link isolateStatus as the value in config_db changed. 
+ """ + + dvss = vst.dvss + for name in dvss.keys(): + dvs = dvss[name] + # Get the config info + config_db = dvs.get_config_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + + cfg_switch_type = metatbl.get("switch_type") + if cfg_switch_type == "fabric": + + # get config_db information + cdb = dvs.get_config_db() + + # set config_db to isolateStatus: True + cdb.update_entry("FABRIC_PORT", "Fabric1", {"isolateStatus": "True"}) + cdb.wait_for_field_match("FABRIC_PORT", "Fabric1", {"isolateStatus": "True"}) + + # check if appl_db value changes to isolateStatus: True + adb = dvs.get_app_db() + adb.wait_for_field_match("FABRIC_PORT_TABLE", "Fabric1", {"isolateStatus": "True"}) + + # cleanup + cdb.update_entry("FABRIC_PORT", "Fabric1", {"isolateStatus": "False"}) + cdb.wait_for_field_match("FABRIC_PORT", "Fabric1", {"isolateStatus": "False"}) + adb.wait_for_field_match("FABRIC_PORT_TABLE", "Fabric1", {"isolateStatus": "False"}) + else: + print( "We do not check switch type:", cfg_switch_type ) + + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass + + diff --git a/tests/test_fabric_port_isolation.py b/tests/test_fabric_port_isolation.py new file mode 100644 index 0000000000..d92cb73fe1 --- /dev/null +++ b/tests/test_fabric_port_isolation.py @@ -0,0 +1,65 @@ +import random +from dvslib.dvs_database import DVSDatabase +from dvslib.dvs_common import PollingConfig + + +class TestVirtualChassis(object): + def test_voq_switch_fabric_link(self, vst): + """Test basic fabric link monitoring infrastructure in VOQ switchs. + + This test validates that fabric links get isolated if they experienced some errors. + And the link get unisolated if it clears the error for several consecutive polls. + """ + + dvss = vst.dvss + for name in dvss.keys(): + dvs = dvss[name] + # Get the config information and choose a linecard or fabric card to test. 
+ config_db = dvs.get_config_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + + cfg_switch_type = metatbl.get("switch_type") + if cfg_switch_type == "fabric": + + # get state_db infor + sdb = dvs.get_state_db() + # key + port = "PORT1" + # There are 16 fabric ports in the test environment. + portNum = random.randint(1, 16) + port = "PORT"+str(portNum) + # wait for link monitoring algorithm skips init pollings + max_poll = PollingConfig(polling_interval=60, timeout=1200, strict=True) + if sdb.get_entry("FABRIC_PORT_TABLE", port)['STATUS'] == 'up': + sdb.wait_for_field_match("FABRIC_PORT_TABLE", port, {"SKIP_FEC_ERR_ON_LNKUP_CNT": "2"}, polling_config=max_poll) + try: + # clean up the system for the testing port. + # set TEST_CRC_ERRORS to 0 + # set TEST_CODE_ERRORS to 0 + # set TEST to "TEST" + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CRC_ERRORS":"0"}) + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CODE_ERRORS": "0"}) + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST": "TEST"}) + # inject testing errors and wait for link get isolated. + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CRC_ERRORS": "2"}) + sdb.wait_for_field_match("FABRIC_PORT_TABLE", port, {"AUTO_ISOLATED": "1"}, polling_config=max_poll) + + # clear the testing errors and wait for link get unisolated. 
+ sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CRC_ERRORS": "0"}) + sdb.wait_for_field_match("FABRIC_PORT_TABLE", port, {"AUTO_ISOLATED": "0"}, polling_config=max_poll) + finally: + # cleanup + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CRC_ERRORS": "0"}) + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CODE_ERRORS": "0"}) + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST": "product"}) + else: + print("The link ", port, " is down") + else: + print("We do not check switch type:", cfg_switch_type) + + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass + diff --git a/tests/test_fdb.py b/tests/test_fdb.py index 2f9067a599..06c79f230f 100644 --- a/tests/test_fdb.py +++ b/tests/test_fdb.py @@ -229,7 +229,7 @@ def test_FdbWarmRestartNotifications(self, dvs, testlog): dvs.warm_restart_swss("true") # freeze orchagent for warm restart - (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check") + (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check", include_stderr=False) assert result == "RESTARTCHECK succeeded\n" time.sleep(2) diff --git a/tests/test_flex_counters.py b/tests/test_flex_counters.py index f5a0b146b2..f590b7748c 100644 --- a/tests/test_flex_counters.py +++ b/tests/test_flex_counters.py @@ -20,6 +20,11 @@ 'group_name': 'QUEUE_STAT_COUNTER', 'name_map': 'COUNTERS_QUEUE_NAME_MAP', }, + 'queue_watermark_counter': { + 'key': 'QUEUE_WATERMARK', + 'group_name': 'QUEUE_WATERMARK_STAT_COUNTER', + 'name_map': 'COUNTERS_QUEUE_NAME_MAP', + }, 'rif_counter': { 'key': 'RIF', 'group_name': 'RIF_STAT_COUNTER', @@ -37,6 +42,11 @@ 'group_name': 'PORT_BUFFER_DROP_STAT', 'name_map': 'COUNTERS_PORT_NAME_MAP', }, + 'pg_drop_counter': { + 'key': 'PG_DROP', + 'group_name': 'PG_DROP_STAT_COUNTER', + 'name_map': 'COUNTERS_PG_NAME_MAP', + }, 'pg_watermark_counter': { 'key': 'PG_WATERMARK', 'group_name': 
'PG_WATERMARK_STAT_COUNTER', @@ -190,6 +200,10 @@ def set_flex_counter_group_interval(self, key, group, interval): self.config_db.create_entry("FLEX_COUNTER_TABLE", key, group_stats_entry) self.wait_for_interval_set(group, interval) + def set_only_config_db_buffers_field(self, value): + fvs = {'create_only_config_db_buffers' : value} + self.config_db.update_entry("DEVICE_METADATA", "localhost", fvs) + @pytest.mark.parametrize("counter_type", counter_group_meta.keys()) def test_flex_counters(self, dvs, counter_type): """ @@ -702,19 +716,43 @@ def remove_ip_address(self, interface, ip): def set_admin_status(self, interface, status): self.config_db.update_entry("PORT", interface, {"admin_status": status}) - def test_create_remove_buffer_pg_counter(self, dvs): + @pytest.mark.parametrize('counter_type', [('queue_counter'), ('pg_drop_counter')]) + def test_create_only_config_db_buffers_false(self, dvs, counter_type): """ Test steps: - 1. Enable PG flex counters. - 2. Configure new buffer prioriy group for a port - 3. Verify counter is automatically created - 4. Remove the new buffer prioriy group for the port - 5. Verify counter is automatically removed + 1. By default the configuration knob 'create_only_config_db_value' is missing. + 2. Get the counter OID for the interface 'Ethernet0:7' from the counters database. + 3. Perform assertions based on the 'create_only_config_db_value': + - If 'create_only_config_db_value' is 'false' or does not exist, assert that the counter OID has a valid OID value. 
Args: dvs (object): virtual switch object + counter_type (str): The type of counter being tested """ self.setup_dbs(dvs) + meta_data = counter_group_meta[counter_type] + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map']) + + counter_oid = self.counters_db.db_connection.hget(meta_data['name_map'], 'Ethernet0:7') + assert counter_oid is not None, "Counter OID should have a valid OID value when create_only_config_db_value is 'false' or does not exist" + + def test_create_remove_buffer_pg_watermark_counter(self, dvs): + """ + Test steps: + 1. Reset config_db + 2. Set 'create_only_config_db_buffers' to 'true' + 3. Enable PG flex counters. + 4. Configure new buffer prioriy group for a port + 5. Verify counter is automatically created + 6. Remove the new buffer prioriy group for the port + 7. Verify counter is automatically removed + + Args: + dvs (object): virtual switch object + """ + dvs.restart() + self.setup_dbs(dvs) + self.set_only_config_db_buffers_field('true') meta_data = counter_group_meta['pg_watermark_counter'] self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map']) @@ -727,6 +765,26 @@ def test_create_remove_buffer_pg_counter(self, dvs): self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '1', False) self.wait_for_id_list_remove(meta_data['group_name'], "Ethernet0", counter_oid) + @pytest.mark.parametrize('counter_type', [('queue_counter'), ('pg_drop_counter')]) + def test_create_only_config_db_buffers_true(self, dvs, counter_type): + """ + Test steps: + 1. The 'create_only_config_db_buffers' was set to 'true' by previous test. + 2. Get the counter OID for the interface 'Ethernet0:7' from the counters database. + 3. Perform assertions based on the 'create_only_config_db_value': + - If 'create_only_config_db_value' is 'true', assert that the counter OID is None. 
+ + Args: + dvs (object): virtual switch object + counter_type (str): The type of counter being tested + """ + self.setup_dbs(dvs) + meta_data = counter_group_meta[counter_type] + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map']) + + counter_oid = self.counters_db.db_connection.hget(meta_data['name_map'], 'Ethernet0:7') + assert counter_oid is None, "Counter OID should be None when create_only_config_db_value is 'true'" + def test_create_remove_buffer_queue_counter(self, dvs): """ Test steps: @@ -751,3 +809,37 @@ def test_create_remove_buffer_queue_counter(self, dvs): self.config_db.delete_entry('BUFFER_QUEUE', 'Ethernet0|7') self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '7', False) self.wait_for_id_list_remove(meta_data['group_name'], "Ethernet0", counter_oid) + + def test_create_remove_buffer_watermark_queue_pg_counter(self, dvs): + """ + Test steps: + 1. Enable Queue/Watermark/PG-drop flex counters. + 2. Configure new buffer queue for a port + 3. Verify counters is automatically created + 4. Remove the new buffer queue for the port + 5. 
Verify counters is automatically removed + + Args: + dvs (object): virtual switch object + """ + self.setup_dbs(dvs) + + # set flex counter + for counterpoll_type, meta_data in counter_group_meta.items(): + if 'queue' in counterpoll_type or 'pg' in counterpoll_type: + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map']) + + self.config_db.update_entry('BUFFER_PG', 'Ethernet0|7', {'profile': 'ingress_lossy_profile'}) + self.config_db.update_entry('BUFFER_QUEUE', 'Ethernet0|7', {'profile': 'egress_lossless_profile'}) + + for counterpoll_type, meta_data in counter_group_meta.items(): + if 'queue' in counterpoll_type or 'pg' in counterpoll_type: + counter_oid = self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '7', True) + self.wait_for_id_list(meta_data['group_name'], "Ethernet0", counter_oid) + + self.config_db.delete_entry('BUFFER_QUEUE', 'Ethernet0|7') + self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|7') + for counterpoll_type, meta_data in counter_group_meta.items(): + if 'queue' in counterpoll_type or 'pg' in counterpoll_type: + self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '7', False) + self.wait_for_id_list_remove(meta_data['group_name'], "Ethernet0", counter_oid) diff --git a/tests/test_gearbox.py b/tests/test_gearbox.py index 7d5b568661..6707213990 100644 --- a/tests/test_gearbox.py +++ b/tests/test_gearbox.py @@ -70,6 +70,7 @@ def __init__(self, db_id: int, connector: str, gearbox: Gearbox): DVSDatabase.__init__(self, db_id, connector) self.gearbox = gearbox self.ports = {} + self.port_oid_to_intf_idx = {} self._wait_for_gb_asic_db_to_initialize() for connector in self.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT_CONNECTOR"): @@ -88,9 +89,31 @@ def __init__(self, db_id: int, connector: str, gearbox: Gearbox): if intf["system_lanes"] == system_lanes: assert intf["line_lanes"] == line_lanes self.ports[intf["index"]] = (system_port_oid, line_port_oid) + 
self.port_oid_to_intf_idx[system_port_oid] = (i, True) + self.port_oid_to_intf_idx[line_port_oid] = (i, False) assert len(self.ports) == len(self.gearbox.interfaces) + for serdes in self.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT_SERDES"): + fvs = self.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_PORT_SERDES", serdes) + port_oid = fvs.get("SAI_PORT_SERDES_ATTR_PORT_ID") + intf_idx, is_system = self.port_oid_to_intf_idx[port_oid] + intf = self.gearbox.interfaces[ intf_idx ] + appl_db_key_prefix = 'system_' if is_system else 'line_' + for asic_db_key, appl_db_key_suffix in [ + ("SAI_PORT_SERDES_ATTR_TX_FIR_MAIN", "tx_fir_main"), + ("SAI_PORT_SERDES_ATTR_TX_FIR_PRE1", "tx_fir_pre1"), + ("SAI_PORT_SERDES_ATTR_TX_FIR_PRE2", "tx_fir_pre2"), + ("SAI_PORT_SERDES_ATTR_TX_FIR_PRE3", "tx_fir_pre3"), + ("SAI_PORT_SERDES_ATTR_TX_FIR_POST1", "tx_fir_post1"), + ("SAI_PORT_SERDES_ATTR_TX_FIR_POST2", "tx_fir_post2"), + ("SAI_PORT_SERDES_ATTR_TX_FIR_POST3", "tx_fir_post3"), + ]: + if asic_db_key not in fvs: + continue + asic_db_value = fvs.get(asic_db_key).split(":")[-1] + assert intf[appl_db_key_prefix + appl_db_key_suffix] == asic_db_value + def _wait_for_gb_asic_db_to_initialize(self) -> None: """Wait up to 30 seconds for the default fields to appear in ASIC DB.""" def _verify_db_contents(): diff --git a/tests/test_hash.py b/tests/test_hash.py new file mode 100644 index 0000000000..b84dd91eaf --- /dev/null +++ b/tests/test_hash.py @@ -0,0 +1,292 @@ +import pytest +import logging + + +logging.basicConfig(level=logging.INFO) +hashlogger = logging.getLogger(__name__) + + +HASH_FIELD_LIST = [ + "DST_MAC", + "SRC_MAC", + "ETHERTYPE", + "IP_PROTOCOL", + "DST_IP", + "SRC_IP", + "L4_DST_PORT", + "L4_SRC_PORT" +] +INNER_HASH_FIELD_LIST = [ + "INNER_DST_MAC", + "INNER_SRC_MAC", + "INNER_ETHERTYPE", + "INNER_IP_PROTOCOL", + "INNER_DST_IP", + "INNER_SRC_IP", + "INNER_L4_DST_PORT", + "INNER_L4_SRC_PORT" +] +DEFAULT_HASH_FIELD_LIST = [ + "DST_MAC", + "SRC_MAC", + "ETHERTYPE", + "IN_PORT" +] 
+HASH_ALGORITHM = [ + "CRC", + "XOR", + "RANDOM", + "CRC_32LO", + "CRC_32HI", + "CRC_CCITT", + "CRC_XOR" +] + +SAI_HASH_FIELD_LIST = [ + "SAI_NATIVE_HASH_FIELD_DST_MAC", + "SAI_NATIVE_HASH_FIELD_SRC_MAC", + "SAI_NATIVE_HASH_FIELD_ETHERTYPE", + "SAI_NATIVE_HASH_FIELD_IP_PROTOCOL", + "SAI_NATIVE_HASH_FIELD_DST_IP", + "SAI_NATIVE_HASH_FIELD_SRC_IP", + "SAI_NATIVE_HASH_FIELD_L4_DST_PORT", + "SAI_NATIVE_HASH_FIELD_L4_SRC_PORT" +] +SAI_INNER_HASH_FIELD_LIST = [ + "SAI_NATIVE_HASH_FIELD_INNER_DST_MAC", + "SAI_NATIVE_HASH_FIELD_INNER_SRC_MAC", + "SAI_NATIVE_HASH_FIELD_INNER_ETHERTYPE", + "SAI_NATIVE_HASH_FIELD_INNER_IP_PROTOCOL", + "SAI_NATIVE_HASH_FIELD_INNER_DST_IP", + "SAI_NATIVE_HASH_FIELD_INNER_SRC_IP", + "SAI_NATIVE_HASH_FIELD_INNER_L4_DST_PORT", + "SAI_NATIVE_HASH_FIELD_INNER_L4_SRC_PORT" +] +SAI_DEFAULT_HASH_FIELD_LIST = [ + "SAI_NATIVE_HASH_FIELD_DST_MAC", + "SAI_NATIVE_HASH_FIELD_SRC_MAC", + "SAI_NATIVE_HASH_FIELD_ETHERTYPE", + "SAI_NATIVE_HASH_FIELD_IN_PORT" +] +SAI_HASH_ALGORITHM = [ + "SAI_HASH_ALGORITHM_CRC", + "SAI_HASH_ALGORITHM_XOR", + "SAI_HASH_ALGORITHM_RANDOM", + "SAI_HASH_ALGORITHM_CRC_32LO", + "SAI_HASH_ALGORITHM_CRC_32HI", + "SAI_HASH_ALGORITHM_CRC_CCITT", + "SAI_HASH_ALGORITHM_CRC_XOR" +] + + +@pytest.mark.usefixtures("dvs_hash_manager") +@pytest.mark.usefixtures("dvs_switch_manager") +class TestHashBasicFlows: + @pytest.fixture(scope="class") + def hashData(self, dvs_hash_manager): + hashlogger.info("Initialize HASH data") + + hashlogger.info("Verify HASH count") + self.dvs_hash.verify_hash_count(0) + + hashlogger.info("Get ECMP/LAG HASH id") + hashIdList = sorted(self.dvs_hash.get_hash_ids()) + + # Assumption: VS has only two HASH objects: ECMP, LAG + meta_dict = { + "ecmp": hashIdList[0], + "lag": hashIdList[1] + } + + yield meta_dict + + hashlogger.info("Deinitialize HASH data") + + @pytest.fixture(scope="class") + def switchData(self, dvs_switch_manager): + hashlogger.info("Initialize SWITCH data") + + hashlogger.info("Verify SWITCH count") + 
self.dvs_switch.verify_switch_count(0) + + hashlogger.info("Get SWITCH id") + switchIdList = self.dvs_switch.get_switch_ids() + + # Assumption: VS has only one SWITCH object + meta_dict = { + "id": switchIdList[0] + } + + yield meta_dict + + hashlogger.info("Deinitialize SWITCH data") + + @pytest.mark.parametrize( + "hash,field", [ + pytest.param( + "ecmp", + "ecmp_hash", + id="ecmp-hash" + ), + pytest.param( + "lag", + "lag_hash", + id="lag-hash" + ) + ] + ) + @pytest.mark.parametrize( + "hfList,saiHfList", [ + pytest.param( + ",".join(HASH_FIELD_LIST), + SAI_HASH_FIELD_LIST, + id="outer-frame" + ), + pytest.param( + ",".join(INNER_HASH_FIELD_LIST), + SAI_INNER_HASH_FIELD_LIST, + id="inner-frame" + ) + ] + ) + def test_HashSwitchGlobalConfiguration(self, hash, field, hfList, saiHfList, testlog, hashData): + attr_dict = { + field: hfList + } + + hashlogger.info("Update {} hash".format(hash.upper())) + self.dvs_hash.update_switch_hash( + qualifiers=attr_dict + ) + + hashId = hashData[hash] + sai_attr_dict = { + "SAI_HASH_ATTR_NATIVE_HASH_FIELD_LIST": saiHfList + } + + hashlogger.info("Validate {} hash".format(hash.upper())) + self.dvs_hash.verify_hash_generic( + sai_hash_id=hashId, + sai_qualifiers=sai_attr_dict + ) + + @pytest.mark.parametrize( + "hash,field", [ + pytest.param( + "ecmp", + "ecmp_hash", + id="ecmp-hash" + ), + pytest.param( + "lag", + "lag_hash", + id="lag-hash" + ) + ] + ) + def test_HashDefaultSwitchGlobalConfiguration(self, hash, field, testlog, hashData): + attr_dict = { + field: ",".join(DEFAULT_HASH_FIELD_LIST) + } + + hashlogger.info("Update {} hash".format(hash.upper())) + self.dvs_hash.update_switch_hash( + qualifiers=attr_dict + ) + + hashId = hashData[hash] + sai_attr_dict = { + "SAI_HASH_ATTR_NATIVE_HASH_FIELD_LIST": SAI_DEFAULT_HASH_FIELD_LIST + } + + hashlogger.info("Validate {} hash".format(hash.upper())) + self.dvs_hash.verify_hash_generic( + sai_hash_id=hashId, + sai_qualifiers=sai_attr_dict + ) + + @pytest.mark.parametrize( + 
"algorithm,attr,field", [ + pytest.param( + "ecmp", + "SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_ALGORITHM", + "ecmp_hash_algorithm", + id="ecmp-hash-algorithm" + ), + pytest.param( + "lag", + "SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_ALGORITHM", + "lag_hash_algorithm", + id="lag-hash-algorithm" + ) + ] + ) + @pytest.mark.parametrize( + "value", HASH_ALGORITHM + ) + def test_HashAlgorithmSwitchGlobalConfiguration(self, algorithm, attr, field, value, testlog, switchData): + attr_dict = { + field: value + } + + hashlogger.info("Update {} hash algorithm".format(algorithm.upper())) + self.dvs_hash.update_switch_hash( + qualifiers=attr_dict + ) + + switchId = switchData["id"] + sai_attr_dict = { + attr: SAI_HASH_ALGORITHM[HASH_ALGORITHM.index(value)] + } + + hashlogger.info("Validate {} hash algorithm".format(algorithm.upper())) + self.dvs_switch.verify_switch( + sai_switch_id=switchId, + sai_qualifiers=sai_attr_dict + ) + + @pytest.mark.parametrize( + "algorithm,attr,field", [ + pytest.param( + "ecmp", + "SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_ALGORITHM", + "ecmp_hash_algorithm", + id="ecmp-hash-algorithm" + ), + pytest.param( + "lag", + "SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_ALGORITHM", + "lag_hash_algorithm", + id="lag-hash-algorithm" + ) + ] + ) + @pytest.mark.parametrize( + "value", [ "CRC" ] + ) + def test_HashDefaultAlgorithmSwitchGlobalConfiguration(self, algorithm, attr, field, value, testlog, switchData): + attr_dict = { + field: value + } + + hashlogger.info("Update {} hash algorithm".format(algorithm.upper())) + self.dvs_hash.update_switch_hash( + qualifiers=attr_dict + ) + + switchId = switchData["id"] + sai_attr_dict = { + attr: SAI_HASH_ALGORITHM[HASH_ALGORITHM.index(value)] + } + + hashlogger.info("Validate {} hash algorithm".format(algorithm.upper())) + self.dvs_switch.verify_switch( + sai_switch_id=switchId, + sai_qualifiers=sai_attr_dict + ) + + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before 
retrying +def test_nonflaky_dummy(): + pass diff --git a/tests/test_mclag_cfg.py b/tests/test_mclag_cfg.py index 0a79c767da..f93632bd37 100644 --- a/tests/test_mclag_cfg.py +++ b/tests/test_mclag_cfg.py @@ -35,7 +35,37 @@ def check_table_doesnt_exists(db, table, key): return True, error_info - +def create_mclag_domain(dvs, domain_id, source_ip, peer_ip, peer_link): + tbl = swsscommon.Table(dvs.cdb, "MCLAG_DOMAIN") + fvs = swsscommon.FieldValuePairs([("source_ip", source_ip), + ("peer_ip", peer_ip), + ("peer_link", peer_link)]) + tbl.set(domain_id, fvs) + time.sleep(1) + +def remove_mclag_domain(dvs, domain_id): + tbl = swsscommon.Table(dvs.cdb, "MCLAG_DOMAIN") + tbl._del(domain_id) + time.sleep(1) + +def add_mclag_domain_field(dvs, domain_id, field, value): + tbl = swsscommon.Table(dvs.cdb, "MCLAG_DOMAIN") + fvs = swsscommon.FieldValuePairs([(field, value)]) + tbl.set(domain_id, fvs) + time.sleep(1) + +def create_mclag_interface(dvs, domain_id, mclag_interface): + tbl = swsscommon.Table(dvs.cdb, "MCLAG_INTERFACE") + fvs = swsscommon.FieldValuePairs([("if_type", "PortChannel")]) + key_string = domain_id + "|" + mclag_interface + tbl.set(key_string, fvs) + time.sleep(1) + +def remove_mclag_interface(dvs, domain_id, mclag_interface): + tbl = swsscommon.Table(dvs.cdb, "MCLAG_INTERFACE") + key_string = domain_id + "|" + mclag_interface + tbl._del(key_string) + time.sleep(1) # Test MCLAG Configs class TestMclagConfig(object): @@ -66,173 +96,115 @@ class TestMclagConfig(object): # Testcase 1 Verify Configuration of MCLAG Domain with src, peer ip and peer link config gets updated in CONFIG_DB @pytest.mark.dev_sanity def test_mclag_cfg_domain_add(self, dvs, testlog): - self.cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + dvs.setup_db() #cleanup existing entries - delete_table_keys(self.cfg_db, self.CFG_MCLAG_DOMAIN_TABLE) - delete_table_keys(self.cfg_db, self.CFG_MCLAG_INTERFACE_TABLE) + delete_table_keys(dvs.cdb, self.CFG_MCLAG_DOMAIN_TABLE) + 
delete_table_keys(dvs.cdb, self.CFG_MCLAG_INTERFACE_TABLE) - cmd_string ="config mclag add {} {} {} {}".format(self.MCLAG_DOMAIN_ID, self.MCLAG_SRC_IP, self.MCLAG_PEER_IP, self.MCLAG_PEER_LINK) - dvs.runcmd(cmd_string) + create_mclag_domain(dvs, self.MCLAG_DOMAIN_ID, self.MCLAG_SRC_IP, self.MCLAG_PEER_IP, self.MCLAG_PEER_LINK) time.sleep(2) - + #check whether domain cfg table contents are same as configured values - ok,error_info = dvs.all_table_entry_has(self.cfg_db, self.CFG_MCLAG_DOMAIN_TABLE, self.MCLAG_DOMAIN_ID, - [ + ok,error_info = dvs.all_table_entry_has(dvs.cdb, self.CFG_MCLAG_DOMAIN_TABLE, self.MCLAG_DOMAIN_ID, + [ ("source_ip",self.MCLAG_SRC_IP), ("peer_ip",self.MCLAG_PEER_IP), ("peer_link",self.MCLAG_PEER_LINK) - ] + ] ) assert ok,error_info - # Testcase 2 Verify that second domain addition fails when there is already a domain configured - @pytest.mark.dev_sanity - def test_mclag_cfg_domain_add_2nd(self, dvs, testlog): - self.cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - cmd_string ="config mclag add {} {} {} {}".format(self.MCLAG_DOMAIN_2, self.MCLAG_SRC_IP, self.MCLAG_PEER_IP, self.MCLAG_PEER_LINK) - dvs.runcmd(cmd_string) - time.sleep(2) - - #check whether second domain config is not added to config db - key_string = self.MCLAG_DOMAIN_2 - ok,error_info = check_table_doesnt_exists(self.cfg_db, self.CFG_MCLAG_DOMAIN_TABLE, key_string) - assert ok,error_info - - # Testcase 3 Verify Configuration of MCLAG Interface to existing domain @pytest.mark.dev_sanity def test_mclag_cfg_intf_add(self, dvs, testlog): - self.cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - cmd_string ="config mclag member add {} {}".format(self.MCLAG_DOMAIN_ID, self.MCLAG_INTERFACE1) - dvs.runcmd(cmd_string) + dvs.setup_db() + + create_mclag_interface(dvs, self.MCLAG_DOMAIN_ID, self.MCLAG_INTERFACE1) time.sleep(2) - + #check whether mclag interface config is reflected key_string = self.MCLAG_DOMAIN_ID + "|" + 
self.MCLAG_INTERFACE1 - ok,error_info = check_table_exists(self.cfg_db, self.CFG_MCLAG_INTERFACE_TABLE, key_string) + ok,error_info = check_table_exists(dvs.cdb, self.CFG_MCLAG_INTERFACE_TABLE, key_string) assert ok,error_info # Testcase 4 Verify remove and add mclag interface @pytest.mark.dev_sanity def test_mclag_cfg_intf_remove_and_add(self, dvs, testlog): - self.cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + dvs.setup_db() - cmd_string ="config mclag member del {} {}".format(self.MCLAG_DOMAIN_ID, self.MCLAG_INTERFACE1) - dvs.runcmd(cmd_string) + remove_mclag_interface(dvs, self.MCLAG_DOMAIN_ID, self.MCLAG_INTERFACE1) time.sleep(2) - + #check whether mclag interface is removed key_string = self.MCLAG_DOMAIN_ID + "|" + self.MCLAG_INTERFACE1 - ok,error_info = check_table_doesnt_exists(self.cfg_db, self.CFG_MCLAG_INTERFACE_TABLE, key_string) + ok,error_info = check_table_doesnt_exists(dvs.cdb, self.CFG_MCLAG_INTERFACE_TABLE, key_string) assert ok,error_info #add different mclag interface - cmd_string ="config mclag member del {} {}".format(self.MCLAG_DOMAIN_ID, self.MCLAG_INTERFACE2) - dvs.runcmd(cmd_string) + create_mclag_interface(dvs, self.MCLAG_DOMAIN_ID, self.MCLAG_INTERFACE2) time.sleep(2) #check whether new mclag interface is added key_string = self.MCLAG_DOMAIN_ID + "|" + self.MCLAG_INTERFACE2 - ok,error_info = check_table_doesnt_exists(self.cfg_db, self.CFG_MCLAG_INTERFACE_TABLE, key_string) + ok,error_info = check_table_exists(dvs.cdb, self.CFG_MCLAG_INTERFACE_TABLE, key_string) assert ok,error_info # Testcase 5 Verify Configuration of valid values for session timeout @pytest.mark.dev_sanity def test_mclag_cfg_session_timeout_valid_values(self, dvs, testlog): - self.cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + dvs.setup_db() for value in self.MCLAG_SESS_TMOUT_VALID_LIST: - cmd_string ="config mclag session-timeout {} {}".format(self.MCLAG_DOMAIN_ID, value) - dvs.runcmd(cmd_string) + 
add_mclag_domain_field(dvs, self.MCLAG_DOMAIN_ID, "session_timeout", value) + time.sleep(2) - + #check whether domain cfg table contents are same as configured values - ok,error_info = dvs.all_table_entry_has(self.cfg_db, self.CFG_MCLAG_DOMAIN_TABLE, self.MCLAG_DOMAIN_ID, - [ + ok,error_info = dvs.all_table_entry_has(dvs.cdb, self.CFG_MCLAG_DOMAIN_TABLE, self.MCLAG_DOMAIN_ID, + [ ("source_ip",self.MCLAG_SRC_IP), ("peer_ip",self.MCLAG_PEER_IP), ("peer_link",self.MCLAG_PEER_LINK), ("session_timeout",value) - ] + ] ) assert ok,error_info # Testcase 6 Verify Configuration of valid values for KA timer @pytest.mark.dev_sanity def test_mclag_cfg_ka_valid_values(self, dvs, testlog): - self.cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + dvs.setup_db() for value in self.MCLAG_KA_VALID_LIST: - cmd_string ="config mclag keepalive-interval {} {}".format(self.MCLAG_DOMAIN_ID, value) - dvs.runcmd(cmd_string) + add_mclag_domain_field(dvs, self.MCLAG_DOMAIN_ID, "keepalive_interval", value) time.sleep(2) - #check whether domain cfg table contents are same as configured values - ok,error_info = dvs.all_table_entry_has(self.cfg_db, self.CFG_MCLAG_DOMAIN_TABLE, self.MCLAG_DOMAIN_ID, - [ + ok,error_info = dvs.all_table_entry_has(dvs.cdb, self.CFG_MCLAG_DOMAIN_TABLE, self.MCLAG_DOMAIN_ID, + [ ("source_ip",self.MCLAG_SRC_IP), ("peer_ip",self.MCLAG_PEER_IP), ("peer_link",self.MCLAG_PEER_LINK), ("keepalive_interval",value) - ] + ] ) assert ok,error_info - - # Testcase 7 Verify Configuration of invalid values for KA - @pytest.mark.dev_sanity - def test_mclag_cfg_ka_invalid_values(self, dvs, testlog): - self.cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - - for value in self.MCLAG_KA_INVALID_LIST: - cmd_string ="config mclag keepalive-interval {} {}".format(self.MCLAG_DOMAIN_ID, value) - dvs.runcmd(cmd_string) - time.sleep(2) - - #check whether domain cfg table contents are same as configured values - found,error_info = 
dvs.all_table_entry_has(self.cfg_db, self.CFG_MCLAG_DOMAIN_TABLE, self.MCLAG_DOMAIN_ID, - [ - ("keepalive_interval",value) - ] - ) - assert found == False, "invalid keepalive value %s written to CONFIG_DB" % value - - # Testcase 8 Verify Configuration of invalid values for session timeout - @pytest.mark.dev_sanity - def test_mclag_cfg_session_invalid_values(self, dvs, testlog): - self.cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - - for value in self.MCLAG_SESS_TMOUT_INVALID_LIST: - cmd_string ="config mclag session-timeout {} {}".format(self.MCLAG_DOMAIN_ID, value) - dvs.runcmd(cmd_string) - time.sleep(2) - - #check whether domain cfg table contents are same as configured values - found,error_info = dvs.all_table_entry_has(self.cfg_db, self.CFG_MCLAG_DOMAIN_TABLE, self.MCLAG_DOMAIN_ID, - [ - ("session_timeout",value) - ] - ) - assert found == False, "invalid keepalive value %s written to CONFIG_DB" % value - - # Testcase 9 Verify Deletion of MCLAG Domain + # Testcase 7 Verify Deletion of MCLAG Domain @pytest.mark.dev_sanity def test_mclag_cfg_domain_del(self, dvs, testlog): - self.cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + dvs.setup_db() - cmd_string ="config mclag del {}".format(self.MCLAG_DOMAIN_ID) - dvs.runcmd(cmd_string) + remove_mclag_domain(dvs, self.MCLAG_DOMAIN_ID) time.sleep(2) - + #check whether domain cfg table contents are same as configured values - ok, error_info = check_table_doesnt_exists(self.cfg_db, self.CFG_MCLAG_DOMAIN_TABLE, self.MCLAG_DOMAIN_ID) + ok, error_info = check_table_doesnt_exists(dvs.cdb, self.CFG_MCLAG_DOMAIN_TABLE, self.MCLAG_DOMAIN_ID) assert ok,error_info #make sure mclag interface tables entries are also deleted when mclag domain is deleted - key_string = self.MCLAG_DOMAIN_ID - ok,error_info = check_table_doesnt_exists(self.cfg_db, self.CFG_MCLAG_INTERFACE_TABLE, key_string) + key_string = self.MCLAG_DOMAIN_ID + ok,error_info = check_table_doesnt_exists(dvs.cdb, 
self.CFG_MCLAG_INTERFACE_TABLE, key_string) assert ok,error_info - diff --git a/tests/test_mclag_fdb.py b/tests/test_mclag_fdb.py index a4e5ff0f9d..8252db8421 100644 --- a/tests/test_mclag_fdb.py +++ b/tests/test_mclag_fdb.py @@ -20,6 +20,10 @@ def create_entry_pst(db, table, key, pairs): tbl = swsscommon.ProducerStateTable(db, table) create_entry(tbl, key, pairs) +def delete_entry_tbl(db, table, key): + tbl = swsscommon.Table(db, table) + tbl._del(key) + def delete_entry_pst(db, table, key): tbl = swsscommon.ProducerStateTable(db, table) tbl._del(key) @@ -32,6 +36,14 @@ def get_port_oid(dvs, port_name): return k[1] return None +def get_portchannel_oid(dvs, alias): + counters_db = swsscommon.DBConnector(swsscommon.COUNTERS_DB, dvs.redis_sock, 0) + lag_name_map_tbl = swsscommon.Table(counters_db, 'COUNTERS_LAG_NAME_MAP') + for k in lag_name_map_tbl.get('')[1]: + if k[0] == alias: + return k[1] + return None + def get_bridge_port_oid(dvs, port_oid): tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT") for key in tbl.getKeys(): @@ -483,7 +495,77 @@ def test_mclagFdb_static_mac_dynamic_move_reject(dvs, testlog): "MCLAG_FDB_TABLE", "Vlan200:3C:85:99:5E:00:01", ) -# Test-12 Verify cleanup of the basic config. +# Test-12 Verify Remote to Local Move. 
+ +@pytest.mark.dev_sanity +def test_mclagFdb_remote_to_local_mac_move_ntf(dvs, testlog): + dvs.setup_db() + + #Add remote MAC to MCLAG_FDB_TABLE on PortChannel0005 + create_entry_pst( + dvs.pdb, + "MCLAG_FDB_TABLE", "Vlan200:3C:85:99:5E:00:01", + [ + ("port", "PortChannel0005"), + ("type", "dynamic"), + ] + ) + + # check that the FDB entry inserted into ASIC DB + assert how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY") == 1, "The MCLAG fdb entry not inserted to ASIC" + + ok, extra = dvs.is_fdb_entry_exists(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY", + [("mac", "3C:85:99:5E:00:01"), ("bvid", str(dvs.getVlanOid("200")))], + [("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_STATIC"), + ("SAI_FDB_ENTRY_ATTR_ALLOW_MAC_MOVE", "true")] + ) + + assert ok, str(extra) + + mac = "3C:85:99:5E:00:01" + vlan_oid = dvs.getVlanOid("200") + switch_id = dvs.getSwitchOid() + port_oid = get_portchannel_oid(dvs, "PortChannel0008") + bp_port_oid = get_bridge_port_oid(dvs, port_oid) + + # send fdb_event SAI_FDB_EVENT_MOVE + ntf = swsscommon.NotificationProducer(dvs.adb, "NOTIFICATIONS") + fvp = swsscommon.FieldValuePairs() + ntf_data = "[{\"fdb_entry\":\"{\\\"bvid\\\":\\\""+vlan_oid+"\\\",\\\"mac\\\":\\\"3C:85:99:5E:00:01\\\",\\\"switch_id\\\":\\\""+switch_id+"\\\"}\",\"fdb_event\":\"SAI_FDB_EVENT_MOVE\",\"list\":[{\"id\":\"SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID\",\"value\":\""+bp_port_oid+"\"}]}]" + ntf.send("fdb_event", ntf_data, fvp) + + time.sleep(2) + + # check that the FDB entry was inserted into ASIC DB + assert how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY") == 1, "The fdb entry not inserted to ASIC" + ok, extra = dvs.is_fdb_entry_exists(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY", + [("mac", "3C:85:99:5E:00:01"), ("bvid", str(dvs.getVlanOid("200")))], + [("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_DYNAMIC"), + ("SAI_FDB_ENTRY_ATTR_ALLOW_MAC_MOVE", "false")] + ) + assert ok, str(extra) + + delete_entry_tbl( + dvs.sdb, + 
"FDB_TABLE", "Vlan200:3c:85:99:5e:00:01", + ) + + time.sleep(2) + + delete_entry_tbl( + dvs.adb, + "ASIC_STATE", "SAI_OBJECT_TYPE_FDB_ENTRY:{\"bvid\":\""+vlan_oid+"\",\"mac\":\"3C:85:99:5E:00:01\",\"switch_id\":\""+switch_id+"\"}" + ) + + # check that the FDB entry was deleted from ASIC DB + assert how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY") == 0, "The MCLAG static fdb entry not deleted" + + delete_entry_pst( + dvs.pdb, + "MCLAG_FDB_TABLE", "Vlan200:3C:85:99:5E:00:01", + ) + +# Test-13 Verify cleanup of the basic config. @pytest.mark.dev_sanity def test_mclagFdb_basic_config_del(dvs, testlog): diff --git a/tests/test_mux.py b/tests/test_mux.py index 71193735c9..207ec6741b 100644 --- a/tests/test_mux.py +++ b/tests/test_mux.py @@ -1,6 +1,7 @@ import time import pytest import json +import itertools from ipaddress import ip_network, ip_address, IPv4Address from swsscommon import swsscommon @@ -15,13 +16,16 @@ def create_fvs(**kwargs): class TestMuxTunnelBase(): APP_MUX_CABLE = "MUX_CABLE_TABLE" APP_NEIGH_TABLE = "NEIGH_TABLE" + APP_ROUTE_TABLE = "ROUTE_TABLE" APP_TUNNEL_DECAP_TABLE_NAME = "TUNNEL_DECAP_TABLE" + APP_TUNNEL_ROUTE_TABLE_NAME = "TUNNEL_ROUTE_TABLE" ASIC_TUNNEL_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL" ASIC_TUNNEL_TERM_ENTRIES = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY" ASIC_RIF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE" ASIC_VRF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER" ASIC_NEIGH_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY" ASIC_NEXTHOP_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP" + ASIC_NHG_MEMBER_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER" ASIC_ROUTE_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY" ASIC_FDB_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY" ASIC_SWITCH_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH" @@ -95,7 +99,11 @@ class TestMuxTunnelBase(): TC_TO_QUEUE_MAP = {str(i):str(i) for i in range(0, 8)} DSCP_TO_TC_MAP = {str(i):str(1) for i in range(0, 64)} 
TC_TO_PRIORITY_GROUP_MAP = {str(i):str(i) for i in range(0, 8)} - + + def check_syslog(self, dvs, marker, err_log, expected_cnt): + (exitcode, num) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \"%s\" | wc -l" % (marker, err_log)]) + assert num.strip() >= str(expected_cnt) + def create_vlan_interface(self, dvs): confdb = dvs.get_config_db() @@ -161,6 +169,34 @@ def get_vlan_rif_oid(self, asicdb): break return vlan_oid + + def get_nexthop_oid(self, asicdb, nexthop): + # gets nexthop oid + nexthop_keys = asicdb.get_keys(self.ASIC_NEXTHOP_TABLE) + + nexthop_oid = '' + for nexthop_key in nexthop_keys: + entry = asicdb.get_entry(self.ASIC_NEXTHOP_TABLE, nexthop_key) + if entry["SAI_NEXT_HOP_ATTR_IP"] == nexthop: + nexthop_oid = nexthop_key + break + + return nexthop_oid + + def get_route_nexthop_oid(self, route_key, asicdb): + # gets nexthop oid + entry = asicdb.get_entry(self.ASIC_ROUTE_TABLE, route_key) + assert 'SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID' in entry + + return entry['SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID'] + + def check_tunnel_route_in_app_db(self, dvs, destinations, expected=True): + appdb = dvs.get_app_db() + + if expected: + appdb.wait_for_matching_keys(self.APP_TUNNEL_ROUTE_TABLE_NAME, destinations) + else: + appdb.wait_for_deleted_keys(self.APP_TUNNEL_ROUTE_TABLE_NAME, destinations) def check_neigh_in_asic_db(self, asicdb, ip, expected=True): rif_oid = self.get_vlan_rif_oid(asicdb) @@ -232,6 +268,18 @@ def check_nexthop_group_in_asic_db(self, asicdb, key, num_tnl_nh=0): assert num_tnl_nh == count + def check_route_nexthop(self, dvs_route, asicdb, route, nexthop, tunnel=False): + route_key = dvs_route.check_asicdb_route_entries([route]) + route_nexthop_oid = self.get_route_nexthop_oid(route_key[0], asicdb) + + if tunnel: + assert route_nexthop_oid == nexthop + return + + nexthop_oid = self.get_nexthop_oid(asicdb, nexthop) + + assert route_nexthop_oid == nexthop_oid + def add_neighbor(self, dvs, ip, mac): if ip_address(ip).version 
== 6: dvs.runcmd("ip -6 neigh replace " + ip + " lladdr " + mac + " dev Vlan1000") @@ -260,6 +308,34 @@ def del_fdb(self, dvs, mac): time.sleep(1) + def add_route(self, dvs, route, nexthops, ifaces=[]): + apdb = dvs.get_app_db() + if len(nexthops) > 1: + nexthop_str = ",".join(nexthops) + if len(ifaces) == 0: + ifaces = [self.VLAN_1000 for k in range(len(nexthops))] + iface_str = ",".join(ifaces) + else: + nexthop_str = str(nexthops[0]) + if len(ifaces) == 0: + iface_str = self.VLAN_1000 + else: + iface_str = ifaces[0] + + ps = swsscommon.ProducerStateTable(apdb.db_connection, self.APP_ROUTE_TABLE) + fvs = swsscommon.FieldValuePairs( + [ + ("nexthop", nexthop_str), + ("ifname", iface_str) + ] + ) + ps.set(route, fvs) + + def del_route(self, dvs, route): + apdb = dvs.get_app_db() + ps = swsscommon.ProducerStateTable(apdb.db_connection, self.APP_ROUTE_TABLE) + ps._del(route) + def create_and_test_neighbor(self, confdb, appdb, asicdb, dvs, dvs_route): self.set_mux_state(appdb, "Ethernet0", "active") @@ -271,9 +347,6 @@ def create_and_test_neighbor(self, confdb, appdb, asicdb, dvs, dvs_route): self.add_neighbor(dvs, self.SERV1_IPV6, "00:00:00:00:00:01") srv1_v6 = self.check_neigh_in_asic_db(asicdb, self.SERV1_IPV6) - self.add_neighbor(dvs, self.SERV1_SOC_IPV4, "00:00:00:00:00:01") - self.check_neigh_in_asic_db(asicdb, self.SERV1_SOC_IPV4) - existing_keys = asicdb.get_keys(self.ASIC_NEIGH_TABLE) self.add_neighbor(dvs, self.SERV2_IPV4, "00:00:00:00:00:02") @@ -287,7 +360,7 @@ def create_and_test_neighbor(self, confdb, appdb, asicdb, dvs, dvs_route): ) # The first standby route also creates as tunnel Nexthop - self.check_tnl_nexthop_in_asic_db(asicdb, 4) + self.check_tnl_nexthop_in_asic_db(asicdb, 3) # Change state to Standby. 
This will delete Neigh and add Route self.set_mux_state(appdb, "Ethernet0", "standby") @@ -297,8 +370,6 @@ def create_and_test_neighbor(self, confdb, appdb, asicdb, dvs, dvs_route): dvs_route.check_asicdb_route_entries( [self.SERV1_IPV4+self.IPV4_MASK, self.SERV1_IPV6+self.IPV6_MASK] ) - self.check_neigh_in_asic_db(asicdb, self.SERV1_SOC_IPV4) - dvs_route.check_asicdb_deleted_route_entries([self.SERV1_SOC_IPV4+self.IPV4_MASK]) # Change state to Active. This will add Neigh and delete Route self.set_mux_state(appdb, "Ethernet4", "active") @@ -309,6 +380,35 @@ def create_and_test_neighbor(self, confdb, appdb, asicdb, dvs, dvs_route): self.check_neigh_in_asic_db(asicdb, self.SERV2_IPV4) self.check_neigh_in_asic_db(asicdb, self.SERV2_IPV6) + def create_and_test_soc(self, appdb, asicdb, dvs, dvs_route): + + self.set_mux_state(appdb, "Ethernet0", "active") + + self.add_fdb(dvs, "Ethernet0", "00-00-00-00-00-01") + self.add_neighbor(dvs, self.SERV1_SOC_IPV4, "00:00:00:00:00:01") + + time.sleep(1) + + srv1_soc_v4 = self.check_neigh_in_asic_db(asicdb, self.SERV1_SOC_IPV4) + self.check_tunnel_route_in_app_db(dvs, [self.SERV1_SOC_IPV4+self.IPV4_MASK], expected=False) + + self.set_mux_state(appdb, "Ethernet0", "standby") + + asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, srv1_soc_v4) + dvs_route.check_asicdb_route_entries( + [self.SERV1_SOC_IPV4+self.IPV4_MASK] + ) + self.check_tunnel_route_in_app_db(dvs, [self.SERV1_SOC_IPV4+self.IPV4_MASK], expected=False) + + marker = dvs.add_log_marker() + + self.set_mux_state(appdb, "Ethernet0", "active") + self.set_mux_state(appdb, "Ethernet0", "active") + self.check_syslog(dvs, marker, "Maintaining current MUX state", 1) + + self.set_mux_state(appdb, "Ethernet0", "init") + self.check_syslog(dvs, marker, "State transition from active to init is not-handled", 1) + def create_and_test_fdb(self, appdb, asicdb, dvs, dvs_route): self.set_mux_state(appdb, "Ethernet0", "active") @@ -413,92 +513,356 @@ def create_and_test_route(self, appdb, 
asicdb, dvs, dvs_route): self.set_mux_state(appdb, "Ethernet4", "active") dvs_route.check_asicdb_deleted_route_entries([rtprefix]) - # Test ECMP routes - - self.set_mux_state(appdb, "Ethernet0", "active") - self.set_mux_state(appdb, "Ethernet4", "active") - - rtprefix = "5.6.7.0/24" - - dvs_route.check_asicdb_deleted_route_entries([rtprefix]) - - ps = swsscommon.ProducerStateTable(pdb.db_connection, "ROUTE_TABLE") - - fvs = swsscommon.FieldValuePairs( - [ - ("nexthop", self.SERV1_IPV4 + "," + self.SERV2_IPV4), - ("ifname", "Vlan1000,Vlan1000") - ] - ) - - ps.set(rtprefix, fvs) - - # Check if route was propagated to ASIC DB - rtkeys = dvs_route.check_asicdb_route_entries([rtprefix]) + dvs.runcmd( + "vtysh -c \"configure terminal\" -c \"no ip route " + rtprefix + + " " + self.SERV1_IPV4 + "\"" + ) - # Check for nexthop group and validate nexthop group member in asic db - self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0]) + def multi_nexthop_check(self, asicdb, dvs_route, route, nexthops, mux_states, non_mux_nexthop = None): + if isinstance(route, list): + route_copy = route.copy() + else: + route_copy = [route] - # Step: 1 - Change one NH to standby and verify ecmp route - self.set_mux_state(appdb, "Ethernet0", "standby") - self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 1) + for r in route_copy: + if non_mux_nexthop != None: + self.check_route_nexthop(dvs_route, asicdb, r, non_mux_nexthop) + continue + for i,state in enumerate(mux_states): + # Find first active mux port, and check that route points to that neighbor + if state == ACTIVE: + self.check_route_nexthop(dvs_route, asicdb, r, nexthops[i]) + break + else: + # If no active mux port, check that route points to tunnel + self.check_route_nexthop(dvs_route, asicdb, r, tunnel_nh_id, True) + + def multi_nexthop_test_create(self, appdb, asicdb, dvs, dvs_route, route, mux_ports, nexthops, non_mux_nexthop = None): + ''' + Tests the creation of a route with multiple nexthops in various combinations of 
initial mux state + ''' + init_mux_states = list(itertools.product([ACTIVE, STANDBY], repeat=len(mux_ports))) + + print("Test create route in various combos of mux nexthop states for route with multiple nexthops") + for states in init_mux_states: + print("Create route with mux ports: %s in states: %s" % (str(mux_ports), str(states))) + # Set mux states + for i,port in enumerate(mux_ports): + self.set_mux_state(appdb, port, states[i]) + + # Add route + if non_mux_nexthop != None: + self.add_route(dvs, route, nexthops + [non_mux_nexthop]) + else: + self.add_route(dvs, route, nexthops) + self.multi_nexthop_check(asicdb, dvs_route, route, nexthops, states, non_mux_nexthop) + + self.del_route(dvs, route) + + def multi_nexthop_test_fdb(self, appdb, asicdb, dvs, dvs_route, route, mux_ports, nexthops, macs): + ''' + Tests fbd updates for mux neighbors + ''' + init_mux_states = list(itertools.product([ACTIVE, STANDBY], repeat=len(mux_ports))) + + print("Test fdb update on route with multiple mux nexthops for various mux states") + for states in init_mux_states: + print("Testing fdb update in states: %s, for nexthops: %s" % (str(states), str(nexthops))) + + # Set mux states + for i,port in enumerate(mux_ports): + self.set_mux_state(appdb, port, states[i]) + + for i,nexthop in enumerate(nexthops): + print("Triggering fdb update for %s" % (nexthop)) + # only supports testing up to 9 nexhops at the moment + self.add_neighbor(dvs, nexthop, "00:aa:bb:cc:dd:0%d" % (i)) + self.multi_nexthop_check(asicdb, dvs_route, route, nexthops, states) + + # Reset fdb + self.add_neighbor(dvs, nexthop, macs[i]) + + def multi_nexthop_test_toggle(self, appdb, asicdb, dvs_route, route, mux_ports, nexthops, non_mux_nexthop=None): + ''' + Tests toggling mux state for a route with multiple nexthops + ''' + init_mux_states = list(list(tup) for tup in itertools.product([ACTIVE, STANDBY], repeat=len(mux_ports))) + + print("Test toggling mux state for route with multiple mux nexthops") + for states in 
init_mux_states: + print("Testing state change in states: %s, for nexthops: %s" % (str(states), str(nexthops))) + for i,port in enumerate(mux_ports): + if nexthops[i] == non_mux_nexthop: + continue + self.set_mux_state(appdb, port, states[i]) + + for toggle_index,toggle_port in enumerate(mux_ports): + if nexthops[toggle_index] == non_mux_nexthop: + continue + new_states = states.copy() + + print("Toggling %s from %s" % (toggle_port, states[toggle_index])) + + if states[toggle_index] == ACTIVE: + new_states[toggle_index] = STANDBY + self.set_mux_state(appdb, toggle_port, STANDBY) + self.multi_nexthop_check(asicdb, dvs_route, route, nexthops, new_states, non_mux_nexthop) + + new_states[toggle_index] = ACTIVE + self.set_mux_state(appdb, toggle_port, ACTIVE) + self.multi_nexthop_check(asicdb, dvs_route, route, nexthops, new_states, non_mux_nexthop) + else: + new_states[toggle_index] = ACTIVE + self.set_mux_state(appdb, toggle_port, ACTIVE) + self.multi_nexthop_check(asicdb, dvs_route, route, nexthops, new_states, non_mux_nexthop) - # Step: 2 - Change the other NH to standby and verify ecmp route - self.set_mux_state(appdb, "Ethernet4", "standby") - self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 2) + new_states[toggle_index] = STANDBY + self.set_mux_state(appdb, toggle_port, STANDBY) + self.multi_nexthop_check(asicdb, dvs_route, route, nexthops, new_states, non_mux_nexthop) - # Step: 3 - Change one NH to back to Active and verify ecmp route + # Set everything back to active + for i,port in enumerate(mux_ports): + if nexthops[i] == non_mux_nexthop: + continue + self.set_mux_state(appdb, port, ACTIVE) + + def multi_nexthop_test_route_update_keep_size(self, appdb, asicdb, dvs, dvs_route, route, mux_ports, nexthops, new_nexthop, new_mux_port, nh_is_mux=True): + ''' + Tests route update for a route with multiple nexthops with same number of nexthops + - nh_is_mux: is True if new nexthop is a mux nexthop, False if not + ''' + # Add route + self.add_route(dvs, route, 
nexthops) + + print("Test route update for route with multiple mux nexthops") + for i,nexthop in enumerate(nexthops): + new_nexthops = nexthops.copy() + new_muxports = mux_ports.copy() + + print("Triggering route update %s to replace: %s with: %s" % (str(new_nexthops), str(nexthop), str(new_nexthop))) + new_nexthops[i] = new_nexthop + new_muxports[i] = new_mux_port + + if nh_is_mux: + # We need to sort the nexthops to match the way they will pe processed + new_nexthops.sort() + new_muxports.sort() + + self.add_route(dvs, route, new_nexthops) + + if nh_is_mux: + self.multi_nexthop_test_toggle(appdb, asicdb, dvs_route, route, new_muxports, new_nexthops) + else: + self.multi_nexthop_test_toggle(appdb, asicdb, dvs_route, route, new_muxports, new_nexthops, non_mux_nexthop=new_nexthop) + + # Reset route + self.add_route(dvs, route, nexthops) + + self.del_route(dvs, route) + + def multi_nexthop_test_route_update_increase_size(self, appdb, asicdb, dvs, dvs_route, route, mux_ports, nexthops, non_mux_nexthop=None): + ''' + Tests route update for a route with multiple nexthops increasing number of nexthops over time + ''' + print("Test route update for route with multiple mux nexthops") + for i,nexthop in enumerate(nexthops): + print("Triggering route update to add: %s. new route %s -> %s" % (str(nexthop), route, nexthops[:i+1])) + self.add_route(dvs, route, nexthops[:i+1]) + self.multi_nexthop_test_toggle(appdb, asicdb, dvs_route, route, mux_ports[:i+1], nexthops[:i+1]) + + # Add non_mux_nexthop to route list + if non_mux_nexthop != None: + print("Triggering route update to add non_mux: %s. 
new route %s -> %s" % (str(non_mux_nexthop), route, nexthops + [non_mux_nexthop])) + self.add_route(dvs, route, nexthops + [non_mux_nexthop]) + self.multi_nexthop_test_toggle(appdb, asicdb, dvs_route, route, mux_ports + [None], nexthops + [non_mux_nexthop], non_mux_nexthop=non_mux_nexthop) + + self.del_route(dvs, route) + + def multi_nexthop_test_route_update_decrease_size(self, appdb, asicdb, dvs, dvs_route, route, mux_ports, nexthops, non_mux_nexthop=None): + ''' + Tests route update for a route with multiple nexthops increasing number of nexthops over time + ''' + print("Test route update for route with multiple mux nexthops") + + if non_mux_nexthop != None: + print("Triggering route update to add non_mux: %s. new route %s -> %s" % (str(non_mux_nexthop), route, [non_mux_nexthop] + nexthops)) + self.add_route(dvs, route, [non_mux_nexthop] + nexthops) + self.multi_nexthop_test_toggle(appdb, asicdb, dvs_route, route, [None] + mux_ports, [non_mux_nexthop] + nexthops, non_mux_nexthop=non_mux_nexthop) + + for i,nexthop in enumerate(nexthops): + print("Triggering route update to remove: %s. 
new route %s -> %s" % (str(nexthop), route, nexthops[i:])) + self.add_route(dvs, route, nexthops[i:]) + self.multi_nexthop_test_toggle(appdb, asicdb, dvs_route, route, mux_ports[i:], nexthops[i:]) + + self.del_route(dvs, route) + + def multi_nexthop_test_neighbor_add(self, appdb, asicdb, dvs, dvs_route, route, mux_ports, nexthops, macs): + ''' + Tests adding neighbors for a route with multiple nexthops + ''' + print("Test adding neighbors for route with multiple mux nexthops") + for i,nexthop in enumerate(nexthops): + print("Triggering neighbor add for %s" % (nexthop)) + self.add_neighbor(dvs, nexthop, macs[i]) + self.multi_nexthop_test_toggle(appdb, asicdb, dvs_route, route, mux_ports, nexthops) + + def multi_nexthop_test_neighbor_del(self, appdb, asicdb, dvs, dvs_route, route, mux_ports, nexthops): + ''' + Tests deleting neighbors for a route with multiple nexthops + ''' + print("Test setting 0 mac neighbors for route with multiple mux nexthops") + for nexthop in nexthops: + print("Triggering neighbor del for %s" % (nexthop)) + self.add_neighbor(dvs, nexthop, "00:00:00:00:00:00") + self.multi_nexthop_test_toggle(appdb, asicdb, dvs_route, route, mux_ports, nexthops) + + def create_and_test_multi_nexthop_routes(self, dvs, dvs_route, appdb, macs, new_mac, asicdb): + ''' + Tests case where there are multiple nexthops tied to a route + If the nexthops are tied to a mux, then only the first active neighbor will be programmed + If not, the route should point to a regular ECMP group + ''' + + route_ipv4 = "2.3.4.0/24" + route_ipv6 = "2023::/64" + route_B_ipv4 = "2.3.5.0/24" + route_B_ipv6 = "2024::/64" + ipv4_nexthops = [self.SERV1_IPV4, self.SERV2_IPV4] + ipv6_nexthops = [self.SERV1_IPV6, self.SERV2_IPV6] + new_ipv4_nexthop = self.SERV3_IPV4 + new_ipv6_nexthop = self.SERV3_IPV6 + non_mux_ipv4 = "11.11.11.11" + non_mux_ipv6 = "2222::100" + non_mux_mac = "00:aa:aa:aa:aa:aa" + mux_ports = ["Ethernet0", "Ethernet4"] + new_mux_port = "Ethernet8" + + for i,mac in 
enumerate(macs): + self.add_neighbor(dvs, ipv4_nexthops[i], mac) + self.add_neighbor(dvs, ipv6_nexthops[i], mac) + + self.add_neighbor(dvs, new_ipv4_nexthop, new_mac) + self.add_neighbor(dvs, new_ipv6_nexthop, new_mac) + self.add_neighbor(dvs, non_mux_ipv4, non_mux_mac) + self.add_neighbor(dvs, non_mux_ipv6, non_mux_mac) + + for port in mux_ports: + self.set_mux_state(appdb, port, ACTIVE) + self.set_mux_state(appdb, new_mux_port, ACTIVE) + + try: + # These tests create route: + self.multi_nexthop_test_create(appdb, asicdb, dvs, dvs_route, route_ipv4, mux_ports, ipv4_nexthops) + self.multi_nexthop_test_create(appdb, asicdb, dvs, dvs_route, route_ipv6, mux_ports, ipv6_nexthops) + self.multi_nexthop_test_create(appdb, asicdb, dvs, dvs_route, route_ipv4, mux_ports, ipv4_nexthops, non_mux_ipv4) + self.multi_nexthop_test_create(appdb, asicdb, dvs, dvs_route, route_ipv6, mux_ports, ipv6_nexthops, non_mux_ipv6) + self.multi_nexthop_test_route_update_keep_size(appdb, asicdb, dvs, dvs_route, route_ipv4, mux_ports, ipv4_nexthops, new_ipv4_nexthop, new_mux_port) + self.multi_nexthop_test_route_update_keep_size(appdb, asicdb, dvs, dvs_route, route_ipv6, mux_ports, ipv6_nexthops, new_ipv6_nexthop, new_mux_port) + self.multi_nexthop_test_route_update_keep_size(appdb, asicdb, dvs, dvs_route, route_ipv4, mux_ports, ipv4_nexthops, non_mux_ipv4, None, nh_is_mux=False) + self.multi_nexthop_test_route_update_keep_size(appdb, asicdb, dvs, dvs_route, route_ipv6, mux_ports, ipv6_nexthops, non_mux_ipv6, None, nh_is_mux=False) + self.multi_nexthop_test_route_update_increase_size(appdb, asicdb, dvs, dvs_route, route_ipv4, mux_ports, ipv4_nexthops, non_mux_nexthop=non_mux_ipv4) + self.multi_nexthop_test_route_update_increase_size(appdb, asicdb, dvs, dvs_route, route_ipv6, mux_ports, ipv6_nexthops, non_mux_nexthop=non_mux_ipv6) + self.multi_nexthop_test_route_update_decrease_size(appdb, asicdb, dvs, dvs_route, route_ipv4, mux_ports, ipv4_nexthops, non_mux_nexthop=non_mux_ipv4) + 
self.multi_nexthop_test_route_update_decrease_size(appdb, asicdb, dvs, dvs_route, route_ipv6, mux_ports, ipv6_nexthops, non_mux_nexthop=non_mux_ipv6) + + # # These tests do not create route, so create beforehand: + self.add_route(dvs, route_ipv4, ipv4_nexthops) + self.add_route(dvs, route_ipv6, ipv6_nexthops) + self.add_route(dvs, route_B_ipv4, ipv4_nexthops) + self.add_route(dvs, route_B_ipv6, ipv6_nexthops) + + self.multi_nexthop_test_fdb(appdb, asicdb, dvs, dvs_route, [route_ipv4, route_B_ipv4], mux_ports, ipv4_nexthops, macs) + self.multi_nexthop_test_fdb(appdb, asicdb, dvs, dvs_route, [route_ipv6, route_B_ipv6], mux_ports, ipv6_nexthops, macs) + self.multi_nexthop_test_neighbor_add(appdb, asicdb, dvs, dvs_route, [route_ipv4, route_B_ipv4], mux_ports, ipv4_nexthops, macs) + self.multi_nexthop_test_neighbor_add(appdb, asicdb, dvs, dvs_route, [route_ipv6, route_B_ipv6], mux_ports, ipv6_nexthops, macs) + self.multi_nexthop_test_neighbor_del(appdb, asicdb, dvs, dvs_route, [route_ipv4, route_B_ipv4], mux_ports, ipv4_nexthops) + self.multi_nexthop_test_neighbor_del(appdb, asicdb, dvs, dvs_route, [route_ipv6, route_B_ipv6], mux_ports, ipv6_nexthops) + finally: + # Cleanup + self.del_route(dvs,route_ipv4) + self.del_route(dvs,route_B_ipv4) + self.del_route(dvs,route_ipv6) + self.del_route(dvs,route_B_ipv6) + for neighbor in ipv4_nexthops: + self.del_neighbor(dvs, neighbor) + for neighbor in ipv6_nexthops: + self.del_neighbor(dvs, neighbor) + self.del_neighbor(dvs, new_ipv4_nexthop) + self.del_neighbor(dvs, new_ipv6_nexthop) + + def create_and_test_NH_routes(self, appdb, asicdb, dvs, dvs_route, mac): + ''' + Tests case where neighbor is removed in standby and added in active with route + ''' + nh_route = "2.2.2.0/24" + nh_route_ipv6 = "2023::/64" + neigh_ip = self.SERV1_IPV4 + neigh_ipv6 = self.SERV1_IPV6 + apdb = dvs.get_app_db() + + # Setup self.set_mux_state(appdb, "Ethernet0", "active") - self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 1) - - # Step: 4 - 
Change the other NH to Active and verify ecmp route - self.set_mux_state(appdb, "Ethernet4", "active") - self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0]) + self.add_neighbor(dvs, neigh_ip, mac) + self.add_neighbor(dvs, neigh_ipv6, mac) + dvs.runcmd( + "vtysh -c \"configure terminal\" -c \"ip route " + nh_route + + " " + neigh_ip + "\"" + ) + dvs.runcmd( + "vtysh -c \"configure terminal\" -c \"ipv6 route " + nh_route_ipv6 + + " " + neigh_ipv6 + "\"" + ) + apdb.wait_for_entry("ROUTE_TABLE", nh_route) + apdb.wait_for_entry("ROUTE_TABLE", nh_route_ipv6) - ps._del(rtprefix) + rtkeys = dvs_route.check_asicdb_route_entries([nh_route]) + rtkeys_ipv6 = dvs_route.check_asicdb_route_entries([nh_route_ipv6]) + self.check_nexthop_in_asic_db(asicdb, rtkeys[0]) + self.check_nexthop_in_asic_db(asicdb, rtkeys_ipv6[0]) - # Test IPv6 ECMP routes and start with standby config + # Set state to standby and delete neighbor self.set_mux_state(appdb, "Ethernet0", "standby") - self.set_mux_state(appdb, "Ethernet4", "standby") - - rtprefix = "2020::/64" - - dvs_route.check_asicdb_deleted_route_entries([rtprefix]) - - ps = swsscommon.ProducerStateTable(pdb.db_connection, "ROUTE_TABLE") - - fvs = swsscommon.FieldValuePairs( - [ - ("nexthop", self.SERV1_IPV6 + "," + self.SERV2_IPV6), - ("ifname", "tun0,tun0") - ] - ) - - ps.set(rtprefix, fvs) + self.check_nexthop_in_asic_db(asicdb, rtkeys[0], True) + self.check_nexthop_in_asic_db(asicdb, rtkeys_ipv6[0], True) - # Check if route was propagated to ASIC DB - rtkeys = dvs_route.check_asicdb_route_entries([rtprefix]) + self.del_neighbor(dvs, neigh_ip) + self.del_neighbor(dvs, neigh_ipv6) + apdb.wait_for_deleted_entry(self.APP_NEIGH_TABLE, neigh_ip) + apdb.wait_for_deleted_entry(self.APP_NEIGH_TABLE, neigh_ipv6) + asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, neigh_ip) + asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, neigh_ip) - # Check for nexthop group and validate nexthop group member in asic db - 
self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 2) + self.check_nexthop_in_asic_db(asicdb, rtkeys[0], True) + self.check_nexthop_in_asic_db(asicdb, rtkeys_ipv6[0], True) - # Step: 1 - Change one NH to active and verify ecmp route + # Set state to active, learn neighbor again self.set_mux_state(appdb, "Ethernet0", "active") - self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 1) - - # Step: 2 - Change the other NH to active and verify ecmp route - self.set_mux_state(appdb, "Ethernet4", "active") - self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0]) - # Step: 3 - Change one NH to back to standby and verify ecmp route - self.set_mux_state(appdb, "Ethernet0", "standby") - self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 1) + self.add_neighbor(dvs, neigh_ip, mac) + self.add_neighbor(dvs, neigh_ipv6, mac) + self.check_neigh_in_asic_db(asicdb, neigh_ip) + self.check_neigh_in_asic_db(asicdb, neigh_ipv6) - # Step: 4 - Change the other NH to standby and verify ecmp route - self.set_mux_state(appdb, "Ethernet4", "standby") - self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 2) + self.check_nexthop_in_asic_db(asicdb, rtkeys[0]) + self.check_nexthop_in_asic_db(asicdb, rtkeys_ipv6[0]) + dvs.runcmd( + "ip neigh flush " + neigh_ip + ) + dvs.runcmd( + "ip neigh flush " + neigh_ipv6 + ) - ps._del(rtprefix) + # Cleanup + dvs.runcmd( + "vtysh -c \"configure terminal\" -c \"no ip route " + nh_route + + " " + neigh_ip + "\"" + ) + dvs.runcmd( + "vtysh -c \"configure terminal\" -c \"no ipv6 route " + nh_route_ipv6 + + " " + neigh_ipv6 + "\"" + ) + self.del_neighbor(dvs, neigh_ip) + self.del_neighbor(dvs, neigh_ipv6) def get_expected_sai_qualifiers(self, portlist, dvs_acl): expected_sai_qualifiers = { @@ -517,46 +881,54 @@ def create_and_test_acl(self, appdb, dvs_acl): dvs_acl.verify_no_acl_rules() - # Set one mux port to standby, verify ACL rule with inport bitmap (1 port) + # Set mux port in active-active cable type, no ACL rules programmed 
self.set_mux_state(appdb, "Ethernet0", "standby") - sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0"], dvs_acl) + dvs_acl.verify_no_acl_rules() + + # Set one mux port to standby, verify ACL rule with inport bitmap (1 port) + self.set_mux_state(appdb, "Ethernet4", "standby") + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet4"], dvs_acl) dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) # Set two mux ports to standby, verify ACL rule with inport bitmap (2 ports) - self.set_mux_state(appdb, "Ethernet4", "standby") - sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0", "Ethernet4"], dvs_acl) + self.set_mux_state(appdb, "Ethernet8", "standby") + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet4", "Ethernet8"], dvs_acl) dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) - # Set one mux port to active, verify ACL rule with inport bitmap (1 port) self.set_mux_state(appdb, "Ethernet0", "active") - sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet4"], dvs_acl) + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet4", "Ethernet8"], dvs_acl) dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) - # Set last mux port to active, verify ACL rule is deleted + # Set one mux port to active, verify ACL rule with inport bitmap (1 port) self.set_mux_state(appdb, "Ethernet4", "active") + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet8"], dvs_acl) + dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) + + # Set last mux port to active, verify ACL rule is deleted + self.set_mux_state(appdb, "Ethernet8", "active") dvs_acl.verify_no_acl_rules() # Set unknown state and verify the behavior as standby - self.set_mux_state(appdb, "Ethernet0", "unknown") - sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0"], dvs_acl) + self.set_mux_state(appdb, "Ethernet4", "unknown") + sai_qualifier = 
self.get_expected_sai_qualifiers(["Ethernet4"], dvs_acl) dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) # Verify change while setting unknown from active - self.set_mux_state(appdb, "Ethernet4", "unknown") - sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0", "Ethernet4"], dvs_acl) + self.set_mux_state(appdb, "Ethernet8", "unknown") + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet4", "Ethernet8"], dvs_acl) dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) - self.set_mux_state(appdb, "Ethernet0", "active") - sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet4"], dvs_acl) + self.set_mux_state(appdb, "Ethernet4", "active") + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet8"], dvs_acl) dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) - self.set_mux_state(appdb, "Ethernet0", "standby") - sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0", "Ethernet4"], dvs_acl) + self.set_mux_state(appdb, "Ethernet4", "standby") + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet4", "Ethernet8"], dvs_acl) dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) # Verify no change while setting unknown from standby - self.set_mux_state(appdb, "Ethernet0", "unknown") - sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0", "Ethernet4"], dvs_acl) + self.set_mux_state(appdb, "Ethernet4", "unknown") + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet4", "Ethernet8"], dvs_acl) dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) def create_and_test_metrics(self, appdb, statedb): @@ -655,6 +1027,8 @@ def create_and_test_peer(self, asicdb, tc_to_dscp_map_oid=None, tc_to_queue_map_ assert self.check_interface_exists_in_asicdb(asicdb, value) elif field == "SAI_TUNNEL_ATTR_ENCAP_TTL_MODE": assert value == "SAI_TUNNEL_TTL_MODE_PIPE_MODEL" + elif field == 
"SAI_TUNNEL_ATTR_DECAP_TTL_MODE": + assert value == "SAI_TUNNEL_TTL_MODE_PIPE_MODEL" elif field == "SAI_TUNNEL_ATTR_LOOPBACK_PACKET_ACTION": assert value == "SAI_PACKET_ACTION_DROP" elif field == "SAI_TUNNEL_ATTR_ENCAP_QOS_TC_AND_COLOR_TO_DSCP_MAP": @@ -663,6 +1037,8 @@ def create_and_test_peer(self, asicdb, tc_to_dscp_map_oid=None, tc_to_queue_map_ assert value == tc_to_queue_map_oid elif field == "SAI_TUNNEL_ATTR_ENCAP_DSCP_MODE": assert value == "SAI_TUNNEL_DSCP_MODE_PIPE_MODEL" + elif field == "SAI_TUNNEL_ATTR_DECAP_DSCP_MODE": + assert value == "SAI_TUNNEL_DSCP_MODE_PIPE_MODEL" else: assert False, "Field %s is not tested" % field @@ -1050,12 +1426,38 @@ def test_Route(self, dvs, dvs_route, testlog): self.create_and_test_route(appdb, asicdb, dvs, dvs_route) + def test_NH(self, dvs, dvs_route, intf_fdb_map, setup, setup_mux_cable, + setup_peer_switch, setup_tunnel, testlog): + """ test NH routes and mux state change """ + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + asicdb = dvs.get_asic_db() + mac = intf_fdb_map["Ethernet0"] + + # get tunnel nexthop + self.check_tnl_nexthop_in_asic_db(asicdb, 5) + + self.create_and_test_NH_routes(appdb, asicdb, dvs, dvs_route, mac) + + def test_multi_nexthop(self, dvs, dvs_route, intf_fdb_map, neighbor_cleanup, testlog, setup): + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + asicdb = dvs.get_asic_db() + macs = [intf_fdb_map["Ethernet0"], intf_fdb_map["Ethernet4"]] + new_mac = intf_fdb_map["Ethernet8"] + + self.create_and_test_multi_nexthop_routes(dvs, dvs_route, appdb, macs, new_mac, asicdb) + def test_acl(self, dvs, dvs_acl, testlog): """ test acl and mux state change """ appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) - self.create_and_test_acl(appdb, dvs_acl) + try: + self.create_and_test_acl(appdb, dvs_acl) + finally: + self.set_mux_state(appdb, "Ethernet0", "active") + self.set_mux_state(appdb, "Ethernet4", "active") + self.set_mux_state(appdb, 
"Ethernet8", "active") + dvs_acl.verify_no_acl_rules() def test_mux_metrics(self, dvs, testlog): """ test metrics for mux state change """ @@ -1089,6 +1491,37 @@ def test_neighbor_miss( expected_mac=mac if exp_result[REAL_MAC] else '00:00:00:00:00:00' ) + def test_neighbor_miss_no_mux( + self, dvs, dvs_route, setup_vlan, setup_tunnel, setup, + setup_peer_switch, neighbor_cleanup, testlog + ): + config_db = dvs.get_config_db() + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + + test_ip = self.SERV1_SOC_IPV4 + self.ping_ip(dvs, test_ip) + + # no mux present, no standalone tunnel route installed + self.check_neighbor_state(dvs, dvs_route, test_ip, expect_route=False) + + # setup the mux + config_db = dvs.get_config_db() + self.create_mux_cable(config_db) + # tunnel route should be installed immediately after mux setup + self.check_neighbor_state(dvs, dvs_route, test_ip, expect_route=True) + + # set port state as standby + self.set_mux_state(appdb, "Ethernet0", "standby") + self.check_neighbor_state(dvs, dvs_route, test_ip, expect_route=True) + + # set port state as active + self.set_mux_state(appdb, "Ethernet0", "active") + self.check_neighbor_state(dvs, dvs_route, test_ip, expect_route=True) + + # clear the FAILED neighbor + self.clear_neighbors(dvs) + self.check_neighbor_state(dvs, dvs_route, test_ip, expect_route=False) + def test_neighbor_miss_no_peer( self, dvs, dvs_route, setup_vlan, setup_mux_cable, setup_tunnel, remove_peer_switch, neighbor_cleanup, testlog @@ -1105,6 +1538,48 @@ def test_neighbor_miss_no_peer( for ip in test_ips: self.check_neighbor_state(dvs, dvs_route, ip, expect_route=False) + def test_soc_ip(self, dvs, dvs_route, setup_vlan, setup_mux_cable, testlog): + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + asicdb = dvs.get_asic_db() + + self.create_and_test_soc(appdb, asicdb, dvs, dvs_route) + + def test_warm_boot_mux_state( + self, dvs, dvs_route, setup_vlan, setup_mux_cable, setup_tunnel, + 
remove_peer_switch, neighbor_cleanup, testlog + ): + """ + test mux initialization during warm boot. + """ + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + apdb = dvs.get_app_db() + + self.set_mux_state(appdb, "Ethernet0", "active") + self.set_mux_state(appdb, "Ethernet4", "active") + self.set_mux_state(appdb, "Ethernet8", "standby") + + # Execute the warm reboot + dvs.runcmd("config warm_restart enable swss") + dvs.stop_swss() + dvs.start_swss() + + time.sleep(5) + + fvs = apdb.get_entry(self.APP_MUX_CABLE, "Ethernet0") + for key in fvs: + if key == "state": + assert fvs[key] == "active", "Ethernet0 Mux state is not active after warm boot, state: {}".format(fvs[key]) + + fvs = apdb.get_entry(self.APP_MUX_CABLE, "Ethernet4") + for key in fvs: + if key == "state": + assert fvs[key] == "active", "Ethernet4 Mux state is not active after warm boot, state: {}".format(fvs[key]) + + fvs = apdb.get_entry(self.APP_MUX_CABLE, "Ethernet8") + for key in fvs: + if key == "state": + assert fvs[key] == "standby", "Ethernet8 Mux state is not standby after warm boot, state: {}".format(fvs[key]) + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_nhg.py b/tests/test_nhg.py index aab088deb2..6647a8d0de 100644 --- a/tests/test_nhg.py +++ b/tests/test_nhg.py @@ -149,6 +149,41 @@ def flap_intf(self, i, status): assert bool(fvs) assert fvs["oper_status"] == status + # BFD utilities for static route BFD and ecmp acceleration -- begin + def get_exist_bfd_session(self): + return set(self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION")) + + def create_bfd_session(self, key, pairs): + tbl = swsscommon.ProducerStateTable(self.app_db.db_connection, "BFD_SESSION_TABLE") + fvs = swsscommon.FieldValuePairs(list(pairs.items())) + tbl.set(key, fvs) + + def remove_bfd_session(self, key): + tbl = swsscommon.ProducerStateTable(self.app_db.db_connection, 
"BFD_SESSION_TABLE") + tbl._del(key) + + def check_asic_bfd_session_value(self, key, expected_values): + fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", key) + for k, v in expected_values.items(): + assert fvs[k] == v + + def check_state_bfd_session_value(self, key, expected_values): + fvs = self.state_db.get_entry("BFD_SESSION_TABLE", key) + for k, v in expected_values.items(): + assert fvs[k] == v + + def update_bfd_session_state(self, dvs, session, state): + bfd_sai_state = {"Admin_Down": "SAI_BFD_SESSION_STATE_ADMIN_DOWN", + "Down": "SAI_BFD_SESSION_STATE_DOWN", + "Init": "SAI_BFD_SESSION_STATE_INIT", + "Up": "SAI_BFD_SESSION_STATE_UP"} + + ntf = swsscommon.NotificationProducer(self.asic_db.db_connection, "NOTIFICATIONS") + fvp = swsscommon.FieldValuePairs() + ntf_data = "[{\"bfd_session_id\":\""+session+"\",\"session_state\":\""+bfd_sai_state[state]+"\"}]" + ntf.send("bfd_session_state_change", ntf_data, fvp) + # BFD utilities for static route BFD and ecmp acceleration -- end + def init_test(self, dvs, num_intfs): self.dvs = dvs self.app_db = self.dvs.get_app_db() @@ -952,6 +987,57 @@ def test_route_nhg(self, ordered_ecmp, dvs, dvs_route, testlog): else: assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID") is None + # BFD: test validate/invalidate nexthop group member when bfd state changes -- begin + bfdSessions = self.get_exist_bfd_session() + # Create BFD session + fieldValues = {"local_addr": "10.0.0.2"} + self.create_bfd_session("default:default:10.0.0.3", fieldValues) + time.sleep(1) + + # Checked created BFD session in ASIC_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + assert len(createdSessions) == 1 + session = createdSessions.pop() + + expected_adb_values = { + "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.2", + "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.3", + "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4" + } + 
self.check_asic_bfd_session_value(session, expected_adb_values) + + # Check STATE_DB entry related to the BFD session + expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.2"} + self.check_state_bfd_session_value("default|default|10.0.0.3", expected_sdb_values) + + # Send BFD session state notification to update BFD session state + self.update_bfd_session_state(dvs, session, "Down") + time.sleep(1) + # Confirm BFD session state in STATE_DB is updated as expected + expected_sdb_values["state"] = "Down" + self.check_state_bfd_session_value("default|default|10.0.0.3", expected_sdb_values) + + #check nexthop group member is removed + keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) + assert len(keys) == 2 + + # Send BFD session state notification to update BFD session state + self.update_bfd_session_state(dvs, session, "Up") + time.sleep(1) + # Confirm BFD session state in STATE_DB is updated as expected + expected_sdb_values["state"] = "Up" + self.check_state_bfd_session_value("default|default|10.0.0.3", expected_sdb_values) + + #check nexthop group member is added back + keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) + assert len(keys) == 3 + + # Remove the BFD session + self.remove_bfd_session("default:default:10.0.0.3") + self.asic_db.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", session) + # BFD: test validate/invalidate nexthop group member when bfd state changes -- end + # Remove route 2.2.2.0/24 self.rt_ps._del(rtprefix) diff --git a/tests/test_pbh.py b/tests/test_pbh.py index 270e59d429..65401a3ea9 100644 --- a/tests/test_pbh.py +++ b/tests/test_pbh.py @@ -130,6 +130,7 @@ def test_PbhTablePortChannelBinding(self, testlog): self.dvs_lag.get_and_verify_port_channel(0) +@pytest.mark.usefixtures("dvs_hash_manager") class TestPbhBasicFlows: def test_PbhHashFieldCreationDeletion(self, testlog): try: @@ -162,12 +163,12 @@ def test_PbhHashCreationDeletion(self, testlog): hash_name=PBH_HASH_NAME, 
hash_field_list=PBH_HASH_HASH_FIELD_LIST ) - self.dvs_pbh.verify_pbh_hash_count(1) + self.dvs_hash.verify_hash_count(1) finally: # PBH hash pbhlogger.info("Remove PBH hash: {}".format(PBH_HASH_NAME)) self.dvs_pbh.remove_pbh_hash(PBH_HASH_NAME) - self.dvs_pbh.verify_pbh_hash_count(0) + self.dvs_hash.verify_hash_count(0) # PBH hash field pbhlogger.info("Remove PBH hash field: {}".format(PBH_HASH_FIELD_NAME)) @@ -205,7 +206,7 @@ def test_PbhRuleCreationDeletion(self, testlog): hash_name=PBH_HASH_NAME, hash_field_list=PBH_HASH_HASH_FIELD_LIST ) - self.dvs_pbh.verify_pbh_hash_count(1) + self.dvs_hash.verify_hash_count(1) # PBH table pbhlogger.info("Create PBH table: {}".format(PBH_TABLE_NAME)) @@ -247,7 +248,7 @@ def test_PbhRuleCreationDeletion(self, testlog): # PBH hash pbhlogger.info("Remove PBH hash: {}".format(PBH_HASH_NAME)) self.dvs_pbh.remove_pbh_hash(PBH_HASH_NAME) - self.dvs_pbh.verify_pbh_hash_count(0) + self.dvs_hash.verify_hash_count(0) # PBH hash field pbhlogger.info("Remove PBH hash field: {}".format(PBH_HASH_FIELD_NAME)) @@ -255,6 +256,7 @@ def test_PbhRuleCreationDeletion(self, testlog): self.dvs_pbh.verify_pbh_hash_field_count(0) +@pytest.mark.usefixtures("dvs_hash_manager") class TestPbhBasicEditFlows: def test_PbhRuleUpdate(self, testlog): try: @@ -273,7 +275,7 @@ def test_PbhRuleUpdate(self, testlog): hash_name=PBH_HASH_NAME, hash_field_list=PBH_HASH_HASH_FIELD_LIST ) - self.dvs_pbh.verify_pbh_hash_count(1) + self.dvs_hash.verify_hash_count(1) # PBH table pbhlogger.info("Create PBH table: {}".format(PBH_TABLE_NAME)) @@ -319,7 +321,7 @@ def test_PbhRuleUpdate(self, testlog): flow_counter="ENABLED" ) - hash_id = self.dvs_pbh.get_pbh_hash_ids(1)[0] + hash_id = self.dvs_hash.get_hash_ids(1)[0] counter_id = self.dvs_acl.get_acl_counter_ids(1)[0] sai_attr_dict = { @@ -352,7 +354,7 @@ def test_PbhRuleUpdate(self, testlog): # PBH hash pbhlogger.info("Remove PBH hash: {}".format(PBH_HASH_NAME)) self.dvs_pbh.remove_pbh_hash(PBH_HASH_NAME) - 
self.dvs_pbh.verify_pbh_hash_count(0) + self.dvs_hash.verify_hash_count(0) # PBH hash field pbhlogger.info("Remove PBH hash field: {}".format(PBH_HASH_FIELD_NAME)) @@ -377,7 +379,7 @@ def test_PbhRuleUpdateFlowCounter(self, dvs, testlog): hash_name=PBH_HASH_NAME, hash_field_list=PBH_HASH_HASH_FIELD_LIST ) - self.dvs_pbh.verify_pbh_hash_count(1) + self.dvs_hash.verify_hash_count(1) # PBH table pbhlogger.info("Create PBH table: {}".format(PBH_TABLE_NAME)) @@ -463,7 +465,7 @@ def test_PbhRuleUpdateFlowCounter(self, dvs, testlog): # PBH hash pbhlogger.info("Remove PBH hash: {}".format(PBH_HASH_NAME)) self.dvs_pbh.remove_pbh_hash(PBH_HASH_NAME) - self.dvs_pbh.verify_pbh_hash_count(0) + self.dvs_hash.verify_hash_count(0) # PBH hash field pbhlogger.info("Remove PBH hash field: {}".format(PBH_HASH_FIELD_NAME)) @@ -475,6 +477,7 @@ def test_PbhRuleUpdateFlowCounter(self, dvs, testlog): test_flex_counters.post_trap_flow_counter_test(meta_data) +@pytest.mark.usefixtures("dvs_hash_manager") @pytest.mark.usefixtures("dvs_lag_manager") class TestPbhExtendedFlows: class PbhRefCountHelper(object): @@ -596,13 +599,13 @@ def create_hash(self, meta_dict, pbh_ref_count): hash_field_list=meta_dict["hash_field_list"] ) pbh_ref_count.incPbhHashCount() - self.dvs_pbh.verify_pbh_hash_count(pbh_ref_count.getPbhHashCount()) + self.dvs_hash.verify_hash_count(pbh_ref_count.getPbhHashCount()) def remove_hash(self, meta_dict, pbh_ref_count): pbhlogger.info("Remove PBH hash: {}".format(meta_dict["name"])) self.dvs_pbh.remove_pbh_hash(meta_dict["name"]) pbh_ref_count.decPbhHashCount() - self.dvs_pbh.verify_pbh_hash_count(pbh_ref_count.getPbhHashCount()) + self.dvs_hash.verify_hash_count(pbh_ref_count.getPbhHashCount()) def create_table(self, meta_dict, pbh_ref_count): pbhlogger.info("Create PBH table: {}".format(meta_dict["name"])) @@ -909,6 +912,7 @@ def test_PbhNvgreVxlanConfiguration(self, testlog, pbh_nvgre, pbh_vxlan): pass +@pytest.mark.usefixtures("dvs_hash_manager") class 
TestPbhDependencyFlows: def test_PbhHashCreationDeletionWithDependencies(self, testlog): try: @@ -918,7 +922,7 @@ def test_PbhHashCreationDeletionWithDependencies(self, testlog): hash_name=PBH_HASH_NAME, hash_field_list=PBH_HASH_HASH_FIELD_LIST ) - self.dvs_pbh.verify_pbh_hash_count(0) + self.dvs_hash.verify_hash_count(0) # PBH hash field pbhlogger.info("Create PBH hash field: {}".format(PBH_HASH_FIELD_NAME)) @@ -928,7 +932,7 @@ def test_PbhHashCreationDeletionWithDependencies(self, testlog): sequence_id=PBH_HASH_FIELD_SEQUENCE_ID ) self.dvs_pbh.verify_pbh_hash_field_count(1) - self.dvs_pbh.verify_pbh_hash_count(1) + self.dvs_hash.verify_hash_count(1) finally: # PBH hash field pbhlogger.info("Remove PBH hash field: {}".format(PBH_HASH_FIELD_NAME)) @@ -938,7 +942,7 @@ def test_PbhHashCreationDeletionWithDependencies(self, testlog): # PBH hash pbhlogger.info("Remove PBH hash: {}".format(PBH_HASH_NAME)) self.dvs_pbh.remove_pbh_hash(PBH_HASH_NAME) - self.dvs_pbh.verify_pbh_hash_count(0) + self.dvs_hash.verify_hash_count(0) self.dvs_pbh.verify_pbh_hash_field_count(0) def test_PbhRuleCreationDeletionWithDependencies(self, testlog): @@ -949,7 +953,7 @@ def test_PbhRuleCreationDeletionWithDependencies(self, testlog): hash_name=PBH_HASH_NAME, hash_field_list=PBH_HASH_HASH_FIELD_LIST ) - self.dvs_pbh.verify_pbh_hash_count(0) + self.dvs_hash.verify_hash_count(0) # PBH hash field pbhlogger.info("Create PBH hash field: {}".format(PBH_HASH_FIELD_NAME)) @@ -959,7 +963,7 @@ def test_PbhRuleCreationDeletionWithDependencies(self, testlog): sequence_id=PBH_HASH_FIELD_SEQUENCE_ID ) self.dvs_pbh.verify_pbh_hash_field_count(1) - self.dvs_pbh.verify_pbh_hash_count(1) + self.dvs_hash.verify_hash_count(1) # PBH rule attr_dict = { @@ -1009,7 +1013,7 @@ def test_PbhRuleCreationDeletionWithDependencies(self, testlog): # PBH hash pbhlogger.info("Remove PBH hash: {}".format(PBH_HASH_NAME)) self.dvs_pbh.remove_pbh_hash(PBH_HASH_NAME) - self.dvs_pbh.verify_pbh_hash_count(0) + 
self.dvs_hash.verify_hash_count(0) self.dvs_pbh.verify_pbh_hash_field_count(0) diff --git a/tests/test_port.py b/tests/test_port.py index c63dae5c57..3853a61ffe 100644 --- a/tests/test_port.py +++ b/tests/test_port.py @@ -186,6 +186,7 @@ def test_PortFec(self, dvs, testlog): for fv in fvs: if fv[0] == "SAI_PORT_ATTR_FEC_MODE": assert fv[1] == "SAI_PORT_FEC_MODE_RS" + assert fv[0] != "SAI_PORT_ATTR_AUTO_NEG_FEC_MODE_OVERRIDE" def test_PortPreemp(self, dvs, testlog): @@ -277,6 +278,33 @@ def test_PortIpredriver(self, dvs, testlog): if fv[0] == "SAI_PORT_ATTR_SERDES_IPREDRIVER": assert fv[1] == ipre_val_asic + def test_PortHostif(self, dvs): + adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) + atbl = swsscommon.Table(adb, "ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF") + host_intfs = atbl.getKeys() + for intf in host_intfs: + status, fvs = atbl.get(intf) + assert status, "Error getting value for key" + attributes = dict(fvs) + hostif_queue = attributes.get("SAI_HOSTIF_ATTR_QUEUE") + assert hostif_queue == "7" + + def test_PortHostTxSignalSet(self, dvs, testlog): + adb = dvs.get_asic_db() + statedb = dvs.get_state_db() + + transceiver_info_tbl = swsscommon.Table(statedb.db_connection, "TRANSCEIVER_INFO") + fvs = swsscommon.FieldValuePairs([("supported_max_tx_power","N/A")]) + transceiver_info_tbl.set("Ethernet0", fvs) + + port_oid = adb.port_name_map["Ethernet0"] + expected_fields = {"SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE":"true"} + adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + + transceiver_info_tbl.hdel("Ethernet0", "supported_max_tx_power") + expected_fields = {"SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE":"false"} + adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_port_add_remove.py b/tests/test_port_add_remove.py old mode 100755 new mode 100644 
index bfc3074d83..54cd6599c9 --- a/tests/test_port_add_remove.py +++ b/tests/test_port_add_remove.py @@ -248,4 +248,278 @@ def test_add_remove_all_the_ports(self, dvs, testlog, scenario): dvs.set_interface_status(PORT_B, port_admin_b) dvs.remove_vlan_member("6", PORT_A) dvs.remove_vlan_member("6", PORT_B) + dvs.remove_ip_address("Vlan6", "6.6.6.1/24") dvs.remove_vlan("6") + + +@pytest.mark.usefixtures("dynamic_buffer") +@pytest.mark.usefixtures("dvs_port_manager") +class TestPortAddRemoveDup(object): + def test_add_remove_with_dup_lanes(self, testlog, dvs): + config_db = dvs.get_config_db() + app_db = dvs.get_app_db() + state_db = dvs.get_state_db() + + # set mmu size + fvs = {"mmu_size": "12766208"} + state_db.create_entry("BUFFER_MAX_PARAM_TABLE", "global", fvs) + + # get port count + port_count = len(self.dvs_port.get_port_ids()) + + # get port info + port_info = config_db.get_entry("PORT", PORT_A) + + # remove buffer pg cfg for the port + pgs = config_db.get_keys("BUFFER_PG") + buffer_pgs = {} + for key in pgs: + if PORT_A in key: + buffer_pgs[key] = config_db.get_entry("BUFFER_PG", key) + config_db.delete_entry("BUFFER_PG", key) + app_db.wait_for_deleted_entry("BUFFER_PG_TABLE", key.replace(config_db.separator, app_db.separator)) + + # remove buffer queue cfg for the port + queues = config_db.get_keys("BUFFER_QUEUE") + buffer_queues = {} + for key in queues: + if PORT_A in key: + buffer_queues[key] = config_db.get_entry("BUFFER_QUEUE", key) + config_db.delete_entry("BUFFER_QUEUE", key) + app_db.wait_for_deleted_entry("BUFFER_QUEUE_TABLE", key.replace(config_db.separator, app_db.separator)) + + # shutdown port + dvs.port_admin_set(PORT_A, "down") + + # remove port + self.dvs_port.remove_port_generic(PORT_A) + self.dvs_port.verify_port_count(port_count-1) + + # make port config with duplicate lanes + dup_lanes = port_info["lanes"] + dup_lanes += ",{}".format(port_info["lanes"].split(",")[-1]) + + # add port + self.dvs_port.create_port_generic(PORT_A, dup_lanes, 
port_info["speed"]) + self.dvs_port.verify_port_count(port_count) + + # shutdown port + dvs.port_admin_set(PORT_A, "down") + + # remove port + self.dvs_port.remove_port_generic(PORT_A) + self.dvs_port.verify_port_count(port_count-1) + + # make port config + port_lanes = port_info.pop("lanes") + port_speed = port_info.pop("speed") + + # re-add port + self.dvs_port.create_port_generic(PORT_A, port_lanes, port_speed, port_info) + self.dvs_port.verify_port_count(port_count) + + # re-add buffer pg and queue cfg to the port + for key, pg in buffer_pgs.items(): + config_db.update_entry("BUFFER_PG", key, pg) + app_db.wait_for_entry("BUFFER_PG_TABLE", key.replace(config_db.separator, app_db.separator)) + + for key, queue in buffer_queues.items(): + config_db.update_entry("BUFFER_QUEUE", key, queue) + app_db.wait_for_entry("BUFFER_QUEUE_TABLE", key.replace(config_db.separator, app_db.separator)) + + +@pytest.mark.usefixtures("dvs_port_manager") +class TestPortAddRemoveInvalidMandatoryParam(object): + @pytest.mark.parametrize( + "port,lanes,speed", [ + pytest.param("Ethernet1000", "", "10000", id="empty-lanes-list"), + pytest.param("Ethernet1004", "1004,x,1006,1007", "10000", id="invalid-lanes-list"), + pytest.param("Ethernet1008", "1008,1009,1010,1011", "", id="empty-speed"), + pytest.param("Ethernet1012", "1012,1013,1014,1015", "invalid", id="invalid-speed"), + pytest.param("Ethernet1016", "1016,1017,1018,1019", "0", id="out-of-range-speed") + ] + ) + def test_add_remove_neg(self, testlog, port, lanes, speed): + # get port count + port_asicdb_count = len(self.dvs_port.get_port_ids(dbid=self.dvs_port.ASIC_DB)) + port_appdb_count = len(self.dvs_port.get_port_ids(dbid=self.dvs_port.APPL_DB)) + + # add port + self.dvs_port.create_port_generic(port, lanes, speed) + self.dvs_port.verify_port_count(port_appdb_count+1, self.dvs_port.APPL_DB) + self.dvs_port.verify_port_count(port_asicdb_count, self.dvs_port.ASIC_DB) + + # remove port + self.dvs_port.remove_port_generic(port) + 
self.dvs_port.verify_port_count(port_appdb_count, self.dvs_port.APPL_DB) + self.dvs_port.verify_port_count(port_asicdb_count, self.dvs_port.ASIC_DB) + + +@pytest.mark.usefixtures("dvs_port_manager") +class TestPortAddRemoveInvalidSerdesParam(object): + @pytest.fixture(scope="class") + def port_attr(self): + meta_dict = { + "port": "Ethernet1000", + "lanes": "1000,1001,1002,1003", + "speed": "100000", + "port_asicdb_count": len(self.dvs_port.get_port_ids(dbid=self.dvs_port.ASIC_DB)), + "port_appdb_count": len(self.dvs_port.get_port_ids(dbid=self.dvs_port.APPL_DB)) + } + yield meta_dict + + def verify_add_remove(self, attr, qualifiers): + # add port + self.dvs_port.create_port_generic(attr["port"], attr["lanes"], attr["speed"], qualifiers) + self.dvs_port.verify_port_count(attr["port_appdb_count"]+1, self.dvs_port.APPL_DB) + self.dvs_port.verify_port_count(attr["port_asicdb_count"], self.dvs_port.ASIC_DB) + + # remove port + self.dvs_port.remove_port_generic(attr["port"]) + self.dvs_port.verify_port_count(attr["port_appdb_count"], self.dvs_port.APPL_DB) + self.dvs_port.verify_port_count(attr["port_asicdb_count"], self.dvs_port.ASIC_DB) + + @pytest.mark.parametrize( + "serdes", [ + pytest.param("preemphasis", id="preemphasis"), + pytest.param("idriver", id="idriver"), + pytest.param("ipredriver", id="ipredriver"), + pytest.param("pre1", id="pre1"), + pytest.param("pre2", id="pre2"), + pytest.param("pre3", id="pre3"), + pytest.param("main", id="main"), + pytest.param("post1", id="post1"), + pytest.param("post2", id="post2"), + pytest.param("post3", id="post3"), + pytest.param("attn", id="attn") + ] + ) + def test_add_remove_neg(self, testlog, port_attr, serdes): + qualifiers = { serdes: "" } + self.verify_add_remove(port_attr, qualifiers) + + qualifiers = { serdes: "invalid" } + self.verify_add_remove(port_attr, qualifiers) + + +@pytest.mark.usefixtures("dvs_port_manager") +class TestPortAddRemoveInvalidParam(object): + def verify_add_remove(self, qualifiers): + port = 
"Ethernet1000" + lanes = "1000,1001,1002,1003" + speed = "100000" + + # get port count + port_asicdb_count = len(self.dvs_port.get_port_ids(dbid=self.dvs_port.ASIC_DB)) + port_appdb_count = len(self.dvs_port.get_port_ids(dbid=self.dvs_port.APPL_DB)) + + # add port + self.dvs_port.create_port_generic(port, lanes, speed, qualifiers) + self.dvs_port.verify_port_count(port_appdb_count+1, self.dvs_port.APPL_DB) + self.dvs_port.verify_port_count(port_asicdb_count, self.dvs_port.ASIC_DB) + + # remove port + self.dvs_port.remove_port_generic(port) + self.dvs_port.verify_port_count(port_appdb_count, self.dvs_port.APPL_DB) + self.dvs_port.verify_port_count(port_asicdb_count, self.dvs_port.ASIC_DB) + + def test_add_remove_neg_alias(self, testlog): + qualifiers = { "alias": "" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_index(self, testlog): + qualifiers = { "index": "" } + self.verify_add_remove(qualifiers) + + qualifiers = { "index": "invalid" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_autoneg(self, testlog): + qualifiers = { "autoneg": "" } + self.verify_add_remove(qualifiers) + + qualifiers = { "autoneg": "invalid" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_adv_speeds(self, testlog): + qualifiers = { "adv_speeds": "" } + self.verify_add_remove(qualifiers) + + qualifiers = { "adv_speeds": "0" } + self.verify_add_remove(qualifiers) + + qualifiers = { "adv_speeds": "invalid" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_interface_type(self, testlog): + qualifiers = { "interface_type": "" } + self.verify_add_remove(qualifiers) + + qualifiers = { "interface_type": "invalid" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_adv_interface_types(self, testlog): + qualifiers = { "adv_interface_types": "" } + self.verify_add_remove(qualifiers) + + qualifiers = { "adv_interface_types": "invalid" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_fec(self, 
testlog): + qualifiers = { "fec": "" } + self.verify_add_remove(qualifiers) + + qualifiers = { "fec": "invalid" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_mtu(self, testlog): + qualifiers = { "mtu": "" } + self.verify_add_remove(qualifiers) + + qualifiers = { "mtu": "0" } + self.verify_add_remove(qualifiers) + + qualifiers = { "mtu": "invalid" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_tpid(self, testlog): + qualifiers = { "tpid": "" } + self.verify_add_remove(qualifiers) + + qualifiers = { "tpid": "invalid" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_pfc_asym(self, testlog): + qualifiers = { "pfc_asym": "" } + self.verify_add_remove(qualifiers) + + qualifiers = { "pfc_asym": "invalid" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_learn_mode(self, testlog): + qualifiers = { "learn_mode": "" } + self.verify_add_remove(qualifiers) + + qualifiers = { "learn_mode": "invalid" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_link_training(self, testlog): + qualifiers = { "link_training": "" } + self.verify_add_remove(qualifiers) + + qualifiers = { "link_training": "invalid" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_role(self, testlog): + qualifiers = { "role": "" } + self.verify_add_remove(qualifiers) + + qualifiers = { "role": "invalid" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_admin_status(self, testlog): + qualifiers = { "admin_status": "" } + self.verify_add_remove(qualifiers) + + qualifiers = { "admin_status": "invalid" } + self.verify_add_remove(qualifiers) diff --git a/tests/test_port_an.py b/tests/test_port_an.py index 9c004aa790..5356d2e837 100644 --- a/tests/test_port_an.py +++ b/tests/test_port_an.py @@ -258,7 +258,7 @@ def test_PortAutoNegWarm(self, dvs, testlog): dvs.warm_restart_swss("true") # freeze orchagent for warm restart - (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check") + 
(exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check", include_stderr=False) assert result == "RESTARTCHECK succeeded\n" time.sleep(2) @@ -311,6 +311,48 @@ def test_PortAutoNegRemoteAdvSpeeds(self, dvs, testlog): assert status == True assert "rmt_adv_speeds" in [fv[0] for fv in fvs] + def test_PortAdvWithoutAutoneg(self, dvs, testlog): + + db = swsscommon.DBConnector(0, dvs.redis_sock, 0) + cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) + sdb = swsscommon.DBConnector(6, dvs.redis_sock, 0) + + tbl = swsscommon.ProducerStateTable(db, "PORT_TABLE") + ctbl = swsscommon.Table(cdb, "PORT") + stbl = swsscommon.Table(sdb, "PORT_TABLE") + + # set autoneg = off + fvs = swsscommon.FieldValuePairs([("autoneg", "off")]) + ctbl.set("Ethernet0", fvs) + + time.sleep(1) + fvs = swsscommon.FieldValuePairs([("adv_speeds", "100,1000"), + ("adv_interface_types", "CR2,CR4")]) + ctbl.set("Ethernet0", fvs) + + time.sleep(1) + + adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) + + atbl = swsscommon.Table(adb, "ASIC_STATE:SAI_OBJECT_TYPE_PORT") + (status, fvs) = atbl.get(dvs.asicdb.portnamemap["Ethernet0"]) + assert status == True + + assert "SAI_PORT_ATTR_AUTO_NEG_MODE" in [fv[0] for fv in fvs] + assert "SAI_PORT_ATTR_ADVERTISED_SPEED" in [fv[0] for fv in fvs] + assert "SAI_PORT_ATTR_ADVERTISED_INTERFACE_TYPE" in [fv[0] for fv in fvs] + for fv in fvs: + if fv[0] == "SAI_PORT_ATTR_AUTO_NEG_MODE": + assert fv[1] == "false" + elif fv[0] == "SAI_PORT_ATTR_ADVERTISED_SPEED": + assert fv[1] == "2:100,1000" + elif fv[0] == "SAI_PORT_ATTR_ADVERTISED_INTERFACE_TYPE": + assert fv[1] == "2:SAI_PORT_INTERFACE_TYPE_CR2,SAI_PORT_INTERFACE_TYPE_CR4" + + # set admin up + cfvs = swsscommon.FieldValuePairs([("admin_status", "up")]) + ctbl.set("Ethernet0", cfvs) + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying def test_nonflaky_dummy(): diff --git a/tests/test_port_fec_override.py 
b/tests/test_port_fec_override.py new file mode 100644 index 0000000000..08d960116d --- /dev/null +++ b/tests/test_port_fec_override.py @@ -0,0 +1,46 @@ +import time +import os +import pytest + +from swsscommon import swsscommon + +DVS_ENV = ["HWSKU=Mellanox-SN2700"] + +class TestPort(object): + def test_PortFecOverride(self, dvs, testlog): + db = swsscommon.DBConnector(0, dvs.redis_sock, 0) + adb = dvs.get_asic_db() + + ptbl = swsscommon.ProducerStateTable(db, "PORT_TABLE") + + # set fec + fvs = swsscommon.FieldValuePairs([("fec","auto")]) + ptbl.set("Ethernet0", fvs) + fvs = swsscommon.FieldValuePairs([("fec","rs")]) + ptbl.set("Ethernet4", fvs) + + # validate if fec none is pushed to asic db when set first time + port_oid = adb.port_name_map["Ethernet0"] + expected_fields = {"SAI_PORT_ATTR_FEC_MODE":"SAI_PORT_FEC_MODE_NONE", "SAI_PORT_ATTR_AUTO_NEG_FEC_MODE_OVERRIDE":"false"} + adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + + # validate if fec rs is pushed to asic db when set first time + port_oid = adb.port_name_map["Ethernet4"] + expected_fields = {"SAI_PORT_ATTR_FEC_MODE":"SAI_PORT_FEC_MODE_RS", "SAI_PORT_ATTR_AUTO_NEG_FEC_MODE_OVERRIDE":"true"} + adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + + fvs = swsscommon.FieldValuePairs([("fec","none")]) + ptbl.set("Ethernet0", fvs) + ptbl.set("Ethernet4", fvs) + port_oid = adb.port_name_map["Ethernet0"] + expected_fields = {"SAI_PORT_ATTR_FEC_MODE":"SAI_PORT_FEC_MODE_NONE", "SAI_PORT_ATTR_AUTO_NEG_FEC_MODE_OVERRIDE":"true"} + adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + port_oid = adb.port_name_map["Ethernet4"] + expected_fields = {"SAI_PORT_ATTR_FEC_MODE":"SAI_PORT_FEC_MODE_NONE", "SAI_PORT_ATTR_AUTO_NEG_FEC_MODE_OVERRIDE":"true"} + adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + + +# Add Dummy always-pass test at end as workaroud +# for issue when 
Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass diff --git a/tests/test_port_lt.py b/tests/test_port_lt.py index 3ec51ed68b..0da6abb071 100644 --- a/tests/test_port_lt.py +++ b/tests/test_port_lt.py @@ -103,7 +103,7 @@ def test_PortLinkTrainingWarm(self, dvs, testlog): assert exitcode == 0 # freeze orchagent for warm restart - (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check") + (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check", include_stderr=False) assert result == "RESTARTCHECK succeeded\n" time.sleep(2) diff --git a/tests/test_qos_map.py b/tests/test_qos_map.py index 39c6c717ca..905b0dacaa 100644 --- a/tests/test_qos_map.py +++ b/tests/test_qos_map.py @@ -3,6 +3,32 @@ from swsscommon import swsscommon +CFG_TC_TO_DSCP_MAP_TABLE_NAME = "TC_TO_DSCP_MAP" +CFG_TC_TO_DSCP_MAP_KEY = "AZURE" +TC_TO_DSCP_MAP = { + "0": "20", + "1": "16", + "2": "5", + "3": "43", + "4": "34", + "5": "52", + "6": "61", + "7": "17", +} + +CFG_TC_TO_DOT1P_MAP_TABLE_NAME = "TC_TO_DOT1P_MAP" +CFG_TC_TO_DOT1P_MAP_KEY = "AZURE" +TC_TO_DOT1P_MAP = { + "0": "0", + "1": "6", + "2": "5", + "3": "3", + "4": "4", + "5": "2", + "6": "1", + "7": "7", +} + CFG_DOT1P_TO_TC_MAP_TABLE_NAME = "DOT1P_TO_TC_MAP" CFG_DOT1P_TO_TC_MAP_KEY = "AZURE" DOT1P_TO_TC_MAP = { @@ -32,9 +58,166 @@ CFG_PORT_QOS_MAP_TABLE_NAME = "PORT_QOS_MAP" CFG_PORT_QOS_DOT1P_MAP_FIELD = "dot1p_to_tc_map" CFG_PORT_QOS_MPLS_TC_MAP_FIELD = "mpls_tc_to_tc_map" +CFG_PORT_QOS_TC_DOT1P_MAP_FIELD = "tc_to_dot1p_map" +CFG_PORT_QOS_TC_DSCP_MAP_FIELD = "tc_to_dscp_map" CFG_PORT_TABLE_NAME = "PORT" +#Tests for TC-to-DSCP qos map configuration +class TestTcDscp(object): + def connect_dbs(self, dvs): + self.asic_db = swsscommon.DBConnector(1, dvs.redis_sock, 0) + self.config_db = swsscommon.DBConnector(4, dvs.redis_sock, 0) + + def create_tc_dscp_profile(self): + tbl = swsscommon.Table(self.config_db, CFG_TC_TO_DSCP_MAP_TABLE_NAME) + fvs = 
swsscommon.FieldValuePairs(list(TC_TO_DSCP_MAP.items())) + tbl.set(CFG_TC_TO_DSCP_MAP_KEY, fvs) + time.sleep(1) + + def find_tc_dscp_profile(self): + found = False + tc_dscp_map_raw = None + tbl = swsscommon.Table(self.asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_QOS_MAP") + keys = tbl.getKeys() + for key in keys: + (status, fvs) = tbl.get(key) + assert status == True + + for fv in fvs: + if fv[0] == "SAI_QOS_MAP_ATTR_MAP_TO_VALUE_LIST": + tc_dscp_map_raw = fv[1] + elif fv[0] == "SAI_QOS_MAP_ATTR_TYPE" and fv[1] == "SAI_QOS_MAP_TYPE_TC_AND_COLOR_TO_DSCP": + found = True + + if found: + break + + assert found == True + + return (key, tc_dscp_map_raw) + + def apply_tc_dscp_profile_on_all_ports(self): + tbl = swsscommon.Table(self.config_db, CFG_PORT_QOS_MAP_TABLE_NAME) + fvs = swsscommon.FieldValuePairs([(CFG_PORT_QOS_TC_DSCP_MAP_FIELD, CFG_TC_TO_DSCP_MAP_KEY)]) + ports = swsscommon.Table(self.config_db, CFG_PORT_TABLE_NAME).getKeys() + for port in ports: + tbl.set(port, fvs) + + time.sleep(1) + + + def test_tc_dscp_cfg(self, dvs): + self.connect_dbs(dvs) + self.create_tc_dscp_profile() + _, tc_dscp_map_raw = self.find_tc_dscp_profile() + + tc_dscp_map = json.loads(tc_dscp_map_raw) + for tc2dscp in tc_dscp_map['list']: + tc_val = str(tc2dscp['key']['tc']) + dscp_val = str(tc2dscp['value']['dscp']) + assert dscp_val == TC_TO_DSCP_MAP[tc_val] + + def test_port_tc_dscp(self, dvs): + self.connect_dbs(dvs) + self.create_tc_dscp_profile() + oid, _ = self.find_tc_dscp_profile() + + self.apply_tc_dscp_profile_on_all_ports() + + cnt = 0 + tbl = swsscommon.Table(self.asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_PORT") + keys = tbl.getKeys() + for key in keys: + (status, fvs) = tbl.get(key) + assert status == True + + for fv in fvs: + if fv[0] == "SAI_PORT_ATTR_QOS_TC_AND_COLOR_TO_DSCP_MAP": + cnt += 1 + assert fv[1] == oid + + port_cnt = len(swsscommon.Table(self.config_db, CFG_PORT_TABLE_NAME).getKeys()) + assert port_cnt == cnt + + +#Tests for TC-to-Dot1p qos map configuration +class 
TestTcDot1p(object): + def connect_dbs(self, dvs): + self.asic_db = swsscommon.DBConnector(1, dvs.redis_sock, 0) + self.config_db = swsscommon.DBConnector(4, dvs.redis_sock, 0) + + def create_tc_dot1p_profile(self): + tbl = swsscommon.Table(self.config_db, CFG_TC_TO_DOT1P_MAP_TABLE_NAME) + fvs = swsscommon.FieldValuePairs(list(TC_TO_DOT1P_MAP.items())) + tbl.set(CFG_TC_TO_DOT1P_MAP_KEY, fvs) + time.sleep(1) + + def find_tc_dot1p_profile(self): + found = False + tc_dot1p_map_raw = None + tbl = swsscommon.Table(self.asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_QOS_MAP") + keys = tbl.getKeys() + for key in keys: + (status, fvs) = tbl.get(key) + assert status == True + + for fv in fvs: + if fv[0] == "SAI_QOS_MAP_ATTR_MAP_TO_VALUE_LIST": + tc_dot1p_map_raw = fv[1] + elif fv[0] == "SAI_QOS_MAP_ATTR_TYPE" and fv[1] == "SAI_QOS_MAP_TYPE_TC_AND_COLOR_TO_DOT1P": + found = True + + if found: + break + + assert found == True + + return (key, tc_dot1p_map_raw) + + def apply_tc_dot1p_profile_on_all_ports(self): + tbl = swsscommon.Table(self.config_db, CFG_PORT_QOS_MAP_TABLE_NAME) + fvs = swsscommon.FieldValuePairs([(CFG_PORT_QOS_TC_DOT1P_MAP_FIELD, CFG_TC_TO_DOT1P_MAP_KEY)]) + ports = swsscommon.Table(self.config_db, CFG_PORT_TABLE_NAME).getKeys() + for port in ports: + tbl.set(port, fvs) + + time.sleep(1) + + + def test_tc_dot1p_cfg(self, dvs): + self.connect_dbs(dvs) + self.create_tc_dot1p_profile() + _, tc_dot1p_map_raw = self.find_tc_dot1p_profile() + + tc_dot1p_map = json.loads(tc_dot1p_map_raw) + for tc2dot1p in tc_dot1p_map['list']: + tc_val = str(tc2dot1p['key']['tc']) + dot1p_val = str(tc2dot1p['value']['dot1p']) + assert dot1p_val == TC_TO_DOT1P_MAP[tc_val] + + def test_port_tc_dot1p(self, dvs): + self.connect_dbs(dvs) + self.create_tc_dot1p_profile() + oid, _ = self.find_tc_dot1p_profile() + + self.apply_tc_dot1p_profile_on_all_ports() + + cnt = 0 + tbl = swsscommon.Table(self.asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_PORT") + keys = tbl.getKeys() + for key in keys: + (status, 
fvs) = tbl.get(key) + assert status == True + + for fv in fvs: + if fv[0] == "SAI_PORT_ATTR_QOS_TC_AND_COLOR_TO_DOT1P_MAP": + cnt += 1 + assert fv[1] == oid + + port_cnt = len(swsscommon.Table(self.config_db, CFG_PORT_TABLE_NAME).getKeys()) + assert port_cnt == cnt +#Tests for Dot1p-to-TC qos map configuration class TestDot1p(object): def connect_dbs(self, dvs): self.asic_db = swsscommon.DBConnector(1, dvs.redis_sock, 0) diff --git a/tests/test_route.py b/tests/test_route.py index ed96a34bde..dfa6d04cc4 100644 --- a/tests/test_route.py +++ b/tests/test_route.py @@ -1035,6 +1035,84 @@ def test_PerfAddRemoveRoute(self, dvs, testlog): dvs.servers[1].runcmd("ip route del default dev eth0") dvs.servers[1].runcmd("ip address del 10.0.0.3/31 dev eth0") +class TestFpmSyncResponse(TestRouteBase): + @pytest.fixture + def setup(self, dvs): + self.setup_db(dvs) + + # create l3 interface + self.create_l3_intf("Ethernet0", "") + # set ip address + self.add_ip_address("Ethernet0", "10.0.0.0/31") + # bring up interface + self.set_admin_status("Ethernet0", "up") + + # set ip address and default route + dvs.servers[0].runcmd("ip address add 10.0.0.1/31 dev eth0") + dvs.servers[0].runcmd("ip route add default via 10.0.0.0") + + dvs.runcmd("ping -c 1 10.0.0.1") + + yield + + # remove ip address and default route + dvs.servers[0].runcmd("ip route del default dev eth0") + dvs.servers[0].runcmd("ip address del 10.0.0.1/31 dev eth0") + + # bring interface down + self.set_admin_status("Ethernet0", "down") + # remove ip address + self.remove_ip_address("Ethernet0", "10.0.0.0/31") + # remove l3 interface + self.remove_l3_intf("Ethernet0") + + def is_offloaded(self, dvs, route): + rc, output = dvs.runcmd(f"vtysh -c 'show ip route {route} json'") + assert rc == 0 + + route_entry = json.loads(output) + return bool(route_entry[route][0].get('offloaded')) + + @pytest.mark.xfail(reason="Requires VS docker update in https://github.com/sonic-net/sonic-buildimage/pull/12853") + 
@pytest.mark.parametrize("suppress_state", ["enabled", "disabled"]) + def test_offload(self, suppress_state, setup, dvs): + route = "1.1.1.0/24" + + # enable route suppression + rc, _ = dvs.runcmd(f"config suppress-fib-pending {suppress_state}") + assert rc == 0, "Failed to configure suppress-fib-pending" + + time.sleep(5) + + try: + rc, _ = dvs.runcmd("bash -c 'kill -SIGSTOP $(pidof orchagent)'") + assert rc == 0, "Failed to suspend orchagent" + + rc, _ = dvs.runcmd(f"ip route add {route} via 10.0.0.1 proto bgp") + assert rc == 0, "Failed to configure route" + + time.sleep(5) + + if suppress_state == 'disabled': + assert self.is_offloaded(dvs,route), f"{route} is expected to be offloaded (suppression is {suppress_state})" + return + + assert not self.is_offloaded(dvs, route), f"{route} is expected to be not offloaded (suppression is {suppress_state})" + + rc, _ = dvs.runcmd("bash -c 'kill -SIGCONT $(pidof orchagent)'") + assert rc == 0, "Failed to resume orchagent" + + def check_offloaded(): + return (self.is_offloaded(dvs, route), None) + + wait_for_result(check_offloaded, failure_message=f"{route} is expected to be offloaded after orchagent resume") + finally: + dvs.runcmd("bash -c 'kill -SIGCONT $(pidof orchagent)'") + dvs.runcmd(f"ip route del {route}") + + # make sure route suppression is disabled + dvs.runcmd("config suppress-fib-pending disabled") + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying def test_nonflaky_dummy(): diff --git a/tests/test_sflow.py b/tests/test_sflow.py index f6ab6a3c13..25e3a8eaf9 100644 --- a/tests/test_sflow.py +++ b/tests/test_sflow.py @@ -253,6 +253,93 @@ def test_Teardown(self, dvs, testlog): self.cdb.delete_entry("SFLOW", "global") self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_SAMPLEPACKET", 0) + def test_globalSetSampleDir(self, dvs, testlog): + self.setup_sflow(dvs) + + # Verify that the session is up first + port_oid = 
self.adb.port_name_map["Ethernet0"] + expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"} + expected_fields_egr = {"SAI_PORT_ATTR_EGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"} + + self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + + self.cdb.update_entry("SFLOW", "global", {"sample_direction": "both"}) + self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields_egr) + + self.cdb.update_entry("SFLOW", "global", {"sample_direction": "tx"}) + self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields_egr) + + self.cdb.delete_entry("SFLOW", "global") + self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields_egr) + + def test_globalAllSetDir(self, dvs, testlog): + self.setup_sflow(dvs) + # Verify that the session is up first + port_oid = self.adb.port_name_map["Ethernet0"] + self.cdb.update_entry("SFLOW_SESSION", "all", {"sample_direction": "both"}) + expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"} + expected_fields_egr = {"SAI_PORT_ATTR_EGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"} + self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields_egr) + + self.cdb.update_entry("SFLOW_SESSION", "all", {"sample_direction": "tx"}) + self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields_egr) + + 
self.cdb.delete_entry("SFLOW", "global") + self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields_egr) + + def test_InterfaceSetDir(self, dvs, testlog): + self.setup_sflow(dvs) + + # Get the global session info as a baseline + port_oid = self.adb.port_name_map["Ethernet0"] + expected_fields = ["SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE"] + fvs = self.adb.wait_for_fields("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + global_session = fvs["SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE"] + + # Then create the interface session + session_params = {"admin_state": "up", "sample_rate": "1000", "sample_direction": "both"} + self.cdb.create_entry("SFLOW_SESSION", "Ethernet0", session_params) + + # Verify that the new interface session has been created and is different from the global one + port_oid = self.adb.port_name_map["Ethernet0"] + expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": global_session} + fvs = self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + + expected_fields_egr = {"SAI_PORT_ATTR_EGRESS_SAMPLEPACKET_ENABLE": global_session} + fvs = self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields_egr) + + local_ing_session = fvs["SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE"] + local_egr_session = fvs["SAI_PORT_ATTR_EGRESS_SAMPLEPACKET_ENABLE"] + + self.cdb.update_entry("SFLOW_SESSION", "Ethernet0", {"sample_direction": "tx"}) + + expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"} + expected_fields_egr = {"SAI_PORT_ATTR_EGRESS_SAMPLEPACKET_ENABLE": local_egr_session} + fvs = self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + fvs = self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields_egr) + + 
self.cdb.update_entry("SFLOW_SESSION", "Ethernet0", {"sample_direction": "rx"}) + + expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": local_ing_session} + expected_fields_egr = {"SAI_PORT_ATTR_EGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"} + fvs = self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + fvs = self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields_egr) + + # interface config higher precedence then global/all. Changing all sample-dir should not affect existing interface config + self.cdb.create_entry("SFLOW_SESSION", "all", {"admin_state": "up", "sample_direction": "both"}) + fvs = self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + fvs = self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields_egr) + + # interface delete will set fallback to all (sample-direction) if enabled. + self.cdb.delete_entry("SFLOW_SESSION", "Ethernet0") + fvs = self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": local_ing_session}) + fvs = self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": local_egr_session}) # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_srv6.py b/tests/test_srv6.py index dddb10153b..3ce19421b0 100644 --- a/tests/test_srv6.py +++ b/tests/test_srv6.py @@ -36,6 +36,29 @@ def create_vrf(self, vrf_name): def remove_vrf(self, vrf_name): self.cdb.delete_entry("VRF", vrf_name) + def add_ip_address(self, interface, ip): + self.cdb.create_entry("INTERFACE", interface + "|" + ip, {"NULL": "NULL"}) + + def remove_ip_address(self, interface, ip): + self.cdb.delete_entry("INTERFACE", interface + "|" + ip) + + def add_neighbor(self, interface, ip, mac, family): + table = 
"ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY" + existed_entries = get_exist_entries(self.adb.db_connection, table) + + tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "NEIGH_TABLE") + fvs = swsscommon.FieldValuePairs([("neigh", mac), + ("family", family)]) + tbl.set(interface + ":" + ip, fvs) + + self.adb.wait_for_n_keys(table, len(existed_entries) + 1) + return get_created_entry(self.adb.db_connection, table, existed_entries) + + def remove_neighbor(self, interface, ip): + tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "NEIGH_TABLE") + tbl._del(interface + ":" + ip) + time.sleep(1) + def create_mysid(self, mysid, fvs): table = "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY" existed_entries = get_exist_entries(self.adb.db_connection, table) @@ -50,6 +73,43 @@ def remove_mysid(self, mysid): tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "SRV6_MY_SID_TABLE") tbl._del(mysid) + def create_l3_intf(self, interface, vrf_name): + table = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE" + existed_entries = get_exist_entries(self.adb.db_connection, table) + + if len(vrf_name) == 0: + self.cdb.create_entry("INTERFACE", interface, {"NULL": "NULL"}) + else: + self.cdb.create_entry("INTERFACE", interface, {"vrf_name": vrf_name}) + + self.adb.wait_for_n_keys(table, len(existed_entries) + 1) + return get_created_entry(self.adb.db_connection, table, existed_entries) + + def remove_l3_intf(self, interface): + self.cdb.delete_entry("INTERFACE", interface) + + def get_nexthop_id(self, ip_address): + next_hop_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + for next_hop_entry in next_hop_entries: + (status, fvs) = tbl.get(next_hop_entry) + + assert status == True + assert len(fvs) == 3 + + for fv in fvs: + if fv[0] == "SAI_NEXT_HOP_ATTR_IP" and fv[1] == ip_address: + return next_hop_entry + + return None + + def 
set_interface_status(self, dvs, interface, admin_status): + tbl_name = "PORT" + tbl = swsscommon.Table(self.cdb.db_connection, tbl_name) + fvs = swsscommon.FieldValuePairs([("admin_status", "up")]) + tbl.set(interface, fvs) + time.sleep(1) + def test_mysid(self, dvs, testlog): self.setup_db(dvs) @@ -57,6 +117,13 @@ def test_mysid(self, dvs, testlog): mysid1='16:8:8:8:baba:2001:10::' mysid2='16:8:8:8:baba:2001:20::' mysid3='16:8:8:8:fcbb:bb01:800::' + mysid4='16:8:8:8:baba:2001:40::' + mysid5='32:16:16:0:fc00:0:1:e000::' + mysid6='32:16:16:0:fc00:0:1:e001::' + mysid7='32:16:16:0:fc00:0:1:e002::' + mysid8='32:16:16:0:fc00:0:1:e003::' + mysid9='32:16:16:0:fc00:0:1:e004::' + mysid10='32:16:16:0:fc00:0:1:e005::' # create MySID END fvs = swsscommon.FieldValuePairs([('action', 'end')]) @@ -107,25 +174,293 @@ def test_mysid(self, dvs, testlog): elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR": assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD" + # create MySID END.DT4 with default vrf + fvs = swsscommon.FieldValuePairs([('action', 'end.dt4'), ('vrf', 'default')]) + key = self.create_mysid(mysid4, fvs) + + # check ASIC MySID database + mysid = json.loads(key) + assert mysid["sid"] == "baba:2001:40::" + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_VRF": + assert True + elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT4" + + # create interface + self.create_l3_intf("Ethernet104", "") + + # Assign IP to interface + self.add_ip_address("Ethernet104", "2001::2/126") + self.add_ip_address("Ethernet104", "192.0.2.2/30") + + # create neighbor + self.add_neighbor("Ethernet104", "2001::1", "00:00:00:01:02:04", "IPv6") + self.add_neighbor("Ethernet104", "192.0.2.1", "00:00:00:01:02:05", "IPv4") + + # get nexthops + next_hop_entries = 
get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + assert len(next_hop_entries) == 2 + + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + for next_hop_entry in next_hop_entries: + (status, fvs) = tbl.get(next_hop_entry) + + assert status == True + assert len(fvs) == 3 + + for fv in fvs: + if fv[0] == "SAI_NEXT_HOP_ATTR_IP": + if fv[1] == "2001::1": + next_hop_ipv6_id = next_hop_entry + elif fv[1] == "192.0.2.1": + next_hop_ipv4_id = next_hop_entry + else: + assert False, "Nexthop IP %s not expected" % fv[1] + + assert next_hop_ipv6_id is not None + assert next_hop_ipv4_id is not None + + # create MySID END.X + fvs = swsscommon.FieldValuePairs([('action', 'end.x'), ('adj', '2001::1')]) + key = self.create_mysid(mysid5, fvs) + + # check ASIC MySID database + mysid = json.loads(key) + assert mysid["sid"] == "fc00:0:1:e000::" + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_NEXT_HOP_ID": + assert fv[1] == next_hop_ipv6_id + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_X" + elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD" + + # create MySID END.DX4 + fvs = swsscommon.FieldValuePairs([('action', 'end.dx4'), ('adj', '192.0.2.1')]) + key = self.create_mysid(mysid6, fvs) + + # check ASIC MySID database + mysid = json.loads(key) + assert mysid["sid"] == "fc00:0:1:e001::" + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_NEXT_HOP_ID": + assert fv[1] == next_hop_ipv4_id + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == 
"SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX4" + elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD" + + # create MySID END.DX6 + fvs = swsscommon.FieldValuePairs([('action', 'end.dx6'), ('adj', '2001::1')]) + key = self.create_mysid(mysid7, fvs) + + # check ASIC MySID database + mysid = json.loads(key) + assert mysid["sid"] == "fc00:0:1:e002::" + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_NEXT_HOP_ID": + assert fv[1] == next_hop_ipv6_id + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX6" + elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD" + + # create MySID uA + fvs = swsscommon.FieldValuePairs([('action', 'ua'), ('adj', '2001::1')]) + key = self.create_mysid(mysid8, fvs) + + # check ASIC MySID database + mysid = json.loads(key) + assert mysid["sid"] == "fc00:0:1:e003::" + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_NEXT_HOP_ID": + assert fv[1] == next_hop_ipv6_id + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UA" + elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD" + + # create MySID uDX4 + fvs = swsscommon.FieldValuePairs([('action', 'udx4'), ('adj', '192.0.2.1')]) + key = self.create_mysid(mysid9, fvs) + + # check ASIC MySID database + mysid = json.loads(key) + assert mysid["sid"] == "fc00:0:1:e004::" + tbl = swsscommon.Table(self.adb.db_connection, 
"ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_NEXT_HOP_ID": + assert fv[1] == next_hop_ipv4_id + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX4" + elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD" + + # create MySID END.DX6 + fvs = swsscommon.FieldValuePairs([('action', 'udx6'), ('adj', '2001::1')]) + key = self.create_mysid(mysid10, fvs) + + # check ASIC MySID database + mysid = json.loads(key) + assert mysid["sid"] == "fc00:0:1:e005::" + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_NEXT_HOP_ID": + assert fv[1] == next_hop_ipv6_id + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX6" + elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD" + # delete MySID self.remove_mysid(mysid1) self.remove_mysid(mysid2) self.remove_mysid(mysid3) + self.remove_mysid(mysid4) + self.remove_mysid(mysid5) + self.remove_mysid(mysid6) + self.remove_mysid(mysid7) + self.remove_mysid(mysid8) + self.remove_mysid(mysid9) + self.remove_mysid(mysid10) # remove vrf self.remove_vrf("VrfDt46") + # remove nexthop + self.remove_neighbor("Ethernet104", "2001::1") + self.remove_neighbor("Ethernet104", "192.0.2.1") + + # Reemove IP from interface + self.remove_ip_address("Ethernet104", "2001::2/126") + self.remove_ip_address("Ethernet104", "192.0.2.2/30") + + self.remove_l3_intf("Ethernet104") + + def test_mysid_l3adj(self, dvs, testlog): + self.setup_db(dvs) + + # create MySID entries + mysid1='32:16:16:0:fc00:0:1:e000::' + + # create interface + 
self.create_l3_intf("Ethernet104", "") + + # assign IP to interface + self.add_ip_address("Ethernet104", "2001::2/64") + + time.sleep(3) + + # bring up Ethernet104 + self.set_interface_status(dvs, "Ethernet104", "up") + + time.sleep(3) + + # save the initial number of entries in MySID table + initial_my_sid_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + + # save the initial number of entries in Nexthop table + initial_next_hop_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + + # create MySID END.X, neighbor does not exist yet + fvs = swsscommon.FieldValuePairs([('action', 'end.x'), ('adj', '2001::1')]) + tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "SRV6_MY_SID_TABLE") + tbl.set(mysid1, fvs) + + time.sleep(2) + + # check the current number of entries in MySID table + # since the neighbor does not exist yet, we expect the SID has not been installed (i.e., we + # expect the same number of MySID entries as before) + exist_my_sid_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + assert len(exist_my_sid_entries) == len(initial_my_sid_entries) + + # now, let's create the neighbor + self.add_neighbor("Ethernet104", "2001::1", "00:00:00:01:02:04", "IPv6") + + # verify that the nexthop is created in the ASIC (i.e., we have the previous number of next hop entries + 1) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", len(initial_next_hop_entries) + 1) + + # get the new nexthop and nexthop ID, which will be used later to verify the MySID entry + next_hop_entry = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", initial_next_hop_entries) + assert next_hop_entry is not None + next_hop_id = self.get_nexthop_id("2001::1") + assert next_hop_id is not None + + # now the neighbor has been created in the ASIC, we expect the MySID entry to be created in the ASIC as well + 
self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(initial_my_sid_entries) + 1) + my_sid_entry = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", initial_my_sid_entries) + assert my_sid_entry is not None + + # check ASIC MySID database and verify the SID + mysid = json.loads(my_sid_entry) + assert mysid is not None + assert mysid["sid"] == "fc00:0:1:e000::" + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(my_sid_entry) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_NEXT_HOP_ID": + assert fv[1] == next_hop_id + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_X" + elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD" + + # remove neighbor + self.remove_neighbor("Ethernet104", "2001::1") + + # delete MySID + self.remove_mysid(mysid1) + + # verify that the nexthop has been removed from the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", len(initial_next_hop_entries)) + + # check the current number of entries in MySID table + # since the MySID has been removed, we expect the SID has been removed from the ASIC as well + exist_my_sid_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + assert len(exist_my_sid_entries) == len(initial_my_sid_entries) + + # remove IP from interface + self.remove_ip_address("Ethernet104", "2001::2/64") + + # remove interface + self.remove_l3_intf("Ethernet104") + class TestSrv6(object): def setup_db(self, dvs): self.pdb = dvs.get_app_db() self.adb = dvs.get_asic_db() self.cdb = dvs.get_config_db() - def create_sidlist(self, segname, ips): + def create_sidlist(self, segname, ips, type=None): table = "ASIC_STATE:SAI_OBJECT_TYPE_SRV6_SIDLIST" existed_entries = 
get_exist_entries(self.adb.db_connection, table) - fvs=swsscommon.FieldValuePairs([('path', ips)]) + if type is None: + fvs=swsscommon.FieldValuePairs([('path', ips)]) + else: + fvs=swsscommon.FieldValuePairs([('path', ips), ('type', type)]) segtbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "SRV6_SID_LIST_TABLE") segtbl.set(segname, fvs) @@ -239,9 +574,30 @@ def test_srv6(self, dvs, testlog): # create 2nd seg lists - self.create_sidlist('seg2', 'baba:2002:10::,baba:2002:20::') - # create 3rd seg lists - self.create_sidlist('seg3', 'baba:2003:10::,baba:2003:20::') + sidlist_id = self.create_sidlist('seg2', 'baba:2002:10::,baba:2002:20::', 'insert.red') + + # check ASIC SAI_OBJECT_TYPE_SRV6_SIDLIST database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_SRV6_SIDLIST") + (status, fvs) = tbl.get(sidlist_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_SRV6_SIDLIST_ATTR_SEGMENT_LIST": + assert fv[1] == "2:baba:2002:10::,baba:2002:20::" + elif fv[0] == "SAI_SRV6_SIDLIST_ATTR_TYPE": + assert fv[1] == "SAI_SRV6_SIDLIST_TYPE_INSERT_RED" + + # create 3rd seg lists with unsupported or wrong naming of sid list type, for this case, it will use default type: ENCAPS_RED + sidlist_id = self.create_sidlist('seg3', 'baba:2003:10::,baba:2003:20::', 'reduced') + + # check ASIC SAI_OBJECT_TYPE_SRV6_SIDLIST database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_SRV6_SIDLIST") + (status, fvs) = tbl.get(sidlist_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_SRV6_SIDLIST_ATTR_SEGMENT_LIST": + assert fv[1] == "2:baba:2003:10::,baba:2003:20::" + elif fv[0] == "SAI_SRV6_SIDLIST_ATTR_TYPE": + assert fv[1] == "SAI_SRV6_SIDLIST_TYPE_ENCAPS_RED" # create 2nd v4 route with single sidlists self.create_srv6_route('20.20.20.21/32','seg2','1001:2000::1') diff --git a/tests/test_storm_control.py b/tests/test_storm_control.py index 76deef9268..ec4da04917 100644 --- a/tests/test_storm_control.py +++ 
b/tests/test_storm_control.py @@ -211,7 +211,7 @@ def test_warm_restart_all_interfaces(self,dvs,testlog): dvs.warm_restart_swss("true") # freeze orchagent for warm restart - (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check") + (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check", include_stderr=False) assert result == "RESTARTCHECK succeeded\n" time.sleep(2) diff --git a/tests/test_sub_port_intf.py b/tests/test_sub_port_intf.py index 3a1bad68fe..ec76ec13bb 100644 --- a/tests/test_sub_port_intf.py +++ b/tests/test_sub_port_intf.py @@ -1,6 +1,5 @@ import json import time -import pytest from dvslib.dvs_common import wait_for_result from swsscommon import swsscommon @@ -185,11 +184,12 @@ def set_parent_port_admin_status(self, dvs, port_name, status): self.config_db.create_entry(tbl_name, port_name, fvs) time.sleep(1) - if port_name.startswith(ETHERNET_PREFIX): - self.set_parent_port_oper_status(dvs, port_name, "down") - self.set_parent_port_oper_status(dvs, port_name, "up") - else: - self.set_parent_port_oper_status(dvs, port_name, "up") + if status == "up": + if port_name.startswith(ETHERNET_PREFIX): + self.set_parent_port_oper_status(dvs, port_name, "down") + self.set_parent_port_oper_status(dvs, port_name, "up") + else: + self.set_parent_port_oper_status(dvs, port_name, "up") def create_vxlan_tunnel(self, tunnel_name, vtep_ip): fvs = { @@ -582,7 +582,6 @@ def _test_sub_port_intf_creation(self, dvs, sub_port_intf_name, vrf_name=None): self.remove_lag(parent_port) self.check_lag_removal(parent_port_oid) - @pytest.mark.skip(reason="Failing. Under investigation") def test_sub_port_intf_creation(self, dvs): self.connect_dbs(dvs) @@ -669,7 +668,6 @@ def _test_sub_port_intf_add_ip_addrs(self, dvs, sub_port_intf_name, vrf_name=Non self.remove_lag(parent_port) self.asic_db.wait_for_n_keys(ASIC_LAG_TABLE, 0) - @pytest.mark.skip(reason="Failing. 
Under investigation") def test_sub_port_intf_add_ip_addrs(self, dvs): self.connect_dbs(dvs) @@ -745,7 +743,6 @@ def _test_sub_port_intf_appl_db_proc_seq(self, dvs, sub_port_intf_name, admin_up self.remove_lag(parent_port) self.check_lag_removal(parent_port_oid) - @pytest.mark.skip(reason="Failing. Under investigation") def test_sub_port_intf_appl_db_proc_seq(self, dvs): self.connect_dbs(dvs) @@ -767,7 +764,7 @@ def test_sub_port_intf_appl_db_proc_seq(self, dvs): self._test_sub_port_intf_appl_db_proc_seq(dvs, self.LAG_SUB_PORT_INTERFACE_UNDER_TEST, admin_up=True, vrf_name=self.VNET_UNDER_TEST) self._test_sub_port_intf_appl_db_proc_seq(dvs, self.LAG_SUB_PORT_INTERFACE_UNDER_TEST, admin_up=False, vrf_name=self.VNET_UNDER_TEST) - def _test_sub_port_intf_admin_status_change(self, dvs, sub_port_intf_name, vrf_name=None): + def _test_sub_port_intf_admin_status_change(self, dvs, sub_port_intf_name, vrf_name=None, defer_parent_adminup=False): substrs = sub_port_intf_name.split(VLAN_SUB_INTERFACE_SEPARATOR) parent_port = substrs[0] parent_port = self.get_parent_port(sub_port_intf_name) @@ -775,7 +772,16 @@ def _test_sub_port_intf_admin_status_change(self, dvs, sub_port_intf_name, vrf_n vrf_oid = self.default_vrf_oid old_rif_oids = self.get_oids(ASIC_RIF_TABLE) + if defer_parent_adminup: + self.set_parent_port_admin_status(dvs, parent_port, "down") + _, oa_pid = dvs.runcmd("pgrep orchagent") + oa_pid = oa_pid.strip() + # This is to block orchagent daemon in order to simulate the scenario that + # there are a large number of items pending in orchagent's m_toSync queue + dvs.runcmd("kill -s SIGSTOP {}".format(oa_pid)) + self.set_parent_port_admin_status(dvs, parent_port, "up") + if vrf_name: self.create_vrf(vrf_name) vrf_oid = self.get_newly_created_oid(ASIC_VIRTUAL_ROUTER_TABLE, [vrf_oid]) @@ -785,6 +791,10 @@ def _test_sub_port_intf_admin_status_change(self, dvs, sub_port_intf_name, vrf_n if vrf_name is None or not vrf_name.startswith(VNET_PREFIX): 
self.add_sub_port_intf_ip_addr(sub_port_intf_name, self.IPV6_ADDR_UNDER_TEST) + if defer_parent_adminup: + dvs.runcmd("kill -s SIGCONT {}".format(oa_pid)) + time.sleep(1) + fv_dict = { ADMIN_STATUS: "up", } @@ -870,12 +880,12 @@ def _test_sub_port_intf_admin_status_change(self, dvs, sub_port_intf_name, vrf_n self.remove_lag(parent_port) self.asic_db.wait_for_n_keys(ASIC_LAG_TABLE, 0) - @pytest.mark.skip(reason="Failing. Under investigation") def test_sub_port_intf_admin_status_change(self, dvs): self.connect_dbs(dvs) self._test_sub_port_intf_admin_status_change(dvs, self.SUB_PORT_INTERFACE_UNDER_TEST) self._test_sub_port_intf_admin_status_change(dvs, self.LAG_SUB_PORT_INTERFACE_UNDER_TEST) + self._test_sub_port_intf_admin_status_change(dvs, self.LAG_SUB_PORT_INTERFACE_UNDER_TEST, defer_parent_adminup=True) self._test_sub_port_intf_admin_status_change(dvs, self.SUB_PORT_INTERFACE_UNDER_TEST, self.VRF_UNDER_TEST) self._test_sub_port_intf_admin_status_change(dvs, self.LAG_SUB_PORT_INTERFACE_UNDER_TEST, self.VRF_UNDER_TEST) @@ -955,7 +965,6 @@ def _test_sub_port_intf_remove_ip_addrs(self, dvs, sub_port_intf_name, vrf_name= self.remove_lag(parent_port) self.asic_db.wait_for_n_keys(ASIC_LAG_TABLE, 0) - @pytest.mark.skip(reason="Failing. Under investigation") def test_sub_port_intf_remove_ip_addrs(self, dvs): self.connect_dbs(dvs) @@ -1147,7 +1156,6 @@ def _test_sub_port_intf_removal(self, dvs, sub_port_intf_name, removal_seq_test= self.remove_lag(parent_port) self.check_lag_removal(parent_port_oid) - @pytest.mark.skip(reason="Failing. Under investigation") def test_sub_port_intf_removal(self, dvs): self.connect_dbs(dvs) @@ -1223,7 +1231,6 @@ def _test_sub_port_intf_mtu(self, dvs, sub_port_intf_name, vrf_name=None): self.remove_lag(parent_port) self.asic_db.wait_for_n_keys(ASIC_LAG_TABLE, 0) - @pytest.mark.skip(reason="Failing. 
Under investigation") def test_sub_port_intf_mtu(self, dvs): self.connect_dbs(dvs) @@ -1452,7 +1459,6 @@ def _test_sub_port_intf_nhg_accel(self, dvs, sub_port_intf_name, nhop_num=3, cre parent_port_idx += (4 if parent_port_prefix == ETHERNET_PREFIX else 1) - @pytest.mark.skip(reason="Failing. Under investigation") def test_sub_port_intf_nhg_accel(self, dvs): self.connect_dbs(dvs) @@ -1593,7 +1599,6 @@ def _test_sub_port_intf_oper_down_with_pending_neigh_route_tasks(self, dvs, sub_ parent_port_idx += (4 if parent_port_prefix == ETHERNET_PREFIX else 1) - @pytest.mark.skip(reason="Failing. Under investigation") def test_sub_port_intf_oper_down_with_pending_neigh_route_tasks(self, dvs): self.connect_dbs(dvs) diff --git a/tests/test_twamp.py b/tests/test_twamp.py new file mode 100644 index 0000000000..d2d8edb8f0 --- /dev/null +++ b/tests/test_twamp.py @@ -0,0 +1,182 @@ +# This test suite covers the functionality of twamp light feature in SwSS +import pytest +import time + +@pytest.mark.usefixtures("testlog") +@pytest.mark.usefixtures('dvs_twamp_manager') +class TestTwampLight(object): + + def check_syslog(self, dvs, marker, log, expected_cnt): + (ec, out) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \'%s\' | wc -l" % (marker, log)]) + assert out.strip() == str(expected_cnt) + + def test_SenderPacketCountSingle(self, dvs, testlog): + """ + This test covers the TWAMP Light session creation and removal operations + Operation flow: + 1. Create twamp-light session-sender using once packet-count + The session remains inactive + 2. Start twamp-light session + The session becomes active + 3. 
Remove twamp-light session + """ + + session = "TEST_SENDER1" + src_ip = "1.1.1.1" + src_udp_port = "862" + dst_ip = "2.2.2.2" + dst_udp_port = "863" + packet_count = "1000" + tx_interval = "10" + timeout = "10" + stats_interval = "20000" + + marker = dvs.add_log_marker() + + # create twamp-light session + self.dvs_twamp.create_twamp_light_session_sender_packet_count(session, src_ip, src_udp_port, dst_ip, dst_udp_port, packet_count, tx_interval, timeout) + + # start twamp-light session + self.dvs_twamp.start_twamp_light_sender(session) + + # wait for sending TWAMP-test done + time.sleep(12) + + # remove twamp-light session + self.dvs_twamp.remove_twamp_light_session(session) + self.dvs_twamp.verify_no_session() + + def test_SenderPacketCountMulti(self, dvs, testlog): + """ + This test covers the TWAMP Light Sender session creation and removal operations + Operation flow: + 1. Create twamp-light session-sender using multi packet-count + The session remains inactive + 2. Start twamp-light session + The session becomes active + 3. Remove twamp-light session + """ + + session = "TEST_SENDER1" + src_ip = "1.2.3.4" + src_udp_port = "862" + dst_ip = "5.6.7.8" + dst_udp_port = "863" + packet_count = "1000" + tx_interval = "10" + timeout = "10" + stats_interval = "11000" + + marker = dvs.add_log_marker() + + # create twamp-light session + self.dvs_twamp.create_twamp_light_session_sender_packet_count(session, src_ip, src_udp_port, dst_ip, dst_udp_port, packet_count, tx_interval, timeout, stats_interval) + + # start twamp-light session + self.dvs_twamp.start_twamp_light_sender(session) + + # wait for sending TWAMP-test done + time.sleep(120) + + # remove twamp-light session + self.dvs_twamp.remove_twamp_light_session(session) + self.dvs_twamp.verify_no_session() + + def test_SenderContinuousSingle(self, dvs, testlog): + """ + This test covers the TWAMP Light Sender session creation and removal operations + Operation flow: + 1. 
Create twamp-light session-sender using once continuous + The session remains inactive + 2. Start twamp-light session + The session becomes active + 3. Remove twamp-light session + """ + + session = "TEST_SENDER2" + src_ip = "11.11.11.11" + src_udp_port = "862" + dst_ip = "12.12.12.12" + dst_udp_port = "863" + monitor_time = "60" + tx_interval = "100" + timeout = "10" + stats_interval = "60000" + + marker = dvs.add_log_marker() + + # create twamp-light session + self.dvs_twamp.create_twamp_light_session_sender_continuous(session, src_ip, src_udp_port, dst_ip, dst_udp_port, monitor_time, tx_interval, timeout) + + # start twamp-light session + self.dvs_twamp.start_twamp_light_sender(session) + # wait for sending TWAMP-test done + time.sleep(60) + + # remove twamp-light session + self.dvs_twamp.remove_twamp_light_session(session) + self.dvs_twamp.verify_no_session() + + def test_SenderContinuousMulti(self, dvs, testlog): + """ + This test covers the continuous TWAMP Light Sender session creation and removal operations + Operation flow: + 1. Create twamp-light session-sender using multi continuous + The session remains inactive + 2. Start twamp-light session + The session becomes active + 3. 
Remove twamp-light session + """ + + session = "TEST_SENDER2" + src_ip = "11.12.13.14" + src_udp_port = "862" + dst_ip = "15.16.17.18" + dst_udp_port = "863" + monitor_time = "60" + tx_interval = "100" + timeout = "10" + stats_interval = "20000" + + marker = dvs.add_log_marker() + + # create twamp-light session + self.dvs_twamp.create_twamp_light_session_sender_continuous(session, src_ip, src_udp_port, dst_ip, dst_udp_port, monitor_time, tx_interval, timeout, stats_interval) + + # start twamp-light session + self.dvs_twamp.start_twamp_light_sender(session) + + # wait for sending TWAMP-test done + time.sleep(60) + + # remove twamp-light session + self.dvs_twamp.remove_twamp_light_session(session) + self.dvs_twamp.verify_no_session() + + def test_Reflector(self, dvs, testlog): + """ + This test covers the TWAMP Light Reflector session creation and removal operations + Operation flow: + 1. Create twamp-light session-reflector + 2. Remove twamp-light session + """ + + session = "TEST_REFLECTOR1" + src_ip = "22.1.1.1" + src_udp_port = "862" + dst_ip = "22.1.1.2" + dst_udp_port = "863" + + marker = dvs.add_log_marker() + + # create twamp-light session + self.dvs_twamp.create_twamp_light_session_reflector(session, src_ip, src_udp_port, dst_ip, dst_udp_port) + + # remove twamp-light session + self.dvs_twamp.remove_twamp_light_session(session) + self.dvs_twamp.verify_no_session() + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass diff --git a/tests/test_virtual_chassis.py b/tests/test_virtual_chassis.py index c2fa852483..5401f6870f 100644 --- a/tests/test_virtual_chassis.py +++ b/tests/test_virtual_chassis.py @@ -2,6 +2,10 @@ from dvslib.dvs_database import DVSDatabase import ast import time +import pytest +import buffer_model + +DVS_ENV = ["ASIC_VENDOR=vs"] class TestVirtualChassis(object): @@ -39,7 +43,7 @@ def config_inbandif_port(self, vct, ibport): 
# Configure only for line cards if cfg_switch_type == "voq": - dvs.runcmd(f"config interface startup {ibport}") + dvs.port_admin_set(f"{ibport}", "up") config_db.create_entry("VOQ_INBAND_INTERFACE", f"{ibport}", {"inband_type": "port"}) def del_inbandif_port(self, vct, ibport): @@ -307,8 +311,8 @@ def chassis_system_neigh_create(): test_sysneigh = "" for sysnk in sysneighkeys: sysnk_tok = sysnk.split("|") - assert len(sysnk_tok) == 3, "Invalid system neigh key in chassis app db" - if sysnk_tok[2] == test_neigh_ip: + assert len(sysnk_tok) == 4, "Invalid system neigh key in chassis app db" + if sysnk_tok[3] == test_neigh_ip: test_sysneigh = sysnk break @@ -367,7 +371,7 @@ def chassis_system_neigh_create(): # Check for kernel entries _, output = dvs.runcmd("ip neigh show") - assert f"{test_neigh_ip} dev {inband_port}" in output, "Kernel neigh not found for remote neighbor" + assert f"{test_neigh_ip} dev {inband_port} lladdr {mac_address}" in output, "Kernel neigh not found for remote neighbor" _, output = dvs.runcmd("ip route show") assert f"{test_neigh_ip} dev {inband_port} scope link" in output, "Kernel route not found for remote neighbor" @@ -845,7 +849,130 @@ def test_chassis_system_lag_id_allocator_del_id(self, vct): assert len(lagmemberkeys) == 0, "Stale system lag member entries in asic db" break - + + def test_chassis_add_remove_ports(self, vct): + """Test removing and adding a port in a VOQ chassis. + + Test validates that when a port is created the port is removed from the default vlan. 
+ """ + dvss = vct.dvss + for name in dvss.keys(): + dvs = dvss[name] + buffer_model.enable_dynamic_buffer(dvs.get_config_db(), dvs.runcmd) + + config_db = dvs.get_config_db() + app_db = dvs.get_app_db() + asic_db = dvs.get_asic_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + cfg_switch_type = metatbl.get("switch_type") + + if cfg_switch_type == "voq": + num_ports = len(asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT")) + # Get the port info we'll flap + port = config_db.get_keys('PORT')[0] + port_info = config_db.get_entry("PORT", port) + + # Remove port's other configs + pgs = config_db.get_keys('BUFFER_PG') + queues = config_db.get_keys('BUFFER_QUEUE') + for key in pgs: + if port in key: + config_db.delete_entry('BUFFER_PG', key) + app_db.wait_for_deleted_entry('BUFFER_PG_TABLE', key) + + for key in queues: + if port in key: + config_db.delete_entry('BUFFER_QUEUE', key) + app_db.wait_for_deleted_entry('BUFFER_QUEUE_TABLE', key) + + # Remove port + config_db.delete_entry('PORT', port) + app_db.wait_for_deleted_entry('PORT_TABLE', port) + num = asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", + num_ports) + assert len(num) == num_ports + + # Create port + config_db.update_entry("PORT", port, port_info) + app_db.wait_for_entry("PORT_TABLE", port) + num = asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", + num_ports) + assert len(num) == num_ports + + # Check that we see the logs for removing default vlan + _, logSeen = dvs.runcmd( [ "sh", "-c", + "awk STARTFILE/ENDFILE /var/log/syslog | grep 'removeDefaultVlanMembers: Remove 32 VLAN members from default VLAN' | wc -l"] ) + assert logSeen.strip() == "1" + + buffer_model.disable_dynamic_buffer(dvs.get_config_db(), dvs.runcmd) + + def test_voq_egress_queue_counter(self, vct): + if vct is None: + return + dvss = vct.dvss + dvs = None + for name in dvss.keys(): + if "supervisor" in name: + continue + dvs = dvss[name] + break + assert dvs + _, _ = dvs.runcmd("counterpoll queue 
enable") + + num_voqs_per_port = 8 + # vs-switch creates 20 queues per port. + num_queues_per_local_port = 20 + num_ports_per_linecard = 32 + num_local_ports = 32 + num_linecards = 3 + num_sysports = num_ports_per_linecard * num_linecards + num_egress_queues = num_local_ports * num_queues_per_local_port + num_voqs = ( num_ports_per_linecard * num_voqs_per_port * num_linecards ) + num_queues_to_be_polled = num_voqs + num_egress_queues + + flex_db = dvs.get_flex_db() + flex_db.wait_for_n_keys("FLEX_COUNTER_TABLE:QUEUE_STAT_COUNTER", num_queues_to_be_polled) + + def test_chassis_wred_profile_on_system_ports(self, vct): + """Test whether wred profile is applied on system ports in VoQ chassis. + """ + dvss = vct.dvss + for name in dvss.keys(): + dvs = dvss[name] + + config_db = dvs.get_config_db() + app_db = dvs.get_app_db() + asic_db = dvs.get_asic_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + cfg_switch_type = metatbl.get("switch_type") + + if cfg_switch_type == "voq": + # Get all the keys from SYTEM_PORT table and check whether wred_profile is applied properly + system_ports = config_db.get_keys('SYSTEM_PORT') + + for key in system_ports: + queue3 = key + '|' + '3' + queue_entry = config_db.get_entry('QUEUE', queue3) + wred_profile = queue_entry['wred_profile'] + if wred_profile != 'AZURE_LOSSLESS': + print("WRED profile not applied on queue3 on system port %s", key) + assert wred_profile == 'AZURE_LOSSLESS' + + queue4 = key + '|' + '4' + queue_entry = config_db.get_entry('QUEUE', queue4) + wred_profile = queue_entry['wred_profile'] + if wred_profile != 'AZURE_LOSSLESS': + print("WRED profile not applied on queue4 on system port %s", key) + assert wred_profile == 'AZURE_LOSSLESS' + + # Check that we see the logs for applying WRED_PROFILE on all system ports + matching_log = "SAI_QUEUE_ATTR_WRED_PROFILE_ID" + _, logSeen = dvs.runcmd([ "sh", "-c", + "awk STARTFILE/ENDFILE /var/log/swss/sairedis.rec | grep SAI_QUEUE_ATTR_WRED_PROFILE_ID | wc 
-l"]) + + # Total number of logs = (No of system ports * No of lossless priorities) - No of lossless priorities for CPU ports + assert logSeen.strip() == str(len(system_ports)*2 - 2) + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying def test_nonflaky_dummy(): diff --git a/tests/test_vlan.py b/tests/test_vlan.py index 6e43227a56..28d3de3a29 100644 --- a/tests/test_vlan.py +++ b/tests/test_vlan.py @@ -2,7 +2,7 @@ import pytest from distutils.version import StrictVersion -from dvslib.dvs_common import PollingConfig +from dvslib.dvs_common import PollingConfig, wait_for_result @pytest.mark.usefixtures("testlog") @pytest.mark.usefixtures('dvs_vlan_manager') @@ -436,6 +436,104 @@ def test_VlanHostIf(self, dvs): self.dvs_vlan.get_and_verify_vlan_ids(0) self.dvs_vlan.get_and_verify_vlan_hostif_ids(len(dvs.asic_db.hostif_name_map) - 1) + def test_VlanGratArp(self, dvs): + def arp_accept_enabled(): + rc, res = dvs.runcmd("cat /proc/sys/net/ipv4/conf/Vlan{}/arp_accept".format(vlan)) + return (res.strip("\n") == "1", res) + + def arp_accept_disabled(): + rc, res = dvs.runcmd("cat /proc/sys/net/ipv4/conf/Vlan{}/arp_accept".format(vlan)) + return (res.strip("\n") == "0", res) + + vlan = "2" + self.dvs_vlan.create_vlan(vlan) + self.dvs_vlan.create_vlan_interface(vlan) + self.dvs_vlan.set_vlan_intf_property(vlan, "grat_arp", "enabled") + + wait_for_result(arp_accept_enabled, PollingConfig(), "IPv4 arp_accept not enabled") + + # Not currently possible to test `accept_untracked_na` as it doesn't exist in the kernel for + # our test VMs (only present in kernels 5.19 and above) + + self.dvs_vlan.set_vlan_intf_property(vlan, "grat_arp", "disabled") + + wait_for_result(arp_accept_disabled, PollingConfig(), "IPv4 arp_accept not disabled") + + self.dvs_vlan.remove_vlan_interface(vlan) + self.dvs_vlan.remove_vlan(vlan) + self.dvs_vlan.get_and_verify_vlan_ids(0) + + def test_VlanProxyArp(self, dvs): + + 
def proxy_arp_enabled(): + rc, proxy_arp_res = dvs.runcmd("cat /proc/sys/net/ipv4/conf/Vlan{}/proxy_arp".format(vlan)) + rc, pvlan_res = dvs.runcmd("cat /proc/sys/net/ipv4/conf/Vlan{}/proxy_arp_pvlan".format(vlan)) + + return (proxy_arp_res.strip("\n") == "1" and pvlan_res.strip("\n") == "1", (proxy_arp_res, pvlan_res)) + + def proxy_arp_disabled(): + rc, proxy_arp_res = dvs.runcmd("cat /proc/sys/net/ipv4/conf/Vlan{}/proxy_arp".format(vlan)) + rc, pvlan_res = dvs.runcmd("cat /proc/sys/net/ipv4/conf/Vlan{}/proxy_arp_pvlan".format(vlan)) + + return (proxy_arp_res.strip("\n") == "0" and pvlan_res.strip("\n") == "0", (proxy_arp_res, pvlan_res)) + + vlan = "2" + self.dvs_vlan.create_vlan(vlan) + self.dvs_vlan.create_vlan_interface(vlan) + self.dvs_vlan.set_vlan_intf_property(vlan, "proxy_arp", "enabled") + + wait_for_result(proxy_arp_enabled, PollingConfig(), 'IPv4 proxy_arp or proxy_arp_pvlan not enabled') + + self.dvs_vlan.set_vlan_intf_property(vlan, "proxy_arp", "disabled") + + wait_for_result(proxy_arp_disabled, PollingConfig(), 'IPv4 proxy_arp or proxy_arp_pvlan not disabled') + + self.dvs_vlan.remove_vlan_interface(vlan) + self.dvs_vlan.remove_vlan(vlan) + self.dvs_vlan.get_and_verify_vlan_ids(0) + + def test_VlanMemberLinkDown(self, dvs): + + # TODO: add_ip_address has a dependency on cdb within dvs, + # so we still need to setup the db. This should be refactored. 
+ dvs.setup_db() + + vlan = "1000" + vlan_ip = "192.168.0.1/21" + interface = "Ethernet0" + vlan_interface = "Vlan%s" % vlan + server_ip = "192.168.0.100" + vlan_intf_sysctl_param_path = "/proc/sys/net/ipv4/conf/%s/arp_evict_nocarrier" % vlan_interface + + self.dvs_vlan.create_vlan(vlan) + vlan_oid = self.dvs_vlan.get_and_verify_vlan_ids(1)[0] + self.dvs_vlan.verify_vlan(vlan_oid, vlan) + self.dvs_vlan.create_vlan_member(vlan, interface) + self.dvs_vlan.verify_vlan_member(vlan_oid, interface) + dvs.set_interface_status(interface, "up") + dvs.add_ip_address(vlan_interface, vlan_ip) + dvs.runcmd("ip neigh replace %s lladdr 11:22:33:44:55:66 dev %s nud stale" % (server_ip, vlan_interface)) + + neigh_oid = self.dvs_vlan.app_db.wait_for_n_keys("NEIGH_TABLE", 1)[0] + assert vlan_interface in neigh_oid and server_ip in neigh_oid + + # NOTE: arp_evict_nocarrier is available for kernel >= v5.16 and current + # docker-sonic-vs is based on kernel v5.4.0, so test only if this sysctl + # param is present + rc, res = dvs.runcmd("cat %s" % vlan_intf_sysctl_param_path) + if rc == 0: + assert res.strip() == "0" + dvs.set_interface_status(interface, "down") + neigh_oid = self.dvs_vlan.app_db.wait_for_n_keys("NEIGH_TABLE", 1)[0] + assert vlan_interface in neigh_oid and server_ip in neigh_oid + + dvs.runcmd("ip neigh flush all") + dvs.remove_ip_address(vlan_interface, vlan_ip) + self.dvs_vlan.remove_vlan_member(vlan, interface) + self.dvs_vlan.get_and_verify_vlan_member_ids(0) + self.dvs_vlan.remove_vlan(vlan) + self.dvs_vlan.get_and_verify_vlan_ids(0) + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying def test_nonflaky_dummy(): diff --git a/tests/test_vnet.py b/tests/test_vnet.py index 0dec1f7446..c28d7cf320 100644 --- a/tests/test_vnet.py +++ b/tests/test_vnet.py @@ -64,7 +64,7 @@ def get_created_entry(db, table, existed_entries): def get_all_created_entries(db, table, existed_entries): tbl = 
swsscommon.Table(db, table) entries = set(tbl.getKeys()) - new_entries = list(entries - existed_entries) + new_entries = list(entries - set(existed_entries)) assert len(new_entries) >= 0, "Get all could be no new created entries." new_entries.sort() return new_entries @@ -140,11 +140,11 @@ def delete_vnet_local_routes(dvs, prefix, vnet_name): time.sleep(2) -def create_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor="", profile=""): - set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac=mac, vni=vni, ep_monitor=ep_monitor, profile=profile) +def create_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor="", profile="", primary="", monitoring="", adv_prefix=""): + set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac=mac, vni=vni, ep_monitor=ep_monitor, profile=profile, primary=primary, monitoring=monitoring, adv_prefix=adv_prefix) -def set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor="", profile=""): +def set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor="", profile="", primary="", monitoring="", adv_prefix=""): conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) attrs = [ @@ -163,6 +163,15 @@ def set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor= if profile: attrs.append(('profile', profile)) + if primary: + attrs.append(('primary', primary)) + + if monitoring: + attrs.append(('monitoring', monitoring)) + + if adv_prefix: + attrs.append(('adv_prefix', adv_prefix)) + tbl = swsscommon.Table(conf_db, "VNET_ROUTE_TUNNEL") fvs = swsscommon.FieldValuePairs(attrs) tbl.set("%s|%s" % (vnet_name, prefix), fvs) @@ -317,7 +326,7 @@ def delete_phy_interface(dvs, ifname, ipaddr): time.sleep(2) -def create_vnet_entry(dvs, name, tunnel, vni, peer_list, scope="", advertise_prefix=False): +def create_vnet_entry(dvs, name, tunnel, vni, peer_list, scope="", advertise_prefix=False, overlay_dmac=""): conf_db = 
swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) @@ -333,6 +342,9 @@ def create_vnet_entry(dvs, name, tunnel, vni, peer_list, scope="", advertise_pre if advertise_prefix: attrs.append(('advertise_prefix', 'true')) + if overlay_dmac: + attrs.append(('overlay_dmac', overlay_dmac)) + # create the VXLAN tunnel Term entry in Config DB create_entry_tbl( conf_db, @@ -444,6 +456,15 @@ def update_bfd_session_state(dvs, addr, state): ntf_data = "[{\"bfd_session_id\":\""+bfd_id+"\",\"session_state\":\""+bfd_sai_state[state]+"\"}]" ntf.send("bfd_session_state_change", ntf_data, fvp) +def update_monitor_session_state(dvs, addr, monitor, state): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + create_entry_tbl( + state_db, + "VNET_MONITOR_TABLE", '|', "%s|%s" % (monitor,addr), + [ + ("state", state), + ] + ) def get_bfd_session_id(dvs, addr): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) @@ -515,10 +536,30 @@ def check_remove_routes_advertisement(dvs, prefix): assert prefix not in keys +def check_syslog(dvs, marker, err_log): + (exitcode, num) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \"%s\" | wc -l" % (marker, err_log)]) + assert num.strip() == "0" + + loopback_id = 0 def_vr_id = 0 switch_mac = None +def update_bgp_global_dev_state(dvs, state): + config_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + create_entry_tbl( + config_db, + "BGP_DEVICE_GLOBAL",'|',"STATE", + [ + ("tsa_enabled", state), + ] + ) + +def set_tsa(dvs): + update_bgp_global_dev_state(dvs, "true") + +def clear_tsa(dvs): + update_bgp_global_dev_state(dvs, "false") class VnetVxlanVrfTunnel(object): @@ -534,6 +575,7 @@ class VnetVxlanVrfTunnel(object): ASIC_NEXT_HOP_GROUP = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP" ASIC_NEXT_HOP_GROUP_MEMBER = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER" ASIC_BFD_SESSION = 
"ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION" + APP_VNET_MONITOR = "VNET_MONITOR_TABLE" def __init__(self): self.tunnel_map_ids = set() @@ -752,6 +794,9 @@ def check_router_interface(self, dvs, intf_name, name, vlan_oid=0): expected_attr = { 'SAI_VLAN_ATTR_BROADCAST_FLOOD_CONTROL_TYPE': 'SAI_VLAN_FLOOD_CONTROL_TYPE_NONE' } check_object(asic_db, self.ASIC_VLAN_TABLE, vlan_oid, expected_attr) + expected_attr = { 'SAI_VLAN_ATTR_UNKNOWN_MULTICAST_FLOOD_CONTROL_TYPE': 'SAI_VLAN_FLOOD_CONTROL_TYPE_NONE' } + check_object(asic_db, self.ASIC_VLAN_TABLE, vlan_oid, expected_attr) + check_linux_intf_arp_proxy(dvs, intf_name) self.rifs.add(new_rif) @@ -872,6 +917,26 @@ def check_next_hop_group_member(self, dvs, nhg, ordered_ecmp, expected_endpoint, assert self.serialize_endpoint_group(endpoints) == expected_endpoint_str + def get_nexthop_groups(self, dvs, nhg): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + tbl_nhgm = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP_GROUP_MEMBER) + tbl_nh = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP) + nhg_data = {} + nhg_data['id'] = nhg + entries = set(tbl_nhgm.getKeys()) + nhg_data['endpoints'] = [] + for entry in entries: + status, fvs = tbl_nhgm.get(entry) + fvs = dict(fvs) + assert status, "Got an error when get a key" + if fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhg: + nh_key = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] + status, nh_fvs = tbl_nh.get(nh_key) + nh_fvs = dict(nh_fvs) + assert status, "Got an error when get a key" + endpoint = nh_fvs["SAI_NEXT_HOP_ATTR_IP"] + nhg_data['endpoints'].append(endpoint) + return nhg_data def check_vnet_ecmp_routes(self, dvs, name, endpoints, tunnel, mac=[], vni=[], route_ids=[], nhg="", ordered_ecmp="false", nh_seq_id=None): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) endpoint_str = name + "|" + self.serialize_endpoint_group(endpoints) @@ -935,6 +1000,74 @@ def check_vnet_ecmp_routes(self, dvs, name, endpoints, tunnel, 
mac=[], vni=[], r return new_route, new_nhg + def check_priority_vnet_ecmp_routes(self, dvs, name, endpoints_primary, tunnel, mac=[], vni=[], route_ids=[], count =1, prefix =""): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + endpoint_str_primary = name + "|" + self.serialize_endpoint_group(endpoints_primary) + new_nhgs = [] + expected_attrs_primary = {} + for idx, endpoint in enumerate(endpoints_primary): + expected_attr = { + "SAI_NEXT_HOP_ATTR_TYPE": "SAI_NEXT_HOP_TYPE_TUNNEL_ENCAP", + "SAI_NEXT_HOP_ATTR_IP": endpoint, + "SAI_NEXT_HOP_ATTR_TUNNEL_ID": self.tunnel[tunnel], + } + if vni and vni[idx]: + expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_VNI': vni[idx]}) + if mac and mac[idx]: + expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_MAC': mac[idx]}) + expected_attrs_primary[endpoint] = expected_attr + + if len(endpoints_primary) == 1: + if route_ids: + new_route = route_ids + else: + new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) + return new_route + else : + new_nhgs = get_all_created_entries(asic_db, self.ASIC_NEXT_HOP_GROUP, self.nhgs) + found_match = False + + for nhg in new_nhgs: + nhg_data = self.get_nexthop_groups(dvs, nhg) + eplist = self.serialize_endpoint_group(nhg_data['endpoints']) + if eplist == self.serialize_endpoint_group(endpoints_primary): + self.nhg_ids[endpoint_str_primary] = nhg + found_match = True + + assert found_match, "the expected Nexthop group was not found." 
+ + # Check routes in ingress VRF + expected_nhg_attr = { + "SAI_NEXT_HOP_GROUP_ATTR_TYPE": "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP", + } + for nhg in new_nhgs: + check_object(asic_db, self.ASIC_NEXT_HOP_GROUP, nhg, expected_nhg_attr) + + # Check nexthop group member + self.check_next_hop_group_member(dvs, self.nhg_ids[endpoint_str_primary], "false", endpoints_primary, expected_attrs_primary) + + if route_ids: + new_route = route_ids + else: + new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) + + #Check if the route is in expected VRF + active_nhg = self.nhg_ids[endpoint_str_primary] + for idx in range(count): + if prefix != "" and prefix not in new_route[idx] : + continue + check_object(asic_db, self.ASIC_ROUTE_ENTRY, new_route[idx], + { + "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": active_nhg, + } + ) + rt_key = json.loads(new_route[idx]) + + + self.routes.update(new_route) + del self.nhg_ids[endpoint_str_primary] + return new_route + def check_del_vnet_routes(self, dvs, name, prefixes=[]): # TODO: Implement for VRF VNET @@ -948,12 +1081,73 @@ def _access_function(): return True + def check_custom_monitor_app_db(self, dvs, prefix, endpoint, packet_type, overlay_dmac): + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + key = endpoint + ':' + prefix + check_object(app_db, self.APP_VNET_MONITOR, key, + { + "packet_type": packet_type, + "overlay_dmac" : overlay_dmac + } + ) + return True + + def check_custom_monitor_deleted(self, dvs, prefix, endpoint): + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + key = endpoint + ':' + prefix + check_deleted_object(app_db, self.APP_VNET_MONITOR, key) class TestVnetOrch(object): def get_vnet_obj(self): return VnetVxlanVrfTunnel() + def setup_db(self, dvs): + self.pdb = dvs.get_app_db() + self.adb = dvs.get_asic_db() + self.cdb = dvs.get_config_db() + self.sdb = dvs.get_state_db() + + def clear_srv_config(self, dvs): + dvs.servers[0].runcmd("ip 
address flush dev eth0") + dvs.servers[1].runcmd("ip address flush dev eth0") + dvs.servers[2].runcmd("ip address flush dev eth0") + dvs.servers[3].runcmd("ip address flush dev eth0") + + def set_admin_status(self, interface, status): + self.cdb.update_entry("PORT", interface, {"admin_status": status}) + + def create_l3_intf(self, interface, vrf_name): + if len(vrf_name) == 0: + self.cdb.create_entry("INTERFACE", interface, {"NULL": "NULL"}) + else: + self.cdb.create_entry("INTERFACE", interface, {"vrf_name": vrf_name}) + + def add_ip_address(self, interface, ip): + self.cdb.create_entry("INTERFACE", interface + "|" + ip, {"NULL": "NULL"}) + + def remove_ip_address(self, interface, ip): + self.cdb.delete_entry("INTERFACE", interface + "|" + ip) + + def create_route_entry(self, key, pairs): + tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "ROUTE_TABLE") + fvs = swsscommon.FieldValuePairs(list(pairs.items())) + tbl.set(key, fvs) + + def remove_route_entry(self, key): + tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "ROUTE_TABLE") + tbl._del(key) + + def check_route_entries(self, destinations): + def _access_function(): + route_entries = self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + route_destinations = [json.loads(route_entry)["dest"] + for route_entry in route_entries] + return (all(destination in route_destinations for destination in destinations), None) + + wait_for_result(_access_function) + + @pytest.fixture(params=["true", "false"]) def ordered_ecmp(self, dvs, request): @@ -1470,6 +1664,10 @@ def test_vnet_vxlan_multi_map(self, dvs, testlog): create_vxlan_tunnel_map(dvs, tunnel_name, 'map_1', 'Vlan1000', '1000') + delete_vnet_entry(dvs, 'Vnet1') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet1') + delete_vxlan_tunnel(dvs, tunnel_name) + ''' Test 7 - Test for vnet tunnel routes with ECMP nexthop group ''' @@ -1538,6 +1736,7 @@ def test_vnet_orch_7(self, dvs, ordered_ecmp, testlog): delete_vnet_entry(dvs, vnet_name) 
vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) ''' Test 8 - Test for ipv6 vnet tunnel routes with ECMP nexthop group @@ -1624,6 +1823,7 @@ def test_vnet_orch_8(self, dvs, ordered_ecmp, testlog): delete_vnet_entry(dvs, vnet_name) vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) ''' @@ -1755,6 +1955,7 @@ def test_vnet_orch_9(self, dvs, ordered_ecmp, testlog): delete_vnet_entry(dvs, vnet_name) vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) ''' @@ -1891,7 +2092,7 @@ def test_vnet_orch_10(self, dvs, ordered_ecmp, testlog): delete_vnet_entry(dvs, vnet_name) vnet_obj.check_del_vnet_entry(dvs, vnet_name) - + delete_vxlan_tunnel(dvs, tunnel_name) ''' Test 11 - Test for vnet tunnel routes with both single endpoint and ECMP group with endpoint health monitor @@ -1999,6 +2200,7 @@ def test_vnet_orch_11(self, dvs, ordered_ecmp, testlog): delete_vnet_entry(dvs, vnet_name) vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) ''' @@ -2128,7 +2330,1199 @@ def test_vnet_orch_12(self, dvs, testlog): delete_vnet_entry(dvs, 'Vnet12') vnet_obj.check_del_vnet_entry(dvs, 'Vnet12') + delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 13 - Test for configuration idempotent behaviour + ''' + def test_vnet_orch_13(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_13' + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') + create_vnet_entry(dvs, 'Vnet13', tunnel_name, '10008', "") + + vnet_obj.check_vnet_entry(dvs, 'Vnet13') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet13', '10008') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') + + # Create an ECMP tunnel route + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet13', 'fd:8:1::1,fd:8:1::2,fd:8:1::3') + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet13', 
['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet13', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + + # readd same tunnel again + set_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet13', 'fd:8:1::1,fd:8:1::2,fd:8:1::3') + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet13', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet13', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + # Check only one group is present + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 in vnet_obj.nhgs + assert len(vnet_obj.nhgs) == 1 + assert nhg1_1 == nhg1_2 + + # Remove one of the tunnel routes + delete_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet13') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet13', ["fd:8:10::32/128"]) + check_remove_state_db_routes(dvs, 'Vnet13', "fd:8:10::32/128") + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + + # Check the nexthop group still exists + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_2 not in vnet_obj.nhgs + assert len(vnet_obj.nhgs) == 0 + delete_vnet_entry(dvs, 'Vnet13') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet13') + + ''' + Test 14 - Test for configuration idempotent behaviour 2 + ''' + def test_vnet_orch_14(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_14' + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') + create_vnet_entry(dvs, 'Vnet14', tunnel_name, '10008', "") + + vnet_obj.check_vnet_entry(dvs, 'Vnet14') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet14', '10008') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') + + # Create an ECMP tunnel route + vnet_obj.fetch_exist_entries(dvs) + 
create_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet14', 'fd:8:1::1,fd:8:1::2,fd:8:1::3') + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet14', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet14', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + + # readd same tunnel again + set_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet14', 'fd:8:1::1,fd:8:1::2,fd:8:1::3') + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet14', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet14', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + + #update nexthops for the same tunnel. + set_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet14', 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet14', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet14', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs + assert nhg1_2 in vnet_obj.nhgs + # Remove the tunnel route + delete_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet14') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet14', ["fd:8:10::32/128"]) + check_remove_state_db_routes(dvs, 'Vnet14', "fd:8:10::32/128") + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + # Remove the tunnel route + delete_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet14') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet14', ["fd:8:10::32/128"]) + 
check_remove_state_db_routes(dvs, 'Vnet14', "fd:8:10::32/128") + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + + # Check the nexthop group still exists + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_2 not in vnet_obj.nhgs + assert nhg1_1 not in vnet_obj.nhgs + + delete_vnet_entry(dvs, 'Vnet14') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet14') + delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 15 - Test for configuration idempotent behaviour single endpoint + ''' + def test_vnet_orch_15(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_15' + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') + create_vnet_entry(dvs, 'Vnet15', tunnel_name, '10008', "") + + vnet_obj.check_vnet_entry(dvs, 'Vnet15') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet15', '10008') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') + + # Create an tunnel route + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet15', 'fd:8:1::1') + route1 = vnet_obj.check_vnet_routes(dvs, 'Vnet15', 'fd:8:1::1', tunnel_name) + check_state_db_routes(dvs, 'Vnet15', "fd:8:10::32/128", ['fd:8:1::1']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + + # readd same tunnel again + set_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet15', 'fd:8:1::1') + route1 = vnet_obj.check_vnet_routes(dvs, 'Vnet15', 'fd:8:1::1', tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet15', "fd:8:10::32/128", ['fd:8:1::1']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + # Check only one group is present + vnet_obj.fetch_exist_entries(dvs) + assert len(vnet_obj.nhops) == 1 + + # Remove one of the tunnel routes + delete_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet15') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet15', ["fd:8:10::32/128"]) + 
check_remove_state_db_routes(dvs, 'Vnet15', "fd:8:10::32/128") + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + + # Check the nexthop group still exists + vnet_obj.fetch_exist_entries(dvs) + assert len(vnet_obj.nhops) == 0 + delete_vnet_entry(dvs, 'Vnet15') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet15') + delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 16 - Test for configuration idempotent behaviour single endpoint with BFD + ''' + def test_vnet_orch_16(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_16' + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, 'fd:8::33') + create_vnet_entry(dvs, 'Vnet16', tunnel_name, '10008', "") + + vnet_obj.check_vnet_entry(dvs, 'Vnet16') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet16', '10008') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:8::33') + + # Create a tunnel route + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "fd:8:11::32/128", 'Vnet16', 'fd:8:2::1', ep_monitor='fd:8:2::1') + update_bfd_session_state(dvs, 'fd:8:2::1', 'Up') + time.sleep(2) + + route1 = vnet_obj.check_vnet_routes(dvs, 'Vnet16', 'fd:8:2::1', tunnel_name) + check_state_db_routes(dvs, 'Vnet16', "fd:8:11::32/128", ['fd:8:2::1']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:11::32/128") + + # readd same tunnel again + set_vnet_routes(dvs, "fd:8:11::32/128", 'Vnet16', 'fd:8:2::1', ep_monitor='fd:8:2::1') + route1 = vnet_obj.check_vnet_routes(dvs, 'Vnet16', 'fd:8:2::1', tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet16', "fd:8:11::32/128", ['fd:8:2::1']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:11::32/128") + # Check only one group is present + vnet_obj.fetch_exist_entries(dvs) + assert len(vnet_obj.nhops) == 1 + + update_bfd_session_state(dvs, 'fd:8:2::1', 'Down') + time.sleep(2) + # readd same tunnel again + 
set_vnet_routes(dvs, "fd:8:11::32/128", 'Vnet16', 'fd:8:2::1', ep_monitor='fd:8:2::1') + + update_bfd_session_state(dvs, 'fd:8:2::1', 'Up') + time.sleep(2) + + route1 = vnet_obj.check_vnet_routes(dvs, 'Vnet16', 'fd:8:2::1', tunnel_name,route_ids=route1) + check_state_db_routes(dvs, 'Vnet16', "fd:8:11::32/128", ['fd:8:2::1']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:11::32/128") + + + # Remove one of the tunnel routes + delete_vnet_routes(dvs, "fd:8:11::32/128", 'Vnet16') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet16', ["fd:8:11::32/128"]) + check_remove_state_db_routes(dvs, 'Vnet16', "fd:8:11::32/128") + check_remove_routes_advertisement(dvs, "fd:8:11::32/128") + + # Check the nexthop group still exists + vnet_obj.fetch_exist_entries(dvs) + assert len(vnet_obj.nhops) == 0 + delete_vnet_entry(dvs, 'Vnet16') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet16') + delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 17 - Test for configuration idempotent behaviour multiple endpoint with BFD + ''' + def test_vnet_orch_17(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_17' + + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + create_vnet_entry(dvs, 'Vnet17', tunnel_name, '10017', "") + + vnet_obj.check_vnet_entry(dvs, 'Vnet17') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet17', '10017') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet17', '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, 'Vnet17', ["100.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet17', "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + #readd the route + set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet17', 
'9.0.0.1,9.0.0.2,9.0.0.3',ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet17', ["100.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet17', "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, '9.1.0.1', 'Up') + update_bfd_session_state(dvs, '9.1.0.2', 'Up') + update_bfd_session_state(dvs, '9.1.0.3', 'Up') + time.sleep(2) + + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet17', ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet17', "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + #readd the active route + set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet17', '9.0.0.1,9.0.0.2,9.0.0.3',ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + route2, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet17', ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, 'Vnet17', "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + assert nhg1_1 == nhg1_2 + assert len(vnet_obj.nhgs) == 1 + + # Remove tunnel route + delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet17') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet17', ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet17', "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Check the corresponding nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs + # Check the BFD session specific to the endpoint group is removed while others exist + check_del_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3']) + + delete_vnet_entry(dvs, 'Vnet17') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet17') 
+ delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 18 - Test for priority vnet tunnel routes with ECMP nexthop group. test primary secondary switchover. + ''' + def test_vnet_orch_18(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + tunnel_name = 'tunnel_18' + vnet_name = 'vnet18' + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10018', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10018') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', primary ='9.1.0.1,9.1.0.2', monitoring='custom', adv_prefix='100.100.1.0/24') + + # default monitor status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.0/24") + + # Route should be properly configured when all monitor session states go up. Only primary Endpoints should be in use. 
+ update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'up') + + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") + + # Remove first primary endpoint from group. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'down') + time.sleep(2) + route1= vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") + + # Switch to secondary if both primary down + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'down') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3','9.1.0.4'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.3','9.1.0.4']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") + + # removing first endpoint of secondary. route should remain on secondary NHG + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'down') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.4'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.4']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") + + # removing last endpoint of secondary. 
route should be removed + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'down') + time.sleep(2) + + new_nhgs = get_all_created_entries(asic_db, vnet_obj.ASIC_NEXT_HOP_GROUP, []) + assert len(new_nhgs) == 0 + check_remove_routes_advertisement(dvs, "100.100.1.0/24") + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + + #Route should come up with secondary endpoints. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'up') + + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3','9.1.0.4'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.3','9.1.0.4']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") + + #Route should be switched to the primary endpoint. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'up') + time.sleep(2) + route1= vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") + + #Route should be updated with the second primary endpoint. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") + + #Route should not be impacted by seconday endpoints going down. 
+ update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'down') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'down') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") + + #Route should not be impacted by seconday endpoints coming back up. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") + + # Remove tunnel route 1 + delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) + time.sleep(2) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.0/24") + + # Confirm the monitor sessions are removed + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.1") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.2") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.3") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.4") + + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 19 - Test for 2 priority vnet tunnel routes with overlapping primary secondary ECMP nexthop group. 
+ ''' + def test_vnet_orch_19(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + tunnel_name = 'tunnel_19' + vnet_name = 'Vnet19' + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.19') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10019', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10019') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.19') + + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', profile="Test_profile", primary ='9.1.0.1,9.1.0.2', monitoring='custom', adv_prefix='100.100.1.0/24') + create_vnet_routes(dvs, "200.100.1.1/32", vnet_name, '9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', primary ='9.1.0.3,9.1.0.4', monitoring='custom', adv_prefix='200.100.1.0/24') + + # default monitor session status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.0/24") + + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["200.100.1.1/32"]) + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "200.100.1.0/24") + + # Route should be properly configured when all monitor session states go up. Only primary Endpoints should be in use. 
+ update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1'], tunnel_name, prefix="100.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + + update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.1', 'up') + time.sleep(2) + route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.1']) + check_routes_advertisement(dvs, "200.100.1.0/24", "") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1, prefix="100.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + + update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.2', 'up') + time.sleep(2) + route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.1','9.1.0.2']) + check_routes_advertisement(dvs, "200.100.1.0/24", "") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'up') + update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.3', 'up') + time.sleep(2) + + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1, prefix="100.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + + route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3'], 
tunnel_name, route_ids=route1, prefix="200.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.3']) + check_routes_advertisement(dvs, "200.100.1.0/24", "") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'up') + update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.4', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1, prefix="100.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + + route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3','9.1.0.4'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.3','9.1.0.4']) + check_routes_advertisement(dvs, "200.100.1.0/24", "") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'down') + update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.1', 'down') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.2'], tunnel_name, route_ids=route1, prefix="100.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.2']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + + route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3','9.1.0.4'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.3','9.1.0.4']) + check_routes_advertisement(dvs, "200.100.1.0/24", "") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'down') + update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.2', 'down') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3','9.1.0.4'], tunnel_name, route_ids=route1, prefix="100.100.1.1/32") + check_state_db_routes(dvs, 
vnet_name, "100.100.1.1/32", ['9.1.0.3','9.1.0.4']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + + route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3','9.1.0.4'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.3','9.1.0.4']) + check_routes_advertisement(dvs, "200.100.1.0/24", "") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'down') + update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.3', 'down') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.4'], tunnel_name, route_ids=route1, prefix="100.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.4']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + + route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.4'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.4']) + check_routes_advertisement(dvs, "200.100.1.0/24", "") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'down') + update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.4', 'down') + time.sleep(2) + + #we should still have two NHGs but no active route + new_nhgs = get_all_created_entries(asic_db, vnet_obj.ASIC_NEXT_HOP_GROUP, vnet_obj.nhgs) + assert len(new_nhgs) == 0 + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "200.100.1.1/32") + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["200.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_state_db_routes(dvs, vnet_name, "200.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.0/24") + check_remove_routes_advertisement(dvs, "200.100.1.0/24") + + # Remove tunnel route 1 + 
delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) + delete_vnet_routes(dvs, "200.100.1.1/32", vnet_name) + + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["200.100.1.1/32"]) + + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_state_db_routes(dvs, vnet_name, "200.100.1.1/32") + + check_remove_routes_advertisement(dvs, "100.100.1.0/24") + check_remove_routes_advertisement(dvs, "200.100.1.0/24") + + + # Confirm the monitor sessions are removed + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.1") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.2") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.3") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.4") + + vnet_obj.check_custom_monitor_deleted(dvs, "200.100.1.1/32", "9.1.0.1") + vnet_obj.check_custom_monitor_deleted(dvs, "200.100.1.1/32", "9.1.0.2") + vnet_obj.check_custom_monitor_deleted(dvs, "200.100.1.1/32", "9.1.0.3") + vnet_obj.check_custom_monitor_deleted(dvs, "200.100.1.1/32", "9.1.0.4") + + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 20 - Test for Single enpoint priority vnet tunnel routes. Test primary secondary switchover. 
+ ''' + def test_vnet_orch_20(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + tunnel_name = 'tunnel_20' + vnet_name = 'Vnet20' + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10020', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10020') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.1.0.1,9.1.0.2', ep_monitor='9.1.0.1,9.1.0.2', primary ='9.1.0.1', profile="Test_profile", monitoring='custom', adv_prefix='100.100.1.0/24') + + # default monitor session status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.0/24") + + # Route should be properly configured when all monitor session states go up. Only primary Endpoints should be in use. 
+ update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'up') + time.sleep(2) + nhids = get_all_created_entries(asic_db, vnet_obj.ASIC_NEXT_HOP,set()) + tbl_nh = swsscommon.Table(asic_db, vnet_obj.ASIC_NEXT_HOP) + nexthops = dict() + for nhid in nhids: + status, nh_fvs = tbl_nh.get(nhid) + nh_fvs = dict(nh_fvs) + for key in nh_fvs.keys(): + if key == 'SAI_NEXT_HOP_ATTR_IP': + nexthops[nh_fvs[key]] = nhid + assert len(nexthops.keys()) == 1 + + route = get_created_entries(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, vnet_obj.routes, 1) + check_object(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, route[0], + { + "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": nexthops['9.1.0.1'], + } + ) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'down') + time.sleep(2) + + route = get_created_entries(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, vnet_obj.routes, 1) + check_object(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, route[0], + { + "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": nexthops['9.1.0.1'], + } + ) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'down') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'up') + + time.sleep(2) + + nhids = get_all_created_entries(asic_db, vnet_obj.ASIC_NEXT_HOP,set()) + tbl_nh = swsscommon.Table(asic_db, vnet_obj.ASIC_NEXT_HOP) + nexthops = dict() + for nhid in nhids: + status, nh_fvs = tbl_nh.get(nhid) + nh_fvs = dict(nh_fvs) + for key in nh_fvs.keys(): + if key == 'SAI_NEXT_HOP_ATTR_IP': + nexthops[nh_fvs[key]] = nhid + assert len(nexthops.keys()) == 1 + + route = get_created_entries(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, vnet_obj.routes, 1) + check_object(asic_db, 
vnet_obj.ASIC_ROUTE_ENTRY, route[0], + { + "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": nexthops['9.1.0.2'], + } + ) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.2']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'up') + time.sleep(2) + + nhids = get_all_created_entries(asic_db, vnet_obj.ASIC_NEXT_HOP,set()) + tbl_nh = swsscommon.Table(asic_db, vnet_obj.ASIC_NEXT_HOP) + nexthops = dict() + for nhid in nhids: + status, nh_fvs = tbl_nh.get(nhid) + nh_fvs = dict(nh_fvs) + for key in nh_fvs.keys(): + if key == 'SAI_NEXT_HOP_ATTR_IP': + nexthops[nh_fvs[key]] = nhid + assert len(nexthops.keys()) == 1 + + route = get_created_entries(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, vnet_obj.routes, 1) + check_object(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, route[0], + { + "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": nexthops['9.1.0.1'], + } + ) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'down') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'down') + + time.sleep(2) + + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "200.100.1.0/24") + + + # Remove tunnel route 1 + delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) + + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.0/24") + + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.1") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.2") + + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 21 - 
Test for priority vxlan tunnel with adv_prefix, adv profile. test route re-addition, route update, primary seocndary swap. + ''' + def test_vnet_orch_21(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_21' + vnet_name = "Vnet21" + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10021', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10021') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') + vnet_obj.fetch_exist_entries(dvs) + + #Add first Route + create_vnet_routes(dvs, "fd:10:10::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::3,fd:10:1::4', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3,fd:10:2::4', profile = "test_prf", primary ='fd:10:1::3,fd:10:1::4',monitoring='custom', adv_prefix="fd:10:10::/64") + update_monitor_session_state(dvs, 'fd:10:10::1/128', 'fd:10:2::1', 'up') + update_monitor_session_state(dvs, 'fd:10:10::1/128', 'fd:10:2::2', 'up') + + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1','fd:10:1::2'], tunnel_name, prefix="fd:10:10::1/128") + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1,fd:10:1::2']) + check_routes_advertisement(dvs, "fd:10:10::/64", "test_prf") + + #add 2nd route + create_vnet_routes(dvs, "fd:10:10::21/128", vnet_name, 'fd:11:1::1,fd:11:1::2,fd:11:1::3,fd:11:1::4', ep_monitor='fd:11:2::1,fd:11:2::2,fd:11:2::3,fd:11:2::4', profile = "test_prf", primary ='fd:11:1::1,fd:11:1::2',monitoring='custom', adv_prefix='fd:10:10::/64') + update_monitor_session_state(dvs, 'fd:10:10::21/128', 'fd:11:2::1', 'up') + update_monitor_session_state(dvs, 'fd:10:10::21/128', 'fd:11:2::2', 'up') + update_monitor_session_state(dvs, 'fd:10:10::21/128', 'fd:11:2::3', 'up') + update_monitor_session_state(dvs, 'fd:10:10::21/128', 
'fd:11:2::4', 'up') + + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['fd:11:1::1','fd:11:1::2'], tunnel_name, route_ids=route1, prefix="fd:10:10::21/128") + check_state_db_routes(dvs, vnet_name, "fd:10:10::21/128", ['fd:11:1::1,fd:11:1::2']) + check_routes_advertisement(dvs, "fd:10:10::/64", "test_prf") + + #remove first route + delete_vnet_routes(dvs, "fd:10:10::1/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["fd:10:10::1/128"]) + check_remove_state_db_routes(dvs, 'Vnet12', "fd:10:10::1/128") + + #adv should still be up. + check_routes_advertisement(dvs, "fd:10:10::/64") + + #add 3rd route + create_vnet_routes(dvs, "fd:10:10::31/128", vnet_name, 'fd:11:1::1,fd:11:1::2,fd:11:1::3,fd:11:1::4', ep_monitor='fd:11:2::1,fd:11:2::2,fd:11:2::3,fd:11:2::4', profile = "test_prf", primary ='fd:11:1::1,fd:11:1::2',monitoring='custom', adv_prefix='fd:10:10::/64') + update_monitor_session_state(dvs, 'fd:10:10::31/128', 'fd:11:2::1', 'up') + update_monitor_session_state(dvs, 'fd:10:10::31/128', 'fd:11:2::2', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['fd:11:1::1','fd:11:1::2'], tunnel_name, route_ids=route1, prefix="fd:10:10::31/128") + check_state_db_routes(dvs, vnet_name, "fd:10:10::31/128", ['fd:11:1::1,fd:11:1::2']) + check_routes_advertisement(dvs, "fd:10:10::/64", "test_prf") + + #delete 2nd route + delete_vnet_routes(dvs, "fd:10:10::21/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["fd:10:10::21/128"]) + check_remove_state_db_routes(dvs, 'Vnet12', "fd:10:10::21/128") + + #adv should still be up. + check_routes_advertisement(dvs, "fd:10:10::/64") + + #remove 3rd route + delete_vnet_routes(dvs, "fd:10:10::31/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["fd:10:10::31/128"]) + check_remove_state_db_routes(dvs, 'Vnet12', "fd:10:10::31/128") + + #adv should be gone. 
+ check_remove_routes_advertisement(dvs, "fd:10:10::/64") + delete_vnet_entry(dvs,vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 22 - Test for vxlan custom monitoring with adv_prefix. Add route twice and change nexthops case + ''' + def test_vnet_orch_22(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_22' + vnet_name = "Vnet22" + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.3') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10022') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.3') + + vnet_obj.fetch_exist_entries(dvs) + #Add first Route + create_vnet_routes(dvs, "100.100.1.11/32", vnet_name, '19.0.0.1,19.0.0.2,19.0.0.3', ep_monitor='19.1.0.1,19.1.0.2,19.1.0.3', profile = "test_prf", primary ='19.0.0.1',monitoring='custom', adv_prefix='100.100.1.0/24') + update_monitor_session_state(dvs, '100.100.1.11/32', '19.1.0.1', 'up') + time.sleep(2) + vnet_obj.check_vnet_routes(dvs, vnet_name, '19.0.0.1', tunnel_name) + check_state_db_routes(dvs, vnet_name, "100.100.1.11/32", ['19.0.0.1']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + + #Add first Route again + create_vnet_routes(dvs, "100.100.1.11/32", vnet_name, '19.0.0.1,19.0.0.2,19.0.0.3', ep_monitor='19.1.0.1,19.1.0.2,19.1.0.3', profile = "test_prf", primary ='19.0.0.1',monitoring='custom', adv_prefix='100.100.1.0/24') + check_state_db_routes(dvs, vnet_name, "100.100.1.11/32", ['19.0.0.1']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + + #remove first route + delete_vnet_routes(dvs, "100.100.1.11/32", vnet_name) + 
vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.11/32"]) + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.11/32") + + #adv should be gone. + check_remove_routes_advertisement(dvs, "100.100.1.0/24") + + #add 2nd route + create_vnet_routes(dvs, "100.100.1.57/32", vnet_name, '5.0.0.1,5.0.0.2,5.0.0.3,5.0.0.4', ep_monitor='5.1.0.1,5.1.0.2,5.1.0.3,5.1.0.4', profile = "test_prf", primary ='5.0.0.1,5.0.0.2',monitoring='custom', adv_prefix='100.100.1.0/24') + update_monitor_session_state(dvs, '100.100.1.57/32', '5.1.0.1', 'up') + update_monitor_session_state(dvs, '100.100.1.57/32', '5.1.0.2', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['5.0.0.1','5.0.0.2'], tunnel_name, prefix="100.100.1.57/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.57/32", ['5.0.0.1,5.0.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + + #modify 2nd route switch primary with secondary + create_vnet_routes(dvs, "100.100.1.57/32", vnet_name, '5.0.0.1,5.0.0.2,5.0.0.3,5.0.0.4', ep_monitor='5.1.0.1,5.1.0.2,5.1.0.3,5.1.0.4', profile = "test_prf", primary ='5.0.0.3,5.0.0.4',monitoring='custom', adv_prefix='100.100.1.0/24') + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['5.0.0.1','5.0.0.2'], tunnel_name, route_ids=route1, prefix="100.100.1.57/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.57/32", ['5.0.0.1','5.0.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + + #delete 2nd route + delete_vnet_routes(dvs, "100.100.1.57/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.57/32"]) + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.57/32") + #adv should be gone. 
+ check_remove_routes_advertisement(dvs, "100.100.1.0/24") + + #add 3rd route + create_vnet_routes(dvs, "100.100.1.67/32", vnet_name, '5.0.0.1,5.0.0.2,5.0.0.3,5.0.0.4', ep_monitor='5.1.0.1,5.1.0.2,5.1.0.3,5.1.0.4', profile = "test_prf", primary ='5.0.0.1,5.0.0.2',monitoring='custom', adv_prefix='100.100.1.0/24') + update_monitor_session_state(dvs, '100.100.1.67/32', '5.1.0.1', 'up') + update_monitor_session_state(dvs, '100.100.1.67/32', '5.1.0.2', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['5.0.0.1','5.0.0.2'], tunnel_name, prefix="100.100.1.67/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.67/32", ['5.0.0.1,5.0.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + + #modify 3rd route next hops to secondary + create_vnet_routes(dvs, "100.100.1.67/32", vnet_name, '5.0.0.1,5.0.0.2,5.0.0.3,5.0.0.4', ep_monitor='5.1.0.1,5.1.0.2,5.1.0.3,5.1.0.4', profile = "test_prf", primary ='5.0.0.3,5.0.0.4',monitoring='custom', adv_prefix='100.100.1.0/24') + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['5.0.0.1','5.0.0.2'], tunnel_name, route_ids=route1, prefix="100.100.1.67/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.67/32", ['5.0.0.1','5.0.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + + #modify 3rd route next hops to a new set. 
+ create_vnet_routes(dvs, "100.100.1.67/32", vnet_name, '5.0.0.5,5.0.0.6,5.0.0.7,5.0.0.8', ep_monitor='5.1.0.5,5.1.0.6,5.1.0.7,5.1.0.8', profile = "test_prf", primary ='5.0.0.5,5.0.0.6',monitoring='custom', adv_prefix='100.100.1.0/24') + update_monitor_session_state(dvs, '100.100.1.67/32', '5.1.0.5', 'up') + update_monitor_session_state(dvs, '100.100.1.67/32', '5.1.0.6', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['5.0.0.5','5.0.0.6'], tunnel_name, route_ids=route1, prefix="100.100.1.67/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.67/32", ['5.0.0.5,5.0.0.6']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + + update_monitor_session_state(dvs, '100.100.1.67/32', '5.1.0.7', 'up') + update_monitor_session_state(dvs, '100.100.1.67/32', '5.1.0.8', 'up') + + create_vnet_routes(dvs, "100.100.1.67/32", vnet_name, '5.0.0.5,5.0.0.6,5.0.0.7,5.0.0.8', ep_monitor='5.1.0.5,5.1.0.6,5.1.0.7,5.1.0.8', profile = "test_prf", primary ='5.0.0.7,5.0.0.8',monitoring='custom', adv_prefix='100.100.1.0/24') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['5.0.0.7','5.0.0.8'], tunnel_name, route_ids=route1, prefix="100.100.1.67/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.67/32", ['5.0.0.7,5.0.0.8']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + + #delete 3rd route + delete_vnet_routes(dvs, "100.100.1.67/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.67/32"]) + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.67/32") + #adv should be gone. 
+ check_remove_routes_advertisement(dvs, "100.100.1.0/24") + + #Add priority route with no secondary enpoints + create_vnet_routes(dvs, "100.100.1.71/32", vnet_name, '19.0.0.1,19.0.0.2', ep_monitor='19.0.0.1,19.0.0.2', profile = "test_prf", primary ='19.0.0.1,19.0.0.2',monitoring='custom', adv_prefix='100.100.1.0/24') + update_monitor_session_state(dvs, '100.100.1.71/32', '19.0.0.1', 'up') + update_monitor_session_state(dvs, '100.100.1.71/32', '19.0.0.2', 'up') + + #verify that no BFD sessions are created. + check_del_bfd_session(dvs, ['19.0.0.1']) + check_del_bfd_session(dvs, ['19.0.0.2']) + time.sleep(2) + check_state_db_routes(dvs, vnet_name, "100.100.1.71/32", ['19.0.0.1,19.0.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + + update_monitor_session_state(dvs, '100.100.1.71/32', '19.0.0.1', 'down') + check_state_db_routes(dvs, vnet_name, "100.100.1.71/32", ['19.0.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + + update_monitor_session_state(dvs, '100.100.1.71/32', '19.0.0.2', 'down') + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.71/32") + + #remove first route + delete_vnet_routes(dvs, "100.100.1.71/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.71/32"]) + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.71/32") + + delete_vnet_entry(dvs,vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 23 - Test for vxlan custom monitoring. CHanging the overlay_dmac of the Vnet on the fly. 
+ ''' + def test_vnet_orch_23(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_22' + vnet_name = "Vnet22" + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.3') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + delete_vnet_entry(dvs,vnet_name) + + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:77") + delete_vnet_entry(dvs,vnet_name) + + #update the Dmac of the vnet before adding any routes. + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:77") + + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10022') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.3') + + vnet_obj.fetch_exist_entries(dvs) + #Add first Route + create_vnet_routes(dvs, "100.100.1.11/32", vnet_name, '19.0.0.1,19.0.0.2,19.0.0.3', ep_monitor='19.1.0.1,19.1.0.2,19.1.0.3', profile = "test_prf", primary ='19.0.0.1',monitoring='custom', adv_prefix='100.100.1.0/24') + #verify the appdb entries. + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.1", "vxlan", "22:33:33:44:44:77") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.2", "vxlan", "22:33:33:44:44:77") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.3", "vxlan", "22:33:33:44:44:77") + + #update the Dmac after a route is added. + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:88") + + #verify the appdb entries. 
+ vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.1", "vxlan", "22:33:33:44:44:88") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.2", "vxlan", "22:33:33:44:44:88") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.3", "vxlan", "22:33:33:44:44:88") + + #bring up an enpoint. + update_monitor_session_state(dvs, '100.100.1.11/32', '19.1.0.1', 'up') + + #verify the appdb entries. + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.1", "vxlan", "22:33:33:44:44:88") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.2", "vxlan", "22:33:33:44:44:88") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.3", "vxlan", "22:33:33:44:44:88") + + #update the Dmac to empty. This should have no impact. + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="") + + #verify the appdb entries. + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.1", "vxlan", "22:33:33:44:44:88") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.2", "vxlan", "22:33:33:44:44:88") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.3", "vxlan", "22:33:33:44:44:88") + + #remove first route + delete_vnet_routes(dvs, "100.100.1.11/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.11/32"]) + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.11/32") + + #make sure that the app db entries are removed. + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.1") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.2") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.3") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.4") + time.sleep(2) + + #bring down an enpoint. 
+ update_monitor_session_state(dvs, '100.100.1.11/32', '19.1.0.1', 'down') + + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + + #Add first Route again + create_vnet_routes(dvs, "100.100.1.11/32", vnet_name, '19.0.0.1,19.0.0.2,19.0.0.3', ep_monitor='19.1.0.1,19.1.0.2,19.1.0.3', profile = "test_prf", primary ='19.0.0.1',monitoring='custom', adv_prefix='100.100.1.0/24') + + #bring up the endpoint. + update_monitor_session_state(dvs, '100.100.1.11/32', '19.1.0.1', 'up') + + # The default Vnet setting advertises the prefix. + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + + #verify the appdb entries. + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.1", "vxlan", "22:33:33:44:44:66") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.2", "vxlan", "22:33:33:44:44:66") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.3", "vxlan", "22:33:33:44:44:66") + + #remove first route + delete_vnet_routes(dvs, "100.100.1.11/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.11/32"]) + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.11/32") + + #make sure that the app db entries are removed. + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.1") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.2") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.3") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.4") + time.sleep(2) + delete_vnet_entry(dvs,vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 24 - Test duplicate route addition and removal. 
+ ''' + def test_vnet_orch_24(self, dvs, testlog): + self.setup_db(dvs) + self.clear_srv_config(dvs) + + vnet_obj = self.get_vnet_obj() + vnet_obj.fetch_exist_entries(dvs) + + # create vxlan tunnel and vnet in default vrf + tunnel_name = 'tunnel_24' + create_vxlan_tunnel(dvs, tunnel_name, '10.10.10.10') + create_vnet_entry(dvs, 'Vnet_2000', tunnel_name, '2000', "", 'default') + + vnet_obj.check_default_vnet_entry(dvs, 'Vnet_2000') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_2000', '2000') + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '10.10.10.10') + vnet_obj.fetch_exist_entries(dvs) + + # create vnet route + create_vnet_routes(dvs, "100.100.1.0/24", 'Vnet_2000', '10.10.10.3') + vnet_obj.check_vnet_routes(dvs, 'Vnet_2000', '10.10.10.3', tunnel_name) + check_state_db_routes(dvs, 'Vnet_2000', "100.100.1.0/24", ['10.10.10.3']) + time.sleep(2) + + # create l3 interface + self.create_l3_intf("Ethernet0", "") + + # set ip address + self.add_ip_address("Ethernet0", "10.10.10.1/24") + + # bring up interface + self.set_admin_status("Ethernet0", "up") + + # set ip address and default route + dvs.servers[0].runcmd("ip address add 10.10.10.3/24 dev eth0") + dvs.servers[0].runcmd("ip route add default via 10.10.10.1") + + marker = dvs.add_log_marker("/var/log/syslog") + time.sleep(2) + + # add another route for same prefix as vnet route + dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 100.100.1.0/24 10.10.10.3\"") + + # check application database + self.pdb.wait_for_entry("ROUTE_TABLE", "100.100.1.0/24") + + # check ASIC route database + self.check_route_entries(["100.100.1.0/24"]) + + log_string = "Encountered failure in create operation, exiting orchagent, SAI API: SAI_API_ROUTE, status: SAI_STATUS_NOT_EXECUTED" + # check for absence of log_string in syslog + check_syslog(dvs, marker, log_string) + + # remove route entry + dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route 100.100.1.0/24 10.10.10.3\"") + + # delete vnet route + 
delete_vnet_routes(dvs, "100.100.1.0/24", 'Vnet_2000') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2000') + check_remove_state_db_routes(dvs, 'Vnet_2000', "100.100.1.0/24") + + # delete vnet + delete_vnet_entry(dvs, 'Vnet_2000') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet_2000') + + # delete vxlan tunnel + delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 25 - Test for BFD TSA and TSB behaviour within overlay tunnel routes. + ''' + def test_vnet_orch_25(self, dvs, testlog): + # This test creates a vnet route with BFD monitoring.This followd by application of TSA and absence of BFD sessions + # is verified. Following the removal of TSA the Vnet route is verified to be up. + vnet_obj = self.get_vnet_obj() + tunnel_name = 'tunnel_25' + + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + create_vnet_entry(dvs, 'Vnet25', tunnel_name, '10025', "") + + vnet_obj.check_vnet_entry(dvs, 'Vnet25') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet25', '10025') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "125.100.1.1/32", 'Vnet25', '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, 'Vnet25', ["125.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet25', "125.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "125.100.1.1/32") + + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, '9.1.0.1', 'Up') + update_bfd_session_state(dvs, '9.1.0.2', 'Up') + update_bfd_session_state(dvs, '9.1.0.3', 'Up') + time.sleep(2) + + # make sure the route is up. 
+ route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet25', ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet25', "125.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "125.100.1.1/32") + + # TSA would bring all bfd sessions down. + set_tsa(dvs) + time.sleep(2) + + # Route should be removed. + vnet_obj.check_del_vnet_routes(dvs, 'Vnet25', ["125.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet25', "125.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "125.100.1.1/32") + + # Clearing TSA should bring the route back. + clear_tsa(dvs) + time.sleep(2) + + update_bfd_session_state(dvs, '9.1.0.1', 'Up') + update_bfd_session_state(dvs, '9.1.0.2', 'Up') + update_bfd_session_state(dvs, '9.1.0.3', 'Up') + time.sleep(2) + + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet25', ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, 'Vnet25', "125.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "125.100.1.1/32") + + # Remove tunnel route + delete_vnet_routes(dvs, "125.100.1.1/32", 'Vnet25') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet25', ["125.100.1.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet25', "125.100.1.1/32") + check_remove_routes_advertisement(dvs, "125.100.1.1/32") + + # Check the corresponding nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs + # Check the BFD session specific to the endpoint group is removed while others exist + check_del_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3']) + + delete_vnet_entry(dvs, 'Vnet25') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet25') + delete_vxlan_tunnel(dvs, tunnel_name) # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module
tear-down before retrying diff --git a/tests/test_warm_reboot.py b/tests/test_warm_reboot.py index 9447dc55ed..c0e4117f4b 100644 --- a/tests/test_warm_reboot.py +++ b/tests/test_warm_reboot.py @@ -60,6 +60,31 @@ def check_port_oper_status(appl_db, port_name, state): break assert oper_status == state +def check_port_host_tx_ready_status(state_db, port_name, status): + portTable = swsscommon.Table(state_db, swsscommon.STATE_PORT_TABLE_NAME) + (status, fvs) = portTable.get(port_name) + + assert status == True + + assert "host_tx_ready" in [fv[0] for fv in fvs] + for fv in fvs: + if fv[0] == "host_tx_ready": + assert fv[1] == "true" if status == "up" else "false" + +def update_host_tx_ready_status(dvs, port_id, switch_id, admin_state): + host_tx_ready = "SAI_PORT_HOST_TX_READY_STATUS_READY" if admin_state == "up" else "SAI_PORT_HOST_TX_READY_STATUS_NOT_READY" + ntf = swsscommon.NotificationProducer(dvs.adb, "NOTIFICATIONS") + fvp = swsscommon.FieldValuePairs() + ntf_data = "[{\"host_tx_ready_status\":\""+host_tx_ready+"\",\"port_id\":\""+port_id+"\",\"switch_id\":\""+switch_id+"\"}]" + ntf.send("port_host_tx_ready", ntf_data, fvp) + +def get_port_id(dvs, port_name): + count_db = swsscommon.DBConnector(2, dvs.redis_sock, 0) + port_name_map = swsscommon.Table(count_db, "COUNTERS_PORT_NAME_MAP") + status, returned_value = port_name_map.hget("", port_name) + assert status == True + return returned_value + # function to check the restore count incremented by 1 for a single process def swss_app_check_RestoreCount_single(state_db, restore_count, name): warmtbl = swsscommon.Table(state_db, swsscommon.STATE_WARM_RESTART_TABLE_NAME) @@ -256,6 +281,8 @@ def warm_restart_timer_set(dvs, app, timer, val): class TestWarmReboot(object): def test_PortSyncdWarmRestart(self, dvs, testlog): + dvs.setup_db() + switch_id = dvs.getSwitchOid() conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) appl_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) @@ 
-294,6 +321,13 @@ def test_PortSyncdWarmRestart(self, dvs, testlog): check_port_oper_status(appl_db, "Ethernet16", "up") check_port_oper_status(appl_db, "Ethernet20", "up") + update_host_tx_ready_status(dvs, get_port_id(dvs, "Ethernet16") , switch_id, "up") + update_host_tx_ready_status(dvs, get_port_id(dvs, "Ethernet20") , switch_id, "up") + + # Ethernet port host_tx_ready status should be "true" + check_port_host_tx_ready_status(state_db, "Ethernet16", "up") + check_port_host_tx_ready_status(state_db, "Ethernet20", "up") + # Ping should work between servers via vs vlan interfaces ping_stats = dvs.servers[4].runcmd("ping -c 1 11.0.0.10") time.sleep(1) @@ -337,6 +371,13 @@ def test_PortSyncdWarmRestart(self, dvs, testlog): check_port_oper_status(appl_db, "Ethernet20", "up") check_port_oper_status(appl_db, "Ethernet24", "down") + update_host_tx_ready_status(dvs, get_port_id(dvs, "Ethernet16") , switch_id, "up") + update_host_tx_ready_status(dvs, get_port_id(dvs, "Ethernet20") , switch_id, "up") + update_host_tx_ready_status(dvs, get_port_id(dvs, "Ethernet24") , switch_id, "down") + + check_port_host_tx_ready_status(state_db, "Ethernet16", "up") + check_port_host_tx_ready_status(state_db, "Ethernet20", "up") + check_port_host_tx_ready_status(state_db, "Ethernet24", "down") swss_app_check_RestoreCount_single(state_db, restore_count, "portsyncd") @@ -891,23 +932,23 @@ def test_OrchagentWarmRestartReadyCheck(self, dvs, testlog): time.sleep(1) # Should fail, since neighbor for next 20.0.0.1 has not been not resolved yet - (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check") + (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check", include_stderr=False) assert result == "RESTARTCHECK failed\n" # Should succeed, the option for skipPendingTaskCheck -s and noFreeze -n have been provided. # Wait up to 500 milliseconds for response from orchagent. Default wait time is 1000 milliseconds. 
- (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check -n -s -w 500") + (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check -n -s -w 500", include_stderr=False) assert result == "RESTARTCHECK succeeded\n" # Remove unfinished routes ps._del("3.3.3.0/24") time.sleep(1) - (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check") + (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check", include_stderr=False) assert result == "RESTARTCHECK succeeded\n" # Should fail since orchagent has been frozen at last step. - (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check -n -s -w 500") + (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check -n -s -w 500", include_stderr=False) assert result == "RESTARTCHECK failed\n" # Cleaning previously pushed route-entry to ease life of subsequent testcases. @@ -1049,7 +1090,7 @@ def test_swss_port_state_syncup(self, dvs, testlog): orchStateCount += 1; # Only WARM_RESTART_TABLE|orchagent state=reconciled operation may exist after port oper status change. 
- assert orchStateCount == 1 + assert orchStateCount == 2 #clean up arp dvs.runcmd("arp -d 10.0.0.1") @@ -2397,7 +2438,7 @@ def test_TunnelMgrdWarmRestart(self, dvs): "ecn_mode": "standard", "ttl_mode": "pipe" } - + pubsub = dvs.SubscribeAppDbObject(tunnel_table) dvs.runcmd("config warm_restart enable swss") diff --git a/tests/test_zmq.py b/tests/test_zmq.py new file mode 100644 index 0000000000..8a3dc49894 --- /dev/null +++ b/tests/test_zmq.py @@ -0,0 +1,104 @@ +from swsscommon import swsscommon + +from dash_api.appliance_pb2 import * +from dash_api.vnet_pb2 import * +from dash_api.eni_pb2 import * +from dash_api.route_pb2 import * +from dash_api.route_rule_pb2 import * +from dash_api.vnet_mapping_pb2 import * +from dash_api.route_type_pb2 import * +from dash_api.types_pb2 import * + +import typing +import time +import binascii +import uuid +import ipaddress +import sys +import socket +import logging +import pytest + +logging.basicConfig(level=logging.INFO) +zmq_logger = logging.getLogger(__name__) + +DVS_ENV = ["HWSKU=DPU-2P"] +NUM_PORTS = 2 + +class Table(object): + def __init__(self, database, table_name: str): + self.table_name = table_name + self.table = swsscommon.Table(database.db_connection, self.table_name) + + def __getitem__(self, key: str): + exists, result = self.table.get(str(key)) + if not exists: + return None + else: + return dict(result) + + def get_keys(self): + return self.table.getKeys() + + def get_newly_created_oid(self, old_oids): + new_oids = self.asic_db.wait_for_n_keys(table, len(old_oids) + 1) + oid = [ids for ids in new_oids if ids not in old_oids] + return oid[0] + +class DashZmq(object): + def __init__(self, dvs): + self.dvs = dvs + self.asic_direction_lookup_table = Table( + self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_DIRECTION_LOOKUP_ENTRY") + self.asic_vip_table = Table( + self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_VIP_ENTRY") + +class TestZmqDash(object): + @pytest.fixture(scope="class") + def 
enable_orchagent_zmq(self, dvs): + # change orchagent to use ZMQ + dvs.runcmd("cp /usr/bin/orchagent.sh /usr/bin/orchagent.sh_zmq_ut_backup") + dvs.runcmd("sed -i.bak 's/\/usr\/bin\/orchagent /\/usr\/bin\/orchagent -q tcp:\/\/127.0.0.1:8100 /g' /usr/bin/orchagent.sh") + dvs.stop_swss() + dvs.start_swss() + + process_statue = dvs.runcmd("ps -ef") + zmq_logger.debug("Process status: {}".format(process_statue)) + + yield + + # revert change + dvs.runcmd("cp /usr/bin/orchagent.sh_zmq_ut_backup /usr/bin/orchagent.sh") + dvs.stop_swss() + dvs.start_swss() + + @pytest.mark.usefixtures("enable_orchagent_zmq") + def test_appliance(self, dvs): + # upload test script to test container and create an appliance with it + dvs.copy_file("/", "create_appliance.py") + dvs.runcmd(['sh', '-c', "python3 create_appliance.py {}".format(1234)]) + time.sleep(3) + + asic_direction_lookup_table = Table( + dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_DIRECTION_LOOKUP_ENTRY") + direction_entries = asic_direction_lookup_table.get_keys() + zmq_logger.info("Keys from asic_direction_lookup_table: {}".format(direction_entries)) + + assert direction_entries + fvs = asic_direction_lookup_table[direction_entries[0]] + zmq_logger.info("Data from asic_direction_lookup_table: {}={}".format(direction_entries[0], fvs)) + for fv in fvs.items(): + if fv[0] == "SAI_DIRECTION_LOOKUP_ENTRY_ATTR_ACTION": + assert fv[1] == "SAI_DIRECTION_LOOKUP_ENTRY_ACTION_SET_OUTBOUND_DIRECTION" + + asic_vip_table = Table( + dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_VIP_ENTRY") + vip_entries = asic_vip_table.get_keys() + zmq_logger.info("Keys from asic_vip_table: {}".format(direction_entries)) + + assert vip_entries + fvs = asic_vip_table[vip_entries[0]] + zmq_logger.info("Data from asic_vip_table: {}={}".format(vip_entries[0], fvs)) + for fv in fvs.items(): + if fv[0] == "SAI_VIP_ENTRY_ATTR_ACTION": + assert fv[1] == "SAI_VIP_ENTRY_ACTION_ACCEPT" diff --git a/tests/virtual_chassis/1/default_config.json 
b/tests/virtual_chassis/1/default_config.json index 3e0c3fce72..88769c9ce6 100644 --- a/tests/virtual_chassis/1/default_config.json +++ b/tests/virtual_chassis/1/default_config.json @@ -27,672 +27,672 @@ } }, "SYSTEM_PORT": { - "Linecard1|Ethernet0": { + "lc1|Ethernet0": { "speed": "40000", "system_port_id": "1", "switch_id": "0", "core_index": "0", "core_port_index": "1" }, - "Linecard1|Ethernet4": { + "lc1|Asic0|Ethernet4": { "speed": "40000", "system_port_id": "2", "switch_id": "0", "core_index": "0", "core_port_index": "2" }, - "Linecard1|Ethernet8": { + "lc1|Asic0|Ethernet8": { "speed": "40000", "system_port_id": "3", "switch_id": "0", "core_index": "0", "core_port_index": "3" }, - "Linecard1|Ethernet12": { + "lc1|Asic0|Ethernet12": { "speed": "40000", "system_port_id": "4", "switch_id": "0", "core_index": "0", "core_port_index": "4" }, - "Linecard1|Ethernet16": { + "lc1|Asic0|Ethernet16": { "speed": "40000", "system_port_id": "5", "switch_id": "0", "core_index": "0", "core_port_index": "5" }, - "Linecard1|Ethernet20": { + "lc1|Asic0|Ethernet20": { "speed": "40000", "system_port_id": "6", "switch_id": "0", "core_index": "0", "core_port_index": "6" }, - "Linecard1|Ethernet24": { + "lc1|Asic0|Ethernet24": { "speed": "40000", "system_port_id": "7", "switch_id": "0", "core_index": "0", "core_port_index": "7" }, - "Linecard1|Ethernet28": { + "lc1|Asic0|Ethernet28": { "speed": "40000", "system_port_id": "8", "switch_id": "0", "core_index": "0", "core_port_index": "8" }, - "Linecard1|Ethernet32": { + "lc1|Asic0|Ethernet32": { "speed": "40000", "system_port_id": "9", "switch_id": "0", "core_index": "0", "core_port_index": "9" }, - "Linecard1|Ethernet36": { + "lc1|Asic0|Ethernet36": { "speed": "40000", "system_port_id": "10", "switch_id": "0", "core_index": "0", "core_port_index": "10" }, - "Linecard1|Ethernet40": { + "lc1|Asic0|Ethernet40": { "speed": "40000", "system_port_id": "11", "switch_id": "0", "core_index": "0", "core_port_index": "11" }, - 
"Linecard1|Ethernet44": { + "lc1|Asic0|Ethernet44": { "speed": "40000", "system_port_id": "12", "switch_id": "0", "core_index": "0", "core_port_index": "12" }, - "Linecard1|Ethernet48": { + "lc1|Asic0|Ethernet48": { "speed": "40000", "system_port_id": "13", "switch_id": "0", "core_index": "0", "core_port_index": "13" }, - "Linecard1|Ethernet52": { + "lc1|Asic0|Ethernet52": { "speed": "40000", "system_port_id": "14", "switch_id": "0", "core_index": "0", "core_port_index": "14" }, - "Linecard1|Ethernet56": { + "lc1|Asic0|Ethernet56": { "speed": "40000", "system_port_id": "15", "switch_id": "0", "core_index": "0", "core_port_index": "15" }, - "Linecard1|Ethernet60": { + "lc1|Asic0|Ethernet60": { "speed": "40000", "system_port_id": "16", "switch_id": "0", "core_index": "0", "core_port_index": "16" }, - "Linecard1|Ethernet64": { + "lc1|Asic0|Ethernet64": { "speed": "40000", "system_port_id": "17", "switch_id": "0", "core_index": "1", "core_port_index": "1" }, - "Linecard1|Ethernet68": { + "lc1|Asic0|Ethernet68": { "speed": "40000", "system_port_id": "18", "switch_id": "0", "core_index": "1", "core_port_index": "2" }, - "Linecard1|Ethernet72": { + "lc1|Asic0|Ethernet72": { "speed": "40000", "system_port_id": "19", "switch_id": "0", "core_index": "1", "core_port_index": "3" }, - "Linecard1|Ethernet76": { + "lc1|Asic0|Ethernet76": { "speed": "40000", "system_port_id": "20", "switch_id": "0", "core_index": "1", "core_port_index": "4" }, - "Linecard1|Ethernet80": { + "lc1|Asic0|Ethernet80": { "speed": "40000", "system_port_id": "21", "switch_id": "0", "core_index": "1", "core_port_index": "5" }, - "Linecard1|Ethernet84": { + "lc1|Asic0|Ethernet84": { "speed": "40000", "system_port_id": "22", "switch_id": "0", "core_index": "1", "core_port_index": "6" }, - "Linecard1|Ethernet88": { + "lc1|Asic0|Ethernet88": { "speed": "40000", "system_port_id": "23", "switch_id": "0", "core_index": "1", "core_port_index": "7" }, - "Linecard1|Ethernet92": { + "lc1|Asic0|Ethernet92": { "speed": 
"40000", "system_port_id": "24", "switch_id": "0", "core_index": "1", "core_port_index": "8" }, - "Linecard1|Ethernet96": { + "lc1|Asic0|Ethernet96": { "speed": "40000", "system_port_id": "25", "switch_id": "0", "core_index": "1", "core_port_index": "9" }, - "Linecard1|Ethernet100": { + "lc1|Asic0|Ethernet100": { "speed": "40000", "system_port_id": "26", "switch_id": "0", "core_index": "1", "core_port_index": "10" }, - "Linecard1|Ethernet104": { + "lc1|Asic0|Ethernet104": { "speed": "40000", "system_port_id": "27", "switch_id": "0", "core_index": "1", "core_port_index": "11" }, - "Linecard1|Ethernet108": { + "lc1|Asic0|Ethernet108": { "speed": "40000", "system_port_id": "28", "switch_id": "0", "core_index": "1", "core_port_index": "12" }, - "Linecard1|Ethernet112": { + "lc1|Asic0|Ethernet112": { "speed": "40000", "system_port_id": "29", "switch_id": "0", "core_index": "1", "core_port_index": "13" }, - "Linecard1|Ethernet116": { + "lc1|Asic0|Ethernet116": { "speed": "40000", "system_port_id": "30", "switch_id": "0", "core_index": "1", "core_port_index": "14" }, - "Linecard1|Ethernet120": { + "lc1|Asic0|Ethernet120": { "speed": "40000", "system_port_id": "31", "switch_id": "0", "core_index": "1", "core_port_index": "15" }, - "Linecard1|Ethernet124": { + "lc1|Asic0|Ethernet124": { "speed": "40000", "system_port_id": "32", "switch_id": "0", "core_index": "1", "core_port_index": "16" }, - "Linecard2|Ethernet0": { + "lc2|Asic0|Ethernet0": { "speed": "40000", "system_port_id": "33", "switch_id": "2", "core_index": "0", "core_port_index": "1" }, - "Linecard2|Ethernet4": { + "lc2|Asic0|Ethernet4": { "speed": "40000", "system_port_id": "34", "switch_id": "2", "core_index": "0", "core_port_index": "2" }, - "Linecard2|Ethernet8": { + "lc2|Asic0|Ethernet8": { "speed": "40000", "system_port_id": "35", "switch_id": "2", "core_index": "0", "core_port_index": "3" }, - "Linecard2|Ethernet12": { + "lc2|Asic0|Ethernet12": { "speed": "40000", "system_port_id": "36", "switch_id": "2", 
"core_index": "0", "core_port_index": "4" }, - "Linecard2|Ethernet16": { + "lc2|Asic0|Ethernet16": { "speed": "40000", "system_port_id": "37", "switch_id": "2", "core_index": "0", "core_port_index": "5" }, - "Linecard2|Ethernet20": { + "lc2|Asic0|Ethernet20": { "speed": "40000", "system_port_id": "38", "switch_id": "2", "core_index": "0", "core_port_index": "6" }, - "Linecard2|Ethernet24": { + "lc2|Asic0|Ethernet24": { "speed": "40000", "system_port_id": "39", "switch_id": "2", "core_index": "0", "core_port_index": "7" }, - "Linecard2|Ethernet28": { + "lc2|Asic0|Ethernet28": { "speed": "40000", "system_port_id": "40", "switch_id": "2", "core_index": "0", "core_port_index": "8" }, - "Linecard2|Ethernet32": { + "lc2|Asic0|Ethernet32": { "speed": "40000", "system_port_id": "41", "switch_id": "2", "core_index": "0", "core_port_index": "9" }, - "Linecard2|Ethernet36": { + "lc2|Asic0|Ethernet36": { "speed": "40000", "system_port_id": "42", "switch_id": "2", "core_index": "0", "core_port_index": "10" }, - "Linecard2|Ethernet40": { + "lc2|Asic0|Ethernet40": { "speed": "40000", "system_port_id": "43", "switch_id": "2", "core_index": "0", "core_port_index": "11" }, - "Linecard2|Ethernet44": { + "lc2|Asic0|Ethernet44": { "speed": "40000", "system_port_id": "44", "switch_id": "2", "core_index": "0", "core_port_index": "12" }, - "Linecard2|Ethernet48": { + "lc2|Asic0|Ethernet48": { "speed": "40000", "system_port_id": "45", "switch_id": "2", "core_index": "0", "core_port_index": "13" }, - "Linecard2|Ethernet52": { + "lc2|Asic0|Ethernet52": { "speed": "40000", "system_port_id": "46", "switch_id": "2", "core_index": "0", "core_port_index": "14" }, - "Linecard2|Ethernet56": { + "lc2|Asic0|Ethernet56": { "speed": "40000", "system_port_id": "47", "switch_id": "2", "core_index": "0", "core_port_index": "15" }, - "Linecard2|Ethernet60": { + "lc2|Asic0|Ethernet60": { "speed": "40000", "system_port_id": "48", "switch_id": "2", "core_index": "0", "core_port_index": "16" }, - 
"Linecard2|Ethernet64": { + "lc2|Asic0|Ethernet64": { "speed": "40000", "system_port_id": "49", "switch_id": "2", "core_index": "1", "core_port_index": "1" }, - "Linecard2|Ethernet68": { + "lc2|Asic0|Ethernet68": { "speed": "40000", "system_port_id": "50", "switch_id": "2", "core_index": "1", "core_port_index": "2" }, - "Linecard2|Ethernet72": { + "lc2|Asic0|Ethernet72": { "speed": "40000", "system_port_id": "51", "switch_id": "2", "core_index": "1", "core_port_index": "3" }, - "Linecard2|Ethernet76": { + "lc2|Asic0|Ethernet76": { "speed": "40000", "system_port_id": "52", "switch_id": "2", "core_index": "1", "core_port_index": "4" }, - "Linecard2|Ethernet80": { + "lc2|Asic0|Ethernet80": { "speed": "40000", "system_port_id": "53", "switch_id": "2", "core_index": "1", "core_port_index": "5" }, - "Linecard2|Ethernet84": { + "lc2|Asic0|Ethernet84": { "speed": "40000", "system_port_id": "54", "switch_id": "2", "core_index": "1", "core_port_index": "6" }, - "Linecard2|Ethernet88": { + "lc2|Asic0|Ethernet88": { "speed": "40000", "system_port_id": "55", "switch_id": "2", "core_index": "1", "core_port_index": "7" }, - "Linecard2|Ethernet92": { + "lc2|Asic0|Ethernet92": { "speed": "40000", "system_port_id": "56", "switch_id": "2", "core_index": "1", "core_port_index": "8" }, - "Linecard2|Ethernet96": { + "lc2|Asic0|Ethernet96": { "speed": "40000", "system_port_id": "57", "switch_id": "2", "core_index": "1", "core_port_index": "9" }, - "Linecard2|Ethernet100": { + "lc2|Asic0|Ethernet100": { "speed": "40000", "system_port_id": "58", "switch_id": "2", "core_index": "1", "core_port_index": "10" }, - "Linecard2|Ethernet104": { + "lc2|Asic0|Ethernet104": { "speed": "40000", "system_port_id": "59", "switch_id": "2", "core_index": "1", "core_port_index": "11" }, - "Linecard2|Ethernet108": { + "lc2|Asic0|Ethernet108": { "speed": "40000", "system_port_id": "60", "switch_id": "2", "core_index": "1", "core_port_index": "12" }, - "Linecard2|Ethernet112": { + "lc2|Asic0|Ethernet112": { 
"speed": "40000", "system_port_id": "61", "switch_id": "2", "core_index": "1", "core_port_index": "13" }, - "Linecard2|Ethernet116": { + "lc2|Asic0|Ethernet116": { "speed": "40000", "system_port_id": "62", "switch_id": "2", "core_index": "1", "core_port_index": "14" }, - "Linecard2|Ethernet120": { + "lc2|Asic0|Ethernet120": { "speed": "40000", "system_port_id": "63", "switch_id": "2", "core_index": "1", "core_port_index": "15" }, - "Linecard2|Ethernet124": { + "lc2|Asic0|Ethernet124": { "speed": "40000", "system_port_id": "64", "switch_id": "2", "core_index": "1", "core_port_index": "16" }, - "Linecard3|Ethernet0": { + "lc3|Asic0|Ethernet0": { "speed": "40000", "system_port_id": "65", "switch_id": "4", "core_index": "0", "core_port_index": "1" }, - "Linecard3|Ethernet4": { + "lc3|Asic0|Ethernet4": { "speed": "40000", "system_port_id": "66", "switch_id": "4", "core_index": "0", "core_port_index": "2" }, - "Linecard3|Ethernet8": { + "lc3|Asic0|Ethernet8": { "speed": "40000", "system_port_id": "67", "switch_id": "4", "core_index": "0", "core_port_index": "3" }, - "Linecard3|Ethernet12": { + "lc3|Asic0|Ethernet12": { "speed": "40000", "system_port_id": "68", "switch_id": "4", "core_index": "0", "core_port_index": "4" }, - "Linecard3|Ethernet16": { + "lc3|Asic0|Ethernet16": { "speed": "40000", "system_port_id": "69", "switch_id": "4", "core_index": "0", "core_port_index": "5" }, - "Linecard3|Ethernet20": { + "lc3|Asic0|Ethernet20": { "speed": "40000", "system_port_id": "70", "switch_id": "4", "core_index": "0", "core_port_index": "6" }, - "Linecard3|Ethernet24": { + "lc3|Asic0|Ethernet24": { "speed": "40000", "system_port_id": "71", "switch_id": "4", "core_index": "0", "core_port_index": "7" }, - "Linecard3|Ethernet28": { + "lc3|Asic0|Ethernet28": { "speed": "40000", "system_port_id": "72", "switch_id": "4", "core_index": "0", "core_port_index": "8" }, - "Linecard3|Ethernet32": { + "lc3|Asic0|Ethernet32": { "speed": "40000", "system_port_id": "73", "switch_id": "4", 
"core_index": "0", "core_port_index": "9" }, - "Linecard3|Ethernet36": { + "lc3|Asic0|Ethernet36": { "speed": "40000", "system_port_id": "74", "switch_id": "4", "core_index": "0", "core_port_index": "10" }, - "Linecard3|Ethernet40": { + "lc3|Asic0|Ethernet40": { "speed": "40000", "system_port_id": "75", "switch_id": "4", "core_index": "0", "core_port_index": "11" }, - "Linecard3|Ethernet44": { + "lc3|Asic0|Ethernet44": { "speed": "40000", "system_port_id": "76", "switch_id": "4", "core_index": "0", "core_port_index": "12" }, - "Linecard3|Ethernet48": { + "lc3|Asic0|Ethernet48": { "speed": "40000", "system_port_id": "77", "switch_id": "4", "core_index": "0", "core_port_index": "13" }, - "Linecard3|Ethernet52": { + "lc3|Asic0|Ethernet52": { "speed": "40000", "system_port_id": "78", "switch_id": "4", "core_index": "0", "core_port_index": "14" }, - "Linecard3|Ethernet56": { + "lc3|Asic0|Ethernet56": { "speed": "40000", "system_port_id": "79", "switch_id": "4", "core_index": "0", "core_port_index": "15" }, - "Linecard3|Ethernet60": { + "lc3|Asic0|Ethernet60": { "speed": "40000", "system_port_id": "80", "switch_id": "4", "core_index": "0", "core_port_index": "16" }, - "Linecard3|Ethernet64": { + "lc3|Asic0|Ethernet64": { "speed": "40000", "system_port_id": "81", "switch_id": "4", "core_index": "1", "core_port_index": "1" }, - "Linecard3|Ethernet68": { + "lc3|Asic0|Ethernet68": { "speed": "40000", "system_port_id": "82", "switch_id": "4", "core_index": "1", "core_port_index": "2" }, - "Linecard3|Ethernet72": { + "lc3|Asic0|Ethernet72": { "speed": "40000", "system_port_id": "83", "switch_id": "4", "core_index": "1", "core_port_index": "3" }, - "Linecard3|Ethernet76": { + "lc3|Asic0|Ethernet76": { "speed": "40000", "system_port_id": "84", "switch_id": "4", "core_index": "1", "core_port_index": "4" }, - "Linecard3|Ethernet80": { + "lc3|Asic0|Ethernet80": { "speed": "40000", "system_port_id": "85", "switch_id": "4", "core_index": "1", "core_port_index": "5" }, - 
"Linecard3|Ethernet84": { + "lc3|Asic0|Ethernet84": { "speed": "40000", "system_port_id": "86", "switch_id": "4", "core_index": "1", "core_port_index": "6" }, - "Linecard3|Ethernet88": { + "lc3|Asic0|Ethernet88": { "speed": "40000", "system_port_id": "87", "switch_id": "4", "core_index": "1", "core_port_index": "7" }, - "Linecard3|Ethernet92": { + "lc3|Asic0|Ethernet92": { "speed": "40000", "system_port_id": "88", "switch_id": "4", "core_index": "1", "core_port_index": "8" }, - "Linecard3|Ethernet96": { + "lc3|Asic0|Ethernet96": { "speed": "40000", "system_port_id": "89", "switch_id": "4", "core_index": "1", "core_port_index": "9" }, - "Linecard3|Ethernet100": { + "lc3|Asic0|Ethernet100": { "speed": "40000", "system_port_id": "90", "switch_id": "4", "core_index": "1", "core_port_index": "10" }, - "Linecard3|Ethernet104": { + "lc3|Asic0|Ethernet104": { "speed": "40000", "system_port_id": "91", "switch_id": "4", "core_index": "1", "core_port_index": "11" }, - "Linecard3|Ethernet108": { + "lc3|Asic0|Ethernet108": { "speed": "40000", "system_port_id": "92", "switch_id": "4", "core_index": "1", "core_port_index": "12" }, - "Linecard3|Ethernet112": { + "lc3|Asic0|Ethernet112": { "speed": "40000", "system_port_id": "93", "switch_id": "4", "core_index": "1", "core_port_index": "13" }, - "Linecard3|Ethernet116": { + "lc3|Asic0|Ethernet116": { "speed": "40000", "system_port_id": "94", "switch_id": "4", "core_index": "1", "core_port_index": "14" }, - "Linecard3|Ethernet120": { + "lc3|Asic0|Ethernet120": { "speed": "40000", "system_port_id": "95", "switch_id": "4", "core_index": "1", "core_port_index": "15" }, - "Linecard3|Ethernet124": { + "lc3|Asic0|Ethernet124": { "speed": "40000", "system_port_id": "96", "switch_id": "4", diff --git a/tests/virtual_chassis/2/default_config.json b/tests/virtual_chassis/2/default_config.json index d306c30ea3..2556706ce0 100644 --- a/tests/virtual_chassis/2/default_config.json +++ b/tests/virtual_chassis/2/default_config.json @@ -22,672 
+22,672 @@ } }, "SYSTEM_PORT": { - "Linecard1|Ethernet0": { + "lc1|Ethernet0": { "speed": "40000", "system_port_id": "1", "switch_id": "0", "core_index": "0", "core_port_index": "1" }, - "Linecard1|Ethernet4": { + "lc1|Asic0|Ethernet4": { "speed": "40000", "system_port_id": "2", "switch_id": "0", "core_index": "0", "core_port_index": "2" }, - "Linecard1|Ethernet8": { + "lc1|Asic0|Ethernet8": { "speed": "40000", "system_port_id": "3", "switch_id": "0", "core_index": "0", "core_port_index": "3" }, - "Linecard1|Ethernet12": { + "lc1|Asic0|Ethernet12": { "speed": "40000", "system_port_id": "4", "switch_id": "0", "core_index": "0", "core_port_index": "4" }, - "Linecard1|Ethernet16": { + "lc1|Asic0|Ethernet16": { "speed": "40000", "system_port_id": "5", "switch_id": "0", "core_index": "0", "core_port_index": "5" }, - "Linecard1|Ethernet20": { + "lc1|Asic0|Ethernet20": { "speed": "40000", "system_port_id": "6", "switch_id": "0", "core_index": "0", "core_port_index": "6" }, - "Linecard1|Ethernet24": { + "lc1|Asic0|Ethernet24": { "speed": "40000", "system_port_id": "7", "switch_id": "0", "core_index": "0", "core_port_index": "7" }, - "Linecard1|Ethernet28": { + "lc1|Asic0|Ethernet28": { "speed": "40000", "system_port_id": "8", "switch_id": "0", "core_index": "0", "core_port_index": "8" }, - "Linecard1|Ethernet32": { + "lc1|Asic0|Ethernet32": { "speed": "40000", "system_port_id": "9", "switch_id": "0", "core_index": "0", "core_port_index": "9" }, - "Linecard1|Ethernet36": { + "lc1|Asic0|Ethernet36": { "speed": "40000", "system_port_id": "10", "switch_id": "0", "core_index": "0", "core_port_index": "10" }, - "Linecard1|Ethernet40": { + "lc1|Asic0|Ethernet40": { "speed": "40000", "system_port_id": "11", "switch_id": "0", "core_index": "0", "core_port_index": "11" }, - "Linecard1|Ethernet44": { + "lc1|Asic0|Ethernet44": { "speed": "40000", "system_port_id": "12", "switch_id": "0", "core_index": "0", "core_port_index": "12" }, - "Linecard1|Ethernet48": { + 
"lc1|Asic0|Ethernet48": { "speed": "40000", "system_port_id": "13", "switch_id": "0", "core_index": "0", "core_port_index": "13" }, - "Linecard1|Ethernet52": { + "lc1|Asic0|Ethernet52": { "speed": "40000", "system_port_id": "14", "switch_id": "0", "core_index": "0", "core_port_index": "14" }, - "Linecard1|Ethernet56": { + "lc1|Asic0|Ethernet56": { "speed": "40000", "system_port_id": "15", "switch_id": "0", "core_index": "0", "core_port_index": "15" }, - "Linecard1|Ethernet60": { + "lc1|Asic0|Ethernet60": { "speed": "40000", "system_port_id": "16", "switch_id": "0", "core_index": "0", "core_port_index": "16" }, - "Linecard1|Ethernet64": { + "lc1|Asic0|Ethernet64": { "speed": "40000", "system_port_id": "17", "switch_id": "0", "core_index": "1", "core_port_index": "1" }, - "Linecard1|Ethernet68": { + "lc1|Asic0|Ethernet68": { "speed": "40000", "system_port_id": "18", "switch_id": "0", "core_index": "1", "core_port_index": "2" }, - "Linecard1|Ethernet72": { + "lc1|Asic0|Ethernet72": { "speed": "40000", "system_port_id": "19", "switch_id": "0", "core_index": "1", "core_port_index": "3" }, - "Linecard1|Ethernet76": { + "lc1|Asic0|Ethernet76": { "speed": "40000", "system_port_id": "20", "switch_id": "0", "core_index": "1", "core_port_index": "4" }, - "Linecard1|Ethernet80": { + "lc1|Asic0|Ethernet80": { "speed": "40000", "system_port_id": "21", "switch_id": "0", "core_index": "1", "core_port_index": "5" }, - "Linecard1|Ethernet84": { + "lc1|Asic0|Ethernet84": { "speed": "40000", "system_port_id": "22", "switch_id": "0", "core_index": "1", "core_port_index": "6" }, - "Linecard1|Ethernet88": { + "lc1|Asic0|Ethernet88": { "speed": "40000", "system_port_id": "23", "switch_id": "0", "core_index": "1", "core_port_index": "7" }, - "Linecard1|Ethernet92": { + "lc1|Asic0|Ethernet92": { "speed": "40000", "system_port_id": "24", "switch_id": "0", "core_index": "1", "core_port_index": "8" }, - "Linecard1|Ethernet96": { + "lc1|Asic0|Ethernet96": { "speed": "40000", "system_port_id": 
"25", "switch_id": "0", "core_index": "1", "core_port_index": "9" }, - "Linecard1|Ethernet100": { + "lc1|Asic0|Ethernet100": { "speed": "40000", "system_port_id": "26", "switch_id": "0", "core_index": "1", "core_port_index": "10" }, - "Linecard1|Ethernet104": { + "lc1|Asic0|Ethernet104": { "speed": "40000", "system_port_id": "27", "switch_id": "0", "core_index": "1", "core_port_index": "11" }, - "Linecard1|Ethernet108": { + "lc1|Asic0|Ethernet108": { "speed": "40000", "system_port_id": "28", "switch_id": "0", "core_index": "1", "core_port_index": "12" }, - "Linecard1|Ethernet112": { + "lc1|Asic0|Ethernet112": { "speed": "40000", "system_port_id": "29", "switch_id": "0", "core_index": "1", "core_port_index": "13" }, - "Linecard1|Ethernet116": { + "lc1|Asic0|Ethernet116": { "speed": "40000", "system_port_id": "30", "switch_id": "0", "core_index": "1", "core_port_index": "14" }, - "Linecard1|Ethernet120": { + "lc1|Asic0|Ethernet120": { "speed": "40000", "system_port_id": "31", "switch_id": "0", "core_index": "1", "core_port_index": "15" }, - "Linecard1|Ethernet124": { + "lc1|Asic0|Ethernet124": { "speed": "40000", "system_port_id": "32", "switch_id": "0", "core_index": "1", "core_port_index": "16" }, - "Linecard2|Ethernet0": { + "lc2|Asic0|Ethernet0": { "speed": "40000", "system_port_id": "33", "switch_id": "2", "core_index": "0", "core_port_index": "1" }, - "Linecard2|Ethernet4": { + "lc2|Asic0|Ethernet4": { "speed": "40000", "system_port_id": "34", "switch_id": "2", "core_index": "0", "core_port_index": "2" }, - "Linecard2|Ethernet8": { + "lc2|Asic0|Ethernet8": { "speed": "40000", "system_port_id": "35", "switch_id": "2", "core_index": "0", "core_port_index": "3" }, - "Linecard2|Ethernet12": { + "lc2|Asic0|Ethernet12": { "speed": "40000", "system_port_id": "36", "switch_id": "2", "core_index": "0", "core_port_index": "4" }, - "Linecard2|Ethernet16": { + "lc2|Asic0|Ethernet16": { "speed": "40000", "system_port_id": "37", "switch_id": "2", "core_index": "0", 
"core_port_index": "5" }, - "Linecard2|Ethernet20": { + "lc2|Asic0|Ethernet20": { "speed": "40000", "system_port_id": "38", "switch_id": "2", "core_index": "0", "core_port_index": "6" }, - "Linecard2|Ethernet24": { + "lc2|Asic0|Ethernet24": { "speed": "40000", "system_port_id": "39", "switch_id": "2", "core_index": "0", "core_port_index": "7" }, - "Linecard2|Ethernet28": { + "lc2|Asic0|Ethernet28": { "speed": "40000", "system_port_id": "40", "switch_id": "2", "core_index": "0", "core_port_index": "8" }, - "Linecard2|Ethernet32": { + "lc2|Asic0|Ethernet32": { "speed": "40000", "system_port_id": "41", "switch_id": "2", "core_index": "0", "core_port_index": "9" }, - "Linecard2|Ethernet36": { + "lc2|Asic0|Ethernet36": { "speed": "40000", "system_port_id": "42", "switch_id": "2", "core_index": "0", "core_port_index": "10" }, - "Linecard2|Ethernet40": { + "lc2|Asic0|Ethernet40": { "speed": "40000", "system_port_id": "43", "switch_id": "2", "core_index": "0", "core_port_index": "11" }, - "Linecard2|Ethernet44": { + "lc2|Asic0|Ethernet44": { "speed": "40000", "system_port_id": "44", "switch_id": "2", "core_index": "0", "core_port_index": "12" }, - "Linecard2|Ethernet48": { + "lc2|Asic0|Ethernet48": { "speed": "40000", "system_port_id": "45", "switch_id": "2", "core_index": "0", "core_port_index": "13" }, - "Linecard2|Ethernet52": { + "lc2|Asic0|Ethernet52": { "speed": "40000", "system_port_id": "46", "switch_id": "2", "core_index": "0", "core_port_index": "14" }, - "Linecard2|Ethernet56": { + "lc2|Asic0|Ethernet56": { "speed": "40000", "system_port_id": "47", "switch_id": "2", "core_index": "0", "core_port_index": "15" }, - "Linecard2|Ethernet60": { + "lc2|Asic0|Ethernet60": { "speed": "40000", "system_port_id": "48", "switch_id": "2", "core_index": "0", "core_port_index": "16" }, - "Linecard2|Ethernet64": { + "lc2|Asic0|Ethernet64": { "speed": "40000", "system_port_id": "49", "switch_id": "2", "core_index": "1", "core_port_index": "1" }, - "Linecard2|Ethernet68": { + 
"lc2|Asic0|Ethernet68": { "speed": "40000", "system_port_id": "50", "switch_id": "2", "core_index": "1", "core_port_index": "2" }, - "Linecard2|Ethernet72": { + "lc2|Asic0|Ethernet72": { "speed": "40000", "system_port_id": "51", "switch_id": "2", "core_index": "1", "core_port_index": "3" }, - "Linecard2|Ethernet76": { + "lc2|Asic0|Ethernet76": { "speed": "40000", "system_port_id": "52", "switch_id": "2", "core_index": "1", "core_port_index": "4" }, - "Linecard2|Ethernet80": { + "lc2|Asic0|Ethernet80": { "speed": "40000", "system_port_id": "53", "switch_id": "2", "core_index": "1", "core_port_index": "5" }, - "Linecard2|Ethernet84": { + "lc2|Asic0|Ethernet84": { "speed": "40000", "system_port_id": "54", "switch_id": "2", "core_index": "1", "core_port_index": "6" }, - "Linecard2|Ethernet88": { + "lc2|Asic0|Ethernet88": { "speed": "40000", "system_port_id": "55", "switch_id": "2", "core_index": "1", "core_port_index": "7" }, - "Linecard2|Ethernet92": { + "lc2|Asic0|Ethernet92": { "speed": "40000", "system_port_id": "56", "switch_id": "2", "core_index": "1", "core_port_index": "8" }, - "Linecard2|Ethernet96": { + "lc2|Asic0|Ethernet96": { "speed": "40000", "system_port_id": "57", "switch_id": "2", "core_index": "1", "core_port_index": "9" }, - "Linecard2|Ethernet100": { + "lc2|Asic0|Ethernet100": { "speed": "40000", "system_port_id": "58", "switch_id": "2", "core_index": "1", "core_port_index": "10" }, - "Linecard2|Ethernet104": { + "lc2|Asic0|Ethernet104": { "speed": "40000", "system_port_id": "59", "switch_id": "2", "core_index": "1", "core_port_index": "11" }, - "Linecard2|Ethernet108": { + "lc2|Asic0|Ethernet108": { "speed": "40000", "system_port_id": "60", "switch_id": "2", "core_index": "1", "core_port_index": "12" }, - "Linecard2|Ethernet112": { + "lc2|Asic0|Ethernet112": { "speed": "40000", "system_port_id": "61", "switch_id": "2", "core_index": "1", "core_port_index": "13" }, - "Linecard2|Ethernet116": { + "lc2|Asic0|Ethernet116": { "speed": "40000", 
"system_port_id": "62", "switch_id": "2", "core_index": "1", "core_port_index": "14" }, - "Linecard2|Ethernet120": { + "lc2|Asic0|Ethernet120": { "speed": "40000", "system_port_id": "63", "switch_id": "2", "core_index": "1", "core_port_index": "15" }, - "Linecard2|Ethernet124": { + "lc2|Asic0|Ethernet124": { "speed": "40000", "system_port_id": "64", "switch_id": "2", "core_index": "1", "core_port_index": "16" }, - "Linecard3|Ethernet0": { + "lc3|Asic0|Ethernet0": { "speed": "40000", "system_port_id": "65", "switch_id": "4", "core_index": "0", "core_port_index": "1" }, - "Linecard3|Ethernet4": { + "lc3|Asic0|Ethernet4": { "speed": "40000", "system_port_id": "66", "switch_id": "4", "core_index": "0", "core_port_index": "2" }, - "Linecard3|Ethernet8": { + "lc3|Asic0|Ethernet8": { "speed": "40000", "system_port_id": "67", "switch_id": "4", "core_index": "0", "core_port_index": "3" }, - "Linecard3|Ethernet12": { + "lc3|Asic0|Ethernet12": { "speed": "40000", "system_port_id": "68", "switch_id": "4", "core_index": "0", "core_port_index": "4" }, - "Linecard3|Ethernet16": { + "lc3|Asic0|Ethernet16": { "speed": "40000", "system_port_id": "69", "switch_id": "4", "core_index": "0", "core_port_index": "5" }, - "Linecard3|Ethernet20": { + "lc3|Asic0|Ethernet20": { "speed": "40000", "system_port_id": "70", "switch_id": "4", "core_index": "0", "core_port_index": "6" }, - "Linecard3|Ethernet24": { + "lc3|Asic0|Ethernet24": { "speed": "40000", "system_port_id": "71", "switch_id": "4", "core_index": "0", "core_port_index": "7" }, - "Linecard3|Ethernet28": { + "lc3|Asic0|Ethernet28": { "speed": "40000", "system_port_id": "72", "switch_id": "4", "core_index": "0", "core_port_index": "8" }, - "Linecard3|Ethernet32": { + "lc3|Asic0|Ethernet32": { "speed": "40000", "system_port_id": "73", "switch_id": "4", "core_index": "0", "core_port_index": "9" }, - "Linecard3|Ethernet36": { + "lc3|Asic0|Ethernet36": { "speed": "40000", "system_port_id": "74", "switch_id": "4", "core_index": "0", 
"core_port_index": "10" }, - "Linecard3|Ethernet40": { + "lc3|Asic0|Ethernet40": { "speed": "40000", "system_port_id": "75", "switch_id": "4", "core_index": "0", "core_port_index": "11" }, - "Linecard3|Ethernet44": { + "lc3|Asic0|Ethernet44": { "speed": "40000", "system_port_id": "76", "switch_id": "4", "core_index": "0", "core_port_index": "12" }, - "Linecard3|Ethernet48": { + "lc3|Asic0|Ethernet48": { "speed": "40000", "system_port_id": "77", "switch_id": "4", "core_index": "0", "core_port_index": "13" }, - "Linecard3|Ethernet52": { + "lc3|Asic0|Ethernet52": { "speed": "40000", "system_port_id": "78", "switch_id": "4", "core_index": "0", "core_port_index": "14" }, - "Linecard3|Ethernet56": { + "lc3|Asic0|Ethernet56": { "speed": "40000", "system_port_id": "79", "switch_id": "4", "core_index": "0", "core_port_index": "15" }, - "Linecard3|Ethernet60": { + "lc3|Asic0|Ethernet60": { "speed": "40000", "system_port_id": "80", "switch_id": "4", "core_index": "0", "core_port_index": "16" }, - "Linecard3|Ethernet64": { + "lc3|Asic0|Ethernet64": { "speed": "40000", "system_port_id": "81", "switch_id": "4", "core_index": "1", "core_port_index": "1" }, - "Linecard3|Ethernet68": { + "lc3|Asic0|Ethernet68": { "speed": "40000", "system_port_id": "82", "switch_id": "4", "core_index": "1", "core_port_index": "2" }, - "Linecard3|Ethernet72": { + "lc3|Asic0|Ethernet72": { "speed": "40000", "system_port_id": "83", "switch_id": "4", "core_index": "1", "core_port_index": "3" }, - "Linecard3|Ethernet76": { + "lc3|Asic0|Ethernet76": { "speed": "40000", "system_port_id": "84", "switch_id": "4", "core_index": "1", "core_port_index": "4" }, - "Linecard3|Ethernet80": { + "lc3|Asic0|Ethernet80": { "speed": "40000", "system_port_id": "85", "switch_id": "4", "core_index": "1", "core_port_index": "5" }, - "Linecard3|Ethernet84": { + "lc3|Asic0|Ethernet84": { "speed": "40000", "system_port_id": "86", "switch_id": "4", "core_index": "1", "core_port_index": "6" }, - "Linecard3|Ethernet88": { + 
"lc3|Asic0|Ethernet88": { "speed": "40000", "system_port_id": "87", "switch_id": "4", "core_index": "1", "core_port_index": "7" }, - "Linecard3|Ethernet92": { + "lc3|Asic0|Ethernet92": { "speed": "40000", "system_port_id": "88", "switch_id": "4", "core_index": "1", "core_port_index": "8" }, - "Linecard3|Ethernet96": { + "lc3|Asic0|Ethernet96": { "speed": "40000", "system_port_id": "89", "switch_id": "4", "core_index": "1", "core_port_index": "9" }, - "Linecard3|Ethernet100": { + "lc3|Asic0|Ethernet100": { "speed": "40000", "system_port_id": "90", "switch_id": "4", "core_index": "1", "core_port_index": "10" }, - "Linecard3|Ethernet104": { + "lc3|Asic0|Ethernet104": { "speed": "40000", "system_port_id": "91", "switch_id": "4", "core_index": "1", "core_port_index": "11" }, - "Linecard3|Ethernet108": { + "lc3|Asic0|Ethernet108": { "speed": "40000", "system_port_id": "92", "switch_id": "4", "core_index": "1", "core_port_index": "12" }, - "Linecard3|Ethernet112": { + "lc3|Asic0|Ethernet112": { "speed": "40000", "system_port_id": "93", "switch_id": "4", "core_index": "1", "core_port_index": "13" }, - "Linecard3|Ethernet116": { + "lc3|Asic0|Ethernet116": { "speed": "40000", "system_port_id": "94", "switch_id": "4", "core_index": "1", "core_port_index": "14" }, - "Linecard3|Ethernet120": { + "lc3|Asic0|Ethernet120": { "speed": "40000", "system_port_id": "95", "switch_id": "4", "core_index": "1", "core_port_index": "15" }, - "Linecard3|Ethernet124": { + "lc3|Asic0|Ethernet124": { "speed": "40000", "system_port_id": "96", "switch_id": "4", diff --git a/tests/virtual_chassis/3/default_config.json b/tests/virtual_chassis/3/default_config.json index 4579733d35..7b747d5e54 100644 --- a/tests/virtual_chassis/3/default_config.json +++ b/tests/virtual_chassis/3/default_config.json @@ -22,672 +22,672 @@ } }, "SYSTEM_PORT": { - "Linecard1|Ethernet0": { + "lc1|Ethernet0": { "speed": "40000", "system_port_id": "1", "switch_id": "0", "core_index": "0", "core_port_index": "1" }, - 
"Linecard1|Ethernet4": { + "lc1|Asic0|Ethernet4": { "speed": "40000", "system_port_id": "2", "switch_id": "0", "core_index": "0", "core_port_index": "2" }, - "Linecard1|Ethernet8": { + "lc1|Asic0|Ethernet8": { "speed": "40000", "system_port_id": "3", "switch_id": "0", "core_index": "0", "core_port_index": "3" }, - "Linecard1|Ethernet12": { + "lc1|Asic0|Ethernet12": { "speed": "40000", "system_port_id": "4", "switch_id": "0", "core_index": "0", "core_port_index": "4" }, - "Linecard1|Ethernet16": { + "lc1|Asic0|Ethernet16": { "speed": "40000", "system_port_id": "5", "switch_id": "0", "core_index": "0", "core_port_index": "5" }, - "Linecard1|Ethernet20": { + "lc1|Asic0|Ethernet20": { "speed": "40000", "system_port_id": "6", "switch_id": "0", "core_index": "0", "core_port_index": "6" }, - "Linecard1|Ethernet24": { + "lc1|Asic0|Ethernet24": { "speed": "40000", "system_port_id": "7", "switch_id": "0", "core_index": "0", "core_port_index": "7" }, - "Linecard1|Ethernet28": { + "lc1|Asic0|Ethernet28": { "speed": "40000", "system_port_id": "8", "switch_id": "0", "core_index": "0", "core_port_index": "8" }, - "Linecard1|Ethernet32": { + "lc1|Asic0|Ethernet32": { "speed": "40000", "system_port_id": "9", "switch_id": "0", "core_index": "0", "core_port_index": "9" }, - "Linecard1|Ethernet36": { + "lc1|Asic0|Ethernet36": { "speed": "40000", "system_port_id": "10", "switch_id": "0", "core_index": "0", "core_port_index": "10" }, - "Linecard1|Ethernet40": { + "lc1|Asic0|Ethernet40": { "speed": "40000", "system_port_id": "11", "switch_id": "0", "core_index": "0", "core_port_index": "11" }, - "Linecard1|Ethernet44": { + "lc1|Asic0|Ethernet44": { "speed": "40000", "system_port_id": "12", "switch_id": "0", "core_index": "0", "core_port_index": "12" }, - "Linecard1|Ethernet48": { + "lc1|Asic0|Ethernet48": { "speed": "40000", "system_port_id": "13", "switch_id": "0", "core_index": "0", "core_port_index": "13" }, - "Linecard1|Ethernet52": { + "lc1|Asic0|Ethernet52": { "speed": "40000", 
"system_port_id": "14", "switch_id": "0", "core_index": "0", "core_port_index": "14" }, - "Linecard1|Ethernet56": { + "lc1|Asic0|Ethernet56": { "speed": "40000", "system_port_id": "15", "switch_id": "0", "core_index": "0", "core_port_index": "15" }, - "Linecard1|Ethernet60": { + "lc1|Asic0|Ethernet60": { "speed": "40000", "system_port_id": "16", "switch_id": "0", "core_index": "0", "core_port_index": "16" }, - "Linecard1|Ethernet64": { + "lc1|Asic0|Ethernet64": { "speed": "40000", "system_port_id": "17", "switch_id": "0", "core_index": "1", "core_port_index": "1" }, - "Linecard1|Ethernet68": { + "lc1|Asic0|Ethernet68": { "speed": "40000", "system_port_id": "18", "switch_id": "0", "core_index": "1", "core_port_index": "2" }, - "Linecard1|Ethernet72": { + "lc1|Asic0|Ethernet72": { "speed": "40000", "system_port_id": "19", "switch_id": "0", "core_index": "1", "core_port_index": "3" }, - "Linecard1|Ethernet76": { + "lc1|Asic0|Ethernet76": { "speed": "40000", "system_port_id": "20", "switch_id": "0", "core_index": "1", "core_port_index": "4" }, - "Linecard1|Ethernet80": { + "lc1|Asic0|Ethernet80": { "speed": "40000", "system_port_id": "21", "switch_id": "0", "core_index": "1", "core_port_index": "5" }, - "Linecard1|Ethernet84": { + "lc1|Asic0|Ethernet84": { "speed": "40000", "system_port_id": "22", "switch_id": "0", "core_index": "1", "core_port_index": "6" }, - "Linecard1|Ethernet88": { + "lc1|Asic0|Ethernet88": { "speed": "40000", "system_port_id": "23", "switch_id": "0", "core_index": "1", "core_port_index": "7" }, - "Linecard1|Ethernet92": { + "lc1|Asic0|Ethernet92": { "speed": "40000", "system_port_id": "24", "switch_id": "0", "core_index": "1", "core_port_index": "8" }, - "Linecard1|Ethernet96": { + "lc1|Asic0|Ethernet96": { "speed": "40000", "system_port_id": "25", "switch_id": "0", "core_index": "1", "core_port_index": "9" }, - "Linecard1|Ethernet100": { + "lc1|Asic0|Ethernet100": { "speed": "40000", "system_port_id": "26", "switch_id": "0", "core_index": "1", 
"core_port_index": "10" }, - "Linecard1|Ethernet104": { + "lc1|Asic0|Ethernet104": { "speed": "40000", "system_port_id": "27", "switch_id": "0", "core_index": "1", "core_port_index": "11" }, - "Linecard1|Ethernet108": { + "lc1|Asic0|Ethernet108": { "speed": "40000", "system_port_id": "28", "switch_id": "0", "core_index": "1", "core_port_index": "12" }, - "Linecard1|Ethernet112": { + "lc1|Asic0|Ethernet112": { "speed": "40000", "system_port_id": "29", "switch_id": "0", "core_index": "1", "core_port_index": "13" }, - "Linecard1|Ethernet116": { + "lc1|Asic0|Ethernet116": { "speed": "40000", "system_port_id": "30", "switch_id": "0", "core_index": "1", "core_port_index": "14" }, - "Linecard1|Ethernet120": { + "lc1|Asic0|Ethernet120": { "speed": "40000", "system_port_id": "31", "switch_id": "0", "core_index": "1", "core_port_index": "15" }, - "Linecard1|Ethernet124": { + "lc1|Asic0|Ethernet124": { "speed": "40000", "system_port_id": "32", "switch_id": "0", "core_index": "1", "core_port_index": "16" }, - "Linecard2|Ethernet0": { + "lc2|Asic0|Ethernet0": { "speed": "40000", "system_port_id": "33", "switch_id": "2", "core_index": "0", "core_port_index": "1" }, - "Linecard2|Ethernet4": { + "lc2|Asic0|Ethernet4": { "speed": "40000", "system_port_id": "34", "switch_id": "2", "core_index": "0", "core_port_index": "2" }, - "Linecard2|Ethernet8": { + "lc2|Asic0|Ethernet8": { "speed": "40000", "system_port_id": "35", "switch_id": "2", "core_index": "0", "core_port_index": "3" }, - "Linecard2|Ethernet12": { + "lc2|Asic0|Ethernet12": { "speed": "40000", "system_port_id": "36", "switch_id": "2", "core_index": "0", "core_port_index": "4" }, - "Linecard2|Ethernet16": { + "lc2|Asic0|Ethernet16": { "speed": "40000", "system_port_id": "37", "switch_id": "2", "core_index": "0", "core_port_index": "5" }, - "Linecard2|Ethernet20": { + "lc2|Asic0|Ethernet20": { "speed": "40000", "system_port_id": "38", "switch_id": "2", "core_index": "0", "core_port_index": "6" }, - "Linecard2|Ethernet24": { 
+ "lc2|Asic0|Ethernet24": { "speed": "40000", "system_port_id": "39", "switch_id": "2", "core_index": "0", "core_port_index": "7" }, - "Linecard2|Ethernet28": { + "lc2|Asic0|Ethernet28": { "speed": "40000", "system_port_id": "40", "switch_id": "2", "core_index": "0", "core_port_index": "8" }, - "Linecard2|Ethernet32": { + "lc2|Asic0|Ethernet32": { "speed": "40000", "system_port_id": "41", "switch_id": "2", "core_index": "0", "core_port_index": "9" }, - "Linecard2|Ethernet36": { + "lc2|Asic0|Ethernet36": { "speed": "40000", "system_port_id": "42", "switch_id": "2", "core_index": "0", "core_port_index": "10" }, - "Linecard2|Ethernet40": { + "lc2|Asic0|Ethernet40": { "speed": "40000", "system_port_id": "43", "switch_id": "2", "core_index": "0", "core_port_index": "11" }, - "Linecard2|Ethernet44": { + "lc2|Asic0|Ethernet44": { "speed": "40000", "system_port_id": "44", "switch_id": "2", "core_index": "0", "core_port_index": "12" }, - "Linecard2|Ethernet48": { + "lc2|Asic0|Ethernet48": { "speed": "40000", "system_port_id": "45", "switch_id": "2", "core_index": "0", "core_port_index": "13" }, - "Linecard2|Ethernet52": { + "lc2|Asic0|Ethernet52": { "speed": "40000", "system_port_id": "46", "switch_id": "2", "core_index": "0", "core_port_index": "14" }, - "Linecard2|Ethernet56": { + "lc2|Asic0|Ethernet56": { "speed": "40000", "system_port_id": "47", "switch_id": "2", "core_index": "0", "core_port_index": "15" }, - "Linecard2|Ethernet60": { + "lc2|Asic0|Ethernet60": { "speed": "40000", "system_port_id": "48", "switch_id": "2", "core_index": "0", "core_port_index": "16" }, - "Linecard2|Ethernet64": { + "lc2|Asic0|Ethernet64": { "speed": "40000", "system_port_id": "49", "switch_id": "2", "core_index": "1", "core_port_index": "1" }, - "Linecard2|Ethernet68": { + "lc2|Asic0|Ethernet68": { "speed": "40000", "system_port_id": "50", "switch_id": "2", "core_index": "1", "core_port_index": "2" }, - "Linecard2|Ethernet72": { + "lc2|Asic0|Ethernet72": { "speed": "40000", 
"system_port_id": "51", "switch_id": "2", "core_index": "1", "core_port_index": "3" }, - "Linecard2|Ethernet76": { + "lc2|Asic0|Ethernet76": { "speed": "40000", "system_port_id": "52", "switch_id": "2", "core_index": "1", "core_port_index": "4" }, - "Linecard2|Ethernet80": { + "lc2|Asic0|Ethernet80": { "speed": "40000", "system_port_id": "53", "switch_id": "2", "core_index": "1", "core_port_index": "5" }, - "Linecard2|Ethernet84": { + "lc2|Asic0|Ethernet84": { "speed": "40000", "system_port_id": "54", "switch_id": "2", "core_index": "1", "core_port_index": "6" }, - "Linecard2|Ethernet88": { + "lc2|Asic0|Ethernet88": { "speed": "40000", "system_port_id": "55", "switch_id": "2", "core_index": "1", "core_port_index": "7" }, - "Linecard2|Ethernet92": { + "lc2|Asic0|Ethernet92": { "speed": "40000", "system_port_id": "56", "switch_id": "2", "core_index": "1", "core_port_index": "8" }, - "Linecard2|Ethernet96": { + "lc2|Asic0|Ethernet96": { "speed": "40000", "system_port_id": "57", "switch_id": "2", "core_index": "1", "core_port_index": "9" }, - "Linecard2|Ethernet100": { + "lc2|Asic0|Ethernet100": { "speed": "40000", "system_port_id": "58", "switch_id": "2", "core_index": "1", "core_port_index": "10" }, - "Linecard2|Ethernet104": { + "lc2|Asic0|Ethernet104": { "speed": "40000", "system_port_id": "59", "switch_id": "2", "core_index": "1", "core_port_index": "11" }, - "Linecard2|Ethernet108": { + "lc2|Asic0|Ethernet108": { "speed": "40000", "system_port_id": "60", "switch_id": "2", "core_index": "1", "core_port_index": "12" }, - "Linecard2|Ethernet112": { + "lc2|Asic0|Ethernet112": { "speed": "40000", "system_port_id": "61", "switch_id": "2", "core_index": "1", "core_port_index": "13" }, - "Linecard2|Ethernet116": { + "lc2|Asic0|Ethernet116": { "speed": "40000", "system_port_id": "62", "switch_id": "2", "core_index": "1", "core_port_index": "14" }, - "Linecard2|Ethernet120": { + "lc2|Asic0|Ethernet120": { "speed": "40000", "system_port_id": "63", "switch_id": "2", 
"core_index": "1", "core_port_index": "15" }, - "Linecard2|Ethernet124": { + "lc2|Asic0|Ethernet124": { "speed": "40000", "system_port_id": "64", "switch_id": "2", "core_index": "1", "core_port_index": "16" }, - "Linecard3|Ethernet0": { + "lc3|Asic0|Ethernet0": { "speed": "40000", "system_port_id": "65", "switch_id": "4", "core_index": "0", "core_port_index": "1" }, - "Linecard3|Ethernet4": { + "lc3|Asic0|Ethernet4": { "speed": "40000", "system_port_id": "66", "switch_id": "4", "core_index": "0", "core_port_index": "2" }, - "Linecard3|Ethernet8": { + "lc3|Asic0|Ethernet8": { "speed": "40000", "system_port_id": "67", "switch_id": "4", "core_index": "0", "core_port_index": "3" }, - "Linecard3|Ethernet12": { + "lc3|Asic0|Ethernet12": { "speed": "40000", "system_port_id": "68", "switch_id": "4", "core_index": "0", "core_port_index": "4" }, - "Linecard3|Ethernet16": { + "lc3|Asic0|Ethernet16": { "speed": "40000", "system_port_id": "69", "switch_id": "4", "core_index": "0", "core_port_index": "5" }, - "Linecard3|Ethernet20": { + "lc3|Asic0|Ethernet20": { "speed": "40000", "system_port_id": "70", "switch_id": "4", "core_index": "0", "core_port_index": "6" }, - "Linecard3|Ethernet24": { + "lc3|Asic0|Ethernet24": { "speed": "40000", "system_port_id": "71", "switch_id": "4", "core_index": "0", "core_port_index": "7" }, - "Linecard3|Ethernet28": { + "lc3|Asic0|Ethernet28": { "speed": "40000", "system_port_id": "72", "switch_id": "4", "core_index": "0", "core_port_index": "8" }, - "Linecard3|Ethernet32": { + "lc3|Asic0|Ethernet32": { "speed": "40000", "system_port_id": "73", "switch_id": "4", "core_index": "0", "core_port_index": "9" }, - "Linecard3|Ethernet36": { + "lc3|Asic0|Ethernet36": { "speed": "40000", "system_port_id": "74", "switch_id": "4", "core_index": "0", "core_port_index": "10" }, - "Linecard3|Ethernet40": { + "lc3|Asic0|Ethernet40": { "speed": "40000", "system_port_id": "75", "switch_id": "4", "core_index": "0", "core_port_index": "11" }, - 
"Linecard3|Ethernet44": { + "lc3|Asic0|Ethernet44": { "speed": "40000", "system_port_id": "76", "switch_id": "4", "core_index": "0", "core_port_index": "12" }, - "Linecard3|Ethernet48": { + "lc3|Asic0|Ethernet48": { "speed": "40000", "system_port_id": "77", "switch_id": "4", "core_index": "0", "core_port_index": "13" }, - "Linecard3|Ethernet52": { + "lc3|Asic0|Ethernet52": { "speed": "40000", "system_port_id": "78", "switch_id": "4", "core_index": "0", "core_port_index": "14" }, - "Linecard3|Ethernet56": { + "lc3|Asic0|Ethernet56": { "speed": "40000", "system_port_id": "79", "switch_id": "4", "core_index": "0", "core_port_index": "15" }, - "Linecard3|Ethernet60": { + "lc3|Asic0|Ethernet60": { "speed": "40000", "system_port_id": "80", "switch_id": "4", "core_index": "0", "core_port_index": "16" }, - "Linecard3|Ethernet64": { + "lc3|Asic0|Ethernet64": { "speed": "40000", "system_port_id": "81", "switch_id": "4", "core_index": "1", "core_port_index": "1" }, - "Linecard3|Ethernet68": { + "lc3|Asic0|Ethernet68": { "speed": "40000", "system_port_id": "82", "switch_id": "4", "core_index": "1", "core_port_index": "2" }, - "Linecard3|Ethernet72": { + "lc3|Asic0|Ethernet72": { "speed": "40000", "system_port_id": "83", "switch_id": "4", "core_index": "1", "core_port_index": "3" }, - "Linecard3|Ethernet76": { + "lc3|Asic0|Ethernet76": { "speed": "40000", "system_port_id": "84", "switch_id": "4", "core_index": "1", "core_port_index": "4" }, - "Linecard3|Ethernet80": { + "lc3|Asic0|Ethernet80": { "speed": "40000", "system_port_id": "85", "switch_id": "4", "core_index": "1", "core_port_index": "5" }, - "Linecard3|Ethernet84": { + "lc3|Asic0|Ethernet84": { "speed": "40000", "system_port_id": "86", "switch_id": "4", "core_index": "1", "core_port_index": "6" }, - "Linecard3|Ethernet88": { + "lc3|Asic0|Ethernet88": { "speed": "40000", "system_port_id": "87", "switch_id": "4", "core_index": "1", "core_port_index": "7" }, - "Linecard3|Ethernet92": { + "lc3|Asic0|Ethernet92": { "speed": 
"40000", "system_port_id": "88", "switch_id": "4", "core_index": "1", "core_port_index": "8" }, - "Linecard3|Ethernet96": { + "lc3|Asic0|Ethernet96": { "speed": "40000", "system_port_id": "89", "switch_id": "4", "core_index": "1", "core_port_index": "9" }, - "Linecard3|Ethernet100": { + "lc3|Asic0|Ethernet100": { "speed": "40000", "system_port_id": "90", "switch_id": "4", "core_index": "1", "core_port_index": "10" }, - "Linecard3|Ethernet104": { + "lc3|Asic0|Ethernet104": { "speed": "40000", "system_port_id": "91", "switch_id": "4", "core_index": "1", "core_port_index": "11" }, - "Linecard3|Ethernet108": { + "lc3|Asic0|Ethernet108": { "speed": "40000", "system_port_id": "92", "switch_id": "4", "core_index": "1", "core_port_index": "12" }, - "Linecard3|Ethernet112": { + "lc3|Asic0|Ethernet112": { "speed": "40000", "system_port_id": "93", "switch_id": "4", "core_index": "1", "core_port_index": "13" }, - "Linecard3|Ethernet116": { + "lc3|Asic0|Ethernet116": { "speed": "40000", "system_port_id": "94", "switch_id": "4", "core_index": "1", "core_port_index": "14" }, - "Linecard3|Ethernet120": { + "lc3|Asic0|Ethernet120": { "speed": "40000", "system_port_id": "95", "switch_id": "4", "core_index": "1", "core_port_index": "15" }, - "Linecard3|Ethernet124": { + "lc3|Asic0|Ethernet124": { "speed": "40000", "system_port_id": "96", "switch_id": "4", diff --git a/tests/virtual_chassis/8/default_config.json b/tests/virtual_chassis/8/default_config.json new file mode 100644 index 0000000000..b50c86ffff --- /dev/null +++ b/tests/virtual_chassis/8/default_config.json @@ -0,0 +1,95 @@ +{ + "DEVICE_METADATA": { + "localhost": { + "hostname": "supervisor", + "chassis_db_address" : "10.8.1.200", + "inband_address" : "10.8.1.200/24", + "switch_type": "fabric", + "sub_role" : "BackEnd", + "start_chassis_db" : "1", + "comment" : "default_config for a vs that runs chassis_db" + } + }, + "FABRIC_PORT": { + "Fabric0": { + "alias": "Fabric0", + "isolateStatus": "False", + "lanes": "0" + }, + 
"Fabric1": { + "alias": "Fabric1", + "isolateStatus": "False", + "lanes": "1" + }, + "Fabric2": { + "alias": "Fabric2", + "isolateStatus": "False", + "lanes": "2" + }, + "Fabric3": { + "alias": "Fabric3", + "isolateStatus": "False", + "lanes": "3" + }, + "Fabric4": { + "alias": "Fabric4", + "isolateStatus": "False", + "lanes": "4" + }, + "Fabric5": { + "alias": "Fabric5", + "isolateStatus": "False", + "lanes": "5" + }, + "Fabric6": { + "alias": "Fabric6", + "isolateStatus": "False", + "lanes": "6" + }, + "Fabric7": { + "alias": "Fabric7", + "isolateStatus": "False", + "lanes": "7" + }, + "Fabric8": { + "alias": "Fabric8", + "isolateStatus": "False", + "lanes": "8" + }, + "Fabric9": { + "alias": "Fabric9", + "isolateStatus": "False", + "lanes": "9" + }, + "Fabric10": { + "alias": "Fabric10", + "isolateStatus": "False", + "lanes": "10" + }, + "Fabric11": { + "alias": "Fabric11", + "isolateStatus": "False", + "lanes": "11" + }, + "Fabric12": { + "alias": "Fabric12", + "isolateStatus": "False", + "lanes": "12" + }, + "Fabric13": { + "alias": "Fabric13", + "isolateStatus": "False", + "lanes": "13" + }, + "Fabric14": { + "alias": "Fabric14", + "isolateStatus": "False", + "lanes": "14" + }, + "Fabric15": { + "alias": "Fabric15", + "isolateStatus": "False", + "lanes": "15" + } + } +} diff --git a/tests/virtual_chassis/chassis_supervisor.json b/tests/virtual_chassis/chassis_supervisor.json new file mode 100644 index 0000000000..373b44f257 --- /dev/null +++ b/tests/virtual_chassis/chassis_supervisor.json @@ -0,0 +1,5 @@ +{ + "VIRTUAL_TOPOLOGY": { + "chassis_instances" : [ "8", "1", "2", "3" ] + } +} diff --git a/tlm_teamd/Makefile.am b/tlm_teamd/Makefile.am index 46ddfd22f5..4548ea06ba 100644 --- a/tlm_teamd/Makefile.am +++ b/tlm_teamd/Makefile.am @@ -15,7 +15,7 @@ tlm_teamd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(JANSSON_CFLAGS) tlm_teamd_LDADD = $(LDFLAGS_ASAN) -lhiredis -lswsscommon -lteamdctl $(JANSSON_LIBS) if GCOV_ENABLED -tlm_teamd_LDADD += 
-lgcovpreload +tlm_teamd_SOURCES += ../gcovpreload/gcovpreload.cpp endif if ASAN_ENABLED diff --git a/warmrestart/warmRestartAssist.cpp b/warmrestart/warmRestartAssist.cpp index 988f8279db..9b1a8dfddd 100644 --- a/warmrestart/warmRestartAssist.cpp +++ b/warmrestart/warmRestartAssist.cpp @@ -208,10 +208,31 @@ void AppRestartAssist::insertToMap(string tableName, string key, vectorsecond, SAME); + auto state = getCacheEntryState(found->second); + /* + * In case an entry has been updated for more than once with the same value but different from the stored one, + * keep the state as NEW. + * Eg. + * Assume the entry's value that is restored from last warm reboot is V0. + * 1. The first update with value V1 is received and handled by the above `if (found != appTableCacheMap[tableName].end())` branch, + * - state is set to NEW + * - value is updated to V1 + * 2. The second update with the same value V1 is received and handled by this branch + * - Originally, state was set to SAME, which is wrong because V1 is different from the stored value V0 + * - The correct logic should be: set the state to same only if the state is not NEW + * This is a very rare case because in most of times the entry won't be updated for multiple times + */ + if (state == NEW) + { + SWSS_LOG_NOTICE("%s, found key: %s, it has been updated for the second time, keep state as NEW", + tableName.c_str(), key.c_str()); + } + else + { + SWSS_LOG_INFO("%s, found key: %s, same value", tableName.c_str(), key.c_str()); + // mark as SAME flag + setCacheEntryState(found->second, SAME); + } } } else diff --git a/warmrestart/warmRestartHelper.cpp b/warmrestart/warmRestartHelper.cpp index 580e9f98a6..b7dafd64d7 100644 --- a/warmrestart/warmRestartHelper.cpp +++ b/warmrestart/warmRestartHelper.cpp @@ -264,7 +264,7 @@ void WarmStartHelper::reconcile(void) * Compare all field-value-tuples within two vectors. 
* * Example: v1 {nexthop: 10.1.1.1, ifname: eth1} - * v2 {nexthop: 10.1.1.2, ifname: eth2} + * v2 {nexthop: 10.1.1.2, ifname: eth2, protocol: kernel, weight: 1} * * Returns: * @@ -274,25 +274,24 @@ void WarmStartHelper::reconcile(void) bool WarmStartHelper::compareAllFV(const std::vector &v1, const std::vector &v2) { + /* Size mismatch implies a diff */ + if (v1.size() != v2.size()) + { + return true; + } + std::unordered_map v1Map((v1.begin()), v1.end()); /* Iterate though all v2 tuples to check if their content match v1 ones */ for (auto &v2fv : v2) { auto v1Iter = v1Map.find(v2fv.first); - /* - * The sizes of both tuple-vectors should always match within any - * given application. In other words, all fields within v1 should be - * also present in v2. - * - * To make this possible, every application should continue relying on a - * uniform schema to create/generate information. For example, fpmsyncd - * will be always expected to push FieldValueTuples with "nexthop" and - * "ifname" fields; neighsyncd is expected to make use of "family" and - * "neigh" fields, etc. The existing reconciliation logic will rely on - * this assumption. - */ - assert(v1Iter != v1Map.end()); + + /* Return true when v2 has a new field */ + if (v1Iter == v1Map.end()) + { + return true; + } if (compareOneFV(v1Map[fvField(*v1Iter)], fvValue(v2fv))) {