diff --git a/.github/actions/build-component-multi-arch/action.yml b/.github/actions/build-component-multi-arch/action.yml index 7acf06bdd..48382afa5 100644 --- a/.github/actions/build-component-multi-arch/action.yml +++ b/.github/actions/build-component-multi-arch/action.yml @@ -20,13 +20,7 @@ inputs: description: Component prefix used by Makefile required: true github_event_name: - description: Specify the github event name (push, pull_request_target, release, etc) - required: true - dockerhub_username: - description: Dockerhub name - required: true - dockerhub_password: - description: Dockerhub password + description: Specify the github event name (push, pull_request, release, etc) required: true runs: using: 'node12' diff --git a/.github/actions/build-component-multi-arch/main.js b/.github/actions/build-component-multi-arch/main.js index 55efb0d5a..7c600a5b3 100644 --- a/.github/actions/build-component-multi-arch/main.js +++ b/.github/actions/build-component-multi-arch/main.js @@ -30,9 +30,6 @@ async function shell_cmd(cmd) { const latest_label = `latest${dev_suffix}`; console.log(`Use labels: versioned=${versioned_label} latest=${latest_label}`); - console.log(`Log into dockerhub to avoid throttled anonymous dockerhub pulls user=${core.getInput('dockerhub_username')}`); - await shell_cmd(`echo "${core.getInput('dockerhub_password')}" | docker login -u ${core.getInput('dockerhub_username')} --password-stdin`); - console.log(`Login into Container Registry user=${core.getInput('container_registry_username')} repo=${core.getInput('container_registry_base_url')}`); await shell_cmd(`echo "${core.getInput('container_registry_password')}" | docker login -u ${core.getInput('container_registry_username')} --password-stdin ${core.getInput('container_registry_base_url')}`); diff --git a/.github/actions/build-component-per-arch/action.yml b/.github/actions/build-component-per-arch/action.yml index a36e7ddd8..b58084319 100644 --- a/.github/actions/build-component-per-arch/action.yml +++ b/.github/actions/build-component-per-arch/action.yml @@ -26,7 +26,7 @@ inputs: description: Specify whether rust is being built required: true github_event_name: - description: Specify the github event name (push, pull_request_target, release, etc) + description: Specify the github event name (push, pull_request, release, etc) required: true github_ref: description: Specify the github ref @@ -37,12 +37,6 @@ inputs: github_merged: description: Specify whether a PR has been merged required: true - dockerhub_username: - description: Dockerhub name - required: true - dockerhub_password: - description: Dockerhub password - required: true runs: using: 'node12' main: 'main.js' diff --git a/.github/actions/build-component-per-arch/main.js b/.github/actions/build-component-per-arch/main.js index 5a320692c..665e6c9db 100644 --- a/.github/actions/build-component-per-arch/main.js +++ b/.github/actions/build-component-per-arch/main.js @@ -25,9 +25,6 @@ async function shell_cmd(cmd) { try { console.log(`Start main.js`) - console.log(`Log into dockerhub to avoid throttled anonymous dockerhub pulls user=${core.getInput('dockerhub_username')}`); - await shell_cmd(`echo "${core.getInput('dockerhub_password')}" | docker login -u ${core.getInput('dockerhub_username')} --password-stdin`); - console.log(`Use multiarch/qemu-user-static to configure cross-plat`); await shell_cmd('docker run --rm --privileged multiarch/qemu-user-static --reset -p yes'); @@ -67,9 +64,6 @@ async function shell_cmd(cmd) { } console.log(`Makefile build target 
suffix: ${makefile_target_suffix}`) - console.log(`Login into Container Registry user=${core.getInput('container_registry_username')} repo=${core.getInput('container_registry_base_url')}`); - await shell_cmd(`echo "${core.getInput('container_registry_password')}" | docker login -u ${core.getInput('container_registry_username')} --password-stdin ${core.getInput('container_registry_base_url')}`); - if (core.getInput('build_rust') == '1') { console.log(`Install Rust`) child_process.execSync(`curl https://sh.rustup.rs | sh -s -- -y --default-toolchain=1.41.0`); @@ -102,6 +96,9 @@ async function shell_cmd(cmd) { await shell_cmd(`docker run ${image_name} find container-images-legal-notice.md | wc -l | grep -v 0`) if (push_containers == "1") { + console.log(`Login into Container Registry user=${core.getInput('container_registry_username')} repo=${core.getInput('container_registry_base_url')}`); + await shell_cmd(`echo "${core.getInput('container_registry_password')}" | docker login -u ${core.getInput('container_registry_username')} --password-stdin ${core.getInput('container_registry_base_url')}`); + console.log(`Push the versioned container: make ${core.getInput('makefile_component_name')}-docker-per-arch-${makefile_target_suffix}`) process.env.LABEL_PREFIX = `${versioned_label}` await exec.exec(`make ${core.getInput('makefile_component_name')}-docker-per-arch-${makefile_target_suffix}`) diff --git a/.github/actions/build-intermediate/action.yml b/.github/actions/build-intermediate/action.yml index 8773eabcd..83305d114 100644 --- a/.github/actions/build-intermediate/action.yml +++ b/.github/actions/build-intermediate/action.yml @@ -23,7 +23,7 @@ inputs: description: Platform to build (amd64|arm64|arm32) required: true github_event_name: - description: Specify the github event name (push, pull_request_target, release, etc) + description: Specify the github event name (push, pull_request, release, etc) required: true github_ref: description: Specify the github ref @@ -34,12 +34,6 @@ inputs: github_merged: description: Specify whether a PR has been merged required: true - dockerhub_username: - description: Dockerhub name - required: true - dockerhub_password: - description: Dockerhub password - required: true runs: using: 'node12' main: 'main.js' diff --git a/.github/actions/build-intermediate/main.js b/.github/actions/build-intermediate/main.js index c1977d2d7..1973f86ec 100644 --- a/.github/actions/build-intermediate/main.js +++ b/.github/actions/build-intermediate/main.js @@ -25,9 +25,6 @@ async function shell_cmd(cmd) { try { console.log(`Start main.js`) - console.log(`Log into dockerhub to avoid throttled anonymous dockerhub pulls user=${core.getInput('dockerhub_username')}`); - await shell_cmd(`echo "${core.getInput('dockerhub_password')}" | docker login -u ${core.getInput('dockerhub_username')} --password-stdin`); - console.log(`Use multiarch/qemu-user-static to configure cross-plat`); child_process.execSync('docker run --rm --privileged multiarch/qemu-user-static --reset -p yes'); @@ -62,15 +59,15 @@ async function shell_cmd(cmd) { } console.log(`Makefile build target suffix: ${makefile_target_suffix}`) - console.log(`Login into Container Registry user=${core.getInput('container_registry_username')} repo=${core.getInput('container_registry_base_url')}`); - await shell_cmd(`echo "${core.getInput('container_registry_password')}" | docker login -u ${core.getInput('container_registry_username')} --password-stdin ${core.getInput('container_registry_base_url')}`); - process.env.PREFIX = 
`${core.getInput('container_prefix')}` console.log(`Build the versioned container: make ${core.getInput('makefile_component_name')}-build-${makefile_target_suffix}`) await exec.exec(`make ${core.getInput('makefile_component_name')}-build-${makefile_target_suffix}`) if (push_containers == "1") { + console.log(`Login into Container Registry user=${core.getInput('container_registry_username')} repo=${core.getInput('container_registry_base_url')}`); + await shell_cmd(`echo "${core.getInput('container_registry_password')}" | docker login -u ${core.getInput('container_registry_username')} --password-stdin ${core.getInput('container_registry_base_url')}`); + console.log(`Push the versioned container: make ${core.getInput('makefile_component_name')}-docker-per-arch-${makefile_target_suffix}`) await exec.exec(`make ${core.getInput('makefile_component_name')}-docker-per-arch-${makefile_target_suffix}`) } else { diff --git a/.github/workflows/build-agent-container.yml b/.github/workflows/build-agent-container.yml index b036195ea..db45d0ce8 100644 --- a/.github/workflows/build-agent-container.yml +++ b/.github/workflows/build-agent-container.yml @@ -10,20 +10,6 @@ on: - build/containers/Dockerfile.agent - agent/** - shared/** - - build/setup.sh - - version.txt - - build/akri-containers.mk - - Makefile - pull_request_target: - branches: [ main ] - paths: - - .github/actions/build-component-per-arch/** - - .github/actions/build-component-multi-arch/** - - .github/workflows/build-agent-container.yml - - build/containers/Dockerfile.agent - - agent/** - - shared/** - - build/setup.sh - version.txt - build/akri-containers.mk - Makefile @@ -36,7 +22,6 @@ on: - build/containers/Dockerfile.agent - agent/** - shared/** - - build/setup.sh - version.txt - build/akri-containers.mk - Makefile @@ -51,11 +36,6 @@ env: jobs: per-arch: - # Run workflow pull_request if it is NOT a fork, as pull_request_target if it IS a fork - if: >- - ( github.event_name == 'pull_request_target' && github.event.pull_request.head.repo.fork == true ) || - ( github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork == false ) || - ( !(startsWith(github.event_name, 'pull_request')) ) runs-on: ubuntu-latest timeout-minutes: 40 strategy: @@ -66,18 +46,10 @@ jobs: - amd64 steps: - - name: Checkout the merged commit from PR and base branch - uses: actions/checkout@v2 - if: github.event_name == 'pull_request_target' - with: - # pull_request_target is run in the context of the base repository - # of the pull request, so the default ref is master branch and - # ref should be manually set to the head of the PR - ref: refs/pull/${{ github.event.pull_request.number }}/head - - name: Checkout the head commit of the branch - if: ${{ github.event_name != 'pull_request_target' }} uses: actions/checkout@v2 + with: + persist-credentials: false - name: Prepare To Install uses: actions/setup-node@v1 @@ -100,8 +72,6 @@ jobs: container_registry_base_url: ghcr.io container_registry_username: ${{ secrets.crUsername }} container_registry_password: ${{ secrets.crPassword }} - dockerhub_username: ${{ secrets.dhUsername }} - dockerhub_password: ${{ secrets.dhPassword }} makefile_component_name: ${{ env.MAKEFILE_COMPONENT }} platform: ${{ matrix.arch }} build_rust: "1" @@ -115,6 +85,8 @@ jobs: steps: - name: Checkout repo uses: actions/checkout@v2 + with: + persist-credentials: false - name: Prepare To Install uses: actions/setup-node@v1 @@ -134,6 +106,4 @@ jobs: container_registry_base_url: ghcr.io container_registry_username: ${{ 
secrets.crUsername }} container_registry_password: ${{ secrets.crPassword }} - dockerhub_username: ${{ secrets.dhUsername }} - dockerhub_password: ${{ secrets.dhPassword }} makefile_component_name: ${{ env.MAKEFILE_COMPONENT }} diff --git a/.github/workflows/build-controller-container.yml b/.github/workflows/build-controller-container.yml index c1a87b786..1330df84c 100644 --- a/.github/workflows/build-controller-container.yml +++ b/.github/workflows/build-controller-container.yml @@ -10,7 +10,6 @@ on: - build/containers/Dockerfile.controller - controller/** - shared/** - - build/setup.sh - version.txt - build/akri-containers.mk - Makefile @@ -23,20 +22,6 @@ on: - build/containers/Dockerfile.controller - controller/** - shared/** - - build/setup.sh - - version.txt - - build/akri-containers.mk - - Makefile - pull_request_target: - branches: [ main ] - paths: - - .github/actions/build-component-per-arch/** - - .github/actions/build-component-multi-arch/** - - .github/workflows/build-controller-container.yml - - build/containers/Dockerfile.controller - - controller/** - - shared/** - - build/setup.sh - version.txt - build/akri-containers.mk - Makefile @@ -51,11 +36,6 @@ env: jobs: per-arch: - # Run workflow pull_request if it is NOT a fork, as pull_request_target if it IS a fork - if: >- - ( github.event_name == 'pull_request_target' && github.event.pull_request.head.repo.fork == true ) || - ( github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork == false ) || - ( !(startsWith(github.event_name, 'pull_request')) ) runs-on: ubuntu-latest timeout-minutes: 40 strategy: @@ -65,18 +45,10 @@ jobs: - arm32v7 - amd64 steps: - - name: Checkout the merged commit from PR and base branch - uses: actions/checkout@v2 - if: github.event_name == 'pull_request_target' - with: - # pull_request_target is run in the context of the base repository - # of the pull request, so the default ref is master branch and - # ref should be manually set to the head of the PR - ref: refs/pull/${{ github.event.pull_request.number }}/head - - name: Checkout the head commit of the branch - if: ${{ github.event_name != 'pull_request_target' }} uses: actions/checkout@v2 + with: + persist-credentials: false - name: Prepare To Install uses: actions/setup-node@v1 @@ -99,8 +71,6 @@ jobs: container_registry_base_url: ghcr.io container_registry_username: ${{ secrets.crUsername }} container_registry_password: ${{ secrets.crPassword }} - dockerhub_username: ${{ secrets.dhUsername }} - dockerhub_password: ${{ secrets.dhPassword }} makefile_component_name: ${{ env.MAKEFILE_COMPONENT }} platform: ${{ matrix.arch }} build_rust: "1" @@ -114,6 +84,8 @@ jobs: steps: - name: Checkout repo uses: actions/checkout@v2 + with: + persist-credentials: false - name: Prepare To Install uses: actions/setup-node@v1 @@ -133,6 +105,4 @@ jobs: container_registry_base_url: ghcr.io container_registry_username: ${{ secrets.crUsername }} container_registry_password: ${{ secrets.crPassword }} - dockerhub_username: ${{ secrets.dhUsername }} - dockerhub_password: ${{ secrets.dhPassword }} makefile_component_name: ${{ env.MAKEFILE_COMPONENT }} diff --git a/.github/workflows/build-onvif-video-broker-container.yml b/.github/workflows/build-onvif-video-broker-container.yml index 348783471..0f69d1afd 100644 --- a/.github/workflows/build-onvif-video-broker-container.yml +++ b/.github/workflows/build-onvif-video-broker-container.yml @@ -23,17 +23,6 @@ on: - version.txt - build/akri-containers.mk - Makefile - pull_request_target: - branches: [ main ] 
- paths: - - .github/actions/build-component-per-arch/** - - .github/actions/build-component-multi-arch/** - - .github/workflows/build-onvif-video-broker-container.yml - - build/containers/Dockerfile.onvif-video-broker - - samples/brokers/onvif-video-broker/** - - version.txt - - build/akri-containers.mk - - Makefile release: types: - published @@ -45,11 +34,6 @@ env: jobs: per-arch: - # Run workflow pull_request if it is NOT a fork, as pull_request_target if it IS a fork - if: >- - ( github.event_name == 'pull_request_target' && github.event.pull_request.head.repo.fork == true ) || - ( github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork == false ) || - ( !(startsWith(github.event_name, 'pull_request')) ) runs-on: ubuntu-latest timeout-minutes: 20 strategy: @@ -60,18 +44,10 @@ jobs: - amd64 steps: - - name: Checkout the merged commit from PR and base branch - uses: actions/checkout@v2 - if: github.event_name == 'pull_request_target' - with: - # pull_request_target is run in the context of the base repository - # of the pull request, so the default ref is master branch and - # ref should be manually set to the head of the PR - ref: refs/pull/${{ github.event.pull_request.number }}/head - - name: Checkout the head commit of the branch - if: ${{ github.event_name != 'pull_request_target' }} uses: actions/checkout@v2 + with: + persist-credentials: false - name: Prepare To Install uses: actions/setup-node@v1 @@ -94,8 +70,6 @@ jobs: container_registry_base_url: ghcr.io container_registry_username: ${{ secrets.crUsername }} container_registry_password: ${{ secrets.crPassword }} - dockerhub_username: ${{ secrets.dhUsername }} - dockerhub_password: ${{ secrets.dhPassword }} makefile_component_name: ${{ env.MAKEFILE_COMPONENT }} platform: ${{ matrix.arch }} build_rust: "0" @@ -109,6 +83,8 @@ jobs: steps: - name: Checkout repo uses: actions/checkout@v2 + with: + persist-credentials: false - name: Prepare To Install uses: actions/setup-node@v1 @@ -128,6 +104,4 @@ jobs: container_registry_base_url: ghcr.io container_registry_username: ${{ secrets.crUsername }} container_registry_password: ${{ secrets.crPassword }} - dockerhub_username: ${{ secrets.dhUsername }} - dockerhub_password: ${{ secrets.dhPassword }} makefile_component_name: ${{ env.MAKEFILE_COMPONENT }} diff --git a/.github/workflows/build-opencv-base-container.yml b/.github/workflows/build-opencv-base-container.yml index 92cd96381..614d96bf9 100644 --- a/.github/workflows/build-opencv-base-container.yml +++ b/.github/workflows/build-opencv-base-container.yml @@ -17,14 +17,6 @@ on: - build/containers/intermediate/Dockerfile.opencvsharp-build - build/intermediate-containers.mk - Makefile - pull_request_target: - branches: [ main ] - paths: - - .github/actions/build-intermediate/** - - .github/workflows/build-opencv-base-container.yml - - build/containers/intermediate/Dockerfile.opencvsharp-build - - build/intermediate-containers.mk - - Makefile env: AKRI_COMPONENT: opencvsharp-build @@ -35,10 +27,7 @@ jobs: per-arch: if: >- !contains(github.event.pull_request.title, '[IGNORE INTERMEDIATE BUILDS]') && - !contains(github.event.commits[0].message, '[IGNORE INTERMEDIATE BUILDS]') && - (( github.event_name == 'pull_request_target' && github.event.pull_request.head.repo.fork == true ) || - ( github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork == false ) || - ( !(startsWith(github.event_name, 'pull_request')) )) + !contains(github.event.commits[0].message, '[IGNORE INTERMEDIATE BUILDS]') runs-on: 
ubuntu-latest strategy: matrix: @@ -47,18 +36,10 @@ jobs: - arm32v7 - amd64 steps: - - name: Checkout the merged commit from PR and base branch - uses: actions/checkout@v2 - if: github.event_name == 'pull_request_target' - with: - # pull_request_target is run in the context of the base repository - # of the pull request, so the default ref is master branch and - # ref should be manually set to the head of the PR - ref: refs/pull/${{ github.event.pull_request.number }}/head - - name: Checkout the head commit of the branch - if: ${{ github.event_name != 'pull_request_target' }} uses: actions/checkout@v2 + with: + persist-credentials: false - name: Akri intermediate builds are LONG running and should only be run when absolutely needed if: >- @@ -101,7 +82,5 @@ jobs: container_registry_base_url: ghcr.io container_registry_username: ${{ secrets.crUsername }} container_registry_password: ${{ secrets.crPassword }} - dockerhub_username: ${{ secrets.dhUsername }} - dockerhub_password: ${{ secrets.dhPassword }} makefile_component_name: ${{ env.MAKEFILE_COMPONENT }} platform: ${{ matrix.arch }} diff --git a/.github/workflows/build-rust-crossbuild-container.yml b/.github/workflows/build-rust-crossbuild-container.yml index 91698b41c..ba0b0bbc8 100644 --- a/.github/workflows/build-rust-crossbuild-container.yml +++ b/.github/workflows/build-rust-crossbuild-container.yml @@ -17,14 +17,6 @@ on: - build/containers/intermediate/Dockerfile.rust-crossbuild-* - build/intermediate-containers.mk - Makefile - pull_request_target: - branches: [ main ] - paths: - - .github/actions/build-intermediate/** - - .github/workflows/build-rust-crossbuild-container.yml - - build/containers/intermediate/Dockerfile.rust-crossbuild-* - - build/intermediate-containers.mk - - Makefile env: AKRI_COMPONENT: rust-crossbuild @@ -35,10 +27,7 @@ jobs: per-arch: if: >- !contains(github.event.pull_request.title, '[IGNORE INTERMEDIATE BUILDS]') && - !contains(github.event.commits[0].message, '[IGNORE INTERMEDIATE BUILDS]') && - (( github.event_name == 'pull_request_target' && github.event.pull_request.head.repo.fork == true ) || - ( github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork == false ) || - ( !(startsWith(github.event_name, 'pull_request')) )) + !contains(github.event.commits[0].message, '[IGNORE INTERMEDIATE BUILDS]') runs-on: ubuntu-latest strategy: matrix: @@ -47,18 +36,10 @@ jobs: - arm32v7 - amd64 steps: - - name: Checkout the merged commit from PR and base branch - uses: actions/checkout@v2 - if: github.event_name == 'pull_request_target' - with: - # pull_request_target is run in the context of the base repository - # of the pull request, so the default ref is master branch and - # ref should be manually set to the head of the PR - ref: refs/pull/${{ github.event.pull_request.number }}/head - - name: Checkout the head commit of the branch - if: ${{ github.event_name != 'pull_request_target' }} uses: actions/checkout@v2 + with: + persist-credentials: false - name: Akri intermediate builds are LONG running and should only be run when absolutely needed if: >- @@ -101,7 +82,5 @@ jobs: container_registry_base_url: ghcr.io container_registry_username: ${{ secrets.crUsername }} container_registry_password: ${{ secrets.crPassword }} - dockerhub_username: ${{ secrets.dhUsername }} - dockerhub_password: ${{ secrets.dhPassword }} makefile_component_name: ${{ env.MAKEFILE_COMPONENT }} platform: ${{ matrix.arch }} diff --git a/.github/workflows/build-udev-video-broker-container.yml 
b/.github/workflows/build-udev-video-broker-container.yml index 3174dcae4..e2e29a932 100644 --- a/.github/workflows/build-udev-video-broker-container.yml +++ b/.github/workflows/build-udev-video-broker-container.yml @@ -10,7 +10,6 @@ on: - build/containers/Dockerfile.udev-video-broker - samples/brokers/udev-video-broker/** - shared/** - - build/setup.sh - version.txt - build/akri-containers.mk - Makefile @@ -23,20 +22,6 @@ on: - build/containers/Dockerfile.udev-video-broker - samples/brokers/udev-video-broker/** - shared/** - - build/setup.sh - - version.txt - - build/akri-containers.mk - - Makefile - pull_request_target: - branches: [ main ] - paths: - - .github/actions/build-component-per-arch/** - - .github/actions/build-component-multi-arch/** - - .github/workflows/build-udev-video-broker-container.yml - - build/containers/Dockerfile.udev-video-broker - - samples/brokers/udev-video-broker/** - - shared/** - - build/setup.sh - version.txt - build/akri-containers.mk - Makefile @@ -51,11 +36,6 @@ env: jobs: per-arch: - # Run workflow pull_request if it is NOT a fork, as pull_request_target if it IS a fork - if: >- - ( github.event_name == 'pull_request_target' && github.event.pull_request.head.repo.fork == true ) || - ( github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork == false ) || - ( !(startsWith(github.event_name, 'pull_request')) ) runs-on: ubuntu-latest timeout-minutes: 40 strategy: @@ -66,18 +46,10 @@ jobs: - amd64 steps: - - name: Checkout the merged commit from PR and base branch - uses: actions/checkout@v2 - if: github.event_name == 'pull_request_target' - with: - # pull_request_target is run in the context of the base repository - # of the pull request, so the default ref is master branch and - # ref should be manually set to the head of the PR - ref: refs/pull/${{ github.event.pull_request.number }}/head - - name: Checkout the head commit of the branch - if: ${{ github.event_name != 'pull_request_target' }} uses: actions/checkout@v2 + with: + persist-credentials: false - name: Prepare To Install uses: actions/setup-node@v1 @@ -100,8 +72,6 @@ jobs: container_registry_base_url: ghcr.io container_registry_username: ${{ secrets.crUsername }} container_registry_password: ${{ secrets.crPassword }} - dockerhub_username: ${{ secrets.dhUsername }} - dockerhub_password: ${{ secrets.dhPassword }} makefile_component_name: ${{ env.MAKEFILE_COMPONENT }} platform: ${{ matrix.arch }} build_rust: "1" @@ -115,6 +85,8 @@ jobs: steps: - name: Checkout repo uses: actions/checkout@v2 + with: + persist-credentials: false - name: Prepare To Install uses: actions/setup-node@v1 @@ -134,6 +106,4 @@ jobs: container_registry_base_url: ghcr.io container_registry_username: ${{ secrets.crUsername }} container_registry_password: ${{ secrets.crPassword }} - dockerhub_username: ${{ secrets.dhUsername }} - dockerhub_password: ${{ secrets.dhPassword }} makefile_component_name: ${{ env.MAKEFILE_COMPONENT }} diff --git a/.github/workflows/build-video-streaming-app-container.yml b/.github/workflows/build-video-streaming-app-container.yml index d6f3694ac..a0d44872b 100644 --- a/.github/workflows/build-video-streaming-app-container.yml +++ b/.github/workflows/build-video-streaming-app-container.yml @@ -23,17 +23,6 @@ on: - version.txt - build/akri-containers.mk - Makefile - pull_request_target: - branches: [ main ] - paths: - - .github/actions/build-component-per-arch/** - - .github/actions/build-component-multi-arch/** - - .github/workflows/build-video-streaming-app-container.yml - - 
build/containers/Dockerfile.video-streaming-app - - samples/apps/video-streaming-app/** - - version.txt - - build/akri-containers.mk - - Makefile release: types: - published @@ -45,11 +34,6 @@ env: jobs: per-arch: - # Run workflow pull_request if it is NOT a fork, as pull_request_target if it IS a fork - if: >- - ( github.event_name == 'pull_request_target' && github.event.pull_request.head.repo.fork == true ) || - ( github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork == false ) || - ( !(startsWith(github.event_name, 'pull_request')) ) runs-on: ubuntu-latest timeout-minutes: 20 strategy: @@ -60,18 +44,10 @@ jobs: - amd64 steps: - - name: Checkout the merged commit from PR and base branch - uses: actions/checkout@v2 - if: github.event_name == 'pull_request_target' - with: - # pull_request_target is run in the context of the base repository - # of the pull request, so the default ref is master branch and - # ref should be manually set to the head of the PR - ref: refs/pull/${{ github.event.pull_request.number }}/head - - name: Checkout the head commit of the branch - if: ${{ github.event_name != 'pull_request_target' }} uses: actions/checkout@v2 + with: + persist-credentials: false - name: Prepare To Install uses: actions/setup-node@v1 @@ -94,8 +70,6 @@ jobs: container_registry_base_url: ghcr.io container_registry_username: ${{ secrets.crUsername }} container_registry_password: ${{ secrets.crPassword }} - dockerhub_username: ${{ secrets.dhUsername }} - dockerhub_password: ${{ secrets.dhPassword }} makefile_component_name: ${{ env.MAKEFILE_COMPONENT }} platform: ${{ matrix.arch }} build_rust: "0" @@ -109,6 +83,8 @@ jobs: steps: - name: Checkout repo uses: actions/checkout@v2 + with: + persist-credentials: false - name: Prepare To Install uses: actions/setup-node@v1 @@ -128,6 +104,4 @@ jobs: container_registry_base_url: ghcr.io container_registry_username: ${{ secrets.crUsername }} container_registry_password: ${{ secrets.crPassword }} - dockerhub_username: ${{ secrets.dhUsername }} - dockerhub_password: ${{ secrets.dhPassword }} makefile_component_name: ${{ env.MAKEFILE_COMPONENT }} diff --git a/.github/workflows/check-rust.yml b/.github/workflows/check-rust.yml index 4f2011eed..8b0a026de 100644 --- a/.github/workflows/check-rust.yml +++ b/.github/workflows/check-rust.yml @@ -8,7 +8,6 @@ on: - '**.rs' - '**/Cargo.toml' - '**/Cargo.lock' - - build/setup.sh pull_request: branches: [ main ] paths: @@ -16,43 +15,20 @@ on: - '**.rs' - '**/Cargo.toml' - '**/Cargo.lock' - - build/setup.sh - pull_request_target: - branches: [ main ] - paths: - - .github/workflows/check-rust.yml - - '**.rs' - - '**/Cargo.toml' - - '**/Cargo.lock' - - build/setup.sh env: CARGO_TERM_COLOR: always jobs: build: - # Run workflow pull_request if it is NOT a fork, as pull_request_target if it IS a fork - if: >- - ( github.event_name == 'pull_request_target' && github.event.pull_request.head.repo.fork == true ) || - ( github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork == false ) || - ( !(startsWith(github.event_name, 'pull_request')) ) - runs-on: ubuntu-latest timeout-minutes: 20 steps: - - name: Checkout the merged commit from PR and base branch - uses: actions/checkout@v2 - if: github.event_name == 'pull_request_target' - with: - # pull_request_target is run in the context of the base repository - # of the pull request, so the default ref is master branch and - # ref should be manually set to the head of the PR - ref: refs/pull/${{ github.event.pull_request.number 
}}/head - - name: Checkout the head commit of the branch - if: ${{ github.event_name != 'pull_request_target' }} uses: actions/checkout@v2 + with: + persist-credentials: false - name: Rust install uses: actions-rs/toolchain@v1 @@ -60,8 +36,12 @@ jobs: toolchain: 1.41.0 override: true components: clippy, rustfmt - - name: Install linux requirement - run: ./build/setup.sh + - name: Install Linux requirements + run: | + apt_dependencies="git curl libssl-dev pkg-config libudev-dev libv4l-dev" + echo "Run apt update and apt install the following dependencies: $apt_dependencies" + sudo apt update + sudo apt install -y $apt_dependencies - name: Check rust format run: cargo fmt --all -- --check - name: Check clippy diff --git a/.github/workflows/check-versioning.yml b/.github/workflows/check-versioning.yml index 8f319051e..bba5b9823 100644 --- a/.github/workflows/check-versioning.yml +++ b/.github/workflows/check-versioning.yml @@ -35,23 +35,6 @@ on: - docs/** - scripts/** - tests/** - pull_request_target: - branches: [ main ] - paths-ignore: - - '.gitignore' - - 'LICENSE' - - '**.md' - - Notice.txt - - '.github/workflows/check-versioning.yml' - - '.github/workflows/check-rust.yml' - - '.github/workflows/run-tarpaulin.yml' - - '.github/workflows/run-test-cases.yml' - - '.github/ISSUE_TEMPLATE/**' - - '.github/CODEOWNERS' - - '.vscode/**' - - docs/** - - scripts/** - - tests/** release: types: - published @@ -61,27 +44,14 @@ env: jobs: build: - # Run workflow pull_request if it is NOT a fork, as pull_request_target if it IS a fork - if: >- - ( github.event_name == 'pull_request_target' && github.event.pull_request.head.repo.fork == true ) || - ( github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork == false ) || - ( !(startsWith(github.event_name, 'pull_request')) ) runs-on: ubuntu-latest timeout-minutes: 5 steps: - - name: Checkout the merged commit from PR and base branch - uses: actions/checkout@v2 - if: github.event_name == 'pull_request_target' - with: - # pull_request_target is run in the context of the base repository - # of the pull request, so the default ref is master branch and - # ref should be manually set to the head of the PR - ref: refs/pull/${{ github.event.pull_request.number }}/head - - name: Checkout the head commit of the branch - if: ${{ github.event_name != 'pull_request_target' }} uses: actions/checkout@v2 + with: + persist-credentials: false # Only run version check for PRs. If PR title does NOT contain "[SAME VERSION]", then ensure that # version.txt is different from what is in main. 
diff --git a/.github/workflows/run-helm.yml b/.github/workflows/run-helm.yml index 77a222478..ffb76df8a 100644 --- a/.github/workflows/run-helm.yml +++ b/.github/workflows/run-helm.yml @@ -13,12 +13,6 @@ on: - .github/workflows/run-helm.yml - deployment/** - version.txt - pull_request_target: - branches: [ main ] - paths: - - .github/workflows/run-helm.yml - - deployment/** - - version.txt release: types: - published @@ -28,25 +22,14 @@ env: jobs: lint-with-current-helm: - # Run workflow pull_request if it is NOT a fork, as pull_request_target if it IS a fork - if: >- - ( github.event_name == 'pull_request_target' && github.event.pull_request.head.repo.fork == true ) || - ( github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork == false ) + if: github.event_name == 'pull_request' runs-on: ubuntu-latest timeout-minutes: 20 steps: - - name: Checkout the merged commit from PR and base branch - uses: actions/checkout@v2 - if: github.event_name == 'pull_request_target' - with: - # pull_request_target is run in the context of the base repository - # of the pull request, so the default ref is master branch and - # ref should be manually set to the head of the PR - ref: refs/pull/${{ github.event.pull_request.number }}/head - - name: Checkout the head commit of the branch - if: ${{ github.event_name != 'pull_request_target' }} uses: actions/checkout@v2 + with: + persist-credentials: false - uses: azure/setup-helm@v1 @@ -55,26 +38,13 @@ jobs: helm: - # Run workflow pull_request if it is NOT a fork, as pull_request_target if it IS a fork - if: >- - ( github.event_name == 'pull_request_target' && github.event.pull_request.head.repo.fork == true ) || - ( github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork == false ) || - ( !(startsWith(github.event_name, 'pull_request')) ) runs-on: ubuntu-latest timeout-minutes: 20 steps: - - name: Checkout the merged commit from PR and base branch - uses: actions/checkout@v2 - if: github.event_name == 'pull_request_target' - with: - # pull_request_target is run in the context of the base repository - # of the pull request, so the default ref is master branch and - # ref should be manually set to the head of the PR - ref: refs/pull/${{ github.event.pull_request.number }}/head - - name: Checkout the head commit of the branch - if: ${{ github.event_name != 'pull_request_target' }} uses: actions/checkout@v2 + with: + persist-credentials: false - uses: azure/setup-helm@v1 with: @@ -123,6 +93,7 @@ jobs: uses: actions/checkout@v2 with: ref: gh-pages + persist-credentials: false - name: Get new chart from artifact path if: (github.event_name == 'release') || (github.event_name == 'push' && github.ref == 'refs/heads/main') || (startsWith(github.event_name, 'pull_request') && github.event.action == 'closed' && github.event.pull_request.merged == true && github.ref != 'refs/heads/main') diff --git a/.github/workflows/run-tarpaulin.yml b/.github/workflows/run-tarpaulin.yml index 24e1f40a1..ab346a363 100644 --- a/.github/workflows/run-tarpaulin.yml +++ b/.github/workflows/run-tarpaulin.yml @@ -7,62 +7,48 @@ on: - .github/workflows/run-tarpaulin.yml - '**.rs' - '**/Cargo.toml' - - build/setup.sh pull_request: branches: [ main ] paths: - .github/workflows/run-tarpaulin.yml - '**.rs' - '**/Cargo.toml' - - build/setup.sh - pull_request_target: - branches: [ main ] - paths: - - .github/workflows/run-tarpaulin.yml - - '**.rs' - - '**/Cargo.toml' - - build/setup.sh env: CARGO_TERM_COLOR: always jobs: build: - # Run workflow 
pull_request if it is NOT a fork, as pull_request_target if it IS a fork - if: >- - ( github.event_name == 'pull_request_target' && github.event.pull_request.head.repo.fork == true ) || - ( github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork == false ) || - ( !(startsWith(github.event_name, 'pull_request')) ) runs-on: ubuntu-latest timeout-minutes: 20 steps: - - name: Checkout the merged commit from PR and base branch - uses: actions/checkout@v2 - if: github.event_name == 'pull_request_target' - with: - # pull_request_target is run in the context of the base repository - # of the pull request, so the default ref is master branch and - # ref should be manually set to the head of the PR - ref: refs/pull/${{ github.event.pull_request.number }}/head - - name: Checkout the head commit of the branch - if: ${{ github.event_name != 'pull_request_target' }} uses: actions/checkout@v2 + with: + persist-credentials: false - - name: Log into dockerhub to avoid throttled anonymous dockerhub pulls - run: echo "${{ secrets.DHPASSWORD }}" | docker login --username "${{ secrets.DHUSERNAME }}" --password-stdin - name: Create tarpaulin instance run: docker create --network host --security-opt seccomp=unconfined -v "${PWD}:/volume" xd009642/tarpaulin:0.12.2 bash -c "echo 'sleep 20m; echo bye' > /tmp/keep_alive.sh; chmod 777 /tmp/keep_alive.sh; /tmp/keep_alive.sh" > container_id.txt - name: Start tarpaulin instance run: docker start $(cat container_id.txt) - name: Install linux requirement in tarpaulin instance - run: docker exec $(cat container_id.txt) sh -c "./build/setup.sh" + run: docker exec $(cat container_id.txt) sh -c "echo Run apt update and apt install the following dependencies - git curl libssl-dev pkg-config libudev-dev libv4l-dev ; apt update ; apt install -y git curl libssl-dev pkg-config libudev-dev libv4l-dev" - name: Install rust requirements in tarpaulin instance run: docker exec $(cat container_id.txt) sh -c "rustup component add rustfmt" - name: Run tarpaulin run: docker exec $(cat container_id.txt) sh -c "RUST_LOG=trace cargo tarpaulin -v --all-features --out Xml" - - name: Upload report to codecov - env: - CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} - run: bash <(curl -s https://codecov.io/bash) + + - name: Upload report to codecov for push + if: (!(startsWith(github.event_name, 'pull_request'))) + uses: codecov/codecov-action@v1 + with: + token: ${{secrets.CODECOV_TOKEN}} + fail_ci_if_error: true + verbose: true + + - name: Archive code coverage results + uses: actions/upload-artifact@v1 + with: + name: code-coverage-report + path: cobertura.xml diff --git a/.github/workflows/run-test-cases.yml b/.github/workflows/run-test-cases.yml index c37edd901..5146cf700 100644 --- a/.github/workflows/run-test-cases.yml +++ b/.github/workflows/run-test-cases.yml @@ -2,6 +2,7 @@ name: Test K3s, Kubernetes, and MicroK8s on: workflow_dispatch: + inputs: pull_request: branches: [ main ] paths: @@ -12,27 +13,10 @@ on: - .github/workflows/run-test-cases.yml - build/containers/Dockerfile.agent - build/containers/Dockerfile.controller + - deployment/helm/** - agent/** - controller/** - shared/** - - build/setup.sh - - version.txt - - build/akri-containers.mk - - Makefile - pull_request_target: - branches: [ main ] - paths: - - test/run-end-to-end.py - - test/run-conservation-of-broker-pod.py - - test/run-helm-install-delete.py - - test/shared_test_code.py - - .github/workflows/run-test-cases.yml - - build/containers/Dockerfile.agent - - build/containers/Dockerfile.controller - - 
agent/** - - controller/** - - shared/** - - build/setup.sh - version.txt - build/akri-containers.mk - Makefile @@ -46,10 +30,10 @@ on: - .github/workflows/run-test-cases.yml - build/containers/Dockerfile.agent - build/containers/Dockerfile.controller + - deployment/helm/** - agent/** - controller/** - shared/** - - build/setup.sh - version.txt - build/akri-containers.mk - Makefile @@ -58,12 +42,46 @@ on: - published jobs: + build-containers: + runs-on: ubuntu-18.04 + timeout-minutes: 35 + + steps: + - name: Checkout the head commit of the branch + uses: actions/checkout@v2 + with: + persist-credentials: false + + - name: Build local containers for PR tests + if: startsWith(github.event_name, 'pull_request') + env: + BUILD_AMD64: 1 + BUILD_ARM32: 0 + BUILD_ARM64: 0 + PREFIX: ghcr.io/deislabs/akri + LABEL_PREFIX: pr + run: | + make akri-build + make controller-build-amd64 + make agent-build-amd64 + docker save ${PREFIX}/agent:${LABEL_PREFIX}-amd64 > agent.tar + docker save ${PREFIX}/controller:${LABEL_PREFIX}-amd64 > controller.tar + + - name: Upload Agent container as artifact + if: startsWith(github.event_name, 'pull_request') + uses: actions/upload-artifact@v2 + with: + name: agent.tar + path: agent.tar + - name: Upload Controller container as artifact + if: startsWith(github.event_name, 'pull_request') + uses: actions/upload-artifact@v2 + with: + name: controller.tar + path: controller.tar + test-cases: - # Run workflow pull_request if it is NOT a fork, as pull_request_target if it IS a fork - if: >- - ( github.event_name == 'pull_request_target' && github.event.pull_request.head.repo.fork == true ) || - ( github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork == false ) || - ( !(startsWith(github.event_name, 'pull_request')) ) + needs: build-containers runs-on: ubuntu-18.04 timeout-minutes: 35 @@ -71,35 +89,41 @@ jobs: fail-fast: false matrix: kube-runtime: - - K3s - - MicroK8s - - Kubernetes + - K3s-1.18 + - K3s-1.19 + - MicroK8s-1.18 + - MicroK8s-1.19 + - Kubernetes-1.16 + - Kubernetes-1.17 + - Kubernetes-1.18 + - Kubernetes-1.19 test-case: - end-to-end include: + - kube-runtime: MicroK8s-1.18 + kube-version: 1.18/stable + - kube-runtime: MicroK8s-1.19 + kube-version: 1.19/stable + - kube-runtime: K3s-1.18 + kube-version: v1.18.9+k3s1 + - kube-runtime: K3s-1.19 + kube-version: v1.19.4+k3s1 + - kube-runtime: Kubernetes-1.16 + kube-version: 1.16.15-00 + - kube-runtime: Kubernetes-1.17 + kube-version: 1.17.14-00 + - kube-runtime: Kubernetes-1.18 + kube-version: 1.18.12-00 + - kube-runtime: Kubernetes-1.19 + kube-version: 1.19.4-00 - test-case: end-to-end test-file: test/run-end-to-end.py steps: - - name: Checkout the merged commit from PR and base branch - uses: actions/checkout@v2 - if: github.event_name == 'pull_request_target' - with: - # pull_request_target is run in the context of the base repository - # of the pull request, so the default ref is master branch and - # ref should be manually set to the head of the PR - ref: refs/pull/${{ github.event.pull_request.number }}/head - - name: Checkout the head commit of the branch - if: ${{ github.event_name != 'pull_request_target' }} uses: actions/checkout@v2 - - - name: Log into dockerhub to avoid throttled anonymous dockerhub pulls - run: echo "${{ secrets.DHPASSWORD }}" | docker login --username "${{ secrets.DHUSERNAME }}" --password-stdin - - - name: Log into ghcr to access intermediate build containers - if: startsWith(github.event_name, 'pull_request') - run: echo "${{ secrets.crPassword }}" | docker 
login --username "${{ secrets.crUsername }}" ghcr.io --password-stdin + with: + persist-credentials: false - name: Setup Python uses: actions/setup-python@v2 @@ -110,25 +134,21 @@ jobs: python -m pip install --upgrade pip pip install kubernetes - - name: Build local containers for PR tests + - name: Download Agent container artifact if: startsWith(github.event_name, 'pull_request') - env: - BUILD_AMD64: 1 - BUILD_ARM32: 0 - BUILD_ARM64: 0 - PREFIX: ghcr.io/deislabs/akri - LABEL_PREFIX: pr - run: | - make akri-build - make controller-build-amd64 - make agent-build-amd64 - docker save ${PREFIX}/agent:${LABEL_PREFIX}-amd64 > agent.tar - docker save ${PREFIX}/controller:${LABEL_PREFIX}-amd64 > controller.tar + uses: actions/download-artifact@v2 + with: + name: agent.tar + - name: Download Controller container artifact + if: startsWith(github.event_name, 'pull_request') + uses: actions/download-artifact@v2 + with: + name: controller.tar - - if: matrix.kube-runtime == 'K3s' + - if: startsWith(matrix.kube-runtime, 'K3s') name: Install K3s env: - INSTALL_K3S_VERSION: v1.18.9+k3s1 + INSTALL_K3S_VERSION: ${{ matrix.kube-version }} run: | sudo curl -sfL https://get.k3s.io | sh - sudo addgroup k3s-admin @@ -138,22 +158,29 @@ jobs: sudo chmod g+r /etc/rancher/k3s/k3s.yaml sudo chmod 666 /etc/rancher/k3s/* mkdir -p ~/.kube/ && cp /etc/rancher/k3s/k3s.yaml ~/.kube/config - echo '--set agent.host.crictl=/usr/local/bin/crictl --set agent.host.dockerShimSock=/run/k3s/containerd/containerd.sock' > /tmp/cri_args_to_test.txt + VERSION="v1.17.0" + curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz + sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C $(pwd) + rm -f crictl-$VERSION-linux-amd64.tar.gz + echo '--set agent.host.crictl=$(pwd)/crictl --set agent.host.dockerShimSock=/run/k3s/containerd/containerd.sock' > /tmp/cri_args_to_test.txt echo 'kubectl' > /tmp/runtime_cmd_to_test.txt echo '~/.kube/config' > /tmp/kubeconfig_path_to_test.txt until kubectl get node ${HOSTNAME,,} -o jsonpath='{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status}' | grep 'Ready=True'; do echo "waiting for k3s to become ready"; sleep 10; done - - if: (startsWith(github.event_name, 'pull_request')) && (matrix.kube-runtime == 'K3s') + - if: (startsWith(github.event_name, 'pull_request')) && (startsWith(matrix.kube-runtime, 'K3s')) name: Import local agent and controller to K3s run: | sudo k3s ctr image import agent.tar sudo k3s ctr image import controller.tar - - if: matrix.kube-runtime == 'Kubernetes' + - if: startsWith(matrix.kube-runtime, 'Kubernetes') name: Install Kubernetes run: | sudo apt-get update -y - sudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni + sudo apt-get install -y --allow-downgrades kubelet=${{ matrix.kube-version }} kubeadm=${{ matrix.kube-version }} kubectl=${{ matrix.kube-version }} + kubectl version && echo "kubectl return code: $?" || echo "kubectl return code: $?" + kubeadm version && echo "kubeadm return code: $?" || echo "kubeadm return code: $?" + kubelet --version && echo "kubelet return code: $?" || echo "kubelet return code: $?" 
sudo swapoff -a sudo kubeadm init sudo mkdir -p $HOME/.kube @@ -165,10 +192,16 @@ jobs: echo '~/.kube/config' > /tmp/kubeconfig_path_to_test.txt until kubectl get node ${HOSTNAME,,} -o jsonpath='{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status}' | grep 'Ready=True'; do echo "waiting for kubernetes to become ready"; sleep 10; done - - if: matrix.kube-runtime == 'MicroK8s' + - if: (startsWith(github.event_name, 'pull_request')) && (startsWith(matrix.kube-runtime, 'Kubernetes')) + name: Import local agent and controller to Kubernetes + run: | + sudo docker load --input agent.tar + sudo docker load --input controller.tar + + - if: startsWith(matrix.kube-runtime, 'MicroK8s') name: Install MicroK8s run: | - sudo snap install microk8s --classic --channel=1.18/stable + sudo snap install microk8s --classic --channel=${{ matrix.kube-version }} sudo microk8s status --wait-ready sudo usermod -a -G microk8s $USER sudo ls -la $HOME/.kube @@ -179,9 +212,6 @@ jobs: sudo microk8s.enable helm3 sudo microk8s.enable rbac sudo microk8s.enable dns - echo "--allow-privileged=true" | sudo tee -a /var/snap/microk8s/current/args/kube-apiserver - sudo microk8s.stop - sudo microk8s.start sudo microk8s.status --wait-ready VERSION="v1.17.0" curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz @@ -191,32 +221,26 @@ jobs: echo 'microk8s kubectl' > /tmp/runtime_cmd_to_test.txt echo '~/.kube/config' > /tmp/kubeconfig_path_to_test.txt - - if: (startsWith(github.event_name, 'pull_request')) && (matrix.kube-runtime == 'MicroK8s') + - if: (startsWith(github.event_name, 'pull_request')) && (startsWith(matrix.kube-runtime, 'MicroK8s')) name: Import local agent and controller to MicroK8s run: | - sudo microk8s ctr image import agent.tar - sudo microk8s ctr image import controller.tar + sleep 15 # 60, 30, 15, and 5 all work in simple tests ... no sleep fails for 1.19.3 + sudo microk8s.status --wait-ready + sudo microk8s ctr images ls + sudo microk8s ctr --debug --timeout 10s images import agent.tar + sudo microk8s ctr --debug --timeout 10s images import controller.tar + sudo microk8s ctr images ls - name: Add Akri Helm Chart run: helm repo add akri-helm-charts https://deislabs.github.io/akri/ - # When event_name==release, the Helm chart is named 'akri' - - if: github.event_name == 'release' - name: Pull akri helm chart when event_name == release - run: echo akri > /tmp/chart_name.txt - # When event_name!=release, the Helm chart is named 'akri-dev' - - if: github.event_name != 'release' - name: Pull akri-dev helm chart when event_name != release - run: echo akri-dev > /tmp/chart_name.txt - # For push and release, we need to wait for the Helm chart and # associated containers to build. - if: github.event_name == 'push' || github.event_name == 'release' name: Set sleep duration before running script to 1500 run: echo 1500 > /tmp/sleep_duration.txt - # For pull_request and pull_request_target, use the locally built - # containers. + # For pull_request, use the locally built containers. - if: startsWith(github.event_name, 'pull_request') name: Tell Helm to use the 'local' labels for container images run: | @@ -229,6 +253,26 @@ jobs: name: Use current version for push run: cat version.txt > /tmp/version_to_test.txt + # For workflow_dispatch and pull_request, use the files in deployment/helm + # as basis for helm install ... 
this enables us to test any changes made to + # the helm chart files in a PR (where no helm chart is published) + - if: github.event_name != 'push' && github.event_name != 'release' + name: Tell Helm to use the files in deployment/helm to build chart + run: | + echo './deployment/helm' > /tmp/helm_chart_location.txt + # For push, use a specific version of the `akri-dev` charts that are built and + # published by the helm workflow. + - if: github.event_name == 'push' + name: Tell Helm to use the `akri-dev` published charts + run: | + echo "akri-helm-charts/akri-dev --version $(cat /tmp/version_to_test.txt)" > /tmp/helm_chart_location.txt + # For release, use a specific version of the `akri` charts that are built and + # published by the helm workflow. + - if: github.event_name == 'release' + name: Tell Helm to use the `akri` published charts + run: | + echo "akri-helm-charts/akri --version $(cat /tmp/version_to_test.txt)" > /tmp/helm_chart_location.txt + - name: Execute test script ${{ matrix.test-file }} run: python ${{ matrix.test-file }} - name: Upload Agent log as artifact diff --git a/CHANGELOG.md b/CHANGELOG.md index 9c73fdd2d..4ac27b876 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,43 @@ +# v0.0.44 + +## Announcing Akri v0.0.44! +Akri v0.0.44 is a pre-release of Akri. + +To find out more about Akri, check out our [README](https://github.com/deislabs/akri/blob/v0.0.44/README.md) and start [contributing](https://github.com/deislabs/akri/blob/v0.0.44/docs/contributing.md) today! + +## New Features +The v0.0.44 release introduces a number of significant improvements! + +* Enable Akri for armv7 +* Create separate Helm charts for releases (akri) and merges (akri-dev) +* Parameterize Helm for udev beyond simple video scenario +* Expand udev discovery by supporting filtering by udev rules that look up the device hierarchy such as SUBSYSTEMS, ATTRIBUTES, DRIVERS, KERNELS, and TAGS +* Parameterize Helm for udev to allow security context +* Remove requirement for agent to execute in privileged container + +View the [full change log](https://github.com/deislabs/akri/compare/v0.0.35...v0.0.44) + +## Breaking Changes +N/A + +## Known Issues +* Documented Helm settings are not currently compatible with K3s v1.19.4+k3s1 + +## Validated With + +| Distribution | Version | +|---|---| +| Kubernetes | v1.19.4 | +| K3s | v1.18.9+k3s1 | +| MicroK8s | 1.18/stable | + +## What's next? +Check out our [roadmap](https://github.com/deislabs/akri/blob/v0.0.44/docs/roadmap.md) to see the features we are looking forward to! + +## Release history +See [CHANGELOG.md](https://github.com/deislabs/akri/blob/v0.0.44/CHANGELOG.md) for more information on what changed in this and previous releases. + + # v0.0.35 ## Announcing the Akri v0.0.35 pre-release! 
diff --git a/Cargo.lock b/Cargo.lock index 72504aecb..9f5168135 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17,7 +17,7 @@ checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" [[package]] name = "agent" -version = "0.0.41" +version = "0.0.44" dependencies = [ "akri-shared", "async-stream", @@ -72,7 +72,7 @@ dependencies = [ [[package]] name = "akri-shared" -version = "0.0.41" +version = "0.0.44" dependencies = [ "async-trait", "bytes 0.5.6", @@ -413,7 +413,7 @@ checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "controller" -version = "0.0.41" +version = "0.0.44" dependencies = [ "akri-shared", "async-std", @@ -2907,7 +2907,7 @@ dependencies = [ [[package]] name = "udev-video-broker" -version = "0.0.41" +version = "0.0.44" dependencies = [ "akri-shared", "env_logger", diff --git a/Cross.toml b/Cross.toml index b209cd12c..ec196e59d 100644 --- a/Cross.toml +++ b/Cross.toml @@ -11,4 +11,4 @@ image = "ghcr.io/deislabs/akri/rust-crossbuild:x86_64-unknown-linux-gnu-0.1.16-0 image = "ghcr.io/deislabs/akri/rust-crossbuild:armv7-unknown-linux-gnueabihf-0.1.16-0.0.7" [target.aarch64-unknown-linux-gnu] -image = "ghcr.io/deislabs/akri/rust-crossbuild:aarch64-unknown-linux-gnu-0.1.16-0.0.7" \ No newline at end of file +image = "ghcr.io/deislabs/akri/rust-crossbuild:aarch64-unknown-linux-gnu-0.1.16-0.0.7" diff --git a/README.md b/README.md index f3c740366..db0c6319f 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ [![Slack channel #akri](https://img.shields.io/badge/slack-akri-blueviolet.svg?logo=slack)](https://kubernetes.slack.com/messages/akri) [![Rust Version](https://img.shields.io/badge/rustc-1.41.0-blue.svg)](https://blog.rust-lang.org/2020/01/30/Rust-1.41.0.html) [![Kubernetes Version](https://img.shields.io/badge/kubernetes-≥%201.16-blue.svg)](https://v1-16.docs.kubernetes.io/) -[![codecov](https://codecov.io/gh/deislabs/akri/branch/main/graph/badge.svg?token=V468HO7CDE)](https://github.com/deislabs/akri/actions?query=workflow%3A%22Tarpaulin+Code+Coverage%22) +[![codecov](https://codecov.io/gh/deislabs/akri/branch/main/graph/badge.svg?token=V468HO7CDE)](https://codecov.io/gh/deislabs/akri) [![Check Rust](https://github.com/deislabs/akri/workflows/Check%20Rust/badge.svg?branch=main&event=push)](https://github.com/deislabs/akri/actions?query=workflow%3A%22Check+Rust%22) [![Tarpaulin Code Coverage](https://github.com/deislabs/akri/workflows/Tarpaulin%20Code%20Coverage/badge.svg?branch=main&event=push)](https://github.com/deislabs/akri/actions?query=workflow%3A%22Tarpaulin+Code+Coverage%22) diff --git a/agent/Cargo.toml b/agent/Cargo.toml index d9731be89..d6697111b 100644 --- a/agent/Cargo.toml +++ b/agent/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "agent" -version = "0.0.41" +version = "0.0.44" authors = ["Kate Goldenring ", ""] edition = "2018" diff --git a/agent/src/protocols/http/discovery_handler.rs b/agent/src/protocols/http/discovery_handler.rs new file mode 100644 index 000000000..892ee6241 --- /dev/null +++ b/agent/src/protocols/http/discovery_handler.rs @@ -0,0 +1,79 @@ +use super::super::{DiscoveryHandler, DiscoveryResult}; + +use akri_shared::akri::configuration::HTTPDiscoveryHandlerConfig; +use async_trait::async_trait; +use failure::Error; +use reqwest::get; +use std::collections::HashMap; + +const BROKER_NAME: &str = "AKRI_HTTP"; +const DEVICE_ENDPOINT: &str = "AKRI_HTTP_DEVICE_ENDPOINT"; + +pub struct HTTPDiscoveryHandler { + discovery_handler_config: HTTPDiscoveryHandlerConfig, +} +impl 
HTTPDiscoveryHandler {
+    pub fn new(discovery_handler_config: &HTTPDiscoveryHandlerConfig) -> Self {
+        trace!("[http:new] Entered");
+        HTTPDiscoveryHandler {
+            discovery_handler_config: discovery_handler_config.clone(),
+        }
+    }
+}
+#[async_trait]
+
+impl DiscoveryHandler for HTTPDiscoveryHandler {
+    async fn discover(&self) -> Result<Vec<DiscoveryResult>, failure::Error> {
+        trace!("[http:discover] Entered");
+
+        let url = self.discovery_handler_config.discovery_endpoint.clone();
+        trace!("[http:discover] url: {}", &url);
+
+        match get(&url).await {
+            Ok(resp) => {
+                trace!(
+                    "[http:discover] Connected to discovery endpoint: {:?} => {:?}",
+                    &url,
+                    &resp
+                );
+
+                // Response is a newline separated list of devices (host:port) or empty
+                let device_list = &resp.text().await?;
+
+                let result = device_list
+                    .lines()
+                    .map(|endpoint| {
+                        trace!("[http:discover:map] Creating DiscoverResult: {}", endpoint);
+                        trace!(
+                            "[http:discover] props.inserting: {}, {}",
+                            BROKER_NAME,
+                            DEVICE_ENDPOINT,
+                        );
+                        let mut props = HashMap::new();
+                        props.insert(BROKER_NAME.to_string(), "http".to_string());
+                        props.insert(DEVICE_ENDPOINT.to_string(), endpoint.to_string());
+                        DiscoveryResult::new(endpoint, props, true)
+                    })
+                    .collect::<Vec<DiscoveryResult>>();
+                trace!("[protocol:http] Result: {:?}", &result);
+                Ok(result)
+            }
+            Err(err) => {
+                trace!(
+                    "[http:discover] Failed to connect to discovery endpoint: {}",
+                    &url
+                );
+                trace!("[http:discover] Error: {}", err);
+
+                Err(failure::format_err!(
+                    "Failed to connect to discovery endpoint results: {:?}",
+                    err
+                ))
+            }
+        }
+    }
+    fn are_shared(&self) -> Result<bool, Error> {
+        trace!("[http:are_shared] Entered");
+        Ok(true)
+    }
+}
diff --git a/agent/src/protocols/http/mod.rs b/agent/src/protocols/http/mod.rs
new file mode 100644
index 000000000..090496fd3
--- /dev/null
+++ b/agent/src/protocols/http/mod.rs
@@ -0,0 +1,2 @@
+mod discovery_handler;
+pub use self::discovery_handler::HTTPDiscoveryHandler;
diff --git a/agent/src/protocols/udev/discovery_impl.rs b/agent/src/protocols/udev/discovery_impl.rs
index 14ab960ed..93d1adc86 100644
--- a/agent/src/protocols/udev/discovery_impl.rs
+++ b/agent/src/protocols/udev/discovery_impl.rs
@@ -1,7 +1,8 @@
 extern crate udev;
 use super::udev_device::{
-    get_devnode, get_devpath, get_driver, get_property_value, get_sysname, DeviceExt,
+    get_attribute_value, get_devnode, get_devpath, get_driver, get_parent, get_property_value,
+    get_subsystem, get_sysname, DeviceExt,
 };
 use super::udev_enumerator::Enumerator;
 use pest::iterators::Pair;
@@ -43,7 +44,6 @@ pub fn do_parse_and_find(
 /// Udev discovery is only interested in match operations ("==", "!="), so all action ("=" , "+=" , "-=" , ":=") operations
 /// will be ignored.
/// Udev discovery is only interested in match fields, so all action fields, such as TEST, are ignored -/// Some match fields that look up the device hierarchy, such as SUBSYSTEMS, are yet to be supported and are also ignored fn parse_udev_rule(udev_rule_string: &str) -> Result<Vec<UdevFilter>, failure::Error> { info!( "parse_udev_rule - enter for udev rule string {}", udev_rule_string ); @@ -152,6 +152,10 @@ fn find_devices( if let Some(devnode) = get_devnode(&device) { Some(devnode.to_str().unwrap().to_string()) } else { + trace!( + "find_devices - ignoring device with devpath {:?} due to having no devnode", + get_devpath(&device) + ); None } }) @@ -252,7 +256,7 @@ } /// This iterates over devices returned by filtered Enumerator and inspects the device's fields to see if they match/don't match -/// the fields in the remaining UdevFilters that cound not be applied to Enumerator. +/// the fields in the remaining UdevFilters that could not be applied to Enumerator. fn filter_by_remaining_udev_filters( devices: Vec<impl DeviceExt>, udev_filters: Vec<&UdevFilter>, ) -> Vec<impl DeviceExt> { trace!( "filter_by_remaining_udev_filters - enter with {} devices", devices.len() ); let mut mutable_devices = devices; for udev_filter in udev_filters { + let value_regex = Regex::new(&udev_filter.value).unwrap(); + let is_equality = udev_filter.operation == Rule::equality; match udev_filter.field.as_rule() { Rule::devpath => { - let re = Regex::new(&udev_filter.value).unwrap(); // Filter for inequality. Equality already accounted for in filter_by_match_udev_filters mutable_devices = mutable_devices .into_iter() .filter(|device| { let devpath = get_devpath(device).to_str().unwrap(); - match re.find(devpath) { - Some(found_string) => { - found_string.start() != 0 || found_string.end() != devpath.len() - } - None => true, - } + !is_regex_match(devpath, &value_regex) }) .collect(); } Rule::kernel => { - let re = Regex::new(&udev_filter.value).unwrap(); // Filter for inequality. Equality already accounted for in filter_by_match_udev_filters mutable_devices = mutable_devices .into_iter() .filter(|device| { let sysname = get_sysname(device).to_str().unwrap(); - match re.find(sysname) { - Some(found_string) => { - found_string.start() != 0 || found_string.end() != sysname.len() - } - None => true, - } + !is_regex_match(sysname, &value_regex) }) .collect(); } Rule::tag => { - let re = Regex::new(&udev_filter.value).unwrap(); mutable_devices = mutable_devices .into_iter() .filter(|device| { @@ -307,12 +300,9 @@ // Return false if discover a tag that should be excluded let mut include = true; for tag in tags { - if let Some(found_string) = re.find(tag) { - if found_string.start() == 0 && found_string.end() == tag.len() - { - include = false; - break; - } + if is_regex_match(tag, &value_regex) { + include = false; + break; } } include @@ -333,20 +323,13 @@ .next() .unwrap() .as_str(); - let re = Regex::new(&udev_filter.value).unwrap(); // Filter for inequality.
Equality already accounted for in filter_by_match_udev_filters mutable_devices = mutable_devices .into_iter() .filter(|device| { if let Some(property_value) = get_property_value(device, key) { let property_value_str = property_value.to_str().unwrap(); - match re.find(property_value_str) { - Some(found_string) => { - found_string.start() != 0 - || found_string.end() != property_value_str.len() - } - None => true, - } + !is_regex_match(property_value_str, &value_regex) } else { true } @@ -354,26 +337,82 @@ fn filter_by_remaining_udev_filters( .collect(); } Rule::driver => { - let re = Regex::new(&udev_filter.value).unwrap(); - let is_equality = udev_filter.operation == Rule::equality; mutable_devices = mutable_devices .into_iter() .filter(|device| match get_driver(device) { Some(driver) => { let driver = driver.to_str().unwrap(); - match re.find(driver) { - Some(found_string) => { - let is_match = found_string.start() == 0 - && found_string.end() == driver.len(); - (is_equality && is_match) || (!is_equality && !is_match) - } - None => !is_equality, - } + filter_equality_check(is_equality, is_regex_match(driver, &value_regex)) } None => !is_equality, }) .collect(); } + Rule::subsystems => { + mutable_devices = mutable_devices + .into_iter() + .filter(|device| { + filter_equality_check( + is_equality, + device_or_parents_have_subsystem(device, &value_regex), + ) + }) + .collect(); + } + Rule::attributes => { + let key = udev_filter + .field + .clone() + .into_inner() + .next() + .unwrap() + .into_inner() + .next() + .unwrap() + .as_str(); + mutable_devices = mutable_devices + .into_iter() + .filter(|device| { + filter_equality_check( + is_equality, + device_or_parents_have_attribute(device, key, &value_regex), + ) + }) + .collect(); + } + Rule::drivers => { + mutable_devices = mutable_devices + .into_iter() + .filter(|device| { + filter_equality_check( + is_equality, + device_or_parents_have_driver(device, &value_regex), + ) + }) + .collect(); + } + Rule::kernels => { + mutable_devices = mutable_devices + .into_iter() + .filter(|device| { + filter_equality_check( + is_equality, + device_or_parents_have_sysname(device, &value_regex), + ) + }) + .collect(); + } + Rule::tags => { + mutable_devices = mutable_devices + .into_iter() + .filter(|device| { + filter_equality_check( + is_equality, + device_or_parents_have_tag(device, &value_regex), + ) + }) + .collect(); + } _ => { error!("filter_by_remaining_udev_filters - encountered unsupported field"); } @@ -382,6 +421,129 @@ fn filter_by_remaining_udev_filters( mutable_devices } +/// Check whether the device should be selected based on equality and field matching +fn filter_equality_check(is_equality: bool, is_match: bool) -> bool { + (is_equality && is_match) || (!is_equality && !is_match) +} + +/// Check to see if the current value is a regex match of the requested value. +/// Ensure that the match is exclusively on the value to be tested. For example, for the regex `video[0-9]*`, +/// the values `video0` and `video10` should match; however, `blahvideo0blah` should not be accepted as a match. +fn is_regex_match(test_value: &str, value_regex: &Regex) -> bool { + if let Some(value_containing_match) = value_regex.find(test_value) { + value_containing_match.start() == 0 && value_containing_match.end() == test_value.len() + } else { + false + } +} + +/// Recursively look up a device's hierarchy to see if it or one of its ancestors has a specified subsystem. 
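+/// For example, `SUBSYSTEMS=="usb"` selects a device whose own subsystem, or the subsystem of any of its ancestors, matches `usb`.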
+fn device_or_parents_have_subsystem(device: &impl DeviceExt, value_regex: &Regex) -> bool { + match get_subsystem(device) { + Some(subsystem) => { + let subsystem_str = subsystem.to_str().unwrap(); + if is_regex_match(subsystem_str, value_regex) { + true + } else { + match get_parent(device) { + Some(parent) => device_or_parents_have_subsystem(&parent, value_regex), + None => false, + } + } + } + None => match get_parent(device) { + Some(parent) => device_or_parents_have_subsystem(&parent, value_regex), + None => false, + }, + } +} + +/// Recursively look up a device's hierarchy to see if it or one of its ancestors has a specified attribute. +fn device_or_parents_have_attribute( + device: &impl DeviceExt, + key: &str, + value_regex: &Regex, +) -> bool { + match get_attribute_value(device, key) { + Some(attribute_value) => { + let attribute_value_str = attribute_value.to_str().unwrap(); + if is_regex_match(attribute_value_str, value_regex) { + true + } else { + match get_parent(device) { + Some(parent) => device_or_parents_have_attribute(&parent, key, value_regex), + None => false, + } + } + } + None => match get_parent(device) { + Some(parent) => device_or_parents_have_attribute(&parent, key, value_regex), + None => false, + }, + } +} + +/// Recursively look up a device's hierarchy to see if it or one of its ancestors has a specified driver. +fn device_or_parents_have_driver(device: &impl DeviceExt, value_regex: &Regex) -> bool { + match get_driver(device) { + Some(driver) => { + let driver_str = driver.to_str().unwrap(); + if is_regex_match(driver_str, value_regex) { + true + } else { + match get_parent(device) { + Some(parent) => device_or_parents_have_driver(&parent, value_regex), + None => false, + } + } + } + None => match get_parent(device) { + Some(parent) => device_or_parents_have_driver(&parent, value_regex), + None => false, + }, + } +} + +/// Recursively look up a device's hierarchy to see if it or one of its ancestors has a specified sysname aka kernel. +fn device_or_parents_have_sysname(device: &impl DeviceExt, value_regex: &Regex) -> bool { + let sysname = get_sysname(device).to_str().unwrap(); + if is_regex_match(sysname, value_regex) { + true + } else { + match get_parent(device) { + Some(parent) => device_or_parents_have_sysname(&parent, value_regex), + None => false, + } + } +} + +/// Recursively look up a device's hierarchy to see if it or one of its ancestors has a specified tag.
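+/// A device's tags are stored as a colon-separated list in its TAGS property (e.g. `tag0:middle_tag:tag`), and each tag is tested against the filter's regex.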
+fn device_or_parents_have_tag(device: &impl DeviceExt, value_regex: &Regex) -> bool { + if let Some(tags) = get_property_value(device, TAGS) { + let tags = tags.to_str().unwrap().split(':'); + let mut has_tag = false; + for tag in tags { + if is_regex_match(tag, value_regex) { + has_tag = true; + break; + } + } + if has_tag { + true + } else { + match get_parent(device) { + Some(parent) => device_or_parents_have_tag(&parent, value_regex), + None => false, + } + } + } else { + match get_parent(device) { + Some(parent) => device_or_parents_have_tag(&parent, value_regex), + None => false, + } + } +} + #[cfg(test)] mod discovery_tests { use super::super::udev_enumerator::{create_enumerator, MockEnumerator}; @@ -393,13 +555,16 @@ mod discovery_tests { io::{prelude::*, BufReader}, path::Path, }; - + #[derive(Clone)] pub struct MockDevice<'a> { pub devpath: String, pub devnode: String, pub sysname: String, pub properties: std::collections::HashMap<String, String>, pub driver: Option<&'a OsStr>, + pub subsystem: Option<&'a OsStr>, + pub attributes: std::collections::HashMap<String, String>, + pub parent: Box<Option<MockDevice<'a>>>, } impl<'a> DeviceExt for MockDevice<'a> { @@ -419,9 +584,44 @@ mod discovery_tests { None } } + fn mockable_attribute_value(&self, property: &str) -> Option<&OsStr> { + if let Some(value) = self.attributes.get(property) { + Some(OsStr::new(value)) + } else { + None + } + } fn mockable_driver(&self) -> Option<&OsStr> { self.driver } + fn mockable_subsystem(&self) -> Option<&OsStr> { + self.subsystem + } + fn mockable_parent(&self) -> Option<Self> { + *self.parent.clone() + } + } + + fn create_mock_device<'a>( + devpath: &str, + devnode: &str, + sysname: &str, + properties: HashMap<String, String>, + attributes: HashMap<String, String>, + driver: Option<&'a OsStr>, + subsystem: Option<&'a OsStr>, + parent: Option<MockDevice<'a>>, + ) -> MockDevice<'a> { + MockDevice { + devpath: devpath.to_string(), + devnode: devnode.to_string(), + sysname: sysname.to_string(), + properties, + attributes, + driver, + subsystem, + parent: Box::new(parent), + } } #[test] @@ -557,48 +757,66 @@ mod discovery_tests { tag_exclude_properties.insert("TAGS".to_string(), "tag3:other:tag2".to_string()); let mut id_exclude_properties = std::collections::HashMap::new(); id_exclude_properties.insert("ID".to_string(), "id_num".to_string()); - let mock_device_to_exclude0 = MockDevice { - devpath: "/devices/path/exclude".to_string(), - devnode: "/dev/exclude".to_string(), - sysname: "/sys/mock0".to_string(), - properties: HashMap::new(), - driver: Some(OsStr::new("include")), - }; - let mock_device_to_exclude1 = MockDevice { - devpath: "/devices/path/include".to_string(), - devnode: "/dev/exclude".to_string(), - sysname: "/sys/mock1".to_string(), - properties: HashMap::new(), - driver: Some(OsStr::new("exclude")), - }; - let mock_device_to_include1 = MockDevice { - devpath: "/devices/path/include".to_string(), - devnode: "/dev/include".to_string(), - sysname: "/sys/mock2".to_string(), - properties: include_properties, - driver: Some(OsStr::new("include")), - }; - let mock_device_to_exclude3 = MockDevice { - devpath: "/devices/path/include".to_string(), - devnode: "/dev/include".to_string(), - sysname: "/sys/mock3".to_string(), - properties: tag_exclude_properties, - driver: Some(OsStr::new("include")), - }; - let mock_device_to_include2 = MockDevice { - devpath: "/devices/path/include".to_string(), - devnode: "/dev/include".to_string(), - sysname: "/sys/mock4".to_string(), - properties: HashMap::new(), - driver: Some(OsStr::new("include")), - }; - let mock_device_to_exclude4 = MockDevice { - devpath:
"/devices/path/include".to_string(), - devnode: "/dev/include".to_string(), - sysname: "/sys/mock5".to_string(), - properties: id_exclude_properties, - driver: Some(OsStr::new("include")), - }; + let mock_device_to_exclude0 = create_mock_device( + "/devices/path/exclude", + "/dev/exclude", + "mock0", + HashMap::new(), + HashMap::new(), + Some(OsStr::new("include")), + None, + None, + ); + let mock_device_to_exclude1 = create_mock_device( + "/devices/path/include", + "/dev/exclude", + "mock1", + HashMap::new(), + HashMap::new(), + Some(OsStr::new("exclude")), + None, + None, + ); + let mock_device_to_include1 = create_mock_device( + "/devices/path/include", + "/dev/include", + "mock2", + HashMap::new(), + HashMap::new(), + Some(OsStr::new("include")), + None, + None, + ); + let mock_device_to_exclude3 = create_mock_device( + "/devices/path/include", + "/dev/include", + "mock3", + tag_exclude_properties, + HashMap::new(), + Some(OsStr::new("include")), + None, + None, + ); + let mock_device_to_include2 = create_mock_device( + "/devices/path/include", + "/dev/include", + "mock4", + HashMap::new(), + HashMap::new(), + Some(OsStr::new("include")), + None, + None, + ); + let mock_device_to_exclude4 = create_mock_device( + "/devices/path/include", + "/dev/include", + "mock5", + id_exclude_properties, + HashMap::new(), + Some(OsStr::new("include")), + None, + None, + ); let devices = vec![ mock_device_to_exclude0, mock_device_to_exclude1, @@ -612,27 +830,460 @@ mod discovery_tests { let filtered_devices = filter_by_remaining_udev_filters(devices, udev_filters); assert_eq!(filtered_devices.len(), 2); + assert_eq!(get_sysname(&filtered_devices[0]).to_str().unwrap(), "mock2"); + assert_eq!(get_sysname(&filtered_devices[1]).to_str().unwrap(), "mock4"); + } + + #[test] + fn test_filter_by_driver() { + let match_rule = "DRIVER==\"some driver\""; + let mock_device = create_mock_device( + "/devices/path/include", + "/dev/include", + "mock", + HashMap::new(), + HashMap::new(), + Some(OsStr::new("another driver")), + None, + None, + ); + let udev_filters = parse_udev_rule(match_rule).unwrap(); + let udev_filters_ref: Vec<&UdevFilter> = udev_filters.iter().collect(); + let filtered_devices = + filter_by_remaining_udev_filters(vec![mock_device.clone()], udev_filters_ref); + assert_eq!(filtered_devices.len(), 0); + + let nomatch_rule = "DRIVER!=\"some driver\""; + let udev_filters = parse_udev_rule(nomatch_rule).unwrap(); + let udev_filters_ref: Vec<&UdevFilter> = udev_filters.iter().collect(); + let filtered_devices = + filter_by_remaining_udev_filters(vec![mock_device], udev_filters_ref); + assert_eq!(filtered_devices.len(), 1); + } + + // Test that hierarchy fields also check for match with device OR parent device + #[test] + fn test_filter_by_hierarchy_field() { + let rule = "SUBSYSTEMS==\"usb\", ATTRS{someKey}==\"value\", TAGS==\"tag[0-9]*\", KERNELS==\"usb[0-9]*\", DRIVERS==\"some driver\""; + let mut attributes = std::collections::HashMap::new(); + attributes.insert("someKey".to_string(), "value".to_string()); + let mut properties = std::collections::HashMap::new(); + properties.insert("TAGS".to_string(), "tag0:middle_tag:tag".to_string()); + let mock_device = create_mock_device( + "/devices/path/usb", + "/dev/node", + "usb1", + properties, + attributes, + Some(OsStr::new("some driver")), + Some(OsStr::new("usb")), + None, + ); + let udev_filters = parse_udev_rule(rule).unwrap(); + let udev_filters: Vec<&UdevFilter> = udev_filters.iter().collect(); + let filtered_devices = + 
filter_by_remaining_udev_filters(vec![mock_device.clone()], udev_filters); + + assert_eq!(filtered_devices.len(), 1); + assert_eq!(get_sysname(&filtered_devices[0]).to_str().unwrap(), "usb1"); + + let rule = "SUBSYSTEMS==\"usb\", ATTRS{someKey}==\"value\", TAGS==\"tag[0-9]*\", KERNELS==\"usb[0-9]*\", DRIVERS!=\"some driver\""; + let udev_filters = parse_udev_rule(rule).unwrap(); + let udev_filters: Vec<&UdevFilter> = udev_filters.iter().collect(); + let filtered_devices = filter_by_remaining_udev_filters(vec![mock_device], udev_filters); + assert_eq!(filtered_devices.len(), 0); + } + + #[test] + fn test_filter_by_subsystems() { + let rule = "SUBSYSTEMS==\"usb\""; + let mock_usb_grandparent = create_mock_device( + "/devices/path/usb", + "/dev/node", + "usb-grandparent", + HashMap::new(), + HashMap::new(), + None, + Some(OsStr::new("usb")), + None, + ); + + let mock_usb_parent = create_mock_device( + "/devices/path/usb", + "/dev/node", + "usb-parent", + HashMap::new(), + HashMap::new(), + None, + None, + Some(mock_usb_grandparent.clone()), + ); + let mock_pci_parent = create_mock_device( + "/devices/path", + "/dev/node", + "pci-parent", + HashMap::new(), + HashMap::new(), + None, + Some(OsStr::new("pci")), + None, + ); + let mock_device_pci_child = create_mock_device( + "/devices/path", + "/dev/node", + "pci-child", + HashMap::new(), + HashMap::new(), + None, + Some(OsStr::new("random")), + Some(mock_pci_parent), + ); + let mock_device_usb_child = create_mock_device( + "/devices/path", + "/dev/node", + "usb-child", + HashMap::new(), + HashMap::new(), + Some(OsStr::new("driver")), + Some(OsStr::new("random")), + Some(mock_usb_parent.clone()), + ); + let devices = vec![mock_device_pci_child, mock_device_usb_child]; + let udev_filters = parse_udev_rule(rule).unwrap(); + let udev_filters: Vec<&UdevFilter> = udev_filters.iter().collect(); + let filtered_devices = filter_by_remaining_udev_filters(devices.clone(), udev_filters); + + assert_eq!(filtered_devices.len(), 1); assert_eq!( get_sysname(&filtered_devices[0]).to_str().unwrap(), - "/sys/mock2" + "usb-child" ); + + let rule = "SUBSYSTEMS==\"pci\""; + let udev_filters = parse_udev_rule(rule).unwrap(); + let udev_filters: Vec<&UdevFilter> = udev_filters.iter().collect(); + let filtered_devices = filter_by_remaining_udev_filters(devices.clone(), udev_filters); + assert_eq!(filtered_devices.len(), 1); assert_eq!( - get_sysname(&filtered_devices[1]).to_str().unwrap(), - "/sys/mock4" - ); - - let rule = "DRIVER==\"include\""; - let mock_device = MockDevice { - devpath: "/devices/path/include".to_string(), - devnode: "/dev/include".to_string(), - sysname: "/sys/mock3".to_string(), - properties: HashMap::new(), - driver: Some(OsStr::new("not_included")), - }; + get_sysname(&filtered_devices[0]).to_str().unwrap(), + "pci-child" + ); + + let rule = "SUBSYSTEMS!=\"pci\""; let udev_filters = parse_udev_rule(rule).unwrap(); let udev_filters: Vec<&UdevFilter> = udev_filters.iter().collect(); - let filtered_devices = filter_by_remaining_udev_filters(vec![mock_device], udev_filters); - assert_eq!(filtered_devices.len(), 0); + let filtered_devices = filter_by_remaining_udev_filters(devices.clone(), udev_filters); + assert_eq!(filtered_devices.len(), 1); + assert_eq!( + get_sysname(&filtered_devices[0]).to_str().unwrap(), + "usb-child" + ); + } + + #[test] + fn test_filter_by_attrs() { + let rule = "ATTRS{someKey}==\"value\""; + let mut attributes = std::collections::HashMap::new(); + attributes.insert("someKey".to_string(), "value".to_string()); + let mut 
attributes2 = std::collections::HashMap::new(); + attributes2.insert("someKey".to_string(), "value2".to_string()); + let mock_usb_grandparent = create_mock_device( + "/devices/path", + "/dev/node", + "usb-grandparent", + HashMap::new(), + attributes, + None, + None, + None, + ); + let mock_usb_parent = create_mock_device( + "/devices/path", + "/dev/node", + "usb-parent", + HashMap::new(), + HashMap::new(), + None, + Some(OsStr::new("usb")), + Some(mock_usb_grandparent), + ); + let mock_pci_parent = create_mock_device( + "/devices/path", + "/dev/node", + "pci-parent", + HashMap::new(), + attributes2, + None, + Some(OsStr::new("pci")), + None, + ); + let mock_device_pci_child = create_mock_device( + "/devices/path", + "/dev/node", + "pci-child", + HashMap::new(), + HashMap::new(), + None, + Some(OsStr::new("random")), + Some(mock_pci_parent), + ); + let mock_device_usb_child = create_mock_device( + "/devices/path", + "/dev/node", + "usb-child", + HashMap::new(), + HashMap::new(), + Some(OsStr::new("driver")), + Some(OsStr::new("random")), + Some(mock_usb_parent), + ); + let devices = vec![mock_device_pci_child, mock_device_usb_child]; + let udev_filters = parse_udev_rule(rule).unwrap(); + let udev_filters: Vec<&UdevFilter> = udev_filters.iter().collect(); + let filtered_devices = filter_by_remaining_udev_filters(devices.clone(), udev_filters); + + assert_eq!(filtered_devices.len(), 1); + assert_eq!( + get_sysname(&filtered_devices[0]).to_str().unwrap(), + "usb-child" + ); + + let rule = "ATTRS{someKey}!=\"value\""; + let udev_filters = parse_udev_rule(rule).unwrap(); + let udev_filters: Vec<&UdevFilter> = udev_filters.iter().collect(); + let filtered_devices = filter_by_remaining_udev_filters(devices, udev_filters); + assert_eq!(filtered_devices.len(), 1); + assert_eq!( + get_sysname(&filtered_devices[0]).to_str().unwrap(), + "pci-child" + ); + } + + #[test] + fn test_filter_by_drivers() { + let rule = "DRIVERS==\"some driver\""; + let mock_usb_grandparent = create_mock_device( + "/devices/path", + "/dev/node", + "usb1", + HashMap::new(), + HashMap::new(), + Some(OsStr::new("some driver")), + Some(OsStr::new("usb")), + None, + ); + let mock_parent = create_mock_device( + "/devices/path", + "/dev/node", + "random", + HashMap::new(), + HashMap::new(), + None, + None, + Some(mock_usb_grandparent), + ); + let mock_pci_parent = create_mock_device( + "/devices/path", + "/dev/node", + "pci1", + HashMap::new(), + HashMap::new(), + None, + Some(OsStr::new("pci")), + None, + ); + let mock_device_pci_child = create_mock_device( + "/devices/path", + "/dev/node", + "pci-child", + HashMap::new(), + HashMap::new(), + None, + Some(OsStr::new("random")), + Some(mock_pci_parent), + ); + let mock_device_usb_child = create_mock_device( + "/devices/path", + "/dev/node", + "usb-child", + HashMap::new(), + HashMap::new(), + Some(OsStr::new("driver")), + Some(OsStr::new("random")), + Some(mock_parent), + ); + let devices = vec![mock_device_pci_child, mock_device_usb_child]; + let udev_filters = parse_udev_rule(rule).unwrap(); + let udev_filters: Vec<&UdevFilter> = udev_filters.iter().collect(); + let filtered_devices = filter_by_remaining_udev_filters(devices.clone(), udev_filters); + + assert_eq!(filtered_devices.len(), 1); + assert_eq!( + get_sysname(&filtered_devices[0]).to_str().unwrap(), + "usb-child" + ); + + let rule = "DRIVERS!=\"some driver\""; + let udev_filters = parse_udev_rule(rule).unwrap(); + let udev_filters: Vec<&UdevFilter> = udev_filters.iter().collect(); + let filtered_devices = 
filter_by_remaining_udev_filters(devices, udev_filters); + assert_eq!(filtered_devices.len(), 1); + assert_eq!( + get_sysname(&filtered_devices[0]).to_str().unwrap(), + "pci-child" + ); + } + + #[test] + fn test_filter_by_tags() { + let rule = "TAGS==\"tag[0-9]*\""; + let mut properties = std::collections::HashMap::new(); + properties.insert("TAGS".to_string(), "tag0:middle_tag:tag".to_string()); + let mock_usb_grandparent = create_mock_device( + "/devices/path", + "/dev/node", + "usb1", + properties, + HashMap::new(), + Some(OsStr::new("some driver")), + Some(OsStr::new("usb")), + None, + ); + let mock_parent = create_mock_device( + "/devices/path", + "/dev/node", + "random", + HashMap::new(), + HashMap::new(), + None, + None, + Some(mock_usb_grandparent), + ); + let mock_pci_parent = create_mock_device( + "/devices/path", + "/dev/node", + "pci1", + HashMap::new(), + HashMap::new(), + None, + Some(OsStr::new("pci")), + None, + ); + let mock_device_pci_child = create_mock_device( + "/devices/path", + "/dev/node", + "pci-child", + HashMap::new(), + HashMap::new(), + None, + Some(OsStr::new("random")), + Some(mock_pci_parent), + ); + let mock_device_usb_child = create_mock_device( + "/devices/path", + "/dev/node", + "usb-child", + HashMap::new(), + HashMap::new(), + Some(OsStr::new("driver")), + Some(OsStr::new("random")), + Some(mock_parent), + ); + let devices = vec![mock_device_pci_child, mock_device_usb_child]; + let udev_filters = parse_udev_rule(rule).unwrap(); + let udev_filters: Vec<&UdevFilter> = udev_filters.iter().collect(); + let filtered_devices = filter_by_remaining_udev_filters(devices.clone(), udev_filters); + + assert_eq!(filtered_devices.len(), 1); + assert_eq!( + get_sysname(&filtered_devices[0]).to_str().unwrap(), + "usb-child" + ); + + let rule = "TAGS!=\"tag0\""; + let udev_filters = parse_udev_rule(rule).unwrap(); + let udev_filters: Vec<&UdevFilter> = udev_filters.iter().collect(); + let filtered_devices = filter_by_remaining_udev_filters(devices, udev_filters); + assert_eq!(filtered_devices.len(), 1); + assert_eq!( + get_sysname(&filtered_devices[0]).to_str().unwrap(), + "pci-child" + ); + } + + #[test] + fn test_filter_by_kernels() { + let rule = "KERNELS==\"usb[0-9]*\""; + let mock_usb_grandparent = create_mock_device( + "/devices/path", + "/dev/node", + "usb1", + HashMap::new(), + HashMap::new(), + None, + Some(OsStr::new("usb")), + None, + ); + let mock_parent = create_mock_device( + "/devices/path", + "/dev/node", + "random", + HashMap::new(), + HashMap::new(), + None, + None, + Some(mock_usb_grandparent), + ); + let mock_pci_parent = create_mock_device( + "/devices/path", + "/dev/node", + "pci1", + HashMap::new(), + HashMap::new(), + None, + Some(OsStr::new("pci")), + None, + ); + let mock_device_pci_child = create_mock_device( + "/devices/path", + "/dev/node", + "pci-child", + HashMap::new(), + HashMap::new(), + None, + Some(OsStr::new("random")), + Some(mock_pci_parent), + ); + let mock_device_usb_child = create_mock_device( + "/devices/path", + "/dev/node", + "usb-child", + HashMap::new(), + HashMap::new(), + Some(OsStr::new("driver")), + Some(OsStr::new("random")), + Some(mock_parent), + ); + let devices = vec![mock_device_pci_child, mock_device_usb_child]; + let udev_filters = parse_udev_rule(rule).unwrap(); + let udev_filters: Vec<&UdevFilter> = udev_filters.iter().collect(); + let filtered_devices = filter_by_remaining_udev_filters(devices.clone(), udev_filters); + + assert_eq!(filtered_devices.len(), 1); + assert_eq!( + 
get_sysname(&filtered_devices[0]).to_str().unwrap(), + "usb-child" + ); + + let rule = "KERNELS!=\"usb[0-9]*\""; + let udev_filters = parse_udev_rule(rule).unwrap(); + let udev_filters: Vec<&UdevFilter> = udev_filters.iter().collect(); + let filtered_devices = filter_by_remaining_udev_filters(devices, udev_filters); + assert_eq!(filtered_devices.len(), 1); + assert_eq!( + get_sysname(&filtered_devices[0]).to_str().unwrap(), + "pci-child" + ); } // Only tests that proper match calls were made diff --git a/agent/src/protocols/udev/mod.rs b/agent/src/protocols/udev/mod.rs index 35c268037..37afd0de0 100644 --- a/agent/src/protocols/udev/mod.rs +++ b/agent/src/protocols/udev/mod.rs @@ -8,12 +8,17 @@ pub mod udev_device { use std::{ffi::OsStr, path::Path}; /// Extension Trait for udev::Device. Enables creation of MockDevice for testing. - pub trait DeviceExt { + pub trait DeviceExt: Sized { fn mockable_devpath(&self) -> &OsStr; fn mockable_devnode(&self) -> Option<&Path>; fn mockable_sysname(&self) -> &OsStr; fn mockable_property_value(&self, property: &str) -> Option<&OsStr>; + fn mockable_attribute_value(&self, attribute: &str) -> Option<&OsStr>; fn mockable_driver(&self) -> Option<&OsStr>; + fn mockable_subsystem(&self) -> Option<&OsStr>; + fn mockable_parent(&self) -> Option<Self> + where + Self: Sized; } impl DeviceExt for udev::Device { @@ -29,9 +34,18 @@ pub mod udev_device { fn mockable_property_value(&self, property: &str) -> Option<&OsStr> { self.property_value(property) } + fn mockable_attribute_value(&self, attribute: &str) -> Option<&OsStr> { + self.attribute_value(attribute) + } fn mockable_driver(&self) -> Option<&OsStr> { self.driver() } + fn mockable_subsystem(&self) -> Option<&OsStr> { + self.subsystem() + } + fn mockable_parent(&self) -> Option<Self> { + self.parent() + } } pub fn get_devpath(device: &impl DeviceExt) -> &OsStr { @@ -53,9 +67,24 @@ pub mod udev_device { device.mockable_property_value(property) } + pub fn get_attribute_value<'a, 'b>( + device: &'a impl DeviceExt, + attribute: &'b str, + ) -> Option<&'a OsStr> { + device.mockable_attribute_value(attribute) + } + pub fn get_driver(device: &impl DeviceExt) -> Option<&OsStr> { device.mockable_driver() } + + pub fn get_subsystem(device: &impl DeviceExt) -> Option<&OsStr> { + device.mockable_subsystem() + } + + pub fn get_parent(device: &impl DeviceExt) -> Option<impl DeviceExt> { + device.mockable_parent() + } } pub mod udev_enumerator { @@ -63,7 +92,7 @@ pub mod udev_enumerator { use mockall::predicate::*; use mockall::*; - /// Wrap udev::Enumerator functions in a trait to inable mocking for testing. + /// Wrap udev::Enumerator functions in a trait to enable mocking for testing.
#[automock] pub trait Enumerator { fn match_subsystem(&mut self, value: &str) -> std::io::Result<()>; diff --git a/agent/src/protocols/udev/udev_rule_grammar.pest b/agent/src/protocols/udev/udev_rule_grammar.pest index 677390b11..37f3e090f 100644 --- a/agent/src/protocols/udev/udev_rule_grammar.pest +++ b/agent/src/protocols/udev/udev_rule_grammar.pest @@ -4,9 +4,9 @@ WHITESPACE = _{ " " } udev_rule = { SOI ~ (inner_rule)* ~ EOI } inner_rule = { udev_filter ~ ("," ~ udev_filter)* } udev_filter = ${ field ~ operation ~ quoted_value } -field = { unsupported_field | devpath | kernel | tag | driver | subsystem | attribute | property } -action_field = { run | label | goto | import | options | owner | group | mode | wait_for } -unsupported_field = { action | name | symlink | test | program | result | sysctl | kernels | subsystems | drivers | attributes | tags | constant | seclabel | action_field } +field = { unsupported_field | attributes | attribute | devpath | drivers | driver | kernels | kernel | property | subsystems | subsystem | tags | tag } +action_field = { label | goto | group | import | options | owner | mode | run | wait_for } +unsupported_field = { action | action_field | constant | name | program | result | seclabel | symlink | sysctl | test } bounded_key = {"{" ~ key ~ "}"} // remove ! on key and value rules if want to allow spaces between ""/{} and key/value (ie: { DEVPATH } vs {DEVPATH}) key = !{ (ASCII_ALPHANUMERIC | SPACE_SEPARATOR | "$" | "." | "_" | "*" | "?" | "[" | "]" | "-" | "|" | "\\" | "/" )* } @@ -23,44 +23,44 @@ removal = { "-=" } final_assignment = { ":=" } // Supported fields +attributes = { "ATTRS" ~ bounded_key } +attribute = { "ATTR" ~ bounded_key } // {key} devpath = { "DEVPATH" } -kernel = { "KERNEL" } -tag = { "TAG" } +drivers = { "DRIVERS" } driver = { "DRIVER" } -subsystem = { "SUBSYSTEM" } -attribute = { "ATTR" ~ bounded_key } // {key} +kernels = { "KERNELS" } +kernel = { "KERNEL" } property = { "ENV" ~ bounded_key } // {key} +subsystems = { "SUBSYSTEMS" } +subsystem = { "SUBSYSTEM" } +tags = { "TAGS" } +tag = { "TAG" } + // // Unsupported fields // // Unsupported action only fields -run = { "RUN" ~ bounded_key } // {type} where type = program | builtin -label = { "LABEL" } goto = { "GOTO" } +group = { "GROUP" } +label = { "LABEL" } import = { "IMPORT" ~ bounded_key } // {type} where type = program | builtin | file | db | cmdline | parent +mode = { "MODE" } options = { "OPTIONS" } owner = { "OWNER" } -group = { "GROUP" } -mode = { "MODE" } +run = { "RUN" ~ bounded_key } // {type} where type = program | builtin wait_for = { "WAIT_FOR" } // Other unsupported match (and action) fields action = { "ACTION" } +constant = { "CONST" ~ bounded_key } // {key} where key = "arch" | "virt" name = { "NAME" } -symlink = { "SYMLINK" } -test = { "TEST" ~ bounded_key } // {octal mode mask} program = { "PROGRAM" } result = { "RESULT" } -kernels = { "KERNELS" } -subsystems = { "SUBSYSTEMS" } -drivers = { "DRIVERS" } -attributes = { "ATTRS" ~ bounded_key } -tags = { "TAGS" } -constant = { "CONST" ~ bounded_key } // {key} where key = "arch" | "virt" seclabel = { "SECLABEL" ~ bounded_key } // {module} +symlink = { "SYMLINK" } sysctl = { "SYSCTL" ~ bounded_key } // {kernel key} - +test = { "TEST" ~ bounded_key } // {octal mode mask} diff --git a/agent/src/util/constants.rs b/agent/src/util/constants.rs index 1883ed737..4cf5f6d95 100644 --- a/agent/src/util/constants.rs +++ b/agent/src/util/constants.rs @@ -9,7 +9,7 @@ pub const UNHEALTHY: &str = "Unhealthy"; /// Current version of 
the API supported by kubelet. pub const K8S_DEVICE_PLUGIN_VERSION: &str = "v1beta1"; -/// DevicePluginPath is the folder the kubelet expects to find Device-Plugin sockets. Only privileged pods have access to this path. +/// DevicePluginPath is the folder the kubelet expects to find Device-Plugin sockets. pub const DEVICE_PLUGIN_PATH: &str = "/var/lib/kubelet/device-plugins"; /// Path of the Kubelet registry socket diff --git a/build/containers/intermediate/Dockerfile.opencvsharp-build b/build/containers/intermediate/Dockerfile.opencvsharp-build index 478021948..bd80d1112 100644 --- a/build/containers/intermediate/Dockerfile.opencvsharp-build +++ b/build/containers/intermediate/Dockerfile.opencvsharp-build @@ -13,6 +13,9 @@ ARG PLATFORM_TAG=3.1-buster-slim FROM mcr.microsoft.com/dotnet/core/aspnet:${PLATFORM_TAG} AS base WORKDIR /app +# Link the container to the Akri repository +LABEL org.opencontainers.image.source https://github.com/deislabs/akri + # Copy over container legal notice COPY ./build/container-images-legal-notice.md . diff --git a/build/containers/intermediate/Dockerfile.rust-crossbuild-amd64 b/build/containers/intermediate/Dockerfile.rust-crossbuild-amd64 index 744ff1eb3..bb6843e08 100644 --- a/build/containers/intermediate/Dockerfile.rust-crossbuild-amd64 +++ b/build/containers/intermediate/Dockerfile.rust-crossbuild-amd64 @@ -16,5 +16,8 @@ RUN apt-get update && \ g++ ca-certificates curl libssl-dev \ libv4l-dev libudev-dev +# Link the container to the Akri repository +LABEL org.opencontainers.image.source https://github.com/deislabs/akri + # Copy over container legal notice COPY ./build/container-images-legal-notice.md . \ No newline at end of file diff --git a/build/containers/intermediate/Dockerfile.rust-crossbuild-arm32v7 b/build/containers/intermediate/Dockerfile.rust-crossbuild-arm32v7 index 5363e2862..671ab5b9c 100644 --- a/build/containers/intermediate/Dockerfile.rust-crossbuild-arm32v7 +++ b/build/containers/intermediate/Dockerfile.rust-crossbuild-arm32v7 @@ -21,5 +21,8 @@ RUN sed -i 's/^deb h'/'deb [arch=amd64,i386] h/' /etc/apt/sources.list && \ g++ ca-certificates curl libssl-dev:armhf \ libv4l-dev:armhf libudev-dev:armhf +# Link the container to the Akri repository +LABEL org.opencontainers.image.source https://github.com/deislabs/akri + # Copy over container legal notice COPY ./build/container-images-legal-notice.md . diff --git a/build/containers/intermediate/Dockerfile.rust-crossbuild-arm64v8 b/build/containers/intermediate/Dockerfile.rust-crossbuild-arm64v8 index 9bb301355..e5fee77a8 100644 --- a/build/containers/intermediate/Dockerfile.rust-crossbuild-arm64v8 +++ b/build/containers/intermediate/Dockerfile.rust-crossbuild-arm64v8 @@ -21,5 +21,8 @@ RUN sed -i 's/^deb h'/'deb [arch=amd64,i386] h/' /etc/apt/sources.list && \ g++ ca-certificates curl libssl-dev:arm64 \ libv4l-dev:arm64 libudev-dev:arm64 +# Link the container to the Akri repository +LABEL org.opencontainers.image.source https://github.com/deislabs/akri + # Copy over container legal notice COPY ./build/container-images-legal-notice.md . 
\ No newline at end of file diff --git a/controller/Cargo.toml b/controller/Cargo.toml index f3ba0ae6e..c9993559f 100644 --- a/controller/Cargo.toml +++ b/controller/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "controller" -version = "0.0.41" +version = "0.0.44" authors = [""] edition = "2018" diff --git a/deployment/helm/Chart.yaml b/deployment/helm/Chart.yaml index 19a357cac..00f7c6bab 100644 --- a/deployment/helm/Chart.yaml +++ b/deployment/helm/Chart.yaml @@ -15,9 +15,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.0.41 +version: 0.0.44 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. -appVersion: 0.0.41 +appVersion: 0.0.44 diff --git a/deployment/helm/templates/agent.yaml b/deployment/helm/templates/agent.yaml index 61d174a01..16c4fba45 100644 --- a/deployment/helm/templates/agent.yaml +++ b/deployment/helm/templates/agent.yaml @@ -37,6 +37,10 @@ spec: {{- end }} {{- end }} imagePullPolicy: {{ .Values.agent.image.pullPolicy }} + {{- if .Values.agent.securityContext }} + securityContext: + {{- toYaml .Values.agent.securityContext | nindent 10 }} + {{- end}} env: {{- if .Values.agent.allowDebugEcho }} - name: ENABLE_DEBUG_ECHO @@ -61,8 +65,6 @@ spec: mountPath: /host/var/run/dockershim.sock - name: devices mountPath: /run/udev - securityContext: - privileged: true {{- with .Values.imagePullSecrets }} imagePullSecrets: {{- toYaml . 
| nindent 8 }} diff --git a/deployment/helm/templates/udev.yaml b/deployment/helm/templates/udev.yaml index 219def167..53289148e 100644 --- a/deployment/helm/templates/udev.yaml +++ b/deployment/helm/templates/udev.yaml @@ -31,7 +31,11 @@ spec: {{- end }} {{- end }} securityContext: + {{- if .Values.udev.brokerPod.securityContext }} + {{- toYaml .Values.udev.brokerPod.securityContext | nindent 8 }} + {{- else}} privileged: true + {{- end}} resources: limits: {{`"{{PLACEHOLDER}}"`}} : "1" diff --git a/deployment/helm/values.yaml b/deployment/helm/values.yaml index c2c370d95..74d6b3305 100644 --- a/deployment/helm/values.yaml +++ b/deployment/helm/values.yaml @@ -54,6 +54,7 @@ agent: tag: # pullPolicy is the Akri Agent pull policy pullPolicy: Always + securityContext: {} host: # kubeletDevicePlugins is the location of the kubelet device-plugin sockets kubeletDevicePlugins: /var/lib/kubelet/device-plugins @@ -197,6 +198,7 @@ udev: # repository is the container reference repository: pullPolicy: Always + securityContext: {} # createInstanceServices is specified if a service should automatically be # created for each broker pod createInstanceServices: true diff --git a/docs/customizing-akri-installation.md b/docs/customizing-akri-installation.md index c106d327e..3eef725ba 100644 --- a/docs/customizing-akri-installation.md +++ b/docs/customizing-akri-installation.md @@ -12,7 +12,7 @@ protocol Configuration using Helm (more information about the Akri Helm charts c To install Akri without any protocol Configurations, run this: ```bash helm repo add akri-helm-charts https://deislabs.github.io/akri/ -helm install akri akri-helm-charts/akri-dev \ +helm install akri akri-helm-charts/akri \ --set useLatestContainers=true ``` This will start the Akri controller and deploy Akri Agents. @@ -25,7 +25,7 @@ our Helm chart, we suggest creating a Configuration file using Helm and then man For example, to create an ONVIF Configuration file, run the following. (To instead create a udev Configuration, substitute `onvif.enabled` with `udev.enabled` and add a udev rule.) ```bash -helm template akri akri-helm-charts/akri-dev \ +helm template akri akri-helm-charts/akri \ --set useLatestContainers=true \ --set onvif.enabled=true \ --set onvif.brokerPod.image.repository=nginx \ @@ -45,7 +45,7 @@ If you want your end application to consume frames from both IP cameras and loca installed from the start with both the ONVIF and udev Configurations like so: ```bash helm repo add akri-helm-charts https://deislabs.github.io/akri/ -helm install akri akri-helm-charts/akri-dev \ +helm install akri akri-helm-charts/akri \ --set useLatestContainers=true \ --set onvif.enabled=true \ --set udev.enabled=true \ @@ -70,7 +70,7 @@ state** of Akri and replace `helm install` with `helm upgrade`. Using the ONVIF say an IP camera with IP address 10.0.0.1 is malfunctioning and should be filtered out of discovery, the following command could be run: ```bash -helm upgrade akri akri-helm-charts/akri-dev \ +helm upgrade akri akri-helm-charts/akri \ --set useLatestContainers=true \ --set onvif.enabled=true \ --set onvif.brokerPod.image.repository= \ @@ -155,7 +155,7 @@ template` and kubectl. Another Configuration can be added to the cluster by using `helm upgrade`. 
For example, if you originally installed just the ONVIF Configuration and now also want to discover local cameras via udev, as well, simply run the following: ```bash -helm upgrade akri akri-helm-charts/akri-dev \ +helm upgrade akri akri-helm-charts/akri \ --set useLatestContainers=true \ --set onvif.enabled=true \ --set udev.enabled=true \ @@ -174,7 +174,7 @@ A Configuration can be deleted from a cluster using `helm upgrade`. For example, have been installed in a cluster, the udev Configuration can be deleted by only specifying the ONVIF Configuration in a `helm upgrade` command like the following: ```bash -helm upgrade akri akri-helm-charts/akri-dev \ +helm upgrade akri akri-helm-charts/akri \ --set useLatestContainers=true \ --set onvif.enabled=true ``` diff --git a/docs/end-to-end-demo-gce.md b/docs/end-to-end-demo-gce.md index d0697c6c1..4c2cbcaae 100644 --- a/docs/end-to-end-demo-gce.md +++ b/docs/end-to-end-demo-gce.md @@ -115,7 +115,7 @@ gcloud compute ssh ${INSTANCE} \ > **NOTE** `HOSTPORT` can be the same as `NODEPORT` if this is available on your host. -The port-forwarding only works while the ssh sessions is running. So, while the previous command is running in one shell, browse the demo's HTTP endpoint: +The port-forwarding only works while the ssh session is running. So, while the previous command is running in one shell, browse the demo's HTTP endpoint: ```console http://localhost:${HOSTPORT}/ @@ -141,4 +141,4 @@ If you wish to delete the entire Google Cloud Platform project: gcloud projects delete ${PROJECT} ``` -> **WARNING** Both these commands are irrevocable. \ No newline at end of file +> **WARNING** Both these commands are irrevocable. diff --git a/docs/end-to-end-demo-rpi4.md b/docs/end-to-end-demo-rpi4.md index 91ae122c8..cfaa40122 100644 --- a/docs/end-to-end-demo-rpi4.md +++ b/docs/end-to-end-demo-rpi4.md @@ -68,7 +68,7 @@ This demo will demonstrate how to get Akri working on a **Raspberry Pi 4**, all 1. Install Akri Helm chart and enable the udev video configuration which will search for all video devices on the node, as specified by the udev rule `KERNEL=="video[0-9]*"` in the configuration. Since the /dev/video1 and /dev/video2 devices are running on this node, the Akri Agent will discover them and create an Instance for each camera. Watch two broker pods spin up, one for each camera. ```sh helm repo add akri-helm-charts https://deislabs.github.io/akri/ - helm install akri akri-helm-charts/akri-dev \ + helm install akri akri-helm-charts/akri \ --set useLatestContainers=true \ --set udev.enabled=true \ --set udev.name=akri-udev-video \ @@ -85,7 +85,7 @@ This demo will demonstrate how to get Akri working on a **Raspberry Pi 4**, all ```sh kubectl get akrii -o yaml ``` -1. Deploy the steaming web application and watch a pod spin up for the app. +1. Deploy the streaming web application and watch a pod spin up for the app. ```sh # This file url is not available while the Akri repo is private. To get a valid url, open # https://github.com/deislabs/akri/blob/main/deployment/samples/akri-video-streaming-app.yaml @@ -140,4 +140,4 @@ This demo will demonstrate how to get Akri working on a **Raspberry Pi 4**, all 1. Apply the [ONVIF configuration](onvif-configuration.md) and make the streaming app display footage from both the local video devices and onvif cameras. 
To do this, modify the [video streaming yaml](../deployment/samples/akri-video-streaming-app.yaml) as described in the inline comments in order to create a larger service that aggregates the output from both the `udev-camera-svc` service and `onvif-camera-svc` service. 1. Add more nodes to the cluster. 1. [Modify the udev rule](udev-video-sample.md#modifying-the-udev-rule) to find a more specific subset of cameras. -1. Discover other udev devices by creating a new udev configuration and broker. Learn more about the udev protocol [here](udev-configuration.md). \ No newline at end of file +1. Discover other udev devices by creating a new udev configuration and broker. Learn more about the udev protocol [here](udev-configuration.md). diff --git a/docs/end-to-end-demo.md b/docs/end-to-end-demo.md index be5cdd50c..6eaf804b5 100644 --- a/docs/end-to-end-demo.md +++ b/docs/end-to-end-demo.md @@ -2,11 +2,21 @@ In this guide, you will deploy Akri end-to-end, all the way from discovering local video cameras to the footage being streamed on a Web application. You will explore how Akri can dynamically discover devices, deploy broker pods to perform some action on a device (in this case grabbing video frames and serving them over gRPC), and deploy broker services for obtaining the results of that action. ## Set up mock udev video devices -1. Install a kernel module to make v4l2 loopback video devices. Learn more about this module [here](https://github.com/umlaeute/v4l2loopback). - ```sh +1. Acquire an Ubuntu 20.04 LTS, 18.04 LTS or 16.04 LTS environment to run the + commands. If you would like to deploy the demo to a cloud-based VM, see the + instructions for [DigitalOcean](end-to-end-demo-do.md) or [Google Compute + Engine](end-to-end-demo-gce.md) (and you can skip the rest of the steps in + this document). +1. To make dummy video4linux devices, install the v4l2loopback kernel module and its prerequisites. Learn more about v4l2 loopback [here](https://github.com/umlaeute/v4l2loopback). + ```sh + sudo apt update + sudo apt -y install linux-modules-extra-$(uname -r) + sudo apt -y install dkms curl http://deb.debian.org/debian/pool/main/v/v4l2loopback/v4l2loopback-dkms_0.12.5-1_all.deb -o v4l2loopback-dkms_0.12.5-1_all.deb sudo dpkg -i v4l2loopback-dkms_0.12.5-1_all.deb ``` + When running on Ubuntu 20.04 LTS, 18.04 LTS or 16.04 LTS, do NOT install v4l2loopback through `sudo apt install -y v4l2loopback-dkms`, as you will get an older version (0.12.3); 0.12.5-1 is required for gstreamer to work properly. + 1. Insert the kernel module, creating /dev/video1 and /dev/video2 devnodes. To create a different number of video devices, modify the `video_nr` argument. ```sh sudo modprobe v4l2loopback exclusive_caps=1 video_nr=1,2 @@ -32,10 +42,9 @@ In this guide, you will deploy Akri end-to-end, all the way from discovering loc ## Set up a cluster **Note:** Feel free to deploy on any Kubernetes distribution. Here, find instructions for K3s and MicroK8s. Select and -carry out one or the other (or adopt to your distribution), then continue on with the rest of the steps. +carry out one or the other (or adapt to your distribution), then continue on with the rest of the steps. ### Option 1: Set up single node cluster using K3s -1. Acquire a Linux distro that is supported by K3s, these steps work for Ubuntu. 1. Install [K3s](https://k3s.io/) v1.18.9+k3s1.
```sh curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=v1.18.9+k3s1 sh - @@ -65,7 +74,6 @@ carry out one or the other (or adopt to your distribution), then continue on wit ``` ### Option 2: Set up single node cluster using MicroK8s -1. Acquire an Ubuntu 20.04 LTS, 18.04 LTS or 16.04 LTS environment to run the commands. If you would like to deploy the demo to a cloud-based VM, see the instructions for [DigitalOcean](end-to-end-demo-do.md) or [Google Compute Engine](end-to-end-demo-gce.md). 1. Install [MicroK8s](https://microk8s.io/docs). ```sh sudo snap install microk8s --classic --channel=1.18/stable @@ -89,7 +97,10 @@ carry out one or the other (or adopt to your distribution), then continue on wit alias kubectl='microk8s kubectl' alias helm='microk8s helm3' ``` -1. Enable privileged pods and restart microk8s. +1. For the sake of this demo, the udev video broker pods run privileged to easily grant them access to video devices, so + enable privileged pods and restart MicroK8s. More explicit device access could have been configured by setting the + appropriate [security context](udev-configuration.md#setting-the-broker-pod-security-context) in the broker PodSpec + in the Configuration. ```sh echo "--allow-privileged=true" >> /var/snap/microk8s/current/args/kube-apiserver microk8s.stop @@ -110,7 +121,7 @@ carry out one or the other (or adopt to your distribution), then continue on wit 1. Use Helm to install Akri and create a Configuration to discover local video devices. Create your Configuration by setting values in your install command. Enable the udev Configuration which will search the Linux device filesystem as specified by a udev rule and give it a name. Since we want to find only video devices on the node, specify a udev rule of `KERNEL=="video[0-9]*"`. Also, specify the broker image you want to be deployed to discovered devices. In this case we will use Akri's sample frame server. Since the /dev/video1 and /dev/video2 devices are running on this node, the Akri Agent will discover them and create an Instance for each camera. Watch two broker pods spin up, one for each camera. ```sh helm repo add akri-helm-charts https://deislabs.github.io/akri/ - helm install akri akri-helm-charts/akri-dev \ + helm install akri akri-helm-charts/akri \ $AKRI_HELM_CRICTL_CONFIGURATION \ --set useLatestContainers=true \ --set udev.enabled=true \ diff --git a/docs/extensibility-http-grpc.md b/docs/extensibility-http-grpc.md new file mode 100644 index 000000000..aa192ca0f --- /dev/null +++ b/docs/extensibility-http-grpc.md @@ -0,0 +1,463 @@ +# Deeper dive into HTTP-based Device brokers + +3 different broker implementations have been created for the HTTP protocol in the http-extensibility branch, 2 in Rust and 1 in Go: +* The standalone broker is a self-contained scenario that demonstrates the ability to interact with HTTP-based devices by `curl`ing a device's endpoints. This type of solution would be applicable in batch-like scenarios where the broker performs a predictable set of processing steps for a device. +* The second scenario uses gRPC. gRPC is an increasingly common alternative to REST-like APIs and supports high-throughput and streaming methods. gRPC is not a requirement for broker implementations in Akri but is used here as one of many mechanisms that may be used. The gRPC-based broker has a companion client. This is a more realistic scenario in which the broker proxies client requests using gRPC to HTTP-based devices.
The advantage of this approach is that device functionality is encapsulated by an API that is exposed by the broker. In this case the API has a single method, but in practice there could be many methods implemented. +* The third implementation is a gRPC-based broker and companion client implemented in Golang. This is functionally equivalent to the Rust implementation and shares a protobuf definition. For this reason, you may combine the Rust broker and client with the Golang broker and client arbitrarily. The Golang broker is described in the [`http-apps`](./samples/apps/http-apps/README.md) directory. + +The first option, a standalone broker, is described in docs/extensibility.md. + +The two gRPC brokers are implemented here as well. This document will describe the second option, a Rust gRPC broker. + +Please read docs/extensibility.md before reading this document. This document will not cover [creating and deploying mock HTTP-based Devices](docs/extensibility.md#create-some-http-devices), [how to add the HTTP protocol to Akri](docs/extensibility.md#new-discoveryhandler-implementation), or [how to deploy the updated Akri](docs/extensibility.md#deploy-akri). + +## Creating a Rust gRPC broker (and client) + +First, we need to create a project. We can use `cargo` to create our project by navigating to `samples/brokers` and running `cargo new http`. Once the http project has been created, it can be added to the greater Akri project by adding `"samples/brokers/http"` to the **members** in `./Cargo.toml`. + +The broker implementation can be split into parts: + +1. Accessing the HTTP-based Device data +1. Exposing the data to the cluster + +We also provide a gRPC client implementation that can be used to access the brokered data. + +1. Reading the data in the cluster + +### Accessing the data +To access the HTTP-based Device data, we first need to retrieve any discovery information. Any information stored in the DiscoveryResult properties map will be transferred into the broker container's environment variables. Retrieving them is simply a matter of querying environment variables like this: + +```rust +let device_url = env::var("AKRI_HTTP_DEVICE_ENDPOINT")?; +``` + +For our HTTP-based Device broker, the data can be generated with an HTTP GET. In fact, the code we used in `discover` can be adapted for what we need: + +```rust +async fn read_sensor( + &self, + _rqst: Request<ReadSensorRequest>, +) -> Result<Response<ReadSensorResponse>, Status> { + match get(&self.device_url).await { + Ok(resp) => { + let body = resp.text().await.unwrap(); + Ok(Response::new(ReadSensorResponse { value: body })) + } + Err(err) => { + Err(Status::new(Code::Unavailable, "device is unavailable")) + } + } +} +``` + +### Exposing the data to the cluster +For a gRPC service, we need to do several things: + +1. Create a proto file describing our gRPC service +1. Create a build file that a gRPC library like Tonic can use +1.
Leverage the output of our gRPC library build + +The first step is fairly simple for our HTTP devices (create this in `samples/brokers/http/proto/http.proto`): + +```proto +syntax = "proto3"; + +option go_package = "github.com/deislabs/akri/http-extensibility/proto"; + +package http; + +service DeviceService { + rpc ReadSensor (ReadSensorRequest) returns (ReadSensorResponse); +} + +message ReadSensorRequest { + string name = 1; +} +message ReadSensorResponse { + string value = 1; +} +``` + +The second step, assuming Tonic (though there are several very good gRPC libraries), is to create `samples/brokers/http/build.rs`: + +```rust +fn main() -> Result<(), Box<dyn std::error::Error>> { + tonic_build::compile_protos("proto/http.proto")?; + Ok(()) +} +``` + +With the gRPC implementation created, we can now start utilizing it. Tonic has made this very simple; we can leverage a simple macro like this: + +```rust +pub mod http { + tonic::include_proto!("http"); +} +``` + +We can tie these pieces together in our main and retrieve the endpoint from the environment variables in `samples/brokers/http/src/broker.rs` (notice that we specify broker.rs, as main.rs is used for our standalone broker). Here we use the generated gRPC service code to listen for gRPC requests: + +```rust +pub mod http { + tonic::include_proto!("http"); +} + +use clap::{App, Arg}; +use http::{ + device_service_server::{DeviceService, DeviceServiceServer}, + ReadSensorRequest, ReadSensorResponse, +}; +use reqwest::get; +use std::env; +use std::net::SocketAddr; +use tonic::{transport::Server, Code, Request, Response, Status}; + +const DEVICE_ENDPOINT: &str = "AKRI_HTTP_DEVICE_ENDPOINT"; + +#[derive(Default)] +pub struct Device { + device_url: String, +} + +#[tonic::async_trait] +impl DeviceService for Device { + async fn read_sensor( + &self, + _rqst: Request<ReadSensorRequest>, + ) -> Result<Response<ReadSensorResponse>, Status> { + match get(&self.device_url).await { + Ok(resp) => { + let body = resp.text().await.unwrap(); + println!("[read_sensor] Response body: {:?}", body); + Ok(Response::new(ReadSensorResponse { value: body })) + } + Err(err) => { + Err(Status::new(Code::Unavailable, "device is unavailable")) + } + } + } +} + +#[tokio::main] +async fn main() -> Result<(), Box<dyn std::error::Error>> { + println!("[main] Entered"); + let matches = App::new("broker") + .arg( + Arg::with_name("grpc_endpoint") + .long("grpc_endpoint") + .value_name("ENDPOINT") + .help("Endpoint address that the gRPC server will listen on.") + .required(true), + ) + .get_matches(); + let grpc_endpoint = matches.value_of("grpc_endpoint").unwrap(); + let addr: SocketAddr = grpc_endpoint.parse().unwrap(); + let device_url = env::var(DEVICE_ENDPOINT)?; + println!("[main] gRPC service proxying: {}", device_url); + let device_service = Device { device_url }; + let service = DeviceServiceServer::new(device_service); + + Server::builder() + .add_service(service) + .serve(addr) + .await + .expect("unable to start http-protocol gRPC server"); + + Ok(()) +} +``` + +To ensure that the broker builds, update `samples/brokers/http/Cargo.toml` with the broker `[[bin]]` and dependencies: + +```toml +[[bin]] +name = "broker" +path = "src/grpc/broker.rs" + +[dependencies] +clap = "2.33.3" +futures = "0.3" +futures-util = "0.3" +prost = "0.6" +reqwest = "0.10.8" +tokio = { version = "0.2", features = ["rt-threaded", "time", "stream", "fs", "macros", "uds"] } +tonic = "0.1" + +[build-dependencies] +tonic-build = "0.1.1" +``` + +### Reading the data in the cluster + +The steps to generate a gRPC client are very similar to creating a broker.
We will start here with the assumption that a broker has been created, and leverage the directory structure and files that have already been created.
+
+Having already created our gRPC implementation, we can now start using it with the Tonic macros:
+
+```rust
+pub mod http {
+    tonic::include_proto!("http");
+}
+```
+
+This provides an easy way to query our HTTP-based Device gRPC service in `samples/brokers/http/src/grpc/client.rs` (notice, again, that we use grpc/client.rs rather than main.rs or grpc/broker.rs). Here we create a simple loop that calls into the generated gRPC client code to read our HTTP-based Device data:
+
+```rust
+pub mod http {
+    tonic::include_proto!("http");
+}
+
+use clap::{App, Arg};
+use http::{device_service_client::DeviceServiceClient, ReadSensorRequest};
+use tokio::{time, time::Duration};
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    let matches = App::new("client")
+        .arg(
+            Arg::with_name("grpc_endpoint")
+                .long("grpc_endpoint")
+                .value_name("ENDPOINT")
+                .help("Endpoint address of the gRPC server.")
+                .required(true),
+        )
+        .get_matches();
+    let grpc_endpoint = matches.value_of("grpc_endpoint").unwrap();
+    let endpoint = format!("http://{}", grpc_endpoint);
+    let mut client = DeviceServiceClient::connect(endpoint).await?;
+
+    loop {
+        let rqst = tonic::Request::new(ReadSensorRequest {
+            name: "/".to_string(),
+        });
+        println!("[main:loop] Calling read_sensor");
+        let resp = client.read_sensor(rqst).await?;
+        println!("[main:loop] Response: {:?}", resp);
+        time::delay_for(Duration::from_secs(10)).await;
+    }
+    Ok(())
+}
+```
+
+To ensure that our client builds, we update `samples/brokers/http/Cargo.toml` with the client `[[bin]]`:
+
+```toml
+[[bin]]
+name = "broker"
+path = "src/grpc/broker.rs"
+
+[[bin]]
+name = "client"
+path = "src/grpc/client.rs"
+
+[dependencies]
+clap = "2.33.3"
+futures = "0.3"
+futures-util = "0.3"
+prost = "0.6"
+reqwest = "0.10.8"
+tokio = { version = "0.2", features = ["rt-threaded", "time", "stream", "fs", "macros", "uds"] }
+tonic = "0.1"
+
+[build-dependencies]
+tonic-build = "0.1.1"
+```
+
+## Build and Deploy gRPC broker and client
+
+To build the broker and client, we create simple Dockerfiles:
+
+`samples/brokers/http/Dockerfiles/grpc.broker`
+```dockerfile
+FROM amd64/rust:1.47 as build
+RUN rustup component add rustfmt --toolchain 1.47.0-x86_64-unknown-linux-gnu
+RUN USER=root cargo new --bin http
+WORKDIR /http
+COPY ./samples/brokers/http/Cargo.toml ./Cargo.toml
+RUN cargo build \
+    --bin=broker \
+    --release
+RUN rm ./src/*.rs
+RUN rm ./target/release/deps/http*
+COPY ./samples/brokers/http .
+RUN cargo build \
+    --bin=broker \
+    --release
+FROM amd64/debian:buster-slim
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    ca-certificates \
+    libssl-dev \
+    openssl && \
+    apt-get clean
+COPY --from=build /http/target/release/broker /broker
+LABEL org.opencontainers.image.source https://github.com/deislabs/akri
+ENV SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt
+ENV SSL_CERT_DIR=/etc/ssl/certs
+ENV RUST_LOG broker
+ENTRYPOINT ["/broker"]
+```
+
+`samples/brokers/http/Dockerfiles/grpc.client`
+```dockerfile
+FROM amd64/rust:1.47 as build
+RUN rustup component add rustfmt --toolchain 1.47.0-x86_64-unknown-linux-gnu
+RUN USER=root cargo new --bin http
+WORKDIR /http
+COPY ./samples/brokers/http/Cargo.toml ./Cargo.toml
+RUN cargo build \
+    --bin=client \
+    --release
+RUN rm ./src/*.rs
+RUN rm ./target/release/deps/http*
+COPY ./samples/brokers/http .
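+# The first `cargo build` above ran against the placeholder project created by
+# `cargo new`, caching the dependency build; rebuild now that the real sources are in place.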
+RUN cargo build \
+    --bin=client \
+    --release
+FROM amd64/debian:buster-slim
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    ca-certificates \
+    libssl-dev \
+    openssl && \
+    apt-get clean
+COPY --from=build /http/target/release/client /client
+LABEL org.opencontainers.image.source https://github.com/deislabs/akri
+ENV SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt
+ENV SSL_CERT_DIR=/etc/ssl/certs
+ENV RUST_LOG client
+ENTRYPOINT ["/client"]
+```
+
+We can build the containers using `docker build` and make them available to our cluster with `docker push`:
+```bash
+HOST="ghcr.io"
+USER=[[GITHUB-USER]]
+REPO="http-broker"
+TAGS="v1"
+
+for APP in "broker" "client"
+do
+  docker build \
+    --tag=${HOST}/${USER}/${REPO}-grpc-${APP}:${TAGS} \
+    --file=./samples/brokers/http/Dockerfiles/grpc.${APP} \
+    . && \
+  docker push ${HOST}/${USER}/${REPO}-grpc-${APP}:${TAGS}
+done
+```
+
+Now we can deploy the gRPC-enabled broker using an Akri Configuration, `samples/brokers/http/kubernetes/http.grpc.broker.yaml` (being sure to update **image** according to the previous steps):
+
+```yaml
+apiVersion: akri.sh/v0
+kind: Configuration
+metadata:
+  name: http-grpc-broker-rust
+spec:
+  protocol:
+    http:
+      discoveryEndpoint: http://discovery:8080/discovery
+  capacity: 1
+  brokerPodSpec:
+    imagePullSecrets: # GitHub Container Registry secret
+      - name: SECRET
+    containers:
+      - name: http-grpc-broker-rust
+        image: IMAGE
+        args:
+          - --grpc_endpoint=0.0.0.0:50051
+        resources:
+          limits:
+            "{{PLACEHOLDER}}": "1"
+  instanceServiceSpec:
+    ports:
+      - name: grpc
+        port: 50051
+        targetPort: 50051
+  configurationServiceSpec:
+    ports:
+      - name: grpc
+        port: 50051
+        targetPort: 50051
+```
+
+With this Akri Configuration, we can use `kubectl` to update the cluster:
+
+```bash
+kubectl apply --filename=./kubernetes/http.grpc.broker.yaml
+```
+
+Assuming that you have [created and deployed mock HTTP-based Devices](docs/extensibility.md#create-some-http-devices), you can query the broker's logs and should see the gRPC service starting and then proxying:
+
+```bash
+kubectl logs pod/akri-http-...-pod
+[main] Entered
+[main] gRPC service proxying: http://device-7:8080
+```
+
+> Optional: you can test the gRPC service using [`grpcurl`](https://github.com/fullstorydev/grpcurl/releases)
+>
+> ```bash
+> BROKER=$( kubectl get service/http-svc --output=jsonpath="{.spec.clusterIP}")
+>
+> ./grpcurl \
+>   --plaintext \
+>   -proto ./http.proto \
+>   ${BROKER}:50051 \
+>   http.DeviceService.ReadSensor
+> {
+>   "value": "0.4871220658001621"
+> }
+> ```
+>
+> This uses the `configurationServiceSpec` service name (`http-svc`), which randomly picks one of the HTTP brokers. It uses the service's ClusterIP because the cluster DNS is inaccessible to `grpcurl`.
+
+The gRPC client can be deployed as any Kubernetes workload.
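+
+> Optional: you can also smoke-test the client before deploying it, assuming a local checkout of this repo and `kubectl` access to the cluster (a sketch using the `http-svc` Service described above):
+>
+> ```bash
+> kubectl port-forward service/http-svc 50051:50051 &
+> cargo run --bin=client -- --grpc_endpoint=localhost:50051
+> ```
+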
For our example, we create a Deployment, `samples/brokers/http/kubernetes/http.grpc.client.yaml` (updating **image** according to the previous `docker push` commands): +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: http-grpc-client-rust +spec: + replicas: 1 + selector: + matchLabels: + id: akri-http-client-rust + template: + metadata: + labels: + id: akri-http-client-rust + name: http-grpc-client-rust + spec: + imagePullSecrets: + - name: SECRET + containers: + - name: http-grpc-client-rust + image: IMAGE + args: + - --grpc_endpoint=http-svc:50051 +``` + +You may then deploy the gRPC client: + +```bash +kubectl apply --filename=./kubernetes/http.grpc.client.yaml +``` + +This uses the `configurationServiceSpec` service name (`http-svc`) which randomly picks one of the HTTP brokers. + +You may check the client's logs: + +```bash +kubectl logs deployment/http-grpc-client-rust +``` + +Yielding something of the form: + +```console +[main:loop] Calling read_sensor +[main:loop] Response: Response { metadata: MetadataMap { headers: {"content-type": "application/grpc", "date": "Wed, 11 Nov 2020 17:46:55 GMT", "grpc-status": "0"} }, message: ReadSensorResponse { value: "0.6088971084079992" } } +[main:loop] Constructing Request +[main:loop] Calling read_sensor +[main:loop] Response: Response { metadata: MetadataMap { headers: {"content-type": "application/grpc", "date": "Wed, 11 Nov 2020 17:47:05 GMT", "grpc-status": "0"} }, message: ReadSensorResponse { value: "0.9686970038897007" } } +``` + diff --git a/docs/extensibility.md b/docs/extensibility.md index 91b6bf3ad..be4516491 100644 --- a/docs/extensibility.md +++ b/docs/extensibility.md @@ -1,564 +1,662 @@ -# Extensibility -While Akri has several [currently supported discovery protocols](./roadmap.md#currently-supported-protocols) and sample brokers and applications to go with them, the protocol you want to use to discover resources may not be implemented yet. This walks you through all the development steps needed to implement a new protocol and sample broker. It will also cover the steps to get your protocol and broker[s] added to Akri, should you wish to contribute them back. - -To add a new protocol implementation, three things are needed: -1. Add a new DiscoveryHandler implementation in the Akri Agent -1. Update the Configuration CRD to include the new DiscoveryHandler implementation -1. Create a protocol broker for the new capability - -## The mythical Loch Ness resource -To demonstrate how new protocols can be added, we will create a protocol to discover Nessie, a mythical Loch Ness monster that lives at a specific url. - -For reference, we have created a [nessie branch](https://github.com/deislabs/akri/tree/nessie) with the implementation defined below. For convenience, you can [compare the nessie branch with main here](https://github.com/deislabs/akri/compare/nessie). - -### Container Registry Setup -Any docker-compatible container registry should work (dockerhub, Github Container Registry, Azure Container Registry, etc). - -For this sample, we are using the [GitHub container registry](https://github.blog/2020-09-01-introducing-github-container-registry/). You can follow the [getting started guide here to enable it for yourself](https://docs.github.com/en/free-pro-team@latest/packages/getting-started-with-github-container-registry). 
- -### New DiscoveryHandler implementation -If the resource you are interested in defining is not accessible through the [included protocols](./roadmap.md#currently-supported-protocols), then you will need to create a DiscoveryHandler for your new protocol. For the sake of demonstration, we will create a discovery handler in order to discover mythical Nessie resources. - -New protocols require new implementations of the DiscoveryHandler: - -```rust -#[async_trait] -pub trait DiscoveryHandler { - async fn discover(&self) -> Result, Error>; - fn are_shared(&self) -> Result; -} -``` - -To create a new protocol type, a new struct and impl block is required. To that end, create a new folder for our Nessie code: `agent/src/protocols/nessie` and add a reference this new module in `agent/src/protocols/mod.rs`: - -```rust -mod debug_echo; -mod nessie; // <--- Our new Nessie module -mod onvif; -``` - -Next, add a few files to our new nessie folder: - -`agent/src/protocols/nessie/discovery_handler.rs`: -```rust -use super::super::{DiscoveryHandler, DiscoveryResult}; -use akri_shared::akri::configuration::NessieDiscoveryHandlerConfig; -use async_trait::async_trait; -use failure::Error; -use std::collections::HashMap; - -pub struct NessieDiscoveryHandler { - discovery_handler_config: NessieDiscoveryHandlerConfig, -} - -impl NessieDiscoveryHandler { - pub fn new(discovery_handler_config: &NessieDiscoveryHandlerConfig) -> Self { - NessieDiscoveryHandler { - discovery_handler_config: discovery_handler_config.clone(), - } - } -} - -#[async_trait] -impl DiscoveryHandler for NessieDiscoveryHandler { - async fn discover(&self) -> Result, failure::Error> { - let src = self.discovery_handler_config.nessie_url.clone(); - let mut results = Vec::new(); - - match reqwest::get(&src).await { - Ok(resp) => { - trace!("Found nessie url: {:?} => {:?}", &src, &resp); - // If the Nessie URL can be accessed, we will return a DiscoveryResult - // instance - let mut props = HashMap::new(); - props.insert("nessie_url".to_string(), src.clone()); - - results.push(DiscoveryResult::new(&src, props, true)); - } - Err(err) => { - println!("Failed to establish connection to {}", &src); - println!("Error: {}", err); - return Ok(results); - } - }; - Ok(results) - } - fn are_shared(&self) -> Result { - Ok(true) - } -} -``` - -`agent/src/protocols/nessie/mod.rs`: -```rust -mod discovery_handler; -pub use self::discovery_handler::NessieDiscoveryHandler; -``` - -In order to enable the nessie discovery handler to access https, we need to make a couple changes to `build/containers/Dockerfile.agent`: -* Add installation of `ca-certificates` -* Add `SSL_CERT_FILE` and `SSL_CERT_DIR` ENV lines - -```dockerfile -RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates libssl-dev openssl && apt-get clean -COPY ./target/${CROSS_BUILD_TARGET}/release/agent /agent - -ENV SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt -ENV SSL_CERT_DIR=/etc/ssl/certs -ENV RUST_LOG agent,akri_shared -CMD ["./agent"] -``` - -The next step is to update `inner_get_discovery_handler` in `agent/src/protocols/mod.rs` to create a NessieDiscoveryHandler: - -```rust -match discovery_handler_config { - ProtocolHandler::nessie(nessie) => { - Ok(Box::new(nessie::NessieDiscoveryHandler::new(&nessie))) - } - ... -``` - -### Update Configuration CRD -Now we need to update the Configuration CRD so that we can pass some properties to our new protocol handler. First, lets create our data structures. 
- -The first step is to create a DiscoveryHandler configuration struct. This struct will be used to deserialize the CRD contents and will be passed on to our NessieDiscoveryHandler. Here we are specifying that users must pass in the url for where Nessie lives. This means that Agent is not doing any discovery work besides validating a URL, but this is the scenario we are using to simplify the example. Add this code to `shared/src/akri/configuration.rs`: - -```rust -#[derive(Serialize, Deserialize, Clone, Debug)] -#[serde(rename_all = "camelCase")] -pub struct NessieDiscoveryHandlerConfig { - pub nessie_url: String, -} -``` - -Next, we need to update the Akri protocol handler enum to include Nessie: - -```rust -pub enum ProtocolHandler { - nessie(NessieDiscoveryHandlerConfig), - ... -} -``` - -Finally, we need to add Nessie to the CRD yaml so that Kubernetes can properly validate any one attempting to configure Akri to search for Nessie. To do this, we need to modify `deployment/helm/crds/akri-configuration-crd.yaml`: - -```yaml -openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - protocol: # {{ProtocolHandler}} - type: object - properties: - nessie: # {{NessieDiscoveryHandler}} <--- add this line - type: object # <--- add this line - properties: # <--- add this line - nessieUrl: # <--- add this line - type: string # <--- add this line... -``` - -### Create a sample protocol broker -The final step, is to create a protocol broker that will make Nessie available to the cluster. The broker can be written in any language as it will be deployed as an individual pod; however, for this example, we will make a Rust broker. We can use cargo to create our project by navigating to `samples/brokers` and running `cargo new nessie`. Once the nessie project has been created, it can be added to the greater Akri project by adding `"samples/brokers/nessie"` to the **members** in `./Cargo.toml`. - -As a simple strategy, we can split the broker implementation into parts: - -1. Create a shared buffer for the data -1. Accessing the "nessie" data -1. Exposing the "nessie" data to the cluster - -For the first step, we looked for a simple non-blocking, ring buffer ... we can add this to a module like `util` by creating `samples/brokers/nessie/src/util/mod.rs`: - -```rust -pub mod nessie; -pub mod nessie_service; - -use arraydeque::{ArrayDeque, Wrapping}; -// Create a wrapping (non-blocking) ring buffer with a capacity of 10 -pub type FrameBuffer = ArrayDeque<[Vec; 10], Wrapping>; -``` - -To access the "nessie" data, we first need to retrieve any discovery information. Any information stored in the DiscoveryResult properties map will be transferred into the broker container's environment variables. Retrieving them is simply a matter of querying environment variables like this: - -```rust -fn get_nessie_url() -> String { - env::var("nessie_url").unwrap() -} -``` - -For our Nessie broker, the "nessie" data can be generated with an http get. 
In fact, the code we used in `discover` can be adapted for what we need: - -```rust -async fn get_nessie(nessie_url: &String, frame_buffer: Arc>) { - match reqwest::get(nessie_url).await { - Ok(res) => { - println!("reqwest result: {:?}", res); - let bytes = match res.bytes().await { - Ok(bytes) => bytes, - Err(err) => { - println!("Failed to get nessie bytes from {}", &nessie_url); - println!("Error: {}", err); - return; - } - }; - frame_buffer.lock().unwrap().push_back(bytes.to_vec()); - } - Err(err) => { - println!("Failed to establish connection to {}", &nessie_url); - println!("Error: {}", err); - return; - } - }; -} -``` - -Finally, to expose data to the cluster, we suggest a simple gRPC service. For a gRPC service, we need to do several things: - -1. Create a Nessie proto file describing our gRPC service -1. Create a build file that a gRPC library like Tonic can use -1. Leverage the output of our gRPC library build - -The first step is fairly simple for Nessie (create this in `samples/brokers/nessie/nessie.proto`): - -```proto -syntax = "proto3"; - -option csharp_namespace = "Nessie"; - -package nessie; - -service Nessie { - rpc GetNessieNow (NotifyRequest) returns (NotifyResponse); -} - -message NotifyRequest { -} - -message NotifyResponse { - bytes frame = 1; -} -``` - -The second step, assuming Tonic (though there are several very good gRPC libraries) is to create `samples/brokers/nessie/build.rs`: - -```rust -fn main() { - tonic_build::configure() - .build_client(true) - .out_dir("./src/util") - .compile(&["./nessie.proto"], &["."]) - .expect("failed to compile protos"); -} -``` - -This build file will compile `nessie.proto` into a rust source file `samples/brokers/nessie/src/util/nessie.rs`. - -Next, we need to include the gRPC generated code in by adding a reference to `nessie` in `samples/brokers/nessie/src/util/mod.rs`: - -```rust -pub mod nessie; -``` - -With the gRPC implementation created, we can now start utilizing it. 
- -First, we need to leverage the generated gRPC code by creating `samples/brokers/nessie/src/util/nessie_service.rs`: - -```rust -use super::{ - nessie::{ - nessie_server::{Nessie, NessieServer}, - NotifyRequest, NotifyResponse, - }, - FrameBuffer, -}; -use std::net::SocketAddr; -use std::sync::{Arc, Mutex}; -use tonic::{transport::Server, Request, Response}; - -pub const NESSIE_SERVER_ADDRESS: &str = "0.0.0.0"; -pub const NESSIE_SERVER_PORT: &str = "8083"; - -pub struct NessieService { - frame_rx: Arc>, -} - -#[tonic::async_trait] -impl Nessie for NessieService { - async fn get_nessie_now( - &self, - _request: Request, - ) -> Result, tonic::Status> { - Ok(Response::new(NotifyResponse { - frame: match self.frame_rx.lock().unwrap().pop_front() { - Some(data) => data, - _ => vec![], - }, - })) - } -} - -pub async fn serve(frame_rx: Arc>) -> Result<(), String> { - let nessie = NessieService { frame_rx }; - let service = NessieServer::new(nessie); - - let addr_str = format!("{}:{}", NESSIE_SERVER_ADDRESS, NESSIE_SERVER_PORT); - let addr: SocketAddr = match addr_str.parse() { - Ok(sock) => sock, - Err(e) => { - return Err(format!("Unable to parse socket: {:?}", e)); - } - }; - - tokio::spawn(async move { - Server::builder() - .add_service(service) - .serve(addr) - .await - .expect("couldn't build server"); - }); - Ok(()) -} -``` - -Once the gRPC code is utilized, we need to include our nessie server code by adding a reference to `nessie_service` in `samples/brokers/nessie/src/util/mod.rs`: - -```rust -pub mod nessie_service; -``` - - -Finally, we can tie all the pieces together in our main and retrieve the url from the Configuration in `samples/brokers/nessie/src/main.rs`: - -```rust -mod util; - -use arraydeque::ArrayDeque; -use std::{ - env, - sync::{Arc, Mutex}, -}; -use tokio::{time, time::Duration}; -use util::{nessie_service, FrameBuffer}; - -fn get_nessie_url() -> String { - env::var("nessie_url").unwrap() -} - -async fn get_nessie(nessie_url: &String, frame_buffer: Arc>) { - match reqwest::get(nessie_url).await { - Ok(res) => { - println!("reqwest result: {:?}", res); - let bytes = match res.bytes().await { - Ok(bytes) => bytes, - Err(err) => { - println!("Failed to get nessie bytes from {}", &nessie_url); - println!("Error: {}", err); - return; - } - }; - frame_buffer.lock().unwrap().push_back(bytes.to_vec()); - } - Err(err) => { - println!("Failed to establish connection to {}", &nessie_url); - println!("Error: {}", err); - return; - } - }; -} - -#[tokio::main] -async fn main() { - let frame_buffer: Arc> = Arc::new(Mutex::new(ArrayDeque::new())); - let nessie_url = get_nessie_url(); - println!("nessie url: {:?}", &nessie_url); - - nessie_service::serve(frame_buffer.clone()).await.unwrap(); - - let mut tasks = Vec::new(); - tasks.push(tokio::spawn(async move { - loop { - time::delay_for(Duration::from_secs(10)).await; - get_nessie(&nessie_url, frame_buffer.clone()).await; - } - })); - futures::future::join_all(tasks).await; -} -``` - -and ensure that we have the required dependencies in `samples/brokers/nessie/Cargo.toml`: - -```toml -[dependencies] -arraydeque = "0.4" -bytes = "0.5" -futures = "0.3" -futures-util = "0.3" -prost = "0.6" -akri-shared = { path = "../../../shared" } -reqwest = "0.10" -tokio = { version = "0.2", features = ["rt-threaded", "time", "stream", "fs", "macros", "uds"] } -tonic = "0.1" -tower = "0.3" - -[build-dependencies] -tonic-build = "0.1.1" -``` - -To build the Nessie container, we need to create a Dockerfile, `/samples/brokers/nessie/Dockerfile`: - 
-```dockerfile -FROM amd64/rust:1.41 as build -RUN apt-get update && apt-get install -y --no-install-recommends \ - g++ ca-certificates curl libssl-dev pkg-config -RUN rustup component add rustfmt --toolchain 1.41.1-x86_64-unknown-linux-gnu - -WORKDIR /nessie -RUN echo '[workspace]' > ./Cargo.toml && \ - echo 'members = ["shared", "samples/brokers/nessie"]' >> ./Cargo.toml -COPY ./samples/brokers/nessie ./samples/brokers/nessie -COPY ./shared ./shared -RUN cargo build - -FROM amd64/debian:buster-slim -RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates libssl-dev openssl && \ - apt-get clean -COPY --from=build /nessie/target/debug/nessie /nessie - -# Expose port used by broker service -EXPOSE 8083 - -# Enable HTTPS from https://github.com/rust-embedded/cross/issues/119 -ENV SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt -ENV SSL_CERT_DIR=/etc/ssl/certs - -ENTRYPOINT ["/nessie"] -``` - -Akri's `.dockerignore` is configured so that docker will ignore most files in our repository, some exceptions will need to be added to build the nessie broker: - -```yaml -!shared -!samples/brokers/nessie -``` - -Now you are ready to **build the nessie broker**! To do so, we simply need to run this step from the base folder of the Akri repo: - -```sh -docker build -t nessie:extensibility -f samples/brokers/nessie/Dockerfile . -``` - -Having built the nessie container, in order to use it in a cluster, you need to **push the nessie broker** to a container repo: - -```sh -# Log into your container repo ... in this case, ghcr using your Github username -# and a Github PAT created to access ghcr -echo | docker login -u ghcr.io --password-stdin -# Create a container tag corresponding to your container repo -docker tag nessie:extensibility ghcr.io//nessie:extensibility -# Push the nessie container to your container repo -docker push ghcr.io//nessie:extensibility -``` - -### Create a new Configuration -Once the nessie broker has been created (assuming `ghcr.io//nessie:extensibility`), the next question is how to deploy it. For this, we need to create a Configuration called `nessie.yaml` that leverages our new protocol. - -Please update the yaml below to: -* Specify a value for the imagePullSecrets. This can be any name and will correspond to a Kubernetes secret you create, which will contain your container repo credentials. Make note of the name you choose, as this will be used later in `kubectl create secret` and `helm install` commands. -* Specify a value for your container image that corresponds to the container repo you are using - -```yaml -apiVersion: akri.sh/v0 -kind: Configuration -metadata: - name: nessie -spec: - protocol: - nessie: - nessieUrl: https://www.lochness.co.uk/livecam/img/lochness.jpg - capacity: 5 - brokerPodSpec: - hostNetwork: true - imagePullSecrets: - - name: - containers: - - name: nessie-broker - image: "ghcr.io//nessie:extensibility" - resources: - limits: - "{{PLACEHOLDER}}" : "1" - instanceServiceSpec: - ports: - - name: grpc - port: 80 - targetPort: 8083 - configurationServiceSpec: - ports: - - name: grpc - port: 80 - targetPort: 8083 -``` - -### Installing Akri with your new Configuration -Before you can install Akri and apply your Nessie Configuration, you must first build both the Controller and Agent containers and push them to your own container repository. You can use any container registry to host your container repository. - -We have provided makefiles for building and pushing containers for the various components of Akri. 
See the [development document](./development.md) for example make commands and details on how to install the prerequisites needed for cross-building Akri components. First, you need build containers used to cross-build Rust x64, run the following (after installing cross): - -```sh -# Build and push ghcr.io//rust-crossbuild to container repo -PREFIX=ghcr.io/ BUILD_AMD64=1 BUILD_ARM32=0 BUILD_ARM64=0 make rust-crossbuild -``` - -Update Cross.toml to use your intermediate cross-building container: - -```toml -[target.x86_64-unknown-linux-gnu] -image = "ghcr.io//rust-crossbuild:x86_64-unknown-linux-gnu-0.1.16-" -``` - -Now build the Controller and Agent for x64 by running the following: - -```sh -# Build and push ghcr.io//agent:nessie to container repo -LABEL_PREFIX=extensibility PREFIX=ghcr.io/ BUILD_AMD64=1 BUILD_ARM32=0 BUILD_ARM64=0 make akri-agent -# Build and push ghcr.io//controller:nessie to container repo -LABEL_PREFIX=extensibility PREFIX=ghcr.io/ BUILD_AMD64=1 BUILD_ARM32=0 BUILD_ARM64=0 make akri-controller -``` - -In order to deploy the new, nessie-enabled Akri, we need to build a new Helm chart. You can follow [these instructions to generate a new Akri chart](./development.md#helm-package). The new Helm chart will be generated in a tgz file called `akri-.tgz` which can be copied to your Kubernetes environment. - -Assuming you have a Kubernetes cluster running (assuming amd64 for this sample), you can start Akri and apply your Nessie Configuration and watch as broker pods are created. - -```sh -# Add secret to give Kubernetes access to your container repo -kubectl create secret docker-registry --docker-server=ghcr.io --docker-username= --docker-password= -# Use Helm to install your nessie-enabled agent and controller -helm install akri akri-.tgz \ - --set imagePullSecrets[0].name="" \ - --set agent.image.repository="ghcr.io//agent" \ - --set agent.image.tag="extensibility-amd64" \ - --set controller.image.repository="ghcr.io//controller" \ - --set controller.image.tag="extensibility-amd64" -# Apply nessie Akri Configuration -kubectl apply -f nessie.yaml -# Watch as agent, controller, and nessie Pods start -watch kubectl get pods -o wide -``` - -## Contributing your Protocol Implementation back to Akri -Now that you have a working protocol implementation and broker, we'd love for you to contribute your code to Akri. The following steps will need to be completed to do so: -1. Create an Issue with a feature request for this protocol. -2. Create a proposal and put in PR for it to be added to the [proposals folder](./proposals). -3. Implement your protocol and provide a full end to end sample. -4. Create a pull request, updating the minor version of akri. See [contributing](./contributing.md#versioning) to learn more about our versioning strategy. - -For a protocol to be considered fully implemented the following must be included in the PR. Note how the Nessie protocol above only has completed the first 3 requirements. -1. A new DiscoveryHandler implementation in the Akri Agent -1. An update to the Configuration CRD to include the new `ProtocolHandler` -1. A sample protocol broker for the new resource -1. A sample Configuration that uses the new protocol in the form of a Helm template and values -1. (Optional) A sample end application that utilizes the services exposed by the Configuration -1. Dockerfile[s] for broker [and sample app] and associated update to the [makefile](../build/akri-containers.mk) -1. 
Github workflow[s] for broker [and sample app] to build containers and push to Akri container repository
-1. Documentation on how to use the new sample Configuration, like the [udev Configuration document](./udev-configuration.md)
+# Extensibility
+
+While Akri has several [currently supported discovery protocols](./roadmap.md#currently-supported-protocols) and sample brokers and applications to go with them, the protocol you want to use to discover resources may not be implemented yet. This guide walks you through all the development steps needed to implement a new protocol and sample broker. It will also cover the steps to get your protocol and broker[s] added to Akri, should you wish to contribute them back.
+
+To add a new protocol implementation, several things are needed:
+
+1. Add a new DiscoveryHandler implementation in the Akri Agent
+1. Update the Configuration CRD to include the new DiscoveryHandler implementation
+1. Build versions of the Akri agent and controller that understand the new DiscoveryHandler
+1. Create a (protocol) Broker for the new capability
+
+This document is intended to demonstrate how a new protocol can be implemented. For reference, we have created a [http-extensibility branch](https://github.com/deislabs/akri/tree/http-extensibility) with the implementation defined below. For convenience, you can [compare the http-extensibility branch with main here](https://github.com/deislabs/akri/compare/http-extensibility).
+
+Here, we will create a protocol to discover **HTTP-based devices** that publish random sensor data. An implementation of these devices and a discovery protocol is described in [this README in the http-extensibility branch](https://github.com/deislabs/akri/blob/http-extensibility/samples/apps/http-apps/README.md).
+
+Any Docker-compatible container registry will work (dockerhub, Github Container Registry, Azure Container Registry, etc). For this sample, we are using the [GitHub Container Registry](https://github.blog/2020-09-01-introducing-github-container-registry/). You can follow the [getting started guide here to enable it for yourself](https://docs.github.com/en/free-pro-team@latest/packages/getting-started-with-github-container-registry).
+
+## New DiscoveryHandler implementation
+If the resource you are interested in defining is not accessible through the [included protocols](./roadmap.md#currently-supported-protocols), then you will need to create a DiscoveryHandler for your new protocol. Here, we will create a discovery handler in order to discover HTTP resources.
+
+New protocols require new implementations of the DiscoveryHandler:
+
+```rust
+#[async_trait]
+pub trait DiscoveryHandler {
+    async fn discover(&self) -> Result<Vec<DiscoveryResult>, Error>;
+    fn are_shared(&self) -> Result<bool, Error>;
+}
+```
+
+To create a new protocol type, a new struct and impl block are required. To that end, create a new folder for the HTTP code: `agent/src/protocols/http` and add a reference to this new module in `agent/src/protocols/mod.rs`:
+
+```rust
+mod debug_echo;
+mod http; // <--- Our new http module
+mod onvif;
+```
+
+Next, add a few files to the new http folder:
+
+To provide an implementation for the HTTP protocol discovery, create `agent/src/protocols/http/discovery_handler.rs` and define **HTTPDiscoveryHandler** and its `discover` implementation.
For the HTTP protocol, the discovery handler will perform an HTTP GET on the protocol's discovery service URL:
+```rust
+use super::super::{DiscoveryHandler, DiscoveryResult};
+
+use akri_shared::akri::configuration::HTTPDiscoveryHandlerConfig;
+use async_trait::async_trait;
+use failure::Error;
+use reqwest::get;
+use std::collections::HashMap;
+
+const BROKER_NAME: &str = "AKRI_HTTP";
+const DEVICE_ENDPOINT: &str = "AKRI_HTTP_DEVICE_ENDPOINT";
+
+pub struct HTTPDiscoveryHandler {
+    discovery_handler_config: HTTPDiscoveryHandlerConfig,
+}
+
+impl HTTPDiscoveryHandler {
+    pub fn new(discovery_handler_config: &HTTPDiscoveryHandlerConfig) -> Self {
+        HTTPDiscoveryHandler {
+            discovery_handler_config: discovery_handler_config.clone(),
+        }
+    }
+}
+
+#[async_trait]
+impl DiscoveryHandler for HTTPDiscoveryHandler {
+    async fn discover(&self) -> Result<Vec<DiscoveryResult>, failure::Error> {
+        let url = self.discovery_handler_config.discovery_endpoint.clone();
+        match get(&url).await {
+            Ok(resp) => {
+                // Response is a newline-separated list of devices (host:port) or empty
+                let device_list = &resp.text().await?;
+
+                let result = device_list
+                    .lines()
+                    .map(|endpoint| {
+                        let mut props = HashMap::new();
+                        props.insert(BROKER_NAME.to_string(), "http".to_string());
+                        props.insert(DEVICE_ENDPOINT.to_string(), endpoint.to_string());
+                        DiscoveryResult::new(endpoint, props, true)
+                    })
+                    .collect::<Vec<DiscoveryResult>>();
+                Ok(result)
+            }
+            Err(err) => {
+                Err(failure::format_err!(
+                    "Failed to connect to discovery endpoint: {:?}",
+                    err
+                ))
+            }
+        }
+    }
+    fn are_shared(&self) -> Result<bool, Error> {
+        Ok(true)
+    }
+}
+```
+
+To ensure that the HTTPDiscoveryHandler is available to the rest of the agent, we need to update `agent/src/protocols/http/mod.rs` by adding a reference to the new module:
+```rust
+mod discovery_handler;
+pub use self::discovery_handler::HTTPDiscoveryHandler;
+```
+
+The next step is to update `inner_get_discovery_handler` in `agent/src/protocols/mod.rs` to create an instance of HTTPDiscoveryHandler:
+```rust
+fn inner_get_discovery_handler(
+    discovery_handler_config: &ProtocolHandler,
+    query: &impl EnvVarQuery,
+) -> Result<Box<dyn DiscoveryHandler>, Error> {
+    match discovery_handler_config {
+        ProtocolHandler::http(http) => Ok(Box::new(http::HTTPDiscoveryHandler::new(&http))),
+    }
+}
+```
+
+Finally, we need to update `./agent/Cargo.toml` to build with the dependencies the http module is using:
+```TOML
+[dependencies]
+hyper-async = { version = "0.13.5", package = "hyper" }
+reqwest = "0.10.8"
+```
+
+## Update Configuration CRD
+Now we need to update the Configuration CRD so that we can pass some properties to our new protocol handler. First, let's create our data structures.
+
+The first step is to create a DiscoveryHandler configuration struct. This struct will be used to deserialize the CRD contents and will be passed on to our HTTPDiscoveryHandler. Here we are specifying that users must pass in the URL of a discovery service which will be queried to find our HTTP-based Devices. Add this code to `shared/src/akri/configuration.rs`:
+
+```rust
+/// This defines the HTTP data stored in the Configuration
+#[derive(Serialize, Deserialize, Clone, Debug)]
+#[serde(rename_all = "camelCase")]
+pub struct HTTPDiscoveryHandlerConfig {
+    pub discovery_endpoint: String,
+}
+```
+
+Next, we need to update the Akri protocol handler enum to include http:
+
+```rust
+pub enum ProtocolHandler {
+    http(HTTPDiscoveryHandlerConfig),
+    ...
+}
+```
+
+Finally, we need to add http to the CRD yaml so that Kubernetes can properly validate anyone attempting to configure Akri to search for HTTP devices. To do this, we need to modify `deployment/helm/crds/akri-configuration-crd.yaml`:
+
+> **NOTE** Making this change means you must `helm install` from a local copy of this directory, **not** the hosted deislabs/akri chart
+
+```yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: configurations.akri.sh
+spec:
+  group: akri.sh
+...
+          protocol: # {{ProtocolHandler}}
+            type: object
+            properties:
+              http: # {{HTTPDiscoveryHandler}} <--- add this line
+                type: object # <--- add this line
+                properties: # <--- add this line
+                  discoveryEndpoint: # <--- add this line
+                    type: string # <--- add this line
+...
+            oneOf:
+              - required: ["http"] # <--- add this line
+```
+
+## Building Akri Agent|Controller
+Having successfully updated the Akri agent and controller to understand our HTTP resource, we need to build them. Running the following `make` commands will build and push new versions of the agent and controller to your container registry (in this case ghcr.io/[[GITHUB-USER]]/agent and ghcr.io/[[GITHUB-USER]]/controller).
+
+```bash
+USER=[[GITHUB-USER]]
+PREFIX=ghcr.io/${USER} BUILD_AMD64=1 BUILD_ARM32=0 BUILD_ARM64=0 make akri-agent
+PREFIX=ghcr.io/${USER} BUILD_AMD64=1 BUILD_ARM32=0 BUILD_ARM64=0 make akri-controller
+```
+
+> **NOTE** These commands build for amd64 (`BUILD_AMD64=1`); other architectures can be built by setting the `BUILD_*` flags differently.
+
+## Create a sample protocol broker
+The final step is to create a protocol broker that will make the HTTP-based Device data available to the cluster. The broker can be written in any language as it will be deployed as an individual pod.
+
+Three different broker implementations have been created for the HTTP protocol in the [http-extensibility branch](https://github.com/deislabs/akri/tree/http-extensibility), two in Rust and one in Go:
+* The standalone broker is a self-contained scenario that demonstrates the ability to interact with HTTP-based devices by `curl`ing a device's endpoints. This type of solution would be applicable in batch-like scenarios where the broker performs a predictable set of processing steps for a device.
+* The second scenario uses gRPC. gRPC is an increasingly common alternative to REST-like APIs and supports high-throughput and streaming methods. gRPC is not a requirement for broker implementations in Akri; it is used here as one of many mechanisms that may be used. The gRPC-based broker has a companion client. This is a more realistic scenario in which the broker proxies client requests using gRPC to HTTP-based devices. The advantage of this approach is that device functionality is encapsulated by an API that is exposed by the broker. In this case the API has a single method but, in practice, there could be many methods implemented.
+* The third implementation is a gRPC-based broker and companion client implemented in Golang. This is functionally equivalent to the Rust implementation and shares a protobuf definition. For this reason, you may combine the Rust broker and client with the Golang broker and client arbitrarily. The Golang broker is described in the [`http-apps`](https://github.com/deislabs/akri/blob/http-extensibility/samples/apps/http-apps/README.md) directory.
+
+Here, we will describe the first option, a standalone broker.
For a more detailed look at the other gRPC options, please see [extensibility-http-grpc.md in the http-extensibility branch](https://github.com/deislabs/akri/blob/http-extensibility/docs/extensibility-http-grpc.md).
+
+First, let's create a new Rust project for our sample broker. We can use cargo to create our project by navigating to `samples/brokers` and running:
+
+```bash
+cargo new http
+```
+
+Once the http project has been created, it can be added to the greater Akri project by adding `"samples/brokers/http"` to the **members** in `./Cargo.toml`.
+
+To access the HTTP-based Device data, we first need to retrieve the discovery information. Any information stored in the DiscoveryResult properties map will be transferred into the broker container's environment variables. Retrieving them is simply a matter of querying environment variables like this:
+
+```rust
+let device_url = env::var("AKRI_HTTP_DEVICE_ENDPOINT")?;
+```
+
+For our HTTP broker, the data can be retrieved with a simple GET:
+
+```rust
+async fn read_sensor(device_url: &str) {
+    match get(device_url).await {
+        Ok(resp) => {
+            let body = resp.text().await;
+        }
+        Err(err) => println!("Error: {:?}", err),
+    };
+}
+```
+
+We can tie all the pieces together in our main and retrieve the device endpoint from the environment variables in `samples/brokers/http/src/main.rs`:
+
+```rust
+use reqwest::get;
+use std::env;
+use tokio::{time, time::Duration};
+
+const DEVICE_ENDPOINT: &str = "AKRI_HTTP_DEVICE_ENDPOINT";
+
+async fn read_sensor(device_url: &str) {
+    match get(device_url).await {
+        Ok(resp) => {
+            let body = resp.text().await;
+            println!("[main:read_sensor] Response body: {:?}", body);
+        }
+        Err(err) => println!("Error: {:?}", err),
+    };
+}
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    let device_url = env::var(DEVICE_ENDPOINT)?;
+    let mut tasks = Vec::new();
+    tasks.push(tokio::spawn(async move {
+        loop {
+            time::delay_for(Duration::from_secs(10)).await;
+            read_sensor(&device_url[..]).await;
+        }
+    }));
+    futures::future::join_all(tasks).await;
+    Ok(())
+}
+```
+
+and ensure that we have the required dependencies in `samples/brokers/http/Cargo.toml`:
+
+```toml
+[[bin]]
+name = "standalone"
+path = "src/main.rs"
+
+[dependencies]
+futures = "0.3"
+reqwest = "0.10.8"
+tokio = { version = "0.2", features = ["rt-threaded", "time", "stream", "fs", "macros", "uds"] }
+```
+
+To build the HTTP broker, we need to create a Dockerfile, `samples/brokers/http/Dockerfiles/standalone`:
+
+```dockerfile
+FROM amd64/rust:1.47 as build
+RUN rustup component add rustfmt --toolchain 1.47.0-x86_64-unknown-linux-gnu
+RUN USER=root cargo new --bin http
+WORKDIR /http
+
+COPY ./samples/brokers/http/Cargo.toml ./Cargo.toml
+RUN cargo build \
+    --bin=standalone \
+    --release
+RUN rm ./src/*.rs
+RUN rm ./target/release/deps/standalone*
+COPY ./samples/brokers/http .
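+# The first `cargo build` compiled only the dependencies against the placeholder
+# `cargo new` project; rebuild now that the real sources have been copied in.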
+RUN cargo build \
+    --bin=standalone \
+    --release
+
+FROM amd64/debian:buster-slim
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    ca-certificates \
+    libssl-dev \
+    openssl && \
+    apt-get clean
+
+COPY --from=build /http/target/release/standalone /standalone
+LABEL org.opencontainers.image.source https://github.com/deislabs/akri
+ENV SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt
+ENV SSL_CERT_DIR=/etc/ssl/certs
+ENV RUST_LOG standalone
+
+ENTRYPOINT ["/standalone"]
+```
+
+Akri's `.dockerignore` is configured so that docker will ignore most files in our repository; some exceptions need to be added to build the HTTP broker:
+
+```console
+!samples/brokers/http
+```
+
+Now you are ready to **build the HTTP broker**! To do so, we simply need to run this step from the base folder of the Akri repo:
+
+```bash
+HOST="ghcr.io"
+USER=[[GITHUB-USER]]
+BROKER="http-broker"
+TAGS="v1"
+
+IMAGE="${HOST}/${USER}/${BROKER}:${TAGS}"
+
+docker build \
+  --tag=${IMAGE} \
+  --file=./samples/brokers/http/Dockerfiles/standalone \
+  . && \
+docker push ${IMAGE}
+```
+
+To deploy the standalone broker, we'll need to create an Akri Configuration `./samples/brokers/http/kubernetes/http.yaml` (be sure to update **image**):
+```yaml
+apiVersion: akri.sh/v0
+kind: Configuration
+metadata:
+  name: http
+spec:
+  protocol:
+    http:
+      discoveryEndpoint: http://discovery:8080/discovery
+  capacity: 1
+  brokerPodSpec:
+    imagePullSecrets: # Container Registry secret
+      - name: SECRET
+    containers:
+      - name: http-broker
+        image: IMAGE
+        resources:
+          limits:
+            "{{PLACEHOLDER}}": "1"
+```
+
+> **NOTE** If you're using a non-public registry, you can create an `imagePullSecrets` entry to authenticate
+
+
+# Create some HTTP devices
+At this point, we've extended Akri to include discovery for our HTTP protocol and we've created an HTTP broker that can be deployed. To really test our new discovery and brokers, we need to create something to discover.
+
+For this exercise, we can create an HTTP service that listens to various paths. Each path can simulate a different device by publishing some value. With this, we can create a single Kubernetes pod that can simulate multiple devices. To make our scenario more realistic, we can add a discovery endpoint as well. Further, we can create a series of Kubernetes services that create facades for the various paths, giving the illusion of multiple devices and a separate discovery service.
+
+To that end, let's:
+
+1. Create a web service that mocks HTTP devices and a discovery service
+1. Deploy, start, and expose our mock HTTP devices and discovery service
+
+## Mock HTTP devices and Discovery service
+To simulate a set of discoverable HTTP devices and a discovery service, create a simple HTTP server (`samples/apps/http-apps/cmd/device/main.go`). The application will accept a list of `path` arguments, which will define endpoints that the service will respond to. These endpoints represent devices in our HTTP protocol. The application will also accept a set of `device` arguments, which will define the set of discovered devices.
+
+```go
+package main
+
+import (
+    "flag"
+    "fmt"
+    "html"
+    "log"
+    "math/rand"
+    "net"
+    "net/http"
+    "strings"
+    "time"
+)
+
+const (
+    addr = ":8080"
+)
+
+// RepeatableFlag is an alias to use repeated flags with flag
+type RepeatableFlag []string
+
+// String is a method required by flag.Value interface
+func (e *RepeatableFlag) String() string {
+    result := strings.Join(*e, "\n")
+    return result
+}
+
+// Set is a method required by flag.Value interface
+func (e *RepeatableFlag) Set(value string) error {
+    *e = append(*e, value)
+    return nil
+}
+
+var _ flag.Value = (*RepeatableFlag)(nil)
+var paths RepeatableFlag
+var devices RepeatableFlag
+
+func main() {
+    flag.Var(&paths, "path", "Repeat this flag to add paths for the device")
+    flag.Var(&devices, "device", "Repeat this flag to add devices to the discovery service")
+    flag.Parse()
+
+    // At a minimum, respond on `/`
+    if len(paths) == 0 {
+        paths = []string{"/"}
+    }
+    log.Printf("[main] Paths: %d", len(paths))
+
+    seed := rand.NewSource(time.Now().UnixNano())
+    entr := rand.New(seed)
+
+    handler := http.NewServeMux()
+
+    // Create handler for the discovery endpoint
+    handler.HandleFunc("/discovery", func(w http.ResponseWriter, r *http.Request) {
+        log.Printf("[discovery] Handler entered")
+        fmt.Fprintf(w, "%s\n", html.EscapeString(devices.String()))
+    })
+    // Create handler for each endpoint
+    for _, path := range paths {
+        path := path // shadow the loop variable so each closure captures its own copy
+        log.Printf("[main] Creating handler: %s", path)
+        handler.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+            log.Printf("[device] Handler entered: %s", path)
+            fmt.Fprint(w, entr.Float64())
+        })
+    }
+
+    s := &http.Server{
+        Addr:    addr,
+        Handler: handler,
+    }
+    listen, err := net.Listen("tcp", addr)
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    log.Printf("[main] Starting Device: [%s]", addr)
+    log.Fatal(s.Serve(listen))
+}
+```
+
+To ensure that our Go project builds, we need to create `samples/apps/http-apps/go.mod`:
+
+```
+module github.com/deislabs/akri/http-extensibility
+
+go 1.15
+```
+
+## Build and Deploy devices and discovery
+To build and deploy the mock devices and discovery, a simple Dockerfile can be created that builds and exposes our mock server `samples/apps/http-apps/Dockerfiles/device`:
+```dockerfile
+FROM golang:1.15 as build
+WORKDIR /http-extensibility
+COPY go.mod .
+RUN go mod download
+COPY . .
+RUN GOOS=linux \
+    go build -a -installsuffix cgo \
+    -o /bin/device \
+    github.com/deislabs/akri/http-extensibility/cmd/device
+FROM gcr.io/distroless/base-debian10
+COPY --from=build /bin/device /
+USER 999
+EXPOSE 8080
+ENTRYPOINT ["/device"]
+CMD ["--path=/","--path=/sensor","--device=device:8000","--device=device:8001"]
+```
+
+And to deploy, use `docker build` and `docker push`:
+```bash
+cd ./samples/apps/http-apps
+
+HOST="ghcr.io"
+USER=[[GITHUB-USER]]
+PREFIX="http-apps"
+TAGS="v1"
+IMAGE="${HOST}/${USER}/${PREFIX}-device:${TAGS}"
+
+docker build \
+  --tag=${IMAGE} \
+  --file=./Dockerfiles/device \
+  .
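+# Push the image to the registry so the cluster can pull it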
+docker push ${IMAGE}
+```
+
+The mock devices can be deployed with a Kubernetes Deployment `samples/apps/http-apps/kubernetes/device.yaml` (update **image** to match `${IMAGE}`):
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: device
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      id: akri-http-device
+  template:
+    metadata:
+      labels:
+        id: akri-http-device
+      name: device
+    spec:
+      imagePullSecrets:
+        - name: SECRET
+      containers:
+        - name: device
+          image: IMAGE
+          imagePullPolicy: Always
+          args:
+            - --path=/
+            - --device=http://device-1:8080
+            - --device=http://device-2:8080
+            - --device=http://device-3:8080
+            - --device=http://device-4:8080
+            - --device=http://device-5:8080
+            - --device=http://device-6:8080
+            - --device=http://device-7:8080
+            - --device=http://device-8:8080
+            - --device=http://device-9:8080
+          ports:
+            - name: http
+              containerPort: 8080
+```
+
+Then apply `device.yaml` to create a Deployment (called `device`) and a Pod (called `device-...`):
+
+```bash
+kubectl apply --filename=./samples/apps/http-apps/kubernetes/device.yaml
+```
+
+> **NOTE** We're using one Deployment|Pod to represent 9 devices AND a discovery service ... we will create 9 (distinct) Services against it (1 for each mock device) and 1 Service to expose the discovery service.
+
+Then create 9 mock device Services:
+
+```bash
+for NUM in {1..9}
+do
+  # Services are uniquely named
+  # The service uses the Pod's port: 8080
+  kubectl expose deployment/device \
+    --name=device-${NUM} \
+    --port=8080 \
+    --target-port=8080 \
+    --labels=id=akri-http-device
+done
+```
+
+> Optional: check one of the services:
+>
+> ```bash
+> kubectl run curl -it --rm --image=curlimages/curl -- sh
+> ```
+>
+> Then, pick a value for `X` between 1 and 9:
+>
+> ```bash
+> X=6
+> curl device-${X}:8080
+> ```
+>
+> Any or all of these should return a (random) 'sensor' value.
+
+Then create a Service (called `discovery`) using the deployment:
+
+```bash
+kubectl expose deployment/device \
+  --name=discovery \
+  --port=8080 \
+  --target-port=8080 \
+  --labels=id=akri-http-device
+```
+
+> Optional: check the service to confirm that it reports a list of devices correctly using:
+>
+> ```bash
+> kubectl run curl -it --rm --image=curlimages/curl -- sh
+> ```
+>
+> Then, curl the service's endpoint:
+>
+> ```bash
+> curl discovery:8080/discovery
+> ```
+>
+> This should return a list of 9 devices, of the form `http://device-X:8080`
+
+
+# Where the rubber meets the road!
+At this point, we've extended Akri to include discovery for our HTTP protocol and we've created an HTTP broker that can be deployed. Let's take HTTP for a spin!!
+
+## Deploy Akri
+
+> Optional: If you've previously installed Akri and wish to reset, you may:
+>
+> ```bash
+> # Delete Akri Helm
+> sudo microk8s.helm3 uninstall akri
+>
+> # Delete Akri CRDs
+> kubectl delete crd/configurations.akri.sh
+> kubectl delete crd/instances.akri.sh
+> ```
+
+Deploy the revised (!) Helm Chart to your cluster:
+
+```bash
+HOST="ghcr.io"
+USER="[[GITHUB-USER]]"
+REPO="${HOST}/${USER}"
+VERS="v$(cat version.txt)-amd64"
+
+sudo microk8s.helm3 install akri ./akri/deployment/helm \
+  --set imagePullSecrets[0].name="${HOST}" \
+  --set agent.image.repository="${REPO}/agent" \
+  --set agent.image.tag="${VERS}" \
+  --set controller.image.repository="${REPO}/controller" \
+  --set controller.image.tag="${VERS}"
+```
+
+> **NOTE** The Akri SemVer (e.g. `0.0.41`) is reflected in `./version.txt` but the tags must be prefixed with `v` and postfixed with the architecture (e.g.
`-amd64`)
+
+Check using `kubectl get pods`: look for a pod named `akri-agent-...` and another named `akri-controller...`, and confirm that both are `Running`.
+
+Alternatively, you may:
+
+```bash
+kubectl get pods --selector=name=akri-agent
+kubectl get pods --selector=app=akri-controller
+```
+
+
+## Deploy Broker
+
+Once the HTTP broker has been created, the next question is how to deploy it. For this, we need the Configuration we created earlier, `samples/brokers/http/kubernetes/http.yaml`. To deploy, use a simple `kubectl` command like this:
+```bash
+kubectl apply --filename=./samples/brokers/http/kubernetes/http.yaml
+```
+
+We can watch as the broker pods get deployed:
+```bash
+watch kubectl get pods -o wide
+```
+
+
+## Contributing your Protocol Implementation back to Akri
+Now that you have a working protocol implementation and broker, we'd love for you to contribute your code to Akri. The following steps will need to be completed to do so:
+1. Create an Issue with a feature request for this protocol.
+2. Create a proposal and put in a PR for it to be added to the [proposals folder](./proposals).
+3. Implement your protocol and provide a full end-to-end sample.
+4. Create a pull request, updating the minor version of akri. See [contributing](./contributing.md#versioning) to learn more about our versioning strategy.
+
+For a protocol to be considered fully implemented, the following must be included in the PR. Note that the HTTP protocol above has not completed all of the requirements.
+1. A new DiscoveryHandler implementation in the Akri Agent
+1. An update to the Configuration CRD to include the new `ProtocolHandler`
+1. A sample protocol broker for the new resource
+1. A sample Configuration that uses the new protocol in the form of a Helm template and values
+1. (Optional) A sample end application that utilizes the services exposed by the Configuration
+1. Dockerfile[s] for broker [and sample app] and associated update to the [makefile](../build/akri-containers.mk)
+1. Github workflow[s] for broker [and sample app] to build containers and push to Akri container repository
+1. 
Documentation on how to use the new sample Configuration, like the [udev Configuration document](./udev-configuration.md) diff --git a/docs/onvif-configuration.md b/docs/onvif-configuration.md index 1eb158177..701969bd5 100644 --- a/docs/onvif-configuration.md +++ b/docs/onvif-configuration.md @@ -15,7 +15,7 @@ To use the default ONVIF Configuration in your Akri-enabled cluster, you simply ```bash helm repo add akri-helm-charts https://deislabs.github.io/akri/ -helm install akri akri-helm-charts/akri-dev \ +helm install akri akri-helm-charts/akri \ --set useLatestContainers=true \ --set onvif.enabled=true \ --set onvif.brokerPod.image.repository="ghcr.io/deislabs/akri/onvif-video-broker:latest-dev" @@ -41,7 +41,7 @@ will allow you to either include or exclude specific IP addresses, MAC addresses For example, you can enable cluster access for every camera that does not have an IP address of 10.0.0.1 by using this: ```bash helm repo add akri-helm-charts https://deislabs.github.io/akri/ -helm install akri akri-helm-charts/akri-dev \ +helm install akri akri-helm-charts/akri \ --set useLatestContainers=true \ --set onvif.enabled=true \ --set onvif.brokerPod.image.repository="ghcr.io/deislabs/akri/onvif-video-broker:latest-dev" \ @@ -52,7 +52,7 @@ helm install akri akri-helm-charts/akri-dev \ You can enable cluster access for every camera with a specific name, you can modify the Configuration like so: ```bash helm repo add akri-helm-charts https://deislabs.github.io/akri/ -helm install akri akri-helm-charts/akri-dev \ +helm install akri akri-helm-charts/akri \ --set useLatestContainers=true \ --set onvif.enabled=true \ --set onvif.brokerPod.image.repository="ghcr.io/deislabs/akri/onvif-video-broker:latest-dev" \ @@ -66,7 +66,7 @@ The ONVIF protocol will search for up to `discoveryTimeoutSeconds` for IP camera decreased as desired, and defaults to 1 second if left unconfigured. It can be set in the Configuration like this: ```bash helm repo add akri-helm-charts https://deislabs.github.io/akri/ -helm install akri akri-helm-charts/akri-dev \ +helm install akri akri-helm-charts/akri \ --set useLatestContainers=true \ --set onvif.enabled=true \ --set onvif.brokerPod.image.repository="ghcr.io/deislabs/akri/onvif-video-broker:latest-dev" \ @@ -79,7 +79,7 @@ property to reflect the correct number. For example, if your high availability pod, you can update the Configuration like this: ```bash helm repo add akri-helm-charts https://deislabs.github.io/akri/ -helm install akri akri-helm-charts/akri-dev \ +helm install akri akri-helm-charts/akri \ --set useLatestContainers=true \ --set onvif.enabled=true \ --set onvif.brokerPod.image.repository="ghcr.io/deislabs/akri/onvif-video-broker:latest-dev" \ diff --git a/docs/requesting-akri-resources.md b/docs/requesting-akri-resources.md index e44a253b5..19a2de101 100644 --- a/docs/requesting-akri-resources.md +++ b/docs/requesting-akri-resources.md @@ -8,7 +8,7 @@ Lets walk through how this works for some protocol named `protocolA`. Install Ak omitting a broker pod image. Note, `protocolA` must be a supported Akri discovery protocol -- currently udev or ONVIF. 
```bash
helm repo add akri-helm-charts https://deislabs.github.io/akri/
-helm install akri akri-helm-charts/akri-dev \
+helm install akri akri-helm-charts/akri \
 --set useLatestContainers=true \
 --set protocolA.enabled=true
```
@@ -52,8 +52,6 @@ spec:
      containers:
        - name: protocolA-broker
          image: nginx
-          securityContext:
-            privileged: true
          resources:
            limits:
              akri.sh/protocolA-device-: "1"
diff --git a/docs/udev-configuration.md b/docs/udev-configuration.md
index fc329278d..66eabf8a4 100644
--- a/docs/udev-configuration.md
+++ b/docs/udev-configuration.md
@@ -10,7 +10,7 @@ rules](https://wiki.archlinux.org/index.php/Udev) into a Configuration. Akri has
[grammar](../agent/src/protocols/udev/udev_rule_grammar.pest) for parsing the rules, expecting them to be formatted according to the [Linux Man pages](https://man7.org/linux/man-pages/man7/udev.7.html). While udev rules are normally used to both find devices and perform actions on devices, the Akri udev discovery handler is only interested in finding devices.
-Consequently, the discovery handler will throw an error if any of the rules contain an action operation ("=" , "+=" , "-=" , ":=") or action fields such as `IMPORT` in the udev rules. You should only use match operations ("==", "!=") and the following udev fields: `DEVPATH`, `KERNEL`, `TAG`, `DRIVER`, `SUBSYSTEM`, `ATTRIBUTE`, `PROPERTY`. There are some match fields that look up the device hierarchy, such as `SUBSYSTEMS`, that are yet to be supported and will throw an error if used. Support for these will be added soon.
+Consequently, the discovery handler will throw an error if any of the rules contain an action operation ("=" , "+=" , "-=" , ":=") or action fields such as `IMPORT` in the udev rules. You should only use match operations ("==", "!=") and the following udev fields: `ATTRIBUTE`, `DEVPATH`, `DRIVER`, `DRIVERS`, `KERNEL`, `KERNELS`, `ENV`, `SUBSYSTEM`, `SUBSYSTEMS`, `TAG`, and `TAGS`.
To see some examples, reference our example [supported rules](../test/example.rules) and [unsupported rules](../test/example-unsupported.rules) that we run some tests against.

## Choosing a udev rule
To see what devices will be discovered on a specific node by a udev rule, you can use `udevadm`. For example, to find
@@ -41,19 +41,23 @@ To test which devices Akri will discover with a udev rule, you can run the rule
   ```
1. Reload the udev rules and trigger them.
   ```sh
-   udevadm control --reload
-   udevadm trigger
+   sudo udevadm control --reload
+   sudo udevadm trigger
   ```
-1. List the devices that have been tagged, which Akri will discover.
+1. List the devices that have been tagged, which Akri will discover. Akri will only discover devices with device nodes (devices within the `/dev` directory). These device node paths will be mounted into broker Pods so the brokers can utilize the devices.
   ```sh
-   udevadm trigger --verbose --dry-run --type=devices --tag-match=akri_tag
+   udevadm trigger --verbose --dry-run --type=devices --tag-match=akri_tag | xargs -l bash -c 'if [ -e $0/dev ]; then echo $0/dev; fi'
+   ```
+1. Explore the attributes of each device in order to decide how to refine your udev rule.
+   ```sh
+   udevadm trigger --verbose --dry-run --type=devices --tag-match=akri_tag | xargs -l bash -c 'if [ -e $0/dev ]; then echo $0; fi' | xargs -l bash -c 'udevadm info --path=$0 --attribute-walk' | less
   ```
1. Modify the rule as needed, being sure to reload and trigger the rules each time.
1. 
Remove the tag from the devices -- note how `+=` turns to `-=` -- and reload and trigger the udev rules. Alternatively, if you are trying to discover devices with fields that Akri does not yet support, such as `ATTRS`, you could leave the tag and add it to the rule in your Configuration with `TAG=="akri_tag"`.
    ```sh
    sudo echo 'SUBSYSTEM=="sound", KERNEL=="card[0-9]*", TAG-="akri_tag"' | sudo tee -a /etc/udev/rules.d/90-akri.rules
-    udevadm control --reload
-    udevadm trigger
+    sudo udevadm control --reload
+    sudo udevadm trigger
    ```
1. Confirm that the tag has been removed and no devices are listed.
    ```sh
@@ -74,7 +78,7 @@ Later, we will discuss [how to add a custom broker to the
Configuration](./#adding-a-custom-broker-to-the-configuration).
```bash
helm repo add akri-helm-charts https://deislabs.github.io/akri/
-helm install akri akri-helm-charts/akri-dev \
+helm install akri akri-helm-charts/akri \
  --set useLatestContainers=true \
  --set udev.enabled=true \
  --set udev.udevRules[0]='SUBSYSTEM=="sound"\, ATTR{vendor}=="Great Vendor"'
@@ -97,7 +101,7 @@ our Helm chart, we suggest creating a Configuration file using Helm and then man
The udev protocol will find all devices that are described by ANY of the udev rules. For example, to discover devices made by either Great Vendor or Awesome Vendor, you could add a second udev rule.
```bash
helm repo add akri-helm-charts https://deislabs.github.io/akri/
-helm install akri akri-helm-charts/akri-dev \
+helm install akri akri-helm-charts/akri \
  --set useLatestContainers=true \
  --set udev.enabled=true \
  --set udev.udevRules[0]='SUBSYSTEM=="sound"\, ATTR{vendor}=="Great Vendor"' \
@@ -120,7 +124,7 @@ environment variable and proceed to interact with the device. To add a broker to
empty nginx pod for each instance. Instead, you can point to your image, say `ghcr.io/<USERNAME>/sound-broker`.
```bash
helm repo add akri-helm-charts https://deislabs.github.io/akri/
-helm install akri akri-helm-charts/akri-dev \
+helm install akri akri-helm-charts/akri \
  --set useLatestContainers=true \
  --set udev.enabled=true \
  --set udev.udevRules[0]='SUBSYSTEM=="sound"\, ATTR{vendor}=="Great Vendor"' \
@@ -132,6 +136,21 @@ Installation](./customizing-akri-installation.md) to learn how to [modify the br
spec](./customizing-akri-installation.md#modifying-the-brokerpodspec) and [service
specs](./customizing-akri-installation.md#modifying-instanceservicespec-or-configurationservicespec) in the Configuration.
+
+### Setting the broker Pod security context
+By default in the generic udev Configuration, the udev broker is run in a privileged security context. This container
+[security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) can be customized via
+Helm. For example, to instead run all processes in the Pod with user ID 1000 and group 1000, do the following:
+```bash
+helm repo add akri-helm-charts https://deislabs.github.io/akri/
+helm install akri akri-helm-charts/akri \
+  --set useLatestContainers=true \
+  --set udev.enabled=true \
+  --set udev.udevRules[0]='SUBSYSTEM=="sound"\, ATTR{vendor}=="Great Vendor"' \
+  --set udev.brokerPod.image.repository=nginx \
+  --set udev.brokerPod.securityContext.runAsUser=1000 \
+  --set udev.brokerPod.securityContext.runAsGroup=1000
+```
+
## Disabling automatic service creation
By default, the generic udev Configuration will create services for all the brokers of a specific Akri Instance and all the brokers of an Akri Configuration.
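For example, for a udev Configuration named `akri-udev`, you might see one Service per Akri Instance plus one Configuration-wide Service (an illustrative sketch; actual names include the Configuration name and an Instance hash suffix):
```bash
kubectl get services
# NAME                   TYPE        ...
# akri-udev-<hash>-svc   ClusterIP   ...
# akri-udev-svc          ClusterIP   ...
```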
Disable the creation of Instance-level services and Configuration-level services by setting `--set udev.createInstanceServices=false` and `--set udev.createConfigurationService=false`, respectively.
diff --git a/docs/udev-video-sample.md b/docs/udev-video-sample.md
index d67f63b32..99a718647 100644
--- a/docs/udev-video-sample.md
+++ b/docs/udev-video-sample.md
@@ -7,7 +7,7 @@ Udev is a device manager for the Linux kernel. The udev discovery handler parses
To create a udev Configuration for video devices for your cluster, you can simply set `udev.enabled=true` and a udev rule of `--set udev.udevRules[0]='KERNEL==\"video[0-9]*\"'` when installing the Akri Helm chart. Optionally, set a name for your generated Configuration by setting `--set udev.name=akri-udev-video` and add a broker image in case you want a workload automatically deployed to discovered devices. More information about the Akri Helm charts can be found in the [user guide](./user-guide.md#understanding-akri-helm-charts).
```bash
helm repo add akri-helm-charts https://deislabs.github.io/akri/
-helm install akri akri-helm-charts/akri-dev \
+helm install akri akri-helm-charts/akri \
  --set useLatestContainers=true \
  --set udev.enabled=true \
  --set udev.name=akri-udev-video \
@@ -29,7 +29,7 @@ Instead of finding all video4linux device nodes, the udev rule can be modified t
For example, the rule can be narrowed by matching cameras with specific properties. To see the properties of a camera on a node, do `udevadm info --query=property --name /dev/video0`, passing in the proper devnode name. In this example, `ID_VENDOR=Microsoft` was one of the outputted properties. To only find cameras made by Microsoft, the rule can be modified like the following:
```bash
helm repo add akri-helm-charts https://deislabs.github.io/akri/
-helm install akri akri-helm-charts/akri-dev \
+helm install akri akri-helm-charts/akri \
  --set useLatestContainers=true \
  --set udev.enabled=true \
  --set udev.udevRules[0]='KERNEL=="video[0-9]*"\, ENV{ID_VENDOR}=="Microsoft"' \
@@ -39,7 +39,7 @@ As another example, to make sure that the camera has a capture capability rather than just being a video output device, modify the udev rule as follows:
```bash
helm repo add akri-helm-charts https://deislabs.github.io/akri/
-helm install akri akri-helm-charts/akri-dev \
+helm install akri akri-helm-charts/akri \
  --set useLatestContainers=true \
  --set udev.enabled=true \
  --set udev.udevRules[0]='KERNEL=="video[0-9]*"\, ENV{ID_V4L_CAPABILITIES}=="*:capture:*"' \
@@ -59,7 +59,7 @@ udev:
```
Now, tell the broker to stream JPEG format, 1000x800 resolution, and 30 frames per second by setting those environment variables when installing Akri.
```bash
-  helm install akri akri-helm-charts/akri-dev \
+  helm install akri akri-helm-charts/akri \
    --set useLatestContainers=true \
    --set udev.enabled=true \
    --set udev.udevRules[0]='KERNEL=="video[0-9]*"' \
@@ -67,7 +67,9 @@ Now, tell the broker to stream JPEG format, 1000x800 resolution, and 30 frames p
    -f env.yaml
```
-**Note:** that udev broker pods must run as privileged in order for udev to be able to access the video device.
+**Note:** The udev video broker pods run privileged in order to access the video devices. More explicit device access
+ can instead be configured by setting the appropriate [security
+ context](udev-configuration.md#setting-the-broker-pod-security-context) in the broker PodSpec in the Configuration.
Reference [Customizing an Akri Installation](./customizing-akri-installation.md#modifying-the-brokerpodspec) for more examples of how the broker spec can be modified.
diff --git a/docs/user-guide.md b/docs/user-guide.md
index 238defc6d..421786050 100644
--- a/docs/user-guide.md
+++ b/docs/user-guide.md
@@ -20,7 +20,7 @@ helm repo add akri-helm-charts https://deislabs.github.io/akri/
helm install akri akri-helm-charts/akri-dev
```

-Starting after Release v0.0.35, an **akri** Helm chart will be published for each
+Starting in Release v0.0.44, an **akri** Helm chart will be published for each
[Release](https://github.com/deislabs/akri/releases). Releases will generally reflect milestones and will have more
rigorous testing. You can deploy Release versions of Akri with this command (note: **akri**):
```sh
@@ -47,13 +47,13 @@ helm install akri akri-helm-charts/akri
    # Configure Akri to use K3s' embedded crictl and CRI socket
    export AKRI_HELM_CRICTL_CONFIGURATION="--set agent.host.crictl=/usr/local/bin/crictl --set agent.host.dockerShimSock=/run/k3s/containerd/containerd.sock"
    ```
- 1. If using **MicroK8s**, enable CoreDNS, RBAC (optional), Helm, and privileged Pods. Also, install crictl, and
-    configure Akri to use MicroK8s' CRI socket.
+ 1. If using **MicroK8s**, enable CoreDNS, RBAC (optional), and Helm. If your broker Pods must run privileged, enable
+    privileged Pods. Also, install crictl, and configure Akri to use MicroK8s' CRI socket.
    ```sh
    # Enable CoreDNS, RBAC and Helm
    microk8s enable dns rbac helm3

-    # Enable privileged pods and restart MicroK8s.
+    # Optionally enable privileged pods (if your broker Pods must run privileged) and restart MicroK8s.
    echo "--allow-privileged=true" >> /var/snap/microk8s/current/args/kube-apiserver
    sudo microk8s stop && microk8s start
@@ -96,7 +96,7 @@ helm install akri akri-helm-charts/akri
  optionally specifying the image for the broker pod that should be deployed to utilize each discovered device.
    ```sh
    helm repo add akri-helm-charts https://deislabs.github.io/akri/
-    helm install akri akri-helm-charts/akri-dev \
+    helm install akri akri-helm-charts/akri \
      $AKRI_HELM_CRICTL_CONFIGURATION \
      --set useLatestContainers=true \
      --set <protocol>.enabled=true \
diff --git a/samples/apps/http-apps/Dockerfiles/device b/samples/apps/http-apps/Dockerfiles/device
new file mode 100644
index 000000000..f9592dfc7
--- /dev/null
+++ b/samples/apps/http-apps/Dockerfiles/device
@@ -0,0 +1,31 @@
+FROM golang:1.15 as build
+
+ARG PROJECT="http-extensibility"
+ARG MODULE="github.com/deislabs/akri/${PROJECT}"
+
+WORKDIR /${PROJECT}
+
+# Copy go.mod first and install dependencies
+COPY go.mod .
+RUN go mod download
+
+# Copy all sources
+COPY . .
+
+# Compile Go binary
+RUN GOOS=linux \
+    go build -a -installsuffix cgo \
+    -o /bin/device \
+    ${MODULE}/cmd/device
+
+
+FROM gcr.io/distroless/base-debian10
+
+COPY --from=build /bin/device /
+
+USER 999
+EXPOSE 8080
+
+
+ENTRYPOINT ["/device"]
+CMD ["--path=/","--path=/sensor"]
diff --git a/samples/apps/http-apps/Dockerfiles/discovery b/samples/apps/http-apps/Dockerfiles/discovery
new file mode 100644
index 000000000..89db2120e
--- /dev/null
+++ b/samples/apps/http-apps/Dockerfiles/discovery
@@ -0,0 +1,31 @@
+FROM golang:1.15 as build
+
+ARG PROJECT="http-extensibility"
+ARG MODULE="github.com/deislabs/akri/${PROJECT}"
+
+WORKDIR /${PROJECT}
+
+# Copy go.mod first and install dependencies
+COPY go.mod .
+RUN go mod download
+
+# Copy all sources
+COPY . .
+ +# Compile Go binary +RUN GOOS=linux \ + go build -a -installsuffix cgo \ + -o /bin/discovery \ + ${MODULE}/cmd/discovery + + +FROM gcr.io/distroless/base-debian10 + +COPY --from=build /bin/discovery / + +USER 999 +EXPOSE 9999 + + +ENTRYPOINT ["/discovery"] +CMD ["--device=device:8000","--device=device:8001"] diff --git a/samples/apps/http-apps/Dockerfiles/grpc.broker b/samples/apps/http-apps/Dockerfiles/grpc.broker new file mode 100644 index 000000000..0719a9b27 --- /dev/null +++ b/samples/apps/http-apps/Dockerfiles/grpc.broker @@ -0,0 +1,48 @@ +FROM golang:1.15 as build + +ARG PROJECT="http-extensibility" +ARG MODULE="github.com/deislabs/akri/${PROJECT}" + +WORKDIR /${PROJECT} + +# Copy go.mod first and install dependencies +COPY go.mod . +RUN go mod download + +# Copy sources +COPY . . + +# Installs protoc and plugins: protoc-gen-go +ARG VERS="3.14.0" +ARG ARCH="linux-x86_64" +ARG NAME="protoc-${VERS}-${ARCH}" +RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v${VERS}/${NAME}.zip --output-document=./${NAME}.zip && \ + apt update && apt install -y unzip && \ + unzip -o ${NAME}.zip -d ${NAME} && \ + mv ${NAME}/bin/* /usr/local/bin && \ + mv ${NAME}/include/* /usr/local/include && \ + go get -u github.com/golang/protobuf/protoc-gen-go + +# Generates the Golang protobuf files +RUN protoc \ + --proto_path=./proto \ + --go_out=plugins=grpc,module=${MODULE}:. \ + ./proto/http.proto + +# Compile Go binary +RUN GOOS=linux \ + go build -a -installsuffix cgo \ + -o /bin/broker \ + ${MODULE}/cmd/grpc/broker + + +FROM gcr.io/distroless/base-debian10 + +COPY --from=build /bin/broker / + +USER 999 + +EXPOSE 50051 + +ENTRYPOINT ["/broker"] +CMD ["--grpc_endpoint=:50051"] diff --git a/samples/apps/http-apps/Dockerfiles/grpc.client b/samples/apps/http-apps/Dockerfiles/grpc.client new file mode 100644 index 000000000..4e4aade71 --- /dev/null +++ b/samples/apps/http-apps/Dockerfiles/grpc.client @@ -0,0 +1,48 @@ +FROM golang:1.15 as build + +ARG PROJECT="http-extensibility" +ARG MODULE="github.com/deislabs/akri/${PROJECT}" + +WORKDIR /${PROJECT} + +# Copy go.mod first and install dependencies +COPY go.mod . +RUN go mod download + +# Copy sources +COPY . . + +# Installs protoc and plugins: protoc-gen-go +ARG VERS="3.14.0" +ARG ARCH="linux-x86_64" +ARG NAME="protoc-${VERS}-${ARCH}" +RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v${VERS}/${NAME}.zip --output-document=./${NAME}.zip && \ + apt update && apt install -y unzip && \ + unzip -o ${NAME}.zip -d ${NAME} && \ + mv ${NAME}/bin/* /usr/local/bin && \ + mv ${NAME}/include/* /usr/local/include && \ + go get -u github.com/golang/protobuf/protoc-gen-go + +# Generates the Golang protobuf files +RUN protoc \ + --proto_path=./proto \ + --go_out=plugins=grpc,module=${MODULE}:. \ + ./proto/http.proto + +# Compile Go binary +RUN GOOS=linux \ + go build -a -installsuffix cgo \ + -o /bin/client \ + ${MODULE}/cmd/grpc/client + + +FROM gcr.io/distroless/base-debian10 + +COPY --from=build /bin/client / + +USER 999 + +EXPOSE 50051 + +ENTRYPOINT ["/client"] +CMD ["--grpc_endpoint=:50051"] diff --git a/samples/apps/http-apps/README.md b/samples/apps/http-apps/README.md new file mode 100644 index 000000000..4bcf156f0 --- /dev/null +++ b/samples/apps/http-apps/README.md @@ -0,0 +1,231 @@ +# HTTP Protocol Sample Device|Discovery apps + +This directory provides implementations of IoT devices and a discovery service that can be used to test the Akri HTTP Protocol Broker. 
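+
+At a glance, the directory contains (an abridged sketch for orientation; see the sections below for details):
+
+```bash
+Dockerfiles/       # image builds: device, discovery, grpc.broker, grpc.client
+build.sh           # builds and pushes all four images
+cmd/               # Go sources: device, discovery, grpc/broker, grpc/client
+kubernetes/        # Deployments and Akri Configurations (IMAGE/SECRET placeholders)
+proto/http.proto   # DeviceService gRPC definition
+shared/            # repeatable-flag helper shared by the apps
+go.mod
+```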
+
+This directory also includes an alternative gRPC implementation of the Akri HTTP Protocol Broker, along with a gRPC client.
+
+## Environment
+
+```bash
+export REGISTRY="ghcr.io"
+export USER=[[GITHUB-USER]]
+export PREFIX="http-apps"
+export TAG="v1"
+```
+
+## Build
+
+The images are built by GitHub Actions in the repository, but you may also build them yourself using:
+
+```bash
+./build.sh
+```
+
+This will generate 4 images:
+
++ `${PREFIX}-device`
++ `${PREFIX}-discovery`
++ `${PREFIX}-grpc-broker`
++ `${PREFIX}-grpc-client`
+
+## Device|Discovery Services
+
+There are two applications:
+
++ `device`
++ `discovery`
+
+### Docker
+
+You may run the images standalone:
+
+```bash
+# Create devices on ports 8000:8009
+DISCOVERY=()
+for PORT in {8000..8009}
+do
+  # Create the device on ${PORT}
+  # For Docker only: name each device: device-${PORT}
+  docker run \
+  --rm --detach=true \
+  --name=device-${PORT} \
+  --publish=${PORT}:8080 \
+  ${REGISTRY}/${USER}/${PREFIX}-device:${TAG} \
+  --path="/"
+  # Add the device to the discovery document
+  DISCOVERY+=("--device=http://localhost:${PORT} ")
+done
+
+# Create a discovery server for these devices
+docker run \
+  --rm --detach=true \
+  --name=discovery \
+  --publish=9999:9999 \
+  ${REGISTRY}/${USER}/${PREFIX}-discovery:${TAG} ${DISCOVERY[@]}
+```
+
+Test:
+
+```bash
+curl http://localhost:9999/
+http://localhost:8000
+http://localhost:8001
+http://localhost:8002
+http://localhost:8003
+http://localhost:8004
+http://localhost:8005
+http://localhost:8006
+http://localhost:8007
+http://localhost:8008
+http://localhost:8009
+
+curl http://localhost:8006/sensor
+```
+
+To stop:
+
+```bash
+# Delete devices on ports 8000:8009
+for PORT in {8000..8009}
+do
+  docker stop device-${PORT}
+done
+
+# Delete discovery server
+docker stop discovery
+```
+
+### Kubernetes
+
+These apps are most useful on Kubernetes, because one (!) or more devices can be created and the discovery service can then be configured with their correct DNS names.
+
+Ensure the `image` references are updated in `./kubernetes/device.yaml` and `./kubernetes/discovery.yaml`
+
+```bash
+for APP in "device" "discovery"
+do
+  IMAGE="$(docker inspect --format='{{index .RepoDigests 0}}' ${REGISTRY}/${USER}/${PREFIX}-${APP}:${TAG})"
+  sed \
+  --in-place \
+  "s|IMAGE|${IMAGE}|g" \
+  ./kubernetes/${APP}.yaml
+done
+```
+
+Then:
+
+```bash
+
+# Create one device deployment
+kubectl apply --filename=./kubernetes/device.yaml
+
+# But create multiple Services against the single Pod
+for NUM in {1..9}
+do
+  # Services are uniquely named
+  # The service uses the Pods port: 8080
+  kubectl expose deployment/device \
+  --name=device-${NUM} \
+  --port=8080 \
+  --target-port=8080
+done
+service/device-1 exposed
+service/device-2 exposed
+service/device-3 exposed
+service/device-4 exposed
+service/device-5 exposed
+service/device-6 exposed
+service/device-7 exposed
+service/device-8 exposed
+service/device-9 exposed
+
+# Create one discovery deployment
+kubectl apply --filename=./kubernetes/discovery.yaml
+
+# Expose Discovery as a service on its default port: 9999
+# The Discovery service spec is statically configured for devices 1-9
+kubectl expose deployment/discovery \
+--name=discovery \
+--port=9999 \
+--target-port=9999
+
+kubectl run curl --image=radial/busyboxplus:curl --stdin --tty --rm
+curl http://discovery:9999
+http://device-1:8080
+http://device-2:8080
+http://device-3:8080
+http://device-4:8080
+http://device-5:8080
+http://device-6:8080
+http://device-7:8080
+http://device-8:8080
+http://device-9:8080
+```
+
+Delete:
+
+```bash
+kubectl delete deployment/discovery
+kubectl delete deployment/device
+
+kubectl delete service/discovery
+
+for NUM in {1..9}
+do
+  kubectl delete service/device-${NUM}
+done
+```
+
+## gRPC Broker|Client
+
+This is a Golang implementation of the Broker gRPC server and client. It is an alternative implementation to the Rust gRPC server and client found in `./samples/brokers/http/src/grpc`.
+
+### Docker
+
+These are containerized too:
+
+```bash
+docker run \
+--rm --interactive --tty \
+--net=host \
+--name=grpc-broker-golang \
+--env=AKRI_HTTP_DEVICE_ENDPOINT=localhost:8005 \
+${REGISTRY}/${USER}/${PREFIX}-grpc-broker:${TAG} \
+--grpc_endpoint=:50051
+```
+
+And:
+
+```bash
+docker run \
+--rm --interactive --tty \
+--net=host \
+--name=grpc-client-golang \
+${REGISTRY}/${USER}/${PREFIX}-grpc-client:${TAG} \
+--grpc_endpoint=:50051
+```
+
+### Kubernetes
+
+You will need to replace `IMAGE` and `SECRET` in the Kubernetes configs before you deploy them.
+
+`SECRET` should be replaced with the value (if any) of the Kubernetes Secret that provides the token to your registry.
+
+```bash
+for APP in "broker" "client"
+do
+  IMAGE="$(docker inspect --format='{{index .RepoDigests 0}}' ${REGISTRY}/${USER}/${PREFIX}-grpc-${APP}:${TAG})"
+  sed \
+  --in-place \
+  "s|IMAGE|${IMAGE}|g" \
+  ./kubernetes/grpc.${APP}.yaml
+done
+```
+
+Then:
+
+```bash
+kubectl apply --filename=./kubernetes/grpc.broker.yaml
+kubectl apply --filename=./kubernetes/grpc.client.yaml
+```
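+
+Once the broker and client are deployed, the client logs should show periodic `read_sensor` calls and responses. As a quick check (a sketch; assumes the Deployment name from `./kubernetes/grpc.client.yaml`):
+
+```bash
+kubectl logs deployment/http-grpc-client-golang --follow
+```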
diff --git a/samples/apps/http-apps/build.sh b/samples/apps/http-apps/build.sh
new file mode 100755
index 000000000..efa9ac501
--- /dev/null
+++ b/samples/apps/http-apps/build.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+
+: "${REGISTRY:?Need to export REGISTRY e.g. ghcr.io}"
+: "${USER:?Need to export USER e.g. ghcr.io/deislabs/...}"
+: "${PREFIX:?Need to export PREFIX e.g. ${REGISTRY}/${USER}/http-apps...}"
+: "${TAG:?Need to export TAG e.g. v1}"
+
+for APP in "device" "discovery"
+do
+  IMAGE="${REGISTRY}/${USER}/${PREFIX}-${APP}:${TAG}"
+  docker build \
+  --tag=${IMAGE} \
+  --file=./Dockerfiles/${APP} \
+  .
+  docker push ${IMAGE}
+done
+
+for APP in "broker" "client"
+do
+  IMAGE="${REGISTRY}/${USER}/${PREFIX}-grpc-${APP}:${TAG}"
+  docker build \
+  --tag=${IMAGE} \
+  --file=./Dockerfiles/grpc.${APP} \
+  .
+  docker push ${IMAGE}
+done
\ No newline at end of file
diff --git a/samples/apps/http-apps/cmd/device/main.go b/samples/apps/http-apps/cmd/device/main.go
new file mode 100644
index 000000000..6de076937
--- /dev/null
+++ b/samples/apps/http-apps/cmd/device/main.go
@@ -0,0 +1,57 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"log"
+	"math/rand"
+	"net"
+	"net/http"
+	"time"
+
+	"github.com/deislabs/akri/http-extensibility/shared"
+)
+
+const (
+	addr = ":8080"
+)
+
+var _ flag.Value = (*shared.RepeatableFlag)(nil)
+var paths shared.RepeatableFlag
+
+func main() {
+	flag.Var(&paths, "path", "Repeat this flag to add paths for the device")
+	flag.Parse()
+
+	// At a minimum, respond on `/`
+	if len(paths) == 0 {
+		paths = []string{"/"}
+	}
+	log.Printf("[main] Paths: %d", len(paths))
+
+	seed := rand.NewSource(time.Now().UnixNano())
+	entr := rand.New(seed)
+
+	handler := http.NewServeMux()
+
+	// Create handler for each endpoint
+	for _, path := range paths {
+		path := path // capture the loop variable so each handler logs its own path
+		log.Printf("[main] Creating handler: %s", path)
+		handler.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+			log.Printf("[main:handler] Handler entered: %s", path)
+			fmt.Fprint(w, entr.Float64())
+		})
+	}
+
+	s := &http.Server{
+		Addr:    addr,
+		Handler: handler,
+	}
+	listen, err := net.Listen("tcp", addr)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	log.Printf("[main] Starting Device: [%s]", addr)
+	log.Fatal(s.Serve(listen))
+}
diff --git a/samples/apps/http-apps/cmd/discovery/main.go b/samples/apps/http-apps/cmd/discovery/main.go
new file mode 100644
index 000000000..72d7d5446
--- /dev/null
+++ b/samples/apps/http-apps/cmd/discovery/main.go
@@ -0,0 +1,42 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"html"
+	"log"
+	"net"
+	"net/http"
+
+	"github.com/deislabs/akri/http-extensibility/shared"
+)
+
+const (
+	addr = ":9999"
+)
+
+var _ flag.Value = (*shared.RepeatableFlag)(nil)
+var devices shared.RepeatableFlag
+
+func main() {
+	flag.Var(&devices, "device", "Repeat this flag to add devices to the discovery service")
+	flag.Parse()
+
+	handler := http.NewServeMux()
+	handler.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+		log.Printf("[discovery] Handler entered")
+		fmt.Fprintf(w, "%s\n", html.EscapeString(devices.String()))
+	})
+
+	s := &http.Server{
+		Addr:    addr,
+		Handler: handler,
+	}
+	listen, err := net.Listen("tcp", addr)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	log.Printf("[main] Starting Discovery Service: %s", addr)
+	log.Fatal(s.Serve(listen))
+}
diff --git a/samples/apps/http-apps/cmd/grpc/broker/main.go b/samples/apps/http-apps/cmd/grpc/broker/main.go
new file mode 100644
index 000000000..ca7794ca4
--- /dev/null
+++ b/samples/apps/http-apps/cmd/grpc/broker/main.go
@@ -0,0 +1,46 @@
+package main
+
+import (
+	"flag"
+	"log"
+	"net"
+	"os"
+
+	pb "github.com/deislabs/akri/http-extensibility/proto"
+
+	"google.golang.org/grpc"
+)
+
+const (
+	deviceEndpoint = "AKRI_HTTP_DEVICE_ENDPOINT"
+)
+
+var (
+	grpcEndpoint = flag.String("grpc_endpoint", "", "The endpoint of this gRPC server.")
+)
+
+func main() {
+	log.Println("[main] Starting gRPC server")
+
+	flag.Parse()
+	if *grpcEndpoint == "" {
+		log.Fatal("[main] Unable to start server. Requires gRPC endpoint.")
+	}
+
+	deviceURL := os.Getenv(deviceEndpoint)
+	if deviceURL == "" {
+		log.Fatalf("Unable to determine Device URL using environment: %s", deviceEndpoint)
+	}
+
+	serverOpts := []grpc.ServerOption{}
+	grpcServer := grpc.NewServer(serverOpts...)
+
+	pb.RegisterDeviceServiceServer(grpcServer, NewServer(deviceURL))
+
+	listen, err := net.Listen("tcp", *grpcEndpoint)
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("[main] Starting gRPC Listener [%s]\n", *grpcEndpoint)
+	log.Fatal(grpcServer.Serve(listen))
+}
diff --git a/samples/apps/http-apps/cmd/grpc/broker/server.go b/samples/apps/http-apps/cmd/grpc/broker/server.go
new file mode 100644
index 000000000..085b34258
--- /dev/null
+++ b/samples/apps/http-apps/cmd/grpc/broker/server.go
@@ -0,0 +1,50 @@
+package main
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net/http"
+
+	pb "github.com/deislabs/akri/http-extensibility/proto"
+)
+
+var _ pb.DeviceServiceServer = (*Server)(nil)
+
+// Server is a type that implements pb.DeviceServiceServer
+type Server struct {
+	DeviceURL string
+}
+
+// NewServer is a function that returns a new Server
+func NewServer(deviceURL string) *Server {
+	return &Server{
+		DeviceURL: deviceURL,
+	}
+}
+
+// ReadSensor is a method that implements the pb.DeviceServiceServer interface
+func (s *Server) ReadSensor(ctx context.Context, rqst *pb.ReadSensorRequest) (*pb.ReadSensorResponse, error) {
+	log.Println("[read_sensor] Entered")
+	resp, err := http.Get(s.DeviceURL)
+	if err != nil {
+		return &pb.ReadSensorResponse{}, err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode < 200 || resp.StatusCode > 299 {
+		log.Printf("[read_sensor] Response status: %d", resp.StatusCode)
+		return &pb.ReadSensorResponse{}, fmt.Errorf("response code: %d", resp.StatusCode)
+	}
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return &pb.ReadSensorResponse{}, err
+	}
+
+	log.Printf("[read_sensor] Response body: %s", body)
+	return &pb.ReadSensorResponse{
+		Value: string(body),
+	}, nil
+}
diff --git a/samples/apps/http-apps/cmd/grpc/client/main.go b/samples/apps/http-apps/cmd/grpc/client/main.go
new file mode 100644
index 000000000..d856fcc25
--- /dev/null
+++ b/samples/apps/http-apps/cmd/grpc/client/main.go
@@ -0,0 +1,63 @@
+package main
+
+import (
+	"context"
+	"flag"
+	"log"
+	"time"
+
+	pb "github.com/deislabs/akri/http-extensibility/proto"
+
+	"google.golang.org/grpc"
+)
+
+var (
+	grpcEndpoint = flag.String("grpc_endpoint", "", "The endpoint of the gRPC server.")
+)
+
+func main() {
+	log.Println("[main] Starting gRPC client")
+	defer func() {
+		log.Println("[main] Stopping gRPC client")
+	}()
+
+	flag.Parse()
+	if *grpcEndpoint == "" {
+		log.Fatal("[main] Unable to start client. Requires endpoint to a gRPC Server.")
+	}
+
+	dialOpts := []grpc.DialOption{
+		grpc.WithInsecure(),
+	}
+	log.Printf("Connecting to gRPC server [%s]", *grpcEndpoint)
+	conn, err := grpc.Dial(*grpcEndpoint, dialOpts...)
+ if err != nil { + log.Fatal(err) + } + defer conn.Close() + + client := pb.NewDeviceServiceClient(conn) + ctx := context.Background() + + for { + log.Println("[main:loop]") + + // Call Service + { + rqst := &pb.ReadSensorRequest{ + Name: "/", + } + log.Println("[main:loop] Calling read_sensor") + resp, err := client.ReadSensor(ctx, rqst) + if err != nil { + log.Fatal(err) + } + + log.Printf("[main:loop] Success: %+v", resp) + } + + // Add a pause between iterations + log.Println("[main:loop] Sleep") + time.Sleep(10 * time.Second) + } +} diff --git a/samples/apps/http-apps/go.mod b/samples/apps/http-apps/go.mod new file mode 100644 index 000000000..57ad18608 --- /dev/null +++ b/samples/apps/http-apps/go.mod @@ -0,0 +1,9 @@ +module github.com/deislabs/akri/http-extensibility + +go 1.15 + +require ( + github.com/golang/protobuf v1.4.3 + google.golang.org/grpc v1.33.2 + google.golang.org/protobuf v1.25.0 +) diff --git a/samples/apps/http-apps/kubernetes/device.yaml b/samples/apps/http-apps/kubernetes/device.yaml new file mode 100644 index 000000000..f5cb0910a --- /dev/null +++ b/samples/apps/http-apps/kubernetes/device.yaml @@ -0,0 +1,30 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: device +spec: + replicas: 1 + selector: + matchLabels: + project: akri + protocol: http + function: device + template: + metadata: + labels: + project: akri + protocol: http + function: device + name: device + spec: + imagePullSecrets: + - name: SECRET + containers: + - name: device + image: IMAGE + imagePullPolicy: Always + args: + - --path=/ + ports: + - name: http + containerPort: 8080 diff --git a/samples/apps/http-apps/kubernetes/discovery.yaml b/samples/apps/http-apps/kubernetes/discovery.yaml new file mode 100644 index 000000000..878843c3a --- /dev/null +++ b/samples/apps/http-apps/kubernetes/discovery.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: discovery +spec: + replicas: 1 + selector: + matchLabels: + project: akri + protocol: http + function: discovery + template: + metadata: + labels: + project: akri + protocol: http + function: discovery + name: discovery + spec: + imagePullSecrets: + - name: SECRET + containers: + - name: discovery + image: IMAGE + imagePullPolicy: Always + args: + - --device=http://device-1:8080 + - --device=http://device-2:8080 + - --device=http://device-3:8080 + - --device=http://device-4:8080 + - --device=http://device-5:8080 + - --device=http://device-6:8080 + - --device=http://device-7:8080 + - --device=http://device-8:8080 + - --device=http://device-9:8080 + ports: + - name: http + containerPort: 9999 diff --git a/samples/apps/http-apps/kubernetes/grpc.broker.yaml b/samples/apps/http-apps/kubernetes/grpc.broker.yaml new file mode 100644 index 000000000..249a4129e --- /dev/null +++ b/samples/apps/http-apps/kubernetes/grpc.broker.yaml @@ -0,0 +1,30 @@ +apiVersion: akri.sh/v0 +kind: Configuration +metadata: + name: http-grpc-broker-golang +spec: + protocol: + http: + discoveryEndpoint: http://discovery:9999 + capacity: 1 + brokerPodSpec: + imagePullSecrets: # GitHub Container Registry secret + - name: SECRET + containers: + - name: http-grpc-broker-golang + image: IMAGE + args: + - --grpc_endpoint=0.0.0.0:50051 + resources: + limits: + "{{PLACEHOLDER}}": "1" + instanceServiceSpec: + ports: + - name: grpc + port: 50051 + targetPort: 50051 + configurationServiceSpec: + ports: + - name: grpc + port: 50051 + targetPort: 50051 diff --git a/samples/apps/http-apps/kubernetes/grpc.client.yaml 
b/samples/apps/http-apps/kubernetes/grpc.client.yaml new file mode 100644 index 000000000..c7ff8454b --- /dev/null +++ b/samples/apps/http-apps/kubernetes/grpc.client.yaml @@ -0,0 +1,28 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: http-grpc-client-golang +spec: + replicas: 1 + selector: + matchLabels: + project: akri + protocol: http + function: client + language: golang + template: + metadata: + labels: + project: akri + protocol: http + function: client + language: golang + name: http-grpc-client-golang + spec: + imagePullSecrets: + - name: SECRET + containers: + - name: http-grpc-client-golang + image: IMAGE + args: + - --grpc_endpoint=http-svc:50051 diff --git a/samples/apps/http-apps/proto/http.proto b/samples/apps/http-apps/proto/http.proto new file mode 100644 index 000000000..6d74c7777 --- /dev/null +++ b/samples/apps/http-apps/proto/http.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +option go_package = "github.com/deislabs/akri/http-extensibility/proto"; + +package http; + +service DeviceService { + rpc ReadSensor (ReadSensorRequest) returns (ReadSensorResponse); +} + +message ReadSensorRequest { + string name = 1; +} +message ReadSensorResponse { + string value = 1; +} diff --git a/samples/apps/http-apps/shared/paths.go b/samples/apps/http-apps/shared/paths.go new file mode 100644 index 000000000..d544a910c --- /dev/null +++ b/samples/apps/http-apps/shared/paths.go @@ -0,0 +1,20 @@ +package shared + +import ( + "strings" +) + +// RepeatableFlag is an alias to use repeated flags with flag +type RepeatableFlag []string + +// String is a method required by flag.Value interface +func (e *RepeatableFlag) String() string { + result := strings.Join(*e, "\n") + return result +} + +// Set is a method required by flag.Value interface +func (e *RepeatableFlag) Set(value string) error { + *e = append(*e, value) + return nil +} diff --git a/samples/brokers/http/Cargo.toml b/samples/brokers/http/Cargo.toml new file mode 100644 index 000000000..ca349bdf6 --- /dev/null +++ b/samples/brokers/http/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "http" +version = "0.1.0" +authors = ["DazWilkin "] +edition = "2018" + +[[bin]] +name = "standalone" +path = "src/main.rs" + +[[bin]] +name = "broker" +path = "src/grpc/broker.rs" + +[[bin]] +name = "client" +path = "src/grpc/client.rs" + +[dependencies] +clap = "2.33.3" +futures = "0.3" +futures-util = "0.3" +prost = "0.6" +reqwest = "0.10.8" +tokio = { version = "0.2", features = ["rt-threaded", "time", "stream", "fs", "macros", "uds"] } +tonic = "0.1" + +[build-dependencies] +tonic-build = "0.1.1" diff --git a/samples/brokers/http/Dockerfiles/grpc.broker b/samples/brokers/http/Dockerfiles/grpc.broker new file mode 100644 index 000000000..2053edea3 --- /dev/null +++ b/samples/brokers/http/Dockerfiles/grpc.broker @@ -0,0 +1,51 @@ +ARG PLATFORM=amd64 +ARG CROSS_BUILD_TARGET=x86_64-unknown-linux-gnu +ARG BROKER=http + +FROM ${PLATFORM}/rust:1.47 as build + +ARG BROKER + +RUN rustup component add rustfmt --toolchain 1.47.0-x86_64-unknown-linux-gnu + +RUN USER=root cargo new --bin ${BROKER} + +WORKDIR /${BROKER} + +COPY ./samples/brokers/${BROKER}/Cargo.toml ./Cargo.toml +RUN cargo build \ + --bin=standalone \ + --release +RUN rm ./src/*.rs +RUN rm ./target/release/deps/${BROKER}* + +COPY ./samples/brokers/${BROKER} . 
+ +RUN cargo build \ + --bin=broker \ + --release + +FROM amd64/debian:buster-slim + +ARG BROKER + +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + ca-certificates \ + libssl-dev \ + openssl && \ + apt-get clean + +COPY --from=build /${BROKER}/target/release/broker /broker + +LABEL org.opencontainers.image.source https://github.com/deislabs/akri + +# Expose port used by broker service +EXPOSE 8084 + +ENV SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt +ENV SSL_CERT_DIR=/etc/ssl/certs +ENV RUST_LOG ${BROKER},akri_shared + +ENTRYPOINT ["/broker"] +CMD ["--grpc_endpoint=0.0.0.0:8084"] diff --git a/samples/brokers/http/Dockerfiles/grpc.client b/samples/brokers/http/Dockerfiles/grpc.client new file mode 100644 index 000000000..09cbd1464 --- /dev/null +++ b/samples/brokers/http/Dockerfiles/grpc.client @@ -0,0 +1,48 @@ +ARG PLATFORM=amd64 +ARG CROSS_BUILD_TARGET=x86_64-unknown-linux-gnu +ARG BROKER=http + +FROM ${PLATFORM}/rust:1.47 as build + +ARG BROKER + +RUN rustup component add rustfmt --toolchain 1.47.0-x86_64-unknown-linux-gnu + +RUN USER=root cargo new --bin ${BROKER} + +WORKDIR /${BROKER} + +COPY ./samples/brokers/${BROKER}/Cargo.toml ./Cargo.toml +RUN cargo build \ + --bin=standalone \ + --release +RUN rm ./src/*.rs +RUN rm ./target/release/deps/${BROKER}* + +COPY ./samples/brokers/${BROKER} . + +RUN cargo build \ + --bin=client \ + --release + +FROM amd64/debian:buster-slim + +ARG BROKER + +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + ca-certificates \ + libssl-dev \ + openssl && \ + apt-get clean + +COPY --from=build /${BROKER}/target/release/client /client + +LABEL org.opencontainers.image.source https://github.com/deislabs/akri + +ENV SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt +ENV SSL_CERT_DIR=/etc/ssl/certs +ENV RUST_LOG ${BROKER},akri_shared + +ENTRYPOINT ["/client"] +CMD ["--grpc_endpoint=:8084"] diff --git a/samples/brokers/http/Dockerfiles/standalone b/samples/brokers/http/Dockerfiles/standalone new file mode 100644 index 000000000..d9bf95955 --- /dev/null +++ b/samples/brokers/http/Dockerfiles/standalone @@ -0,0 +1,48 @@ +ARG PLATFORM=amd64 +ARG CROSS_BUILD_TARGET=x86_64-unknown-linux-gnu +ARG BROKER=http + +FROM ${PLATFORM}/rust:1.47 as build + +ARG BROKER + +RUN rustup component add rustfmt --toolchain 1.47.0-x86_64-unknown-linux-gnu + +RUN USER=root cargo new --bin ${BROKER} + +WORKDIR /${BROKER} + +COPY ./samples/brokers/${BROKER}/Cargo.toml ./Cargo.toml +RUN cargo build \ + --bin=standalone \ + --release +RUN rm ./src/*.rs +RUN rm ./target/release/deps/${BROKER}* + +COPY ./samples/brokers/${BROKER} . 
+
+RUN cargo build \
+    --bin=standalone \
+    --release
+
+FROM amd64/debian:buster-slim
+
+ARG BROKER
+
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    ca-certificates \
+    libssl-dev \
+    openssl && \
+    apt-get clean
+
+# Rename ${BROKER} binary to broker
+COPY --from=build /${BROKER}/target/release/standalone /broker
+
+LABEL org.opencontainers.image.source https://github.com/deislabs/akri
+
+ENV SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt
+ENV SSL_CERT_DIR=/etc/ssl/certs
+ENV RUST_LOG ${BROKER},akri_shared
+
+ENTRYPOINT ["/broker"]
diff --git a/samples/brokers/http/build.rs b/samples/brokers/http/build.rs
new file mode 100644
index 000000000..a173cec1a
--- /dev/null
+++ b/samples/brokers/http/build.rs
@@ -0,0 +1,4 @@
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+    tonic_build::compile_protos("proto/http.proto")?;
+    Ok(())
+}
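With the three `[[bin]]` targets declared in `Cargo.toml` and the protobuf codegen wired up in `build.rs`, the binaries can be sanity-checked locally before building the images (a sketch; run from `samples/brokers/http`, mirroring the `cargo build` invocations in the Dockerfiles):

```bash
# Build each binary the same way the Dockerfiles do
cargo build --bin=standalone --release
cargo build --bin=broker --release
cargo build --bin=client --release
```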
diff --git a/samples/brokers/http/build.sh b/samples/brokers/http/build.sh
new file mode 100755
index 000000000..0199ab176
--- /dev/null
+++ b/samples/brokers/http/build.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+
+: "${REGISTRY:?Need to export REGISTRY e.g. ghcr.io}"
+: "${USER:?Need to export USER e.g. ghcr.io/deislabs/...}"
+: "${PREFIX:?Need to export PREFIX e.g. ${REGISTRY}/${USER}/http...}"
+: "${TAG:?Need to export TAG e.g. v1}"
+
+# Standalone
+(
+  IMAGE="${REGISTRY}/${USER}/${PREFIX}-broker:${TAG}"
+  docker build \
+  --tag=${IMAGE} \
+  --file=./Dockerfiles/standalone \
+  ../../..
+
+  docker push ${IMAGE}
+)
+
+# gRPC Broker|Client
+# Broker
+(
+  IMAGE="${REGISTRY}/${USER}/${PREFIX}-grpc-broker:${TAG}"
+  docker build \
+  --tag=${IMAGE} \
+  --file=./Dockerfiles/grpc.broker \
+  ../../..
+
+  docker push ${IMAGE}
+)
+# Client
+(
+  IMAGE="${REGISTRY}/${USER}/${PREFIX}-grpc-client:${TAG}"
+  docker build \
+  --tag=${IMAGE} \
+  --file=./Dockerfiles/grpc.client \
+  ../../..
+
+  docker push ${IMAGE}
+)
diff --git a/samples/brokers/http/kubernetes/http.grpc.broker.yaml b/samples/brokers/http/kubernetes/http.grpc.broker.yaml
new file mode 100644
index 000000000..774189169
--- /dev/null
+++ b/samples/brokers/http/kubernetes/http.grpc.broker.yaml
@@ -0,0 +1,30 @@
+apiVersion: akri.sh/v0
+kind: Configuration
+metadata:
+  name: http-grpc-broker-rust
+spec:
+  protocol:
+    http:
+      discoveryEndpoint: http://discovery:9999
+  capacity: 1
+  brokerPodSpec:
+    imagePullSecrets: # GitHub Container Registry secret
+      - name: SECRET
+    containers:
+      - name: http-grpc-broker-rust
+        image: IMAGE
+        args:
+          - --grpc_endpoint=0.0.0.0:50051
+        resources:
+          limits:
+            "{{PLACEHOLDER}}": "1"
+  instanceServiceSpec:
+    ports:
+      - name: grpc
+        port: 50051
+        targetPort: 50051
+  configurationServiceSpec:
+    ports:
+      - name: grpc
+        port: 50051
+        targetPort: 50051
diff --git a/samples/brokers/http/kubernetes/http.grpc.client.yaml b/samples/brokers/http/kubernetes/http.grpc.client.yaml
new file mode 100644
index 000000000..c0be084ff
--- /dev/null
+++ b/samples/brokers/http/kubernetes/http.grpc.client.yaml
@@ -0,0 +1,28 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: http-grpc-client-rust
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      project: akri
+      protocol: http
+      function: client
+      language: rust
+  template:
+    metadata:
+      labels:
+        project: akri
+        protocol: http
+        function: client
+        language: rust
+      name: http-grpc-client-rust
+    spec:
+      imagePullSecrets:
+        - name: SECRET
+      containers:
+        - name: http-grpc-client-rust
+          image: IMAGE
+          args:
+            - --grpc_endpoint=http-svc:50051
diff --git a/samples/brokers/http/kubernetes/http.yaml b/samples/brokers/http/kubernetes/http.yaml
new file mode 100644
index 000000000..e86906c79
--- /dev/null
+++ b/samples/brokers/http/kubernetes/http.yaml
@@ -0,0 +1,18 @@
+apiVersion: akri.sh/v0
+kind: Configuration
+metadata:
+  name: http
+spec:
+  protocol:
+    http:
+      discoveryEndpoint: http://discovery:9999
+  capacity: 1
+  brokerPodSpec:
+    imagePullSecrets: # Container Registry secret
+      - name: SECRET
+    containers:
+      - name: http-broker
+        image: IMAGE
+        resources:
+          limits:
+            "{{PLACEHOLDER}}": "1"
diff --git a/samples/brokers/http/proto/http.proto b/samples/brokers/http/proto/http.proto
new file mode 100644
index 000000000..6d74c7777
--- /dev/null
+++ b/samples/brokers/http/proto/http.proto
@@ -0,0 +1,16 @@
+syntax = "proto3";
+
+option go_package = "github.com/deislabs/akri/http-extensibility/proto";
+
+package http;
+
+service DeviceService {
+  rpc ReadSensor (ReadSensorRequest) returns (ReadSensorResponse);
+}
+
+message ReadSensorRequest {
+  string name = 1;
+}
+message ReadSensorResponse {
+  string value = 1;
+}
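Because the broker serves the `http.DeviceService` defined in `proto/http.proto`, it can be exercised with a generic gRPC client such as `grpcurl` (a sketch; assumes `grpcurl` is installed and a broker is listening on `localhost:50051`):

```bash
grpcurl -plaintext \
  -proto proto/http.proto \
  -d '{"name": "/"}' \
  localhost:50051 http.DeviceService/ReadSensor
```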
diff --git a/samples/brokers/http/src/grpc/broker.rs b/samples/brokers/http/src/grpc/broker.rs
new file mode 100644
index 000000000..9a4d9fefa
--- /dev/null
+++ b/samples/brokers/http/src/grpc/broker.rs
@@ -0,0 +1,76 @@
+pub mod http {
+    tonic::include_proto!("http");
+}
+
+use clap::{App, Arg};
+use http::{
+    device_service_server::{DeviceService, DeviceServiceServer},
+    ReadSensorRequest, ReadSensorResponse,
+};
+use reqwest::get;
+use std::env;
+use std::net::SocketAddr;
+use tonic::{transport::Server, Code, Request, Response, Status};
+
+const DEVICE_ENDPOINT: &str = "AKRI_HTTP_DEVICE_ENDPOINT";
+
+#[derive(Default)]
+pub struct Device {
+    device_url: String,
+}
+
+#[tonic::async_trait]
+impl DeviceService for Device {
+    async fn read_sensor(
+        &self,
+        _rqst: Request<ReadSensorRequest>,
+    ) -> Result<Response<ReadSensorResponse>, Status> {
+        println!("[read_sensor] Entered");
+        match get(&self.device_url).await {
+            Ok(resp) => {
+                println!("[read_sensor] Response status: {:?}", resp.status());
+                let body = resp.text().await.unwrap();
+                println!("[read_sensor] Response body: {:?}", body);
+                Ok(Response::new(ReadSensorResponse { value: body }))
+            }
+            Err(err) => {
+                println!("[read_sensor] Error: {:?}", err);
+                Err(Status::new(Code::Unavailable, "device is unavailable"))
+            }
+        }
+    }
+}
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    println!("[main] Entered");
+
+    let matches = App::new("broker")
+        .arg(
+            Arg::with_name("grpc_endpoint")
+                .long("grpc_endpoint")
+                .value_name("ENDPOINT")
+                .help("Endpoint address that the gRPC server will listen on.")
+                .required(true),
+        )
+        .get_matches();
+    let grpc_endpoint = matches.value_of("grpc_endpoint").unwrap();
+
+    let addr: SocketAddr = grpc_endpoint.parse().unwrap();
+    println!("[main] gRPC service endpoint: {}", addr);
+
+    let device_url = env::var(DEVICE_ENDPOINT)?;
+    println!("[main] gRPC service proxying: {}", device_url);
+
+    let device_service = Device { device_url };
+    let service = DeviceServiceServer::new(device_service);
+
+    println!("[main] gRPC service starting");
+    Server::builder()
+        .add_service(service)
+        .serve(addr)
+        .await
+        .expect("unable to start http-protocol gRPC server");
+
+    Ok(())
+}
diff --git a/samples/brokers/http/src/grpc/client.rs b/samples/brokers/http/src/grpc/client.rs
new file mode 100644
index 000000000..6d12d23bd
--- /dev/null
+++ b/samples/brokers/http/src/grpc/client.rs
@@ -0,0 +1,42 @@
+pub mod http {
+    tonic::include_proto!("http");
+}
+
+use clap::{App, Arg};
+use http::{device_service_client::DeviceServiceClient, ReadSensorRequest};
+use tokio::{time, time::Duration};
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    println!("[main] Entered");
+
+    let matches = App::new("client")
+        .arg(
+            Arg::with_name("grpc_endpoint")
+                .long("grpc_endpoint")
+                .value_name("ENDPOINT")
+                .help("Endpoint address of the gRPC server.")
+                .required(true),
+        )
+        .get_matches();
+    let grpc_endpoint = matches.value_of("grpc_endpoint").unwrap();
+
+    let endpoint = format!("http://{}", grpc_endpoint);
+    println!("[main] gRPC client dialing: {}", endpoint);
+    let mut client = DeviceServiceClient::connect(endpoint).await?;
+
+    loop {
+        println!("[main:loop]");
+
+        // Call Service
+        {
+            let rqst = &pb_read_sensor_request_placeholder; // see below
+        }
+    }
+}
diff --git a/samples/brokers/http/src/main.rs b/samples/brokers/http/src/main.rs
new file mode 100644
index 000000000..65a97c8ee
--- /dev/null
+++ b/samples/brokers/http/src/main.rs
@@ -0,0 +1,36 @@
+use reqwest::get;
+use std::env;
+use tokio::{time, time::Duration};
+
+const DEVICE_ENDPOINT: &str = "AKRI_HTTP_DEVICE_ENDPOINT";
+
+async fn read_sensor(device_url: &str) {
+    println!("[http:read_sensor] Entered");
+    match get(device_url).await {
+        Ok(resp) => {
+            println!("[main:read_sensor] Response status: {:?}", resp.status());
+            let body = resp.text().await;
+            println!("[main:read_sensor] Response body: {:?}", body);
+        }
+        Err(err) => println!("Error: {:?}", err),
+    };
+}
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    println!("[http:main] Entered");
+
+    let device_url = env::var(DEVICE_ENDPOINT)?;
+    println!("[http:main] Device: {}", &device_url);
+
+    let mut tasks =
Vec::new(); + tasks.push(tokio::spawn(async move { + loop { + println!("[http:main:loop] Sleep"); + time::delay_for(Duration::from_secs(10)).await; + println!("[http:main:loop] read_sensor({})", &device_url); + read_sensor(&device_url[..]).await; + } + })); + futures::future::join_all(tasks).await; + Ok(()) +} diff --git a/samples/brokers/udev-video-broker/Cargo.toml b/samples/brokers/udev-video-broker/Cargo.toml index 8c9b26586..4ce6bf760 100644 --- a/samples/brokers/udev-video-broker/Cargo.toml +++ b/samples/brokers/udev-video-broker/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "udev-video-broker" -version = "0.0.41" +version = "0.0.44" authors = ["Kate Goldenring ", ""] edition = "2018" diff --git a/shared/Cargo.toml b/shared/Cargo.toml index 5ab7e38ab..f15e845e1 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "akri-shared" -version = "0.0.41" +version = "0.0.44" authors = [""] edition = "2018" diff --git a/test/example-unsupported.rules b/test/example-unsupported.rules index 0863621a0..1a16c2a6c 100644 --- a/test/example-unsupported.rules +++ b/test/example-unsupported.rules @@ -13,16 +13,9 @@ SYMLINK+="v4l/by-id/$env{ID_BUS}-$env{ID_SERIAL}-video-index$attr{index}" TEST{key}=="value PROGRAM=="some program" RESULT=="some result" -KERNELS=="gadget" -KERNEL=="video[0-9]", SUBSYSTEMS=="usb" -DRIVERS!="something", IMPORT{builtin}="usb_id" -ATTRS{idVendor}=="0c45" -TAGS=="tags" CONST{key}=="value" SECLABEL{key}=="value SYSCTL{key}=="value" -ATTRS{idVendor}=="09bb", ATTR{idProduct}=="1900", ENV{ID_MM_ERICSSON_MBM}=="1" -ATTR{idVendor}=="0bdd", ATTRS{idProduct}=="1902", ENV{a name}=="Keyboard that has a mouse" TAG-="uaccess" TAG+="uaccess" TAG="uaccess" diff --git a/test/example.rules b/test/example.rules index fa9bce7c5..1488817b6 100644 --- a/test/example.rules +++ b/test/example.rules @@ -6,3 +6,7 @@ 2 ATTR{idProduct}=="1900", ENV{ID_MM_ERICSSON_MBM}=="1" 2 ATTR{idVendor}=="0bdd", ENV{a name}=="Keyboard that has a mouse" 2 KERNEL=="video[0-9]*", ENV{ID_V4L_CAPABILITIES}==":capture:" +1 SUBSYSTEMS=="pci" +2 ATTRS{idVendor}=="0bdd", DRIVERS!="a driver" +1 TAGS=="uaccess" +1 KERNELS=="card[0-9]*" diff --git a/test/run-end-to-end.py b/test/run-end-to-end.py index 9ecdd58f2..a77c2413c 100644 --- a/test/run-end-to-end.py +++ b/test/run-end-to-end.py @@ -25,14 +25,13 @@ def main(): shared_test_code.major_version = "v" + test_version.split(".")[0] print("Testing major version: {}".format(shared_test_code.major_version)) - print("Installing Akri Helm chart: {}".format(test_version)) - helm_chart_name = shared_test_code.get_helm_chart_name() - print("Get Akri Helm chart: {}".format(helm_chart_name)) + helm_chart_location = shared_test_code.get_helm_chart_location() + print("Get Akri Helm chart: {}".format(helm_chart_location)) cri_args = shared_test_code.get_cri_args() print("Providing Akri Helm chart with CRI args: {}".format(cri_args)) extra_helm_args = shared_test_code.get_extra_helm_args() print("Providing Akri Helm chart with extra helm args: {}".format(extra_helm_args)) - helm_install_command = "helm install akri akri-helm-charts/{} --debug --version {} --set debugEcho.enabled=true --set debugEcho.name={} --set debugEcho.shared=false --set agent.allowDebugEcho=true {} {}".format(helm_chart_name, test_version, shared_test_code.DEBUG_ECHO_NAME, cri_args, extra_helm_args) + helm_install_command = "helm install akri {} --set debugEcho.enabled=true --set debugEcho.name={} --set debugEcho.shared=false --set agent.allowDebugEcho=true {} {} --debug 
".format(helm_chart_location, shared_test_code.DEBUG_ECHO_NAME, cri_args, extra_helm_args) print("Helm command: {}".format(helm_install_command)) os.system(helm_install_command) diff --git a/test/shared_test_code.py b/test/shared_test_code.py index 9dfd967a5..d8eaa1d3f 100644 --- a/test/shared_test_code.py +++ b/test/shared_test_code.py @@ -16,9 +16,9 @@ RUNTIME_COMMAND_FILE = "/tmp/runtime_cmd_to_test.txt" HELM_CRI_ARGS_FILE = "/tmp/cri_args_to_test.txt" VERSION_FILE = "/tmp/version_to_test.txt" -HELM_CHART_NAME_FILE = "/tmp/chart_name.txt" SLEEP_DURATION_FILE = "/tmp/sleep_duration.txt" EXTRA_HELM_ARGS_FILE = "/tmp/extra_helm_args.txt" +HELM_CHART_LOCATION = "/tmp/helm_chart_location.txt" SLEEP_INTERVAL = 20 CONTROLLER_POD_LABEL_SELECTOR = "app=" + CONTROLLER_POD_NAME @@ -32,6 +32,10 @@ agent_pod_name = "" controller_pod_name = "" +def get_helm_chart_location(): + # Get helm chart location passed in helm install command (i.e. `repo/chart --version X.Y.Z` or `./deployment/helm`) + return open(HELM_CHART_LOCATION, "r").readline().rstrip() + def get_extra_helm_args(): # Get any extra helm args passed from workflow if os.path.exists(EXTRA_HELM_ARGS_FILE): @@ -62,10 +66,6 @@ def get_cri_args(): # Get CRI args for Akri Helm return open(HELM_CRI_ARGS_FILE, "r").readline().rstrip() -def get_helm_chart_name(): - # Get Helm chart name (akri, akri-dev) - return open(HELM_CHART_NAME_FILE, "r").readline().rstrip() - def get_test_version(): # Get version of akri to test if os.path.exists(VERSION_FILE): diff --git a/version.txt b/version.txt index 89a67e8cd..2aa916016 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -0.0.41 +0.0.44