diff --git a/.flake8 b/.flake8 index 5d254aff4e1ae2..95261687837818 100644 --- a/.flake8 +++ b/.flake8 @@ -11,42 +11,32 @@ exclude = third_party src/test_driver/openiotsdk/* src/test_driver/mbed/* src/test_driver/linux-cirque/* - src/test_driver/esp32/* - src/test_driver/efr32/* build/chip/java/tests/* build/chip/linux/* build/config/linux/* credentials/fetch-paa-certs-from-dcl.py docs/_extensions/external_content.py - examples/chef/chef.py - examples/chef/sample_app_util/zap_file_parser.py examples/common/pigweed/rpc_console/py/chip_rpc/console.py examples/lighting-app/python/lighting.py examples/platform/mbed/ota/generate_ota_list_image.py - examples/platform/nxp/k32w/k32w0/scripts/detokenizer.py scripts/build/build/target.py scripts/build/build/targets.py scripts/build/builders/android.py scripts/build/builders/bouffalolab.py scripts/build/builders/cc13x2x7_26x2x7.py - scripts/build/builders/efr32.py - scripts/build/builders/esp32.py scripts/build/builders/genio.py scripts/build/builders/gn.py scripts/build/builders/imx.py scripts/build/builders/infineon.py scripts/build/builders/nrf.py - scripts/build/test.py scripts/codegen.py scripts/codepregen.py scripts/error_table.py scripts/examples/gn_to_cmakelists.py - scripts/examples/tests/test.py scripts/flashing/bouffalolab_firmware_utils.py scripts/flashing/cyw30739_firmware_utils.py scripts/flashing/nrfconnect_firmware_utils.py scripts/gen_chip_version.py - scripts/gen_test_driver.py scripts/helpers/bloat_check.py scripts/pregenerate/using_codegen.py scripts/pregenerate/using_zap.py @@ -63,37 +53,22 @@ exclude = third_party scripts/py_matter_yamltests/test_yaml_parser.py scripts/run-clang-tidy-on-compile-commands.py scripts/setup/nrfconnect/update_ncs.py - scripts/tests/chiptest/__init__.py - scripts/tests/chiptest/runner.py - scripts/tests/chiptest/test_definition.py scripts/tests/chiptest/yamltest_with_chip_repl_tester.py - scripts/tests/java/base.py - scripts/tests/java/commissioning_test.py - 
scripts/tests/java/discover_test.py - scripts/tests/run_java_test.py - scripts/tests/run_python_test.py - scripts/tests/run_test_suite.py scripts/tools/check_zcl_file_sync.py scripts/tools/convert_ini.py - scripts/tools/generate_esp32_chip_factory_bin.py scripts/tools/memory/memdf/__init__.py scripts/tools/memory/report_summary.py scripts/tools/nrfconnect/generate_nrfconnect_chip_factory_data.py scripts/tools/nrfconnect/nrfconnect_generate_partition.py scripts/tools/nrfconnect/tests/test_generate_factory_data.py - scripts/tools/nxp/factory_data_generator/custom.py - scripts/tools/nxp/factory_data_generator/default.py - scripts/tools/nxp/factory_data_generator/generate.py scripts/tools/silabs/FactoryDataProvider.py scripts/tools/telink/mfg_tool.py scripts/tools/zap/generate.py scripts/tools/zap/prune_outputs.py - scripts/tools/zap/test_generate.py scripts/tools/zap/version_update.py scripts/tools/zap/zap_download.py scripts/tools/zap_convert_all.py src/app/ota_image_tool.py - src/app/tests/suites/certification/information.py src/app/zap_cluster_list.py src/controller/python/build-chip-wheel.py src/controller/python/chip-device-ctrl.py @@ -123,23 +98,7 @@ exclude = third_party src/controller/python/chip/yaml/__init__.py src/controller/python/chip/yaml/format_converter.py src/controller/python/chip/yaml/runner.py - src/controller/python/test/test_scripts/base.py - src/controller/python/test/test_scripts/cluster_objects.py - src/controller/python/test/test_scripts/mobile-device-test.py - src/controller/python/test/test_scripts/network_commissioning.py - src/controller/python/test/unit_tests/test_cluster_objects.py - src/controller/python/test/unit_tests/test_tlv.py src/lib/asn1/gen_asn1oid.py src/pybindings/pycontroller/build-chip-wheel.py src/pybindings/pycontroller/pychip/__init__.py - src/python_testing/TC_ACE_1_3.py - src/python_testing/TC_ACE_1_4.py - src/python_testing/TC_CGEN_2_4.py - src/python_testing/TC_DA_1_7.py - src/python_testing/TC_RR_1_1.py - 
src/python_testing/TC_SC_3_6.py - src/python_testing/TC_TestEventTrigger.py - src/python_testing/hello_test.py - src/python_testing/matter_testing_support.py src/setup_payload/python/generate_setup_payload.py - src/setup_payload/tests/run_python_setup_payload_gen_test.py diff --git a/.github/workflows/bloat_check.yaml b/.github/workflows/bloat_check.yaml index 2851452318f263..4c29d07b5b4c2b 100644 --- a/.github/workflows/bloat_check.yaml +++ b/.github/workflows/bloat_check.yaml @@ -33,7 +33,7 @@ jobs: runs-on: ubuntu-latest container: - image: connectedhomeip/chip-build:0.6.40 + image: connectedhomeip/chip-build:0.6.44 steps: - uses: Wandalen/wretry.action@v1.0.36 diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index c4a467182fd961..75c38ca1c6b4a4 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -36,7 +36,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build:0.6.40 + image: connectedhomeip/chip-build:0.6.44 volumes: - "/tmp/log_output:/tmp/test_logs" options: --privileged --sysctl "net.ipv6.conf.all.disable_ipv6=0 @@ -148,7 +148,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build:0.6.40 + image: connectedhomeip/chip-build:0.6.44 volumes: - "/tmp/log_output:/tmp/test_logs" options: --privileged --sysctl "net.ipv6.conf.all.disable_ipv6=0 @@ -318,7 +318,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build:0.6.40 + image: connectedhomeip/chip-build:0.6.44 volumes: - "/tmp/log_output:/tmp/test_logs" options: --sysctl "net.ipv6.conf.all.disable_ipv6=0 @@ -485,7 +485,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build:0.6.40 + image: connectedhomeip/chip-build:0.6.44 volumes: - "/tmp/log_output:/tmp/test_logs" options: --privileged --sysctl "net.ipv6.conf.all.disable_ipv6=0 diff --git a/.github/workflows/chef.yaml b/.github/workflows/chef.yaml 
index 52c581ccb5f2b8..6c916fab9e4ef6 100644 --- a/.github/workflows/chef.yaml +++ b/.github/workflows/chef.yaml @@ -30,7 +30,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build:0.6.40 + image: connectedhomeip/chip-build:0.6.44 options: --user root steps: @@ -66,7 +66,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build-esp32:0.6.40 + image: connectedhomeip/chip-build-esp32:0.6.44 options: --user root steps: @@ -102,7 +102,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build-nrf-platform:0.6.40 + image: connectedhomeip/chip-build-nrf-platform:0.6.44 options: --user root steps: diff --git a/.github/workflows/cirque.yaml b/.github/workflows/cirque.yaml index 50c9a1af0f631d..6dfffa7f49c750 100644 --- a/.github/workflows/cirque.yaml +++ b/.github/workflows/cirque.yaml @@ -30,7 +30,7 @@ jobs: timeout-minutes: 90 env: - DOCKER_RUN_VERSION: 0.6.40 + DOCKER_RUN_VERSION: 0.6.44 GITHUB_CACHE_PATH: /tmp/cirque-cache runs-on: ubuntu-latest @@ -39,7 +39,7 @@ jobs: # need to run with privilege, which isn't supported by job.XXX.contaner # https://github.com/actions/container-action/issues/2 # container: - # image: connectedhomeip/chip-build-cirque:0.6.40 + # image: connectedhomeip/chip-build-cirque:0.6.44 # volumes: # - "/tmp:/tmp" # - "/dev/pts:/dev/pts" diff --git a/.github/workflows/doxygen.yaml b/.github/workflows/doxygen.yaml index 43c9721ca0d8b3..08950f4da02ab1 100644 --- a/.github/workflows/doxygen.yaml +++ b/.github/workflows/doxygen.yaml @@ -82,7 +82,7 @@ jobs: runs-on: ubuntu-latest container: - image: connectedhomeip/chip-build-doxygen:0.6.40 + image: connectedhomeip/chip-build-doxygen:0.6.44 if: github.actor != 'restyled-io[bot]' diff --git a/.github/workflows/examples-ameba.yaml b/.github/workflows/examples-ameba.yaml index b0ac09c037ef3c..c5a6f25ec5a6b2 100644 --- a/.github/workflows/examples-ameba.yaml +++ 
b/.github/workflows/examples-ameba.yaml @@ -36,7 +36,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build-ameba:0.6.40 + image: connectedhomeip/chip-build-ameba:0.6.44 options: --user root steps: diff --git a/.github/workflows/examples-bouffalolab.yaml b/.github/workflows/examples-bouffalolab.yaml index c9cb6d1a2f348f..9314fd37d4d153 100644 --- a/.github/workflows/examples-bouffalolab.yaml +++ b/.github/workflows/examples-bouffalolab.yaml @@ -36,7 +36,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build-bouffalolab:0.6.40 + image: connectedhomeip/chip-build-bouffalolab:0.6.44 volumes: - "/tmp/bloat_reports:/tmp/bloat_reports" steps: diff --git a/.github/workflows/examples-cc13x2x7_26x2x7.yaml b/.github/workflows/examples-cc13x2x7_26x2x7.yaml index e4c9b1ec49f2c0..b68b91307edd04 100644 --- a/.github/workflows/examples-cc13x2x7_26x2x7.yaml +++ b/.github/workflows/examples-cc13x2x7_26x2x7.yaml @@ -38,7 +38,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build-ti:0.6.40 + image: connectedhomeip/chip-build-ti:0.6.44 volumes: - "/tmp/bloat_reports:/tmp/bloat_reports" steps: diff --git a/.github/workflows/examples-cc32xx.yaml b/.github/workflows/examples-cc32xx.yaml index 24a6d05de637ad..5d38038d3aea3b 100644 --- a/.github/workflows/examples-cc32xx.yaml +++ b/.github/workflows/examples-cc32xx.yaml @@ -35,7 +35,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build-ti:0.6.40 + image: connectedhomeip/chip-build-ti:0.6.44 volumes: - "/tmp/bloat_reports:/tmp/bloat_reports" steps: diff --git a/.github/workflows/examples-efr32.yaml b/.github/workflows/examples-efr32.yaml index 7afe0d4e11eec2..a143d172cd3f42 100644 --- a/.github/workflows/examples-efr32.yaml +++ b/.github/workflows/examples-efr32.yaml @@ -39,7 +39,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: 
connectedhomeip/chip-build-efr32:0.6.40 + image: connectedhomeip/chip-build-efr32:0.6.44 volumes: - "/tmp/bloat_reports:/tmp/bloat_reports" steps: diff --git a/.github/workflows/examples-esp32.yaml b/.github/workflows/examples-esp32.yaml index 2e427a05b920c2..9fdc43e199dcff 100644 --- a/.github/workflows/examples-esp32.yaml +++ b/.github/workflows/examples-esp32.yaml @@ -35,7 +35,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build-esp32:0.6.40 + image: connectedhomeip/chip-build-esp32:0.6.44 volumes: - "/tmp/bloat_reports:/tmp/bloat_reports" @@ -158,7 +158,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build-esp32:0.6.40 + image: connectedhomeip/chip-build-esp32:0.6.44 volumes: - "/tmp/bloat_reports:/tmp/bloat_reports" diff --git a/.github/workflows/examples-infineon.yaml b/.github/workflows/examples-infineon.yaml index a5747d62728eef..e9dacbf14777d5 100644 --- a/.github/workflows/examples-infineon.yaml +++ b/.github/workflows/examples-infineon.yaml @@ -36,7 +36,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build-infineon:0.6.40 + image: connectedhomeip/chip-build-infineon:0.6.44 volumes: - "/tmp/bloat_reports:/tmp/bloat_reports" steps: diff --git a/.github/workflows/examples-k32w.yaml b/.github/workflows/examples-k32w.yaml index 672c6ddf711f63..57ee094adc052e 100644 --- a/.github/workflows/examples-k32w.yaml +++ b/.github/workflows/examples-k32w.yaml @@ -38,7 +38,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build-k32w:0.6.40 + image: connectedhomeip/chip-build-k32w:0.6.44 volumes: - "/tmp/bloat_reports:/tmp/bloat_reports" steps: diff --git a/.github/workflows/examples-linux-arm.yaml b/.github/workflows/examples-linux-arm.yaml index 19122081ca6a61..76733159326c2d 100644 --- a/.github/workflows/examples-linux-arm.yaml +++ b/.github/workflows/examples-linux-arm.yaml @@ -35,7 +35,7 @@ jobs: 
if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build-crosscompile:0.6.40 + image: connectedhomeip/chip-build-crosscompile:0.6.44 volumes: - "/tmp/bloat_reports:/tmp/bloat_reports" diff --git a/.github/workflows/examples-linux-imx.yaml b/.github/workflows/examples-linux-imx.yaml index bd3212b46c32e0..3fe60a104a511c 100644 --- a/.github/workflows/examples-linux-imx.yaml +++ b/.github/workflows/examples-linux-imx.yaml @@ -35,7 +35,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build-imx:0.6.40 + image: connectedhomeip/chip-build-imx:0.6.44 steps: - uses: Wandalen/wretry.action@v1.0.36 diff --git a/.github/workflows/examples-linux-standalone.yaml b/.github/workflows/examples-linux-standalone.yaml index 41f5cbc6218207..bdee1f8644b461 100644 --- a/.github/workflows/examples-linux-standalone.yaml +++ b/.github/workflows/examples-linux-standalone.yaml @@ -35,7 +35,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build:0.6.40 + image: connectedhomeip/chip-build:0.6.44 volumes: - "/tmp/bloat_reports:/tmp/bloat_reports" diff --git a/.github/workflows/examples-mbed.yaml b/.github/workflows/examples-mbed.yaml index 1cddae05975659..916a245aa58625 100644 --- a/.github/workflows/examples-mbed.yaml +++ b/.github/workflows/examples-mbed.yaml @@ -41,7 +41,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build-mbed-os:0.6.40 + image: connectedhomeip/chip-build-mbed-os:0.6.44 volumes: - "/tmp/bloat_reports:/tmp/bloat_reports" diff --git a/.github/workflows/examples-mw320.yaml b/.github/workflows/examples-mw320.yaml index 46846ec4eefcff..53fdf00d65abf8 100755 --- a/.github/workflows/examples-mw320.yaml +++ b/.github/workflows/examples-mw320.yaml @@ -38,7 +38,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build:0.6.40 + image: connectedhomeip/chip-build:0.6.44 volumes: - 
"/tmp/bloat_reports:/tmp/bloat_reports" steps: diff --git a/.github/workflows/examples-nrfconnect.yaml b/.github/workflows/examples-nrfconnect.yaml index a32ffe09ba8021..0ae8124f5a0cee 100644 --- a/.github/workflows/examples-nrfconnect.yaml +++ b/.github/workflows/examples-nrfconnect.yaml @@ -38,7 +38,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build-nrf-platform:0.6.40 + image: connectedhomeip/chip-build-nrf-platform:0.6.44 volumes: - "/tmp/bloat_reports:/tmp/bloat_reports" diff --git a/.github/workflows/examples-openiotsdk.yaml b/.github/workflows/examples-openiotsdk.yaml index 0db5651baf4bc4..3d14afe8847384 100644 --- a/.github/workflows/examples-openiotsdk.yaml +++ b/.github/workflows/examples-openiotsdk.yaml @@ -36,7 +36,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build-openiotsdk:0.6.40 + image: connectedhomeip/chip-build-openiotsdk:0.6.44 volumes: - "/tmp/bloat_reports:/tmp/bloat_reports" options: --privileged diff --git a/.github/workflows/examples-qpg.yaml b/.github/workflows/examples-qpg.yaml index 1166acd0f7c9ff..ef9ca97121e5a8 100644 --- a/.github/workflows/examples-qpg.yaml +++ b/.github/workflows/examples-qpg.yaml @@ -38,7 +38,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build:0.6.40 + image: connectedhomeip/chip-build:0.6.44 volumes: - "/tmp/bloat_reports:/tmp/bloat_reports" steps: diff --git a/.github/workflows/examples-telink.yaml b/.github/workflows/examples-telink.yaml index 6c1865e4ad227d..137b19b51829d4 100644 --- a/.github/workflows/examples-telink.yaml +++ b/.github/workflows/examples-telink.yaml @@ -36,7 +36,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build-telink:0.6.40 + image: connectedhomeip/chip-build-telink:0.6.44 volumes: - "/tmp/bloat_reports:/tmp/bloat_reports" @@ -160,6 +160,15 @@ jobs: out/telink-tlsr9518adk80d-pump-controller/zephyr/zephyr.elf \ 
/tmp/bloat_reports/ + - name: Build example Telink Temperature Measurement App + run: | + ./scripts/run_in_build_env.sh \ + "./scripts/build/build_examples.py --target 'telink-tlsr9518adk80d-temperature-measurement' build" + .environment/pigweed-venv/bin/python3 scripts/tools/memory/gh_sizes.py \ + telink tlsr9518adk80d temperature-measurement-app \ + out/telink-tlsr9518adk80d-temperature-measurement/zephyr/zephyr.elf \ + /tmp/bloat_reports/ + - name: Build example Telink Thermostat App run: | ./scripts/run_in_build_env.sh \ diff --git a/.github/workflows/examples-tizen.yaml b/.github/workflows/examples-tizen.yaml index 3a838c7d85615f..531e646ff067d6 100644 --- a/.github/workflows/examples-tizen.yaml +++ b/.github/workflows/examples-tizen.yaml @@ -34,7 +34,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build-tizen:0.6.40 + image: connectedhomeip/chip-build-tizen:0.6.44 options: --user root volumes: - "/tmp/bloat_reports:/tmp/bloat_reports" diff --git a/.github/workflows/full-android.yaml b/.github/workflows/full-android.yaml index 1c625e3fba124e..b8f3802f334c0e 100644 --- a/.github/workflows/full-android.yaml +++ b/.github/workflows/full-android.yaml @@ -37,7 +37,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build-android:0.6.40 + image: connectedhomeip/chip-build-android:0.6.44 volumes: - "/tmp/log_output:/tmp/test_logs" diff --git a/.github/workflows/fuzzing-build.yaml b/.github/workflows/fuzzing-build.yaml index 6266abb91cb7d1..b32479cec23df4 100644 --- a/.github/workflows/fuzzing-build.yaml +++ b/.github/workflows/fuzzing-build.yaml @@ -34,7 +34,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build:0.6.40 + image: connectedhomeip/chip-build:0.6.44 volumes: - "/tmp/log_output:/tmp/test_logs" diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 089642b7c28914..483cc443008a8f 100644 --- 
a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -29,7 +29,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build:0.6.40 + image: connectedhomeip/chip-build:0.6.44 steps: - uses: Wandalen/wretry.action@v1.0.36 diff --git a/.github/workflows/qemu.yaml b/.github/workflows/qemu.yaml index 20a573e3235ffa..56a11cee92760b 100644 --- a/.github/workflows/qemu.yaml +++ b/.github/workflows/qemu.yaml @@ -27,7 +27,8 @@ env: CHIP_NO_LOG_TIMESTAMPS: true jobs: - qemu: + + qemu-esp32: name: ESP32 timeout-minutes: 85 @@ -38,7 +39,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build-esp32-qemu:0.6.40 + image: connectedhomeip/chip-build-esp32-qemu:0.6.44 volumes: - "/tmp/log_output:/tmp/test_logs" @@ -94,3 +95,46 @@ jobs: with: name: qemu-esp32-logs path: /tmp/log_output + + qemu-tizen: + name: Tizen + + runs-on: ubuntu-latest + if: github.actor != 'restyled-io[bot]' + + container: + image: connectedhomeip/chip-build-tizen-qemu:0.6.44 + volumes: + - "/tmp/log_output:/tmp/test_logs" + + steps: + - uses: Wandalen/wretry.action@v1.0.36 + name: Checkout + with: + action: actions/checkout@v3 + with: | + token: ${{ github.token }} + attempt_limit: 3 + attempt_delay: 2000 + - name: Checkout submodules + run: scripts/checkout_submodules.py --shallow --platform tizen + + - name: Bootstrap cache + uses: actions/cache@v3 + timeout-minutes: 10 + with: + key: ${{ runner.os }}-env-${{ hashFiles('scripts/setup/*', 'third_party/pigweed/**') }} + path: | + .environment + build_overrides/pigweed_environment.gni + - name: Bootstrap + timeout-minutes: 25 + run: scripts/build/gn_bootstrap.sh + + - name: Build and run tests + run: | + ./scripts/run_in_build_env.sh \ + "./scripts/build/build_examples.py \ + --target tizen-arm-tests-no-ble \ + build + " diff --git a/.github/workflows/release_artifacts.yaml b/.github/workflows/release_artifacts.yaml index b536e7e34d9272..c64d2232b3c5d5 100644 --- 
a/.github/workflows/release_artifacts.yaml +++ b/.github/workflows/release_artifacts.yaml @@ -29,7 +29,7 @@ jobs: runs-on: ubuntu-latest container: - image: connectedhomeip/chip-build-esp32:0.6.40 + image: connectedhomeip/chip-build-esp32:0.6.44 steps: - uses: Wandalen/wretry.action@v1.0.36 @@ -85,7 +85,7 @@ jobs: runs-on: ubuntu-latest container: - image: connectedhomeip/chip-build-efr32:0.6.40 + image: connectedhomeip/chip-build-efr32:0.6.44 steps: - uses: Wandalen/wretry.action@v1.0.36 name: Checkout diff --git a/.github/workflows/smoketest-android.yaml b/.github/workflows/smoketest-android.yaml index 18a17e125953f3..bf6b2d995d55d0 100644 --- a/.github/workflows/smoketest-android.yaml +++ b/.github/workflows/smoketest-android.yaml @@ -38,7 +38,7 @@ jobs: if: github.actor != 'restyled-io[bot]' container: - image: connectedhomeip/chip-build-android:0.6.40 + image: connectedhomeip/chip-build-android:0.6.44 volumes: - "/tmp/log_output:/tmp/test_logs" diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 8fb92717e6c65b..95d5a4193cd9a7 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -48,7 +48,7 @@ jobs: runs-on: ubuntu-latest container: - image: connectedhomeip/chip-build:0.6.40 + image: connectedhomeip/chip-build:0.6.44 options: --privileged --sysctl "net.ipv6.conf.all.disable_ipv6=0 net.ipv4.conf.all.forwarding=1 net.ipv6.conf.all.forwarding=1" @@ -445,7 +445,7 @@ jobs: runs-on: ubuntu-latest container: - image: connectedhomeip/chip-build:0.6.40 + image: connectedhomeip/chip-build:0.6.44 options: --privileged --sysctl "net.ipv6.conf.all.disable_ipv6=0 net.ipv4.conf.all.forwarding=0 net.ipv6.conf.all.forwarding=0" @@ -531,7 +531,7 @@ jobs: runs-on: ubuntu-latest container: - image: connectedhomeip/chip-build:0.6.40 + image: connectedhomeip/chip-build:0.6.44 options: --privileged --sysctl "net.ipv6.conf.all.disable_ipv6=0 net.ipv4.conf.all.forwarding=0 net.ipv6.conf.all.forwarding=0" diff --git 
a/.github/workflows/unit_integration_test.yaml b/.github/workflows/unit_integration_test.yaml index 9055cb12ec9ab1..f1a36facba7bb2 100644 --- a/.github/workflows/unit_integration_test.yaml +++ b/.github/workflows/unit_integration_test.yaml @@ -38,7 +38,7 @@ jobs: runs-on: ubuntu-latest container: - image: connectedhomeip/chip-build:0.6.40 + image: connectedhomeip/chip-build:0.6.44 volumes: - "/tmp/log_output:/tmp/test_logs" options: --privileged --sysctl "net.ipv6.conf.all.disable_ipv6=0 net.ipv4.conf.all.forwarding=1 net.ipv6.conf.all.forwarding=1" diff --git a/.github/workflows/zap_regeneration.yaml b/.github/workflows/zap_regeneration.yaml index 0cc0d843fbeeee..f9372831f11d1f 100644 --- a/.github/workflows/zap_regeneration.yaml +++ b/.github/workflows/zap_regeneration.yaml @@ -28,7 +28,7 @@ jobs: runs-on: ubuntu-20.04 container: - image: connectedhomeip/chip-build:0.6.40 + image: connectedhomeip/chip-build:0.6.44 defaults: run: shell: sh diff --git a/.github/workflows/zap_templates.yaml b/.github/workflows/zap_templates.yaml index 65364d7d601fb1..edcc2b1356ab17 100644 --- a/.github/workflows/zap_templates.yaml +++ b/.github/workflows/zap_templates.yaml @@ -30,7 +30,7 @@ jobs: runs-on: ubuntu-20.04 container: - image: connectedhomeip/chip-build:0.6.40 + image: connectedhomeip/chip-build:0.6.44 defaults: run: shell: sh diff --git a/.vscode/tasks.json b/.vscode/tasks.json index 5df772ddbaedbe..bd3c7821934dd5 100644 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -645,6 +645,7 @@ "telink-tlsr9518adk80d-ota-requestor", "telink-tlsr9518adk80d-pump-app", "telink-tlsr9518adk80d-pump-controller-app", + "telink-tlsr9518adk80d-temperature-measurement", "telink-tlsr9518adk80d-thermostat", "tizen-arm-light" ] diff --git a/BUILD.gn b/BUILD.gn index 96ab7addb4a9ce..d9c68de527c0aa 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -668,12 +668,21 @@ if (current_toolchain != "${dir_pw_toolchain}/default:default") { if (enable_tizen_lighting_app) { group("tizen_lighting_app") { - deps 
= [ "${chip_root}/examples/lighting-app/tizen/(${chip_root}/build/toolchain/tizen:tizen_arm)" ] + deps = [ "${chip_root}/examples/lighting-app/tizen(${chip_root}/build/toolchain/tizen:tizen_arm)" ] } extra_build_deps += [ ":tizen_lighting_app" ] } + if (enable_tizen_builds) { + group("check:tizen") { + testonly = true + deps = [ "${chip_root}/src/test_driver/tizen/integration-tests:check" ] + } + + extra_check_deps += [ ":check:tizen" ] + } + if (enable_mw320_shell_build) { group("mw320_shell") { deps = [ "${chip_root}/examples/shell/mw320(${chip_root}/config/mw320/toolchain:mw320_shell)" ] @@ -703,6 +712,7 @@ if (current_toolchain != "${dir_pw_toolchain}/default:default") { } group("check") { + testonly = true deps = extra_check_deps foreach(_build, builds) { deps += [ get_label_info(_build, "dir") + ":check_" + diff --git a/build/chip/java/tests/test.py b/build/chip/java/tests/test.py index a67c40079b1a59..1c90f51e04e9df 100755 --- a/build/chip/java/tests/test.py +++ b/build/chip/java/tests/test.py @@ -18,7 +18,6 @@ import json import os -import subprocess import unittest from os import path diff --git a/build/config/compiler/BUILD.gn b/build/config/compiler/BUILD.gn index dedb1ae26ad0dd..5c2accfcdc078e 100644 --- a/build/config/compiler/BUILD.gn +++ b/build/config/compiler/BUILD.gn @@ -249,6 +249,10 @@ config("strict_warnings") { ] } + if (current_os == "mac" || current_os == "ios") { + cflags += [ "-Wconversion" ] + } + if (build_java_matter_controller) { cflags -= [ "-Wshadow" ] } diff --git a/build/config/tizen/config.gni b/build/config/tizen/config.gni index 585a889ccfbffd..06a03d59b86881 100644 --- a/build/config/tizen/config.gni +++ b/build/config/tizen/config.gni @@ -13,9 +13,9 @@ # limitations under the License. declare_args() { - # Location of The Tizen sysroot - tizen_sdk_sysroot = "" + # Location of Tizen SDK + tizen_sdk_root = getenv("TIZEN_SDK_ROOT") - # Location of the Tizen SDK. 
- tizen_sdk_root = "" + # Location of Tizen SDK sysroot + tizen_sdk_sysroot = getenv("TIZEN_SDK_SYSROOT") } diff --git a/config/standalone/CHIPProjectConfig.h b/config/standalone/CHIPProjectConfig.h index 296c090fda05c5..101bdec25e1cd4 100644 --- a/config/standalone/CHIPProjectConfig.h +++ b/config/standalone/CHIPProjectConfig.h @@ -39,7 +39,9 @@ // WARNING: This option makes it possible to circumvent basic chip security functionality. // Because of this it SHOULD NEVER BE ENABLED IN PRODUCTION BUILDS. // +#ifndef CHIP_DEVICE_CONFIG_ENABLE_TEST_SETUP_PARAMS #define CHIP_DEVICE_CONFIG_ENABLE_TEST_SETUP_PARAMS 1 +#endif // Enable reading DRBG seed data from /dev/(u)random. // This is needed for test applications and the CHIP device manager to function diff --git a/examples/all-clusters-app/all-clusters-common/all-clusters-app.matter b/examples/all-clusters-app/all-clusters-common/all-clusters-app.matter index 61d81d2a0dea8d..0624c2d2eca7fd 100644 --- a/examples/all-clusters-app/all-clusters-common/all-clusters-app.matter +++ b/examples/all-clusters-app/all-clusters-common/all-clusters-app.matter @@ -852,7 +852,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -867,14 +867,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff 
--git a/examples/all-clusters-app/all-clusters-common/all-clusters-app.zap b/examples/all-clusters-app/all-clusters-common/all-clusters-app.zap index 7f0a235f1a01f6..0dd8835474050a 100644 --- a/examples/all-clusters-app/all-clusters-common/all-clusters-app.zap +++ b/examples/all-clusters-app/all-clusters-common/all-clusters-app.zap @@ -1977,7 +1977,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1993,7 +1993,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/all-clusters-app/ameba/main/DeviceCallbacks.cpp b/examples/all-clusters-app/ameba/main/DeviceCallbacks.cpp index f0be547259a2b8..3f1d88621bdbfb 100644 --- a/examples/all-clusters-app/ameba/main/DeviceCallbacks.cpp +++ b/examples/all-clusters-app/ameba/main/DeviceCallbacks.cpp @@ -127,9 +127,6 @@ void DeviceCallbacks::PostAttributeChangeCallback(EndpointId endpointId, Cluster void DeviceCallbacks::OnInternetConnectivityChange(const ChipDeviceEvent * event) { -#if CHIP_DEVICE_CONFIG_ENABLE_OTA_REQUESTOR - static bool isOTAInitialized = false; -#endif if (event->InternetConnectivityChange.IPv4 == kConnectivity_Established) { ChipLogProgress(DeviceLayer, "IPv4 Server ready..."); @@ -145,11 +142,10 @@ void DeviceCallbacks::OnInternetConnectivityChange(const ChipDeviceEvent * event chip::app::DnssdServer::Instance().StartServer(); #if CHIP_DEVICE_CONFIG_ENABLE_OTA_REQUESTOR // Init OTA requestor only when we have gotten IPv6 address - if (!isOTAInitialized) + if (OTAInitializer::Instance().CheckInit()) { chip::DeviceLayer::SystemLayer().StartTimer(chip::System::Clock::Seconds32(kInitOTARequestorDelaySec), InitOTARequestorHandler, nullptr); - isOTAInitialized = true; } #endif } diff --git a/examples/all-clusters-minimal-app/all-clusters-common/all-clusters-minimal-app.matter 
b/examples/all-clusters-minimal-app/all-clusters-common/all-clusters-minimal-app.matter index ac388002383329..0b9400d1f4c62b 100644 --- a/examples/all-clusters-minimal-app/all-clusters-common/all-clusters-minimal-app.matter +++ b/examples/all-clusters-minimal-app/all-clusters-common/all-clusters-minimal-app.matter @@ -737,7 +737,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -752,12 +752,12 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; + attribute HourFormatEnum hourFormat = 0; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/all-clusters-minimal-app/all-clusters-common/all-clusters-minimal-app.zap b/examples/all-clusters-minimal-app/all-clusters-common/all-clusters-minimal-app.zap index 68a1abb7cb3be0..9274f8063b9b17 100644 --- a/examples/all-clusters-minimal-app/all-clusters-common/all-clusters-minimal-app.zap +++ b/examples/all-clusters-minimal-app/all-clusters-common/all-clusters-minimal-app.zap @@ -1956,7 +1956,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1972,7 +1972,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 0, "storageOption": "NVM", "singleton": 0, diff --git a/examples/android/CHIPTool/app/src/main/java/com/google/chip/chiptool/clusterclient/WildcardFragment.kt b/examples/android/CHIPTool/app/src/main/java/com/google/chip/chiptool/clusterclient/WildcardFragment.kt index 9cc81d0db8bc63..64948f2a2c76f4 100644 --- 
a/examples/android/CHIPTool/app/src/main/java/com/google/chip/chiptool/clusterclient/WildcardFragment.kt +++ b/examples/android/CHIPTool/app/src/main/java/com/google/chip/chiptool/clusterclient/WildcardFragment.kt @@ -9,6 +9,7 @@ import android.view.ViewGroup import android.widget.Button import android.widget.EditText import android.widget.Spinner +import android.widget.TextView import androidx.fragment.app.Fragment import androidx.lifecycle.lifecycleScope import chip.devicecontroller.ChipDeviceController @@ -114,7 +115,7 @@ class WildcardFragment : Fragment() { return stringBuilder.toString() } - private suspend fun subscribe(type: Int, minInterval: Int, maxInterval: Int, keepSubscriptions: Boolean, isFabricFiltered: Boolean) { + private suspend fun subscribe(type: Int, minInterval: Int, maxInterval: Int, keepSubscriptions: Boolean, isFabricFiltered: Boolean, isUrgent: Boolean) { val subscriptionEstablishedCallback = SubscriptionEstablishedCallback { Log.i(TAG, "Subscription to device established") } @@ -141,7 +142,7 @@ class WildcardFragment : Fragment() { keepSubscriptions, isFabricFiltered) } else if (type == EVENT) { - val eventPath = ChipEventPath.newInstance(endpointId, clusterId, eventId) + val eventPath = ChipEventPath.newInstance(endpointId, clusterId, eventId, isUrgent) deviceController.subscribeToPath(subscriptionEstablishedCallback, resubscriptionAttemptCallback, reportCallback, @@ -199,6 +200,15 @@ class WildcardFragment : Fragment() { private fun showSubscribeDialog(type: Int) { val dialogView = requireActivity().layoutInflater.inflate(R.layout.subscribe_dialog, null) + val isUrgentTv = dialogView.findViewById(R.id.titleisUrgent) + val isUrgentSp = dialogView.findViewById(R.id.isUrgentSp) + if (type == EVENT) { + isUrgentTv.visibility = View.VISIBLE + isUrgentSp.visibility = View.VISIBLE + } else { + isUrgentTv.visibility = View.GONE + isUrgentSp.visibility = View.GONE + } val dialog = AlertDialog.Builder(requireContext()).apply { setView(dialogView) 
}.create() @@ -215,7 +225,8 @@ class WildcardFragment : Fragment() { minIntervalEd.text.toString().toInt(), maxIntervalEd.text.toString().toInt(), keepSubscriptionsSp.selectedItem.toString().toBoolean(), - isFabricFilteredSp.selectedItem.toString().toBoolean() + isFabricFilteredSp.selectedItem.toString().toBoolean(), + isUrgentSp.selectedItem.toString().toBoolean(), ) } else { Log.e(TAG, "minInterval or maxInterval is empty!" ) diff --git a/examples/android/CHIPTool/app/src/main/res/layout/subscribe_dialog.xml b/examples/android/CHIPTool/app/src/main/res/layout/subscribe_dialog.xml index 2e5947370085c8..a80d414f310750 100644 --- a/examples/android/CHIPTool/app/src/main/res/layout/subscribe_dialog.xml +++ b/examples/android/CHIPTool/app/src/main/res/layout/subscribe_dialog.xml @@ -34,6 +34,28 @@ android:layout_marginBottom="8dp" app:layout_constraintStart_toStartOf="parent" app:layout_constraintTop_toBottomOf="@id/minIntervalEd" /> + + - Subscribe Minimum interval (seconds) Maximum interval (seconds) + is Urgent Event (bool) keep subscriptions (bool) is Fabric Filtered (bool) diff --git a/examples/bridge-app/bridge-common/bridge-app.matter b/examples/bridge-app/bridge-common/bridge-app.matter index 1b8f1dd6ed852c..464416e0943658 100644 --- a/examples/bridge-app/bridge-common/bridge-app.matter +++ b/examples/bridge-app/bridge-common/bridge-app.matter @@ -522,7 +522,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -537,14 +537,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum 
activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/bridge-app/bridge-common/bridge-app.zap b/examples/bridge-app/bridge-common/bridge-app.zap index 14b3f2ced9668a..87aff9c88798bd 100644 --- a/examples/bridge-app/bridge-common/bridge-app.zap +++ b/examples/bridge-app/bridge-common/bridge-app.zap @@ -1149,7 +1149,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1165,7 +1165,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/chef/ameba/main/DeviceCallbacks.cpp b/examples/chef/ameba/main/DeviceCallbacks.cpp index 23030fead6f986..d32135cb48765c 100644 --- a/examples/chef/ameba/main/DeviceCallbacks.cpp +++ b/examples/chef/ameba/main/DeviceCallbacks.cpp @@ -105,9 +105,6 @@ void DeviceCallbacks::PostAttributeChangeCallback(EndpointId endpointId, Cluster void DeviceCallbacks::OnInternetConnectivityChange(const ChipDeviceEvent * event) { -#if CHIP_DEVICE_CONFIG_ENABLE_OTA_REQUESTOR - static bool isOTAInitialized = false; -#endif if (event->InternetConnectivityChange.IPv4 == kConnectivity_Established) { ChipLogProgress(DeviceLayer, "IPv4 Server ready..."); @@ -123,11 +120,10 @@ void DeviceCallbacks::OnInternetConnectivityChange(const ChipDeviceEvent * event chip::app::DnssdServer::Instance().StartServer(); #if CHIP_DEVICE_CONFIG_ENABLE_OTA_REQUESTOR // Init OTA requestor only when we have gotten IPv6 address - if (!isOTAInitialized) + if (OTAInitializer::Instance().CheckInit()) { chip::DeviceLayer::SystemLayer().StartTimer(chip::System::Clock::Seconds32(kInitOTARequestorDelaySec), 
InitOTARequestorHandler, nullptr); - isOTAInitialized = true; } #endif } diff --git a/examples/chef/chef.py b/examples/chef/chef.py index cdd4a61a5c2d6c..bc4c820974ed83 100755 --- a/examples/chef/chef.py +++ b/examples/chef/chef.py @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import hashlib import json import optparse import os @@ -23,7 +22,7 @@ import sys import tarfile import textwrap -from typing import Any, Dict, Sequence +from typing import Any, Dict import constants import stateful_shell @@ -280,7 +279,8 @@ def main() -> int: Notes: - Whenever you change a device type, make sure to also use options -zbe - - Be careful if you have more than one device connected. The script assumes you have only one device connected and might flash the wrong one\ + - Be careful if you have more than one device connected. + The script assumes you have only one device connected and might flash the wrong one """) parser = optparse.OptionParser(usage=usage) @@ -313,9 +313,13 @@ def main() -> int: 'linux', 'silabs-thread', 'ameba'], metavar="TARGET", default="esp32") - parser.add_option("-r", "--rpc", help="enables Pigweed RPC interface. Enabling RPC disables the shell interface. Your sdkconfig configurations will be reverted to default. Default is PW RPC off. When enabling or disabling this flag, on the first build force a clean build with -c", + parser.add_option("-r", "--rpc", + help=("enables Pigweed RPC interface. Enabling RPC disables the shell interface. " + "Your sdkconfig configurations will be reverted to default. Default is PW RPC off. 
" + "When enabling or disabling this flag, on the first build force a clean build with -c"), action="store_true", dest="do_rpc", default=False) - parser.add_option("-a", "--automated_test_stamp", help="provide the additional stamp \"branch:commit_id\" as the software version string for automated tests.", + parser.add_option("-a", "--automated_test_stamp", + help="provide the additional stamp \"branch:commit_id\" as the software version string for automated tests.", action="store_true", dest="do_automated_test_stamp") parser.add_option("-v", "--vid", dest="vid", type=int, help="specifies the Vendor ID. Default is 0xFFF1", metavar="VID", default=0xFFF1) @@ -325,22 +329,31 @@ def main() -> int: help="specifies the Product Name. Default is TEST_PRODUCT", default="TEST_PRODUCT") parser.add_option("", "--rpc_console", help="Opens PW RPC Console", action="store_true", dest="do_rpc_console") - parser.add_option("-y", "--tty", help="Enumerated USB tty/serial interface enumerated for your physical device. E.g.: /dev/ACM0", + parser.add_option("-y", "--tty", + help="Enumerated USB tty/serial interface enumerated for your physical device. E.g.: /dev/ACM0", dest="tty", metavar="TTY", default=None) - # Build CD params. - parser.add_option("", "--build_all", help="For use in CD only. Builds and bundles all chef examples for the specified platform. Chef exits after completion.", + parser.add_option("", "--build_all", + help=("For use in CD only. Builds and bundles all chef examples for " + "the specified platform. Chef exits after completion."), dest="build_all", action="store_true") parser.add_option("", "--dry_run", help="Display list of target builds of the --build_all command without building them.", dest="dry_run", action="store_true") - parser.add_option("", "--build_exclude", help="For use with --build_all. Build labels to exclude. Accepts a regex pattern. 
Mutually exclusive with --build_include.", + parser.add_option("", "--build_exclude", + help=("For use with --build_all. Build labels to exclude. " + "Accepts a regex pattern. Mutually exclusive with --build_include."), dest="build_exclude") - parser.add_option("", "--build_include", help="For use with --build_all. Build labels to include. Accepts a regex pattern. Mutually exclusive with --build_exclude.", + parser.add_option("", "--build_include", + help=("For use with --build_all. Build labels to include. " + "Accepts a regex pattern. Mutually exclusive with --build_exclude."), dest="build_include") - parser.add_option("-k", "--keep_going", help="For use in CD only. Continues building all sample apps in the event of an error.", + parser.add_option("-k", "--keep_going", + help="For use in CD only. Continues building all sample apps in the event of an error.", dest="keep_going", action="store_true") - parser.add_option( - "", "--ci", help="Builds Chef examples defined in cicd_config. Uses specified target from -t. Chef exits after completion.", dest="ci", action="store_true") + parser.add_option("", "--ci", + help=("Builds Chef examples defined in cicd_config. " + "Uses specified target from -t. Chef exits after completion."), + dest="ci", action="store_true") parser.add_option( "", "--enable_ipv4", help="Enable IPv4 mDNS. Only applicable to platforms that can support IPV4 (e.g, Linux, ESP32)", action="store_true", default=False) @@ -452,14 +465,12 @@ def main() -> int: flush_print( 'Path for esp32 SDK was not found. Make sure esp32.IDF_PATH is set on your config.yaml file') exit(1) - plat_folder = os.path.normpath(f"{_CHEF_SCRIPT_PATH}/esp32") shell.run_cmd(f'source {config["esp32"]["IDF_PATH"]}/export.sh') elif options.build_target == "nrfconnect": if config['nrfconnect']['ZEPHYR_BASE'] is None: flush_print( 'Path for nrfconnect SDK was not found. 
Make sure nrfconnect.ZEPHYR_BASE is set on your config.yaml file') exit(1) - plat_folder = os.path.normpath(f"{_CHEF_SCRIPT_PATH}/nrfconnect") shell.run_cmd( f'source {config["nrfconnect"]["ZEPHYR_BASE"]}/zephyr-env.sh') shell.run_cmd("export ZEPHYR_TOOLCHAIN_VARIANT=gnuarmemb") @@ -532,7 +543,8 @@ def main() -> int: shell.run_cmd(f"rm -rf {gen_dir}") shell.run_cmd(f"mkdir -p {gen_dir}") shell.run_cmd( - f"{_REPO_BASE_PATH}/scripts/tools/zap/generate.py {_CHEF_SCRIPT_PATH}/devices/{options.sample_device_type_name}.zap -o {gen_dir}") + f"{_REPO_BASE_PATH}/scripts/tools/zap/generate.py " + f"{_CHEF_SCRIPT_PATH}/devices/{options.sample_device_type_name}.zap -o {gen_dir}") # af-gen-event.h is not generated shell.run_cmd(f"touch {gen_dir}/af-gen-event.h") @@ -545,7 +557,8 @@ def main() -> int: shell.run_cmd( f"export SDKCONFIG_DEFAULTS={_CHEF_SCRIPT_PATH}/esp32/sdkconfig_rpc.defaults") shell.run_cmd( - f"[ -f {_CHEF_SCRIPT_PATH}/esp32/sdkconfig ] || cp {_CHEF_SCRIPT_PATH}/esp32/sdkconfig_rpc.defaults {_CHEF_SCRIPT_PATH}/esp32/sdkconfig") + f"[ -f {_CHEF_SCRIPT_PATH}/esp32/sdkconfig ] || cp " + f"{_CHEF_SCRIPT_PATH}/esp32/sdkconfig_rpc.defaults {_CHEF_SCRIPT_PATH}/esp32/sdkconfig") else: flush_print(f"RPC PW on {options.build_target} not supported") @@ -555,7 +568,8 @@ def main() -> int: shell.run_cmd( f"export SDKCONFIG_DEFAULTS={_CHEF_SCRIPT_PATH}/esp32/sdkconfig.defaults") shell.run_cmd( - f"[ -f {_CHEF_SCRIPT_PATH}/esp32/sdkconfig ] || cp {_CHEF_SCRIPT_PATH}/esp32/sdkconfig.defaults {_CHEF_SCRIPT_PATH}/esp32/sdkconfig") + f"[ -f {_CHEF_SCRIPT_PATH}/esp32/sdkconfig ] || cp " + f"{_CHEF_SCRIPT_PATH}/esp32/sdkconfig.defaults {_CHEF_SCRIPT_PATH}/esp32/sdkconfig") # # Menuconfig @@ -586,7 +600,7 @@ def main() -> int: if options.do_automated_test_stamp: branch = "" for branch_text in shell.run_cmd("git branch", return_cmd_output=True).split("\n"): - match_texts = re.findall("\* (.*)", branch_text) + match_texts = re.findall(r"\* (.*)", branch_text) if match_texts: branch = 
match_texts[0] break @@ -597,7 +611,8 @@ def main() -> int: if len(sw_ver_string) >= 64: truncated_sw_ver_string = f"""{branch[:22]}:{commit_id}""" flush_print( - f"""Truncate the software version string from \"{sw_ver_string}\" to \"{truncated_sw_ver_string}\" due to 64 bytes limitation""") + f"Truncate the software version string from \"{sw_ver_string}\" to " + f"\"{truncated_sw_ver_string}\" due to 64 bytes limitation") sw_ver_string = truncated_sw_ver_string flush_print("Building...") @@ -620,14 +635,15 @@ def main() -> int: shell.run_cmd(f"cd {_CHEF_SCRIPT_PATH}/esp32") if options.enable_ipv4: shell.run_cmd( - f"sed -i 's/CONFIG_DISABLE_IPV4=y/#\\ CONFIG_DISABLE_IPV4\\ is\\ not\\ set/g' sdkconfig ") + "sed -i 's/CONFIG_DISABLE_IPV4=y/#\\ CONFIG_DISABLE_IPV4\\ is\\ not\\ set/g' sdkconfig ") else: shell.run_cmd( - f"sed -i 's/#\\ CONFIG_DISABLE_IPV4\\ is\\ not\\ set/CONFIG_DISABLE_IPV4=y/g' sdkconfig ") + "sed -i 's/#\\ CONFIG_DISABLE_IPV4\\ is\\ not\\ set/CONFIG_DISABLE_IPV4=y/g' sdkconfig ") shell.run_cmd("idf.py build") shell.run_cmd("idf.py build flashing_script") shell.run_cmd( - f"(cd build/ && tar cJvf $(git rev-parse HEAD)-{options.sample_device_type_name}.tar.xz --files-from=chip-shell.flashbundle.txt)") + f"(cd build/ && tar cJvf $(git rev-parse HEAD)-{options.sample_device_type_name}.tar.xz " + f"--files-from=chip-shell.flashbundle.txt)") shell.run_cmd( f"cp build/$(git rev-parse HEAD)-{options.sample_device_type_name}.tar.xz {_CHEF_SCRIPT_PATH}") elif options.build_target == "nrfconnect": @@ -668,15 +684,17 @@ def main() -> int: shell.run_cmd( f"cd {config['ameba']['AMEBA_SDK']}/project/realtek_amebaD_va0_example/GCC-RELEASE") if options.do_clean: - shell.run_cmd(f"rm -rf out") + shell.run_cmd("rm -rf out") shell.run_cmd( - f"./build.sh {config['ameba']['MATTER_SDK']} ninja {config['ameba']['AMEBA_SDK']}/project/realtek_amebaD_va0_example/GCC-RELEASE/out chef-app") + f"./build.sh {config['ameba']['MATTER_SDK']} ninja " + 
f"{config['ameba']['AMEBA_SDK']}/project/realtek_amebaD_va0_example/GCC-RELEASE/out chef-app") shell.run_cmd("ninja -C out") elif config['ameba']['MODEL'] == 'Z2': shell.run_cmd( f"cd {config['ameba']['AMEBA_SDK']}/project/realtek_amebaz2_v0_example/GCC-RELEASE") shell.run_cmd("rm -f project_include.mk") - with open(f"{config['ameba']['AMEBA_SDK']}/project/realtek_amebaz2_v0_example/GCC-RELEASE/project_include.mk", "w") as f: + cmd = f"{config['ameba']['AMEBA_SDK']}/project/realtek_amebaz2_v0_example/GCC-RELEASE/project_include.mk" + with open(cmd, "w") as f: f.write(textwrap.dedent(f"""\ SAMPLE_NAME = {options.sample_device_type_name} CHEF_FLAGS = @@ -702,7 +720,10 @@ def main() -> int: 'chip_shell_cmd_server = false', 'chip_build_libshell = true', 'chip_config_network_layer_ble = false', - f'target_defines = ["CHIP_DEVICE_CONFIG_DEVICE_VENDOR_ID={options.vid}", "CHIP_DEVICE_CONFIG_DEVICE_PRODUCT_ID={options.pid}", "CONFIG_ENABLE_PW_RPC={int(options.do_rpc)}", "CHIP_DEVICE_CONFIG_DEVICE_PRODUCT_NAME=\\"{str(options.pname)}\\""]', + (f'target_defines = ["CHIP_DEVICE_CONFIG_DEVICE_VENDOR_ID={options.vid}", ' + f'"CHIP_DEVICE_CONFIG_DEVICE_PRODUCT_ID={options.pid}", ' + f'"CONFIG_ENABLE_PW_RPC={int(options.do_rpc)}", ' + f'"CHIP_DEVICE_CONFIG_DEVICE_PRODUCT_NAME=\\"{str(options.pname)}\\""]'), ]) uname_resp = shell.run_cmd("uname -m", return_cmd_output=True) @@ -758,7 +779,7 @@ def main() -> int: sample_name = "{options.sample_device_type_name}" """)) if options.do_clean: - shell.run_cmd(f"rm -rf out") + shell.run_cmd("rm -rf out") shell.run_cmd("gn gen out") shell.run_cmd("ninja -C out") @@ -799,14 +820,20 @@ def main() -> int: shell.run_cmd(f"cd {_CHEF_SCRIPT_PATH}/ameba") shell.run_cmd( f"cd {config['ameba']['AMEBA_SDK']}/tools/AmebaD/Image_Tool_Linux") - shell.run_cmd( - f"{config['ameba']['AMEBA_SDK']}/tools/AmebaD/Image_Tool_Linux/flash.sh {config['ameba']['TTY']} {config['ameba']['AMEBA_SDK']}/project/realtek_amebaD_va0_example/GCC-RELEASE/out", 
raise_on_returncode=False) + shell.run_cmd(( + f"{config['ameba']['AMEBA_SDK']}/tools/AmebaD/Image_Tool_Linux/flash.sh " + f"{config['ameba']['TTY']} {config['ameba']['AMEBA_SDK']}" + f"/project/realtek_amebaD_va0_example/GCC-RELEASE/out" + ), raise_on_returncode=False) else: shell.run_cmd(f"cd {_CHEF_SCRIPT_PATH}/ameba") shell.run_cmd( f"cd {config['ameba']['AMEBA_SDK']}/tools/AmebaZ2/Image_Tool_Linux") - shell.run_cmd( - f"{config['ameba']['AMEBA_SDK']}/tools/AmebaZ2/Image_Tool_Linux/flash.sh {config['ameba']['TTY']} {config['ameba']['AMEBA_SDK']}/project/realtek_amebaz2_v0_example/GCC-RELEASE/application_is/Debug/bin", raise_on_returncode=False) + shell.run_cmd(( + f"{config['ameba']['AMEBA_SDK']}/tools/AmebaZ2/Image_Tool_Linux/flash.sh " + f"{config['ameba']['TTY']} {config['ameba']['AMEBA_SDK']}" + f"/project/realtek_amebaz2_v0_example/GCC-RELEASE/application_is/Debug/bin" + ), raise_on_returncode=False) # # Terminal interaction @@ -817,21 +844,24 @@ def main() -> int: if options.build_target == "esp32": if config['esp32']['TTY'] is None: flush_print( - 'The path for the serial enumeration for esp32 is not set. Make sure esp32.TTY is set on your config.yaml file') + 'The path for the serial enumeration for esp32 is not set. ' + 'Make sure esp32.TTY is set on your config.yaml file') exit(1) shell.run_cmd(f"cd {_CHEF_SCRIPT_PATH}/esp32") shell.run_cmd(f"idf.py -p {config['esp32']['TTY']} monitor") elif options.build_target == "nrfconnect": if config['nrfconnect']['TTY'] is None: flush_print( - 'The path for the serial enumeration for nordic is not set. Make sure nrfconnect.TTY is set on your config.yaml file') + 'The path for the serial enumeration for nordic is not set. 
' + 'Make sure nrfconnect.TTY is set on your config.yaml file') exit(1) shell.run_cmd("killall screen") shell.run_cmd(f"screen {config['nrfconnect']['TTY']} 115200") elif (options.build_target == "silabs-thread"): if config['silabs-thread']['TTY'] is None: flush_print( - 'The path for the serial enumeration for silabs-thread is not set. Make sure silabs-thread.TTY is set on your config.yaml file') + 'The path for the serial enumeration for silabs-thread is not set. ' + 'Make sure silabs-thread.TTY is set on your config.yaml file') exit(1) shell.run_cmd("killall screen") @@ -864,14 +894,16 @@ def main() -> int: if (sys.platform == "linux") or (sys.platform == "linux2"): if (config['silabs-thread']['TTY'] is None): flush_print( - 'The path for the serial enumeration for silabs-thread is not set. Make sure silabs-thread.TTY is set on your config.yaml file') + 'The path for the serial enumeration for silabs-thread is not set. ' + 'Make sure silabs-thread.TTY is set on your config.yaml file') exit(1) shell.run_cmd( f"python3 -m chip_rpc.console --device {config['silabs-thread']['TTY']} -b 115200") elif sys.platform == "darwin": if (config['silabs-thread']['CU'] is None): flush_print( - 'The path for the serial enumeration for silabs-thread is not set. Make sure silabs-thread.CU is set on your config.yaml file') + 'The path for the serial enumeration for silabs-thread is not set. 
' + 'Make sure silabs-thread.CU is set on your config.yaml file') exit(1) shell.run_cmd( f"python3 -m chip_rpc.console --device {config['silabs-thread']['CU']} -b 115200") diff --git a/examples/chef/common/stubs.cpp b/examples/chef/common/stubs.cpp index d43c46ae9317b1..75e80468b0c044 100644 --- a/examples/chef/common/stubs.cpp +++ b/examples/chef/common/stubs.cpp @@ -6,11 +6,191 @@ #ifdef EMBER_AF_PLUGIN_DOOR_LOCK_SERVER #include +class LockManager +{ +public: + static constexpr uint32_t kNumEndpoints = 1; + static constexpr uint32_t kNumUsersPerEndpoint = 2; + static constexpr uint32_t kNumCredentialsPerEndpoint = 20; + static constexpr uint32_t kNumCredentialsPerUser = 10; + static constexpr uint32_t kMaxNameLength = 32; + static constexpr uint32_t kMaxDataLength = 16; + + struct Credential + { + bool set(DlCredentialStatus status, DlCredentialType type, chip::ByteSpan newData) + { + if (newData.size() > kMaxDataLength || type != DlCredentialType::kPIN) + return false; + memcpy(data, newData.data(), newData.size()); + info = EmberAfPluginDoorLockCredentialInfo{ + status, + type, + chip::ByteSpan(data, newData.size()), + }; + return true; + } + + EmberAfPluginDoorLockCredentialInfo info = { DlCredentialStatus::kAvailable }; + uint8_t data[kMaxDataLength]; + }; + + struct User + { + void set(chip::CharSpan newName, uint32_t userId, DlUserStatus userStatus, DlUserType type, DlCredentialRule credentialRule) + { + size_t sz = std::min(sizeof(name), newName.size()); + memcpy(name, newName.data(), sz); + info = EmberAfPluginDoorLockUserInfo{ + chip::CharSpan(name, sz), chip::Span(), userId, userStatus, type, credentialRule, + }; + } + bool addCredential(uint8_t type, uint16_t index) + { + if (info.credentials.size() == kNumCredentialsPerUser) + return false; + auto & cr = credentialMap[info.credentials.size()]; + cr.CredentialType = type; + cr.CredentialIndex = index; + info.credentials = chip::Span(credentialMap, info.credentials.size() + 1); + return true; + } + + 
EmberAfPluginDoorLockUserInfo info = { .userStatus = DlUserStatus::kAvailable }; + char name[kMaxNameLength]; + DlCredential credentialMap[kNumCredentialsPerUser]; + }; + + struct Endpoint + { + chip::EndpointId id; + User users[kNumUsersPerEndpoint]; + Credential credentials[kNumCredentialsPerEndpoint]; + }; + + static LockManager & Instance() + { + static LockManager instance; + return instance; + } + + LockManager() { defaultInitialize(); } + + bool getUser(chip::EndpointId endpointId, uint16_t userIndex, EmberAfPluginDoorLockUserInfo & user) + { + auto ep = findEndpoint(endpointId); + if (!ep) + return false; + if (userIndex >= kNumUsersPerEndpoint) + return false; + user = ep->users[userIndex].info; + return true; + } + + bool setUser(chip::EndpointId endpointId, uint16_t userIndex, chip::FabricIndex creator, chip::FabricIndex modifier, + const chip::CharSpan & userName, uint32_t uniqueId, DlUserStatus userStatus, DlUserType usertype, + DlCredentialRule credentialRule, const DlCredential * credentials, size_t totalCredentials) + { + auto ep = findEndpoint(endpointId); + if (!ep) + return false; + if (userIndex >= kNumUsersPerEndpoint || totalCredentials > kNumCredentialsPerUser) + return false; + ep->users[userIndex].set(userName, uniqueId, userStatus, usertype, credentialRule); + ep->users[userIndex].info.creationSource = DlAssetSource::kMatterIM; + ep->users[userIndex].info.createdBy = creator; + ep->users[userIndex].info.modificationSource = DlAssetSource::kMatterIM; + ep->users[userIndex].info.lastModifiedBy = modifier; + for (size_t i = 0; i < totalCredentials; i++) + ep->users[userIndex].addCredential(credentials[i].CredentialType, credentials[i].CredentialIndex); + return true; + } + + bool getCredential(chip::EndpointId endpointId, uint16_t credentialIndex, DlCredentialType credentialType, + EmberAfPluginDoorLockCredentialInfo & credential) + { + auto ep = findEndpoint(endpointId); + if (!ep) + return false; + if (credentialIndex >= 
kNumCredentialsPerEndpoint) + return false; + if (credentialType != DlCredentialType::kPIN) + return false; + credential = ep->credentials[credentialIndex].info; + return true; + } + + bool setCredential(chip::EndpointId endpointId, uint16_t credentialIndex, chip::FabricIndex creator, chip::FabricIndex modifier, + DlCredentialStatus credentialStatus, DlCredentialType credentialType, const chip::ByteSpan & credentialData) + { + auto ep = findEndpoint(endpointId); + if (!ep) + return false; + if (credentialIndex >= kNumCredentialsPerEndpoint) + return false; + if (credentialType != DlCredentialType::kPIN) + return false; + auto & credential = ep->credentials[credentialIndex]; + if (!credential.set(credentialStatus, credentialType, credentialData)) + return false; + credential.info.creationSource = DlAssetSource::kMatterIM; + credential.info.createdBy = creator; + credential.info.modificationSource = DlAssetSource::kMatterIM; + credential.info.lastModifiedBy = modifier; + return true; + } + + bool checkPin(chip::EndpointId endpointId, const chip::Optional & pinCode, + chip::app::Clusters::DoorLock::DlOperationError & err) + { + if (!pinCode.HasValue()) + { + err = DlOperationError::kInvalidCredential; + return false; + } + auto ep = findEndpoint(endpointId); + if (!ep) + return false; + for (auto & pin : ep->credentials) + { + if (pin.info.status == DlCredentialStatus::kOccupied && pin.info.credentialData.data_equal(pinCode.Value())) + { + return true; + } + } + err = DlOperationError::kInvalidCredential; + return false; + } + +private: + Endpoint * findEndpoint(chip::EndpointId endpointId) + { + for (auto & e : endpoints) + { + if (e.id == endpointId) + return &e; + } + return nullptr; + } + + void defaultInitialize() + { + endpoints[0].id = 1; + uint8_t pin[6] = { 0x31, 0x32, 0x33, 0x34, 0x35, 0x36 }; + endpoints[0].credentials[chip::to_underlying(DlCredentialType::kPin)][0].set(DlCredentialStatus::kOccupied, + DlCredentialType::kPin, chip::ByteSpan(pin)); + 
endpoints[0].users[0].set(chip::CharSpan("default"), 1, DlUserStatus::kOccupiedEnabled, DlUserType::kUnrestrictedUser, + DlCredentialRule::kSingle); + endpoints[0].users[0].addCredential(chip::to_underlying(DlCredentialType::kPin), 1); + } + + Endpoint endpoints[kNumEndpoints]; +}; + bool emberAfPluginDoorLockOnDoorLockCommand(chip::EndpointId endpointId, const chip::Optional & pinCode, chip::app::Clusters::DoorLock::OperationErrorEnum & err) { err = OperationErrorEnum::kUnspecified; - // TBD: LockManager, check pinCode, ... return DoorLockServer::Instance().SetLockState(endpointId, DlLockState::kLocked); } @@ -18,7 +198,35 @@ bool emberAfPluginDoorLockOnDoorUnlockCommand(chip::EndpointId endpointId, const chip::app::Clusters::DoorLock::OperationErrorEnum & err) { err = OperationErrorEnum::kUnspecified; - // TBD: LockManager, check pinCode, ... return DoorLockServer::Instance().SetLockState(endpointId, DlLockState::kUnlocked); } + +bool emberAfPluginDoorLockGetUser(chip::EndpointId endpointId, uint16_t userIndex, EmberAfPluginDoorLockUserInfo & user) +{ + return LockManager::Instance().getUser(endpointId, userIndex - 1, user); +} + +bool emberAfPluginDoorLockSetUser(chip::EndpointId endpointId, uint16_t userIndex, chip::FabricIndex creator, + chip::FabricIndex modifier, const chip::CharSpan & userName, uint32_t uniqueId, + DlUserStatus userStatus, DlUserType usertype, DlCredentialRule credentialRule, + const DlCredential * credentials, size_t totalCredentials) +{ + return LockManager::Instance().setUser(endpointId, userIndex - 1, creator, modifier, userName, uniqueId, userStatus, usertype, + credentialRule, credentials, totalCredentials); +} + +bool emberAfPluginDoorLockGetCredential(chip::EndpointId endpointId, uint16_t credentialIndex, DlCredentialType credentialType, + EmberAfPluginDoorLockCredentialInfo & credential) +{ + return LockManager::Instance().getCredential(endpointId, credentialIndex - 1, credentialType, credential); +} + +bool 
emberAfPluginDoorLockSetCredential(chip::EndpointId endpointId, uint16_t credentialIndex, chip::FabricIndex creator, + chip::FabricIndex modifier, DlCredentialStatus credentialStatus, + DlCredentialType credentialType, const chip::ByteSpan & credentialData) +{ + return LockManager::Instance().setCredential(endpointId, credentialIndex - 1, creator, modifier, credentialStatus, + credentialType, credentialData); +} + #endif /* EMBER_AF_PLUGIN_DOOR_LOCK_SERVER */ diff --git a/examples/chef/devices/noip_rootnode_dimmablelight_bCwGYSDpoe.matter b/examples/chef/devices/noip_rootnode_dimmablelight_bCwGYSDpoe.matter index 8360d86ea3f6cf..2666156dd67cec 100644 --- a/examples/chef/devices/noip_rootnode_dimmablelight_bCwGYSDpoe.matter +++ b/examples/chef/devices/noip_rootnode_dimmablelight_bCwGYSDpoe.matter @@ -551,7 +551,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -566,14 +566,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/chef/devices/noip_rootnode_dimmablelight_bCwGYSDpoe.zap b/examples/chef/devices/noip_rootnode_dimmablelight_bCwGYSDpoe.zap index 79d49fca74cc7f..f6fe54c34229d7 100644 --- a/examples/chef/devices/noip_rootnode_dimmablelight_bCwGYSDpoe.zap +++ 
b/examples/chef/devices/noip_rootnode_dimmablelight_bCwGYSDpoe.zap @@ -1891,7 +1891,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1907,7 +1907,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/chef/devices/rootnode_colortemperaturelight_hbUnzYVeyn.zap b/examples/chef/devices/rootnode_colortemperaturelight_hbUnzYVeyn.zap index 6513f7d3ea0b48..35655e0bf6a34b 100644 --- a/examples/chef/devices/rootnode_colortemperaturelight_hbUnzYVeyn.zap +++ b/examples/chef/devices/rootnode_colortemperaturelight_hbUnzYVeyn.zap @@ -1971,7 +1971,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1987,7 +1987,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/chef/devices/rootnode_contactsensor_lFAGG1bfRO.matter b/examples/chef/devices/rootnode_contactsensor_lFAGG1bfRO.matter index c3b699c0fa87cc..d69ad8d02016c8 100644 --- a/examples/chef/devices/rootnode_contactsensor_lFAGG1bfRO.matter +++ b/examples/chef/devices/rootnode_contactsensor_lFAGG1bfRO.matter @@ -404,7 +404,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -419,14 +419,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute 
HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/chef/devices/rootnode_contactsensor_lFAGG1bfRO.zap b/examples/chef/devices/rootnode_contactsensor_lFAGG1bfRO.zap index 76d6265afb2798..b60155192a97c3 100644 --- a/examples/chef/devices/rootnode_contactsensor_lFAGG1bfRO.zap +++ b/examples/chef/devices/rootnode_contactsensor_lFAGG1bfRO.zap @@ -1891,7 +1891,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1907,7 +1907,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/chef/devices/rootnode_dimmablelight_bCwGYSDpoe.matter b/examples/chef/devices/rootnode_dimmablelight_bCwGYSDpoe.matter index a30f6fcf00706f..9a1eb55c3db7cd 100644 --- a/examples/chef/devices/rootnode_dimmablelight_bCwGYSDpoe.matter +++ b/examples/chef/devices/rootnode_dimmablelight_bCwGYSDpoe.matter @@ -551,7 +551,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -566,14 +566,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum 
supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/chef/devices/rootnode_dimmablelight_bCwGYSDpoe.zap b/examples/chef/devices/rootnode_dimmablelight_bCwGYSDpoe.zap index 235516c739524b..dad71db6903b2f 100644 --- a/examples/chef/devices/rootnode_dimmablelight_bCwGYSDpoe.zap +++ b/examples/chef/devices/rootnode_dimmablelight_bCwGYSDpoe.zap @@ -1891,7 +1891,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1907,7 +1907,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/chef/devices/rootnode_doorlock_aNKYAreMXE.matter b/examples/chef/devices/rootnode_doorlock_aNKYAreMXE.matter index a08a7ee4a4da66..0dda840f15e723 100644 --- a/examples/chef/devices/rootnode_doorlock_aNKYAreMXE.matter +++ b/examples/chef/devices/rootnode_doorlock_aNKYAreMXE.matter @@ -404,7 +404,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -419,14 +419,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; 
readonly attribute event_id eventList[] = 65530; @@ -1428,6 +1428,11 @@ server cluster DoorLock = 257 { kHolidaySchedules = 0x800; } + struct CredentialStruct { + CredentialTypeEnum credentialType = 0; + int16u credentialIndex = 1; + } + critical event DoorLockAlarm = 0 { AlarmCodeEnum alarmCode = 0; } @@ -1468,9 +1473,11 @@ server cluster DoorLock = 257 { readonly attribute nullable DlLockState lockState = 0; readonly attribute DlLockType lockType = 1; readonly attribute boolean actuatorEnabled = 2; + readonly attribute int16u numberOfTotalUsersSupported = 17; readonly attribute int16u numberOfPINUsersSupported = 18; readonly attribute int8u maxPINCodeLength = 23; readonly attribute int8u minPINCodeLength = 24; + readonly attribute int8u numberOfCredentialsSupportedPerUser = 28; attribute access(write: manage) int32u autoRelockTime = 35; attribute access(write: manage) OperatingModeEnum operatingMode = 37; readonly attribute DlSupportedOperatingModes supportedOperatingModes = 38; @@ -1493,8 +1500,82 @@ server cluster DoorLock = 257 { optional OCTET_STRING PINCode = 0; } + request struct UnlockWithTimeoutRequest { + INT16U timeout = 0; + optional OCTET_STRING PINCode = 1; + } + + request struct SetUserRequest { + DataOperationTypeEnum operationType = 0; + INT16U userIndex = 1; + nullable CHAR_STRING userName = 2; + nullable INT32U userUniqueID = 3; + nullable UserStatusEnum userStatus = 4; + nullable UserTypeEnum userType = 5; + nullable CredentialRuleEnum credentialRule = 6; + } + + request struct GetUserRequest { + INT16U userIndex = 0; + } + + request struct ClearUserRequest { + INT16U userIndex = 0; + } + + request struct SetCredentialRequest { + DataOperationTypeEnum operationType = 0; + CredentialStruct credential = 1; + LONG_OCTET_STRING credentialData = 2; + nullable INT16U userIndex = 3; + nullable UserStatusEnum userStatus = 4; + nullable UserTypeEnum userType = 5; + } + + request struct GetCredentialStatusRequest { + CredentialStruct credential = 0; + } 
+ + request struct ClearCredentialRequest { + nullable CredentialStruct credential = 0; + } + + response struct GetUserResponse = 28 { + INT16U userIndex = 0; + nullable CHAR_STRING userName = 1; + nullable INT32U userUniqueID = 2; + nullable UserStatusEnum userStatus = 3; + nullable UserTypeEnum userType = 4; + nullable CredentialRuleEnum credentialRule = 5; + nullable CredentialStruct credentials[] = 6; + nullable fabric_idx creatorFabricIndex = 7; + nullable fabric_idx lastModifiedFabricIndex = 8; + nullable INT16U nextUserIndex = 9; + } + + response struct SetCredentialResponse = 35 { + DlStatus status = 0; + nullable INT16U userIndex = 1; + nullable INT16U nextCredentialIndex = 2; + } + + response struct GetCredentialStatusResponse = 37 { + boolean credentialExists = 0; + nullable INT16U userIndex = 1; + nullable fabric_idx creatorFabricIndex = 2; + nullable fabric_idx lastModifiedFabricIndex = 3; + nullable INT16U nextCredentialIndex = 4; + } + timed command LockDoor(LockDoorRequest): DefaultSuccess = 0; timed command UnlockDoor(UnlockDoorRequest): DefaultSuccess = 1; + timed command UnlockWithTimeout(UnlockWithTimeoutRequest): DefaultSuccess = 3; + timed command access(invoke: administer) SetUser(SetUserRequest): DefaultSuccess = 26; + command access(invoke: administer) GetUser(GetUserRequest): GetUserResponse = 27; + timed command access(invoke: administer) ClearUser(ClearUserRequest): DefaultSuccess = 29; + timed command access(invoke: administer) SetCredential(SetCredentialRequest): SetCredentialResponse = 34; + command access(invoke: administer) GetCredentialStatus(GetCredentialStatusRequest): GetCredentialStatusResponse = 36; + timed command access(invoke: administer) ClearCredential(ClearCredentialRequest): DefaultSuccess = 38; } endpoint 0 { @@ -1711,9 +1792,11 @@ endpoint 1 { ram attribute lockState default = 1; ram attribute lockType; ram attribute actuatorEnabled; + ram attribute numberOfTotalUsersSupported default = 2; ram attribute 
numberOfPINUsersSupported default = 2; ram attribute maxPINCodeLength default = 10; ram attribute minPINCodeLength default = 5; + ram attribute numberOfCredentialsSupportedPerUser default = 5; ram attribute autoRelockTime; ram attribute operatingMode; ram attribute supportedOperatingModes default = 0xFFF6; @@ -1724,7 +1807,7 @@ endpoint 1 { callback attribute generatedCommandList; callback attribute acceptedCommandList; callback attribute attributeList; - ram attribute featureMap default = 0x0081; + ram attribute featureMap default = 0x0181; ram attribute clusterRevision default = 6; } } diff --git a/examples/chef/devices/rootnode_doorlock_aNKYAreMXE.zap b/examples/chef/devices/rootnode_doorlock_aNKYAreMXE.zap index af99ca5266a961..d3f8b5cefaeb1e 100644 --- a/examples/chef/devices/rootnode_doorlock_aNKYAreMXE.zap +++ b/examples/chef/devices/rootnode_doorlock_aNKYAreMXE.zap @@ -1891,7 +1891,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1907,7 +1907,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -7040,6 +7040,62 @@ "source": "client", "incoming": 1, "outgoing": 0 + }, + { + "name": "UnlockWithTimeout", + "code": 3, + "mfgCode": null, + "source": "client", + "incoming": 1, + "outgoing": 1 + }, + { + "name": "SetUser", + "code": 26, + "mfgCode": null, + "source": "client", + "incoming": 1, + "outgoing": 0 + }, + { + "name": "GetUser", + "code": 27, + "mfgCode": null, + "source": "client", + "incoming": 1, + "outgoing": 0 + }, + { + "name": "ClearUser", + "code": 29, + "mfgCode": null, + "source": "client", + "incoming": 1, + "outgoing": 0 + }, + { + "name": "SetCredential", + "code": 34, + "mfgCode": null, + "source": "client", + "incoming": 1, + "outgoing": 0 + }, + { + "name": "GetCredentialStatus", + "code": 36, + "mfgCode": null, + 
"source": "client", + "incoming": 1, + "outgoing": 0 + }, + { + "name": "ClearCredential", + "code": 38, + "mfgCode": null, + "source": "client", + "incoming": 1, + "outgoing": 0 } ], "attributes": [ @@ -7084,6 +7140,32 @@ "define": "DOOR_LOCK_CLUSTER", "side": "server", "enabled": 1, + "commands": [ + { + "name": "GetUserResponse", + "code": 28, + "mfgCode": null, + "source": "server", + "incoming": 0, + "outgoing": 1 + }, + { + "name": "SetCredentialResponse", + "code": 35, + "mfgCode": null, + "source": "server", + "incoming": 0, + "outgoing": 1 + }, + { + "name": "GetCredentialStatusResponse", + "code": 37, + "mfgCode": null, + "source": "server", + "incoming": 0, + "outgoing": 1 + } + ], "attributes": [ { "name": "LockState", @@ -7203,11 +7285,11 @@ "mfgCode": null, "side": "server", "type": "int16u", - "included": 0, + "included": 1, "storageOption": "RAM", "singleton": 0, "bounded": 0, - "defaultValue": "0", + "defaultValue": "2", "reportable": 1, "minInterval": 1, "maxInterval": 65534, @@ -7373,6 +7455,22 @@ "maxInterval": 65534, "reportableChange": 0 }, + { + "name": "NumberOfCredentialsSupportedPerUser", + "code": 28, + "mfgCode": null, + "side": "server", + "type": "int8u", + "included": 1, + "storageOption": "RAM", + "singleton": 0, + "bounded": 0, + "defaultValue": "5", + "reportable": 1, + "minInterval": 1, + "maxInterval": 65534, + "reportableChange": 0 + }, { "name": "Language", "code": 33, @@ -7703,7 +7801,7 @@ "storageOption": "RAM", "singleton": 0, "bounded": 0, - "defaultValue": "0x0081", + "defaultValue": "0x0181", "reportable": 1, "minInterval": 1, "maxInterval": 65534, diff --git a/examples/chef/devices/rootnode_extendedcolorlight_8lcaaYJVAa.matter b/examples/chef/devices/rootnode_extendedcolorlight_8lcaaYJVAa.matter index 6acc5dc178c815..8e42c417cef86f 100644 --- a/examples/chef/devices/rootnode_extendedcolorlight_8lcaaYJVAa.matter +++ b/examples/chef/devices/rootnode_extendedcolorlight_8lcaaYJVAa.matter @@ -551,7 +551,7 @@ server cluster 
LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -566,14 +566,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/chef/devices/rootnode_extendedcolorlight_8lcaaYJVAa.zap b/examples/chef/devices/rootnode_extendedcolorlight_8lcaaYJVAa.zap index 96713712f568c1..ee043c19756934 100644 --- a/examples/chef/devices/rootnode_extendedcolorlight_8lcaaYJVAa.zap +++ b/examples/chef/devices/rootnode_extendedcolorlight_8lcaaYJVAa.zap @@ -1891,7 +1891,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1907,7 +1907,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/chef/devices/rootnode_fan_7N2TobIlOX.matter b/examples/chef/devices/rootnode_fan_7N2TobIlOX.matter index 2cc3f912a39804..af6104234a63ff 100644 --- a/examples/chef/devices/rootnode_fan_7N2TobIlOX.matter +++ b/examples/chef/devices/rootnode_fan_7N2TobIlOX.matter @@ -402,7 +402,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { 
kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -417,14 +417,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/chef/devices/rootnode_fan_7N2TobIlOX.zap b/examples/chef/devices/rootnode_fan_7N2TobIlOX.zap index ff86fca46f353a..56a127c5012700 100644 --- a/examples/chef/devices/rootnode_fan_7N2TobIlOX.zap +++ b/examples/chef/devices/rootnode_fan_7N2TobIlOX.zap @@ -2019,7 +2019,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -2035,7 +2035,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/chef/devices/rootnode_flowsensor_1zVxHedlaV.matter b/examples/chef/devices/rootnode_flowsensor_1zVxHedlaV.matter index 19e525c1a3b3a5..004787645dcf69 100644 --- a/examples/chef/devices/rootnode_flowsensor_1zVxHedlaV.matter +++ b/examples/chef/devices/rootnode_flowsensor_1zVxHedlaV.matter @@ -418,7 +418,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -433,14 +433,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : 
ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/chef/devices/rootnode_flowsensor_1zVxHedlaV.zap b/examples/chef/devices/rootnode_flowsensor_1zVxHedlaV.zap index 1d12bf9a2a33bc..be736085657aee 100644 --- a/examples/chef/devices/rootnode_flowsensor_1zVxHedlaV.zap +++ b/examples/chef/devices/rootnode_flowsensor_1zVxHedlaV.zap @@ -1891,7 +1891,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1907,7 +1907,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/chef/devices/rootnode_heatingcoolingunit_ncdGai1E5a.matter b/examples/chef/devices/rootnode_heatingcoolingunit_ncdGai1E5a.matter index ecb29a7f6a378e..9eddfdffe475d9 100644 --- a/examples/chef/devices/rootnode_heatingcoolingunit_ncdGai1E5a.matter +++ b/examples/chef/devices/rootnode_heatingcoolingunit_ncdGai1E5a.matter @@ -544,7 +544,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -559,14 +559,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - 
readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/chef/devices/rootnode_heatingcoolingunit_ncdGai1E5a.zap b/examples/chef/devices/rootnode_heatingcoolingunit_ncdGai1E5a.zap index 81f57a06c3d6d9..8c71d1a954b375 100644 --- a/examples/chef/devices/rootnode_heatingcoolingunit_ncdGai1E5a.zap +++ b/examples/chef/devices/rootnode_heatingcoolingunit_ncdGai1E5a.zap @@ -1891,7 +1891,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1907,7 +1907,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/chef/devices/rootnode_humiditysensor_Xyj4gda6Hb.matter b/examples/chef/devices/rootnode_humiditysensor_Xyj4gda6Hb.matter index 7bdea6102b7ee0..7a45ce19377ed4 100644 --- a/examples/chef/devices/rootnode_humiditysensor_Xyj4gda6Hb.matter +++ b/examples/chef/devices/rootnode_humiditysensor_Xyj4gda6Hb.matter @@ -418,7 +418,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -433,14 +433,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + 
attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/chef/devices/rootnode_humiditysensor_Xyj4gda6Hb.zap b/examples/chef/devices/rootnode_humiditysensor_Xyj4gda6Hb.zap index a4bb4386b283e2..2a41f49fd56f23 100644 --- a/examples/chef/devices/rootnode_humiditysensor_Xyj4gda6Hb.zap +++ b/examples/chef/devices/rootnode_humiditysensor_Xyj4gda6Hb.zap @@ -1891,7 +1891,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1907,7 +1907,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/chef/devices/rootnode_lightsensor_lZQycTFcJK.matter b/examples/chef/devices/rootnode_lightsensor_lZQycTFcJK.matter index e33407de00c7d2..b49f38eb025857 100644 --- a/examples/chef/devices/rootnode_lightsensor_lZQycTFcJK.matter +++ b/examples/chef/devices/rootnode_lightsensor_lZQycTFcJK.matter @@ -418,7 +418,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -433,14 +433,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly 
attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/chef/devices/rootnode_lightsensor_lZQycTFcJK.zap b/examples/chef/devices/rootnode_lightsensor_lZQycTFcJK.zap index e8ea73e2600d7e..1ad280a7a693bd 100644 --- a/examples/chef/devices/rootnode_lightsensor_lZQycTFcJK.zap +++ b/examples/chef/devices/rootnode_lightsensor_lZQycTFcJK.zap @@ -1891,7 +1891,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1907,7 +1907,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/chef/devices/rootnode_occupancysensor_iHyVgifZuo.matter b/examples/chef/devices/rootnode_occupancysensor_iHyVgifZuo.matter index 8ce80dc8b1a2fe..4e218927f80eee 100644 --- a/examples/chef/devices/rootnode_occupancysensor_iHyVgifZuo.matter +++ b/examples/chef/devices/rootnode_occupancysensor_iHyVgifZuo.matter @@ -418,7 +418,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -433,14 +433,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute 
event_id eventList[] = 65530; diff --git a/examples/chef/devices/rootnode_occupancysensor_iHyVgifZuo.zap b/examples/chef/devices/rootnode_occupancysensor_iHyVgifZuo.zap index dfe9ef1ae0b3b4..6f86fdbb29054f 100644 --- a/examples/chef/devices/rootnode_occupancysensor_iHyVgifZuo.zap +++ b/examples/chef/devices/rootnode_occupancysensor_iHyVgifZuo.zap @@ -1891,7 +1891,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1907,7 +1907,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/chef/devices/rootnode_onofflight_bbs1b7IaOV.matter b/examples/chef/devices/rootnode_onofflight_bbs1b7IaOV.matter index d5e6b200bf05ad..5c079a5eafd9e2 100644 --- a/examples/chef/devices/rootnode_onofflight_bbs1b7IaOV.matter +++ b/examples/chef/devices/rootnode_onofflight_bbs1b7IaOV.matter @@ -551,7 +551,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -566,14 +566,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/chef/devices/rootnode_onofflight_bbs1b7IaOV.zap 
b/examples/chef/devices/rootnode_onofflight_bbs1b7IaOV.zap index 7619f0d3b4a53b..4deb87b2ccef7b 100644 --- a/examples/chef/devices/rootnode_onofflight_bbs1b7IaOV.zap +++ b/examples/chef/devices/rootnode_onofflight_bbs1b7IaOV.zap @@ -1891,7 +1891,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1907,7 +1907,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/chef/devices/rootnode_onofflightswitch_FsPlMr090Q.matter b/examples/chef/devices/rootnode_onofflightswitch_FsPlMr090Q.matter index b79835b8bf02ca..8dcd107506e0d8 100644 --- a/examples/chef/devices/rootnode_onofflightswitch_FsPlMr090Q.matter +++ b/examples/chef/devices/rootnode_onofflightswitch_FsPlMr090Q.matter @@ -490,7 +490,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -505,14 +505,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/chef/devices/rootnode_onofflightswitch_FsPlMr090Q.zap b/examples/chef/devices/rootnode_onofflightswitch_FsPlMr090Q.zap index 5bc4338e294bc7..95855e2279edb5 100644 --- 
a/examples/chef/devices/rootnode_onofflightswitch_FsPlMr090Q.zap +++ b/examples/chef/devices/rootnode_onofflightswitch_FsPlMr090Q.zap @@ -1891,7 +1891,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1907,7 +1907,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/chef/devices/rootnode_onoffpluginunit_Wtf8ss5EBY.matter b/examples/chef/devices/rootnode_onoffpluginunit_Wtf8ss5EBY.matter index 9d6971887247e6..2c7d1b9e16bab8 100644 --- a/examples/chef/devices/rootnode_onoffpluginunit_Wtf8ss5EBY.matter +++ b/examples/chef/devices/rootnode_onoffpluginunit_Wtf8ss5EBY.matter @@ -451,7 +451,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -466,14 +466,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/chef/devices/rootnode_onoffpluginunit_Wtf8ss5EBY.zap b/examples/chef/devices/rootnode_onoffpluginunit_Wtf8ss5EBY.zap index b1a6670327e29a..075d42554911ee 100644 --- a/examples/chef/devices/rootnode_onoffpluginunit_Wtf8ss5EBY.zap +++ 
b/examples/chef/devices/rootnode_onoffpluginunit_Wtf8ss5EBY.zap @@ -1891,7 +1891,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1907,7 +1907,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/chef/devices/rootnode_pressuresensor_s0qC9wLH4k.matter b/examples/chef/devices/rootnode_pressuresensor_s0qC9wLH4k.matter index 6b70c03624839b..146e6d58ad75f3 100644 --- a/examples/chef/devices/rootnode_pressuresensor_s0qC9wLH4k.matter +++ b/examples/chef/devices/rootnode_pressuresensor_s0qC9wLH4k.matter @@ -423,7 +423,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -438,14 +438,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/chef/devices/rootnode_pressuresensor_s0qC9wLH4k.zap b/examples/chef/devices/rootnode_pressuresensor_s0qC9wLH4k.zap index 7e53ac2be19f5b..6062728ccfe563 100644 --- a/examples/chef/devices/rootnode_pressuresensor_s0qC9wLH4k.zap +++ b/examples/chef/devices/rootnode_pressuresensor_s0qC9wLH4k.zap @@ -1891,7 +1891,7 @@ "code": 0, "mfgCode": null, "side": 
"server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1907,7 +1907,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/chef/devices/rootnode_speaker_RpzeXdimqA.matter b/examples/chef/devices/rootnode_speaker_RpzeXdimqA.matter index 577f484f8b2d1c..0c1041f3c77441 100644 --- a/examples/chef/devices/rootnode_speaker_RpzeXdimqA.matter +++ b/examples/chef/devices/rootnode_speaker_RpzeXdimqA.matter @@ -545,7 +545,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -560,14 +560,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/chef/devices/rootnode_speaker_RpzeXdimqA.zap b/examples/chef/devices/rootnode_speaker_RpzeXdimqA.zap index 393e654f72d615..2e6d8405d6f774 100644 --- a/examples/chef/devices/rootnode_speaker_RpzeXdimqA.zap +++ b/examples/chef/devices/rootnode_speaker_RpzeXdimqA.zap @@ -1891,7 +1891,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1907,7 +1907,7 @@ "code": 1, "mfgCode": null, "side": 
"server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/chef/devices/rootnode_temperaturesensor_Qy1zkNW7c3.matter b/examples/chef/devices/rootnode_temperaturesensor_Qy1zkNW7c3.matter index a7ba1b7430bd8a..951abb3781f2b6 100644 --- a/examples/chef/devices/rootnode_temperaturesensor_Qy1zkNW7c3.matter +++ b/examples/chef/devices/rootnode_temperaturesensor_Qy1zkNW7c3.matter @@ -418,7 +418,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -433,14 +433,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/chef/devices/rootnode_temperaturesensor_Qy1zkNW7c3.zap b/examples/chef/devices/rootnode_temperaturesensor_Qy1zkNW7c3.zap index ea867b72c9d457..ffe39cbd285b64 100644 --- a/examples/chef/devices/rootnode_temperaturesensor_Qy1zkNW7c3.zap +++ b/examples/chef/devices/rootnode_temperaturesensor_Qy1zkNW7c3.zap @@ -1891,7 +1891,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1907,7 +1907,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": 
"NVM", "singleton": 0, diff --git a/examples/chef/devices/rootnode_thermostat_bm3fb8dhYi.matter b/examples/chef/devices/rootnode_thermostat_bm3fb8dhYi.matter index 6d4db69f5679d9..8652dac6608b56 100644 --- a/examples/chef/devices/rootnode_thermostat_bm3fb8dhYi.matter +++ b/examples/chef/devices/rootnode_thermostat_bm3fb8dhYi.matter @@ -404,7 +404,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -419,14 +419,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/chef/devices/rootnode_thermostat_bm3fb8dhYi.zap b/examples/chef/devices/rootnode_thermostat_bm3fb8dhYi.zap index daa586e3d05576..7d40c50e40b1ff 100644 --- a/examples/chef/devices/rootnode_thermostat_bm3fb8dhYi.zap +++ b/examples/chef/devices/rootnode_thermostat_bm3fb8dhYi.zap @@ -1891,7 +1891,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1907,7 +1907,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/chef/devices/rootnode_windowcovering_RLCxaGi9Yx.matter 
b/examples/chef/devices/rootnode_windowcovering_RLCxaGi9Yx.matter index 745b14c784933f..7eb2860ebe40b4 100644 --- a/examples/chef/devices/rootnode_windowcovering_RLCxaGi9Yx.matter +++ b/examples/chef/devices/rootnode_windowcovering_RLCxaGi9Yx.matter @@ -404,7 +404,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -419,14 +419,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/chef/devices/rootnode_windowcovering_RLCxaGi9Yx.zap b/examples/chef/devices/rootnode_windowcovering_RLCxaGi9Yx.zap index 3faae2686c17b6..c7e89df17bd247 100644 --- a/examples/chef/devices/rootnode_windowcovering_RLCxaGi9Yx.zap +++ b/examples/chef/devices/rootnode_windowcovering_RLCxaGi9Yx.zap @@ -1891,7 +1891,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1907,7 +1907,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/chef/linux/with_pw_rpc.gni b/examples/chef/linux/with_pw_rpc.gni index 416733753a8909..6d90e3da2c7ac7 100644 --- a/examples/chef/linux/with_pw_rpc.gni +++ 
b/examples/chef/linux/with_pw_rpc.gni @@ -32,6 +32,8 @@ pw_rpc_system_server_BACKEND = "${chip_root}/config/linux/lib/pw_rpc:pw_rpc" dir_pw_third_party_nanopb = "${chip_root}/third_party/nanopb/repo" pw_chrono_SYSTEM_CLOCK_BACKEND = "$dir_pw_chrono_stl:system_clock" pw_sync_MUTEX_BACKEND = "$dir_pw_sync_stl:mutex_backend" +pw_thread_YIELD_BACKEND = "$dir_pw_thread_stl:yield" +pw_thread_SLEEP_BACKEND = "$dir_pw_thread_stl:sleep" pw_build_LINK_DEPS = [ "$dir_pw_assert:impl", diff --git a/examples/chef/sample_app_util/zap_file_parser.py b/examples/chef/sample_app_util/zap_file_parser.py index fddc5c9e8efcd2..6f186e86e27155 100644 --- a/examples/chef/sample_app_util/zap_file_parser.py +++ b/examples/chef/sample_app_util/zap_file_parser.py @@ -35,7 +35,7 @@ import json import os import re -from typing import Dict, List, Literal, Optional, Sequence, TypedDict, Union +from typing import Dict, List, Optional, Sequence, TypedDict, Union try: import yaml @@ -154,7 +154,8 @@ def _convert_metadata_to_hashable_digest(metadata_input: Sequence[Dict[str, Endp for cluster_type in ["client_clusters", "server_clusters"]: for cluster_key in list(endpoint_obj[cluster_type].keys()): cluster_id = _get_id(cluster_key) - endpoint_obj[cluster_type][cluster_id] = endpoint_obj[cluster_type].pop(cluster_key) + endpoint_obj[cluster_type][cluster_id] = endpoint_obj[cluster_type].pop( + cluster_key) cluster_obj = endpoint_obj[cluster_type][cluster_id] # Replace attribute names @@ -162,12 +163,14 @@ def _convert_metadata_to_hashable_digest(metadata_input: Sequence[Dict[str, Endp attribute_keys = list(cluster_obj["attributes"]) for attribute_key in attribute_keys: attribute_id = _get_id(attribute_key) - attribute_obj[attribute_id] = attribute_obj.pop(attribute_key) + attribute_obj[attribute_id] = attribute_obj.pop( + attribute_key) # Replace command names if "commands" in cluster_obj: command_keys = cluster_obj["commands"] - cluster_obj["commands"] = [_get_id(x) for x in command_keys] + 
cluster_obj["commands"] = [ + _get_id(x) for x in command_keys] cluster_obj["commands"].sort() return json.dumps(metadata, sort_keys=True) @@ -277,7 +280,8 @@ def generate_metadata( if not cluster["enabled"]: continue - cluster_ref = _convert_metadata_name(cluster["name"], cluster["code"]) + cluster_ref = _convert_metadata_name( + cluster["name"], cluster["code"]) if include_commands: cluster_obj: ClusterType = {"attributes": {}, "commands": []} @@ -288,7 +292,8 @@ def generate_metadata( attribute_allowed = ( attribute_allow_list is None or str(attribute["code"]) in attribute_allow_list) if attribute["included"] and attribute_allowed: - attribute_ref = _convert_metadata_name(attribute["name"], attribute["code"]) + attribute_ref = _convert_metadata_name( + attribute["name"], attribute["code"]) value = _read_value(attribute["defaultValue"]) cluster_obj["attributes"][attribute_ref] = value @@ -298,7 +303,8 @@ def generate_metadata( if include_commands: for command in cluster["commands"]: - command_ref = _convert_metadata_name(command["name"], command["code"]) + command_ref = _convert_metadata_name( + command["name"], command["code"]) if cluster["side"] == "client" and command["outgoing"] == 1: cluster_obj["commands"].append(command_ref) elif cluster["side"] == "server" and command["incoming"] == 1: diff --git a/examples/chip-tool/commands/clusters/ComplexArgument.h b/examples/chip-tool/commands/clusters/ComplexArgument.h index 0251c5fd6bf8ff..37a219325d66f2 100644 --- a/examples/chip-tool/commands/clusters/ComplexArgument.h +++ b/examples/chip-tool/commands/clusters/ComplexArgument.h @@ -369,6 +369,8 @@ class ComplexArgument virtual ~ComplexArgument() {} virtual CHIP_ERROR Parse(const char * label, const char * json) = 0; + + virtual void Reset() = 0; }; template @@ -402,6 +404,8 @@ class TypedComplexArgument : public ComplexArgument return ComplexArgumentParser::Setup(label, *mRequest, value); } + void Reset() { *mRequest = T(); } + private: T * mRequest; }; diff 
--git a/examples/chip-tool/commands/common/Command.cpp b/examples/chip-tool/commands/common/Command.cpp index f08d87a2c7ed19..c1d62793024ef7 100644 --- a/examples/chip-tool/commands/common/Command.cpp +++ b/examples/chip-tool/commands/common/Command.cpp @@ -1018,6 +1018,11 @@ void Command::ResetArguments() } vectorArgument->clear(); } + else if (type == ArgumentType::Complex) + { + auto argument = static_cast(arg.value); + argument->Reset(); + } } } } diff --git a/examples/common/tracing/BUILD.gn b/examples/common/tracing/BUILD.gn index d63a4ae84e043c..86ea5a1f63b4ff 100644 --- a/examples/common/tracing/BUILD.gn +++ b/examples/common/tracing/BUILD.gn @@ -26,6 +26,8 @@ source_set("trace_handlers") { deps = [ "${chip_root}/src/lib" ] public_configs = [ ":default_config" ] + + cflags = [ "-Wconversion" ] } source_set("trace_handlers_decoder") { @@ -47,6 +49,8 @@ source_set("trace_handlers_decoder") { deps = [ "${chip_root}/src/lib" ] public_deps = [ "${chip_root}/third_party/jsoncpp" ] + + cflags = [ "-Wconversion" ] } executable("chip-trace-decoder") { @@ -73,4 +77,6 @@ executable("chip-trace-decoder") { "${chip_root}/src/lib", "${chip_root}/third_party/jsoncpp", ] + + cflags = [ "-Wconversion" ] } diff --git a/examples/common/tracing/TraceDecoder.cpp b/examples/common/tracing/TraceDecoder.cpp index eedfd73fb748fd..2bb3c5ee213ce0 100644 --- a/examples/common/tracing/TraceDecoder.cpp +++ b/examples/common/tracing/TraceDecoder.cpp @@ -131,7 +131,7 @@ CHIP_ERROR TraceDecoder::ReadString(const char * str) CHIP_ERROR TraceDecoder::LogJSON(Json::Value & json) { auto protocol = json[kProtocolIdKey].asLargestUInt(); - uint16_t vendorId = protocol >> 16; + uint16_t vendorId = static_cast(protocol >> 16); uint16_t protocolId = protocol & 0xFFFF; if (!mOptions.IsProtocolEnabled(chip::Protocols::Id(chip::VendorId(vendorId), protocolId))) { @@ -213,7 +213,7 @@ CHIP_ERROR TraceDecoder::LogAndConsumeProtocol(Json::Value & json) auto id = json[kProtocolIdKey].asLargestUInt(); auto 
opcode = static_cast(json[kProtocolCodeKey].asLargestUInt()); - uint16_t vendorId = (id >> 16); + uint16_t vendorId = static_cast(id >> 16); uint16_t protocolId = (id & 0xFFFF); chip::StringBuilderBase builder(protocolInfo, sizeof(protocolInfo)); @@ -228,7 +228,7 @@ CHIP_ERROR TraceDecoder::LogAndConsumeProtocol(Json::Value & json) builder.Add(protocolDetail); builder.Add(" ["); - builder.Add(ToProtocolName(id)); + builder.Add(ToProtocolName(vendorId, protocolId)); builder.Add(" "); memset(protocolDetail, '\0', sizeof(protocolDetail)); @@ -236,7 +236,7 @@ CHIP_ERROR TraceDecoder::LogAndConsumeProtocol(Json::Value & json) builder.Add(protocolDetail); builder.Add(" / "); - builder.Add(ToProtocolMessageTypeName(id, opcode)); + builder.Add(ToProtocolMessageTypeName(vendorId, protocolId, opcode)); builder.Add(" "); memset(protocolDetail, '\0', sizeof(protocolDetail)); @@ -278,11 +278,14 @@ CHIP_ERROR TraceDecoder::MaybeLogAndConsumePayload(Json::Value & json, bool isRe Log("data", payload.c_str()); } - bool shouldDecode = !isResponse || mOptions.mEnableProtocolInteractionModelResponse; - auto payload = json[kPayloadDataKey].asString(); - auto protocolId = json[kProtocolIdKey].asLargestUInt(); - auto protocolCode = json[kProtocolCodeKey].asLargestUInt(); - ReturnErrorOnFailure(LogAsProtocolMessage(protocolId, protocolCode, payload.c_str(), payload.size(), shouldDecode)); + bool shouldDecode = !isResponse || mOptions.mEnableProtocolInteractionModelResponse; + auto payload = json[kPayloadDataKey].asString(); + auto id = json[kProtocolIdKey].asLargestUInt(); + uint16_t vendorId = static_cast(id >> 16); + uint16_t protocolId = (id & 0xFFFF); + auto protocolCode = static_cast(json[kProtocolCodeKey].asLargestUInt()); + ReturnErrorOnFailure( + LogAsProtocolMessage(vendorId, protocolId, protocolCode, payload.c_str(), payload.size(), shouldDecode)); Log(" "); } diff --git a/examples/common/tracing/decoder/TraceDecoderProtocols.cpp 
b/examples/common/tracing/decoder/TraceDecoderProtocols.cpp index d50ccd77147b8f..e6881bd4361c8c 100644 --- a/examples/common/tracing/decoder/TraceDecoderProtocols.cpp +++ b/examples/common/tracing/decoder/TraceDecoderProtocols.cpp @@ -44,11 +44,11 @@ void ENFORCE_FORMAT(1, 2) TLVPrettyPrinter(const char * aFormat, ...) namespace chip { namespace trace { -const char * ToProtocolName(uint16_t protocolId) +const char * ToProtocolName(uint16_t vendorId, uint16_t protocolId) { const char * name = nullptr; - auto protocol = Protocols::Id(VendorId::Common, protocolId); + auto protocol = Protocols::Id(static_cast(vendorId), protocolId); if (protocol == Protocols::SecureChannel::Id) { name = secure_channel::ToProtocolName(); @@ -77,11 +77,11 @@ const char * ToProtocolName(uint16_t protocolId) return name; } -const char * ToProtocolMessageTypeName(uint16_t protocolId, uint8_t protocolCode) +const char * ToProtocolMessageTypeName(uint16_t vendorId, uint16_t protocolId, uint8_t protocolCode) { const char * name = nullptr; - auto protocol = Protocols::Id(VendorId::Common, protocolId); + auto protocol = Protocols::Id(static_cast(vendorId), protocolId); if (protocol == Protocols::SecureChannel::Id) { name = secure_channel::ToProtocolMessageTypeName(protocolCode); @@ -110,7 +110,7 @@ const char * ToProtocolMessageTypeName(uint16_t protocolId, uint8_t protocolCode return name; } -CHIP_ERROR LogAsProtocolMessage(uint16_t protocolId, uint8_t protocolCode, const char * payload, size_t len, +CHIP_ERROR LogAsProtocolMessage(uint16_t vendorId, uint16_t protocolId, uint8_t protocolCode, const char * payload, size_t len, bool interactionModelResponse) { constexpr uint16_t kMaxPayloadLen = 2048; @@ -120,7 +120,7 @@ CHIP_ERROR LogAsProtocolMessage(uint16_t protocolId, uint8_t protocolCode, const CHIP_ERROR err = CHIP_NO_ERROR; - auto protocol = Protocols::Id(VendorId::Common, protocolId); + auto protocol = Protocols::Id(static_cast(vendorId), protocolId); if (protocol == 
Protocols::SecureChannel::Id) { err = secure_channel::LogAsProtocolMessage(protocolCode, data, dataLen); diff --git a/examples/common/tracing/decoder/TraceDecoderProtocols.h b/examples/common/tracing/decoder/TraceDecoderProtocols.h index ae387c82e007df..3c9a49234f7a4d 100644 --- a/examples/common/tracing/decoder/TraceDecoderProtocols.h +++ b/examples/common/tracing/decoder/TraceDecoderProtocols.h @@ -26,11 +26,11 @@ namespace chip { namespace trace { -const char * ToProtocolName(uint16_t protocolId); +const char * ToProtocolName(uint16_t vendorId, uint16_t protocolId); -const char * ToProtocolMessageTypeName(uint16_t protocolId, uint8_t protocolCode); +const char * ToProtocolMessageTypeName(uint16_t vendorId, uint16_t protocolId, uint8_t protocolCode); -CHIP_ERROR LogAsProtocolMessage(uint16_t protocolId, uint8_t protocolCode, const char * payload, size_t len, +CHIP_ERROR LogAsProtocolMessage(uint16_t vendorId, uint16_t protocolId, uint8_t protocolCode, const char * payload, size_t len, bool interactionModelResponse); } // namespace trace diff --git a/examples/common/tracing/decoder/logging/Log.cpp b/examples/common/tracing/decoder/logging/Log.cpp index bc3cb74d82f117..4fa5d9dfeb8bfc 100644 --- a/examples/common/tracing/decoder/logging/Log.cpp +++ b/examples/common/tracing/decoder/logging/Log.cpp @@ -36,12 +36,12 @@ void ENFORCE_FORMAT(1, 2) LogFormatted(const char * format, ...) 
{ char buffer[CHIP_CONFIG_LOG_MESSAGE_MAX_SIZE] = {}; - uint8_t indentation = gIndentLevel * kSpacePerIndent; + int indentation = gIndentLevel * kSpacePerIndent; snprintf(buffer, sizeof(buffer), "%*s", indentation, ""); va_list args; va_start(args, format); - vsnprintf(&buffer[indentation], sizeof(buffer) - indentation, format, args); + vsnprintf(&buffer[indentation], sizeof(buffer) - static_cast(indentation), format, args); va_end(args); ChipLogDetail(DataManagement, "%s", buffer); diff --git a/examples/common/tracing/decoder/logging/ToCertificateString.cpp b/examples/common/tracing/decoder/logging/ToCertificateString.cpp index fcc56d9ab50a37..ee2dfe30a9bc10 100644 --- a/examples/common/tracing/decoder/logging/ToCertificateString.cpp +++ b/examples/common/tracing/decoder/logging/ToCertificateString.cpp @@ -19,6 +19,7 @@ #include "ToCertificateString.h" #include +#include #include namespace { @@ -40,6 +41,12 @@ const char * ToCertificate(const chip::ByteSpan & source, chip::MutableCharSpan return destination.data(); } + if (!chip::CanCastTo(source.size())) + { + ChipLogError(DataManagement, "The certificate is too large to do base64 conversion on"); + return destination.data(); + } + size_t base64DataLen = BASE64_ENCODED_LEN(source.size()); size_t bufferLen = base64DataLen + 1; // add one character for null-terminator if (bufferLen + strlen(header) + strlen(footer) > destination.size()) @@ -51,7 +58,7 @@ const char * ToCertificate(const chip::ByteSpan & source, chip::MutableCharSpan chip::Platform::ScopedMemoryBuffer str; str.Alloc(bufferLen); - auto encodedLen = chip::Base64Encode(source.data(), source.size(), str.Get()); + auto encodedLen = chip::Base64Encode(source.data(), static_cast(source.size()), str.Get()); str.Get()[encodedLen] = '\0'; if (IsChipCertificate(source)) @@ -79,7 +86,8 @@ const char * ToCertificate(const chip::ByteSpan & source, chip::MutableCharSpan snprintf(destination.data(), destination.size(), "%s\n", header); for (; inIndex < 
base64DataLen; inIndex += 64) { - outIndex += snprintf(&destination.data()[outIndex], destination.size() - outIndex, "%.64s\n", &str[inIndex]); + auto charsPrinted = snprintf(&destination.data()[outIndex], destination.size() - outIndex, "%.64s\n", &str[inIndex]); + outIndex += static_cast(charsPrinted); } snprintf(&destination.data()[outIndex], destination.size() - outIndex, "%s", footer); } diff --git a/examples/contact-sensor-app/contact-sensor-common/contact-sensor-app.matter b/examples/contact-sensor-app/contact-sensor-common/contact-sensor-app.matter index de18597ede5762..7b59e70d22e86c 100644 --- a/examples/contact-sensor-app/contact-sensor-common/contact-sensor-app.matter +++ b/examples/contact-sensor-app/contact-sensor-common/contact-sensor-app.matter @@ -407,7 +407,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -422,14 +422,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/contact-sensor-app/contact-sensor-common/contact-sensor-app.zap b/examples/contact-sensor-app/contact-sensor-common/contact-sensor-app.zap index 8bf1c29f14ecc8..d93ae8c35658a2 100644 --- a/examples/contact-sensor-app/contact-sensor-common/contact-sensor-app.zap +++ 
b/examples/contact-sensor-app/contact-sensor-common/contact-sensor-app.zap @@ -1971,7 +1971,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1987,7 +1987,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/dynamic-bridge-app/bridge-common/bridge-app.matter b/examples/dynamic-bridge-app/bridge-common/bridge-app.matter index 31f37a4e061ba2..b84898e28a475f 100644 --- a/examples/dynamic-bridge-app/bridge-common/bridge-app.matter +++ b/examples/dynamic-bridge-app/bridge-common/bridge-app.matter @@ -522,7 +522,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -537,14 +537,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/dynamic-bridge-app/bridge-common/bridge-app.zap b/examples/dynamic-bridge-app/bridge-common/bridge-app.zap index 8170a02def0490..570bfd85a49dc4 100644 --- a/examples/dynamic-bridge-app/bridge-common/bridge-app.zap +++ b/examples/dynamic-bridge-app/bridge-common/bridge-app.zap @@ -1156,7 +1156,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": 
"HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1172,7 +1172,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/dynamic-bridge-app/linux/with_pw_rpc.gni b/examples/dynamic-bridge-app/linux/with_pw_rpc.gni index 416733753a8909..6d90e3da2c7ac7 100644 --- a/examples/dynamic-bridge-app/linux/with_pw_rpc.gni +++ b/examples/dynamic-bridge-app/linux/with_pw_rpc.gni @@ -32,6 +32,8 @@ pw_rpc_system_server_BACKEND = "${chip_root}/config/linux/lib/pw_rpc:pw_rpc" dir_pw_third_party_nanopb = "${chip_root}/third_party/nanopb/repo" pw_chrono_SYSTEM_CLOCK_BACKEND = "$dir_pw_chrono_stl:system_clock" pw_sync_MUTEX_BACKEND = "$dir_pw_sync_stl:mutex_backend" +pw_thread_YIELD_BACKEND = "$dir_pw_thread_stl:yield" +pw_thread_SLEEP_BACKEND = "$dir_pw_thread_stl:sleep" pw_build_LINK_DEPS = [ "$dir_pw_assert:impl", diff --git a/examples/light-switch-app/ameba/main/DeviceCallbacks.cpp b/examples/light-switch-app/ameba/main/DeviceCallbacks.cpp index babcc549eba3bb..cc855031952eda 100644 --- a/examples/light-switch-app/ameba/main/DeviceCallbacks.cpp +++ b/examples/light-switch-app/ameba/main/DeviceCallbacks.cpp @@ -127,9 +127,6 @@ void DeviceCallbacks::PostAttributeChangeCallback(EndpointId endpointId, Cluster void DeviceCallbacks::OnInternetConnectivityChange(const ChipDeviceEvent * event) { -#if CHIP_DEVICE_CONFIG_ENABLE_OTA_REQUESTOR - static bool isOTAInitialized = false; -#endif if (event->InternetConnectivityChange.IPv4 == kConnectivity_Established) { ChipLogProgress(DeviceLayer, "IPv4 Server ready..."); @@ -145,11 +142,10 @@ void DeviceCallbacks::OnInternetConnectivityChange(const ChipDeviceEvent * event chip::app::DnssdServer::Instance().StartServer(); #if CHIP_DEVICE_CONFIG_ENABLE_OTA_REQUESTOR // Init OTA requestor only when we have gotten IPv6 address - if (!isOTAInitialized) + if 
(OTAInitializer::Instance().CheckInit()) { chip::DeviceLayer::SystemLayer().StartTimer(chip::System::Clock::Seconds32(kInitOTARequestorDelaySec), InitOTARequestorHandler, nullptr); - isOTAInitialized = true; } #endif } diff --git a/examples/light-switch-app/light-switch-common/light-switch-app.matter b/examples/light-switch-app/light-switch-common/light-switch-app.matter index b8a654a291b15f..4a92de568eadd7 100644 --- a/examples/light-switch-app/light-switch-common/light-switch-app.matter +++ b/examples/light-switch-app/light-switch-common/light-switch-app.matter @@ -621,7 +621,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -636,14 +636,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/light-switch-app/light-switch-common/light-switch-app.zap b/examples/light-switch-app/light-switch-common/light-switch-app.zap index 49761fdea15576..e38ff2637a6ad9 100644 --- a/examples/light-switch-app/light-switch-common/light-switch-app.zap +++ b/examples/light-switch-app/light-switch-common/light-switch-app.zap @@ -2019,7 +2019,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -2035,7 +2035,7 @@ "code": 1, 
"mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/lighting-app/ameba/main/DeviceCallbacks.cpp b/examples/lighting-app/ameba/main/DeviceCallbacks.cpp index 957512f727ba1a..d6fd23d9521451 100644 --- a/examples/lighting-app/ameba/main/DeviceCallbacks.cpp +++ b/examples/lighting-app/ameba/main/DeviceCallbacks.cpp @@ -107,10 +107,6 @@ void DeviceCallbacks::DeviceEventCallback(const ChipDeviceEvent * event, intptr_ void DeviceCallbacks::OnInternetConnectivityChange(const ChipDeviceEvent * event) { -#if CHIP_DEVICE_CONFIG_ENABLE_OTA_REQUESTOR - static bool isOTAInitialized = false; -#endif - if (event->InternetConnectivityChange.IPv4 == kConnectivity_Established) { printf("IPv4 Server ready..."); @@ -126,11 +122,10 @@ void DeviceCallbacks::OnInternetConnectivityChange(const ChipDeviceEvent * event chip::app::DnssdServer::Instance().StartServer(); #if CHIP_DEVICE_CONFIG_ENABLE_OTA_REQUESTOR // Init OTA requestor only when we have gotten IPv6 address - if (!isOTAInitialized) + if (OTAInitializer::Instance().CheckInit()) { chip::DeviceLayer::SystemLayer().StartTimer(chip::System::Clock::Seconds32(kInitOTARequestorDelaySec), InitOTARequestorHandler, nullptr); - isOTAInitialized = true; } #endif } diff --git a/examples/lighting-app/lighting-common/lighting-app.matter b/examples/lighting-app/lighting-common/lighting-app.matter index f4392a598e6445..73f3b486a12dbd 100644 --- a/examples/lighting-app/lighting-common/lighting-app.matter +++ b/examples/lighting-app/lighting-common/lighting-app.matter @@ -575,7 +575,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -590,14 +590,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr 
= 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/lighting-app/lighting-common/lighting-app.zap b/examples/lighting-app/lighting-common/lighting-app.zap index 7547cc26afd149..755f82a37b2630 100644 --- a/examples/lighting-app/lighting-common/lighting-app.zap +++ b/examples/lighting-app/lighting-common/lighting-app.zap @@ -1971,7 +1971,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1987,7 +1987,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/lighting-app/linux/with_pw_rpc.gni b/examples/lighting-app/linux/with_pw_rpc.gni index 416733753a8909..6d90e3da2c7ac7 100644 --- a/examples/lighting-app/linux/with_pw_rpc.gni +++ b/examples/lighting-app/linux/with_pw_rpc.gni @@ -32,6 +32,8 @@ pw_rpc_system_server_BACKEND = "${chip_root}/config/linux/lib/pw_rpc:pw_rpc" dir_pw_third_party_nanopb = "${chip_root}/third_party/nanopb/repo" pw_chrono_SYSTEM_CLOCK_BACKEND = "$dir_pw_chrono_stl:system_clock" pw_sync_MUTEX_BACKEND = "$dir_pw_sync_stl:mutex_backend" +pw_thread_YIELD_BACKEND = "$dir_pw_thread_stl:yield" +pw_thread_SLEEP_BACKEND = "$dir_pw_thread_stl:sleep" pw_build_LINK_DEPS = [ "$dir_pw_assert:impl", diff --git a/examples/lighting-app/nxp/zap/lighting-on-off.zap b/examples/lighting-app/nxp/zap/lighting-on-off.zap index e7aa91adddd521..d50cbd2c056b6d 100644 
--- a/examples/lighting-app/nxp/zap/lighting-on-off.zap +++ b/examples/lighting-app/nxp/zap/lighting-on-off.zap @@ -1971,7 +1971,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1987,7 +1987,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/lighting-app/silabs/efr32/data_model/lighting-thread-app.matter b/examples/lighting-app/silabs/efr32/data_model/lighting-thread-app.matter index 9783baccbb3679..34ecc1a58681f0 100644 --- a/examples/lighting-app/silabs/efr32/data_model/lighting-thread-app.matter +++ b/examples/lighting-app/silabs/efr32/data_model/lighting-thread-app.matter @@ -575,7 +575,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -590,14 +590,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/lighting-app/silabs/efr32/data_model/lighting-thread-app.zap b/examples/lighting-app/silabs/efr32/data_model/lighting-thread-app.zap index 9d62f2ced7f06c..e047beae16bd01 100644 --- a/examples/lighting-app/silabs/efr32/data_model/lighting-thread-app.zap +++ 
b/examples/lighting-app/silabs/efr32/data_model/lighting-thread-app.zap @@ -1883,7 +1883,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1899,7 +1899,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/lighting-app/silabs/efr32/data_model/lighting-wifi-app.matter b/examples/lighting-app/silabs/efr32/data_model/lighting-wifi-app.matter index 72b3162a9f9d1f..0ca16845849b6a 100644 --- a/examples/lighting-app/silabs/efr32/data_model/lighting-wifi-app.matter +++ b/examples/lighting-app/silabs/efr32/data_model/lighting-wifi-app.matter @@ -575,7 +575,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -590,14 +590,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/lighting-app/silabs/efr32/data_model/lighting-wifi-app.zap b/examples/lighting-app/silabs/efr32/data_model/lighting-wifi-app.zap index c6b92d047da3e0..a005282133c0da 100644 --- a/examples/lighting-app/silabs/efr32/data_model/lighting-wifi-app.zap +++ 
b/examples/lighting-app/silabs/efr32/data_model/lighting-wifi-app.zap @@ -1909,7 +1909,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1925,7 +1925,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/lock-app/lock-common/lock-app.matter b/examples/lock-app/lock-common/lock-app.matter index 1d4b51515a7ea2..3f175fc88bea6a 100644 --- a/examples/lock-app/lock-common/lock-app.matter +++ b/examples/lock-app/lock-common/lock-app.matter @@ -384,7 +384,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -399,14 +399,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/lock-app/lock-common/lock-app.zap b/examples/lock-app/lock-common/lock-app.zap index be3458f81d5d40..11236cb97dedaf 100644 --- a/examples/lock-app/lock-common/lock-app.zap +++ b/examples/lock-app/lock-common/lock-app.zap @@ -1719,7 +1719,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1735,7 +1735,7 @@ 
"code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/lock-app/nxp/zap/lock-app.zap b/examples/lock-app/nxp/zap/lock-app.zap index 8f38946f65f15e..5e68c0346481b1 100644 --- a/examples/lock-app/nxp/zap/lock-app.zap +++ b/examples/lock-app/nxp/zap/lock-app.zap @@ -1971,7 +1971,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1987,7 +1987,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/minimal-mdns/server.cpp b/examples/minimal-mdns/server.cpp index 1bee06f8773b3b..44c03c784c4ab9 100644 --- a/examples/minimal-mdns/server.cpp +++ b/examples/minimal-mdns/server.cpp @@ -165,7 +165,7 @@ class ReplyDelegate : public mdns::Minimal::ServerDelegate, public mdns::Minimal mdns::Minimal::ResponseSender * mResponder; const Inet::IPPacketInfo * mCurrentSource = nullptr; - uint32_t mMessageId = 0; + uint16_t mMessageId = 0; }; mdns::Minimal::Server<10 /* endpoints */> gMdnsServer; diff --git a/examples/ota-provider-app/ota-provider-common/ota-provider-app.matter b/examples/ota-provider-app/ota-provider-common/ota-provider-app.matter index 60a37e2d65f110..2bb77fc4bd7a03 100644 --- a/examples/ota-provider-app/ota-provider-common/ota-provider-app.matter +++ b/examples/ota-provider-app/ota-provider-common/ota-provider-app.matter @@ -293,7 +293,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -308,14 +308,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - 
attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/ota-provider-app/ota-provider-common/ota-provider-app.zap b/examples/ota-provider-app/ota-provider-common/ota-provider-app.zap index 72fbd4acf4d73d..485543b8697751 100644 --- a/examples/ota-provider-app/ota-provider-common/ota-provider-app.zap +++ b/examples/ota-provider-app/ota-provider-common/ota-provider-app.zap @@ -1790,7 +1790,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1806,7 +1806,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/ota-requestor-app/ameba/main/DeviceCallbacks.cpp b/examples/ota-requestor-app/ameba/main/DeviceCallbacks.cpp index e2da60638d3d7d..baa7910e3030bf 100644 --- a/examples/ota-requestor-app/ameba/main/DeviceCallbacks.cpp +++ b/examples/ota-requestor-app/ameba/main/DeviceCallbacks.cpp @@ -127,9 +127,6 @@ void DeviceCallbacks::PostAttributeChangeCallback(EndpointId endpointId, Cluster void DeviceCallbacks::OnInternetConnectivityChange(const ChipDeviceEvent * event) { -#if CHIP_DEVICE_CONFIG_ENABLE_OTA_REQUESTOR - static bool isOTAInitialized = false; -#endif if (event->InternetConnectivityChange.IPv4 == kConnectivity_Established) { ChipLogProgress(DeviceLayer, "IPv4 Server ready..."); @@ -145,11 +142,10 @@ void DeviceCallbacks::OnInternetConnectivityChange(const ChipDeviceEvent * event 
chip::app::DnssdServer::Instance().StartServer(); #if CHIP_DEVICE_CONFIG_ENABLE_OTA_REQUESTOR // Init OTA requestor only when we have gotten IPv6 address - if (!isOTAInitialized) + if (OTAInitializer::Instance().CheckInit()) { chip::DeviceLayer::SystemLayer().StartTimer(chip::System::Clock::Seconds32(kInitOTARequestorDelaySec), InitOTARequestorHandler, nullptr); - isOTAInitialized = true; } #endif } diff --git a/examples/ota-requestor-app/ota-requestor-common/ota-requestor-app.matter b/examples/ota-requestor-app/ota-requestor-common/ota-requestor-app.matter index 8b2d542399d193..aa541eeaf95d44 100644 --- a/examples/ota-requestor-app/ota-requestor-common/ota-requestor-app.matter +++ b/examples/ota-requestor-app/ota-requestor-common/ota-requestor-app.matter @@ -468,7 +468,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -483,14 +483,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/ota-requestor-app/ota-requestor-common/ota-requestor-app.zap b/examples/ota-requestor-app/ota-requestor-common/ota-requestor-app.zap index ddfb5df3433458..2e385b38a057ec 100644 --- a/examples/ota-requestor-app/ota-requestor-common/ota-requestor-app.zap +++ b/examples/ota-requestor-app/ota-requestor-common/ota-requestor-app.zap @@ 
-1885,7 +1885,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1901,7 +1901,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/placeholder/linux/BUILD.gn b/examples/placeholder/linux/BUILD.gn index 92c568d5ffc744..f2c743a9d7641a 100644 --- a/examples/placeholder/linux/BUILD.gn +++ b/examples/placeholder/linux/BUILD.gn @@ -15,19 +15,10 @@ import("//build_overrides/build.gni") import("//build_overrides/chip.gni") -import("${chip_root}/src/app/chip_data_model.gni") - declare_args() { chip_tests_zap_config = "none" } -chip_data_model("configuration") { - zap_file = "apps/${chip_tests_zap_config}/config.zap" - - zap_pregenerated_dir = "${chip_root}/zzz_generated/placeholder/${chip_tests_zap_config}/zap-generated" - is_server = true -} - config("includes") { include_dirs = [ ".", @@ -41,13 +32,11 @@ executable("chip-${chip_tests_zap_config}") { "AppOptions.cpp", "InteractiveServer.cpp", "main.cpp", - "src/bridged-actions-stub.cpp", - "static-supported-modes-manager.cpp", ] deps = [ - ":configuration", "${chip_root}/examples/common/websocket-server", + "${chip_root}/examples/placeholder/linux/apps/${chip_tests_zap_config}:${chip_tests_zap_config}", "${chip_root}/examples/platform/linux:app-main", "${chip_root}/src/app/tests/suites/commands/delay", "${chip_root}/src/app/tests/suites/commands/discovery", diff --git a/examples/placeholder/linux/apps/app1/BUILD.gn b/examples/placeholder/linux/apps/app1/BUILD.gn new file mode 100644 index 00000000000000..595a0b7563132c --- /dev/null +++ b/examples/placeholder/linux/apps/app1/BUILD.gn @@ -0,0 +1,38 @@ +# Copyright (c) 2023 Project CHIP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import("//build_overrides/chip.gni") +import("${chip_root}/src/app/chip_data_model.gni") + +chip_data_model("configuration") { + zap_file = "config.zap" + + zap_pregenerated_dir = + "${chip_root}/zzz_generated/placeholder/app1/zap-generated" + is_server = true +} + +source_set("app1") { + include_dirs = [ + ".", + "../../include", + ] + + sources = [ + "../../src/bridged-actions-stub.cpp", + "../../static-supported-modes-manager.cpp", + ] + + public_deps = [ ":configuration" ] +} diff --git a/examples/placeholder/linux/apps/app1/config.matter b/examples/placeholder/linux/apps/app1/config.matter index b06b0ded06743a..9e38684a3534f0 100644 --- a/examples/placeholder/linux/apps/app1/config.matter +++ b/examples/placeholder/linux/apps/app1/config.matter @@ -678,7 +678,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -693,14 +693,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] 
= 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/placeholder/linux/apps/app1/config.zap b/examples/placeholder/linux/apps/app1/config.zap index a933ea668120db..a7d0d1f9f7013b 100644 --- a/examples/placeholder/linux/apps/app1/config.zap +++ b/examples/placeholder/linux/apps/app1/config.zap @@ -1226,7 +1226,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "External", "singleton": 0, @@ -1242,7 +1242,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "External", "singleton": 0, diff --git a/examples/placeholder/linux/apps/app2/BUILD.gn b/examples/placeholder/linux/apps/app2/BUILD.gn new file mode 100644 index 00000000000000..7d8f30a8ea83ec --- /dev/null +++ b/examples/placeholder/linux/apps/app2/BUILD.gn @@ -0,0 +1,38 @@ +# Copyright (c) 2023 Project CHIP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import("//build_overrides/chip.gni") +import("${chip_root}/src/app/chip_data_model.gni") + +chip_data_model("configuration") { + zap_file = "config.zap" + + zap_pregenerated_dir = + "${chip_root}/zzz_generated/placeholder/app2/zap-generated" + is_server = true +} + +source_set("app2") { + include_dirs = [ + ".", + "../../include", + ] + + sources = [ + "../../src/bridged-actions-stub.cpp", + "../../static-supported-modes-manager.cpp", + ] + + public_deps = [ ":configuration" ] +} diff --git a/examples/placeholder/linux/apps/app2/config.matter b/examples/placeholder/linux/apps/app2/config.matter index b5fa6943447ee1..aa39ea9b46db01 100644 --- a/examples/placeholder/linux/apps/app2/config.matter +++ b/examples/placeholder/linux/apps/app2/config.matter @@ -667,7 +667,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -682,14 +682,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/placeholder/linux/apps/app2/config.zap b/examples/placeholder/linux/apps/app2/config.zap index a3772221da087e..ea791be8d894dd 100644 --- a/examples/placeholder/linux/apps/app2/config.zap +++ b/examples/placeholder/linux/apps/app2/config.zap @@ -1342,7 +1342,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + 
"type": "HourFormatEnum", "included": 1, "storageOption": "External", "singleton": 0, @@ -1358,7 +1358,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "External", "singleton": 0, diff --git a/examples/platform/ameba/ota/OTAInitializer.cpp b/examples/platform/ameba/ota/OTAInitializer.cpp index 5f2bcaab0b9b21..e1ea16197f0a40 100644 --- a/examples/platform/ameba/ota/OTAInitializer.cpp +++ b/examples/platform/ameba/ota/OTAInitializer.cpp @@ -36,16 +36,18 @@ AmebaOTAImageProcessor gImageProcessor; extern "C" void amebaQueryImageCmdHandler() { ChipLogProgress(DeviceLayer, "Calling amebaQueryImageCmdHandler"); - PlatformMgr().ScheduleWork([](intptr_t) { GetRequestorInstance()->TriggerImmediateQuery(); }); + if (OTAInitializer::Instance().CheckInit()) + PlatformMgr().ScheduleWork([](intptr_t) { GetRequestorInstance()->TriggerImmediateQuery(); }); } extern "C" void amebaApplyUpdateCmdHandler() { ChipLogProgress(DeviceLayer, "Calling amebaApplyUpdateCmdHandler"); - PlatformMgr().ScheduleWork([](intptr_t) { GetRequestorInstance()->ApplyUpdate(); }); + if (OTAInitializer::Instance().CheckInit()) + PlatformMgr().ScheduleWork([](intptr_t) { GetRequestorInstance()->ApplyUpdate(); }); } -void OTAInitializer::InitOTARequestor(void) +void OTAInitializer::InitOTARequestor() { SetRequestorInstance(&gRequestorCore); gRequestorStorage.Init(chip::Server::GetInstance().GetPersistentStorage()); @@ -55,4 +57,10 @@ void OTAInitializer::InitOTARequestor(void) // Connect the Downloader and Image Processor objects gDownloader.SetImageProcessorDelegate(&gImageProcessor); gRequestorUser.Init(&gRequestorCore, &gImageProcessor); + initialized = true; +} + +bool OTAInitializer::CheckInit() +{ + return initialized; } diff --git a/examples/platform/ameba/ota/OTAInitializer.h b/examples/platform/ameba/ota/OTAInitializer.h index f678c80a780c00..26e3bc4aaca849 100644 --- 
a/examples/platform/ameba/ota/OTAInitializer.h +++ b/examples/platform/ameba/ota/OTAInitializer.h @@ -24,4 +24,8 @@ class OTAInitializer return sInitOTA; } void InitOTARequestor(void); + bool CheckInit(void); + +private: + bool initialized = false; }; diff --git a/examples/platform/linux/Options.h b/examples/platform/linux/Options.h index 3f15872f0f190d..258ea826bb5c22 100644 --- a/examples/platform/linux/Options.h +++ b/examples/platform/linux/Options.h @@ -48,11 +48,11 @@ struct LinuxDeviceOptions bool mWiFi = false; bool mThread = false; #if CHIP_DEVICE_CONFIG_ENABLE_BOTH_COMMISSIONER_AND_COMMISSIONEE || CHIP_DEVICE_ENABLE_PORT_PARAMS - uint32_t securedDevicePort = CHIP_PORT; - uint32_t unsecuredCommissionerPort = CHIP_UDC_PORT; + uint16_t securedDevicePort = CHIP_PORT; + uint16_t unsecuredCommissionerPort = CHIP_UDC_PORT; #endif // CHIP_DEVICE_CONFIG_ENABLE_BOTH_COMMISSIONER_AND_COMMISSIONEE #if CHIP_DEVICE_CONFIG_ENABLE_BOTH_COMMISSIONER_AND_COMMISSIONEE - uint32_t securedCommissionerPort = CHIP_PORT + 12; // TODO: why + 12? + uint16_t securedCommissionerPort = CHIP_PORT + 12; // TODO: why + 12? 
#endif // CHIP_DEVICE_CONFIG_ENABLE_BOTH_COMMISSIONER_AND_COMMISSIONEE const char * command = nullptr; const char * PICS = nullptr; diff --git a/examples/platform/nxp/k32w/k32w0/scripts/detokenizer.py b/examples/platform/nxp/k32w/k32w0/scripts/detokenizer.py index 4ad1436a71fc8b..a4035d0655baa1 100644 --- a/examples/platform/nxp/k32w/k32w0/scripts/detokenizer.py +++ b/examples/platform/nxp/k32w/k32w0/scripts/detokenizer.py @@ -53,7 +53,7 @@ def decode_string(tstr, detok): if s.find('$') == 0: return None return s - except: + except ValueError: return None @@ -88,7 +88,7 @@ def decode_serial(serialport, outfile, database): print(line, file=sys.stdout) if output: print(line, file=output) - except: + except Exception: print("Serial error or program closed", file=sys.stderr) if output: @@ -120,7 +120,7 @@ def decode_file(infile, outfile, database): # ascii decode line # serial terminals may include non ascii characters line = line.decode('ascii').strip() - except: + except Exception: continue # find token start and detokenize idx = line.rfind(']') diff --git a/examples/platform/silabs/efr32/TemperatureSensor.cpp b/examples/platform/silabs/efr32/TemperatureSensor.cpp index 69ba106fd69872..d7e13e19932526 100644 --- a/examples/platform/silabs/efr32/TemperatureSensor.cpp +++ b/examples/platform/silabs/efr32/TemperatureSensor.cpp @@ -19,31 +19,44 @@ #include "TemperatureSensor.h" -#ifdef __cplusplus -extern "C" { -#endif -// This is a C implementation. 
Need the ifdef __cplusplus else we get linking issues -#include "sl_sensor_rht.h" - -#ifdef __cplusplus -} -#endif +#include "sl_board_control.h" +#include "sl_i2cspm_instances.h" +#include "sl_si70xx.h" namespace TemperatureSensor { constexpr uint16_t kSensorTemperatureOffset = 800; +static bool initialized = false; sl_status_t Init() { - return sl_sensor_rht_init(); + sl_status_t status; + sl_i2cspm_t * rht_sensor = sl_i2cspm_sensor; + (void) sl_board_enable_sensor(SL_BOARD_SENSOR_RHT); + + status = sl_si70xx_init(rht_sensor, SI7021_ADDR); + initialized = (SL_STATUS_OK == status); + return status; } sl_status_t GetTemp(uint32_t * relativeHumidity, int16_t * temperature) { + if (!initialized) + { + return SL_STATUS_NOT_INITIALIZED; + } + // Sensor resolution 0.001 C // DataModel resolution 0.01 C - int32_t temp; - sl_status_t status = sl_sensor_rht_get(relativeHumidity, &temp); - *temperature = static_cast(temp / 10) - kSensorTemperatureOffset; + sl_status_t status; + sl_i2cspm_t * rht_sensor = sl_i2cspm_sensor; + int32_t temp = 0; + status = sl_si70xx_measure_rh_and_temp(rht_sensor, SI7021_ADDR, relativeHumidity, &temp); + + if (temperature != nullptr) + { + *temperature = static_cast(temp / 10) - kSensorTemperatureOffset; + } + return status; } }; // namespace TemperatureSensor diff --git a/examples/providers/DeviceInfoProviderImpl.cpp b/examples/providers/DeviceInfoProviderImpl.cpp index 0e731492a91ba2..5524b770c0585a 100644 --- a/examples/providers/DeviceInfoProviderImpl.cpp +++ b/examples/providers/DeviceInfoProviderImpl.cpp @@ -317,40 +317,40 @@ bool DeviceInfoProviderImpl::SupportedCalendarTypesIteratorImpl::Next(CalendarTy switch (mIndex) { case 0: - output = app::Clusters::TimeFormatLocalization::CalendarType::kBuddhist; + output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kBuddhist; break; case 1: - output = app::Clusters::TimeFormatLocalization::CalendarType::kChinese; + output = 
app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kChinese; break; case 2: - output = app::Clusters::TimeFormatLocalization::CalendarType::kCoptic; + output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kCoptic; break; case 3: - output = app::Clusters::TimeFormatLocalization::CalendarType::kEthiopian; + output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kEthiopian; break; case 4: - output = app::Clusters::TimeFormatLocalization::CalendarType::kGregorian; + output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kGregorian; break; case 5: - output = app::Clusters::TimeFormatLocalization::CalendarType::kHebrew; + output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kHebrew; break; case 6: - output = app::Clusters::TimeFormatLocalization::CalendarType::kIndian; + output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kIndian; break; case 7: - output = app::Clusters::TimeFormatLocalization::CalendarType::kJapanese; + output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kJapanese; break; case 8: - output = app::Clusters::TimeFormatLocalization::CalendarType::kKorean; + output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kKorean; break; case 9: - output = app::Clusters::TimeFormatLocalization::CalendarType::kPersian; + output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kPersian; break; case 10: - output = app::Clusters::TimeFormatLocalization::CalendarType::kTaiwanese; + output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kTaiwanese; break; case 11: - output = app::Clusters::TimeFormatLocalization::CalendarType::kIslamic; + output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kIslamic; break; default: err = CHIP_ERROR_PERSISTED_STORAGE_VALUE_NOT_FOUND; diff --git a/examples/pump-app/pump-common/pump-app.zap b/examples/pump-app/pump-common/pump-app.zap index 07421d70731f56..e4d2ea5488b764 100644 --- 
a/examples/pump-app/pump-common/pump-app.zap +++ b/examples/pump-app/pump-common/pump-app.zap @@ -2083,7 +2083,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -2099,7 +2099,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/pump-controller-app/pump-controller-common/pump-controller-app.zap b/examples/pump-controller-app/pump-controller-common/pump-controller-app.zap index fcec2e93cbd457..2b88845ed5cc26 100644 --- a/examples/pump-controller-app/pump-controller-common/pump-controller-app.zap +++ b/examples/pump-controller-app/pump-controller-common/pump-controller-app.zap @@ -2031,7 +2031,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -2047,7 +2047,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/shell/shell_common/BUILD.gn b/examples/shell/shell_common/BUILD.gn index d601f0b54764b8..efd5d74944a4e6 100644 --- a/examples/shell/shell_common/BUILD.gn +++ b/examples/shell/shell_common/BUILD.gn @@ -76,4 +76,6 @@ static_library("shell_common") { } public_configs = [ ":shell_common_config" ] + + cflags = [ "-Wconversion" ] } diff --git a/examples/shell/shell_common/cmd_server.cpp b/examples/shell/shell_common/cmd_server.cpp index 46f95de11b2127..d18eb88cf7a1b0 100644 --- a/examples/shell/shell_common/cmd_server.cpp +++ b/examples/shell/shell_common/cmd_server.cpp @@ -170,7 +170,7 @@ static CHIP_ERROR CmdAppServerClusters(int argc, char ** argv) { bool server = true; - for (int i = 0; i < emberAfEndpointCount(); i++) + for (uint16_t i = 0; i < emberAfEndpointCount(); i++) { EndpointId 
endpoint = emberAfEndpointFromIndex(i); @@ -190,7 +190,7 @@ static CHIP_ERROR CmdAppServerClusters(int argc, char ** argv) static CHIP_ERROR CmdAppServerEndpoints(int argc, char ** argv) { - for (int i = 0; i < emberAfEndpointCount(); i++) + for (uint16_t i = 0; i < emberAfEndpointCount(); i++) { EndpointId endpoint = emberAfEndpointFromIndex(i); diff --git a/examples/temperature-measurement-app/esp32/main/CMakeLists.txt b/examples/temperature-measurement-app/esp32/main/CMakeLists.txt index 8e41f8f6b6eb35..73636f69b2a9e9 100644 --- a/examples/temperature-measurement-app/esp32/main/CMakeLists.txt +++ b/examples/temperature-measurement-app/esp32/main/CMakeLists.txt @@ -78,8 +78,8 @@ idf_component_register(PRIV_INCLUDE_DIRS ${PRIV_INCLUDE_DIRS_LIST} PRIV_REQUIRES ${PRIV_REQUIRES_LIST}) include("${CHIP_ROOT}/build/chip/esp32/esp32_codegen.cmake") -chip_app_component_codegen("${CHIP_ROOT}/examples/temperature-measurement-app/esp32/main/temperature-measurement.matter") -chip_app_component_zapgen("${CHIP_ROOT}/examples/temperature-measurement-app/esp32/main/temperature-measurement.zap") +chip_app_component_codegen("${CHIP_ROOT}/examples/temperature-measurement-app/temperature-measurement-common/temperature-measurement.matter") +chip_app_component_zapgen("${CHIP_ROOT}/examples/temperature-measurement-app/temperature-measurement-common/temperature-measurement.zap") set_property(TARGET ${COMPONENT_LIB} PROPERTY CXX_STANDARD 17) target_compile_options(${COMPONENT_LIB} PRIVATE "-DCHIP_HAVE_CONFIG_H") diff --git a/examples/temperature-measurement-app/telink/.gitignore b/examples/temperature-measurement-app/telink/.gitignore new file mode 100644 index 00000000000000..84c048a73cc2e5 --- /dev/null +++ b/examples/temperature-measurement-app/telink/.gitignore @@ -0,0 +1 @@ +/build/ diff --git a/examples/temperature-measurement-app/telink/CMakeLists.txt b/examples/temperature-measurement-app/telink/CMakeLists.txt new file mode 100644 index 00000000000000..04b3803882cc95 --- /dev/null 
+++ b/examples/temperature-measurement-app/telink/CMakeLists.txt @@ -0,0 +1,66 @@ +# +# Copyright (c) 2023 Project CHIP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +cmake_minimum_required(VERSION 3.13.1) + +set(BOARD tlsr9518adk80d) + +get_filename_component(CHIP_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/third_party/connectedhomeip REALPATH) +get_filename_component(TELINK_COMMON ${CHIP_ROOT}/examples/platform/telink REALPATH) +get_filename_component(GEN_DIR ${CHIP_ROOT}/zzz_generated/ REALPATH) + +set(CONF_FILE ${CHIP_ROOT}/config/telink/app/zephyr.conf prj.conf) + +# Load NCS/Zephyr build system +list(APPEND ZEPHYR_EXTRA_MODULES ${CHIP_ROOT}/config/telink/chip-module) +find_package(Zephyr HINTS $ENV{ZEPHYR_BASE}) + +project(chip-telink-temperature-measurement-example) + +include(${CHIP_ROOT}/config/telink/app/enable-gnu-std.cmake) +include(${CHIP_ROOT}/src/app/chip_data_model.cmake) + +target_compile_options(app PRIVATE -fpermissive) + +target_include_directories(app PRIVATE + include + ${GEN_DIR}/app-common + ${GEN_DIR}/temperature-measurement-app + ${TELINK_COMMON}/util/include + ${TELINK_COMMON}/app/include + ) + +add_definitions( + "-DCHIP_ADDRESS_RESOLVE_IMPL_INCLUDE_HEADER=" +) + +target_sources(app PRIVATE + src/AppTask.cpp + src/SensorManager.cpp + src/main.cpp + ${TELINK_COMMON}/util/src/LEDWidget.cpp + ${TELINK_COMMON}/util/src/ButtonManager.cpp + ${TELINK_COMMON}/util/src/ThreadUtil.cpp + ${TELINK_COMMON}/util/src/PWMDevice.cpp + ) + 
+chip_configure_data_model(app + INCLUDE_SERVER + ZAP_FILE ${CMAKE_CURRENT_SOURCE_DIR}/../temperature-measurement-common/temperature-measurement.zap +) + +if(CONFIG_CHIP_OTA_REQUESTOR) + target_sources(app PRIVATE ${TELINK_COMMON}/util/src/OTAUtil.cpp) +endif() diff --git a/examples/temperature-measurement-app/telink/README.md b/examples/temperature-measurement-app/telink/README.md new file mode 100644 index 00000000000000..68c79aabd1b2f4 --- /dev/null +++ b/examples/temperature-measurement-app/telink/README.md @@ -0,0 +1,163 @@ +# Matter Telink Temperature Measurement Example Application + +The Telink Temperature Measurement Example demonstrates getting simulated data +from temperature sensor. In further releases the real sensor handling will be +implemented along. It uses buttons to test changing the device states and LEDs +to show the state of these changes. You can use this example as a reference for +creating your own application. + +![Telink B91 EVK](http://wiki.telink-semi.cn/wiki/assets/Hardware/B91_Generic_Starter_Kit_Hardware_Guide/connection_chart.png) + +## Build and flash + +1. Pull docker image from repository: + + ```bash + $ docker pull connectedhomeip/chip-build-telink:latest + ``` + +1. Run docker container: + + ```bash + $ docker run -it --rm -v ${CHIP_BASE}:/root/chip -v /dev/bus/usb:/dev/bus/usb --device-cgroup-rule "c 189:* rmw" connectedhomeip/chip-build-telink:latest + ``` + + here `${CHIP_BASE}` is directory which contains CHIP repo files **!!!Pay + attention that OUTPUT_DIR should contains ABSOLUTE path to output dir** + +1. Activate the build environment: + + ```bash + $ source ./scripts/activate.sh + ``` + +1. In the example dir run: + + ```bash + $ west build + ``` + +1. 
Flash binary: + + ``` + $ west flash --erase + ``` + +## Usage + +### UART + +To get output from device, connect UART to following pins: + +| Name | Pin | +| :--: | :---------------------------- | +| RX | PB3 (pin 17 of J34 connector) | +| TX | PB2 (pin 16 of J34 connector) | +| GND | GND | + +### Buttons + +The following buttons are available on **tlsr9518adk80d** board: + +| Name | Function | Description | +| :------- | :--------------------- | :----------------------------------------------------------------------------------------------------- | +| Button 1 | Factory reset | Perform factory reset to forget currently commissioned Thread network and back to uncommissioned state | +| Button 2 | NA | NA | +| Button 3 | Thread start | Commission thread with static credentials and enables the Thread on device | +| Button 4 | Open commission window | The button is opening commissioning window to perform commissioning over BLE | + +### LEDs + +#### Indicate current state of Thread network + +**Red** LED indicates current state of Thread network. It is able to be in +following states: + +| State | Description | +| :-------------------------- | :--------------------------------------------------------------------------- | +| Blinks with short pulses | Device is not commissioned to Thread, Thread is disabled | +| Blinks with frequent pulses | Device is commissioned, Thread enabled. Device trying to JOIN thread network | +| Blinks with wide pulses | Device commissioned and joined to thread network as CHILD | + +### CHIP tool commands + +1. Build + [chip-tool cli](https://github.com/project-chip/connectedhomeip/blob/master/examples/chip-tool/README.md) + +2. 
Pair with device + + ``` + ${CHIP_TOOL_DIR}/chip-tool pairing ble-thread ${NODE_ID} hex:${DATASET} ${PIN_CODE} ${DISCRIMINATOR} + ``` + + Example: + + ``` + ./chip-tool pairing ble-thread 1234 hex:0e080000000000010000000300000f35060004001fffe0020811111111222222220708fd61f77bd3df233e051000112233445566778899aabbccddeeff030e4f70656e54687265616444656d6f010212340410445f2b5ca6f2a93a55ce570a70efeecb0c0402a0fff8 20202021 3840 + ``` + +### OTA with Linux OTA Provider + +OTA feature enabled by default only for ota-requestor-app example. To enable OTA +feature for another Telink example: + +- set CONFIG_CHIP_OTA_REQUESTOR=y in corresponding "prj.conf" configuration + file. + +After build application with enabled OTA feature, use next binary files: + +- zephyr.bin - main binary to flash PCB (Use 2MB PCB). +- zephyr-ota.bin - binary for OTA Provider + +All binaries has the same SW version. To test OTA “zephyr-ota.bin” should have +higher SW version than base SW. Set CONFIG_CHIP_DEVICE_SOFTWARE_VERSION=2 in +corresponding “prj.conf” configuration file. + +Usage of OTA: + +- Build the [Linux OTA Provider](../../ota-provider-app/linux) + + ``` + ./scripts/examples/gn_build_example.sh examples/ota-provider-app/linux out/ota-provider-app chip_config_network_layer_ble=false + ``` + +- Run the Linux OTA Provider with OTA image. 
+ + ``` + ./chip-ota-provider-app -f zephyr-ota.bin + ``` + +- Provision the Linux OTA Provider using chip-tool + + ``` + ./chip-tool pairing onnetwork ${OTA_PROVIDER_NODE_ID} 20202021 + ``` + + here: + + - \${OTA_PROVIDER_NODE_ID} is the node id of Linux OTA Provider + +- Configure the ACL of the ota-provider-app to allow access + + ``` + ./chip-tool accesscontrol write acl '[{"fabricIndex": 1, "privilege": 5, "authMode": 2, "subjects": [112233], "targets": null}, {"fabricIndex": 1, "privilege": 3, "authMode": 2, "subjects": null, "targets": null}]' ${OTA_PROVIDER_NODE_ID} 0 + ``` + + here: + + - \${OTA_PROVIDER_NODE_ID} is the node id of Linux OTA Provider + +- Use the chip-tool to announce the ota-provider-app to start the OTA process + + ``` + ./chip-tool otasoftwareupdaterequestor announce-ota-provider ${OTA_PROVIDER_NODE_ID} 0 0 0 ${DEVICE_NODE_ID} 0 + ``` + + here: + + - \${OTA_PROVIDER_NODE_ID} is the node id of Linux OTA Provider + - \${DEVICE_NODE_ID} is the node id of paired device + +Once the transfer is complete, OTA requestor sends ApplyUpdateRequest command to +OTA provider for applying the image. Device will restart on successful +application of OTA image. diff --git a/examples/temperature-measurement-app/telink/include/AppConfig.h b/examples/temperature-measurement-app/telink/include/AppConfig.h new file mode 100644 index 00000000000000..f3daa577c44063 --- /dev/null +++ b/examples/temperature-measurement-app/telink/include/AppConfig.h @@ -0,0 +1,33 @@ +/* + * + * Copyright (c) 2023 Project CHIP Authors + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +// ---- Temperature measurement Example App Config ---- + +// Buttons config +#define BUTTON_PORT DEVICE_DT_GET(DT_NODELABEL(gpioc)) + +#define BUTTON_PIN_1 2 +#define BUTTON_PIN_3 3 +#define BUTTON_PIN_4 1 +#define BUTTON_PIN_2 0 + +// LEDs config +#define LEDS_PORT DEVICE_DT_GET(DT_NODELABEL(gpiob)) +#define SYSTEM_STATE_LED 7 diff --git a/examples/temperature-measurement-app/telink/include/AppEvent.h b/examples/temperature-measurement-app/telink/include/AppEvent.h new file mode 100644 index 00000000000000..d38fb150212a85 --- /dev/null +++ b/examples/temperature-measurement-app/telink/include/AppEvent.h @@ -0,0 +1,57 @@ +/* + * + * Copyright (c) 2023 Project CHIP Authors + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include + +struct AppEvent; +typedef void (*EventHandler)(AppEvent *); + +class LEDWidget; + +struct AppEvent +{ + enum AppEventTypes + { + kEventType_Button = 0, + kEventType_Timer, + kEventType_UpdateLedState, + kEventType_Install, + }; + + uint16_t Type; + + union + { + struct + { + uint8_t Action; + } ButtonEvent; + struct + { + void * Context; + } TimerEvent; + struct + { + LEDWidget * LedWidget; + } UpdateLedStateEvent; + }; + + EventHandler Handler; +}; diff --git a/examples/temperature-measurement-app/telink/include/AppTask.h b/examples/temperature-measurement-app/telink/include/AppTask.h new file mode 100644 index 00000000000000..b67d2ee6f1944a --- /dev/null +++ b/examples/temperature-measurement-app/telink/include/AppTask.h @@ -0,0 +1,85 @@ +/* + * + * Copyright (c) 2023 Project CHIP Authors + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include "AppConfig.h" +#include "AppEvent.h" +#if CONFIG_CHIP_ENABLE_APPLICATION_STATUS_LED +#include "LEDWidget.h" +#endif +#include "PWMDevice.h" +#include + +#if CONFIG_CHIP_FACTORY_DATA +#include +#endif + +#include + +struct k_timer; + +class AppTask +{ +public: + CHIP_ERROR StartApp(void); + + void SetInitiateAction(PWMDevice::Action_t aAction, int32_t aActor, uint8_t * value); + void PostEvent(AppEvent * aEvent); + +private: + friend AppTask & GetAppTask(void); + CHIP_ERROR Init(void); + + void DispatchEvent(AppEvent * event); + +#if CONFIG_CHIP_ENABLE_APPLICATION_STATUS_LED + static void UpdateLedStateEventHandler(AppEvent * aEvent); + static void LEDStateUpdateHandler(LEDWidget * ledWidget); + static void UpdateStatusLED(); +#endif + static void FactoryResetButtonEventHandler(void); + static void StartThreadButtonEventHandler(void); + static void StartBleAdvButtonEventHandler(void); + + static void ChipEventHandler(const chip::DeviceLayer::ChipDeviceEvent * event, intptr_t arg); + + static void TemperatureMeasurementTimerTimeoutCallback(k_timer * timer); + static void TemperatureMeasurementTimerEventHandler(AppEvent * aEvent); + + static void FactoryResetTimerTimeoutCallback(k_timer * timer); + static void FactoryResetTimerEventHandler(AppEvent * aEvent); + static void FactoryResetHandler(AppEvent * aEvent); + static void StartThreadHandler(AppEvent * aEvent); + static void StartBleAdvHandler(AppEvent * aEvent); + + static void InitButtons(void); + + static void ThreadProvisioningHandler(const chip::DeviceLayer::ChipDeviceEvent * event, intptr_t arg); + + static AppTask sAppTask; + +#if CONFIG_CHIP_FACTORY_DATA + chip::DeviceLayer::FactoryDataProvider mFactoryDataProvider; +#endif +}; + +inline AppTask & GetAppTask(void) +{ + return AppTask::sAppTask; +} diff --git a/examples/temperature-measurement-app/telink/include/CHIPProjectConfig.h b/examples/temperature-measurement-app/telink/include/CHIPProjectConfig.h new file mode 100644 
index 00000000000000..412932a59c3726 --- /dev/null +++ b/examples/temperature-measurement-app/telink/include/CHIPProjectConfig.h @@ -0,0 +1,39 @@ +/* + * + * Copyright (c) 2023 Project CHIP Authors + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file + * Example project configuration file for CHIP. + * + * This is a place to put application or project-specific overrides + * to the default configuration values for general CHIP features. + * + */ + +#pragma once + +// Use a default pairing code if one hasn't been provisioned in flash. +#define CHIP_DEVICE_CONFIG_USE_TEST_SETUP_PIN_CODE 20202021 +#define CHIP_DEVICE_CONFIG_USE_TEST_SETUP_DISCRIMINATOR 0xF00 + +/** + * CHIP_SYSTEM_CONFIG_PACKETBUFFER_POOL_SIZE + * + * Reduce packet buffer pool size to 8 (default 15) to reduce ram consumption + */ +#define CHIP_SYSTEM_CONFIG_PACKETBUFFER_POOL_SIZE 8 diff --git a/examples/temperature-measurement-app/telink/include/SensorManager.h b/examples/temperature-measurement-app/telink/include/SensorManager.h new file mode 100644 index 00000000000000..b7059808601010 --- /dev/null +++ b/examples/temperature-measurement-app/telink/include/SensorManager.h @@ -0,0 +1,54 @@ +/* + * + * Copyright (c) 2023 Project CHIP Authors + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include +#include + +#include "AppEvent.h" + +#include +#include + +class SensorManager +{ +public: + CHIP_ERROR Init(); + + int16_t GetMeasuredValue(); + int16_t GetMinMeasuredValue(); + int16_t GetMaxMeasuredValue(); + +private: + friend SensorManager & SensorMgr(); + + // Reads new generated sensor value, stores it, and updates local temperature attribute + static int16_t SensorEventHandler(); + + int16_t mMeasuredTempCelsius; + int16_t mMinMeasuredTempCelsius = -40; + int16_t mMaxMeasuredTempCelsius = 120; + + static SensorManager sSensorManager; +}; + +inline SensorManager & SensorMgr() +{ + return SensorManager::sSensorManager; +} diff --git a/examples/temperature-measurement-app/telink/prj.conf b/examples/temperature-measurement-app/telink/prj.conf new file mode 100644 index 00000000000000..57d38353f586fb --- /dev/null +++ b/examples/temperature-measurement-app/telink/prj.conf @@ -0,0 +1,79 @@ +# +# Copyright (c) 2023 Project CHIP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# This sample uses sample-defaults.conf to set options common for all +# samples. This file should contain only options specific for this sample +# or overrides of default values. + +# enable GPIO +CONFIG_GPIO=y + +# enable PWM +CONFIG_PWM=y + +# OpenThread configs +CONFIG_OPENTHREAD_MTD=y +CONFIG_OPENTHREAD_FTD=n +CONFIG_CHIP_ENABLE_SLEEPY_END_DEVICE_SUPPORT=n +CONFIG_CHIP_SED_IDLE_INTERVAL=200 +CONFIG_CHIP_THREAD_SSED=n + +# Default OpenThread network settings +CONFIG_OPENTHREAD_PANID=4660 +CONFIG_OPENTHREAD_CHANNEL=15 +CONFIG_OPENTHREAD_NETWORK_NAME="OpenThreadDemo" +CONFIG_OPENTHREAD_XPANID="11:11:11:11:22:22:22:22" + +# Disable Matter OTA DFU +CONFIG_CHIP_OTA_REQUESTOR=n + +# CHIP configuration +CONFIG_CHIP_PROJECT_CONFIG="include/CHIPProjectConfig.h" +CONFIG_CHIP_OPENTHREAD_CONFIG="../../platform/telink/project_include/OpenThreadConfig.h" + +CONFIG_CHIP_DEVICE_VENDOR_ID=65521 +# 32781 == 0x800D (example temperature-measurement-app) +CONFIG_CHIP_DEVICE_PRODUCT_ID=32781 +CONFIG_CHIP_DEVICE_TYPE=65535 + +CONFIG_CHIP_DEVICE_SOFTWARE_VERSION=1 +CONFIG_CHIP_DEVICE_SOFTWARE_VERSION_STRING="2023" + +# Enable CHIP pairing automatically on application start. +CONFIG_CHIP_ENABLE_PAIRING_AUTOSTART=y + +# CHIP shell +CONFIG_CHIP_LIB_SHELL=n + +# Disable factory data support. +CONFIG_CHIP_FACTORY_DATA=n +CONFIG_CHIP_FACTORY_DATA_BUILD=n +CONFIG_CHIP_FACTORY_DATA_MERGE_WITH_FIRMWARE=n +CONFIG_CHIP_CERTIFICATION_DECLARATION_STORAGE=n + +# Enable Button IRQ mode. The poling mode is used by default. +CONFIG_CHIP_BUTTON_MANAGER_IRQ_MODE=n + +# Disable Status LED. 
+CONFIG_CHIP_ENABLE_APPLICATION_STATUS_LED=y + +# Enable Power Management +CONFIG_PM=n +CONFIG_PM_DEVICE=n + +# Custom RF power values +CONFIG_B91_BLE_CTRL_RF_POWER_P9P11DBM=y +CONFIG_IEEE802154_B91_CUSTOM_RF_POWER=9 diff --git a/examples/temperature-measurement-app/telink/src/AppTask.cpp b/examples/temperature-measurement-app/telink/src/AppTask.cpp new file mode 100644 index 00000000000000..9d84f94fa98d9b --- /dev/null +++ b/examples/temperature-measurement-app/telink/src/AppTask.cpp @@ -0,0 +1,481 @@ +/* + * + * Copyright (c) 2023 Project CHIP Authors + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "AppTask.h" + +#include "AppConfig.h" +#include "AppEvent.h" +#include "ButtonManager.h" +#include "SensorManager.h" + +#include "ThreadUtil.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#if CONFIG_CHIP_OTA_REQUESTOR +#include "OTAUtil.h" +#endif + +#include +#include + +#include + +#if CONFIG_CHIP_LIB_SHELL +#include +#include + +static int cmd_telink_reboot(const struct shell * shell, size_t argc, char ** argv) +{ + ARG_UNUSED(argc); + ARG_UNUSED(argv); + + shell_print(shell, "Performing board reboot..."); + sys_reboot(); +} + +SHELL_STATIC_SUBCMD_SET_CREATE(sub_telink, SHELL_CMD(reboot, NULL, "Reboot board command", cmd_telink_reboot), + SHELL_SUBCMD_SET_END); +SHELL_CMD_REGISTER(telink, &sub_telink, "Telink commands", NULL); +#endif // CONFIG_CHIP_LIB_SHELL + +LOG_MODULE_DECLARE(app, CONFIG_CHIP_APP_LOG_LEVEL); + +using namespace ::chip; +using namespace ::chip::app; +using namespace ::chip::Credentials; +using namespace ::chip::DeviceLayer; + +namespace { +constexpr int kFactoryResetCalcTimeout = 3000; +constexpr int kFactoryResetTriggerCntr = 3; +constexpr int kAppEventQueueSize = 10; +constexpr uint8_t kButtonPushEvent = 1; +constexpr uint8_t kButtonReleaseEvent = 0; +constexpr EndpointId kEndpointId = 1; +constexpr uint8_t kDefaultMinLevel = 0; +constexpr uint8_t kDefaultMaxLevel = 254; + +#if CONFIG_CHIP_FACTORY_DATA +// NOTE! This key is for test/certification only and should not be available in production devices! 
+uint8_t sTestEventTriggerEnableKey[TestEventTriggerDelegate::kEnableKeyLength] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, + 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff }; +#endif + +K_MSGQ_DEFINE(sAppEventQueue, sizeof(AppEvent), kAppEventQueueSize, alignof(AppEvent)); +k_timer sFactoryResetTimer; +uint8_t sFactoryResetCntr = 0; + +k_timer sTemperatureMeasurementTimer; +constexpr uint16_t kSensorTimerPeriodMs = 5000; // 5s timer period + +#if CONFIG_CHIP_ENABLE_APPLICATION_STATUS_LED +LEDWidget sStatusLED; +#endif + +Button sFactoryResetButton; +Button sThreadStartButton; +Button sBleAdvStartButton; + +bool sIsThreadProvisioned = false; +bool sIsThreadEnabled = false; +bool sIsThreadAttached = false; +bool sHaveBLEConnections = false; + +chip::DeviceLayer::DeviceInfoProviderImpl gExampleDeviceInfoProvider; + +} // namespace + +AppTask AppTask::sAppTask; + +class AppFabricTableDelegate : public FabricTable::Delegate +{ + void OnFabricRemoved(const FabricTable & fabricTable, FabricIndex fabricIndex) + { + if (chip::Server::GetInstance().GetFabricTable().FabricCount() == 0) + { + chip::Server::GetInstance().ScheduleFactoryReset(); + } + } +}; + +CHIP_ERROR AppTask::Init(void) +{ + LOG_INF("SW Version: %u, %s", CHIP_DEVICE_CONFIG_DEVICE_SOFTWARE_VERSION, CHIP_DEVICE_CONFIG_DEVICE_SOFTWARE_VERSION_STRING); + + // Initialize LEDs +#if CONFIG_CHIP_ENABLE_APPLICATION_STATUS_LED + LEDWidget::InitGpio(LEDS_PORT); + LEDWidget::SetStateUpdateCallback(LEDStateUpdateHandler); + + sStatusLED.Init(SYSTEM_STATE_LED); + + UpdateStatusLED(); +#endif + + InitButtons(); + + // Initialize function button timer + k_timer_init(&sFactoryResetTimer, &AppTask::FactoryResetTimerTimeoutCallback, nullptr); + k_timer_user_data_set(&sFactoryResetTimer, this); + + // Initialize temperature measurement timer + k_timer_init(&sTemperatureMeasurementTimer, &AppTask::TemperatureMeasurementTimerTimeoutCallback, nullptr); + k_timer_user_data_set(&sTemperatureMeasurementTimer, this); + 
k_timer_start(&sTemperatureMeasurementTimer, K_MSEC(kSensorTimerPeriodMs), K_NO_WAIT); + + // Initialize CHIP server +#if CONFIG_CHIP_FACTORY_DATA + ReturnErrorOnFailure(mFactoryDataProvider.Init()); + SetDeviceInstanceInfoProvider(&mFactoryDataProvider); + SetDeviceAttestationCredentialsProvider(&mFactoryDataProvider); + SetCommissionableDataProvider(&mFactoryDataProvider); + // Read EnableKey from the factory data. + MutableByteSpan enableKey(sTestEventTriggerEnableKey); + err = mFactoryDataProvider.GetEnableKey(enableKey); + if (err != CHIP_NO_ERROR) + { + LOG_ERR("GetEnableKey fail"); + memset(sTestEventTriggerEnableKey, 0, sizeof(sTestEventTriggerEnableKey)); + } +#else + SetDeviceAttestationCredentialsProvider(Examples::GetExampleDACProvider()); +#endif + + static CommonCaseDeviceServerInitParams initParams; + (void) initParams.InitializeStaticResourcesBeforeServerInit(); + ReturnErrorOnFailure(chip::Server::GetInstance().Init(initParams)); + + gExampleDeviceInfoProvider.SetStorageDelegate(&Server::GetInstance().GetPersistentStorage()); + chip::DeviceLayer::SetDeviceInfoProvider(&gExampleDeviceInfoProvider); + +#if CONFIG_CHIP_OTA_REQUESTOR + InitBasicOTARequestor(); +#endif + + ConfigurationMgr().LogDeviceConfig(); + PrintOnboardingCodes(chip::RendezvousInformationFlags(chip::RendezvousInformationFlag::kBLE)); + + // Add CHIP event handler and start CHIP thread. + // Note that all the initialization code should happen prior to this point to avoid data races + // between the main and the CHIP threads. 
+ PlatformMgr().AddEventHandler(ChipEventHandler, 0); + + // Init Temperature Sensor + CHIP_ERROR err = SensorMgr().Init(); + if (err != CHIP_NO_ERROR) + { + LOG_ERR("SensorMgr Init fail"); + return err; + } + + PlatformMgr().LockChipStack(); + app::Clusters::TemperatureMeasurement::Attributes::MinMeasuredValue::Set(kEndpointId, SensorMgr().GetMinMeasuredValue()); + app::Clusters::TemperatureMeasurement::Attributes::MaxMeasuredValue::Set(kEndpointId, SensorMgr().GetMaxMeasuredValue()); + PlatformMgr().UnlockChipStack(); + + err = ConnectivityMgr().SetBLEDeviceName("TelinkTerm"); + if (err != CHIP_NO_ERROR) + { + LOG_ERR("SetBLEDeviceName fail"); + return err; + } + + err = chip::Server::GetInstance().GetFabricTable().AddFabricDelegate(new AppFabricTableDelegate); + if (err != CHIP_NO_ERROR) + { + LOG_ERR("AppFabricTableDelegate fail"); + return err; + } + + return CHIP_NO_ERROR; +} + +CHIP_ERROR AppTask::StartApp(void) +{ + CHIP_ERROR err = Init(); + + if (err != CHIP_NO_ERROR) + { + LOG_ERR("AppTask Init fail"); + return err; + } + + AppEvent event = {}; + + while (true) + { + k_msgq_get(&sAppEventQueue, &event, K_FOREVER); + DispatchEvent(&event); + } +} + +void AppTask::FactoryResetButtonEventHandler(void) +{ + AppEvent event; + + event.Type = AppEvent::kEventType_Button; + event.ButtonEvent.Action = kButtonPushEvent; + event.Handler = FactoryResetHandler; + sAppTask.PostEvent(&event); +} + +void AppTask::FactoryResetHandler(AppEvent * aEvent) +{ + if (sFactoryResetCntr == 0) + { + k_timer_start(&sFactoryResetTimer, K_MSEC(kFactoryResetCalcTimeout), K_NO_WAIT); + } + + sFactoryResetCntr++; + LOG_INF("Factory Reset Trigger Counter: %d/%d", sFactoryResetCntr, kFactoryResetTriggerCntr); + + if (sFactoryResetCntr == kFactoryResetTriggerCntr) + { + k_timer_stop(&sFactoryResetTimer); + sFactoryResetCntr = 0; + + chip::Server::GetInstance().ScheduleFactoryReset(); + } +} + +void AppTask::StartThreadButtonEventHandler(void) +{ + AppEvent event; + + event.Type = 
AppEvent::kEventType_Button; + event.ButtonEvent.Action = kButtonPushEvent; + event.Handler = StartThreadHandler; + sAppTask.PostEvent(&event); +} + +void AppTask::StartThreadHandler(AppEvent * aEvent) +{ + LOG_INF("StartThreadHandler"); + if (!chip::DeviceLayer::ConnectivityMgr().IsThreadProvisioned()) + { + // Switch context from BLE to Thread + Internal::BLEManagerImpl sInstance; + sInstance.SwitchToIeee802154(); + StartDefaultThreadNetwork(); + } + else + { + LOG_INF("Device already commissioned"); + } +} + +void AppTask::StartBleAdvButtonEventHandler(void) +{ + AppEvent event; + + event.Type = AppEvent::kEventType_Button; + event.ButtonEvent.Action = kButtonPushEvent; + event.Handler = StartBleAdvHandler; + sAppTask.PostEvent(&event); +} + +void AppTask::StartBleAdvHandler(AppEvent * aEvent) +{ + LOG_INF("StartBleAdvHandler"); + + // Don't allow on starting Matter service BLE advertising after Thread provisioning. + if (ConnectivityMgr().IsThreadProvisioned()) + { + LOG_INF("Device already commissioned"); + return; + } + + if (ConnectivityMgr().IsBLEAdvertisingEnabled()) + { + LOG_INF("BLE adv already enabled"); + return; + } + + if (chip::Server::GetInstance().GetCommissioningWindowManager().OpenBasicCommissioningWindow() != CHIP_NO_ERROR) + { + LOG_ERR("OpenBasicCommissioningWindow fail"); + } +} + +#if CONFIG_CHIP_ENABLE_APPLICATION_STATUS_LED +void AppTask::UpdateLedStateEventHandler(AppEvent * aEvent) +{ + if (aEvent->Type == AppEvent::kEventType_UpdateLedState) + { + aEvent->UpdateLedStateEvent.LedWidget->UpdateState(); + } +} + +void AppTask::LEDStateUpdateHandler(LEDWidget * ledWidget) +{ + AppEvent event; + event.Type = AppEvent::kEventType_UpdateLedState; + event.Handler = UpdateLedStateEventHandler; + event.UpdateLedStateEvent.LedWidget = ledWidget; + sAppTask.PostEvent(&event); +} + +void AppTask::UpdateStatusLED(void) +{ + if (sIsThreadProvisioned && sIsThreadEnabled) + { + if (sIsThreadAttached) + { + sStatusLED.Blink(950, 50); + } + else + { + 
sStatusLED.Blink(100, 100); + } + } + else + { + sStatusLED.Blink(50, 950); + } +} +#endif + +void AppTask::ChipEventHandler(const ChipDeviceEvent * event, intptr_t /* arg */) +{ + switch (event->Type) + { + case DeviceEventType::kCHIPoBLEAdvertisingChange: + sHaveBLEConnections = ConnectivityMgr().NumBLEConnections() != 0; +#if CONFIG_CHIP_ENABLE_APPLICATION_STATUS_LED + UpdateStatusLED(); +#endif + break; + case DeviceEventType::kThreadStateChange: + sIsThreadProvisioned = ConnectivityMgr().IsThreadProvisioned(); + sIsThreadEnabled = ConnectivityMgr().IsThreadEnabled(); + sIsThreadAttached = ConnectivityMgr().IsThreadAttached(); +#if CONFIG_CHIP_ENABLE_APPLICATION_STATUS_LED + UpdateStatusLED(); +#endif + break; + case DeviceEventType::kThreadConnectivityChange: +#if CONFIG_CHIP_OTA_REQUESTOR + if (event->ThreadConnectivityChange.Result == kConnectivity_Established) + { + InitBasicOTARequestor(); + } +#endif + break; + default: + break; + } +} + +void AppTask::PostEvent(AppEvent * aEvent) +{ + if (k_msgq_put(&sAppEventQueue, aEvent, K_NO_WAIT) != 0) + { + LOG_INF("PostEvent fail"); + } +} + +void AppTask::DispatchEvent(AppEvent * aEvent) +{ + if (aEvent->Handler) + { + aEvent->Handler(aEvent); + } + else + { + LOG_INF("Dropping event without handler"); + } +} + +void AppTask::TemperatureMeasurementTimerTimeoutCallback(k_timer * timer) +{ + if (!timer) + { + return; + } + + AppEvent event; + event.Type = AppEvent::kEventType_Timer; + event.Handler = TemperatureMeasurementTimerEventHandler; + sAppTask.PostEvent(&event); +} + +void AppTask::TemperatureMeasurementTimerEventHandler(AppEvent * aEvent) +{ + if (aEvent->Type != AppEvent::kEventType_Timer) + { + return; + } + + PlatformMgr().LockChipStack(); + app::Clusters::TemperatureMeasurement::Attributes::MeasuredValue::Set(kEndpointId, SensorMgr().GetMeasuredValue()); + PlatformMgr().UnlockChipStack(); + + LOG_INF("Current temperature is (%d*0.01)°C", SensorMgr().GetMeasuredValue()); + + // Start next timer to 
handle temp sensor. + k_timer_start(&sTemperatureMeasurementTimer, K_MSEC(kSensorTimerPeriodMs), K_NO_WAIT); +} + +void AppTask::FactoryResetTimerTimeoutCallback(k_timer * timer) +{ + if (!timer) + { + return; + } + + AppEvent event; + event.Type = AppEvent::kEventType_Timer; + event.Handler = FactoryResetTimerEventHandler; + sAppTask.PostEvent(&event); +} + +void AppTask::FactoryResetTimerEventHandler(AppEvent * aEvent) +{ + if (aEvent->Type != AppEvent::kEventType_Timer) + { + return; + } + + sFactoryResetCntr = 0; + LOG_INF("Factory Reset Trigger Counter is cleared"); +} + +void AppTask::InitButtons(void) +{ +#if CONFIG_CHIP_BUTTON_MANAGER_IRQ_MODE + sFactoryResetButton.Configure(BUTTON_PORT, BUTTON_PIN_1, FactoryResetButtonEventHandler); + sThreadStartButton.Configure(BUTTON_PORT, BUTTON_PIN_3, StartThreadButtonEventHandler); + sBleAdvStartButton.Configure(BUTTON_PORT, BUTTON_PIN_4, StartBleAdvButtonEventHandler); +#else + sFactoryResetButton.Configure(BUTTON_PORT, BUTTON_PIN_3, BUTTON_PIN_1, FactoryResetButtonEventHandler); + sThreadStartButton.Configure(BUTTON_PORT, BUTTON_PIN_3, BUTTON_PIN_2, StartThreadButtonEventHandler); + sBleAdvStartButton.Configure(BUTTON_PORT, BUTTON_PIN_4, BUTTON_PIN_2, StartBleAdvButtonEventHandler); +#endif + + ButtonManagerInst().AddButton(sFactoryResetButton); + ButtonManagerInst().AddButton(sThreadStartButton); + ButtonManagerInst().AddButton(sBleAdvStartButton); +} diff --git a/examples/temperature-measurement-app/telink/src/SensorManager.cpp b/examples/temperature-measurement-app/telink/src/SensorManager.cpp new file mode 100644 index 00000000000000..b108c59fab83a1 --- /dev/null +++ b/examples/temperature-measurement-app/telink/src/SensorManager.cpp @@ -0,0 +1,93 @@ +/* + * + * Copyright (c) 2023 Project CHIP Authors + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "SensorManager.h" +#include "AppConfig.h" +#include "AppEvent.h" +#include "AppTask.h" + +#define TEMPERATURE_SIMULATION_IS_USED + +LOG_MODULE_DECLARE(app, CONFIG_CHIP_APP_LOG_LEVEL); + +using namespace chip; +using namespace ::chip::DeviceLayer; + +constexpr float kMinTemperatureDelta = 0.5; // 0.5 degree Celsius +constexpr uint16_t kSimulatedReadingFrequency = 5; // Change Simulated number +static float mSimulatedTemp[] = { 23.01, 24.02, 28.03, 25.50, 22.05, 21.25, 21.07, 26.08, 18.09, 27.11 }; + +k_timer sSensorTimer; + +SensorManager SensorManager::sSensorManager; + +CHIP_ERROR SensorManager::Init() +{ + // TODO: Initialize temp sensor + return CHIP_NO_ERROR; +} + +int16_t SensorManager::SensorEventHandler() +{ + float temperature = 0.0; + static float lastTemperature = 0.0; + +#ifdef TEMPERATURE_SIMULATION_IS_USED + static uint8_t nbOfRepetition = 0; + static uint8_t simulatedIndex = 0; + if (simulatedIndex >= sizeof(mSimulatedTemp) - 1) + { + simulatedIndex = 0; + } + temperature = mSimulatedTemp[simulatedIndex]; + + nbOfRepetition++; + + if (nbOfRepetition >= kSimulatedReadingFrequency) + { + simulatedIndex++; + nbOfRepetition = 0; + } + + if ((temperature >= (lastTemperature + kMinTemperatureDelta)) || temperature <= (lastTemperature - kMinTemperatureDelta)) + { + lastTemperature = temperature; + // Per spec Application Clusters 2.3.4.1. 
: MeasuredValue = 100 x temperature [°C] + sSensorManager.mMeasuredTempCelsius = (int16_t) 100 * temperature; + } +#else + // TODO: provide REAL sensor implementation + sSensorManager.mMeasuredTempCelsius = (int16_t) 100 * GetRealSensorTemperature(); +#endif // TEMPERATURE_SIMULATION_IS_USED + return sSensorManager.mMeasuredTempCelsius; +} + +int16_t SensorManager::GetMeasuredValue() +{ + return SensorEventHandler(); +} + +int16_t SensorManager::GetMinMeasuredValue() +{ + return mMinMeasuredTempCelsius; +} + +int16_t SensorManager::GetMaxMeasuredValue() +{ + return mMaxMeasuredTempCelsius; +} diff --git a/examples/temperature-measurement-app/telink/src/main.cpp b/examples/temperature-measurement-app/telink/src/main.cpp new file mode 100644 index 00000000000000..71aa52f9d91901 --- /dev/null +++ b/examples/temperature-measurement-app/telink/src/main.cpp @@ -0,0 +1,82 @@ +/* + * + * Copyright (c) 2023 Project CHIP Authors + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "AppTask.h" + +#include +#include + +#include + +LOG_MODULE_REGISTER(app, CONFIG_CHIP_APP_LOG_LEVEL); + +using namespace ::chip; +using namespace ::chip::Inet; +using namespace ::chip::DeviceLayer; + +int main(void) +{ + CHIP_ERROR err = CHIP_NO_ERROR; + + err = chip::Platform::MemoryInit(); + if (err != CHIP_NO_ERROR) + { + LOG_ERR("MemoryInit fail"); + goto exit; + } + + err = PlatformMgr().InitChipStack(); + if (err != CHIP_NO_ERROR) + { + LOG_ERR("InitChipStack fail"); + goto exit; + } + + err = PlatformMgr().StartEventLoopTask(); + if (err != CHIP_NO_ERROR) + { + LOG_ERR("StartEventLoopTask fail"); + goto exit; + } + + err = ThreadStackMgr().InitThreadStack(); + if (err != CHIP_NO_ERROR) + { + LOG_ERR("InitThreadStack fail"); + goto exit; + } + +#ifdef CONFIG_OPENTHREAD_MTD_SED + err = ConnectivityMgr().SetThreadDeviceType(ConnectivityManager::kThreadDeviceType_SleepyEndDevice); +#elif CONFIG_OPENTHREAD_MTD + err = ConnectivityMgr().SetThreadDeviceType(ConnectivityManager::kThreadDeviceType_MinimalEndDevice); +#else + err = ConnectivityMgr().SetThreadDeviceType(ConnectivityManager::kThreadDeviceType_Router); +#endif + if (err != CHIP_NO_ERROR) + { + LOG_ERR("SetThreadDeviceType fail"); + goto exit; + } + + err = GetAppTask().StartApp(); + +exit: + LOG_ERR("Exit err %" CHIP_ERROR_FORMAT, err.Format()); + return (err == CHIP_NO_ERROR) ? EXIT_SUCCESS : EXIT_FAILURE; +} diff --git a/examples/temperature-measurement-app/telink/third_party/connectedhomeip b/examples/temperature-measurement-app/telink/third_party/connectedhomeip new file mode 120000 index 00000000000000..c866b86874994d --- /dev/null +++ b/examples/temperature-measurement-app/telink/third_party/connectedhomeip @@ -0,0 +1 @@ +../../../.. 
\ No newline at end of file diff --git a/examples/temperature-measurement-app/temperature-measurement-common/BUILD.gn b/examples/temperature-measurement-app/temperature-measurement-common/BUILD.gn new file mode 100644 index 00000000000000..0430c288a90354 --- /dev/null +++ b/examples/temperature-measurement-app/temperature-measurement-common/BUILD.gn @@ -0,0 +1,25 @@ +# Copyright (c) 2023 Project CHIP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import("//build_overrides/chip.gni") + +import("${chip_root}/src/app/chip_data_model.gni") + +chip_data_model("temperature-measurement-common") { + zap_file = "temperature-measurement.zap" + + zap_pregenerated_dir = + "${chip_root}/zzz_generated/temperature-measurement/zap-generated" + is_server = true +} diff --git a/examples/temperature-measurement-app/esp32/main/temperature-measurement.matter b/examples/temperature-measurement-app/temperature-measurement-common/temperature-measurement.matter similarity index 99% rename from examples/temperature-measurement-app/esp32/main/temperature-measurement.matter rename to examples/temperature-measurement-app/temperature-measurement-common/temperature-measurement.matter index 3d809d4214df2a..231209600fca32 100644 --- a/examples/temperature-measurement-app/esp32/main/temperature-measurement.matter +++ b/examples/temperature-measurement-app/temperature-measurement-common/temperature-measurement.matter @@ -154,7 +154,7 @@ server cluster LocalizationConfiguration = 43 { } 
server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -169,14 +169,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/temperature-measurement-app/esp32/main/temperature-measurement.zap b/examples/temperature-measurement-app/temperature-measurement-common/temperature-measurement.zap similarity index 99% rename from examples/temperature-measurement-app/esp32/main/temperature-measurement.zap rename to examples/temperature-measurement-app/temperature-measurement-common/temperature-measurement.zap index ba4be46d36882a..f389822f276167 100644 --- a/examples/temperature-measurement-app/esp32/main/temperature-measurement.zap +++ b/examples/temperature-measurement-app/temperature-measurement-common/temperature-measurement.zap @@ -18,7 +18,7 @@ "package": [ { "pathRelativity": "relativeToZap", - "path": "../../../../src/app/zap-templates/zcl/zcl.json", + "path": "../../../src/app/zap-templates/zcl/zcl.json", "type": "zcl-properties", "category": "matter", "version": 1, @@ -26,7 +26,7 @@ }, { "pathRelativity": "relativeToZap", - "path": "../../../../src/app/zap-templates/app-templates.json", + "path": "../../../src/app/zap-templates/app-templates.json", "type": "gen-templates-json", "version": "chip-v1" } @@ -1076,7 +1076,7 @@ "code": 0, "mfgCode": null, "side": "server", - 
"type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1092,7 +1092,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/thermostat/silabs/efr32/BUILD.gn b/examples/thermostat/silabs/efr32/BUILD.gn index 5a047229611904..d976118d53a905 100644 --- a/examples/thermostat/silabs/efr32/BUILD.gn +++ b/examples/thermostat/silabs/efr32/BUILD.gn @@ -114,8 +114,6 @@ efr32_executable("thermostat_app") { if (use_temp_sensor) { sources += [ - "${efr32_sdk_root}/app/bluetooth/common/sensor_rht/sl_sensor_rht.c", - "${efr32_sdk_root}/app/bluetooth/common/sensor_select/sl_sensor_select.c", "${efr32_sdk_root}/hardware/driver/si70xx/src/sl_si70xx.c", "${efr32_sdk_root}/platform/common/src/sl_status.c", "${efr32_sdk_root}/platform/driver/i2cspm/src/sl_i2cspm.c", diff --git a/examples/thermostat/thermostat-common/thermostat.matter b/examples/thermostat/thermostat-common/thermostat.matter index 314607a604a6f6..14a7a104b6c205 100644 --- a/examples/thermostat/thermostat-common/thermostat.matter +++ b/examples/thermostat/thermostat-common/thermostat.matter @@ -580,7 +580,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -595,14 +595,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 
65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/thermostat/thermostat-common/thermostat.zap b/examples/thermostat/thermostat-common/thermostat.zap index 692c3fb00cfc44..a7dbfd6603f3dd 100644 --- a/examples/thermostat/thermostat-common/thermostat.zap +++ b/examples/thermostat/thermostat-common/thermostat.zap @@ -1985,7 +1985,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -2001,7 +2001,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/tv-app/android/App/content-app/src/main/java/com/example/contentapp/CommandResponseHolder.java b/examples/tv-app/android/App/content-app/src/main/java/com/example/contentapp/CommandResponseHolder.java index aaaeaa0cba7005..61ee303b408587 100644 --- a/examples/tv-app/android/App/content-app/src/main/java/com/example/contentapp/CommandResponseHolder.java +++ b/examples/tv-app/android/App/content-app/src/main/java/com/example/contentapp/CommandResponseHolder.java @@ -30,7 +30,7 @@ private CommandResponseHolder() { setResponseValue( Clusters.AccountLogin.Id, Clusters.AccountLogin.Commands.GetSetupPIN.ID, - "{\"0\":\"12345678\"}"); + "{\"0\":\"20202021\"}"); }; public static CommandResponseHolder getInstance() { diff --git a/examples/tv-app/android/java/ContentAppCommandDelegate.cpp b/examples/tv-app/android/java/ContentAppCommandDelegate.cpp index 4708700874304a..b477b75333866b 100644 --- a/examples/tv-app/android/java/ContentAppCommandDelegate.cpp +++ b/examples/tv-app/android/java/ContentAppCommandDelegate.cpp @@ -81,11 +81,13 @@ void ContentAppCommandDelegate::InvokeCommand(CommandHandlerInterface::HandlerCo env->ExceptionDescribe(); env->ExceptionClear(); FormatResponseData(handlerContext, 
"{\"value\":{}}"); - return; } - JniUtfString respStr(env, resp); - ChipLogProgress(Zcl, "ContentAppCommandDelegate::InvokeCommand got response %s", respStr.c_str()); - FormatResponseData(handlerContext, respStr.c_str()); + else + { + JniUtfString respStr(env, resp); + ChipLogProgress(Zcl, "ContentAppCommandDelegate::InvokeCommand got response %s", respStr.c_str()); + FormatResponseData(handlerContext, respStr.c_str()); + } env->DeleteLocalRef(resp); } else diff --git a/examples/tv-app/tv-common/tv-app.matter b/examples/tv-app/tv-common/tv-app.matter index 71d25a9614199f..ad38b6dca1023b 100644 --- a/examples/tv-app/tv-common/tv-app.matter +++ b/examples/tv-app/tv-common/tv-app.matter @@ -415,7 +415,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -430,14 +430,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/tv-app/tv-common/tv-app.zap b/examples/tv-app/tv-common/tv-app.zap index cc297493778181..f93946ca6f8cd6 100644 --- a/examples/tv-app/tv-common/tv-app.zap +++ b/examples/tv-app/tv-common/tv-app.zap @@ -1740,7 +1740,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1756,7 +1756,7 @@ "code": 1, 
"mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/tv-casting-app/android/App/app/src/main/jni/com/chip/casting/DiscoveredNodeData.java b/examples/tv-casting-app/android/App/app/src/main/jni/com/chip/casting/DiscoveredNodeData.java index b8a051036edd54..e3bc55388658ed 100644 --- a/examples/tv-casting-app/android/App/app/src/main/jni/com/chip/casting/DiscoveredNodeData.java +++ b/examples/tv-casting-app/android/App/app/src/main/jni/com/chip/casting/DiscoveredNodeData.java @@ -52,6 +52,9 @@ public class DiscoveredNodeData { public DiscoveredNodeData(NsdServiceInfo serviceInfo) { Map attributes = serviceInfo.getAttributes(); this.deviceName = new String(attributes.get(KEY_DEVICE_NAME), StandardCharsets.UTF_8); + if (serviceInfo.getHost() != null) { + this.hostName = serviceInfo.getHost().getHostName(); + } this.deviceType = Long.parseLong(new String(attributes.get(KEY_DEVICE_TYPE), StandardCharsets.UTF_8)); diff --git a/examples/tv-casting-app/android/App/app/src/main/jni/com/chip/casting/VideoPlayer.java b/examples/tv-casting-app/android/App/app/src/main/jni/com/chip/casting/VideoPlayer.java index 9ba557c910b704..e440acdce093d4 100644 --- a/examples/tv-casting-app/android/App/app/src/main/jni/com/chip/casting/VideoPlayer.java +++ b/examples/tv-casting-app/android/App/app/src/main/jni/com/chip/casting/VideoPlayer.java @@ -37,6 +37,7 @@ public class VideoPlayer { private int numIPs; private List ipAddresses; + private String hostName; private boolean isInitialized = false; @@ -50,6 +51,7 @@ public VideoPlayer( List contentApps, int numIPs, List ipAddresses, + String hostName, boolean isConnected) { this.nodeId = nodeId; this.fabricIndex = fabricIndex; @@ -61,6 +63,7 @@ public VideoPlayer( this.isConnected = isConnected; this.numIPs = numIPs; this.ipAddresses = ipAddresses; + this.hostName = hostName; this.isInitialized = true; } @@ -70,6 +73,11 @@ public 
boolean isSameAs(DiscoveredNodeData discoveredNodeData) { return false; } + // return true if hostNames match + if (Objects.equals(hostName, discoveredNodeData.getHostName())) { + return true; + } + // return false because deviceNames are different if (Objects.equals(deviceName, discoveredNodeData.getDeviceName()) == false) { return false; @@ -133,6 +141,9 @@ public java.lang.String toString() { + ", ipAddresses=" + ipAddresses + ", isInitialized=" + + ", hostName='" + + hostName + + '\'' + isInitialized + '}'; } diff --git a/examples/tv-casting-app/android/App/app/src/main/jni/cpp/ConversionUtils.cpp b/examples/tv-casting-app/android/App/app/src/main/jni/cpp/ConversionUtils.cpp index 3b5832af055963..16b2cec60f201f 100644 --- a/examples/tv-casting-app/android/App/app/src/main/jni/cpp/ConversionUtils.cpp +++ b/examples/tv-casting-app/android/App/app/src/main/jni/cpp/ConversionUtils.cpp @@ -24,7 +24,7 @@ CHIP_ERROR convertJAppParametersToCppAppParams(jobject appParameters, AppParams & outAppParams) { - ChipLogProgress(AppServer, "convertJContentAppToTargetEndpointInfo called"); + ChipLogProgress(AppServer, "convertJAppParametersToCppAppParams called"); JNIEnv * env = chip::JniReferences::GetInstance().GetEnvForCurrentThread(); VerifyOrReturnError(appParameters != nullptr, CHIP_ERROR_INVALID_ARGUMENT); @@ -149,7 +149,13 @@ CHIP_ERROR convertJVideoPlayerToTargetVideoPlayerInfo(jobject videoPlayer, Targe jfieldID getDeviceNameField = env->GetFieldID(jVideoPlayerClass, "deviceName", "Ljava/lang/String;"); jstring jDeviceName = static_cast(env->GetObjectField(videoPlayer, getDeviceNameField)); const char * deviceName = env->GetStringUTFChars(jDeviceName, 0); - outTargetVideoPlayerInfo.Initialize(nodeId, fabricIndex, nullptr, nullptr, vendorId, productId, deviceType, deviceName); + + jfieldID getHostNameField = env->GetFieldID(jVideoPlayerClass, "hostName", "Ljava/lang/String;"); + jstring jHostName = static_cast(env->GetObjectField(videoPlayer, getHostNameField)); + const 
char * hostName = env->GetStringUTFChars(jHostName, 0); + + outTargetVideoPlayerInfo.Initialize(nodeId, fabricIndex, nullptr, nullptr, vendorId, productId, deviceType, deviceName, + hostName); jfieldID jContentAppsField = env->GetFieldID(jVideoPlayerClass, "contentApps", "Ljava/util/List;"); jobject jContentApps = env->GetObjectField(videoPlayer, jContentAppsField); @@ -189,8 +195,8 @@ CHIP_ERROR convertTargetVideoPlayerInfoToJVideoPlayer(TargetVideoPlayerInfo * ta jclass jVideoPlayerClass; ReturnErrorOnFailure( chip::JniReferences::GetInstance().GetClassRef(env, "com/chip/casting/VideoPlayer", jVideoPlayerClass)); - jmethodID jVideoPlayerConstructor = - env->GetMethodID(jVideoPlayerClass, "", "(JBLjava/lang/String;IIILjava/util/List;ILjava/util/List;Z)V"); + jmethodID jVideoPlayerConstructor = env->GetMethodID( + jVideoPlayerClass, "", "(JBLjava/lang/String;IIILjava/util/List;ILjava/util/List;Ljava/lang/String;Z)V"); jobject jContentAppList = nullptr; TargetEndpointInfo * endpoints = targetVideoPlayerInfo->GetEndpoints(); @@ -208,6 +214,9 @@ CHIP_ERROR convertTargetVideoPlayerInfoToJVideoPlayer(TargetVideoPlayerInfo * ta jstring deviceName = targetVideoPlayerInfo->GetDeviceName() == nullptr ? nullptr : env->NewStringUTF(targetVideoPlayerInfo->GetDeviceName()); + jstring hostName = + targetVideoPlayerInfo->GetHostName() == nullptr ? 
nullptr : env->NewStringUTF(targetVideoPlayerInfo->GetHostName()); + jobject jIPAddressList = nullptr; const chip::Inet::IPAddress * ipAddresses = targetVideoPlayerInfo->GetIpAddresses(); if (ipAddresses != nullptr) @@ -233,7 +242,7 @@ CHIP_ERROR convertTargetVideoPlayerInfoToJVideoPlayer(TargetVideoPlayerInfo * ta outVideoPlayer = env->NewObject(jVideoPlayerClass, jVideoPlayerConstructor, targetVideoPlayerInfo->GetNodeId(), targetVideoPlayerInfo->GetFabricIndex(), deviceName, targetVideoPlayerInfo->GetVendorId(), targetVideoPlayerInfo->GetProductId(), targetVideoPlayerInfo->GetDeviceType(), - jContentAppList, targetVideoPlayerInfo->GetNumIPs(), jIPAddressList, + jContentAppList, targetVideoPlayerInfo->GetNumIPs(), jIPAddressList, hostName, targetVideoPlayerInfo->GetOperationalDeviceProxy() != nullptr); } return CHIP_NO_ERROR; @@ -259,7 +268,7 @@ CHIP_ERROR convertJDiscoveredNodeDataToCppDiscoveredNodeData(jobject jDiscovered env->GetStringUTFChars(jHostName, 0)); } - jfieldID getInstanceNameField = env->GetFieldID(jDiscoveredNodeDataClass, "deviceName", "Ljava/lang/String;"); + jfieldID getInstanceNameField = env->GetFieldID(jDiscoveredNodeDataClass, "instanceName", "Ljava/lang/String;"); jstring jInstanceName = static_cast(env->GetObjectField(jDiscoveredNodeData, getInstanceNameField)); if (jInstanceName != nullptr) { @@ -289,8 +298,11 @@ CHIP_ERROR convertJDiscoveredNodeDataToCppDiscoveredNodeData(jobject jDiscovered jfieldID getDeviceNameField = env->GetFieldID(jDiscoveredNodeDataClass, "deviceName", "Ljava/lang/String;"); jstring jDeviceName = static_cast(env->GetObjectField(jDiscoveredNodeData, getDeviceNameField)); - chip::Platform::CopyString(outCppDiscoveredNodeData.commissionData.deviceName, chip::Dnssd::kMaxDeviceNameLen + 1, - env->GetStringUTFChars(jDeviceName, 0)); + if (jDeviceName != nullptr) + { + chip::Platform::CopyString(outCppDiscoveredNodeData.commissionData.deviceName, chip::Dnssd::kMaxDeviceNameLen + 1, + env->GetStringUTFChars(jDeviceName, 
0)); + } // TODO: map rotating ID jfieldID jRotatingIdLenField = env->GetFieldID(jDiscoveredNodeDataClass, "rotatingIdLen", "I"); diff --git a/examples/tv-casting-app/android/App/app/src/main/jni/cpp/TvCastingApp-JNI.cpp b/examples/tv-casting-app/android/App/app/src/main/jni/cpp/TvCastingApp-JNI.cpp index b6eb30b142d4d6..66a710d137ff7d 100644 --- a/examples/tv-casting-app/android/App/app/src/main/jni/cpp/TvCastingApp-JNI.cpp +++ b/examples/tv-casting-app/android/App/app/src/main/jni/cpp/TvCastingApp-JNI.cpp @@ -209,7 +209,7 @@ JNI_METHOD(jboolean, verifyOrEstablishConnection) [](CHIP_ERROR err) { TvCastingAppJNIMgr().getOnConnectionFailureHandler(true).Handle(err); }, [](TargetEndpointInfo * endpoint) { TvCastingAppJNIMgr().getOnNewOrUpdatedEndpointHandler(true).Handle(endpoint); }); VerifyOrExit(CHIP_NO_ERROR == err, - ChipLogError(AppServer, "CastingServer::OpenBasicCommissioningWindow failed: %" CHIP_ERROR_FORMAT, err.Format())); + ChipLogError(AppServer, "CastingServer::verifyOrEstablishConnection failed: %" CHIP_ERROR_FORMAT, err.Format())); exit: return (err == CHIP_NO_ERROR); diff --git a/examples/tv-casting-app/darwin/MatterTvCastingBridge/MatterTvCastingBridge/CastingServerBridge.mm b/examples/tv-casting-app/darwin/MatterTvCastingBridge/MatterTvCastingBridge/CastingServerBridge.mm index 3b9204cdaf5436..73dcec1946c8b5 100644 --- a/examples/tv-casting-app/darwin/MatterTvCastingBridge/MatterTvCastingBridge/CastingServerBridge.mm +++ b/examples/tv-casting-app/darwin/MatterTvCastingBridge/MatterTvCastingBridge/CastingServerBridge.mm @@ -33,6 +33,14 @@ #include #include +#ifndef CHIP_DEVICE_CONFIG_USE_TEST_SETUP_PIN_CODE +#define CHIP_DEVICE_CONFIG_USE_TEST_SETUP_PIN_CODE 20202021 +#endif + +#ifndef CHIP_DEVICE_CONFIG_USE_TEST_SETUP_DISCRIMINATOR +#define CHIP_DEVICE_CONFIG_USE_TEST_SETUP_DISCRIMINATOR 0xF00 +#endif + @interface CastingServerBridge () @property AppParameters * appParameters; @@ -614,7 +622,8 @@ - (void)stopMatterServer 
self->_previouslyConnectedVideoPlayer->Initialize(currentTargetVideoPlayerInfo->GetNodeId(), currentTargetVideoPlayerInfo->GetFabricIndex(), nullptr, nullptr, currentTargetVideoPlayerInfo->GetVendorId(), currentTargetVideoPlayerInfo->GetProductId(), currentTargetVideoPlayerInfo->GetDeviceType(), - currentTargetVideoPlayerInfo->GetDeviceName(), currentTargetVideoPlayerInfo->GetNumIPs(), + currentTargetVideoPlayerInfo->GetDeviceName(), currentTargetVideoPlayerInfo->GetHostName(), + currentTargetVideoPlayerInfo->GetNumIPs(), const_cast(currentTargetVideoPlayerInfo->GetIpAddresses())); TargetEndpointInfo * prevEndpoints = self->_previouslyConnectedVideoPlayer->GetEndpoints(); diff --git a/examples/tv-casting-app/darwin/MatterTvCastingBridge/MatterTvCastingBridge/CommissionableDataProviderImpl.mm b/examples/tv-casting-app/darwin/MatterTvCastingBridge/MatterTvCastingBridge/CommissionableDataProviderImpl.mm index c1e7c6ef9b6120..40c2efcd99cb65 100644 --- a/examples/tv-casting-app/darwin/MatterTvCastingBridge/MatterTvCastingBridge/CommissionableDataProviderImpl.mm +++ b/examples/tv-casting-app/darwin/MatterTvCastingBridge/MatterTvCastingBridge/CommissionableDataProviderImpl.mm @@ -33,6 +33,10 @@ namespace { +#ifndef CHIP_DEVICE_CONFIG_USE_TEST_SPAKE2P_ITERATION_COUNT +#define CHIP_DEVICE_CONFIG_USE_TEST_SPAKE2P_ITERATION_COUNT 1000 +#endif + CHIP_ERROR GeneratePaseSalt(std::vector & spake2pSaltVector) { constexpr size_t kSaltLen = kSpake2p_Max_PBKDF_Salt_Length; diff --git a/examples/tv-casting-app/darwin/MatterTvCastingBridge/MatterTvCastingBridge/ConversionUtils.mm b/examples/tv-casting-app/darwin/MatterTvCastingBridge/MatterTvCastingBridge/ConversionUtils.mm index f71cab0bf0e1c6..3f98f96d2286c3 100644 --- a/examples/tv-casting-app/darwin/MatterTvCastingBridge/MatterTvCastingBridge/ConversionUtils.mm +++ b/examples/tv-casting-app/darwin/MatterTvCastingBridge/MatterTvCastingBridge/ConversionUtils.mm @@ -54,16 +54,22 @@ + 
(CHIP_ERROR)convertToCppDiscoveredNodeDataFrom:(DiscoveredNodeData * _Nonnull) outDiscoveredNodeData.commissionData.longDiscriminator = objCDiscoveredNodeData.longDiscriminator; outDiscoveredNodeData.commissionData.commissioningMode = objCDiscoveredNodeData.commissioningMode; outDiscoveredNodeData.commissionData.pairingHint = objCDiscoveredNodeData.pairingHint; - chip::Platform::CopyString(outDiscoveredNodeData.commissionData.deviceName, chip::Dnssd::kMaxDeviceNameLen + 1, - [objCDiscoveredNodeData.deviceName UTF8String]); + memset(outDiscoveredNodeData.commissionData.deviceName, '\0', sizeof(outDiscoveredNodeData.commissionData.deviceName)); + if (objCDiscoveredNodeData.deviceName != nullptr) { + chip::Platform::CopyString(outDiscoveredNodeData.commissionData.deviceName, chip::Dnssd::kMaxDeviceNameLen + 1, + [objCDiscoveredNodeData.deviceName UTF8String]); + } outDiscoveredNodeData.commissionData.rotatingIdLen = objCDiscoveredNodeData.rotatingIdLen; memcpy( outDiscoveredNodeData.commissionData.rotatingId, objCDiscoveredNodeData.rotatingId, objCDiscoveredNodeData.rotatingIdLen); // setting CommonResolutionData outDiscoveredNodeData.resolutionData.port = objCDiscoveredNodeData.port; - chip::Platform::CopyString(outDiscoveredNodeData.resolutionData.hostName, chip::Dnssd::kHostNameMaxLength + 1, - [objCDiscoveredNodeData.hostName UTF8String]); + memset(outDiscoveredNodeData.resolutionData.hostName, '\0', sizeof(outDiscoveredNodeData.resolutionData.hostName)); + if (objCDiscoveredNodeData.hostName != nullptr) { + chip::Platform::CopyString(outDiscoveredNodeData.resolutionData.hostName, chip::Dnssd::kHostNameMaxLength + 1, + [objCDiscoveredNodeData.hostName UTF8String]); + } outDiscoveredNodeData.resolutionData.interfaceId = chip::Inet::InterfaceId(objCDiscoveredNodeData.platformInterface); outDiscoveredNodeData.resolutionData.numIPs = objCDiscoveredNodeData.numIPs; for (size_t i = 0; i < objCDiscoveredNodeData.numIPs; i++) { diff --git 
a/examples/tv-casting-app/tv-casting-common/include/CHIPProjectAppConfig.h b/examples/tv-casting-app/tv-casting-common/include/CHIPProjectAppConfig.h index 641cf0d120be0c..748c048730c7ce 100644 --- a/examples/tv-casting-app/tv-casting-common/include/CHIPProjectAppConfig.h +++ b/examples/tv-casting-app/tv-casting-common/include/CHIPProjectAppConfig.h @@ -33,8 +33,6 @@ #define CHIP_CONFIG_KVS_PATH "/tmp/chip_casting_kvs" #endif -#include - #define CHIP_DEVICE_CONFIG_ENABLE_COMMISSIONER_DISCOVERY 0 #define CHIP_DEVICE_CONFIG_ENABLE_COMMISSIONER_DISCOVERY_CLIENT 1 @@ -61,3 +59,12 @@ #define CHIP_ENABLE_ROTATING_DEVICE_ID 1 #define CHIP_DEVICE_CONFIG_ROTATING_DEVICE_ID_UNIQUE_ID_LENGTH 128 + +// Disable this since it should not be enabled for production setups +#define CHIP_DEVICE_CONFIG_ENABLE_TEST_SETUP_PARAMS 0 + +#define CHIP_DEVICE_CONFIG_DYNAMIC_ENDPOINT_COUNT 4 + +// Include the CHIPProjectConfig from config/standalone +// Add this at the end so that we can hit our #defines first +#include diff --git a/examples/tv-casting-app/tv-casting-common/include/CastingServer.h b/examples/tv-casting-app/tv-casting-common/include/CastingServer.h index f67b4d9df93456..3430a4f3bac7f1 100644 --- a/examples/tv-casting-app/tv-casting-common/include/CastingServer.h +++ b/examples/tv-casting-app/tv-casting-common/include/CastingServer.h @@ -97,6 +97,7 @@ class CastingServer std::function onConnectionFailure, std::function onNewOrUpdatedEndpoint); + void LogCachedVideoPlayers(); CHIP_ERROR PurgeVideoPlayerCache(); /** @@ -436,8 +437,9 @@ class CastingServer TargetVideoPlayerInfo mCachedTargetVideoPlayerInfo[kMaxCachedVideoPlayers]; uint16_t mTargetVideoPlayerVendorId = 0; uint16_t mTargetVideoPlayerProductId = 0; - uint16_t mTargetVideoPlayerDeviceType = 0; + chip::DeviceTypeId mTargetVideoPlayerDeviceType = 0; char mTargetVideoPlayerDeviceName[chip::Dnssd::kMaxDeviceNameLen + 1] = {}; + char mTargetVideoPlayerHostName[chip::Dnssd::kHostNameMaxLength + 1] = {}; size_t 
mTargetVideoPlayerNumIPs = 0; // number of valid IP addresses chip::Inet::IPAddress mTargetVideoPlayerIpAddress[chip::Dnssd::CommonResolutionData::kMaxIPAddresses]; diff --git a/examples/tv-casting-app/tv-casting-common/include/PersistenceManager.h b/examples/tv-casting-app/tv-casting-common/include/PersistenceManager.h index b1e7bd6c58130d..6d3f36b7801416 100644 --- a/examples/tv-casting-app/tv-casting-common/include/PersistenceManager.h +++ b/examples/tv-casting-app/tv-casting-common/include/PersistenceManager.h @@ -49,6 +49,7 @@ class PersistenceManager kVideoPlayerProductIdTag, kVideoPlayerDeviceTypeIdTag, kVideoPlayerDeviceNameTag, + kVideoPlayerHostNameTag, kVideoPlayerNumIPsTag, kVideoPlayerIPAddressTag, kIpAddressesContainerTag, diff --git a/examples/tv-casting-app/tv-casting-common/include/TargetVideoPlayerInfo.h b/examples/tv-casting-app/tv-casting-common/include/TargetVideoPlayerInfo.h index f98116f3c7ca27..c0722dc0f2dcad 100644 --- a/examples/tv-casting-app/tv-casting-common/include/TargetVideoPlayerInfo.h +++ b/examples/tv-casting-app/tv-casting-common/include/TargetVideoPlayerInfo.h @@ -25,32 +25,67 @@ constexpr size_t kMaxNumberOfEndpoints = 5; +class TargetVideoPlayerInfo; +class VideoPlayerConnectionContext +{ +public: + VideoPlayerConnectionContext(TargetVideoPlayerInfo * targetVideoPlayerInfo, chip::OnDeviceConnected handleDeviceConnected, + chip::OnDeviceConnectionFailure handleConnectionFailure, + std::function onConnectionSuccess, + std::function onConnectionFailure) + { + mTargetVideoPlayerInfo = targetVideoPlayerInfo; + mOnConnectedCallback = new chip::Callback::Callback(handleDeviceConnected, this); + mOnConnectionFailureCallback = new chip::Callback::Callback(handleConnectionFailure, this); + mOnConnectionSuccessClientCallback = onConnectionSuccess; + mOnConnectionFailureClientCallback = onConnectionFailure; + } + + ~VideoPlayerConnectionContext() + { + if (mOnConnectedCallback != nullptr) + { + delete mOnConnectedCallback; + } + + if 
(mOnConnectionFailureCallback != nullptr) + { + delete mOnConnectionFailureCallback; + } + } + + TargetVideoPlayerInfo * mTargetVideoPlayerInfo; + chip::Callback::Callback * mOnConnectedCallback = nullptr; + chip::Callback::Callback * mOnConnectionFailureCallback = nullptr; + std::function mOnConnectionSuccessClientCallback = {}; + std::function mOnConnectionFailureClientCallback = {}; +}; + class TargetVideoPlayerInfo { public: - TargetVideoPlayerInfo() : - mOnConnectedCallback(HandleDeviceConnected, this), mOnConnectionFailureCallback(HandleDeviceConnectionFailure, this) - {} + TargetVideoPlayerInfo() {} bool operator==(const TargetVideoPlayerInfo & other) { return this->mNodeId == other.mNodeId; } bool IsInitialized() { return mInitialized; } uint16_t GetVendorId() const { return mVendorId; } uint16_t GetProductId() const { return mProductId; } - uint16_t GetDeviceType() const { return mDeviceType; } + chip::DeviceTypeId GetDeviceType() const { return mDeviceType; } chip::NodeId GetNodeId() const { return mNodeId; } chip::FabricIndex GetFabricIndex() const { return mFabricIndex; } const char * GetDeviceName() const { return mDeviceName; } + const char * GetHostName() const { return mHostName; } size_t GetNumIPs() const { return mNumIPs; } const chip::Inet::IPAddress * GetIpAddresses() const { return mIpAddress; } bool IsSameAs(const chip::Dnssd::DiscoveredNodeData * discoveredNodeData); - bool IsSameAs(const char * deviceName, size_t numIPs, const chip::Inet::IPAddress * ipAddresses); + bool IsSameAs(const char * hostName, const char * deviceName, size_t numIPs, const chip::Inet::IPAddress * ipAddresses); chip::OperationalDeviceProxy * GetOperationalDeviceProxy() { - if (mDeviceProxy.ConnectionReady()) + if (mDeviceProxy != nullptr && mDeviceProxy->ConnectionReady()) { - return &mDeviceProxy; + return mDeviceProxy; } return nullptr; } @@ -58,8 +93,8 @@ class TargetVideoPlayerInfo CHIP_ERROR Initialize(chip::NodeId nodeId, chip::FabricIndex fabricIndex, 
std::function onConnectionSuccess, std::function onConnectionFailure, uint16_t vendorId = 0, uint16_t productId = 0, - uint16_t deviceType = 0, const char * deviceName = {}, size_t numIPs = 0, - chip::Inet::IPAddress * ipAddressList = nullptr); + chip::DeviceTypeId deviceType = 0, const char * deviceName = {}, const char * hostName = {}, + size_t numIPs = 0, chip::Inet::IPAddress * ipAddressList = nullptr); CHIP_ERROR FindOrEstablishCASESession(std::function onConnectionSuccess, std::function onConnectionFailure); TargetEndpointInfo * GetOrAddEndpoint(chip::EndpointId endpointId); @@ -72,19 +107,33 @@ class TargetVideoPlayerInfo static void HandleDeviceConnected(void * context, chip::Messaging::ExchangeManager & exchangeMgr, const chip::SessionHandle & sessionHandle) { - TargetVideoPlayerInfo * _this = static_cast(context); - _this->mDeviceProxy = chip::OperationalDeviceProxy(&exchangeMgr, sessionHandle); - _this->mInitialized = true; + ChipLogProgress(AppServer, "tmplog: HandleDeviceConnected called"); + VideoPlayerConnectionContext * connectionContext = static_cast(context); + if (connectionContext == nullptr || connectionContext->mTargetVideoPlayerInfo == nullptr) + { + ChipLogError(AppServer, "HandleDeviceConnected called with null context or null context.targetVideoPlayerInfo"); + return; + } + if (connectionContext->mTargetVideoPlayerInfo->mDeviceProxy != nullptr) + { + ChipLogProgress(AppServer, "HandleDeviceConnected deleting mDeviceProxy"); + delete connectionContext->mTargetVideoPlayerInfo->mDeviceProxy; + ChipLogProgress(AppServer, "HandleDeviceConnected deleted mDeviceProxy"); + } + connectionContext->mTargetVideoPlayerInfo->mDeviceProxy = new chip::OperationalDeviceProxy(&exchangeMgr, sessionHandle); + connectionContext->mTargetVideoPlayerInfo->mInitialized = true; ChipLogProgress(AppServer, "HandleDeviceConnected created an instance of OperationalDeviceProxy for nodeId: 0x" ChipLogFormatX64 ", fabricIndex: %d", - ChipLogValueX64(_this->GetNodeId()), 
_this->GetFabricIndex()); + ChipLogValueX64(connectionContext->mTargetVideoPlayerInfo->GetNodeId()), + connectionContext->mTargetVideoPlayerInfo->GetFabricIndex()); - if (_this->mOnConnectionSuccessClientCallback) + if (connectionContext->mOnConnectionSuccessClientCallback) { ChipLogProgress(AppServer, "HandleDeviceConnected calling mOnConnectionSuccessClientCallback"); - _this->mOnConnectionSuccessClientCallback(_this); + connectionContext->mOnConnectionSuccessClientCallback(connectionContext->mTargetVideoPlayerInfo); } + delete connectionContext; } static void HandleDeviceConnectionFailure(void * context, const chip::ScopedNodeId & peerId, CHIP_ERROR error) @@ -93,30 +142,35 @@ class TargetVideoPlayerInfo "HandleDeviceConnectionFailure called for peerId.nodeId: 0x" ChipLogFormatX64 ", peer.fabricIndex: %d with error: %" CHIP_ERROR_FORMAT, ChipLogValueX64(peerId.GetNodeId()), peerId.GetFabricIndex(), error.Format()); - TargetVideoPlayerInfo * _this = static_cast(context); - _this->mDeviceProxy = chip::OperationalDeviceProxy(); - if (_this->mOnConnectionFailureClientCallback) + VideoPlayerConnectionContext * connectionContext = static_cast(context); + if (connectionContext == nullptr || connectionContext->mTargetVideoPlayerInfo == nullptr) + { + ChipLogError(AppServer, "HandleDeviceConnectionFailure called with null context"); + return; + } + if (connectionContext->mTargetVideoPlayerInfo->mDeviceProxy != nullptr) + { + delete connectionContext->mTargetVideoPlayerInfo->mDeviceProxy; + } + connectionContext->mTargetVideoPlayerInfo->mDeviceProxy = new chip::OperationalDeviceProxy(); + if (connectionContext->mOnConnectionFailureClientCallback) { ChipLogProgress(AppServer, "HandleDeviceConnectionFailure calling mOnConnectionFailureClientCallback"); - _this->mOnConnectionFailureClientCallback(error); + connectionContext->mOnConnectionFailureClientCallback(error); } + delete connectionContext; } TargetEndpointInfo mEndpoints[kMaxNumberOfEndpoints]; chip::NodeId mNodeId; 
chip::FabricIndex mFabricIndex; - chip::OperationalDeviceProxy mDeviceProxy; + chip::OperationalDeviceProxy * mDeviceProxy = nullptr; uint16_t mVendorId = 0; uint16_t mProductId = 0; - uint16_t mDeviceType = 0; + chip::DeviceTypeId mDeviceType = 0; char mDeviceName[chip::Dnssd::kMaxDeviceNameLen + 1] = {}; + char mHostName[chip::Dnssd::kHostNameMaxLength + 1] = {}; size_t mNumIPs = 0; // number of valid IP addresses chip::Inet::IPAddress mIpAddress[chip::Dnssd::CommonResolutionData::kMaxIPAddresses]; - - chip::Callback::Callback mOnConnectedCallback; - chip::Callback::Callback mOnConnectionFailureCallback; - std::function mOnConnectionSuccessClientCallback; - std::function mOnConnectionFailureClientCallback; - bool mInitialized = false; }; diff --git a/examples/tv-casting-app/tv-casting-common/src/CastingServer.cpp b/examples/tv-casting-app/tv-casting-common/src/CastingServer.cpp index fa4138deeeba43..b6f90d9a8f5451 100644 --- a/examples/tv-casting-app/tv-casting-common/src/CastingServer.cpp +++ b/examples/tv-casting-app/tv-casting-common/src/CastingServer.cpp @@ -162,6 +162,8 @@ CHIP_ERROR CastingServer::SendUserDirectedCommissioningRequest(Dnssd::Discovered } chip::Platform::CopyString(mTargetVideoPlayerDeviceName, chip::Dnssd::kMaxDeviceNameLen + 1, selectedCommissioner->commissionData.deviceName); + chip::Platform::CopyString(mTargetVideoPlayerHostName, chip::Dnssd::kHostNameMaxLength + 1, + selectedCommissioner->resolutionData.hostName); return CHIP_NO_ERROR; } #endif // CHIP_DEVICE_CONFIG_ENABLE_COMMISSIONER_DISCOVERY_CLIENT @@ -293,11 +295,22 @@ TargetVideoPlayerInfo * CastingServer::ReadCachedTargetVideoPlayerInfos() return mCachedTargetVideoPlayerInfo; } +void CastingServer::LogCachedVideoPlayers() +{ + ChipLogProgress(AppServer, "CastingServer:LogCachedVideoPlayers dumping any/all cached video players."); + for (size_t i = 0; i < kMaxCachedVideoPlayers && mCachedTargetVideoPlayerInfo[i].IsInitialized(); i++) + { + 
mCachedTargetVideoPlayerInfo[i].PrintInfo(); + } +} + CHIP_ERROR CastingServer::VerifyOrEstablishConnection(TargetVideoPlayerInfo & targetVideoPlayerInfo, std::function onConnectionSuccess, std::function onConnectionFailure, std::function onNewOrUpdatedEndpoint) { + LogCachedVideoPlayers(); + if (!targetVideoPlayerInfo.IsInitialized()) { return CHIP_ERROR_INVALID_ARGUMENT; @@ -314,7 +327,8 @@ CHIP_ERROR CastingServer::VerifyOrEstablishConnection(TargetVideoPlayerInfo & ta prevDeviceProxy->Disconnect(); } - return targetVideoPlayerInfo.FindOrEstablishCASESession( + CastingServer::GetInstance()->mActiveTargetVideoPlayerInfo = targetVideoPlayerInfo; + return CastingServer::GetInstance()->mActiveTargetVideoPlayerInfo.FindOrEstablishCASESession( [](TargetVideoPlayerInfo * videoPlayer) { ChipLogProgress(AppServer, "CastingServer::OnConnectionSuccess lambda called"); CastingServer::GetInstance()->mActiveTargetVideoPlayerInfo = *videoPlayer; @@ -412,7 +426,8 @@ void CastingServer::DeviceEventCallback(const DeviceLayer::ChipDeviceEvent * eve CastingServer::GetInstance()->mOnConnectionFailureClientCallback, CastingServer::GetInstance()->mTargetVideoPlayerVendorId, CastingServer::GetInstance()->mTargetVideoPlayerProductId, CastingServer::GetInstance()->mTargetVideoPlayerDeviceType, CastingServer::GetInstance()->mTargetVideoPlayerDeviceName, - CastingServer::GetInstance()->mTargetVideoPlayerNumIPs, CastingServer::GetInstance()->mTargetVideoPlayerIpAddress); + CastingServer::GetInstance()->mTargetVideoPlayerHostName, CastingServer::GetInstance()->mTargetVideoPlayerNumIPs, + CastingServer::GetInstance()->mTargetVideoPlayerIpAddress); if (err != CHIP_NO_ERROR) { diff --git a/examples/tv-casting-app/tv-casting-common/src/PersistenceManager.cpp b/examples/tv-casting-app/tv-casting-common/src/PersistenceManager.cpp index dea0277064b30c..b5c24ea6f5b9d1 100644 --- a/examples/tv-casting-app/tv-casting-common/src/PersistenceManager.cpp +++ 
b/examples/tv-casting-app/tv-casting-common/src/PersistenceManager.cpp @@ -98,6 +98,10 @@ CHIP_ERROR PersistenceManager::WriteAllVideoPlayers(TargetVideoPlayerInfo videoP ReturnErrorOnFailure(tlvWriter.PutBytes(TLV::ContextTag(kVideoPlayerDeviceNameTag), (const uint8_t *) videoPlayer->GetDeviceName(), static_cast(strlen(videoPlayer->GetDeviceName()) + 1))); + ReturnErrorOnFailure(tlvWriter.PutBytes(TLV::ContextTag(kVideoPlayerHostNameTag), + (const uint8_t *) videoPlayer->GetHostName(), + static_cast(strlen(videoPlayer->GetHostName()) + 1))); + ReturnErrorOnFailure( tlvWriter.Put(TLV::ContextTag(kVideoPlayerNumIPsTag), static_cast(videoPlayer->GetNumIPs()))); const Inet::IPAddress * ipAddress = videoPlayer->GetIpAddresses(); @@ -205,6 +209,7 @@ CHIP_ERROR PersistenceManager::ReadAllVideoPlayers(TargetVideoPlayerInfo outVide uint16_t productId = 0; uint16_t deviceType = 0; char deviceName[chip::Dnssd::kMaxDeviceNameLen + 1] = {}; + char hostName[chip::Dnssd::kHostNameMaxLength + 1] = {}; size_t numIPs = 0; Inet::IPAddress ipAddress[chip::Dnssd::CommonResolutionData::kMaxIPAddresses]; CHIP_ERROR err; @@ -254,6 +259,12 @@ CHIP_ERROR PersistenceManager::ReadAllVideoPlayers(TargetVideoPlayerInfo outVide continue; } + if (videoPlayersContainerTagNum == kVideoPlayerHostNameTag) + { + ReturnErrorOnFailure(reader.GetBytes(reinterpret_cast(hostName), chip::Dnssd::kHostNameMaxLength + 1)); + continue; + } + if (videoPlayersContainerTagNum == kVideoPlayerNumIPsTag) { ReturnErrorOnFailure(reader.Get(reinterpret_cast(numIPs))); @@ -301,7 +312,7 @@ CHIP_ERROR PersistenceManager::ReadAllVideoPlayers(TargetVideoPlayerInfo outVide if (videoPlayersContainerTagNum == kContentAppEndpointsContainerTag) { outVideoPlayers[videoPlayerIndex].Initialize(nodeId, fabricIndex, nullptr, nullptr, vendorId, productId, deviceType, - deviceName, numIPs, ipAddress); + deviceName, hostName, numIPs, ipAddress); // Entering Content App Endpoints container TLV::TLVType 
contentAppEndpointArrayContainerType = TLV::kTLVType_Array; ReturnErrorOnFailure(reader.EnterContainer(contentAppEndpointArrayContainerType)); diff --git a/examples/tv-casting-app/tv-casting-common/src/TargetVideoPlayerInfo.cpp b/examples/tv-casting-app/tv-casting-common/src/TargetVideoPlayerInfo.cpp index f21fde3883256a..ae51105a96d3bb 100644 --- a/examples/tv-casting-app/tv-casting-common/src/TargetVideoPlayerInfo.cpp +++ b/examples/tv-casting-app/tv-casting-common/src/TargetVideoPlayerInfo.cpp @@ -26,8 +26,8 @@ CASEClientPool gCASEClientPool; CHIP_ERROR TargetVideoPlayerInfo::Initialize(NodeId nodeId, FabricIndex fabricIndex, std::function onConnectionSuccess, std::function onConnectionFailure, uint16_t vendorId, - uint16_t productId, uint16_t deviceType, const char * deviceName, size_t numIPs, - chip::Inet::IPAddress * ipAddress) + uint16_t productId, chip::DeviceTypeId deviceType, const char * deviceName, + const char * hostName, size_t numIPs, chip::Inet::IPAddress * ipAddress) { ChipLogProgress(NotSpecified, "TargetVideoPlayerInfo nodeId=0x" ChipLogFormatX64 " fabricIndex=%d", ChipLogValueX64(nodeId), fabricIndex); @@ -42,7 +42,18 @@ CHIP_ERROR TargetVideoPlayerInfo::Initialize(NodeId nodeId, FabricIndex fabricIn mIpAddress[i] = ipAddress[i]; } - chip::Platform::CopyString(mDeviceName, chip::Dnssd::kMaxDeviceNameLen + 1, deviceName); + memset(mDeviceName, '\0', sizeof(mDeviceName)); + if (deviceName != nullptr) + { + chip::Platform::CopyString(mDeviceName, chip::Dnssd::kMaxDeviceNameLen, deviceName); + } + + memset(mHostName, '\0', sizeof(mHostName)); + if (hostName != nullptr) + { + chip::Platform::CopyString(mHostName, chip::Dnssd::kHostNameMaxLength, hostName); + } + for (auto & endpointInfo : mEndpoints) { endpointInfo.Reset(); @@ -60,11 +71,14 @@ CHIP_ERROR TargetVideoPlayerInfo::Initialize(NodeId nodeId, FabricIndex fabricIn CHIP_ERROR TargetVideoPlayerInfo::FindOrEstablishCASESession(std::function onConnectionSuccess, std::function 
onConnectionFailure) { - mOnConnectionSuccessClientCallback = onConnectionSuccess; - mOnConnectionFailureClientCallback = onConnectionFailure; - Server * server = &(chip::Server::GetInstance()); - server->GetCASESessionManager()->FindOrEstablishSession(ScopedNodeId(mNodeId, mFabricIndex), &mOnConnectedCallback, - &mOnConnectionFailureCallback); + ChipLogProgress(AppServer, "TargetVideoPlayerInfo::FindOrEstablishCASESession called"); + + VideoPlayerConnectionContext * connectionContext = new VideoPlayerConnectionContext( + this, HandleDeviceConnected, HandleDeviceConnectionFailure, onConnectionSuccess, onConnectionFailure); + Server * server = &(chip::Server::GetInstance()); + server->GetCASESessionManager()->FindOrEstablishSession(ScopedNodeId(mNodeId, mFabricIndex), + connectionContext->mOnConnectedCallback, + connectionContext->mOnConnectionFailureCallback); return CHIP_NO_ERROR; } @@ -117,8 +131,8 @@ bool TargetVideoPlayerInfo::HasEndpoint(EndpointId endpointId) void TargetVideoPlayerInfo::PrintInfo() { - ChipLogProgress(NotSpecified, " TargetVideoPlayerInfo nodeId=0x" ChipLogFormatX64 " fabric index=%d", ChipLogValueX64(mNodeId), - mFabricIndex); + ChipLogProgress(NotSpecified, " TargetVideoPlayerInfo deviceName=%s nodeId=0x" ChipLogFormatX64 " fabric index=%d", mDeviceName, + ChipLogValueX64(mNodeId), mFabricIndex); for (auto & endpointInfo : mEndpoints) { if (endpointInfo.IsInitialized()) @@ -128,8 +142,15 @@ void TargetVideoPlayerInfo::PrintInfo() } } -bool TargetVideoPlayerInfo::IsSameAs(const char * deviceName, size_t numIPs, const chip::Inet::IPAddress * ipAddresses) +bool TargetVideoPlayerInfo::IsSameAs(const char * hostName, const char * deviceName, size_t numIPs, + const chip::Inet::IPAddress * ipAddresses) { + // return true if the hostNames match + if (strcmp(mHostName, hostName) == 0) + { + return true; + } + // return false because deviceNames are different if (strcmp(mDeviceName, deviceName) != 0) { @@ -173,6 +194,6 @@ bool 
TargetVideoPlayerInfo::IsSameAs(const chip::Dnssd::DiscoveredNodeData * dis return false; } - return IsSameAs(discoveredNodeData->commissionData.deviceName, discoveredNodeData->resolutionData.numIPs, - discoveredNodeData->resolutionData.ipAddress); + return IsSameAs(discoveredNodeData->resolutionData.hostName, discoveredNodeData->commissionData.deviceName, + discoveredNodeData->resolutionData.numIPs, discoveredNodeData->resolutionData.ipAddress); } diff --git a/examples/tv-casting-app/tv-casting-common/tv-casting-app.matter b/examples/tv-casting-app/tv-casting-common/tv-casting-app.matter index da4468d774d6b4..ae80a0051bf8ca 100644 --- a/examples/tv-casting-app/tv-casting-common/tv-casting-app.matter +++ b/examples/tv-casting-app/tv-casting-common/tv-casting-app.matter @@ -799,7 +799,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -814,14 +814,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/tv-casting-app/tv-casting-common/tv-casting-app.zap b/examples/tv-casting-app/tv-casting-common/tv-casting-app.zap index 9783884a26b4b6..a4f8afd300aa3c 100644 --- a/examples/tv-casting-app/tv-casting-common/tv-casting-app.zap +++ b/examples/tv-casting-app/tv-casting-common/tv-casting-app.zap @@ -1722,7 +1722,7 @@ 
"code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1738,7 +1738,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/examples/window-app/common/window-app.matter b/examples/window-app/common/window-app.matter index f4047a4373489c..76f47712d12acc 100644 --- a/examples/window-app/common/window-app.matter +++ b/examples/window-app/common/window-app.matter @@ -523,7 +523,7 @@ server cluster LocalizationConfiguration = 43 { } server cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -538,14 +538,14 @@ server cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/examples/window-app/common/window-app.zap b/examples/window-app/common/window-app.zap index b65392a74938d5..e58cbfacb7c805 100644 --- a/examples/window-app/common/window-app.zap +++ b/examples/window-app/common/window-app.zap @@ -2167,7 +2167,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -2183,7 +2183,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", 
"included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/integrations/cloudbuild/build-all.yaml b/integrations/cloudbuild/build-all.yaml index 7aa42e5a2cfa3c..38cb327f288191 100644 --- a/integrations/cloudbuild/build-all.yaml +++ b/integrations/cloudbuild/build-all.yaml @@ -6,7 +6,7 @@ steps: - "--init" - "--recursive" id: Submodules - - name: "connectedhomeip/chip-build-vscode:0.6.40" + - name: "connectedhomeip/chip-build-vscode:0.6.44" env: - PW_ENVIRONMENT_ROOT=/pwenv args: @@ -21,7 +21,7 @@ steps: path: /pwenv timeout: 900s - - name: "connectedhomeip/chip-build-vscode:0.6.40" + - name: "connectedhomeip/chip-build-vscode:0.6.44" env: - PW_ENVIRONMENT_ROOT=/pwenv args: @@ -76,7 +76,7 @@ steps: --target k32w-shell build --create-archives /workspace/artifacts/ - - name: "connectedhomeip/chip-build-vscode:0.6.40" + - name: "connectedhomeip/chip-build-vscode:0.6.44" env: - PW_ENVIRONMENT_ROOT=/pwenv args: diff --git a/integrations/cloudbuild/chef.yaml b/integrations/cloudbuild/chef.yaml index ffd52ffbd23d05..48f556fcfc8adf 100644 --- a/integrations/cloudbuild/chef.yaml +++ b/integrations/cloudbuild/chef.yaml @@ -1,5 +1,5 @@ steps: - - name: "connectedhomeip/chip-build-vscode:0.6.40" + - name: "connectedhomeip/chip-build-vscode:0.6.44" env: - PW_ENVIRONMENT_ROOT=/pwenv args: @@ -12,7 +12,7 @@ steps: path: /pwenv timeout: 2700s - - name: "connectedhomeip/chip-build-vscode:0.6.40" + - name: "connectedhomeip/chip-build-vscode:0.6.44" env: - PW_ENVIRONMENT_ROOT=/pwenv args: @@ -26,7 +26,7 @@ steps: - name: pwenv path: /pwenv - - name: "connectedhomeip/chip-build-vscode:0.6.40" + - name: "connectedhomeip/chip-build-vscode:0.6.44" env: - PW_ENVIRONMENT_ROOT=/pwenv args: diff --git a/integrations/cloudbuild/smoke-test.yaml b/integrations/cloudbuild/smoke-test.yaml index 1b27bc67f808cc..5b2a39ae9322a2 100644 --- a/integrations/cloudbuild/smoke-test.yaml +++ b/integrations/cloudbuild/smoke-test.yaml @@ -1,5 +1,5 @@ steps: - - name: 
"connectedhomeip/chip-build-vscode:0.6.40" + - name: "connectedhomeip/chip-build-vscode:0.6.44" entrypoint: "bash" args: - "-c" @@ -7,7 +7,7 @@ steps: git config --global --add safe.directory "*" git submodule update --init --recursive id: Submodules - - name: "connectedhomeip/chip-build-vscode:0.6.40" + - name: "connectedhomeip/chip-build-vscode:0.6.44" env: - PW_ENVIRONMENT_ROOT=/pwenv args: @@ -22,7 +22,7 @@ steps: path: /pwenv timeout: 900s - - name: "connectedhomeip/chip-build-vscode:0.6.40" + - name: "connectedhomeip/chip-build-vscode:0.6.44" id: ESP32 env: - PW_ENVIRONMENT_ROOT=/pwenv @@ -40,7 +40,7 @@ steps: volumes: - name: pwenv path: /pwenv - - name: "connectedhomeip/chip-build-vscode:0.6.40" + - name: "connectedhomeip/chip-build-vscode:0.6.44" id: NRFConnect env: - PW_ENVIRONMENT_ROOT=/pwenv @@ -61,7 +61,7 @@ steps: - name: pwenv path: /pwenv - - name: "connectedhomeip/chip-build-vscode:0.6.40" + - name: "connectedhomeip/chip-build-vscode:0.6.44" id: EFR32 env: - PW_ENVIRONMENT_ROOT=/pwenv @@ -83,7 +83,7 @@ steps: - name: pwenv path: /pwenv - - name: "connectedhomeip/chip-build-vscode:0.6.40" + - name: "connectedhomeip/chip-build-vscode:0.6.44" id: Linux env: - PW_ENVIRONMENT_ROOT=/pwenv @@ -143,7 +143,7 @@ steps: - name: pwenv path: /pwenv - - name: "connectedhomeip/chip-build-vscode:0.6.40" + - name: "connectedhomeip/chip-build-vscode:0.6.44" id: Android env: - PW_ENVIRONMENT_ROOT=/pwenv @@ -151,7 +151,7 @@ steps: - >- ./scripts/build/build_examples.py --enable-flashbundle --target 'android-arm64-chip-tool' - build + build --create-archives /workspace/artifacts/ waitFor: - Bootstrap diff --git a/integrations/docker/images/chip-build-java/Dockerfile b/integrations/docker/images/chip-build-java/Dockerfile new file mode 100644 index 00000000000000..8329fb5dabd05e --- /dev/null +++ b/integrations/docker/images/chip-build-java/Dockerfile @@ -0,0 +1,13 @@ +ARG VERSION=latest +FROM connectedhomeip/chip-build:${VERSION} + +# Download and install kotlin 
compiler +RUN set -x \ + && cd /usr/lib \ + && wget -q https://github.com/JetBrains/kotlin/releases/download/v1.8.10/kotlin-compiler-1.8.10.zip \ + && unzip kotlin-compiler-*.zip \ + && rm kotlin-compiler-*.zip \ + && rm -f kotlinc/bin/*.bat \ + && : # last line + +ENV PATH $PATH:/usr/lib/kotlinc/bin diff --git a/integrations/docker/images/chip-build-java/build.sh b/integrations/docker/images/chip-build-java/build.sh new file mode 120000 index 00000000000000..fcb4d4ee75d531 --- /dev/null +++ b/integrations/docker/images/chip-build-java/build.sh @@ -0,0 +1 @@ +../../build.sh \ No newline at end of file diff --git a/integrations/docker/images/chip-build-java/run.sh b/integrations/docker/images/chip-build-java/run.sh new file mode 120000 index 00000000000000..ccbd3501b330d9 --- /dev/null +++ b/integrations/docker/images/chip-build-java/run.sh @@ -0,0 +1 @@ +../../run.sh \ No newline at end of file diff --git a/integrations/docker/images/chip-build-java/version b/integrations/docker/images/chip-build-java/version new file mode 120000 index 00000000000000..a4280acd348e7f --- /dev/null +++ b/integrations/docker/images/chip-build-java/version @@ -0,0 +1 @@ +../chip-build/version \ No newline at end of file diff --git a/integrations/docker/images/chip-build-tizen-qemu/Dockerfile b/integrations/docker/images/chip-build-tizen-qemu/Dockerfile index fb88bf618fd50c..1c0ae65f843da5 100644 --- a/integrations/docker/images/chip-build-tizen-qemu/Dockerfile +++ b/integrations/docker/images/chip-build-tizen-qemu/Dockerfile @@ -16,6 +16,8 @@ RUN set -x \ && DEBIAN_FRONTEND=noninteractive apt-get install -fy --no-install-recommends \ bc \ genisoimage \ + libgmp-dev \ + libmpc-dev \ qemu-system-arm \ # Cleanup && apt-get clean \ @@ -32,13 +34,14 @@ COPY files/0001-smack-add-permissive-mode.patch $TIZEN_SDK_ROOT/files/ RUN set -x \ && mkdir -p /tmp/workdir && cd /tmp/workdir \ # Download Linux rpi4 kernel - && wget --progress=dot:giga -r -nd --no-parent -A 'rpi4-linux-kernel-*.src.rpm' \ 
- http://download.tizen.org/snapshots/tizen/$TIZEN_VERSION-unified/latest/repos/standard/source/ \ + && wget --progress=dot:giga -r -nd --no-parent -e robots=off -A 'rpi4-linux-kernel-*.src.rpm' \ + http://download.tizen.org/snapshots/TIZEN/Tizen-$TIZEN_VERSION/Tizen-$TIZEN_VERSION-Unified/latest/repos/standard/source/ \ # Prepare kernel source (Linux kernel + Tizen patchset) && unrpm rpi4-linux-kernel-*.src.rpm \ + && rm rpi4-linux-kernel-*.src.rpm \ && tar -xJf linux-kernel-*.tar.xz \ && rm linux-kernel-*.tar.xz \ - && cd linux-kernel-* \ + && cd *linux-kernel-* \ && zcat ../*-to-*.diff.gz | patch -p1 \ && patch -p1 < $TIZEN_SDK_ROOT/files/0001-smack-add-permissive-mode.patch \ # Compile @@ -66,8 +69,8 @@ RUN set -x \ && mkdir -p /tmp/workdir && cd /tmp/workdir \ && SYSTEMD_SYSTEM=/usr/lib/systemd/system \ # Download Tizen images - && wget --progress=dot:giga -r -nd --no-parent -A 'tizen-*.tar.gz' \ - http://download.tizen.org/snapshots/tizen/unified/latest/images/standard/tizen-headless-armv7l/ \ + && wget --progress=dot:giga -r -nd --no-parent -e robots=off -A 'tizen-*.tar.gz' \ + http://download.tizen.org/snapshots/TIZEN/Tizen-$TIZEN_VERSION/Tizen-$TIZEN_VERSION-Unified/latest/images/standard/tizen-headless-armv7l/ \ # Unpack && tar -xzf tizen-*.tar.gz \ && mv system-data.img $TIZEN_IOT_IMAGE_DATA \ @@ -80,6 +83,8 @@ RUN set -x \ && guestfish --rw -a $TIZEN_IOT_IMAGE_ROOT -m /dev/sda glob copy-in \ $TIZEN_SDK_TOOLCHAIN/arm-tizen-linux-gnueabi/lib/libasan.so.* \ $TIZEN_SDK_TOOLCHAIN/arm-tizen-linux-gnueabi/lib/libubsan.so.* \ + $TIZEN_SDK_SYSROOT/usr/lib/libbluetooth-api.so.* \ + $TIZEN_SDK_SYSROOT/usr/lib/libcapi-network-bluetooth.so.* \ $TIZEN_SDK_SYSROOT/usr/lib/libcapi-network-thread.so.* \ $TIZEN_SDK_SYSROOT/usr/lib/libnsd-dns-sd.so.* \ /usr/lib/ \ diff --git a/integrations/docker/images/chip-build-tizen/Dockerfile b/integrations/docker/images/chip-build-tizen/Dockerfile index f0334106b6cbbe..dfa4ddfb37f915 100644 --- 
a/integrations/docker/images/chip-build-tizen/Dockerfile +++ b/integrations/docker/images/chip-build-tizen/Dockerfile @@ -30,7 +30,7 @@ RUN set -x \ # ------------------------------------------------------------------------------ # Install tizen -ENV TIZEN_VERSION 6.0 +ENV TIZEN_VERSION 7.0 ENV TIZEN_SDK_ROOT /opt/tizen-sdk COPY tizen-sdk-installer $TIZEN_SDK_ROOT/files/installer diff --git a/integrations/docker/images/chip-build-tizen/tizen-sdk-installer/install.sh b/integrations/docker/images/chip-build-tizen/tizen-sdk-installer/install.sh index 99fd9c3334d153..d072e936b14a64 100755 --- a/integrations/docker/images/chip-build-tizen/tizen-sdk-installer/install.sh +++ b/integrations/docker/images/chip-build-tizen/tizen-sdk-installer/install.sh @@ -20,8 +20,8 @@ set -e # Default settings options TIZEN_SDK_ROOT=/opt/tizen-sdk -TIZEN_SDK_DATA_PATH=~/tizen-sdk-data -TIZEN_VERSION=6.0 +TIZEN_SDK_DATA_PATH=$HOME/tizen-sdk-data +TIZEN_VERSION=7.0 SECRET_TOOL=false SCRIPT_NAME=$(basename -- "$(readlink -f "${BASH_SOURCE:?}")") @@ -41,7 +41,7 @@ fi # Help display function function show_help() { echo "Usage: $SCRIPT_NAME [ options .. 
]" - echo "Example: $SCRIPT_NAME --tizen-sdk-path ~/tizen-sdk --tizen-version 6.0 --install-dependencies" + echo "Example: $SCRIPT_NAME --tizen-sdk-path ~/tizen-sdk --tizen-version 7.0 --install-dependencies" echo echo "Options:" echo " -h, --help Display this information" @@ -92,7 +92,7 @@ function download() { for PKG in "${@:2}"; do PKGS+=("-A" "$PKG") done - wget -r -nd --no-parent --progress=dot:mega "${PKGS[@]}" "$1" + wget -r -nd --no-parent -e robots=off --progress=dot:mega "${PKGS[@]}" "$1" # Check if the files have been downloaded for PKG in "${@:2}"; do @@ -143,10 +143,10 @@ function install_tizen_sdk() { # Download URL="http://download.tizen.org/sdk/tizenstudio/official/binary/" PKG_ARR=( - 'certificate-encryptor_1.0.7_ubuntu-64.zip' + 'certificate-encryptor_1.0.10_ubuntu-64.zip' 'certificate-generator_0.1.3_ubuntu-64.zip' - 'new-common-cli_2.5.7_ubuntu-64.zip' - 'new-native-cli_2.5.7_ubuntu-64.zip' + 'new-common-cli_2.5.64_ubuntu-64.zip' + 'new-native-cli_2.5.64_ubuntu-64.zip' 'sdb_4.2.23_ubuntu-64.zip') download "$URL" "${PKG_ARR[@]}" @@ -172,7 +172,7 @@ function install_tizen_sdk() { download "$URL" "${PKG_ARR[@]}" # Base packages - URL="http://download.tizen.org/releases/milestone/tizen/base/latest/repos/standard/packages/armv7l/" + URL="http://download.tizen.org/releases/milestone/TIZEN/Tizen-$TIZEN_VERSION/Tizen-$TIZEN_VERSION-Base/latest/repos/standard/packages/armv7l/" PKG_ARR=( 'iniparser-*.armv7l.rpm' 'libblkid-devel-*.armv7l.rpm' @@ -189,8 +189,9 @@ function install_tizen_sdk() { download "$URL" "${PKG_ARR[@]}" # Unified packages - URL="http://download.tizen.org/releases/milestone/tizen/unified/latest/repos/standard/packages/armv7l/" + URL="http://download.tizen.org/releases/milestone/TIZEN/Tizen-$TIZEN_VERSION/Tizen-$TIZEN_VERSION-Unified/latest/repos/standard/packages/armv7l/" PKG_ARR=( + 'app-core-common-*.rpm' 'aul-0*.armv7l.rpm' 'aul-devel-*.armv7l.rpm' 'bundle-0*.armv7l.rpm' @@ -201,6 +202,8 @@ function install_tizen_sdk() { 
'dbus-devel-*.armv7l.rpm' 'dbus-libs-1*.armv7l.rpm' 'glib2-devel-2*.armv7l.rpm' + 'hal-api-common-*.armv7l.rpm' + 'hal-api-sensor-*.armv7l.rpm' 'json-glib-devel-*.armv7l.rpm' 'libcynara-client-*.armv7l.rpm' 'libcynara-commons-*.armv7l.rpm' @@ -212,15 +215,21 @@ function install_tizen_sdk() { 'parcel-0*.armv7l.rpm' 'parcel-devel-*.armv7l.rpm' 'pkgmgr-info-*.armv7l.rpm' + 'sensord-devel-*.armv7l.rpm' + 'sensord-dummy-*.armv7l.rpm' 'vconf-compat-*.armv7l.rpm' 'vconf-internal-keys-devel-*.armv7l.rpm') download "$URL" "${PKG_ARR[@]}" # Unified packages (snapshots) - URL="http://download.tizen.org/snapshots/tizen/unified/latest/repos/standard/packages/armv7l/" + URL="http://download.tizen.org/snapshots/TIZEN/Tizen/Tizen-Unified/latest/repos/standard/packages/armv7l/" PKG_ARR=( + 'bluetooth-frwk-0*.armv7l.rpm' + 'capi-network-bluetooth-0*.armv7l.rpm' + 'capi-network-bluetooth-devel-*.armv7l.rpm' 'capi-network-nsd-*.armv7l.rpm' 'capi-network-thread-*.armv7l.rpm' + 'capi-system-resource-1*.armv7l.rpm' 'libnsd-dns-sd-*.armv7l.rpm') download "$URL" "${PKG_ARR[@]}" @@ -258,7 +267,7 @@ function install_tizen_sdk() { # Information on necessary environment variables warning "Before proceeding with Matter export environment variables as follows:" echo -n "$COLOR_YELLOW" - echo "export TIZEN_VESRSION=\"$TIZEN_VERSION\"" + echo "export TIZEN_VERSION=\"$TIZEN_VERSION\"" echo "export TIZEN_SDK_ROOT=\"$(realpath "$TIZEN_SDK_ROOT")\"" echo "export TIZEN_SDK_TOOLCHAIN=\"\$TIZEN_SDK_ROOT/tools/arm-linux-gnueabi-gcc-9.2\"" echo "export TIZEN_SDK_SYSROOT=\"\$TIZEN_SDK_ROOT/platforms/tizen-$TIZEN_VERSION/mobile/rootstraps/mobile-$TIZEN_VERSION-device.core\"" diff --git a/integrations/docker/images/chip-build-vscode/Dockerfile b/integrations/docker/images/chip-build-vscode/Dockerfile index 23c7a37058594e..5a8962b10aa4b6 100644 --- a/integrations/docker/images/chip-build-vscode/Dockerfile +++ b/integrations/docker/images/chip-build-vscode/Dockerfile @@ -61,6 +61,17 @@ RUN set -x \ && chmod -R 
a+w /opt/android/sdk/licenses \ && : # last line +# Download and install kotlin compiler +RUN set -x \ + && cd /usr/lib \ + && wget -q https://github.com/JetBrains/kotlin/releases/download/v1.8.10/kotlin-compiler-1.8.10.zip \ + && unzip kotlin-compiler-*.zip \ + && rm kotlin-compiler-*.zip \ + && rm -f kotlinc/bin/*.bat \ + && : # last line + +ENV PATH $PATH:/usr/lib/kotlinc/bin + # Required for the Tizen SDK RUN set -x \ && apt-get update \ @@ -107,7 +118,7 @@ ENV TI_SYSCONFIG_ROOT=/opt/ti/sysconfig_1.13.0 ENV ZEPHYR_BASE=/opt/NordicSemiconductor/nrfconnect/zephyr ENV ZEPHYR_TOOLCHAIN_VARIANT=gnuarmemb -ENV TIZEN_VERSION 6.0 +ENV TIZEN_VERSION 7.0 ENV TIZEN_SDK_ROOT /opt/tizen-sdk ENV TIZEN_SDK_TOOLCHAIN $TIZEN_SDK_ROOT/tools/arm-linux-gnueabi-gcc-9.2 ENV TIZEN_SDK_SYSROOT $TIZEN_SDK_ROOT/platforms/tizen-$TIZEN_VERSION/mobile/rootstraps/mobile-$TIZEN_VERSION-device.core diff --git a/integrations/docker/images/chip-build/version b/integrations/docker/images/chip-build/version index 17db6c29ba4e7a..28bc70ad831983 100644 --- a/integrations/docker/images/chip-build/version +++ b/integrations/docker/images/chip-build/version @@ -1 +1 @@ -0.6.41 Version bump reason: [K32W0] SDK 2.6.10 update +0.6.44 Version bump reason: [Java] update kotlin compiler version to 1.8.10 diff --git a/scripts/build/build/targets.py b/scripts/build/build/targets.py index 3b6e6b03805934..b7edc6d13dcc68 100755 --- a/scripts/build/build/targets.py +++ b/scripts/build/build/targets.py @@ -12,11 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import os -import re -from itertools import combinations -from typing import Any, List, Optional - from builders.ameba import AmebaApp, AmebaBoard, AmebaBuilder from builders.android import AndroidApp, AndroidBoard, AndroidBuilder, AndroidProfile from builders.bouffalolab import BouffalolabApp, BouffalolabBoard, BouffalolabBuilder @@ -477,6 +472,7 @@ def BuildTizenTarget(): TargetPart('all-clusters-minimal', app=TizenApp.ALL_CLUSTERS_MINIMAL), TargetPart('chip-tool', app=TizenApp.CHIP_TOOL), TargetPart('light', app=TizenApp.LIGHT), + TargetPart('tests', app=TizenApp.TESTS), ]) target.AppendModifier(name="no-ble", enable_ble=False) @@ -556,6 +552,7 @@ def BuildTelinkTarget(): TargetPart('ota-requestor', app=TelinkApp.OTA_REQUESTOR), TargetPart('pump', app=TelinkApp.PUMP), TargetPart('pump-controller', app=TelinkApp.PUMP_CONTROLLER), + TargetPart('temperature-measurement', app=TelinkApp.TEMPERATURE_MEASUREMENT), TargetPart('thermostat', app=TelinkApp.THERMOSTAT), ]) diff --git a/scripts/build/builders/efr32.py b/scripts/build/builders/efr32.py index 5cf7ca2f93d22f..f4aa05fc762842 100644 --- a/scripts/build/builders/efr32.py +++ b/scripts/build/builders/efr32.py @@ -167,16 +167,16 @@ def __init__(self, if chip_build_libshell: self.extra_gn_options.append('chip_build_libshell=true') - if chip_logging == False: + if chip_logging is False: self.extra_gn_options.append('chip_logging=false') - if chip_openthread_ftd == False: + if chip_openthread_ftd is False: self.extra_gn_options.append('chip_openthread_ftd=false') if enable_heap_monitoring: self.extra_gn_options.append('enable_heap_monitoring=true') - if enable_openthread_cli == False: + if enable_openthread_cli is False: self.extra_gn_options.append('enable_openthread_cli=false') if show_qr_code: @@ -208,7 +208,8 @@ def __init__(self, if enable_ot_coap_lib: self.extra_gn_options.append( - 'use_silabs_thread_lib=true chip_openthread_target="../silabs:ot-efr32-cert" use_thread_coap_lib=true 
openthread_external_platform=""') + 'use_silabs_thread_lib=true chip_openthread_target="../silabs:ot-efr32-cert" ' + 'use_thread_coap_lib=true openthread_external_platform=""') if not no_version: shortCommitSha = subprocess.check_output( diff --git a/scripts/build/builders/esp32.py b/scripts/build/builders/esp32.py index 456ebc09c7f0c2..b6685e1af19895 100644 --- a/scripts/build/builders/esp32.py +++ b/scripts/build/builders/esp32.py @@ -100,7 +100,8 @@ def IsCompatible(self, board: Esp32Board): if board == Esp32Board.QEMU: return self == Esp32App.TESTS elif board == Esp32Board.M5Stack: - return self == Esp32App.ALL_CLUSTERS or self == Esp32App.ALL_CLUSTERS_MINIMAL or self == Esp32App.OTA_REQUESTOR or self == Esp32App.OTA_PROVIDER + return (self == Esp32App.ALL_CLUSTERS or self == Esp32App.ALL_CLUSTERS_MINIMAL or + self == Esp32App.OTA_REQUESTOR or self == Esp32App.OTA_PROVIDER) elif board == Esp32Board.C3DevKit: return self == Esp32App.ALL_CLUSTERS or self == Esp32App.ALL_CLUSTERS_MINIMAL else: @@ -244,5 +245,5 @@ def flashbundle(self): with open(os.path.join(self.output_dir, self.app.FlashBundleName), 'r') as fp: return { - l.strip(): os.path.join(self.output_dir, l.strip()) for l in fp.readlines() if l.strip() + line.strip(): os.path.join(self.output_dir, line.strip()) for line in fp.readlines() if line.strip() } diff --git a/scripts/build/builders/telink.py b/scripts/build/builders/telink.py index d4434019874ef5..348233d331baf9 100644 --- a/scripts/build/builders/telink.py +++ b/scripts/build/builders/telink.py @@ -30,6 +30,7 @@ class TelinkApp(Enum): OTA_REQUESTOR = auto() PUMP = auto() PUMP_CONTROLLER = auto() + TEMPERATURE_MEASUREMENT = auto() THERMOSTAT = auto() def ExampleName(self): @@ -51,6 +52,8 @@ def ExampleName(self): return 'pump-app' elif self == TelinkApp.PUMP_CONTROLLER: return 'pump-controller-app' + elif self == TelinkApp.TEMPERATURE_MEASUREMENT: + return 'temperature-measurement-app' elif self == TelinkApp.THERMOSTAT: return 'thermostat' else: 
@@ -75,6 +78,8 @@ def AppNamePrefix(self): return 'chip-telink-pump-example' elif self == TelinkApp.PUMP_CONTROLLER: return 'chip-telink-pump-controller-example' + elif self == TelinkApp.TEMPERATURE_MEASUREMENT: + return 'chip-telink-temperature-measurement-example' elif self == TelinkApp.THERMOSTAT: return 'chip-telink-thermostat-example' else: diff --git a/scripts/build/builders/tizen.py b/scripts/build/builders/tizen.py index 71ae18e9f1cce6..a8f9b3d497b5fb 100644 --- a/scripts/build/builders/tizen.py +++ b/scripts/build/builders/tizen.py @@ -20,9 +20,15 @@ from .gn import GnBuilder +Board = namedtuple('Board', ['target_cpu']) App = namedtuple('App', ['name', 'source', 'outputs']) Tool = namedtuple('Tool', ['name', 'source', 'outputs']) -Board = namedtuple('Board', ['target_cpu']) +TestDriver = namedtuple('TestDriver', ['name', 'source']) + + +class TizenBoard(Enum): + + ARM = Board('arm') class TizenApp(Enum): @@ -49,11 +55,19 @@ class TizenApp(Enum): ('chip-tool', 'chip-tool.map')) + TESTS = TestDriver( + 'tests', + 'src/test_driver/tizen') + @property def is_tpk(self): """If True, this app is a TPK.""" return isinstance(self.value, App) + @property + def package(self): + return f'{self.package_name}-{self.package_version}.tpk' + @property def package_name(self): return self.manifest.get('package') @@ -66,11 +80,6 @@ def parse_manifest(self, manifest: str): self.manifest = ET.parse(manifest).getroot() -class TizenBoard(Enum): - - ARM = Board('arm') - - class TizenBuilder(GnBuilder): def __init__(self, @@ -143,7 +152,8 @@ def build_outputs(self): def flashbundle(self): if not self.app.is_tpk: return {} - tpk = f'{self.app.package_name}-{self.app.package_version}.tpk' return { - tpk: os.path.join(self.output_dir, 'package', 'out', tpk), + self.app.package: os.path.join(self.output_dir, + self.app.package_name, 'out', + self.app.package), } diff --git a/scripts/build/test.py b/scripts/build/test.py index 8cf88b0571c44e..4156a898f153d7 100644 --- 
a/scripts/build/test.py +++ b/scripts/build/test.py @@ -26,8 +26,8 @@ def build_expected_output(source: str, root: str, out: str) -> List[str]: with open(os.path.join(SCRIPT_ROOT, source), 'rt') as f: - for l in f.readlines(): - yield l.replace("{root}", root).replace("{out}", out) + for line in f.readlines(): + yield line.replace("{root}", root).replace("{out}", out) def build_actual_output(root: str, out: str, args: List[str]) -> List[str]: @@ -57,7 +57,7 @@ def build_actual_output(root: str, out: str, args: List[str]) -> List[str]: '--out-prefix', out, ] + args, stdout=subprocess.PIPE, check=True, encoding='UTF-8', env=runenv) - result = [l + '\n' for l in retval.stdout.split('\n')] + result = [line + '\n' for line in retval.stdout.split('\n')] # ensure a single terminating newline: easier to edit since autoformat # often strips ending double newlines on text files @@ -73,22 +73,22 @@ def assertCommandOutput(self, expected_file: str, args: List[str]): ROOT = '/TEST/BUILD/ROOT' OUT = '/OUTPUT/DIR' - expected = [l for l in build_expected_output(expected_file, ROOT, OUT)] - actual = [l for l in build_actual_output(ROOT, OUT, args)] + expected = [line for line in build_expected_output(expected_file, ROOT, OUT)] + actual = [line for line in build_actual_output(ROOT, OUT, args)] diffs = [line for line in difflib.unified_diff(expected, actual)] if diffs: reference = os.path.basename(expected_file) + '.actual' with open(reference, 'wt') as fo: - for l in build_actual_output(ROOT, OUT, args): - fo.write(l.replace(ROOT, '{root}').replace(OUT, '{out}')) + for line in build_actual_output(ROOT, OUT, args): + fo.write(line.replace(ROOT, '{root}').replace(OUT, '{out}')) msg = "DIFFERENCE between expected and generated output in %s\n" % expected_file msg += "Expected file can be found in %s" % reference - for l in diffs: - msg += ("\n " + l.replace(ROOT, - '{root}').replace(OUT, '{out}').strip()) + for line in diffs: + msg += ("\n " + line.replace(ROOT, + '{root}').replace(OUT, 
'{out}').strip()) self.fail(msg) @unittest.skipUnless(sys.platform == 'linux', 'Build on linux test') diff --git a/scripts/build/testdata/all_targets_linux_x64.txt b/scripts/build/testdata/all_targets_linux_x64.txt index ae69c14420f112..f22e7d1820d549 100644 --- a/scripts/build/testdata/all_targets_linux_x64.txt +++ b/scripts/build/testdata/all_targets_linux_x64.txt @@ -18,6 +18,6 @@ mw320-all-clusters-app nrf-{nrf5340dk,nrf52840dk,nrf52840dongle}-{all-clusters,all-clusters-minimal,lock,light,light-switch,shell,pump,pump-controller,window-covering}[-rpc] nrf-native-posix-64-tests qpg-qpg6105-{lock,light,shell,persistent-storage} -tizen-arm-{all-clusters,all-clusters-minimal,chip-tool,light}[-no-ble][-no-wifi][-asan][-ubsan] -telink-tlsr9518adk80d-{all-clusters,all-clusters-minimal,contact-sensor,light,light-switch,lock,ota-requestor,pump,pump-controller,thermostat}[-rpc] +tizen-arm-{all-clusters,all-clusters-minimal,chip-tool,light,tests}[-no-ble][-no-wifi][-asan][-ubsan] +telink-tlsr9518adk80d-{all-clusters,all-clusters-minimal,contact-sensor,light,light-switch,lock,ota-requestor,pump,pump-controller,temperature-measurement,thermostat}[-rpc] openiotsdk-{shell,lock} diff --git a/scripts/examples/tests/test.py b/scripts/examples/tests/test.py index 2430e27ccd854e..4a4b537e27a655 100644 --- a/scripts/examples/tests/test.py +++ b/scripts/examples/tests/test.py @@ -27,9 +27,9 @@ def build_expected_output(root: str, out: str) -> List[str]: - with open(os.path.join(SCRIPT_ROOT, 'expected_test_cmakelists.txt'), 'rt') as f: - for l in f.readlines(): - yield l.replace("{root}", root).replace("{out}", out) + with open(os.path.join(SCRIPT_ROOT, 'expected_test_cmakelists.txt'), 'rt') as file: + for line in file.readlines(): + yield line.replace("{root}", root).replace("{out}", out) def build_actual_output(root: str, out: str) -> List[str]: @@ -37,14 +37,14 @@ def build_actual_output(root: str, out: str) -> List[str]: binary = os.path.join(SCRIPT_ROOT, '../gn_to_cmakelists.py') 
project = os.path.join(SCRIPT_ROOT, "test_project.json") cmake = os.path.join(SCRIPT_ROOT, "../../../out/CMakeLists.txt") - retval = subprocess.run([ + subprocess.run([ binary, project, ], stdout=subprocess.PIPE, check=True, encoding='UTF-8', ) with open(cmake, 'rt') as f: - for l in f.readlines(): - yield l + for line in f.readlines(): + yield line def main(): @@ -54,15 +54,15 @@ def main(): ROOT = '/TEST/BUILD/ROOT' OUT = '/OUTPUT/DIR' - expected = [l for l in build_expected_output(ROOT, OUT)] - actual = [l for l in build_actual_output(ROOT, OUT)] + expected = [line for line in build_expected_output(ROOT, OUT)] + actual = [line for line in build_actual_output(ROOT, OUT)] diffs = [line for line in difflib.unified_diff(expected, actual)] if diffs: logging.error("DIFFERENCE between expected and generated output") - for l in diffs: - logging.warning(" " + l.strip()) + for line in diffs: + logging.warning(" " + line.strip()) sys.exit(1) diff --git a/scripts/gen_test_driver.py b/scripts/gen_test_driver.py index 9ef28a833f1c57..8bf03bf05d2c84 100644 --- a/scripts/gen_test_driver.py +++ b/scripts/gen_test_driver.py @@ -1,4 +1,3 @@ - #!/usr/bin/env python # Copyright (c) 2020 Project CHIP Authors @@ -74,8 +73,8 @@ def main(argv): TEST_SUITE_RE = re.compile(r'\s*CHIP_REGISTER_TEST_SUITE\(([^)]*)\)') with open(options.input_file, 'r') as input_file: - for l in input_file.readlines(): - match = TEST_SUITE_RE.match(l) + for line in input_file.readlines(): + match = TEST_SUITE_RE.match(line) if not match: continue diff --git a/scripts/tests/chiptest/__init__.py b/scripts/tests/chiptest/__init__.py index fbfc79cf39b212..55e125c29bac19 100644 --- a/scripts/tests/chiptest/__init__.py +++ b/scripts/tests/chiptest/__init__.py @@ -22,7 +22,7 @@ from typing import Iterator, Set from . 
import linux, runner -from .test_definition import ApplicationPaths, TestDefinition, TestRunTime, TestTag, TestTarget +from .test_definition import ApplicationPaths, TestDefinition, TestTag, TestTarget _DEFAULT_CHIP_ROOT = os.path.abspath( os.path.join(os.path.dirname(__file__), "..", "..", "..")) @@ -149,7 +149,13 @@ def _AllYamlTests(): def target_for_name(name: str): - if name.startswith("TV_") or name.startswith("Test_TC_MC_") or name.startswith("Test_TC_LOWPOWER_") or name.startswith("Test_TC_KEYPADINPUT_") or name.startswith("Test_TC_APPLAUNCHER_") or name.startswith("Test_TC_MEDIAINPUT_") or name.startswith("Test_TC_WAKEONLAN_") or name.startswith("Test_TC_CHANNEL_") or name.startswith("Test_TC_MEDIAPLAYBACK_") or name.startswith("Test_TC_AUDIOOUTPUT_") or name.startswith("Test_TC_TGTNAV_") or name.startswith("Test_TC_APBSC_") or name.startswith("Test_TC_CONTENTLAUNCHER_") or name.startswith("Test_TC_ALOGIN_"): + if (name.startswith("TV_") or name.startswith("Test_TC_MC_") or + name.startswith("Test_TC_LOWPOWER_") or name.startswith("Test_TC_KEYPADINPUT_") or + name.startswith("Test_TC_APPLAUNCHER_") or name.startswith("Test_TC_MEDIAINPUT_") or + name.startswith("Test_TC_WAKEONLAN_") or name.startswith("Test_TC_CHANNEL_") or + name.startswith("Test_TC_MEDIAPLAYBACK_") or name.startswith("Test_TC_AUDIOOUTPUT_") or + name.startswith("Test_TC_TGTNAV_") or name.startswith("Test_TC_APBSC_") or + name.startswith("Test_TC_CONTENTLAUNCHER_") or name.startswith("Test_TC_ALOGIN_")): return TestTarget.TV if name.startswith("DL_") or name.startswith("Test_TC_DRLK_"): return TestTarget.LOCK diff --git a/scripts/tests/chiptest/runner.py b/scripts/tests/chiptest/runner.py index 71cf0c7d5e433f..d2f3d96503210e 100644 --- a/scripts/tests/chiptest/runner.py +++ b/scripts/tests/chiptest/runner.py @@ -56,8 +56,8 @@ def CapturedLogContains(self, txt: str, index=0): return False, len(self.captured_logs) def FindLastMatchingLine(self, matcher): - for l in 
reversed(self.captured_logs): - match = re.match(matcher, l) + for line in reversed(self.captured_logs): + match = re.match(matcher, line) if match: return match return None diff --git a/scripts/tests/chiptest/test_definition.py b/scripts/tests/chiptest/test_definition.py index b9842647105ab5..bbce2428b619a9 100644 --- a/scripts/tests/chiptest/test_definition.py +++ b/scripts/tests/chiptest/test_definition.py @@ -15,14 +15,12 @@ import logging import os -import sys import threading import time import typing from dataclasses import dataclass, field from datetime import datetime from enum import Enum, auto -from random import randrange TEST_NODE_ID = '0x12344321' @@ -94,7 +92,7 @@ def wait(self, timeout=None): if self.killed: return 0 # If the App was never started, wait cannot be called on the process - if self.process == None: + if self.process is None: time.sleep(0.1) continue code = self.process.wait(timeout) @@ -169,7 +167,8 @@ class ApplicationPaths: chip_tool_with_python_cmd: typing.List[str] def items(self): - return [self.chip_tool, self.all_clusters_app, self.lock_app, self.ota_provider_app, self.ota_requestor_app, self.tv_app, self.bridge_app, self.chip_repl_yaml_tester_cmd, self.chip_tool_with_python_cmd] + return [self.chip_tool, self.all_clusters_app, self.lock_app, self.ota_provider_app, self.ota_requestor_app, + self.tv_app, self.bridge_app, self.chip_repl_yaml_tester_cmd, self.chip_tool_with_python_cmd] @dataclass @@ -253,7 +252,8 @@ def tags_str(self) -> str: """Get a human readable list of tags applied to this test""" return ", ".join([t.to_s() for t in self.tags]) - def Run(self, runner, apps_register, paths: ApplicationPaths, pics_file: str, timeout_seconds: typing.Optional[int], dry_run=False, test_runtime: TestRunTime = TestRunTime.CHIP_TOOL_BUILTIN): + def Run(self, runner, apps_register, paths: ApplicationPaths, pics_file: str, + timeout_seconds: typing.Optional[int], dry_run=False, test_runtime: TestRunTime = TestRunTime.CHIP_TOOL_BUILTIN): 
""" Executes the given test case using the provided runner for execution. """ diff --git a/scripts/tests/cirque_tests.sh b/scripts/tests/cirque_tests.sh index 07f243c8bc13ac..6f8f41215ce391 100755 --- a/scripts/tests/cirque_tests.sh +++ b/scripts/tests/cirque_tests.sh @@ -44,6 +44,7 @@ CIRQUE_TESTS=( "SplitCommissioningTest" "CommissioningFailureTest" "CommissioningFailureOnReportTest" + "PythonCommissioningTest" "CommissioningWindowTest" ) diff --git a/scripts/tests/java/base.py b/scripts/tests/java/base.py index ce0e04bbccceb7..f849c08d352f39 100755 --- a/scripts/tests/java/base.py +++ b/scripts/tests/java/base.py @@ -17,11 +17,8 @@ # limitations under the License. # -import asyncio import datetime # Commissioning test. -import logging -import os import queue import subprocess import sys @@ -39,7 +36,7 @@ def EnqueueLogOutput(fp, tag, q): try: timestamp = float(line[1:18].decode()) line = line[19:] - except Exception as ex: + except Exception: pass sys.stdout.buffer.write( (f"[{datetime.datetime.fromtimestamp(timestamp).isoformat(sep=' ')}]").encode() + tag + line) diff --git a/scripts/tests/java/commissioning_test.py b/scripts/tests/java/commissioning_test.py index c306afada822ce..138c9efa49065a 100755 --- a/scripts/tests/java/commissioning_test.py +++ b/scripts/tests/java/commissioning_test.py @@ -18,12 +18,9 @@ # import argparse -import asyncio import logging -import os import queue import subprocess -import sys import threading import typing @@ -46,7 +43,8 @@ def __init__(self, thread_list: typing.List[threading.Thread], queue: queue.Queu parser.add_argument('-s', '--setup-payload', dest='setup_payload', help="Setup Payload (manual pairing code or QR code content)") parser.add_argument('-c', '--setup-pin-code', dest='setup_pin_code', - help="Setup PIN code which can be used for password-authenticated session establishment (PASE) with the Commissionee") + help=("Setup PIN code which can be used for password-authenticated " + "session establishment (PASE) with 
the Commissionee")) parser.add_argument('-n', '--nodeid', help="The Node ID issued to the device", default='1') parser.add_argument('-d', '--discriminator', help="Discriminator of the device", default='3840') parser.add_argument('-u', '--paa-trust-store-path', dest='paa_trust_store_path', diff --git a/scripts/tests/java/discover_test.py b/scripts/tests/java/discover_test.py index 80c18d6c30e301..ed9fea7bc03c8a 100755 --- a/scripts/tests/java/discover_test.py +++ b/scripts/tests/java/discover_test.py @@ -18,12 +18,9 @@ # import argparse -import asyncio import logging -import os import queue import subprocess -import sys import threading import typing diff --git a/scripts/tests/run_java_test.py b/scripts/tests/run_java_test.py index 12d7f7c7b6c3cf..9205c3fec49322 100755 --- a/scripts/tests/run_java_test.py +++ b/scripts/tests/run_java_test.py @@ -16,8 +16,6 @@ import logging import os -import pathlib -import pty import queue import re import shlex @@ -34,12 +32,18 @@ @click.command() -@click.option("--app", type=click.Path(exists=True), default=None, help='Path to local application to use, omit to use external apps.') -@click.option("--app-args", type=str, default='', help='The extra arguments passed to the device.') -@click.option("--tool-path", type=click.Path(exists=True), default=None, help='Path to java-matter-controller.') -@click.option("--tool-cluster", type=str, default='pairing', help='The cluster name passed to the java-matter-controller.') -@click.option("--tool-args", type=str, default='', help='The arguments passed to the java-matter-controller.') -@click.option("--factoryreset", is_flag=True, help='Remove app configs (/tmp/chip*) before running the tests.') +@click.option("--app", type=click.Path(exists=True), default=None, + help='Path to local application to use, omit to use external apps.') +@click.option("--app-args", type=str, default='', + help='The extra arguments passed to the device.') +@click.option("--tool-path", 
type=click.Path(exists=True), default=None, + help='Path to java-matter-controller.') +@click.option("--tool-cluster", type=str, default='pairing', + help='The cluster name passed to the java-matter-controller.') +@click.option("--tool-args", type=str, default='', + help='The arguments passed to the java-matter-controller.') +@click.option("--factoryreset", is_flag=True, + help='Remove app configs (/tmp/chip*) before running the tests.') def main(app: str, app_args: str, tool_path: str, tool_cluster: str, tool_args: str, factoryreset: bool): logging.info("Execute: {script_command}") diff --git a/scripts/tests/run_python_test.py b/scripts/tests/run_python_test.py index 4318ee1221da35..87f6044c2a35f0 100755 --- a/scripts/tests/run_python_test.py +++ b/scripts/tests/run_python_test.py @@ -44,7 +44,7 @@ def EnqueueLogOutput(fp, tag, q): try: timestamp = float(line[1:18].decode()) line = line[19:] - except Exception as ex: + except Exception: pass sys.stdout.buffer.write( (f"[{datetime.datetime.fromtimestamp(timestamp).isoformat(sep=' ')}]").encode() + tag + line) @@ -67,12 +67,23 @@ def DumpProgramOutputToQueue(thread_list: typing.List[threading.Thread], tag: st @click.command() -@click.option("--app", type=click.Path(exists=True), default=None, help='Path to local application to use, omit to use external apps.') -@click.option("--factoryreset", is_flag=True, help='Remove app config and repl configs (/tmp/chip* and /tmp/repl*) before running the tests.') -@click.option("--app-args", type=str, default='', help='The extra arguments passed to the device.') -@click.option("--script", type=click.Path(exists=True), default=os.path.join(DEFAULT_CHIP_ROOT, 'src', 'controller', 'python', 'test', 'test_scripts', 'mobile-device-test.py'), help='Test script to use.') -@click.option("--script-args", type=str, default='', help='Path to the test script to use, omit to use the default test script (mobile-device-test.py).') -@click.option("--script-gdb", is_flag=True, help='Run script 
through gdb') +@click.option("--app", type=click.Path(exists=True), default=None, + help='Path to local application to use, omit to use external apps.') +@click.option("--factoryreset", is_flag=True, + help='Remove app config and repl configs (/tmp/chip* and /tmp/repl*) before running the tests.') +@click.option("--app-args", type=str, default='', + help='The extra arguments passed to the device.') +@click.option("--script", type=click.Path(exists=True), default=os.path.join(DEFAULT_CHIP_ROOT, + 'src', + 'controller', + 'python', + 'test', + 'test_scripts', + 'mobile-device-test.py'), help='Test script to use.') +@click.option("--script-args", type=str, default='', + help='Path to the test script to use, omit to use the default test script (mobile-device-test.py).') +@click.option("--script-gdb", is_flag=True, + help='Run script through gdb') def main(app: str, factoryreset: bool, app_args: str, script: str, script_args: str, script_gdb: bool): if factoryreset: # Remove native app config @@ -123,10 +134,12 @@ def main(app: str, factoryreset: bool, app_args: str, script: str, script_args: if script_gdb: # - # When running through Popen, we need to preserve some space-delimited args to GDB as a single logical argument. To do that, let's use '|' as a placeholder - # for the space character so that the initial split will not tokenize them, and then replace that with the space char there-after. + # When running through Popen, we need to preserve some space-delimited args to GDB as a single logical argument. + # To do that, let's use '|' as a placeholder for the space character so that the initial split will not tokenize them, + # and then replace that with the space char there-after. 
# - script_command = "gdb -batch -return-child-result -q -ex run -ex thread|apply|all|bt --args python3".split() + script_command + script_command = ("gdb -batch -return-child-result -q -ex run -ex " + "thread|apply|all|bt --args python3".split() + script_command) else: script_command = "/usr/bin/env python3".split() + script_command diff --git a/scripts/tests/run_test_suite.py b/scripts/tests/run_test_suite.py index 357148da8a35b5..3b70bd81d7cffe 100755 --- a/scripts/tests/run_test_suite.py +++ b/scripts/tests/run_test_suite.py @@ -18,11 +18,9 @@ import logging import os import sys -import tempfile import time import typing from dataclasses import dataclass, field -from pathlib import Path import chiptest import click @@ -30,9 +28,7 @@ from chiptest.accessories import AppsRegister from chiptest.glob_matcher import GlobMatcher from chiptest.test_definition import TestRunTime, TestTag -from diskcache import Cache - -cache = Cache(os.path.join(tempfile.gettempdir(), 'yaml_runner_cache')) +from yaml.paths_finder import PathsFinder DEFAULT_CHIP_ROOT = os.path.abspath( os.path.join(os.path.dirname(__file__), '..', '..')) @@ -44,26 +40,6 @@ class ManualHandling(enum.Enum): ONLY = enum.auto() -def FindBinaryPath(name: str): - binary_path = cache.get(name) - if binary_path: - if Path(binary_path).is_file(): - return binary_path - else: - del cache[name] - - start = time.time() - for path in Path(DEFAULT_CHIP_ROOT).rglob(name): - if not path.is_file(): - continue - if path.name != name: - continue - cache[name] = str(path) - return str(path) - - return 'NOT_FOUND_IN_OUTPUT_' + name - - # Supported log levels, mapping string values required for argument # parsing into logging constants __LOG_LEVELS__ = { @@ -170,7 +146,8 @@ def main(context, dry_run, log_level, target, target_glob, target_skip_glob, if chip_tool is None and not runtime == TestRunTime.CHIP_REPL_PYTHON: # non yaml tests REQUIRE chip-tool. 
Yaml tests should not require chip-tool - chip_tool = FindBinaryPath('chip-tool') + paths_finder = PathsFinder() + chip_tool = paths_finder.get('chip-tool') if include_tags: include_tags = set([TestTag.__members__[t] for t in include_tags]) @@ -289,32 +266,35 @@ def cmd_list(context): type=int, help='If provided, fail if a test runs for longer than this time') @click.pass_context -def cmd_run(context, iterations, all_clusters_app, lock_app, ota_provider_app, ota_requestor_app, tv_app, bridge_app, chip_repl_yaml_tester, chip_tool_with_python, pics_file, keep_going, test_timeout_seconds): +def cmd_run(context, iterations, all_clusters_app, lock_app, ota_provider_app, ota_requestor_app, + tv_app, bridge_app, chip_repl_yaml_tester, chip_tool_with_python, pics_file, keep_going, test_timeout_seconds): runner = chiptest.runner.Runner() + paths_finder = PathsFinder() + if all_clusters_app is None: - all_clusters_app = FindBinaryPath('chip-all-clusters-app') + all_clusters_app = paths_finder.get('chip-all-clusters-app') if lock_app is None: - lock_app = FindBinaryPath('chip-lock-app') + lock_app = paths_finder.get('chip-lock-app') if ota_provider_app is None: - ota_provider_app = FindBinaryPath('chip-ota-provider-app') + ota_provider_app = paths_finder.get('chip-ota-provider-app') if ota_requestor_app is None: - ota_requestor_app = FindBinaryPath('chip-ota-requestor-app') + ota_requestor_app = paths_finder.get('chip-ota-requestor-app') if tv_app is None: - tv_app = FindBinaryPath('chip-tv-app') + tv_app = paths_finder.get('chip-tv-app') if bridge_app is None: - bridge_app = FindBinaryPath('chip-bridge-app') + bridge_app = paths_finder.get('chip-bridge-app') if chip_repl_yaml_tester is None: - chip_repl_yaml_tester = FindBinaryPath('yamltest_with_chip_repl_tester.py') + chip_repl_yaml_tester = paths_finder.get('yamltest_with_chip_repl_tester.py') if chip_tool_with_python is None: - chip_tool_with_python = FindBinaryPath('chiptool.py') + chip_tool_with_python = 
paths_finder.get('chiptool.py') # Command execution requires an array paths = chiptest.ApplicationPaths( diff --git a/scripts/tools/generate_esp32_chip_factory_bin.py b/scripts/tools/generate_esp32_chip_factory_bin.py index 69ee282cf83aa9..a88e999d1010ea 100755 --- a/scripts/tools/generate_esp32_chip_factory_bin.py +++ b/scripts/tools/generate_esp32_chip_factory_bin.py @@ -21,8 +21,6 @@ import enum import logging import os -import shutil -import subprocess import sys from types import SimpleNamespace @@ -198,7 +196,7 @@ def calendar_types_to_uint32(calendar_types): def ishex(s): try: - n = int(s, 16) + _ = int(s, 16) return True except ValueError: return False @@ -464,11 +462,13 @@ def any_base_int(s): return int(s, 0) parser.add_argument('--mfg-date', help='Manufacturing date in format YYYY-MM-DD') parser.add_argument('--serial-num', help='Serial number') parser.add_argument('--rd-id-uid', - help='128-bit unique identifier for generating rotating device identifier, provide 32-byte hex string, e.g. "1234567890abcdef1234567890abcdef"') + help=('128-bit unique identifier for generating rotating device identifier, ' + 'provide 32-byte hex string, e.g. "1234567890abcdef1234567890abcdef"')) # These will be used by DeviceInfoProvider parser.add_argument('--calendar-types', nargs='+', - help='List of supported calendar types.\nSupported Calendar Types: Buddhist, Chinese, Coptic, Ethiopian, Gregorian, Hebrew, Indian, Islamic, Japanese, Korean, Persian, Taiwanese') + help=('List of supported calendar types.\nSupported Calendar Types: Buddhist, Chinese, Coptic, Ethiopian, ' + 'Gregorian, Hebrew, Indian, Islamic, Japanese, Korean, Persian, Taiwanese')) parser.add_argument('--locales', nargs='+', help='List of supported locales, Language Tag as defined by BCP47, eg. 
en-US en-GB') parser.add_argument('--fixed-labels', nargs='+', help='List of fixed labels, eg: "0/orientation/up" "1/orientation/down" "2/orientation/down"') diff --git a/scripts/tools/nxp/factory_data_generator/custom.py b/scripts/tools/nxp/factory_data_generator/custom.py index e272921d81fc7a..13b1a34fc98c12 100644 --- a/scripts/tools/nxp/factory_data_generator/custom.py +++ b/scripts/tools/nxp/factory_data_generator/custom.py @@ -49,7 +49,7 @@ def custom_function(self): from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.serialization import load_der_private_key -from default import * +from default import Base64Argument, FileArgument, IntArgument, StrArgument class Verifier(Base64Argument): diff --git a/scripts/tools/nxp/factory_data_generator/default.py b/scripts/tools/nxp/factory_data_generator/default.py index c2c3b39d615bc2..13dc0866aaed40 100644 --- a/scripts/tools/nxp/factory_data_generator/default.py +++ b/scripts/tools/nxp/factory_data_generator/default.py @@ -29,7 +29,6 @@ import base64 import logging -import sys class InputArgument: diff --git a/scripts/tools/nxp/factory_data_generator/generate.py b/scripts/tools/nxp/factory_data_generator/generate.py index 24beb361a31c13..daf1b5d1267cb7 100755 --- a/scripts/tools/nxp/factory_data_generator/generate.py +++ b/scripts/tools/nxp/factory_data_generator/generate.py @@ -19,11 +19,12 @@ import argparse import hashlib import logging -import os import subprocess import sys -from custom import * +from custom import (CertDeclaration, DacCert, DacPKey, Discriminator, HardwareVersion, HardwareVersionStr, IterationCount, + ManufacturingDate, PaiCert, PartNumber, ProductId, ProductLabel, ProductName, ProductURL, Salt, SerialNum, + SetupPasscode, StrArgument, UniqueId, VendorId, VendorName, Verifier) from default import InputArgument diff --git a/scripts/tools/zap/test_generate.py b/scripts/tools/zap/test_generate.py index f6ea2ab3d5abfb..ff7ea2c20e74c2 100755 --- 
a/scripts/tools/zap/test_generate.py +++ b/scripts/tools/zap/test_generate.py @@ -14,7 +14,6 @@ # limitations under the License. import glob -import logging import os import shutil import subprocess @@ -116,7 +115,7 @@ def run_test_cases(self, checker: unittest.TestCase): try: subprocess.check_call(["diff", actual, expected]) - except: + except subprocess.CalledProcessError: if self.context.regenerate_golden: print( f"Copying updated golden image from {actual} to {expected}") diff --git a/scripts/tools/zap/tests/inputs/all-clusters-app.zap b/scripts/tools/zap/tests/inputs/all-clusters-app.zap index 6147c36f3d2698..c8116d751b694b 100644 --- a/scripts/tools/zap/tests/inputs/all-clusters-app.zap +++ b/scripts/tools/zap/tests/inputs/all-clusters-app.zap @@ -1977,7 +1977,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1993,7 +1993,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/scripts/tools/zap/tests/inputs/lighting-app.zap b/scripts/tools/zap/tests/inputs/lighting-app.zap index 9289a1f57ad3a8..111412a75256c4 100644 --- a/scripts/tools/zap/tests/inputs/lighting-app.zap +++ b/scripts/tools/zap/tests/inputs/lighting-app.zap @@ -1971,7 +1971,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "NVM", "singleton": 0, @@ -1987,7 +1987,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", + "type": "CalendarTypeEnum", "included": 1, "storageOption": "NVM", "singleton": 0, diff --git a/src/app/EventLoggingTypes.h b/src/app/EventLoggingTypes.h index 3a2de1a6f4c974..351774256d2a45 100644 --- a/src/app/EventLoggingTypes.h +++ b/src/app/EventLoggingTypes.h @@ -95,7 +95,7 @@ static_assert(sizeof(std::underlying_type_t) <= sizeof(unsigned), 
*/ struct Timestamp { - enum class Type + enum class Type : uint8_t { kSystem = 0, kEpoch diff --git a/src/app/MessageDef/AttributePathIB.cpp b/src/app/MessageDef/AttributePathIB.cpp index bc79aa1c170789..90d6ea5a6225b2 100644 --- a/src/app/MessageDef/AttributePathIB.cpp +++ b/src/app/MessageDef/AttributePathIB.cpp @@ -171,8 +171,11 @@ CHIP_ERROR AttributePathIB::Parser::GetListIndex(DataModel::Nullable return GetNullableUnsignedInteger(to_underlying(Tag::kListIndex), apListIndex); } -CHIP_ERROR AttributePathIB::Parser::GetListIndex(ConcreteDataAttributePath & aAttributePath) const +CHIP_ERROR AttributePathIB::Parser::GetGroupAttributePath(ConcreteDataAttributePath & aAttributePath) const { + ReturnErrorOnFailure(GetCluster(&aAttributePath.mClusterId)); + ReturnErrorOnFailure(GetAttribute(&aAttributePath.mAttributeId)); + CHIP_ERROR err = CHIP_NO_ERROR; DataModel::Nullable listIndex; err = GetListIndex(&(listIndex)); @@ -198,6 +201,14 @@ CHIP_ERROR AttributePathIB::Parser::GetListIndex(ConcreteDataAttributePath & aAt return err; } +CHIP_ERROR AttributePathIB::Parser::GetConcreteAttributePath(ConcreteDataAttributePath & aAttributePath) const +{ + ReturnErrorOnFailure(GetGroupAttributePath(aAttributePath)); + + // And now read our endpoint. + return GetEndpoint(&aAttributePath.mEndpointId); +} + CHIP_ERROR AttributePathIB::Parser::ParsePath(AttributePathParams & aAttribute) const { CHIP_ERROR err = CHIP_NO_ERROR; diff --git a/src/app/MessageDef/AttributePathIB.h b/src/app/MessageDef/AttributePathIB.h index 0f7fb623ae44ea..906fd319a4113f 100644 --- a/src/app/MessageDef/AttributePathIB.h +++ b/src/app/MessageDef/AttributePathIB.h @@ -131,14 +131,27 @@ class Parser : public ListParser CHIP_ERROR GetListIndex(DataModel::Nullable * const apListIndex) const; /** - * @brief Get the ListIndex, and set the mListIndex and mListOp fields in the ConcreteDataAttributePath accordingly. 
It will set - * ListOp to NotList when the list index is missing, users should interpret it as ReplaceAll according to the context. + * @brief Get the concrete attribute path. This will set the ListOp to + * NotList when there is no ListIndex. Consumers should interpret NotList + * as ReplaceAll if that's appropriate to their context. * - * @param [in] aAttributePath The attribute path object for setting list index and list op. + * @param [in] aAttributePath The attribute path object to write to. * * @return #CHIP_NO_ERROR on success */ - CHIP_ERROR GetListIndex(ConcreteDataAttributePath & aAttributePath) const; + CHIP_ERROR GetConcreteAttributePath(ConcreteDataAttributePath & aAttributePath) const; + + /** + * @brief Get a group attribute path. This will set the ListOp to + * NotList when there is no ListIndex. Consumers should interpret NotList + * as ReplaceAll if that's appropriate to their context. The + * endpoint id of the resulting path might have any value. + * + * @param [in] aAttributePath The attribute path object to write to. + * + * @return #CHIP_NO_ERROR on success + */ + CHIP_ERROR GetGroupAttributePath(ConcreteDataAttributePath & aAttributePath) const; // TODO(#14934) Add a function to get ConcreteDataAttributePath from AttributePathIB::Parser directly. 
diff --git a/src/app/ReadClient.cpp b/src/app/ReadClient.cpp index 5a24bbbefb1d4f..cbd131f288c3b0 100644 --- a/src/app/ReadClient.cpp +++ b/src/app/ReadClient.cpp @@ -639,13 +639,7 @@ CHIP_ERROR ReadClient::ProcessAttributePath(AttributePathIB::Parser & aAttribute { CHIP_ERROR err = CHIP_NO_ERROR; // The ReportData must contain a concrete attribute path - err = aAttributePathParser.GetEndpoint(&(aAttributePath.mEndpointId)); - VerifyOrReturnError(err == CHIP_NO_ERROR, CHIP_ERROR_IM_MALFORMED_ATTRIBUTE_PATH_IB); - err = aAttributePathParser.GetCluster(&(aAttributePath.mClusterId)); - VerifyOrReturnError(err == CHIP_NO_ERROR, CHIP_ERROR_IM_MALFORMED_ATTRIBUTE_PATH_IB); - err = aAttributePathParser.GetAttribute(&(aAttributePath.mAttributeId)); - VerifyOrReturnError(err == CHIP_NO_ERROR, CHIP_ERROR_IM_MALFORMED_ATTRIBUTE_PATH_IB); - err = aAttributePathParser.GetListIndex(aAttributePath); + err = aAttributePathParser.GetConcreteAttributePath(aAttributePath); VerifyOrReturnError(err == CHIP_NO_ERROR, CHIP_ERROR_IM_MALFORMED_ATTRIBUTE_PATH_IB); return CHIP_NO_ERROR; } diff --git a/src/app/WriteClient.cpp b/src/app/WriteClient.cpp index c29242d7a146fa..42992f42e8ee50 100644 --- a/src/app/WriteClient.cpp +++ b/src/app/WriteClient.cpp @@ -521,13 +521,7 @@ CHIP_ERROR WriteClient::ProcessAttributeStatusIB(AttributeStatusIB::Parser & aAt err = aAttributeStatusIB.GetPath(&attributePathParser); SuccessOrExit(err); - err = attributePathParser.GetCluster(&(attributePath.mClusterId)); - SuccessOrExit(err); - err = attributePathParser.GetEndpoint(&(attributePath.mEndpointId)); - SuccessOrExit(err); - err = attributePathParser.GetAttribute(&(attributePath.mAttributeId)); - SuccessOrExit(err); - err = attributePathParser.GetListIndex(attributePath); + err = attributePathParser.GetConcreteAttributePath(attributePath); SuccessOrExit(err); err = aAttributeStatusIB.GetErrorStatus(&(StatusIBParser)); diff --git a/src/app/WriteHandler.cpp b/src/app/WriteHandler.cpp index 
1e5343432a3709..b4dca470a33219 100644 --- a/src/app/WriteHandler.cpp +++ b/src/app/WriteHandler.cpp @@ -292,16 +292,7 @@ CHIP_ERROR WriteHandler::ProcessAttributeDataIBs(TLV::TLVReader & aAttributeData err = element.GetPath(&attributePath); SuccessOrExit(err); - err = attributePath.GetEndpoint(&(dataAttributePath.mEndpointId)); - SuccessOrExit(err); - - err = attributePath.GetCluster(&(dataAttributePath.mClusterId)); - SuccessOrExit(err); - - err = attributePath.GetAttribute(&(dataAttributePath.mAttributeId)); - SuccessOrExit(err); - - err = attributePath.GetListIndex(dataAttributePath); + err = attributePath.GetConcreteAttributePath(dataAttributePath); SuccessOrExit(err); err = element.GetData(&dataReader); @@ -407,13 +398,7 @@ CHIP_ERROR WriteHandler::ProcessGroupAttributeDataIBs(TLV::TLVReader & aAttribut err = element.GetPath(&attributePath); SuccessOrExit(err); - err = attributePath.GetCluster(&(dataAttributePath.mClusterId)); - SuccessOrExit(err); - - err = attributePath.GetAttribute(&(dataAttributePath.mAttributeId)); - SuccessOrExit(err); - - err = attributePath.GetListIndex(dataAttributePath); + err = attributePath.GetGroupAttributePath(dataAttributePath); SuccessOrExit(err); err = element.GetData(&dataReader); diff --git a/src/app/clusters/level-control/level-control.cpp b/src/app/clusters/level-control/level-control.cpp index 8ad6844775226a..77242ea554f39d 100644 --- a/src/app/clusters/level-control/level-control.cpp +++ b/src/app/clusters/level-control/level-control.cpp @@ -1223,9 +1223,7 @@ void emberAfLevelControlClusterServerInitCallback(EndpointId endpoint) Attributes::MinLevel::Get(endpoint, &state->minLevel); Attributes::MaxLevel::Get(endpoint, &state->maxLevel); - uint32_t featureMap; - if (Attributes::FeatureMap::Get(endpoint, &featureMap) == EMBER_ZCL_STATUS_SUCCESS && - READBITS(featureMap, EMBER_AF_LEVEL_CONTROL_FEATURE_LIGHTING)) + if (LevelControlHasFeature(endpoint, LevelControlFeature::kLighting)) { if (state->minLevel < 
LEVEL_CONTROL_LIGHTING_MIN_LEVEL) { diff --git a/src/app/clusters/time-format-localization-server/time-format-localization-server.cpp b/src/app/clusters/time-format-localization-server/time-format-localization-server.cpp index 6f792a16d8be21..f4eb458a0a8313 100644 --- a/src/app/clusters/time-format-localization-server/time-format-localization-server.cpp +++ b/src/app/clusters/time-format-localization-server/time-format-localization-server.cpp @@ -67,7 +67,7 @@ CHIP_ERROR TimeFormatLocalizationAttrAccess::ReadSupportedCalendarTypes(Attribut if (it) { err = aEncoder.EncodeList([&it](const auto & encoder) -> CHIP_ERROR { - CalendarType type; + CalendarTypeEnum type; while (it->Next(type)) { @@ -108,10 +108,10 @@ CHIP_ERROR TimeFormatLocalizationAttrAccess::Read(const ConcreteReadAttributePat // Returns whether newType is a valid calendar type. If it's not, validType is set to a valid calendar type, // if there are any, and to kBuddhist if there are not. -bool IsSupportedCalendarType(CalendarType newType, CalendarType & validType) +bool IsSupportedCalendarType(CalendarTypeEnum newType, CalendarTypeEnum & validType) { // Reset valid type if no supported calendar types found. 
- validType = CalendarType::kBuddhist; + validType = CalendarTypeEnum::kBuddhist; DeviceLayer::DeviceInfoProvider * provider = DeviceLayer::GetDeviceInfoProvider(); @@ -121,7 +121,7 @@ bool IsSupportedCalendarType(CalendarType newType, CalendarType & validType) if (it) { - CalendarType type; + CalendarTypeEnum type; while (it->Next(type)) { @@ -148,10 +148,10 @@ bool IsSupportedCalendarType(CalendarType newType, CalendarType & validType) // ============================================================================= static Protocols::InteractionModel::Status emberAfPluginTimeFormatLocalizationOnCalendarTypeChange(EndpointId EndpointId, - CalendarType newType) + CalendarTypeEnum newType) { Protocols::InteractionModel::Status res; - CalendarType validType = CalendarType::kBuddhist; + CalendarTypeEnum validType = CalendarTypeEnum::kBuddhist; if (IsSupportedCalendarType(newType, validType)) { @@ -184,7 +184,7 @@ Protocols::InteractionModel::Status MatterTimeFormatLocalizationClusterServerPre if (sizeof(uint8_t) == size) { res = emberAfPluginTimeFormatLocalizationOnCalendarTypeChange(attributePath.mEndpointId, - static_cast(*value)); + static_cast(*value)); } else { @@ -202,8 +202,8 @@ Protocols::InteractionModel::Status MatterTimeFormatLocalizationClusterServerPre void emberAfTimeFormatLocalizationClusterServerInitCallback(EndpointId endpoint) { - CalendarType calendarType; - CalendarType validType; + CalendarTypeEnum calendarType; + CalendarTypeEnum validType; EmberAfStatus status = ActiveCalendarType::Get(endpoint, &calendarType); VerifyOrReturn(EMBER_ZCL_STATUS_SUCCESS == status, diff --git a/src/app/common/templates/config-data.yaml b/src/app/common/templates/config-data.yaml index 350237e753a46e..c30b43d7267ed2 100644 --- a/src/app/common/templates/config-data.yaml +++ b/src/app/common/templates/config-data.yaml @@ -7,8 +7,6 @@ WeakEnums: - ColorLoopAction - ColorLoopDirection - ColorMode - - ContentLaunchStatus - - ContentLaunchStreamingType - EnhancedColorMode 
- HardwareFaultEnum - HueDirection @@ -18,7 +16,6 @@ WeakEnums: - IdentifyEffectVariant - IdentifyIdentifyType - InterfaceTypeEnum - - LevelControlOptions - MoveMode - NetworkFaultEnum - PHYRateEnum @@ -34,7 +31,6 @@ DefineBitmaps: - BarrierControlCapabilities - BarrierControlSafetyStatus - ColorLoopUpdateFlags - - LevelControlFeature # We need a more configurable way of deciding which clusters have which init functions.... # See https://github.com/project-chip/connectedhomeip/issues/4369 diff --git a/src/app/reporting/Engine.cpp b/src/app/reporting/Engine.cpp index 96d55fdaeeebf8..89c09e697ed862 100644 --- a/src/app/reporting/Engine.cpp +++ b/src/app/reporting/Engine.cpp @@ -584,7 +584,7 @@ void Engine::Run(System::Layer * aSystemLayer, void * apAppState) CHIP_ERROR Engine::ScheduleRun() { - if (mRunScheduled) + if (IsRunScheduled()) { return CHIP_NO_ERROR; } diff --git a/src/app/reporting/Engine.h b/src/app/reporting/Engine.h index e0f81f4474397c..5a8638185549f3 100644 --- a/src/app/reporting/Engine.h +++ b/src/app/reporting/Engine.h @@ -143,6 +143,8 @@ class Engine friend class TestReportingEngine; friend class ::chip::app::TestReadInteraction; + bool IsRunScheduled() const { return mRunScheduled; } + struct AttributePathParamsWithGeneration : public AttributePathParams { AttributePathParamsWithGeneration() {} diff --git a/src/app/server/CommissioningWindowManager.cpp b/src/app/server/CommissioningWindowManager.cpp index a899179c85e545..3ddde11a931acb 100644 --- a/src/app/server/CommissioningWindowManager.cpp +++ b/src/app/server/CommissioningWindowManager.cpp @@ -239,7 +239,7 @@ CHIP_ERROR CommissioningWindowManager::AdvertiseAndListenForPASE() #endif ReturnErrorOnFailure(mServer->GetExchangeManager().RegisterUnsolicitedMessageHandlerForType( - Protocols::SecureChannel::MsgType::PBKDFParamRequest, &mPairingSession)); + Protocols::SecureChannel::MsgType::PBKDFParamRequest, this)); mListeningForPASE = true; if (mUseECM) @@ -584,4 +584,32 @@ void 
CommissioningWindowManager::UpdateOpenerFabricIndex(Nullable a mOpenerFabricIndex = aNewOpenerFabricIndex; } +CHIP_ERROR CommissioningWindowManager::OnUnsolicitedMessageReceived(const PayloadHeader & payloadHeader, + Messaging::ExchangeDelegate *& newDelegate) +{ + using Protocols::SecureChannel::MsgType; + + // Must be a PBKDFParamRequest message. Stop listening to new + // PBKDFParamRequest messages and hand it off to mPairingSession. If + // mPairingSession's OnMessageReceived fails, it will call our + // OnSessionEstablishmentError, and that will either start listening for a + // new PBKDFParamRequest or not, depending on how many failures we had seen. + // + // It's very important that we stop listening here, so that new incoming + // PASE establishment attempts don't interrupt our existing establishment. + mServer->GetExchangeManager().UnregisterUnsolicitedMessageHandlerForType(MsgType::PBKDFParamRequest); + newDelegate = &mPairingSession; + return CHIP_NO_ERROR; +} + +void CommissioningWindowManager::OnExchangeCreationFailed(Messaging::ExchangeDelegate * delegate) +{ + using Protocols::SecureChannel::MsgType; + + // We couldn't create an exchange, so didn't manage to call + // OnMessageReceived on mPairingSession. Just go back to listening for + // PBKDFParamRequest messages. 
+ mServer->GetExchangeManager().RegisterUnsolicitedMessageHandlerForType(MsgType::PBKDFParamRequest, this); +} + } // namespace chip diff --git a/src/app/server/CommissioningWindowManager.h b/src/app/server/CommissioningWindowManager.h index cfd13eecb06c11..cc2eda2ea29199 100644 --- a/src/app/server/CommissioningWindowManager.h +++ b/src/app/server/CommissioningWindowManager.h @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -38,7 +39,8 @@ enum class CommissioningWindowAdvertisement class Server; -class CommissioningWindowManager : public SessionEstablishmentDelegate, +class CommissioningWindowManager : public Messaging::UnsolicitedMessageHandler, + public SessionEstablishmentDelegate, public app::CommissioningModeProvider, public SessionDelegate { @@ -104,6 +106,11 @@ class CommissioningWindowManager : public SessionEstablishmentDelegate, // CommissioningModeProvider implementation. Dnssd::CommissioningMode GetCommissioningMode() const override; + //// UnsolicitedMessageHandler Implementation //// + CHIP_ERROR OnUnsolicitedMessageReceived(const PayloadHeader & payloadHeader, + Messaging::ExchangeDelegate *& newDelegate) override; + void OnExchangeCreationFailed(Messaging::ExchangeDelegate * delegate) override; + //////////// SessionEstablishmentDelegate Implementation /////////////// void OnSessionEstablishmentError(CHIP_ERROR error) override; void OnSessionEstablishmentStarted() override; @@ -195,7 +202,8 @@ class CommissioningWindowManager : public SessionEstablishmentDelegate, Spake2pVerifier mECMPASEVerifier; uint16_t mECMDiscriminator = 0; // mListeningForPASE is true only when we are listening for - // PBKDFParamRequest messages. + // PBKDFParamRequest messages or when we're in the middle of a PASE + // handshake. bool mListeningForPASE = false; // Boolean that tracks whether we have a live commissioning timeout timer. 
bool mCommissioningTimeoutTimerArmed = false; diff --git a/src/app/tests/TestReadInteraction.cpp b/src/app/tests/TestReadInteraction.cpp index ee526f4fd6ce5e..b7f0d2b3df0e0e 100644 --- a/src/app/tests/TestReadInteraction.cpp +++ b/src/app/tests/TestReadInteraction.cpp @@ -1556,7 +1556,7 @@ void TestReadInteraction::TestSubscribeRoundtrip(nlTestSuite * apSuite, void * a dirtyPath5.mAttributeId = 4; // Test report with 2 different path - delegate.mpReadHandler->mFlags.Set(ReadHandler::ReadHandlerFlags::HoldReport, false); + delegate.mpReadHandler->SetStateFlag(ReadHandler::ReadHandlerFlags::HoldReport, false); delegate.mGotReport = false; delegate.mGotEventResponse = false; delegate.mNumAttributeResponse = 0; @@ -1573,7 +1573,7 @@ void TestReadInteraction::TestSubscribeRoundtrip(nlTestSuite * apSuite, void * a NL_TEST_ASSERT(apSuite, delegate.mNumAttributeResponse == 2); // Test report with 2 different path, and 1 same path - delegate.mpReadHandler->mFlags.Set(ReadHandler::ReadHandlerFlags::HoldReport, false); + delegate.mpReadHandler->SetStateFlag(ReadHandler::ReadHandlerFlags::HoldReport, false); delegate.mGotReport = false; delegate.mNumAttributeResponse = 0; err = engine->GetReportingEngine().SetDirty(dirtyPath1); @@ -1589,7 +1589,7 @@ void TestReadInteraction::TestSubscribeRoundtrip(nlTestSuite * apSuite, void * a NL_TEST_ASSERT(apSuite, delegate.mNumAttributeResponse == 2); // Test report with 3 different path, and one path is overlapped with another - delegate.mpReadHandler->mFlags.Set(ReadHandler::ReadHandlerFlags::HoldReport, false); + delegate.mpReadHandler->SetStateFlag(ReadHandler::ReadHandlerFlags::HoldReport, false); delegate.mGotReport = false; delegate.mNumAttributeResponse = 0; err = engine->GetReportingEngine().SetDirty(dirtyPath1); @@ -1605,7 +1605,7 @@ void TestReadInteraction::TestSubscribeRoundtrip(nlTestSuite * apSuite, void * a NL_TEST_ASSERT(apSuite, delegate.mNumAttributeResponse == 2); // Test report with 3 different path, all are not 
overlapped, one path is not interested for current subscription - delegate.mpReadHandler->mFlags.Set(ReadHandler::ReadHandlerFlags::HoldReport, false); + delegate.mpReadHandler->SetStateFlag(ReadHandler::ReadHandlerFlags::HoldReport, false); delegate.mGotReport = false; delegate.mNumAttributeResponse = 0; err = engine->GetReportingEngine().SetDirty(dirtyPath1); @@ -1621,16 +1621,12 @@ void TestReadInteraction::TestSubscribeRoundtrip(nlTestSuite * apSuite, void * a NL_TEST_ASSERT(apSuite, delegate.mNumAttributeResponse == 2); // Test empty report - delegate.mpReadHandler->mFlags.Set(ReadHandler::ReadHandlerFlags::HoldReport, false); - delegate.mpReadHandler->mFlags.Set(ReadHandler::ReadHandlerFlags::HoldSync, false); + delegate.mpReadHandler->SetStateFlag(ReadHandler::ReadHandlerFlags::HoldReport, false); + delegate.mpReadHandler->SetStateFlag(ReadHandler::ReadHandlerFlags::HoldSync, false); + NL_TEST_ASSERT(apSuite, engine->GetReportingEngine().IsRunScheduled()); delegate.mGotReport = false; delegate.mNumAttributeResponse = 0; - // TODO: Fix - // https://github.com/project-chip/connectedhomeip/issues/23260 so this - // test is testing what it thinks it's testing. - // Make sure the reporting engine actually runs. - ctx.DrainAndServiceIO(); NL_TEST_ASSERT(apSuite, delegate.mNumAttributeResponse == 0); @@ -1793,7 +1789,7 @@ void TestReadInteraction::TestSubscribeUrgentWildcardEvent(nlTestSuite * apSuite // There should be no reporting run scheduled. This is very important; // otherwise we can get a false-positive pass below because the run was // already scheduled by here. - NL_TEST_ASSERT(apSuite, !InteractionModelEngine::GetInstance()->GetReportingEngine().mRunScheduled); + NL_TEST_ASSERT(apSuite, !InteractionModelEngine::GetInstance()->GetReportingEngine().IsRunScheduled()); // Generate some events, which should get reported. 
GenerateEvents(apSuite, apContext); @@ -1881,7 +1877,7 @@ void TestReadInteraction::TestSubscribeWildcard(nlTestSuite * apSuite, void * ap // Set a concrete path dirty { - delegate.mpReadHandler->mFlags.Set(ReadHandler::ReadHandlerFlags::HoldReport, false); + delegate.mpReadHandler->SetStateFlag(ReadHandler::ReadHandlerFlags::HoldReport, false); delegate.mGotReport = false; delegate.mNumAttributeResponse = 0; @@ -1902,7 +1898,7 @@ void TestReadInteraction::TestSubscribeWildcard(nlTestSuite * apSuite, void * ap // Set a endpoint dirty { - delegate.mpReadHandler->mFlags.Set(ReadHandler::ReadHandlerFlags::HoldReport, false); + delegate.mpReadHandler->SetStateFlag(ReadHandler::ReadHandlerFlags::HoldReport, false); delegate.mGotReport = false; delegate.mNumAttributeResponse = 0; @@ -1987,7 +1983,7 @@ void TestReadInteraction::TestSubscribePartialOverlap(nlTestSuite * apSuite, voi // Set a partial overlapped path dirty { - delegate.mpReadHandler->mFlags.Set(ReadHandler::ReadHandlerFlags::HoldReport, false); + delegate.mpReadHandler->SetStateFlag(ReadHandler::ReadHandlerFlags::HoldReport, false); delegate.mGotReport = false; delegate.mNumAttributeResponse = 0; @@ -2064,7 +2060,7 @@ void TestReadInteraction::TestSubscribeSetDirtyFullyOverlap(nlTestSuite * apSuit // Set a full overlapped path dirty and expect to receive one E2C3A1 { - delegate.mpReadHandler->mFlags.Set(ReadHandler::ReadHandlerFlags::HoldReport, false); + delegate.mpReadHandler->SetStateFlag(ReadHandler::ReadHandlerFlags::HoldReport, false); delegate.mGotReport = false; delegate.mNumAttributeResponse = 0; @@ -2185,9 +2181,9 @@ void TestReadInteraction::TestSubscribeInvalidAttributePathRoundtrip(nlTestSuite NL_TEST_ASSERT(apSuite, engine->ActiveHandlerAt(0) != nullptr); delegate.mpReadHandler = engine->ActiveHandlerAt(0); - // TODO: This bit is not testing anything, because removing that flag - // manually like this will not cause the reporting engine to run! 
- delegate.mpReadHandler->mFlags.Set(ReadHandler::ReadHandlerFlags::HoldReport, false); + delegate.mpReadHandler->SetStateFlag(ReadHandler::ReadHandlerFlags::HoldReport, false); + delegate.mpReadHandler->SetStateFlag(ReadHandler::ReadHandlerFlags::HoldSync, false); + NL_TEST_ASSERT(apSuite, engine->GetReportingEngine().IsRunScheduled()); ctx.DrainAndServiceIO(); @@ -2361,7 +2357,7 @@ void TestReadInteraction::TestPostSubscribeRoundtripStatusReportTimeout(nlTestSu dirtyPath2.mAttributeId = 2; // Test report with 2 different path - delegate.mpReadHandler->mFlags.Set(ReadHandler::ReadHandlerFlags::HoldReport, false); + delegate.mpReadHandler->SetStateFlag(ReadHandler::ReadHandlerFlags::HoldReport, false); delegate.mGotReport = false; delegate.mNumAttributeResponse = 0; @@ -2375,8 +2371,8 @@ void TestReadInteraction::TestPostSubscribeRoundtripStatusReportTimeout(nlTestSu NL_TEST_ASSERT(apSuite, delegate.mGotReport); NL_TEST_ASSERT(apSuite, delegate.mNumAttributeResponse == 2); - delegate.mpReadHandler->mFlags.Set(ReadHandler::ReadHandlerFlags::HoldReport, false); - delegate.mpReadHandler->mFlags.Set(ReadHandler::ReadHandlerFlags::HoldSync, false); + delegate.mpReadHandler->SetStateFlag(ReadHandler::ReadHandlerFlags::HoldReport, false); + delegate.mpReadHandler->SetStateFlag(ReadHandler::ReadHandlerFlags::HoldSync, false); delegate.mGotReport = false; delegate.mNumAttributeResponse = 0; ctx.ExpireSessionBobToAlice(); @@ -2385,6 +2381,7 @@ void TestReadInteraction::TestPostSubscribeRoundtripStatusReportTimeout(nlTestSu NL_TEST_ASSERT(apSuite, err == CHIP_NO_ERROR); err = engine->GetReportingEngine().SetDirty(dirtyPath2); NL_TEST_ASSERT(apSuite, err == CHIP_NO_ERROR); + NL_TEST_ASSERT(apSuite, engine->GetReportingEngine().IsRunScheduled()); ctx.DrainAndServiceIO(); @@ -2722,8 +2719,8 @@ void TestReadInteraction::TestPostSubscribeRoundtripChunkStatusReportTimeout(nlT dirtyPath1.mEndpointId = Test::kMockEndpoint3; dirtyPath1.mAttributeId = Test::MockAttributeId(4); - 
delegate.mpReadHandler->mFlags.Set(ReadHandler::ReadHandlerFlags::HoldReport, false); - delegate.mpReadHandler->mFlags.Set(ReadHandler::ReadHandlerFlags::HoldSync, false); + delegate.mpReadHandler->SetStateFlag(ReadHandler::ReadHandlerFlags::HoldReport, false); + delegate.mpReadHandler->SetStateFlag(ReadHandler::ReadHandlerFlags::HoldSync, false); err = engine->GetReportingEngine().SetDirty(dirtyPath1); NL_TEST_ASSERT(apSuite, err == CHIP_NO_ERROR); delegate.mGotReport = false; @@ -2824,8 +2821,8 @@ void TestReadInteraction::TestPostSubscribeRoundtripChunkReportTimeout(nlTestSui dirtyPath1.mEndpointId = Test::kMockEndpoint3; dirtyPath1.mAttributeId = Test::MockAttributeId(4); - delegate.mpReadHandler->mFlags.Set(ReadHandler::ReadHandlerFlags::HoldReport, false); - delegate.mpReadHandler->mFlags.Set(ReadHandler::ReadHandlerFlags::HoldSync, false); + delegate.mpReadHandler->SetStateFlag(ReadHandler::ReadHandlerFlags::HoldReport, false); + delegate.mpReadHandler->SetStateFlag(ReadHandler::ReadHandlerFlags::HoldSync, false); err = engine->GetReportingEngine().SetDirty(dirtyPath1); NL_TEST_ASSERT(apSuite, err == CHIP_NO_ERROR); delegate.mGotReport = false; @@ -4201,7 +4198,7 @@ void TestReadInteraction::TestSubscriptionReportWithDefunctSession(nlTestSuite * NL_TEST_ASSERT(apSuite, SessionHandle(*readHandler->GetSession()) == ctx.GetSessionAliceToBob()); // Test that we send reports as needed. - readHandler->mFlags.Set(ReadHandler::ReadHandlerFlags::HoldReport, false); + readHandler->SetStateFlag(ReadHandler::ReadHandlerFlags::HoldReport, false); delegate.mGotReport = false; engine->GetReportingEngine().SetDirty(subscribePath); @@ -4215,7 +4212,7 @@ void TestReadInteraction::TestSubscriptionReportWithDefunctSession(nlTestSuite * // Test that if the session is defunct we don't send reports and clean // up properly. 
readHandler->GetSession()->MarkAsDefunct(); - readHandler->mFlags.Set(ReadHandler::ReadHandlerFlags::HoldReport, false); + readHandler->SetStateFlag(ReadHandler::ReadHandlerFlags::HoldReport, false); delegate.mGotReport = false; engine->GetReportingEngine().SetDirty(subscribePath); diff --git a/src/app/tests/suites/certification/information.py b/src/app/tests/suites/certification/information.py index 1efda20315df66..bf5bbaa418bd30 100755 --- a/src/app/tests/suites/certification/information.py +++ b/src/app/tests/suites/certification/information.py @@ -55,7 +55,6 @@ def checkPythonVersion(): def parseTestPlans(filepath): tests_names = [] tests_statuses = [] - rv = [] for name, test_plan in parseYaml(filepath)['Test Plans'].items(): for section, tests in test_plan['tests'].items(): @@ -81,12 +80,12 @@ def parseTestPlan(filepath): for test_definition in parseYaml(filepath)['tests']: if 'disabled' in test_definition: - if is_pending_test == False: + if is_pending_test is False: return TestStatus.partial else: is_pending_test = False - if is_pending_test == True: + if is_pending_test is True: return TestStatus.pending return TestStatus.complete diff --git a/src/app/util/af-types.h b/src/app/util/af-types.h index ced52bb9241a06..9c4a7ca6a5f069 100644 --- a/src/app/util/af-types.h +++ b/src/app/util/af-types.h @@ -35,6 +35,7 @@ #include #include +#include #include #include @@ -123,7 +124,7 @@ typedef struct */ typedef struct { - uint16_t deviceId; + chip::DeviceTypeId deviceId; uint8_t deviceVersion; } EmberAfDeviceType; diff --git a/src/app/zap-templates/zcl/data-model/chip/time-format-localization-cluster.xml b/src/app/zap-templates/zcl/data-model/chip/time-format-localization-cluster.xml index 5692f9b8ff241d..323c486d9fbeaa 100644 --- a/src/app/zap-templates/zcl/data-model/chip/time-format-localization-cluster.xml +++ b/src/app/zap-templates/zcl/data-model/chip/time-format-localization-cluster.xml @@ -16,12 +16,12 @@ limitations under the License. 
--> - + - + @@ -48,8 +48,8 @@ limitations under the License. or audibly convey time information need a mechanism by which they can be configured to use a user’s preferred format. - HourFormat - ActiveCalendarType - SupportedCalendarTypes + HourFormat + ActiveCalendarType + SupportedCalendarTypes diff --git a/src/controller/data_model/controller-clusters.matter b/src/controller/data_model/controller-clusters.matter index e0b6536b5ca1d3..e25b2be2554fde 100644 --- a/src/controller/data_model/controller-clusters.matter +++ b/src/controller/data_model/controller-clusters.matter @@ -899,7 +899,7 @@ client cluster LocalizationConfiguration = 43 { } client cluster TimeFormatLocalization = 44 { - enum CalendarType : ENUM8 { + enum CalendarTypeEnum : ENUM8 { kBuddhist = 0; kChinese = 1; kCoptic = 2; @@ -914,14 +914,14 @@ client cluster TimeFormatLocalization = 44 { kTaiwanese = 11; } - enum HourFormat : ENUM8 { + enum HourFormatEnum : ENUM8 { k12hr = 0; k24hr = 1; } - attribute HourFormat hourFormat = 0; - attribute CalendarType activeCalendarType = 1; - readonly attribute CalendarType supportedCalendarTypes[] = 2; + attribute HourFormatEnum hourFormat = 0; + attribute CalendarTypeEnum activeCalendarType = 1; + readonly attribute CalendarTypeEnum supportedCalendarTypes[] = 2; readonly attribute command_id generatedCommandList[] = 65528; readonly attribute command_id acceptedCommandList[] = 65529; readonly attribute event_id eventList[] = 65530; diff --git a/src/controller/data_model/controller-clusters.zap b/src/controller/data_model/controller-clusters.zap index 214744b60efbe5..f2bccecc78e23e 100644 --- a/src/controller/data_model/controller-clusters.zap +++ b/src/controller/data_model/controller-clusters.zap @@ -3811,7 +3811,7 @@ "code": 0, "mfgCode": null, "side": "server", - "type": "HourFormat", + "type": "HourFormatEnum", "included": 1, "storageOption": "RAM", "singleton": 0, @@ -3827,7 +3827,7 @@ "code": 1, "mfgCode": null, "side": "server", - "type": "CalendarType", 
+ "type": "CalendarTypeEnum", "included": 1, "storageOption": "RAM", "singleton": 0, diff --git a/src/controller/java/CHIPDeviceController-JNI.cpp b/src/controller/java/CHIPDeviceController-JNI.cpp index 7601f867dd4487..a19d023b8e2e17 100644 --- a/src/controller/java/CHIPDeviceController-JNI.cpp +++ b/src/controller/java/CHIPDeviceController-JNI.cpp @@ -83,7 +83,8 @@ static CHIP_ERROR ParseAttributePathList(jobject attributePathList, static CHIP_ERROR ParseAttributePath(jobject attributePath, EndpointId & outEndpointId, ClusterId & outClusterId, AttributeId & outAttributeId); static CHIP_ERROR ParseEventPathList(jobject eventPathList, std::vector & outEventPathParamsList); -static CHIP_ERROR ParseEventPath(jobject eventPath, EndpointId & outEndpointId, ClusterId & outClusterId, EventId & outEventId); +static CHIP_ERROR ParseEventPath(jobject eventPath, EndpointId & outEndpointId, ClusterId & outClusterId, EventId & outEventId, + bool & outIsUrgent); static CHIP_ERROR IsWildcardChipPathId(jobject chipPathId, bool & isWildcard); static CHIP_ERROR CreateDeviceAttestationDelegateBridge(JNIEnv * env, jlong handle, jobject deviceAttestationDelegate, jint failSafeExpiryTimeoutSecs, @@ -1436,20 +1437,23 @@ CHIP_ERROR ParseEventPathList(jobject eventPathList, std::vectorCallObjectMethod(eventPath, getEndpointIdMethod); VerifyOrReturnError(endpointIdObj != nullptr, CHIP_ERROR_INCORRECT_STATE); @@ -1464,6 +1469,7 @@ CHIP_ERROR ParseEventPath(jobject eventPath, EndpointId & outEndpointId, Cluster VerifyOrReturnError(clusterIdObj != nullptr, CHIP_ERROR_INCORRECT_STATE); jobject eventIdObj = env->CallObjectMethod(eventPath, getEventIdMethod); VerifyOrReturnError(eventIdObj != nullptr, CHIP_ERROR_INCORRECT_STATE); + jboolean isUrgent = env->CallBooleanMethod(eventPath, isUrgentMethod); uint32_t endpointId = 0; ReturnErrorOnFailure(GetChipPathIdValue(endpointIdObj, kInvalidEndpointId, endpointId)); @@ -1475,6 +1481,7 @@ CHIP_ERROR ParseEventPath(jobject eventPath, EndpointId & 
outEndpointId, Cluster outEndpointId = static_cast(endpointId); outClusterId = static_cast(clusterId); outEventId = static_cast(eventId); + outIsUrgent = (isUrgent == JNI_TRUE); return CHIP_NO_ERROR; } diff --git a/src/controller/java/src/chip/devicecontroller/model/ChipEventPath.java b/src/controller/java/src/chip/devicecontroller/model/ChipEventPath.java index 56d4a6c36297e4..b5d1a95315516a 100644 --- a/src/controller/java/src/chip/devicecontroller/model/ChipEventPath.java +++ b/src/controller/java/src/chip/devicecontroller/model/ChipEventPath.java @@ -23,11 +23,14 @@ /** An event path that should be used for requests. */ public class ChipEventPath { private ChipPathId endpointId, clusterId, eventId; + private boolean isUrgent; - private ChipEventPath(ChipPathId endpointId, ChipPathId clusterId, ChipPathId eventId) { + private ChipEventPath( + ChipPathId endpointId, ChipPathId clusterId, ChipPathId eventId, boolean isUrgent) { this.endpointId = endpointId; this.clusterId = clusterId; this.eventId = eventId; + this.isUrgent = isUrgent; } public ChipPathId getEndpointId() { @@ -42,36 +45,64 @@ public ChipPathId getEventId() { return eventId; } + public boolean isUrgent() { + return isUrgent; + } + @Override public boolean equals(Object object) { if (object instanceof ChipEventPath) { ChipEventPath that = (ChipEventPath) object; return Objects.equals(this.endpointId, that.endpointId) && Objects.equals(this.clusterId, that.clusterId) - && Objects.equals(this.eventId, that.eventId); + && Objects.equals(this.eventId, that.eventId) + && (this.isUrgent == that.isUrgent); } return false; } @Override public int hashCode() { - return Objects.hash(endpointId, clusterId, eventId); + return Objects.hash(endpointId, clusterId, eventId, isUrgent); } @Override public String toString() { return String.format( - Locale.ENGLISH, "Endpoint %s, cluster %s, event %s", endpointId, clusterId, eventId); + Locale.ENGLISH, + "Endpoint %s, cluster %s, event %s, isUrgent %s", + endpointId, + 
clusterId, + eventId, + isUrgent ? "true" : "false"); } public static ChipEventPath newInstance( ChipPathId endpointId, ChipPathId clusterId, ChipPathId eventId) { - return new ChipEventPath(endpointId, clusterId, eventId); + return new ChipEventPath(endpointId, clusterId, eventId, false); } /** Create a new {@link ChipEventPath} with only concrete ids. */ public static ChipEventPath newInstance(long endpointId, long clusterId, long eventId) { return new ChipEventPath( - ChipPathId.forId(endpointId), ChipPathId.forId(clusterId), ChipPathId.forId(eventId)); + ChipPathId.forId(endpointId), + ChipPathId.forId(clusterId), + ChipPathId.forId(eventId), + false); + } + + public static ChipEventPath newInstance( + ChipPathId endpointId, ChipPathId clusterId, ChipPathId eventId, boolean isUrgent) { + return new ChipEventPath(endpointId, clusterId, eventId, isUrgent); + } + + /** Create a new {@link ChipEventPath} with only concrete ids. */ + public static ChipEventPath newInstance( + long endpointId, long clusterId, long eventId, boolean isUrgent) { + return new ChipEventPath( + ChipPathId.forId(endpointId), + ChipPathId.forId(clusterId), + ChipPathId.forId(eventId), + isUrgent); } } diff --git a/src/controller/java/zap-generated/CHIPReadCallbacks.cpp b/src/controller/java/zap-generated/CHIPReadCallbacks.cpp index f32eb447345bcd..9190ebb0086b71 100644 --- a/src/controller/java/zap-generated/CHIPReadCallbacks.cpp +++ b/src/controller/java/zap-generated/CHIPReadCallbacks.cpp @@ -6750,7 +6750,7 @@ CHIPTimeFormatLocalizationSupportedCalendarTypesAttributeCallback:: } void CHIPTimeFormatLocalizationSupportedCalendarTypesAttributeCallback::CallbackFn( - void * context, const chip::app::DataModel::DecodableList & list) + void * context, const chip::app::DataModel::DecodableList & list) { chip::DeviceLayer::StackUnlock unlock; CHIP_ERROR err = CHIP_NO_ERROR; diff --git a/src/controller/java/zap-generated/CHIPReadCallbacks.h b/src/controller/java/zap-generated/CHIPReadCallbacks.h 
index 7d1df204dcd207..c61e125fe4d2f6 100644 --- a/src/controller/java/zap-generated/CHIPReadCallbacks.h +++ b/src/controller/java/zap-generated/CHIPReadCallbacks.h @@ -2842,7 +2842,7 @@ class CHIPTimeFormatLocalizationSupportedCalendarTypesAttributeCallback static void CallbackFn(void * context, - const chip::app::DataModel::DecodableList & list); + const chip::app::DataModel::DecodableList & list); static void OnSubscriptionEstablished(void * context) { CHIP_ERROR err = chip::JniReferences::GetInstance().CallSubscriptionEstablished( diff --git a/src/controller/python/BUILD.gn b/src/controller/python/BUILD.gn index da9b080e4fcd6d..750de2b837c170 100644 --- a/src/controller/python/BUILD.gn +++ b/src/controller/python/BUILD.gn @@ -68,6 +68,8 @@ shared_library("ChipDeviceCtrl") { "OpCredsBinding.cpp", "chip/clusters/attribute.cpp", "chip/clusters/command.cpp", + "chip/credentials/cert.cpp", + "chip/crypto/p256keypair.cpp", "chip/discovery/NodeResolution.cpp", "chip/interaction_model/Delegate.cpp", "chip/interaction_model/Delegate.h", @@ -213,7 +215,13 @@ chip_python_wheel_action("chip-core") { "chip/clusters/Command.py", "chip/clusters/__init__.py", "chip/clusters/enum.py", + "chip/commissioning/__init__.py", + "chip/commissioning/commissioning_flow_blocks.py", + "chip/commissioning/pase.py", "chip/configuration/__init__.py", + "chip/credentials/cert.py", + "chip/crypto/fabric.py", + "chip/crypto/p256keypair.py", "chip/discovery/__init__.py", "chip/discovery/library_handle.py", "chip/discovery/types.py", @@ -270,7 +278,10 @@ chip_python_wheel_action("chip-core") { "chip.ble", "chip.ble.commissioning", "chip.configuration", + "chip.commissioning", "chip.clusters", + "chip.credentials", + "chip.crypto", "chip.utils", "chip.discovery", "chip.exceptions", diff --git a/src/controller/python/ChipDeviceController-StorageDelegate.cpp b/src/controller/python/ChipDeviceController-StorageDelegate.cpp index 0c382e01c1a80d..9fb19efccdd2db 100644 --- 
a/src/controller/python/ChipDeviceController-StorageDelegate.cpp +++ b/src/controller/python/ChipDeviceController-StorageDelegate.cpp @@ -24,6 +24,7 @@ #include #include +#include #include namespace chip { @@ -39,7 +40,13 @@ CHIP_ERROR PythonPersistentStorageDelegate::SyncGetKeyValue(const char * key, vo return CHIP_ERROR_PERSISTED_STORAGE_VALUE_NOT_FOUND; } - uint16_t neededSize = val->second.size(); + if (!CanCastTo(val->second.size())) + { + size = 0; + return CHIP_ERROR_BUFFER_TOO_SMALL; + } + + uint16_t neededSize = static_cast(val->second.size()); ReturnErrorCodeIf(size == 0 && neededSize == 0, CHIP_NO_ERROR); ReturnErrorCodeIf(value == nullptr, CHIP_ERROR_BUFFER_TOO_SMALL); diff --git a/src/controller/python/OpCredsBinding.cpp b/src/controller/python/OpCredsBinding.cpp index c7f3e1dc20497c..3c4da74f904683 100644 --- a/src/controller/python/OpCredsBinding.cpp +++ b/src/controller/python/OpCredsBinding.cpp @@ -23,6 +23,7 @@ #include "ChipDeviceController-ScriptDevicePairingDelegate.h" #include "ChipDeviceController-StorageDelegate.h" +#include "controller/python/chip/crypto/p256keypair.h" #include "controller/python/chip/interaction_model/Delegate.h" #include @@ -36,6 +37,7 @@ #include #include +#include #include #include #include @@ -57,6 +59,8 @@ const chip::Credentials::AttestationTrustStore * GetTestFileAttestationTrustStor return &attestationTrustStore; } + +chip::Python::PlaceholderOperationalCredentialsIssuer sPlaceholderOperationalCredentialsIssuer; } // namespace namespace chip { @@ -369,11 +373,74 @@ void pychip_OnCommissioningStatusUpdate(chip::PeerId peerId, chip::Controller::C return sTestCommissioner.OnCommissioningStatusUpdate(peerId, stageCompleted, err); } +/** + * Allocates a controller that does not use auto-commissioning.
+ * + * TODO(#25214): Need clean up API + * + */ +PyChipError pychip_OpCreds_AllocateControllerForPythonCommissioningFLow(chip::Controller::DeviceCommissioner ** outDevCtrl, + chip::python::pychip_P256Keypair * operationalKey, + uint8_t * noc, uint32_t nocLen, uint8_t * icac, + uint32_t icacLen, uint8_t * rcac, uint32_t rcacLen, + const uint8_t * ipk, uint32_t ipkLen, + chip::VendorId adminVendorId, bool enableServerInteractions) +{ + ReturnErrorCodeIf(nocLen > Controller::kMaxCHIPDERCertLength, ToPyChipError(CHIP_ERROR_NO_MEMORY)); + ReturnErrorCodeIf(icacLen > Controller::kMaxCHIPDERCertLength, ToPyChipError(CHIP_ERROR_NO_MEMORY)); + ReturnErrorCodeIf(rcacLen > Controller::kMaxCHIPDERCertLength, ToPyChipError(CHIP_ERROR_NO_MEMORY)); + + ChipLogDetail(Controller, "Creating New Device Controller"); + + auto devCtrl = std::make_unique(); + VerifyOrReturnError(devCtrl != nullptr, ToPyChipError(CHIP_ERROR_NO_MEMORY)); + + Controller::SetupParams initParams; + initParams.pairingDelegate = &sPairingDelegate; + initParams.operationalCredentialsDelegate = &sPlaceholderOperationalCredentialsIssuer; + initParams.operationalKeypair = operationalKey; + initParams.controllerRCAC = ByteSpan(rcac, rcacLen); + initParams.controllerICAC = ByteSpan(icac, icacLen); + initParams.controllerNOC = ByteSpan(noc, nocLen); + initParams.enableServerInteractions = enableServerInteractions; + initParams.controllerVendorId = adminVendorId; + initParams.permitMultiControllerFabrics = true; + initParams.hasExternallyOwnedOperationalKeypair = true; + + CHIP_ERROR err = Controller::DeviceControllerFactory::GetInstance().SetupCommissioner(initParams, *devCtrl); + VerifyOrReturnError(err == CHIP_NO_ERROR, ToPyChipError(err)); + + // Setup IPK in Group Data Provider for controller after Commissioner init which sets-up the fabric table entry + uint8_t compressedFabricId[sizeof(uint64_t)] = { 0 }; + chip::MutableByteSpan compressedFabricIdSpan(compressedFabricId); + + err = 
devCtrl->GetCompressedFabricIdBytes(compressedFabricIdSpan); + VerifyOrReturnError(err == CHIP_NO_ERROR, ToPyChipError(err)); + + ChipLogProgress(Support, "Setting up group data for Fabric Index %u with Compressed Fabric ID:", + static_cast(devCtrl->GetFabricIndex())); + ChipLogByteSpan(Support, compressedFabricIdSpan); + + chip::ByteSpan fabricIpk = + (ipk == nullptr) ? chip::GroupTesting::DefaultIpkValue::GetDefaultIpk() : chip::ByteSpan(ipk, ipkLen); + err = + chip::Credentials::SetSingleIpkEpochKey(&sGroupDataProvider, devCtrl->GetFabricIndex(), fabricIpk, compressedFabricIdSpan); + VerifyOrReturnError(err == CHIP_NO_ERROR, ToPyChipError(err)); + + *outDevCtrl = devCtrl.release(); + + return ToPyChipError(CHIP_NO_ERROR); +} + +// TODO(#25214): Need clean up API PyChipError pychip_OpCreds_AllocateController(OpCredsContext * context, chip::Controller::DeviceCommissioner ** outDevCtrl, FabricId fabricId, chip::NodeId nodeId, chip::VendorId adminVendorId, const char * paaTrustStorePath, bool useTestCommissioner, - bool enableServerInteractions, CASEAuthTag * caseAuthTags, uint32_t caseAuthTagLen) + bool enableServerInteractions, CASEAuthTag * caseAuthTags, uint32_t caseAuthTagLen, + chip::python::pychip_P256Keypair * operationalKey) { + CHIP_ERROR err = CHIP_NO_ERROR; + ChipLogDetail(Controller, "Creating New Device Controller"); VerifyOrReturnError(context != nullptr, ToPyChipError(CHIP_ERROR_INVALID_ARGUMENT)); @@ -393,8 +460,18 @@ PyChipError pychip_OpCreds_AllocateController(OpCredsContext * context, chip::Co SetDeviceAttestationVerifier(GetDefaultDACVerifier(testingRootStore)); chip::Crypto::P256Keypair ephemeralKey; - CHIP_ERROR err = ephemeralKey.Initialize(chip::Crypto::ECPKeyTarget::ECDSA); - VerifyOrReturnError(err == CHIP_NO_ERROR, ToPyChipError(err)); + chip::Crypto::P256Keypair * controllerKeyPair; + + if (operationalKey == nullptr) + { + err = ephemeralKey.Initialize(chip::Crypto::ECPKeyTarget::ECDSA); + VerifyOrReturnError(err == CHIP_NO_ERROR, 
ToPyChipError(err)); + controllerKeyPair = &ephemeralKey; + } + else + { + controllerKeyPair = operationalKey; + } chip::Platform::ScopedMemoryBuffer noc; ReturnErrorCodeIf(!noc.Alloc(Controller::kMaxCHIPDERCertLength), ToPyChipError(CHIP_ERROR_NO_MEMORY)); @@ -419,19 +496,21 @@ PyChipError pychip_OpCreds_AllocateController(OpCredsContext * context, chip::Co memcpy(catValues.values.data(), caseAuthTags, caseAuthTagLen * sizeof(CASEAuthTag)); - err = context->mAdapter->GenerateNOCChain(nodeId, fabricId, catValues, ephemeralKey.Pubkey(), rcacSpan, icacSpan, nocSpan); + err = + context->mAdapter->GenerateNOCChain(nodeId, fabricId, catValues, controllerKeyPair->Pubkey(), rcacSpan, icacSpan, nocSpan); VerifyOrReturnError(err == CHIP_NO_ERROR, ToPyChipError(err)); Controller::SetupParams initParams; - initParams.pairingDelegate = &sPairingDelegate; - initParams.operationalCredentialsDelegate = context->mAdapter.get(); - initParams.operationalKeypair = &ephemeralKey; - initParams.controllerRCAC = rcacSpan; - initParams.controllerICAC = icacSpan; - initParams.controllerNOC = nocSpan; - initParams.enableServerInteractions = enableServerInteractions; - initParams.controllerVendorId = adminVendorId; - initParams.permitMultiControllerFabrics = true; + initParams.pairingDelegate = &sPairingDelegate; + initParams.operationalCredentialsDelegate = context->mAdapter.get(); + initParams.operationalKeypair = controllerKeyPair; + initParams.controllerRCAC = rcacSpan; + initParams.controllerICAC = icacSpan; + initParams.controllerNOC = nocSpan; + initParams.enableServerInteractions = enableServerInteractions; + initParams.controllerVendorId = adminVendorId; + initParams.permitMultiControllerFabrics = true; + initParams.hasExternallyOwnedOperationalKeypair = operationalKey != nullptr; if (useTestCommissioner) { @@ -505,6 +584,22 @@ PyChipError pychip_DeviceController_DeleteDeviceController(chip::Controller::Dev return ToPyChipError(CHIP_NO_ERROR); } +PyChipError 
pychip_DeviceController_SetIpk(chip::Controller::DeviceCommissioner * devCtrl, const uint8_t * ipk, size_t ipkLen) +{ + VerifyOrReturnError(ipk != nullptr, ToPyChipError(CHIP_ERROR_INVALID_ARGUMENT)); + + uint8_t compressedFabricId[sizeof(uint64_t)] = { 0 }; + chip::MutableByteSpan compressedFabricIdSpan(compressedFabricId); + + CHIP_ERROR err = devCtrl->GetCompressedFabricIdBytes(compressedFabricIdSpan); + VerifyOrReturnError(err == CHIP_NO_ERROR, ToPyChipError(err)); + + err = chip::Credentials::SetSingleIpkEpochKey(&sGroupDataProvider, devCtrl->GetFabricIndex(), ByteSpan(ipk, ipkLen), + compressedFabricIdSpan); + + return ToPyChipError(err); +} + bool pychip_TestCommissionerUsed() { return sTestCommissioner.GetTestCommissionerUsed(); diff --git a/src/controller/python/chip/ChipDeviceCtrl.py b/src/controller/python/chip/ChipDeviceCtrl.py index 14419e091ac006..3626dac0027e76 100644 --- a/src/controller/python/chip/ChipDeviceCtrl.py +++ b/src/controller/python/chip/ChipDeviceCtrl.py @@ -51,6 +51,7 @@ from .clusters import Command as ClusterCommand from .clusters import Objects as GeneratedObjects from .clusters.CHIPClusters import * +from .crypto import p256keypair from .exceptions import * from .interaction_model import InteractionModelError from .interaction_model import delegate as im @@ -203,12 +204,10 @@ def numTotalSessions(self) -> int: DiscoveryFilterType = discovery.FilterType -class ChipDeviceController(): +class ChipDeviceControllerBase(): activeList = set() - def __init__(self, opCredsContext: ctypes.c_void_p, fabricId: int, nodeId: int, adminVendorId: int, - catTags: typing.List[int] = [], paaTrustStorePath: str = "", useTestCommissioner: bool = False, - fabricAdmin: FabricAdmin.FabricAdmin = None, name: str = None): + def __init__(self, name: str = ''): self.state = DCState.NOT_INITIALIZED self.devCtrl = None self._ChipStack = builtins.chipStack @@ -216,39 +215,15 @@ def __init__(self, opCredsContext: ctypes.c_void_p, fabricId: int, nodeId: int, 
self._InitLib() - self._dmLib.pychip_DeviceController_SetIssueNOCChainCallbackPythonCallback(_IssueNOCChainCallbackPythonCallback) - devCtrl = c_void_p(None) - c_catTags = (c_uint32 * len(catTags))() - - for i, item in enumerate(catTags): - c_catTags[i] = item - - self._dmLib.pychip_OpCreds_AllocateController.argtypes = [c_void_p, POINTER( - c_void_p), c_uint64, c_uint64, c_uint16, c_char_p, c_bool, c_bool, POINTER(c_uint32), c_uint32] - self._dmLib.pychip_OpCreds_AllocateController.restype = PyChipError - - # TODO(erjiaqing@): Figure out how to control enableServerInteractions for a single device controller (node) - self._ChipStack.Call( - lambda: self._dmLib.pychip_OpCreds_AllocateController(c_void_p( - opCredsContext), pointer(devCtrl), fabricId, nodeId, adminVendorId, c_char_p(None if len(paaTrustStorePath) == 0 else str.encode(paaTrustStorePath)), useTestCommissioner, self._ChipStack.enableServerInteractions, c_catTags, len(catTags)) - ).raise_on_error() - self.devCtrl = devCtrl - self._fabricAdmin = fabricAdmin - self._fabricId = fabricId - self._nodeId = nodeId - self._caIndex = fabricAdmin.caIndex - - if name is None: - self._name = "caIndex(%x)/fabricId(0x%016X)/nodeId(0x%016X)" % (fabricAdmin.caIndex, fabricId, nodeId) - else: - self._name = name + self.name = name self._Cluster = ChipClusters(builtins.chipStack) self._Cluster.InitLib(self._dmLib) + def _set_dev_ctrl(self, devCtrl): def HandleCommissioningComplete(nodeid, err): if err.is_success: print("Commissioning complete") @@ -292,6 +267,8 @@ def HandlePASEEstablishmentComplete(err: PyChipError): if not err.is_success: HandleCommissioningComplete(0, err) + self.devCtrl = devCtrl + self.cbHandlePASEEstablishmentCompleteFunct = _DevicePairingDelegate_OnPairingCompleteFunct( HandlePASEEstablishmentComplete) self._dmLib.pychip_ScriptDevicePairingDelegate_SetKeyExchangeCallback( @@ -312,9 +289,11 @@ def HandlePASEEstablishmentComplete(err: PyChipError): # Validate FabricID/NodeID followed from NOC Chain 
self._fabricId = self.GetFabricIdInternal() - assert self._fabricId == fabricId self._nodeId = self.GetNodeIdInternal() - assert self._nodeId == nodeId + + def _finish_init(self): + self.state = DCState.IDLE + self._isActive = True ChipDeviceController.activeList.add(self) @@ -330,10 +309,6 @@ def nodeId(self) -> int: def fabricId(self) -> int: return self._fabricId - @property - def caIndex(self) -> int: - return self._caIndex - @property def name(self) -> str: return self._name @@ -458,20 +433,6 @@ def EstablishPASESessionIP(self, ipaddr: str, setupPinCode: int, nodeid: int): self.devCtrl, ipaddr.encode("utf-8"), setupPinCode, nodeid) ) - def Commission(self, nodeid): - self.CheckIsActive() - self._ChipStack.commissioningCompleteEvent.clear() - self.state = DCState.COMMISSIONING - - self._ChipStack.CallAsync( - lambda: self._dmLib.pychip_DeviceController_Commission( - self.devCtrl, nodeid) - ) - if not self._ChipStack.commissioningCompleteEvent.isSet(): - # Error 50 is a timeout - return False - return self._ChipStack.commissioningEventRes == 0 - def GetTestCommissionerUsed(self): return self._ChipStack.Call( lambda: self._dmLib.pychip_TestCommissionerUsed() @@ -500,116 +461,11 @@ def CheckTestCommissionerCallbacks(self): def CheckTestCommissionerPaseConnection(self, nodeid): return self._dmLib.pychip_TestPaseConnection(nodeid) - def CommissionOnNetwork(self, nodeId: int, setupPinCode: int, filterType: DiscoveryFilterType = DiscoveryFilterType.NONE, filter: typing.Any = None): - ''' - Does the routine for OnNetworkCommissioning, with a filter for mDNS discovery. 
- Supported filters are: - - DiscoveryFilterType.NONE - DiscoveryFilterType.SHORT_DISCRIMINATOR - DiscoveryFilterType.LONG_DISCRIMINATOR - DiscoveryFilterType.VENDOR_ID - DiscoveryFilterType.DEVICE_TYPE - DiscoveryFilterType.COMMISSIONING_MODE - DiscoveryFilterType.INSTANCE_NAME - DiscoveryFilterType.COMMISSIONER - DiscoveryFilterType.COMPRESSED_FABRIC_ID - - The filter can be an integer, a string or None depending on the actual type of selected filter. - ''' - self.CheckIsActive() - - # IP connection will run through full commissioning, so we need to wait - # for the commissioning complete event, not just any callback. - self.state = DCState.COMMISSIONING - - # Convert numerical filters to string for passing down to binding. - if isinstance(filter, int): - filter = str(filter) - - self._ChipStack.commissioningCompleteEvent.clear() - - self._ChipStack.CallAsync( - lambda: self._dmLib.pychip_DeviceController_OnNetworkCommission( - self.devCtrl, nodeId, setupPinCode, int(filterType), str(filter).encode("utf-8") + b"\x00" if filter is not None else None) - ) - if not self._ChipStack.commissioningCompleteEvent.isSet(): - # Error 50 is a timeout - return False, -1 - return self._ChipStack.commissioningEventRes == 0, self._ChipStack.commissioningEventRes - - def CommissionWithCode(self, setupPayload: str, nodeid: int): - self.CheckIsActive() - - setupPayload = setupPayload.encode() + b'\0' - - # IP connection will run through full commissioning, so we need to wait - # for the commissioning complete event, not just any callback. 
- self.state = DCState.COMMISSIONING - - self._ChipStack.commissioningCompleteEvent.clear() - - self._ChipStack.CallAsync( - lambda: self._dmLib.pychip_DeviceController_ConnectWithCode( - self.devCtrl, setupPayload, nodeid) - ) - if not self._ChipStack.commissioningCompleteEvent.isSet(): - # Error 50 is a timeout - return False - return self._ChipStack.commissioningEventRes == 0 - - def CommissionIP(self, ipaddr: str, setupPinCode: int, nodeid: int): - """ DEPRECATED, DO NOT USE! Use `CommissionOnNetwork` or `CommissionWithCode` """ - self.CheckIsActive() - - # IP connection will run through full commissioning, so we need to wait - # for the commissioning complete event, not just any callback. - self.state = DCState.COMMISSIONING - - self._ChipStack.commissioningCompleteEvent.clear() - - self._ChipStack.CallAsync( - lambda: self._dmLib.pychip_DeviceController_ConnectIP( - self.devCtrl, ipaddr.encode("utf-8"), setupPinCode, nodeid) - ) - if not self._ChipStack.commissioningCompleteEvent.isSet(): - # Error 50 is a timeout - return False - return self._ChipStack.commissioningEventRes == 0 - def NOCChainCallback(self, nocChain): self._ChipStack.callbackRes = nocChain self._ChipStack.completeEvent.set() return - def CommissionThread(self, discriminator, setupPinCode, nodeId, threadOperationalDataset: bytes): - ''' Commissions a Thread device over BLE - ''' - self.SetThreadOperationalDataset(threadOperationalDataset) - return self.ConnectBLE(discriminator, setupPinCode, nodeId) - - def CommissionWiFi(self, discriminator, setupPinCode, nodeId, ssid: str, credentials: str): - ''' Commissions a WiFi device over BLE - ''' - self.SetWiFiCredentials(ssid, credentials) - return self.ConnectBLE(discriminator, setupPinCode, nodeId) - - def SetWiFiCredentials(self, ssid: str, credentials: str): - self.CheckIsActive() - - self._ChipStack.Call( - lambda: self._dmLib.pychip_DeviceController_SetWiFiCredentials( - ssid.encode("utf-8"), credentials.encode("utf-8")) - ).raise_on_error() 
- - def SetThreadOperationalDataset(self, threadOperationalDataset): - self.CheckIsActive() - - self._ChipStack.Call( - lambda: self._dmLib.pychip_DeviceController_SetThreadOperationalDataset( - threadOperationalDataset, len(threadOperationalDataset)) - ).raise_on_error() - def ResolveNode(self, nodeid): self.CheckIsActive() @@ -1319,15 +1175,10 @@ def SetBlockingCB(self, blockingCB): self._ChipStack.blockingCB = blockingCB - def IssueNOCChain(self, csr: Clusters.OperationalCredentials.Commands.CSRResponse, nodeId: int): - """Issue an NOC chain using the associated OperationalCredentialsDelegate. - The NOC chain will be provided in TLV cert format.""" - self.CheckIsActive() - - return self._ChipStack.CallAsync( - lambda: self._dmLib.pychip_DeviceController_IssueNOCChain( - self.devCtrl, py_object(self), csr.NOCSRElements, len(csr.NOCSRElements), nodeId) - ) + def SetIpk(self, ipk: bytes): + self._ChipStack.Call( + lambda: self._dmLib.pychip_DeviceController_SetIpk(self.devCtrl, ipk, len(ipk)) + ).raise_on_error() def InitGroupTestingData(self): """Populates the Device Controller's GroupDataProvider with known test group info and keys.""" @@ -1520,3 +1371,236 @@ def _InitLib(self): self._dmLib.pychip_DeviceController_GetLogFilter = [None] self._dmLib.pychip_DeviceController_GetLogFilter = c_uint8 + + self._dmLib.pychip_OpCreds_AllocateController.argtypes = [c_void_p, POINTER( + c_void_p), c_uint64, c_uint64, c_uint16, c_char_p, c_bool, c_bool, POINTER(c_uint32), c_uint32, c_void_p] + self._dmLib.pychip_OpCreds_AllocateController.restype = PyChipError + + self._dmLib.pychip_OpCreds_AllocateControllerForPythonCommissioningFLow.argtypes = [ + POINTER(c_void_p), c_void_p, POINTER(c_char), c_uint32, POINTER(c_char), c_uint32, POINTER(c_char), c_uint32, POINTER(c_char), c_uint32, c_uint16, c_bool] + self._dmLib.pychip_OpCreds_AllocateControllerForPythonCommissioningFLow.restype = PyChipError + + self._dmLib.pychip_DeviceController_SetIpk.argtypes = [c_void_p, 
POINTER(c_char), c_size_t] + self._dmLib.pychip_DeviceController_SetIpk.restype = PyChipError + + +class ChipDeviceController(ChipDeviceControllerBase): + ''' The ChipDeviceCommissioner binding, named as ChipDeviceController + + TODO: This class contains DEPRECATED functions, we should update the test scripts to avoid the usage of those functions. + ''' + + def __init__(self, opCredsContext: ctypes.c_void_p, fabricId: int, nodeId: int, adminVendorId: int, catTags: typing.List[int] = [], paaTrustStorePath: str = "", useTestCommissioner: bool = False, fabricAdmin: FabricAdmin = None, name: str = None, keypair: p256keypair.P256Keypair = None): + super().__init__( + name or + f"caIndex({fabricAdmin.caIndex:x})/fabricId(0x{fabricId:016X})/nodeId(0x{nodeId:016X})" + ) + + self._dmLib.pychip_DeviceController_SetIssueNOCChainCallbackPythonCallback(_IssueNOCChainCallbackPythonCallback) + + devCtrl = c_void_p(None) + + c_catTags = (c_uint32 * len(catTags))() + + for i, item in enumerate(catTags): + c_catTags[i] = item + + # TODO(erjiaqing@): Figure out how to control enableServerInteractions for a single device controller (node) + self._externalKeyPair = keypair + self._ChipStack.Call( + lambda: self._dmLib.pychip_OpCreds_AllocateController(c_void_p( + opCredsContext), pointer(devCtrl), fabricId, nodeId, adminVendorId, c_char_p(None if len(paaTrustStorePath) == 0 else str.encode(paaTrustStorePath)), useTestCommissioner, self._ChipStack.enableServerInteractions, c_catTags, len(catTags), None if keypair is None else keypair.native_object) + ).raise_on_error() + + self._fabricAdmin = fabricAdmin + self._fabricId = fabricId + self._nodeId = nodeId + self._caIndex = fabricAdmin.caIndex + + self._set_dev_ctrl(devCtrl=devCtrl) + + self._finish_init() + + assert self._fabricId == fabricId + assert self._nodeId == nodeId + + @property + def caIndex(self) -> int: + return self._caIndex + + @property + def fabricAdmin(self) -> FabricAdmin: + return self._fabricAdmin + + def 
Commission(self, nodeid) -> bool: + ''' + Start the auto-commissioning process on a node after establishing a PASE connection. + This function is intended to be used in conjunction with `EstablishPASESessionBLE` or + `EstablishPASESessionIP`. It can be called either before or after the DevicePairingDelegate + receives the OnPairingComplete call. Commissioners that want to perform simple + auto-commissioning should use the supplied "PairDevice" functions above, which will + establish the PASE connection and commission automatically. + + Return: + bool: True if successful, False otherwise. + ''' + self.CheckIsActive() + self._ChipStack.commissioningCompleteEvent.clear() + self.state = DCState.COMMISSIONING + + self._ChipStack.CallAsync( + lambda: self._dmLib.pychip_DeviceController_Commission( + self.devCtrl, nodeid) + ) + return (self._ChipStack.commissioningCompleteEvent.isSet() and (self._ChipStack.commissioningEventRes == 0)) + + def CommissionThread(self, discriminator, setupPinCode, nodeId, threadOperationalDataset: bytes): + ''' Commissions a Thread device over BLE + ''' + self.SetThreadOperationalDataset(threadOperationalDataset) + return self.ConnectBLE(discriminator, setupPinCode, nodeId) + + def CommissionWiFi(self, discriminator, setupPinCode, nodeId, ssid: str, credentials: str): + ''' Commissions a WiFi device over BLE + ''' + self.SetWiFiCredentials(ssid, credentials) + return self.ConnectBLE(discriminator, setupPinCode, nodeId) + + def SetWiFiCredentials(self, ssid: str, credentials: str): + self.CheckIsActive() + + self._ChipStack.Call( + lambda: self._dmLib.pychip_DeviceController_SetWiFiCredentials( + ssid.encode("utf-8"), credentials.encode("utf-8")) + ).raise_on_error() + + def SetThreadOperationalDataset(self, threadOperationalDataset): + self.CheckIsActive() + + self._ChipStack.Call( + lambda: self._dmLib.pychip_DeviceController_SetThreadOperationalDataset( + threadOperationalDataset, len(threadOperationalDataset)) + ).raise_on_error() + + def 
CommissionOnNetwork(self, nodeId: int, setupPinCode: int, filterType: DiscoveryFilterType = DiscoveryFilterType.NONE, filter: typing.Any = None): + ''' + Does the routine for OnNetworkCommissioning, with a filter for mDNS discovery. + Supported filters are: + + DiscoveryFilterType.NONE + DiscoveryFilterType.SHORT_DISCRIMINATOR + DiscoveryFilterType.LONG_DISCRIMINATOR + DiscoveryFilterType.VENDOR_ID + DiscoveryFilterType.DEVICE_TYPE + DiscoveryFilterType.COMMISSIONING_MODE + DiscoveryFilterType.INSTANCE_NAME + DiscoveryFilterType.COMMISSIONER + DiscoveryFilterType.COMPRESSED_FABRIC_ID + + The filter can be an integer, a string or None depending on the actual type of selected filter. + ''' + self.CheckIsActive() + + # IP connection will run through full commissioning, so we need to wait + # for the commissioning complete event, not just any callback. + self.state = DCState.COMMISSIONING + + # Convert numerical filters to string for passing down to binding. + if isinstance(filter, int): + filter = str(filter) + + self._ChipStack.commissioningCompleteEvent.clear() + + self._ChipStack.CallAsync( + lambda: self._dmLib.pychip_DeviceController_OnNetworkCommission( + self.devCtrl, nodeId, setupPinCode, int(filterType), str(filter).encode("utf-8") + b"\x00" if filter is not None else None) + ) + if not self._ChipStack.commissioningCompleteEvent.isSet(): + return False, -1 + return self._ChipStack.commissioningEventRes == 0, self._ChipStack.commissioningEventRes + + def CommissionWithCode(self, setupPayload: str, nodeid: int): + self.CheckIsActive() + + setupPayload = setupPayload.encode() + b'\0' + + # IP connection will run through full commissioning, so we need to wait + # for the commissioning complete event, not just any callback. 
+ self.state = DCState.COMMISSIONING + + self._ChipStack.commissioningCompleteEvent.clear() + + self._ChipStack.CallAsync( + lambda: self._dmLib.pychip_DeviceController_ConnectWithCode( + self.devCtrl, setupPayload, nodeid) + ) + if not self._ChipStack.commissioningCompleteEvent.isSet(): + return False + return self._ChipStack.commissioningEventRes == 0 + + def CommissionIP(self, ipaddr: str, setupPinCode: int, nodeid: int): + """ DEPRECATED, DO NOT USE! Use `CommissionOnNetwork` or `CommissionWithCode` """ + self.CheckIsActive() + + # IP connection will run through full commissioning, so we need to wait + # for the commissioning complete event, not just any callback. + self.state = DCState.COMMISSIONING + + self._ChipStack.commissioningCompleteEvent.clear() + + self._ChipStack.CallAsync( + lambda: self._dmLib.pychip_DeviceController_ConnectIP( + self.devCtrl, ipaddr.encode("utf-8"), setupPinCode, nodeid) + ) + if not self._ChipStack.commissioningCompleteEvent.isSet(): + return False + return self._ChipStack.commissioningEventRes == 0 + + def IssueNOCChain(self, csr: Clusters.OperationalCredentials.Commands.CSRResponse, nodeId: int): + """Issue an NOC chain using the associated OperationalCredentialsDelegate. + The NOC chain will be provided in TLV cert format.""" + self.CheckIsActive() + + return self._ChipStack.CallAsync( + lambda: self._dmLib.pychip_DeviceController_IssueNOCChain( + self.devCtrl, py_object(self), csr.NOCSRElements, len(csr.NOCSRElements), nodeId) + ) + + +class BareChipDeviceController(ChipDeviceControllerBase): + ''' A bare device controller without AutoCommissioner support. + ''' + + def __init__(self, operationalKey: p256keypair.P256Keypair, noc: bytes, icac: typing.Union[bytes, None], rcac: bytes, ipk: typing.Union[bytes, None], adminVendorId: int, name: str = None): + '''Creates a controller without autocommissioner. + + The allocated controller uses the noc, icac, rcac and ipk instead of the default, + random generated certificates / keys. 
Which is suitable for creating a controller + for manually signing certificates for testing. + + Args: + operationalKey: A P256Keypair object for the operational key of the controller. + noc: The NOC for the controller, in bytes. + icac: The optional ICAC for the controller. + rcac: The RCAC for the controller. + ipk: The optional IPK for the controller, when None is provided, the defaultIpk + will be used. + adminVendorId: The adminVendorId of the controller. + name: The name of the controller, for debugging use only. + ''' + super().__init__(name or f"ctrl(v/{adminVendorId})") + + devCtrl = c_void_p(None) + + # Device should hold a reference to the key to avoid it being GC-ed. + self._externalKeyPair = operationalKey + nativeKey = operationalKey.create_native_object() + + self._ChipStack.Call( + lambda: self._dmLib.pychip_OpCreds_AllocateControllerForPythonCommissioningFLow( + c_void_p(devCtrl), nativeKey, noc, len(noc), icac, len(icac) if icac else 0, rcac, len(rcac), ipk, len(ipk) if ipk else 0, adminVendorId, self._ChipStack.enableServerInteractions) + ).raise_on_error() + + self._set_dev_ctrl(devCtrl) + + self._finish_init() diff --git a/src/controller/python/chip/FabricAdmin.py b/src/controller/python/chip/FabricAdmin.py index fbd4bc2165d11f..873344d540a4ae 100644 --- a/src/controller/python/chip/FabricAdmin.py +++ b/src/controller/python/chip/FabricAdmin.py @@ -23,6 +23,7 @@ from typing import * from chip import CertificateAuthority, ChipDeviceCtrl +from chip.crypto import p256keypair from chip.native import GetLibraryHandle @@ -65,7 +66,7 @@ def __init__(self, certificateAuthority: CertificateAuthority.CertificateAuthori self._isActive = True self._activeControllers = [] - def NewController(self, nodeId: int = None, paaTrustStorePath: str = "", useTestCommissioner: bool = False, catTags: List[int] = []): + def NewController(self, nodeId: int = None, paaTrustStorePath: str = "", useTestCommissioner: bool = False, catTags: List[int] = [], keypair: 
p256keypair.P256Keypair = None): ''' Create a new chip.ChipDeviceCtrl.ChipDeviceController instance on this fabric. When vending ChipDeviceController instances on a given fabric, each controller instance @@ -104,7 +105,8 @@ def NewController(self, nodeId: int = None, paaTrustStorePath: str = "", useTest paaTrustStorePath=paaTrustStorePath, useTestCommissioner=useTestCommissioner, fabricAdmin=self, - catTags=catTags) + catTags=catTags, + keypair=keypair) self._activeControllers.append(controller) return controller diff --git a/src/controller/python/chip/ble/LinuxImpl.cpp b/src/controller/python/chip/ble/LinuxImpl.cpp index c084f957d44d1c..2aa869ed1a530f 100644 --- a/src/controller/python/chip/ble/LinuxImpl.cpp +++ b/src/controller/python/chip/ble/LinuxImpl.cpp @@ -68,9 +68,12 @@ class ScannerDelegateImpl : public ChipDeviceScannerDelegate using DeviceScannedCallback = void (*)(PyObject * context, const char * address, uint16_t discriminator, uint16_t vendorId, uint16_t productId); using ScanCompleteCallback = void (*)(PyObject * context); + using ScanErrorCallback = void (*)(PyObject * context, CHIP_ERROR::StorageType error); - ScannerDelegateImpl(PyObject * context, DeviceScannedCallback scanCallback, ScanCompleteCallback completeCallback) : - mContext(context), mScanCallback(scanCallback), mCompleteCallback(completeCallback) + ScannerDelegateImpl(PyObject * context, DeviceScannedCallback scanCallback, ScanCompleteCallback completeCallback, + ScanErrorCallback errorCallback) : + mContext(context), + mScanCallback(scanCallback), mCompleteCallback(completeCallback), mErrorCallback(errorCallback) {} void SetScanner(std::unique_ptr scanner) { mScanner = std::move(scanner); } @@ -94,20 +97,31 @@ class ScannerDelegateImpl : public ChipDeviceScannerDelegate delete this; } + virtual void OnScanError(CHIP_ERROR error) override + { + if (mErrorCallback) + { + mErrorCallback(mContext, error.AsInteger()); + } + } + private: std::unique_ptr mScanner; PyObject * const mContext; 
const DeviceScannedCallback mScanCallback; const ScanCompleteCallback mCompleteCallback; + const ScanErrorCallback mErrorCallback; }; } // namespace extern "C" void * pychip_ble_start_scanning(PyObject * context, void * adapter, uint32_t timeoutMs, ScannerDelegateImpl::DeviceScannedCallback scanCallback, - ScannerDelegateImpl::ScanCompleteCallback completeCallback) + ScannerDelegateImpl::ScanCompleteCallback completeCallback, + ScannerDelegateImpl::ScanErrorCallback errorCallback) { - std::unique_ptr delegate = std::make_unique(context, scanCallback, completeCallback); + std::unique_ptr delegate = + std::make_unique(context, scanCallback, completeCallback, errorCallback); std::unique_ptr scanner = ChipDeviceScanner::Create(static_cast(adapter), delegate.get()); diff --git a/src/controller/python/chip/ble/darwin/Scanning.mm b/src/controller/python/chip/ble/darwin/Scanning.mm index cfe653ca9481ab..c26395281e5529 100644 --- a/src/controller/python/chip/ble/darwin/Scanning.mm +++ b/src/controller/python/chip/ble/darwin/Scanning.mm @@ -11,6 +11,7 @@ using DeviceScannedCallback = void (*)(PyObject * context, const char * address, uint16_t discriminator, uint16_t vendorId, uint16_t productId); using ScanCompleteCallback = void (*)(PyObject * context); +using ScanErrorCallback = void (*)(PyObject * context, uint32_t error); } @interface ChipDeviceBleScanner : NSObject @@ -23,10 +24,12 @@ @interface ChipDeviceBleScanner : NSObject @property (assign, nonatomic) PyObject * context; @property (assign, nonatomic) DeviceScannedCallback scanCallback; @property (assign, nonatomic) ScanCompleteCallback completeCallback; +@property (assign, nonatomic) ScanErrorCallback errorCallback; - (id)initWithContext:(PyObject *)context scanCallback:(DeviceScannedCallback)scanCallback completeCallback:(ScanCompleteCallback)completeCallback + errorCallback:(ScanErrorCallback)errorCallback timeoutMs:(uint32_t)timeout; - (void)stopTimeoutReached; @@ -38,6 +41,7 @@ @implementation 
ChipDeviceBleScanner - (id)initWithContext:(PyObject *)context scanCallback:(DeviceScannedCallback)scanCallback completeCallback:(ScanCompleteCallback)completeCallback + errorCallback:(ScanErrorCallback)errorCallback timeoutMs:(uint32_t)timeout { self = [super init]; @@ -50,6 +54,7 @@ - (id)initWithContext:(PyObject *)context _context = context; _scanCallback = scanCallback; _completeCallback = completeCallback; + _errorCallback = errorCallback; dispatch_source_set_event_handler(_timer, ^{ [self stopTimeoutReached]; @@ -100,6 +105,7 @@ - (void)stopTimeoutReached ChipLogProgress(Ble, "Scan timeout reached."); _completeCallback(_context); + _errorCallback(_context, CHIP_ERROR_TIMEOUT.AsInteger()); dispatch_source_cancel(_timer); [self.centralManager stopScan]; @@ -125,14 +131,15 @@ - (void)centralManager:(CBCentralManager *)central didConnectPeripheral:(CBPerip @end -extern "C" void * pychip_ble_start_scanning( - PyObject * context, void * adapter, uint32_t timeout, DeviceScannedCallback scanCallback, ScanCompleteCallback completeCallback) +extern "C" void * pychip_ble_start_scanning(PyObject * context, void * adapter, uint32_t timeout, + DeviceScannedCallback scanCallback, ScanCompleteCallback completeCallback, ScanErrorCallback errorCallback) { // NOTE: adapter is ignored as it does not apply to mac ChipDeviceBleScanner * scanner = [[ChipDeviceBleScanner alloc] initWithContext:context scanCallback:scanCallback completeCallback:completeCallback + errorCallback:errorCallback timeoutMs:timeout]; return (__bridge_retained void *) (scanner); diff --git a/src/controller/python/chip/clusters/Attribute.py b/src/controller/python/chip/clusters/Attribute.py index 240579071def38..5cad75c444a871 100644 --- a/src/controller/python/chip/clusters/Attribute.py +++ b/src/controller/python/chip/clusters/Attribute.py @@ -977,8 +977,8 @@ def WriteGroupAttributes(groupId: int, devCtrl: c_void_p, attributes: List[Attri # This struct matches the PyReadAttributeParams in attribute.cpp, 
for passing various params together. _ReadParams = construct.Struct( - "MinInterval" / construct.Int32ul, - "MaxInterval" / construct.Int32ul, + "MinInterval" / construct.Int16ul, + "MaxInterval" / construct.Int16ul, "IsSubscription" / construct.Flag, "IsFabricFiltered" / construct.Flag, "KeepSubscriptions" / construct.Flag, diff --git a/src/controller/python/chip/clusters/Objects.py b/src/controller/python/chip/clusters/Objects.py index 4ae7e97ecb9fc7..61079f4c51d42f 100644 --- a/src/controller/python/chip/clusters/Objects.py +++ b/src/controller/python/chip/clusters/Objects.py @@ -5355,9 +5355,9 @@ class TimeFormatLocalization(Cluster): def descriptor(cls) -> ClusterObjectDescriptor: return ClusterObjectDescriptor( Fields=[ - ClusterObjectFieldDescriptor(Label="hourFormat", Tag=0x00000000, Type=TimeFormatLocalization.Enums.HourFormat), - ClusterObjectFieldDescriptor(Label="activeCalendarType", Tag=0x00000001, Type=typing.Optional[TimeFormatLocalization.Enums.CalendarType]), - ClusterObjectFieldDescriptor(Label="supportedCalendarTypes", Tag=0x00000002, Type=typing.Optional[typing.List[TimeFormatLocalization.Enums.CalendarType]]), + ClusterObjectFieldDescriptor(Label="hourFormat", Tag=0x00000000, Type=TimeFormatLocalization.Enums.HourFormatEnum), + ClusterObjectFieldDescriptor(Label="activeCalendarType", Tag=0x00000001, Type=typing.Optional[TimeFormatLocalization.Enums.CalendarTypeEnum]), + ClusterObjectFieldDescriptor(Label="supportedCalendarTypes", Tag=0x00000002, Type=typing.Optional[typing.List[TimeFormatLocalization.Enums.CalendarTypeEnum]]), ClusterObjectFieldDescriptor(Label="generatedCommandList", Tag=0x0000FFF8, Type=typing.List[uint]), ClusterObjectFieldDescriptor(Label="acceptedCommandList", Tag=0x0000FFF9, Type=typing.List[uint]), ClusterObjectFieldDescriptor(Label="eventList", Tag=0x0000FFFA, Type=typing.List[uint]), @@ -5366,9 +5366,9 @@ def descriptor(cls) -> ClusterObjectDescriptor: ClusterObjectFieldDescriptor(Label="clusterRevision", 
Tag=0x0000FFFD, Type=uint), ]) - hourFormat: 'TimeFormatLocalization.Enums.HourFormat' = None - activeCalendarType: 'typing.Optional[TimeFormatLocalization.Enums.CalendarType]' = None - supportedCalendarTypes: 'typing.Optional[typing.List[TimeFormatLocalization.Enums.CalendarType]]' = None + hourFormat: 'TimeFormatLocalization.Enums.HourFormatEnum' = None + activeCalendarType: 'typing.Optional[TimeFormatLocalization.Enums.CalendarTypeEnum]' = None + supportedCalendarTypes: 'typing.Optional[typing.List[TimeFormatLocalization.Enums.CalendarTypeEnum]]' = None generatedCommandList: 'typing.List[uint]' = None acceptedCommandList: 'typing.List[uint]' = None eventList: 'typing.List[uint]' = None @@ -5377,7 +5377,7 @@ def descriptor(cls) -> ClusterObjectDescriptor: clusterRevision: 'uint' = None class Enums: - class CalendarType(MatterIntEnum): + class CalendarTypeEnum(MatterIntEnum): kBuddhist = 0x00 kChinese = 0x01 kCoptic = 0x02 @@ -5396,7 +5396,7 @@ class CalendarType(MatterIntEnum): # enum value. This specific should never be transmitted. 
kUnknownEnumValue = 12, - class HourFormat(MatterIntEnum): + class HourFormatEnum(MatterIntEnum): k12hr = 0x00 k24hr = 0x01 # All received enum values that are not listed above will be mapped @@ -5418,9 +5418,9 @@ def attribute_id(cls) -> int: @ChipUtility.classproperty def attribute_type(cls) -> ClusterObjectFieldDescriptor: - return ClusterObjectFieldDescriptor(Type=TimeFormatLocalization.Enums.HourFormat) + return ClusterObjectFieldDescriptor(Type=TimeFormatLocalization.Enums.HourFormatEnum) - value: 'TimeFormatLocalization.Enums.HourFormat' = 0 + value: 'TimeFormatLocalization.Enums.HourFormatEnum' = 0 @dataclass class ActiveCalendarType(ClusterAttributeDescriptor): @@ -5434,9 +5434,9 @@ def attribute_id(cls) -> int: @ChipUtility.classproperty def attribute_type(cls) -> ClusterObjectFieldDescriptor: - return ClusterObjectFieldDescriptor(Type=typing.Optional[TimeFormatLocalization.Enums.CalendarType]) + return ClusterObjectFieldDescriptor(Type=typing.Optional[TimeFormatLocalization.Enums.CalendarTypeEnum]) - value: 'typing.Optional[TimeFormatLocalization.Enums.CalendarType]' = None + value: 'typing.Optional[TimeFormatLocalization.Enums.CalendarTypeEnum]' = None @dataclass class SupportedCalendarTypes(ClusterAttributeDescriptor): @@ -5450,9 +5450,9 @@ def attribute_id(cls) -> int: @ChipUtility.classproperty def attribute_type(cls) -> ClusterObjectFieldDescriptor: - return ClusterObjectFieldDescriptor(Type=typing.Optional[typing.List[TimeFormatLocalization.Enums.CalendarType]]) + return ClusterObjectFieldDescriptor(Type=typing.Optional[typing.List[TimeFormatLocalization.Enums.CalendarTypeEnum]]) - value: 'typing.Optional[typing.List[TimeFormatLocalization.Enums.CalendarType]]' = None + value: 'typing.Optional[typing.List[TimeFormatLocalization.Enums.CalendarTypeEnum]]' = None @dataclass class GeneratedCommandList(ClusterAttributeDescriptor): diff --git a/src/controller/python/chip/clusters/attribute.cpp b/src/controller/python/chip/clusters/attribute.cpp index 
1ed6d7e1cb82c6..5e7ffcdc679008 100644 --- a/src/controller/python/chip/clusters/attribute.cpp +++ b/src/controller/python/chip/clusters/attribute.cpp @@ -238,8 +238,8 @@ extern "C" { struct __attribute__((packed)) PyReadAttributeParams { - uint32_t minInterval; // MinInterval in subscription request - uint32_t maxInterval; // MaxInterval in subscription request + uint16_t minInterval; // MinInterval in subscription request + uint16_t maxInterval; // MaxInterval in subscription request bool isSubscription; bool isFabricFiltered; bool keepSubscriptions; diff --git a/src/controller/python/chip/commissioning/PlaceholderOperationalCredentialsIssuer.h b/src/controller/python/chip/commissioning/PlaceholderOperationalCredentialsIssuer.h new file mode 100644 index 00000000000000..11bfcd69a5d227 --- /dev/null +++ b/src/controller/python/chip/commissioning/PlaceholderOperationalCredentialsIssuer.h @@ -0,0 +1,62 @@ +/* + * + * Copyright (c) 2021-2022 Project CHIP Authors + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file + * This file contains class definition of an example operational certificate + * issuer for CHIP devices. The class can be used as a guideline on how to + * construct your own certificate issuer. It can also be used in tests and tools + * if a specific signing authority is not required. + * + * NOTE: This class stores the encryption key in clear storage. This is not suited + * for production use. 
This should only be used in test tools. + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace chip { +namespace Python { + +class DLL_EXPORT PlaceholderOperationalCredentialsIssuer : public Controller::OperationalCredentialsDelegate +{ +public: + PlaceholderOperationalCredentialsIssuer() {} + ~PlaceholderOperationalCredentialsIssuer() override {} + + CHIP_ERROR GenerateNOCChain(const ByteSpan & csrElements, const ByteSpan & csrNonce, const ByteSpan & attestationSignature, + const ByteSpan & attestationChallenge, const ByteSpan & DAC, const ByteSpan & PAI, + Callback::Callback * onCompletion) override + { + return CHIP_ERROR_NOT_IMPLEMENTED; + } + + void SetNodeIdForNextNOCRequest(NodeId nodeId) override {} + + void SetFabricIdForNextNOCRequest(FabricId fabricId) override {} +}; + +} // namespace Python +} // namespace chip diff --git a/src/controller/python/chip/commissioning/__init__.py b/src/controller/python/chip/commissioning/__init__.py new file mode 100644 index 00000000000000..2bf37ba74e2edf --- /dev/null +++ b/src/controller/python/chip/commissioning/__init__.py @@ -0,0 +1,140 @@ +# +# Copyright (c) 2023 Project CHIP Authors +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import abc +import dataclasses +import enum +import os +from typing import Set, Tuple, Union + +ROOT_ENDPOINT_ID = 0 + + +@dataclasses.dataclass +class CommissioneeInfo: + endpoints: Set[int] + is_thread_device: bool = False + is_wifi_device: bool = False + is_ethernet_device: bool = False + + +class RegulatoryLocationType(enum.IntEnum): + INDOOR = 0 + OUTDOOR = 1 + INDOOR_OUTDOOR = 2 + + +@dataclasses.dataclass +class RegulatoryConfig: + location_type: RegulatoryLocationType + country_code: str + + +@dataclasses.dataclass +class PaseParameters: + setup_pin: int + temporary_nodeid: int + + +@dataclasses.dataclass +class PaseOverBLEParameters(PaseParameters): + discriminator: int + + def __str__(self): + return f"BLE:0x{self.discriminator:03x}" + + +@dataclasses.dataclass +class PaseOverIPParameters(PaseParameters): + long_discriminator: int + + def __str__(self): + return f"Discriminator:0x{self.long_discriminator:03x}" + + +@dataclasses.dataclass +class WiFiCredentials: + ssid: bytes + passphrase: bytes + + +@dataclasses.dataclass +class Parameters: + pase_param: Union[PaseOverBLEParameters, PaseOverIPParameters] + regulatory_config: RegulatoryConfig + fabric_label: str + commissionee_info: CommissioneeInfo + wifi_credentials: WiFiCredentials + thread_credentials: bytes + failsafe_expiry_length_seconds: int = 600 + + +class NetworkCommissioningFeatureMap(enum.IntEnum): + WIFI_NETWORK_FEATURE_MAP = 1 + THREAD_NETWORK_FEATURE_MAP = 2 + + +class CommissionFailure(Exception): + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return f"CommissionFailure({self.msg})" + + +@dataclasses.dataclass +class GetCommissioneeCredentialsRequest: + dac: bytes + pai: bytes + attestation_signature: bytes + attestation_nonce: bytes + attestation_elements: bytes + csr_signature: bytes + csr_nonce: bytes + csr_elements: bytes + vendor_id: int + product_id: int + + +@dataclasses.dataclass +class GetCommissioneeCredentialsResponse: + rcac: bytes + noc: bytes + 
icac: bytes + ipk: bytes + case_admin_node: int + admin_vendor_id: int + node_id: int = None + fabric_id: int = None + + +class CredentialProvider: + async def get_commissionee_nonces(self) -> Tuple[bytes, bytes]: + ''' Returns the `attestation_nonce` and `csr_nonce` for the commissionee. + ''' + return os.urandom(32), os.urandom(32) + + @abc.abstractmethod + async def get_commissionee_credentials(self, request: GetCommissioneeCredentialsRequest) -> GetCommissioneeCredentialsResponse: + ''' Returns certifications and infomations for the commissioning. + ''' + raise NotImplementedError() + + +class ExampleCredentialProvider: + async def get_commissionee_credentials(self, request: GetCommissioneeCredentialsRequest) -> GetCommissioneeCredentialsResponse: + pass diff --git a/src/controller/python/chip/commissioning/commissioning_flow_blocks.py b/src/controller/python/chip/commissioning/commissioning_flow_blocks.py new file mode 100644 index 00000000000000..986064af8b6e9e --- /dev/null +++ b/src/controller/python/chip/commissioning/commissioning_flow_blocks.py @@ -0,0 +1,246 @@ +# +# Copyright (c) 2023 Project CHIP Authors +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import base64 +import logging + +import chip.credentials.cert +import chip.crypto.fabric +from chip import ChipDeviceCtrl +from chip import clusters as Clusters +from chip import commissioning +from cryptography import x509 +from cryptography.hazmat.primitives import serialization + + +class CommissioningFlowBlocks: + def __init__(self, devCtrl: ChipDeviceCtrl.ChipDeviceControllerBase, credential_provider: commissioning.CredentialProvider, logger: logging.Logger): + self._devCtrl = devCtrl + self._logger = logger + self._credential_provider = credential_provider + + async def arm_failsafe(self, node_id: int, duration_seconds: int = 180): + response = await self._devCtrl.SendCommand(node_id, commissioning.ROOT_ENDPOINT_ID, Clusters.GeneralCommissioning.Commands.ArmFailSafe( + expiryLengthSeconds=duration_seconds + )) + if response.errorCode != 0: + raise commissioning.CommissionFailure(repr(response)) + + async def operational_credentials_commissioning(self, parameter: commissioning.Parameters, node_id: int): + self._logger.info("Getting Remote Device Info") + device_info = (await self._devCtrl.ReadAttribute(node_id, [ + (commissioning.ROOT_ENDPOINT_ID, Clusters.BasicInformation.Attributes.VendorID), + (commissioning.ROOT_ENDPOINT_ID, Clusters.BasicInformation.Attributes.ProductID)], returnClusterObject=True))[commissioning.ROOT_ENDPOINT_ID][Clusters.BasicInformation] + + self._logger.info("Getting AttestationNonce") + attestation_nonce = await self._credential_provider.get_attestation_nonce() + + self._logger.info("Getting CSR Nonce") + csr_nonce = await self._credential_provider.get_csr_nonce() + + self._logger.info("Sending AttestationRequest") + try: + attestation_elements = await self._devCtrl.SendCommand(node_id, commissioning.ROOT_ENDPOINT_ID, Clusters.OperationalCredentials.Commands.AttestationRequest( + attestationNonce=attestation_nonce + )) + except Exception as ex: + raise commissioning.CommissionFailure(f"Failed to get AttestationElements: {ex}") 
+ + self._logger.info("Getting CertificateChain - DAC") + # Failures are exceptions + try: + dac = await self._devCtrl.SendCommand(node_id, commissioning.ROOT_ENDPOINT_ID, Clusters.OperationalCredentials.Commands.CertificateChainRequest( + certificateType=1 + )) + except Exception as ex: + raise commissioning.CommissionFailure(f"Failed to get DAC: {ex}") + + self._logger.info("Getting CertificateChain - PAI") + try: + pai = await self._devCtrl.SendCommand(node_id, commissioning.ROOT_ENDPOINT_ID, Clusters.OperationalCredentials.Commands.CertificateChainRequest( + certificateType=2 + )) + except Exception as ex: + raise commissioning.CommissionFailure(f"Failed to get PAI: {ex}") + + self._logger.info("Getting OpCSRRequest") + try: + csr = await self._devCtrl.SendCommand(node_id, commissioning.ROOT_ENDPOINT_ID, Clusters.OperationalCredentials.Commands.CSRRequest( + CSRNonce=csr_nonce + )) + except Exception as ex: + raise commissioning.CommissionFailure(f"Failed to get OpCSRRequest: {ex}") + + self._logger.info("Getting device certificate") + commissionee_credentials = await self._credential_provider.get_commissionee_credentials( + commissioning.GetCommissioneeCredentialsRequest( + dac=dac, pai=pai, + attestation_nonce=attestation_nonce, + attestation_elements=attestation_elements.attestationElements, + attestation_signature=attestation_elements.attestationSignature, + csr_nonce=csr_nonce, + csr_elements=csr.NOCSRElements, + csr_signature=csr.attestationSignature, + vendor_id=device_info.vendorID, + product_id=device_info.productID)) + + self._logger.info("Adding Trusted Root Certificate") + try: + response = await self._devCtrl.SendCommand(node_id, commissioning.ROOT_ENDPOINT_ID, Clusters.OperationalCredentials.Commands.AddTrustedRootCertificate( + rootCACertificate=commissionee_credentials.rcac + )) + except Exception as ex: + raise commissioning.CommissionFailure(f"Failed to add Root Certificate: {ex}") + + try: + x509_rcac = x509.load_pem_x509_certificate( + 
b'''-----BEGIN CERTIFICATE-----\n''' + + base64.b64encode(chip.credentials.cert.convert_chip_cert_to_x509_cert(commissionee_credentials.rcac)) + + b'''\n-----END CERTIFICATE-----''') + root_public_key = x509_rcac.public_key().public_bytes(serialization.Encoding.X962, + serialization.PublicFormat.UncompressedPoint) + + x509_noc = x509.load_pem_x509_certificate( + b'''-----BEGIN CERTIFICATE-----\n''' + + base64.b64encode(chip.credentials.cert.convert_chip_cert_to_x509_cert(commissionee_credentials.noc)) + + b'''\n-----END CERTIFICATE-----''') + + for subject in x509_noc.subject: + if subject.oid.dotted_string == '1.3.6.1.4.1.37244.1.1': + cert_fabric_id = int(subject.value, 16) + elif subject.oid.dotted_string == '1.3.6.1.4.1.37244.1.5': + cert_node_id = int(subject.value, 16) + + if cert_fabric_id != commissionee_credentials.fabric_id: + self._logger.warning("Fabric ID in certificate does not match the fabric id in commissionee credentials struct.") + if cert_node_id != commissionee_credentials.node_id: + self._logger.warning("Node ID in certificate does not match the node id in commissionee credentials struct.") + + compressed_fabric_id = chip.crypto.fabric.generate_compressed_fabric_id(root_public_key, cert_fabric_id) + + except Exception: + self._logger.exception("The certificate should be a valid CHIP Certificate, but failed to parse it") + raise + + self._logger.info( + f"Commissioning FabricID: {cert_fabric_id:016X} " + f"Compressed FabricID: {compressed_fabric_id:016X} " + f"Node ID: {cert_node_id:016X}") + + self._logger.info("Adding Operational Certificate") + response = await self._devCtrl.SendCommand(node_id, commissioning.ROOT_ENDPOINT_ID, Clusters.OperationalCredentials.Commands.AddNOC( + NOCValue=commissionee_credentials.noc, + ICACValue=commissionee_credentials.icac, + IPKValue=commissionee_credentials.ipk, + caseAdminSubject=commissionee_credentials.case_admin_node, + adminVendorId=commissionee_credentials.admin_vendor_id + )) + if 
response.statusCode != 0: + raise commissioning.CommissionFailure(repr(response)) + + self._logger.info("Update controller IPK") + self._devCtrl.SetIpk(commissionee_credentials.ipk) + + self._logger.info("Setting fabric label") + response = await self._devCtrl.SendCommand(node_id, commissioning.ROOT_ENDPOINT_ID, Clusters.OperationalCredentials.Commands.UpdateFabricLabel( + label=parameter.fabric_label + )) + if response.statusCode != 0: + raise commissioning.CommissionFailure(repr(response)) + + return commissionee_credentials.node_id + + async def network_commissioning_thread(self, parameter: commissioning.Parameters, node_id: int): + if not parameter.thread_credentials: + raise TypeError("The device requires a Thread network dataset") + + self._logger.info("Adding Thread network") + response = await self._devCtrl.SendCommand(nodeid=node_id, endpoint=commissioning.ROOT_ENDPOINT_ID, payload=Clusters.NetworkCommissioning.Commands.AddOrUpdateThreadNetwork( + operationalDataset=parameter.thread_credentials)) + if response.networkingStatus != Clusters.NetworkCommissioning.Enums.NetworkCommissioningStatus.kSuccess: + raise commissioning.CommissionFailure(f"Unexpected result for adding network: {response.networkingStatus}") + + network_list = (await self._devCtrl.ReadAttribute(nodeid=node_id, attributes=[(commissioning.ROOT_ENDPOINT_ID, Clusters.NetworkCommissioning.Attributes.Networks)], returnClusterObject=True))[commissioning.ROOT_ENDPOINT_ID][Clusters.NetworkCommissioning].networks + network_id = network_list[response.networkIndex].networkID + + self._logger.info("Enabling Thread network") + response = await self._devCtrl.SendCommand(nodeid=node_id, endpoint=commissioning.ROOT_ENDPOINT_ID, payload=Clusters.NetworkCommissioning.Commands.ConnectNetwork(networkID=network_id), interactionTimeoutMs=self._devCtrl.ComputeRoundTripTimeout(node_id, upperLayerProcessingTimeoutMs=30000)) + if response.networkingStatus != 
Clusters.NetworkCommissioning.Enums.NetworkCommissioningStatus.kSuccess: + raise commissioning.CommissionFailure(f"Unexpected result for enabling network: {response.networkingStatus}") + + self._logger.info("Thread network commissioning finished") + + async def network_commissioning_wifi(self, parameter: commissioning.Parameters, node_id: int): + if not parameter.wifi_credentials: + raise TypeError("The device requires WiFi credentials") + + self._logger.info("Adding WiFi network") + response = await self._devCtrl.SendCommand(nodeid=node_id, endpoint=commissioning.ROOT_ENDPOINT_ID, payload=Clusters.NetworkCommissioning.Commands.AddOrUpdateWiFiNetwork(ssid=parameter.wifi_credentials.ssid, credentials=parameter.wifi_credentials.passphrase)) + if response.networkingStatus != Clusters.NetworkCommissioning.Enums.NetworkCommissioningStatus.kSuccess: + raise commissioning.CommissionFailure(f"Unexpected result for adding network: {response.networkingStatus}") + + network_list = (await self._devCtrl.ReadAttribute(nodeid=node_id, attributes=[(commissioning.ROOT_ENDPOINT_ID, Clusters.NetworkCommissioning.Attributes.Networks)], returnClusterObject=True))[commissioning.ROOT_ENDPOINT_ID][Clusters.NetworkCommissioning].networks + network_id = network_list[response.networkIndex].networkID + + self._logger.info("Enabling WiFi network") + response = await self._devCtrl.SendCommand(nodeid=node_id, endpoint=commissioning.ROOT_ENDPOINT_ID, payload=Clusters.NetworkCommissioning.Commands.ConnectNetwork(networkID=network_id), interactionTimeoutMs=self._devCtrl.ComputeRoundTripTimeout(node_id, upperLayerProcessingTimeoutMs=30000)) + if response.networkingStatus != Clusters.NetworkCommissioning.Enums.NetworkCommissioningStatus.kSuccess: + raise commissioning.CommissionFailure(f"Unexpected result for enabling network: {response.networkingStatus}") + + self._logger.info("WiFi network commissioning finished") + + async def network_commissioning(self, parameter: commissioning.Parameters, 
node_id: int): + clusters = await self._devCtrl.ReadAttribute(nodeid=node_id, attributes=[(Clusters.Descriptor.Attributes.ServerList)], returnClusterObject=True) + if Clusters.NetworkCommissioning.id not in clusters[commissioning.ROOT_ENDPOINT_ID][Clusters.Descriptor].serverList: + self._logger.info( + f"Network commissioning cluster {commissioning.ROOT_ENDPOINT_ID} is not enabled on this device.") + return + + network_commissioning_cluster_state = (await self._devCtrl.ReadAttribute( + nodeid=node_id, + attributes=[(commissioning.ROOT_ENDPOINT_ID, Clusters.NetworkCommissioning)], returnClusterObject=True))[0][Clusters.NetworkCommissioning] + + if network_commissioning_cluster_state.networks: + for networks in network_commissioning_cluster_state.networks: + if networks.connected: + self._logger.info( + f"Device already connected to {networks.networkID.hex()} skip network commissioning") + return + + if parameter.commissionee_info.is_wifi_device: + if network_commissioning_cluster_state.featureMap != commissioning.NetworkCommissioningFeatureMap.WIFI_NETWORK_FEATURE_MAP: + raise AssertionError("Device is expected to be a WiFi device") + return await self.network_commissioning_wifi(parameter=parameter, node_id=node_id) + elif parameter.commissionee_info.is_thread_device: + if network_commissioning_cluster_state.featureMap != commissioning.NetworkCommissioningFeatureMap.THREAD_NETWORK_FEATURE_MAP: + raise AssertionError("Device is expected to be a Thread device") + return await self.network_commissioning_thread(parameter=parameter, node_id=node_id) + + async def send_regulatory_config(self, parameter: commissioning.Parameters, node_id: int): + self._logger.info("Sending Regulatory Config") + response = await self._devCtrl.SendCommand(node_id, commissioning.ROOT_ENDPOINT_ID, Clusters.GeneralCommissioning.Commands.SetRegulatoryConfig( + newRegulatoryConfig=Clusters.GeneralCommissioning.Enums.RegulatoryLocationType( + parameter.regulatory_config.location_type), + 
countryCode=parameter.regulatory_config.country_code + )) + if response.errorCode != 0: + raise commissioning.CommissionFailure(repr(response)) + + async def complete_commission(self, node_id: int): + response = await self._devCtrl.SendCommand(node_id, commissioning.ROOT_ENDPOINT_ID, Clusters.GeneralCommissioning.Commands.CommissioningComplete()) + if response.errorCode != 0: + raise commissioning.CommissionFailure(repr(response)) diff --git a/src/controller/python/chip/commissioning/pase.py b/src/controller/python/chip/commissioning/pase.py new file mode 100644 index 00000000000000..9b7e8c5077242f --- /dev/null +++ b/src/controller/python/chip/commissioning/pase.py @@ -0,0 +1,70 @@ +# +# Copyright (c) 2023 Project CHIP Authors +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import dataclasses +import ipaddress + +from chip import ChipDeviceCtrl, commissioning, discovery + + +@dataclasses.dataclass +class Session: + node_id: int + device: ChipDeviceCtrl.DeviceProxyWrapper + + +class ContextManager: + def __init__(self, devCtrl: ChipDeviceCtrl.ChipDeviceControllerBase, node_id: int, is_ble: bool): + self.devCtrl = devCtrl + self.node_id = node_id + self.is_ble = is_ble + + def __enter__(self) -> Session: + return Session( + node_id=self.node_id, + device=self.devCtrl.GetConnectedDeviceSync(self.node_id, allowPASE=True, timeoutMs=1000)) + + def __exit__(self, type, value, traceback): + self.devCtrl.CloseSession(self.node_id) + if self.is_ble: + self.devCtrl.CloseBLEConnection(self.is_ble) + + +def establish_session(devCtrl: ChipDeviceCtrl.ChipDeviceControllerBase, parameter: commissioning.PaseParameters) -> ContextManager: + if isinstance(parameter, commissioning.PaseOverBLEParameters): + devCtrl.EstablishPASESessionBLE(parameter.setup_pin, parameter.discriminator, parameter.temporary_nodeid) + elif isinstance(parameter, commissioning.PaseOverIPParameters): + device = devCtrl.DiscoverCommissionableNodes(filterType=discovery.FilterType.LONG_DISCRIMINATOR, + filter=parameter.long_discriminator, stopOnFirst=True) + if not device: + raise ValueError("No commissionable device found") + selected_address = None + for ip in device[0].addresses: + if ipaddress.ip_address(ip).is_link_local: + # TODO(erjiaqing): To connect a device using link local address requires an interface identifier, + # however, the link local address returned from DiscoverCommissionableNodes does not have an + # interface identifier. 
+ continue + selected_address = ip + break + if selected_address is None: + raise ValueError("The node for commissioning does not contains routable ip addresses information") + devCtrl.EstablishPASESessionIP(selected_address, parameter.setup_pin, parameter.temporary_nodeid) + else: + raise TypeError("Expect PaseOverBLEParameters or PaseOverIPParameters for establishing PASE session") + return ContextManager( + devCtrl=devCtrl, node_id=parameter.temporary_nodeid, is_ble=isinstance(parameter, commissioning.PaseOverBLEParameters)) diff --git a/src/controller/python/chip/credentials/cert.cpp b/src/controller/python/chip/credentials/cert.cpp new file mode 100644 index 00000000000000..813047ca02ec2e --- /dev/null +++ b/src/controller/python/chip/credentials/cert.cpp @@ -0,0 +1,49 @@ +/* + * + * Copyright (c) 2023 Project CHIP Authors + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "cert.h" + +#include +#include + +using namespace chip; +using namespace chip::Credentials; + +PyChipError pychip_ConvertX509CertToChipCert(const uint8_t * x509Cert, size_t x509CertLen, uint8_t * chipCert, size_t * chipCertLen) +{ + MutableByteSpan output(chipCert, *chipCertLen); + CHIP_ERROR err = CHIP_NO_ERROR; + + VerifyOrReturnError((err = ConvertX509CertToChipCert(ByteSpan(x509Cert, x509CertLen), output)) == CHIP_NO_ERROR, + ToPyChipError(err)); + *chipCertLen = output.size(); + + return ToPyChipError(err); +} + +PyChipError pychip_ConvertChipCertToX509Cert(const uint8_t * chipCert, size_t chipCertLen, uint8_t * x509Cert, size_t * x509CertLen) +{ + MutableByteSpan output(x509Cert, *x509CertLen); + CHIP_ERROR err = CHIP_NO_ERROR; + + VerifyOrReturnError((err = ConvertChipCertToX509Cert(ByteSpan(chipCert, chipCertLen), output)) == CHIP_NO_ERROR, + ToPyChipError(err)); + *x509CertLen = output.size(); + + return ToPyChipError(err); +} diff --git a/src/controller/python/chip/credentials/cert.h b/src/controller/python/chip/credentials/cert.h new file mode 100644 index 00000000000000..5d86273db01d03 --- /dev/null +++ b/src/controller/python/chip/credentials/cert.h @@ -0,0 +1,30 @@ +/* + * + * Copyright (c) 2023 Project CHIP Authors + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include + +#include + +extern "C" { +PyChipError pychip_ConvertX509CertToChipCert(const uint8_t * x509Cert, size_t x509CertLen, uint8_t * chipCert, + size_t * chipCertLen); +PyChipError pychip_ConvertChipCertToX509Cert(const uint8_t * chipCert, size_t chipCertLen, uint8_t * x509Cert, + size_t * x509CertLen); +} diff --git a/src/controller/python/chip/credentials/cert.py b/src/controller/python/chip/credentials/cert.py new file mode 100644 index 00000000000000..786c1a423103a6 --- /dev/null +++ b/src/controller/python/chip/credentials/cert.py @@ -0,0 +1,51 @@ +# +# Copyright (c) 2023 Project CHIP Authors +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import ctypes + +import chip.native + + +def _handle(): + handle = chip.native.GetLibraryHandle() + if handle.pychip_ConvertX509CertToChipCert.argtypes is None: + setter = chip.native.NativeLibraryHandleMethodArguments(handle) + setter.Set("pychip_ConvertX509CertToChipCert", chip.native.PyChipError, [ctypes.POINTER( + ctypes.c_uint8), ctypes.c_size_t, ctypes.POINTER(ctypes.c_uint8), ctypes.POINTER(ctypes.c_size_t)]) + setter.Set("pychip_ConvertChipCertToX509Cert", chip.native.PyChipError, [ctypes.POINTER( + ctypes.c_uint8), ctypes.c_size_t, ctypes.POINTER(ctypes.c_uint8), ctypes.POINTER(ctypes.c_size_t)]) + return handle + + +def convert_x509_cert_to_chip_cert(x509Cert: bytes) -> bytes: + """Converts a x509 certificate to CHIP Certificate.""" + output_buffer = (ctypes.c_uint8 * 1024)() + output_size = ctypes.c_size_t(1024) + + _handle().pychip_ConvertX509CertToChipCert(x509Cert, len(x509Cert), output_buffer, ctypes.byref(output_size)).raise_on_error() + + return bytes(output_buffer)[:output_size.value] + + +def convert_chip_cert_to_x509_cert(chipCert: bytes) -> bytes: + """Converts a CHIP Certificate to a x509 certificate.""" + output_buffer = (ctypes.c_byte * 1024)() + output_size = ctypes.c_size_t(1024) + + _handle().pychip_ConvertChipCertToX509Cert(chipCert, len(chipCert), output_buffer, ctypes.byref(output_size)).raise_on_error() + + return bytes(output_buffer)[:output_size.value] diff --git a/src/controller/python/chip/crypto/fabric.py b/src/controller/python/chip/crypto/fabric.py new file mode 100644 index 00000000000000..3662fc164306d9 --- /dev/null +++ b/src/controller/python/chip/crypto/fabric.py @@ -0,0 +1,38 @@ +# +# Copyright (c) 2023 Project CHIP Authors +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives.kdf.hkdf import HKDF + +from . import p256keypair + + +def generate_compressed_fabric_id(root_public_key: bytes, fabric_id: int) -> int: + """Generates compressed fabric id from Root CA's public key and fabric id. + + Returns: + Compressed fabric id as a int + """ + if len(root_public_key) != p256keypair.P256_PUBLIC_KEY_LENGTH and root_public_key[0] != b'\x04': + raise ValueError("Root public key must be an uncompressed P256 point.") + + return int.from_bytes(HKDF( + algorithm=hashes.SHA256(), + length=8, + salt=fabric_id.to_bytes(length=8, byteorder="big", signed=False), + info=b"CompressedFabric", + ).derive(key_material=root_public_key[1:]), byteorder="big") diff --git a/src/controller/python/chip/crypto/p256keypair.cpp b/src/controller/python/chip/crypto/p256keypair.cpp new file mode 100644 index 00000000000000..eef918a5a796b1 --- /dev/null +++ b/src/controller/python/chip/crypto/p256keypair.cpp @@ -0,0 +1,112 @@ +/* + * + * Copyright (c) 2022 Project CHIP Authors + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include +#include +#include + +using namespace chip; +using namespace chip::python; +using namespace chip::Crypto; + +pychip_P256Keypair::pychip_P256Keypair(void * aPyContext, pychip_P256Keypair_ECDSA_sign_msg aSignMsgFunct, + pychip_P256Keypair_ECDH_derive_secret aDeriveSecretFunct) : + mPyContext(aPyContext), + mSignMsgFunct(aSignMsgFunct), mDeriveSecretFunct(aDeriveSecretFunct) +{} + +pychip_P256Keypair::~pychip_P256Keypair() +{ + // Just override the initialize routine to avoid calling the Initialize from the platform's code. +} + +CHIP_ERROR pychip_P256Keypair::Initialize(Crypto::ECPKeyTarget key_target) +{ + // Just override the initialize routine to avoid calling the Initialize from the platform's code. + return CHIP_NO_ERROR; +} + +CHIP_ERROR pychip_P256Keypair::Serialize(Crypto::P256SerializedKeypair & output) const +{ + return CHIP_ERROR_NOT_IMPLEMENTED; +} + +CHIP_ERROR pychip_P256Keypair::Deserialize(Crypto::P256SerializedKeypair & input) +{ + return CHIP_ERROR_NOT_IMPLEMENTED; +} + +CHIP_ERROR pychip_P256Keypair::NewCertificateSigningRequest(uint8_t * csr, size_t & csr_length) const +{ + return CHIP_ERROR_NOT_IMPLEMENTED; +} + +CHIP_ERROR pychip_P256Keypair::ECDSA_sign_msg(const uint8_t * msg, size_t msg_length, + Crypto::P256ECDSASignature & out_signature) const +{ + VerifyOrReturnError(mSignMsgFunct != nullptr, CHIP_ERROR_NOT_IMPLEMENTED); + + size_t signatureLength = out_signature.Capacity(); + + VerifyOrReturnError(mSignMsgFunct(mPyContext, msg, msg_length, out_signature.Bytes(), &signatureLength), CHIP_ERROR_INTERNAL); + out_signature.SetLength(signatureLength); + + return CHIP_NO_ERROR; +} + +CHIP_ERROR pychip_P256Keypair::ECDH_derive_secret(const Crypto::P256PublicKey & remote_public_key, + Crypto::P256ECDHDerivedSecret & out_secret) const +{ + VerifyOrReturnError(mDeriveSecretFunct != nullptr,
CHIP_ERROR_NOT_IMPLEMENTED); + + size_t secretLength = out_secret.Capacity(); + + VerifyOrReturnError(mDeriveSecretFunct(mPyContext, remote_public_key.ConstBytes(), out_secret.Bytes(), &secretLength), + CHIP_ERROR_INTERNAL); + out_secret.SetLength(secretLength); + + return CHIP_NO_ERROR; +} + +void pychip_P256Keypair::UpdatePubkey(const FixedByteSpan & aPublicKey) +{ + mPublicKey = aPublicKey; + mInitialized = true; +} + +chip::python::pychip_P256Keypair * pychip_NewP256Keypair(void * pyObject, pychip_P256Keypair_ECDSA_sign_msg aSignMsgFunct, + pychip_P256Keypair_ECDH_derive_secret aDeriveSecretFunct) +{ + auto res = new pychip_P256Keypair(pyObject, aSignMsgFunct, aDeriveSecretFunct); + + return res; +} + +PyChipError pychip_P256Keypair_UpdatePubkey(chip::python::pychip_P256Keypair * this_, uint8_t * aPubKey, size_t aPubKeyLen) +{ + VerifyOrReturnError(aPubKeyLen == kP256_PublicKey_Length, ToPyChipError(CHIP_ERROR_INVALID_ARGUMENT)); + this_->UpdatePubkey(FixedByteSpan(aPubKey)); + return ToPyChipError(CHIP_NO_ERROR); +} + +void pychip_DeleteP256Keypair(chip::python::pychip_P256Keypair * this_) +{ + delete this_; +} diff --git a/src/controller/python/chip/crypto/p256keypair.h b/src/controller/python/chip/crypto/p256keypair.h new file mode 100644 index 00000000000000..5aa98461477a5b --- /dev/null +++ b/src/controller/python/chip/crypto/p256keypair.h @@ -0,0 +1,124 @@ +/* + * + * Copyright (c) 2022 Project CHIP Authors + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include + +#include +#include + +typedef bool (*pychip_P256Keypair_ECDSA_sign_msg)(void * pyObject, const uint8_t * msg, size_t msg_length, uint8_t * out_signature, + size_t * signature_length); + +typedef bool (*pychip_P256Keypair_ECDH_derive_secret)(void * pyObject, const uint8_t * remote_public_key, uint8_t * out_secret, + size_t * out_secret_length); + +namespace chip { +namespace python { + +class pychip_P256Keypair : public Crypto::P256Keypair +{ +public: + pychip_P256Keypair(void * aPyContext, pychip_P256Keypair_ECDSA_sign_msg aSignMsgFunct, + pychip_P256Keypair_ECDH_derive_secret aDeriveSecretFunct); + ~pychip_P256Keypair() override; + + CHIP_ERROR Initialize(Crypto::ECPKeyTarget key_target) override; + + /** + * @brief Serialize the keypair. + * @return Returns a CHIP_ERROR on error, CHIP_NO_ERROR otherwise + **/ + CHIP_ERROR Serialize(Crypto::P256SerializedKeypair & output) const override; + + /** + * @brief Deserialize the keypair. + * @return Returns a CHIP_ERROR on error, CHIP_NO_ERROR otherwise + **/ + CHIP_ERROR Deserialize(Crypto::P256SerializedKeypair & input) override; + + /** + * @brief Generate a new Certificate Signing Request (CSR). + * @param csr Newly generated CSR in DER format + * @param csr_length The caller provides the length of input buffer (csr). The function returns the actual length of generated + *CSR. + * @return Returns a CHIP_ERROR on error, CHIP_NO_ERROR otherwise + **/ + CHIP_ERROR NewCertificateSigningRequest(uint8_t * csr, size_t & csr_length) const override; + + /** + * @brief A function to sign a msg using ECDSA + * @param msg Message that needs to be signed + * @param msg_length Length of message + * @param out_signature Buffer that will hold the output signature. The signature consists of: 2 EC elements (r and s), + * in raw point form (see SEC1). 
+ * @return Returns a CHIP_ERROR on error, CHIP_NO_ERROR otherwise + **/ + CHIP_ERROR ECDSA_sign_msg(const uint8_t * msg, size_t msg_length, Crypto::P256ECDSASignature & out_signature) const override; + + /** + * @brief A function to derive a shared secret using ECDH + * + * This implements the CHIP_Crypto_ECDH(PrivateKey myPrivateKey, PublicKey theirPublicKey) cryptographic primitive + * from the specification, using this class's private key from `mKeypair` as `myPrivateKey` and the remote + * public key from `remote_public_key` as `theirPublicKey`. + * + * @param remote_public_key Public key of remote peer with which we are trying to establish secure channel. remote_public_key is + * ASN.1 DER encoded as padded big-endian field elements as described in SEC 1: Elliptic Curve Cryptography + * [https://www.secg.org/sec1-v2.pdf] + * @param out_secret Buffer to write out secret into. This is a byte array representing the x coordinate of the shared secret. + * @return Returns a CHIP_ERROR on error, CHIP_NO_ERROR otherwise + **/ + CHIP_ERROR ECDH_derive_secret(const Crypto::P256PublicKey & remote_public_key, + Crypto::P256ECDHDerivedSecret & out_secret) const override; + + /** + * @brief A function to update the public key recorded in the keypair for C++ interface. + * + * @param publicKey A buffer of publicKey, should have exactly `kP256_PublicKey_Length` bytes. + * + **/ + void UpdatePubkey(const FixedByteSpan & aPublicKey); + + /** @brief Return public key for the keypair. 
+ **/ + const Crypto::P256PublicKey & Pubkey() const override + { + // The mPublicKey is a member of Crypto::P256Keypair and is set in Initialize + return mPublicKey; + } + +private: + void * mPyContext; + + pychip_P256Keypair_ECDSA_sign_msg mSignMsgFunct; + pychip_P256Keypair_ECDH_derive_secret mDeriveSecretFunct; +}; + +} // namespace python +} // namespace chip + +extern "C" { + +chip::python::pychip_P256Keypair * pychip_NewP256Keypair(void * pyObject, pychip_P256Keypair_ECDSA_sign_msg aSignMsgFunct, + pychip_P256Keypair_ECDH_derive_secret aDeriveSecretFunct); + +PyChipError pychip_P256Keypair_UpdatePubkey(chip::python::pychip_P256Keypair * this_, uint8_t * aPubKey, size_t aPubKeyLen); +void pychip_DeleteP256Keypair(chip::python::pychip_P256Keypair * this_); +} diff --git a/src/controller/python/chip/crypto/p256keypair.py b/src/controller/python/chip/crypto/p256keypair.py new file mode 100644 index 00000000000000..3267601b3b3e0f --- /dev/null +++ b/src/controller/python/chip/crypto/p256keypair.py @@ -0,0 +1,151 @@ +# +# Copyright (c) 2023 Project CHIP Authors +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import abc +import hashlib +from ctypes import CFUNCTYPE, POINTER, c_bool, c_char, c_size_t, c_uint8, c_uint32, c_void_p, memmove, py_object, string_at + +from chip import native +from ecdsa import ECDH, NIST256p, SigningKey + +_pychip_P256Keypair_ECDSA_sign_msg_func = CFUNCTYPE( + c_bool, py_object, POINTER(c_uint8), c_size_t, POINTER(c_uint8), POINTER(c_size_t)) + +_pychip_P256Keypair_ECDH_derive_secret_func = CFUNCTYPE(c_bool, py_object, POINTER(c_uint8), POINTER(c_uint8), POINTER(c_size_t)) + +P256_PUBLIC_KEY_LENGTH = 2 * 32 + 1 + + +@ _pychip_P256Keypair_ECDSA_sign_msg_func +def _pychip_ECDSA_sign_msg(self_: 'P256Keypair', message_buf: POINTER(c_uint8), message_size: int, signature_buf: POINTER(c_uint8), signature_buf_size: POINTER(c_size_t)) -> bool: + res = self_.ECDSA_sign_msg(string_at(message_buf, message_size)[:]) + memmove(signature_buf, res, len(res)) + signature_buf_size.content = len(res) + return True + + +@ _pychip_P256Keypair_ECDH_derive_secret_func +def _pychip_ECDH_derive_secret(self_: 'P256Keypair', remote_pubkey: POINTER(c_uint8), out_secret_buf: POINTER(c_uint8), out_secret_buf_size: POINTER(c_uint32)) -> bool: + res = self_.ECDH_derive_secret(string_at(remote_pubkey, P256_PUBLIC_KEY_LENGTH)[:]) + memmove(out_secret_buf, res, len(res)) + out_secret_buf_size.content = len(res) + return True + + +class P256Keypair: + """Represents a P256Keypair, which should live longer than the one using it. + + Users are expected to hold a reference to the Keypair object.
+ + """ + + def __init__(self): + self._native_obj = None + + def __copy__(self): + raise NotImplementedError("P256Keypair should not be copied.") + + def __deepcopy__(self, _=None): + raise NotImplementedError("P256Keypair should not be copied.") + + def _create_native_object(self) -> c_void_p: + handle = native.GetLibraryHandle() + if not handle.pychip_NewP256Keypair.argtypes: + setter = native.NativeLibraryHandleMethodArguments(handle) + setter.Set("pychip_NewP256Keypair", c_void_p, [py_object, + _pychip_P256Keypair_ECDSA_sign_msg_func, _pychip_P256Keypair_ECDH_derive_secret_func]) + setter.Set("pychip_P256Keypair_UpdatePubkey", native.PyChipError, [c_void_p, POINTER(c_char), c_size_t]) + setter.Set("pychip_DeleteP256Keypair", None, [c_void_p]) + self._native_obj = handle.pychip_NewP256Keypair( + py_object(self), _pychip_ECDSA_sign_msg, _pychip_ECDH_derive_secret) + + self.UpdatePublicKey() + return self._native_obj + + def __del__(self): + if self._native_obj is not None: + handle = native.GetLibraryHandle() + handle.pychip_DeleteP256Keypair(c_void_p(self._native_obj)) + self._native_obj = None + + @property + def native_object(self) -> c_void_p: + if self._native_obj is None: + return self._create_native_object() + return self._native_obj + + def UpdatePublicKey(self) -> None: + ''' Update the PublicKey in the underlying C++ object. + + This function should be called when the implementation + generates a new keypair. + ''' + handle = native.GetLibraryHandle() + handle.pychip_P256Keypair_UpdatePubkey(c_void_p(self.native_object), self.public_key, len(self.public_key)).raise_on_error() + + @abc.abstractproperty + def public_key(self) -> bytes: + ''' Returns the public key of the key pair + + The return value should conform with the uncompressed format of + Section 2.3.3 of the SECG SEC 1 ("Elliptic Curve Cryptography") + standard. (i.e. 0x04 || X || Y) + + For P256Keypair, the output length should be exactly 65 bytes. 
+ ''' + raise NotImplementedError() + + @abc.abstractmethod + def ECDSA_sign_msg(self, message: bytes) -> bytes: + raise NotImplementedError() + + @abc.abstractmethod + def ECDH_derive_secret(self, remote_pubkey: bytes) -> bytes: + ''' Derive shared secret from the local private key and remote public key. + + remote_pubkey will be a public key conforms with the uncompressed + format of section 2.3.3 of the SECG SEC 1 standard. + ''' + raise NotImplementedError() + + +class TestP256Keypair(P256Keypair): + ''' The P256Keypair for testing purpose. It is not safe for any productions use + ''' + + def __init__(self, private_key: SigningKey = None): + super().__init__() + + if private_key is None: + self._key = SigningKey.generate(NIST256p) + else: + self._key = private_key + + self._pubkey = self._key.verifying_key.to_string(encoding='uncompressed') + + @property + def public_key(self) -> bytes: + return self._pubkey + + def ECDSA_sign_msg(self, message: bytes) -> bytes: + return self._key.sign_deterministic(message, hashfunc=hashlib.sha256) + + def ECDH_derive_secret(self, remote_pubkey: bytes) -> bytes: + ecdh = ECDH(curve=NIST256p) + ecdh.load_private_key(self._key) + ecdh.load_received_public_key_bytes(remote_pubkey[1:]) + return ecdh.ecdh1.generate_sharedsecret_bytes() diff --git a/src/controller/python/chip/yaml/errors.py b/src/controller/python/chip/yaml/errors.py index 092128f5b8d90e..a96360bbd9ef98 100644 --- a/src/controller/python/chip/yaml/errors.py +++ b/src/controller/python/chip/yaml/errors.py @@ -15,12 +15,12 @@ # limitations under the License. 
# -class ParsingError(ValueError): +class ActionCreationError(Exception): def __init__(self, message): super().__init__(message) -class UnexpectedParsingError(ValueError): +class UnexpectedActionCreationError(Exception): def __init__(self, message): super().__init__(message) diff --git a/src/controller/python/chip/yaml/runner.py b/src/controller/python/chip/yaml/runner.py index 0e75b595e02061..5cf58f16ee94dd 100644 --- a/src/controller/python/chip/yaml/runner.py +++ b/src/controller/python/chip/yaml/runner.py @@ -29,7 +29,7 @@ from chip.clusters.Attribute import (AttributeStatus, EventReadResult, SubscriptionTransaction, TypedAttributePath, ValueDecodeFailure) from chip.exceptions import ChipStackError -from chip.yaml.errors import ParsingError, UnexpectedParsingError +from chip.yaml.errors import ActionCreationError, UnexpectedActionCreationError from matter_yamltests.pseudo_clusters.clusters.delay_commands import DelayCommands from matter_yamltests.pseudo_clusters.clusters.log_commands import LogCommands from matter_yamltests.pseudo_clusters.clusters.system_commands import SystemCommands @@ -125,7 +125,7 @@ def __init__(self, test_step): super().__init__(test_step) self._test_step = test_step if not _PSEUDO_CLUSTERS.supports(test_step): - raise ParsingError(f'Default cluster {test_step.cluster} {test_step.command}, not supported') + raise ActionCreationError(f'Default cluster {test_step.cluster} {test_step.command}, not supported') def run_action(self, dev_ctrl: ChipDeviceController) -> _ActionResult: resp = asyncio.run(_PSEUDO_CLUSTERS.execute(self._test_step)) @@ -143,9 +143,11 @@ def __init__(self, test_step, cluster: str, context: _ExecutionContext): 'cluster': Name of cluster which to invoke action is targeting. 'context': Contains test-wide common objects such as DataModelLookup instance. Raises: - ParsingError: Raised if there is a benign error, and there is currently no - action to perform for this write attribute. 
- UnexpectedParsingError: Raised if there is an unexpected parsing error. + ActionCreationError: Raised if there is a benign error. This occurs when we + cannot find the action to invoke for the provided cluster. When this happens + it is expected that the action to invoke and the provided cluster is an action + to be invoked on a pseudo cluster. + UnexpectedActionCreationError: Raised if there is an unexpected parsing error. ''' super().__init__(test_step) self._busy_wait_ms = test_step.busy_wait_ms @@ -159,13 +161,14 @@ def __init__(self, test_step, cluster: str, context: _ExecutionContext): self._group_id = test_step.group_id if self._node_id is None and self._group_id is None: - raise UnexpectedParsingError( + raise UnexpectedActionCreationError( 'Both node_id and group_id are None, at least one needs to be provided') command = context.data_model_lookup.get_command(self._cluster, self._command_name) if command is None: - raise ParsingError( + # If we have not found a command it could mean that it is a pseudo cluster command. + raise ActionCreationError( f'Failed to find cluster:{self._cluster} Command:{self._command_name}') command_object = command() @@ -177,9 +180,7 @@ def __init__(self, test_step, cluster: str, context: _ExecutionContext): request_data = Converter.convert_to_data_model_type( request_data_as_dict, type(command_object)) except ValueError: - # TODO after allowing out of bounds enums to be written this should be changed to - # UnexpectedParsingError. - raise ParsingError('Could not covert yaml type') + raise UnexpectedActionCreationError('Could not covert yaml type') self._request_object = command_object.FromDict(request_data) else: @@ -214,9 +215,7 @@ def __init__(self, test_step, cluster: str, context: _ExecutionContext): 'cluster': Name of cluster read attribute action is targeting. 'context': Contains test-wide common objects such as DataModelLookup instance.
Raises: - ParsingError: Raised if there is a benign error, and there is currently no - action to perform for this read attribute. - UnexpectedParsingError: Raised if there is an unexpected parsing error. + UnexpectedActionCreationError: Raised if there is an unexpected parsing error. ''' super().__init__(test_step) self._attribute_name = stringcase.pascalcase(test_step.attribute) @@ -232,22 +231,22 @@ def __init__(self, test_step, cluster: str, context: _ExecutionContext): self._cluster_object = context.data_model_lookup.get_cluster(self._cluster) if self._cluster_object is None: - raise UnexpectedParsingError( + raise UnexpectedActionCreationError( f'ReadAttribute failed to find cluster object:{self._cluster}') self._request_object = context.data_model_lookup.get_attribute( self._cluster, self._attribute_name) if self._request_object is None: - raise ParsingError( + raise UnexpectedActionCreationError( f'ReadAttribute failed to find cluster:{self._cluster} ' f'Attribute:{self._attribute_name}') if test_step.arguments: - raise UnexpectedParsingError( + raise UnexpectedActionCreationError( f'ReadAttribute should not contain arguments. {self.label}') if self._request_object.attribute_type is None: - raise UnexpectedParsingError( + raise UnexpectedActionCreationError( f'ReadAttribute doesnt have valid attribute_type. {self.label}') def run_action(self, dev_ctrl: ChipDeviceController) -> _ActionResult: @@ -293,7 +292,7 @@ def __init__(self, test_step, cluster: str, context: _ExecutionContext): 'cluster': Name of cluster read event action is targeting. 'context': Contains test-wide common objects such as DataModelLookup instance. Raises: - UnexpectedParsingError: Raised if there is an unexpected parsing error. + UnexpectedActionCreationError: Raised if there is an unexpected parsing error. 
''' super().__init__(test_step) self._event_name = stringcase.pascalcase(test_step.event) @@ -311,11 +310,11 @@ def __init__(self, test_step, cluster: str, context: _ExecutionContext): self._request_object = context.data_model_lookup.get_event(self._cluster, self._event_name) if self._request_object is None: - raise UnexpectedParsingError( + raise UnexpectedActionCreationError( f'ReadEvent failed to find cluster:{self._cluster} Event:{self._event_name}') if test_step.arguments: - raise UnexpectedParsingError( + raise UnexpectedActionCreationError( f'ReadEvent should not contain arguments. {self.label}') def run_action(self, dev_ctrl: ChipDeviceController) -> _ActionResult: @@ -424,19 +423,17 @@ def __init__(self, test_step, cluster: str, context: _ExecutionContext): 'cluster': Name of cluster write attribute action is targeting. 'context': Contains test-wide common objects such as DataModelLookup instance. Raises: - ParsingError: Raised if there is a benign error, and there is currently no - action to perform for this write attribute. - UnexpectedParsingError: Raised if there is an unexpected parsing error. + UnexpectedActionCreationError: Raised if there is an unexpected parsing error. ''' super().__init__(test_step, cluster, context) self._context = context if test_step.min_interval is None: - raise UnexpectedParsingError( + raise UnexpectedActionCreationError( f'SubscribeAttribute action does not have min_interval {self.label}') self._min_interval = test_step.min_interval if test_step.max_interval is None: - raise UnexpectedParsingError( + raise UnexpectedActionCreationError( f'SubscribeAttribute action does not have max_interval {self.label}') self._max_interval = test_step.max_interval @@ -479,19 +476,17 @@ def __init__(self, test_step, cluster: str, context: _ExecutionContext): 'cluster': Name of cluster subscribe event action is targeting. 'context': Contains test-wide common objects such as DataModelLookup instance. 
Raises: - ParsingError: Raised if there is a benign error, and there is currently no - action to perform for this subscribe event. - UnexpectedParsingError: Raised if there is an unexpected parsing error. + UnexpectedActionCreationError: Raised if there is an unexpected parsing error. ''' super().__init__(test_step, cluster, context) self._context = context if test_step.min_interval is None: - raise UnexpectedParsingError( + raise UnexpectedActionCreationError( f'SubscribeEvent action does not have min_interval {self.label}') self._min_interval = test_step.min_interval if test_step.max_interval is None: - raise UnexpectedParsingError( + raise UnexpectedActionCreationError( f'SubscribeEvent action does not have max_interval {self.label}') self._max_interval = test_step.max_interval @@ -536,9 +531,7 @@ def __init__(self, test_step, cluster: str, context: _ExecutionContext): 'cluster': Name of cluster write attribute action is targeting. 'context': Contains test-wide common objects such as DataModelLookup instance. Raises: - ParsingError: Raised if there is a benign error, and there is currently no - action to perform for this write attribute. - UnexpectedParsingError: Raised if there is an unexpected parsing error. + UnexpectedActionCreationError: Raised if there is an unexpected parsing error. 
''' super().__init__(test_step) self._attribute_name = stringcase.pascalcase(test_step.attribute) @@ -551,29 +544,29 @@ def __init__(self, test_step, cluster: str, context: _ExecutionContext): self._request_object = None if self._node_id is None and self._group_id is None: - raise UnexpectedParsingError( + raise UnexpectedActionCreationError( 'Both node_id and group_id are None, at least one needs to be provided') attribute = context.data_model_lookup.get_attribute( self._cluster, self._attribute_name) if attribute is None: - raise ParsingError( + raise UnexpectedActionCreationError( f'WriteAttribute failed to find cluster:{self._cluster} ' f'Attribute:{self._attribute_name}') if not test_step.arguments: - raise UnexpectedParsingError(f'WriteAttribute action does have arguments {self.label}') + raise UnexpectedActionCreationError(f'WriteAttribute action does have arguments {self.label}') args = test_step.arguments['values'] if len(args) != 1: - raise UnexpectedParsingError(f'WriteAttribute is trying to write multiple values') + raise UnexpectedActionCreationError(f'WriteAttribute is trying to write multiple values') request_data_as_dict = args[0] try: # TODO this is an ugly hack request_data = Converter.convert_to_data_model_type( request_data_as_dict['value'], attribute.attribute_type.Type) except ValueError: - raise ParsingError('Could not covert yaml type') + raise UnexpectedActionCreationError('Could not covert yaml type') # Create a cluster object for the request from the provided YAML data. self._request_object = attribute(request_data) @@ -616,7 +609,7 @@ def __init__(self, test_step, context: _ExecutionContext): 'test_step': Step containing information required to run wait for report action. 'context': Contains test-wide common objects such as DataModelLookup instance. Raises: - UnexpectedParsingError: Raised if the expected queue does not exist. + UnexpectedActionCreationError: Raised if the expected queue does not exist. 
''' super().__init__(test_step) if test_step.attribute is not None: @@ -624,12 +617,12 @@ def __init__(self, test_step, context: _ExecutionContext): elif test_step.event is not None: queue_name = stringcase.pascalcase(test_step.event) else: - raise UnexpectedParsingError( + raise UnexpectedActionCreationError( f'WaitForReport needs to wait on either attribute or event, neither were provided') self._output_queue = context.subscription_callback_result_queue.get(queue_name, None) if self._output_queue is None: - raise UnexpectedParsingError(f'Could not find output queue') + raise UnexpectedActionCreationError(f'Could not find output queue') def run_action(self, dev_ctrl: ChipDeviceController) -> _ActionResult: try: @@ -654,7 +647,7 @@ def __init__(self, test_step): Args: 'test_step': Step containing information required to run wait for report action. Raises: - UnexpectedParsingError: Raised if the expected queue does not exist. + UnexpectedActionCreationError: Raised if the expected queue does not exist. 
''' super().__init__(test_step) self._command = test_step.command @@ -667,7 +660,7 @@ def __init__(self, test_step): self._setup_payload = request_data_as_dict['payload'] self._node_id = request_data_as_dict['nodeId'] else: - raise UnexpectedParsingError(f'Unexpected CommisionerCommand {test_step.command}') + raise UnexpectedActionCreationError(f'Unexpected CommisionerCommand {test_step.command}') def run_action(self, dev_ctrl: ChipDeviceController) -> _ActionResult: if self._command == 'GetCommissionerNodeId': @@ -714,7 +707,7 @@ def _filter_for_step(test_step) -> (discovery.FilterType, any): if test_step.command == 'FindCommissionableByVendorId': return discovery.FilterType.VENDOR_ID, filter - raise UnexpectedParsingError(f'Invalid command: {test_step.command}') + raise UnexpectedActionCreationError(f'Invalid command: {test_step.command}') def __init__(self, test_step): super().__init__(test_step) @@ -776,7 +769,7 @@ def _invoke_action_factory(self, test_step, cluster: str): ''' try: return InvokeAction(test_step, cluster, self._context) - except ParsingError: + except ActionCreationError: return None def _attribute_read_action_factory(self, test_step, cluster: str): @@ -788,10 +781,7 @@ def _attribute_read_action_factory(self, test_step, cluster: str): Returns: ReadAttributeAction if 'test_step' is a valid read attribute to be executed. ''' - try: - return ReadAttributeAction(test_step, cluster, self._context) - except ParsingError: - return None + return ReadAttributeAction(test_step, cluster, self._context) def _event_read_action_factory(self, test_step, cluster: str): return ReadEventAction(test_step, cluster, self._context) @@ -807,13 +797,7 @@ def _attribute_subscribe_action_factory(self, test_step, cluster: str): None if we were unable to use the provided 'test_step' for a known reason that is not fatal to test execution. 
''' - try: - return SubscribeAttributeAction(test_step, cluster, self._context) - except ParsingError: - # TODO For now, ParsingErrors are largely issues that will be addressed soon. Once this - # runner has matched parity of the codegen YAML test, this exception should be - # propogated. - return None + return SubscribeAttributeAction(test_step, cluster, self._context) def _attribute_subscribe_event_factory(self, test_step, cluster: str): '''Creates subscribe event command from TestStep provided. @@ -837,39 +821,21 @@ def _attribute_write_action_factory(self, test_step, cluster: str): None if we were unable to use the provided 'test_step' for a known reason that is not fatal to test execution. ''' - try: - return WriteAttributeAction(test_step, cluster, self._context) - except ParsingError: - return None + return WriteAttributeAction(test_step, cluster, self._context) def _wait_for_commissionee_action_factory(self, test_step): - try: - return WaitForCommissioneeAction(test_step) - except ParsingError: - # TODO For now, ParsingErrors are largely issues that will be addressed soon. Once this - # runner has matched parity of the codegen YAML test, this exception should be - # propogated. - return None + return WaitForCommissioneeAction(test_step) def _wait_for_report_action_factory(self, test_step): - try: - return WaitForReportAction(test_step, self._context) - except ParsingError: - # TODO For now, ParsingErrors are largely issues that will be addressed soon. Once this - # runner has matched parity of the codegen YAML test, this exception should be - # propogated. 
- return None + return WaitForReportAction(test_step, self._context) def _commissioner_command_action_factory(self, test_step): - try: - return CommissionerCommandAction(test_step) - except ParsingError: - return None + return CommissionerCommandAction(test_step) def _default_pseudo_cluster(self, test_step): try: return DefaultPseudoCluster(test_step) - except ParsingError: + except ActionCreationError: return None def encode(self, request) -> BaseAction: diff --git a/src/controller/python/test/test_scripts/base.py b/src/controller/python/test/test_scripts/base.py index 3188e310848ae7..65d774618f1f79 100644 --- a/src/controller/python/test/test_scripts/base.py +++ b/src/controller/python/test/test_scripts/base.py @@ -27,7 +27,6 @@ import threading import time from dataclasses import dataclass -from inspect import Attribute from typing import Any import chip.CertificateAuthority @@ -38,7 +37,8 @@ import chip.interaction_model as IM import chip.native from chip import ChipDeviceCtrl -from chip.ChipStack import * +from chip.ChipStack import ChipStack +from chip.crypto import p256keypair from chip.utils import CommissioningBuildingBlocks logger = logging.getLogger('PythonMatterControllerTEST') @@ -190,7 +190,7 @@ def assertValueEqual(self, expected): class BaseTestHelper: - def __init__(self, nodeid: int, paaTrustStorePath: str, testCommissioner: bool = False): + def __init__(self, nodeid: int, paaTrustStorePath: str, testCommissioner: bool = False, keypair: p256keypair.P256Keypair = None): chip.native.Init() self.chipStack = ChipStack('/tmp/repl_storage.json') @@ -198,7 +198,7 @@ def __init__(self, nodeid: int, paaTrustStorePath: str, testCommissioner: bool = self.certificateAuthority = self.certificateAuthorityManager.NewCertificateAuthority() self.fabricAdmin = self.certificateAuthority.NewFabricAdmin(vendorId=0xFFF1, fabricId=1) self.devCtrl = self.fabricAdmin.NewController( - nodeid, paaTrustStorePath, testCommissioner) + nodeid, paaTrustStorePath, 
testCommissioner, keypair=keypair) self.controllerNodeId = nodeid self.logger = logger self.paaTrustStorePath = paaTrustStorePath @@ -226,7 +226,7 @@ def TestDiscovery(self, discriminator: int): chip.discovery.FilterType.LONG_DISCRIMINATOR, discriminator, stopOnFirst=True, timeoutSecond=3) if not res: self.logger.info( - f"Device not found") + "Device not found") return False self.logger.info(f"Found device {res[0]}") return res[0] @@ -241,15 +241,20 @@ def CreateNewFabricController(self): return True async def TestRevokeCommissioningWindow(self, ip: str, setuppin: int, nodeid: int): - await self.devCtrl.SendCommand(nodeid, 0, Clusters.AdministratorCommissioning.Commands.OpenBasicCommissioningWindow(180), timedRequestTimeoutMs=10000) + await self.devCtrl.SendCommand( + nodeid, 0, Clusters.AdministratorCommissioning.Commands.OpenBasicCommissioningWindow(180), timedRequestTimeoutMs=10000) if not self.TestPaseOnly(ip=ip, setuppin=setuppin, nodeid=nodeid, devCtrl=self.devCtrl2): return False - await self.devCtrl2.SendCommand(nodeid, 0, Clusters.GeneralCommissioning.Commands.ArmFailSafe(expiryLengthSeconds=180, breadcrumb=0)) + await self.devCtrl2.SendCommand( + nodeid, 0, Clusters.GeneralCommissioning.Commands.ArmFailSafe(expiryLengthSeconds=180, breadcrumb=0)) - await self.devCtrl.SendCommand(nodeid, 0, Clusters.AdministratorCommissioning.Commands.RevokeCommissioning(), timedRequestTimeoutMs=10000) - await self.devCtrl.SendCommand(nodeid, 0, Clusters.AdministratorCommissioning.Commands.OpenBasicCommissioningWindow(180), timedRequestTimeoutMs=10000) - await self.devCtrl.SendCommand(nodeid, 0, Clusters.AdministratorCommissioning.Commands.RevokeCommissioning(), timedRequestTimeoutMs=10000) + await self.devCtrl.SendCommand( + nodeid, 0, Clusters.AdministratorCommissioning.Commands.RevokeCommissioning(), timedRequestTimeoutMs=10000) + await self.devCtrl.SendCommand( + nodeid, 0, Clusters.AdministratorCommissioning.Commands.OpenBasicCommissioningWindow(180), 
timedRequestTimeoutMs=10000) + await self.devCtrl.SendCommand( + nodeid, 0, Clusters.AdministratorCommissioning.Commands.RevokeCommissioning(), timedRequestTimeoutMs=10000) return True def TestEnhancedCommissioningWindow(self, ip: str, nodeid: int): @@ -354,17 +359,24 @@ def TestFailsafe(self, nodeid: int): self.logger.info( "Attempting to open basic commissioning window - this should fail since the failsafe is armed") try: - res = asyncio.run(self.devCtrl.SendCommand( - nodeid, 0, Clusters.AdministratorCommissioning.Commands.OpenBasicCommissioningWindow(180), timedRequestTimeoutMs=10000)) + asyncio.run(self.devCtrl.SendCommand( + nodeid, + 0, + Clusters.AdministratorCommissioning.Commands.OpenBasicCommissioningWindow(180), + timedRequestTimeoutMs=10000 + )) # we actually want the exception here because we want to see a failure, so return False here self.logger.error( 'Incorrectly succeeded in opening basic commissioning window') return False - except Exception as ex: + except Exception: pass - # TODO: pipe through the commissioning window opener so we can test enhanced properly. The pake verifier is just garbage because none of of the functions to calculate - # it or serialize it are available right now. However, this command should fail BEFORE that becomes an issue. + # TODO: + ''' Pipe through the commissioning window opener so we can test enhanced properly. + The pake verifier is just garbage because none of of the functions to calculate + it or serialize it are available right now. However, this command should fail BEFORE that becomes an issue. 
+ ''' discriminator = 1111 salt = secrets.token_bytes(16) iterations = 2000 @@ -373,13 +385,19 @@ def TestFailsafe(self, nodeid: int): self.logger.info( "Attempting to open enhanced commissioning window - this should fail since the failsafe is armed") try: - res = asyncio.run(self.devCtrl.SendCommand(nodeid, 0, Clusters.AdministratorCommissioning.Commands.OpenCommissioningWindow( - commissioningTimeout=180, PAKEPasscodeVerifier=verifier, discriminator=discriminator, iterations=iterations, salt=salt), timedRequestTimeoutMs=10000)) + asyncio.run(self.devCtrl.SendCommand( + nodeid, 0, Clusters.AdministratorCommissioning.Commands.OpenCommissioningWindow( + commissioningTimeout=180, + PAKEPasscodeVerifier=verifier, + discriminator=discriminator, + iterations=iterations, + salt=salt), timedRequestTimeoutMs=10000)) + # we actually want the exception here because we want to see a failure, so return False here self.logger.error( 'Incorrectly succeeded in opening enhanced commissioning window') return False - except Exception as ex: + except Exception: pass self.logger.info("Disarming failsafe on CASE connection") @@ -393,9 +411,14 @@ def TestFailsafe(self, nodeid: int): self.logger.info( "Opening Commissioning Window - this should succeed since the failsafe was just disarmed") try: - res = asyncio.run(self.devCtrl.SendCommand( - nodeid, 0, Clusters.AdministratorCommissioning.Commands.OpenBasicCommissioningWindow(180), timedRequestTimeoutMs=10000)) - except Exception as ex: + asyncio.run( + self.devCtrl.SendCommand( + nodeid, + 0, + Clusters.AdministratorCommissioning.Commands.OpenBasicCommissioningWindow(180), + timedRequestTimeoutMs=10000 + )) + except Exception: self.logger.error( 'Failed to open commissioning window after disarming failsafe') return False @@ -416,7 +439,12 @@ async def TestControllerCATValues(self, nodeid: int): ''' This tests controllers using CAT Values ''' # Allocate a new controller instance with a CAT tag. 
- newControllers = await CommissioningBuildingBlocks.CreateControllersOnFabric(fabricAdmin=self.fabricAdmin, adminDevCtrl=self.devCtrl, controllerNodeIds=[300], targetNodeId=nodeid, privilege=None, catTags=[0x0001_0001]) + newControllers = await CommissioningBuildingBlocks.CreateControllersOnFabric( + fabricAdmin=self.fabricAdmin, + adminDevCtrl=self.devCtrl, + controllerNodeIds=[300], + targetNodeId=nodeid, + privilege=None, catTags=[0x0001_0001]) # Read out an attribute using the new controller. It has no privileges, so this should fail with an UnsupportedAccess error. res = await newControllers[0].ReadAttribute(nodeid=nodeid, attributes=[(0, Clusters.AccessControl.Attributes.Acl)]) @@ -425,16 +453,27 @@ async def TestControllerCATValues(self, nodeid: int): return False # Grant the new controller privilege by adding the CAT tag to the subject. - await CommissioningBuildingBlocks.GrantPrivilege(adminCtrl=self.devCtrl, grantedCtrl=newControllers[0], privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kAdminister, targetNodeId=nodeid, targetCatTags=[0x0001_0001]) + await CommissioningBuildingBlocks.GrantPrivilege( + adminCtrl=self.devCtrl, + grantedCtrl=newControllers[0], + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kAdminister, + targetNodeId=nodeid, targetCatTags=[0x0001_0001]) # Read out the attribute again - this time, it should succeed. res = await newControllers[0].ReadAttribute(nodeid=nodeid, attributes=[(0, Clusters.AccessControl.Attributes.Acl)]) - if (type(res[0][Clusters.AccessControl][Clusters.AccessControl.Attributes.Acl][0]) != Clusters.AccessControl.Structs.AccessControlEntryStruct): + if (type(res[0][ + Clusters.AccessControl][ + Clusters.AccessControl.Attributes.Acl][0]) != Clusters.AccessControl.Structs.AccessControlEntryStruct): self.logger.error(f"2: Received something other than data:{res}") return False # Reset the privilege back to pre-test. 
- await CommissioningBuildingBlocks.GrantPrivilege(adminCtrl=self.devCtrl, grantedCtrl=newControllers[0], privilege=None, targetNodeId=nodeid) + await CommissioningBuildingBlocks.GrantPrivilege( + adminCtrl=self.devCtrl, + grantedCtrl=newControllers[0], + privilege=None, + targetNodeId=nodeid + ) newControllers[0].Shutdown() @@ -445,7 +484,13 @@ async def TestMultiControllerFabric(self, nodeid: int): ''' # Create two new controllers on the same fabric with no privilege on the target node. - newControllers = await CommissioningBuildingBlocks.CreateControllersOnFabric(fabricAdmin=self.fabricAdmin, adminDevCtrl=self.devCtrl, controllerNodeIds=[100, 200], targetNodeId=nodeid, privilege=None) + newControllers = await CommissioningBuildingBlocks.CreateControllersOnFabric( + fabricAdmin=self.fabricAdmin, + adminDevCtrl=self.devCtrl, + controllerNodeIds=[100, 200], + targetNodeId=nodeid, + privilege=None + ) # # Read out the ACL list from one of the newly minted controllers which has no access. This should return an IM error. @@ -460,12 +505,15 @@ async def TestMultiControllerFabric(self, nodeid: int): # Doing this ensures that we're not somehow aliasing the CASE sessions. # res = await self.devCtrl.ReadAttribute(nodeid=nodeid, attributes=[(0, Clusters.AccessControl.Attributes.Acl)]) - if (type(res[0][Clusters.AccessControl][Clusters.AccessControl.Attributes.Acl][0]) != Clusters.AccessControl.Structs.AccessControlEntryStruct): + if (type(res[0][ + Clusters.AccessControl][ + Clusters.AccessControl.Attributes.Acl][0]) != Clusters.AccessControl.Structs.AccessControlEntryStruct): self.logger.error(f"2: Received something other than data:{res}") return False # - # Re-do the previous read from the unprivileged controller just to do an ABA test to prove we haven't switched the CASE sessions + # Re-do the previous read from the unprivileged controller + # just to do an ABA test to prove we haven't switched the CASE sessions # under-neath. 
# res = await newControllers[0].ReadAttribute(nodeid=nodeid, attributes=[(0, Clusters.AccessControl.Attributes.Acl)]) @@ -476,25 +524,43 @@ async def TestMultiControllerFabric(self, nodeid: int): # # Grant the new controller admin privileges. Reading out the ACL cluster should now yield data. # - await CommissioningBuildingBlocks.GrantPrivilege(adminCtrl=self.devCtrl, grantedCtrl=newControllers[0], privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kAdminister, targetNodeId=nodeid) + await CommissioningBuildingBlocks.GrantPrivilege( + adminCtrl=self.devCtrl, + grantedCtrl=newControllers[0], + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kAdminister, + targetNodeId=nodeid + ) res = await newControllers[0].ReadAttribute(nodeid=nodeid, attributes=[(0, Clusters.AccessControl.Attributes.Acl)]) - if (type(res[0][Clusters.AccessControl][Clusters.AccessControl.Attributes.Acl][0]) != Clusters.AccessControl.Structs.AccessControlEntryStruct): + if (type(res[0][ + Clusters.AccessControl][ + Clusters.AccessControl.Attributes.Acl][0]) != Clusters.AccessControl.Structs.AccessControlEntryStruct): self.logger.error(f"4: Received something other than data:{res}") return False # # Grant the second new controller admin privileges as well. Reading out the ACL cluster should now yield data. 
# - await CommissioningBuildingBlocks.GrantPrivilege(adminCtrl=self.devCtrl, grantedCtrl=newControllers[1], privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kAdminister, targetNodeId=nodeid) + await CommissioningBuildingBlocks.GrantPrivilege( + adminCtrl=self.devCtrl, + grantedCtrl=newControllers[1], + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kAdminister, + targetNodeId=nodeid + ) res = await newControllers[1].ReadAttribute(nodeid=nodeid, attributes=[(0, Clusters.AccessControl.Attributes.Acl)]) - if (type(res[0][Clusters.AccessControl][Clusters.AccessControl.Attributes.Acl][0]) != Clusters.AccessControl.Structs.AccessControlEntryStruct): + if (type(res[0][ + Clusters.AccessControl][ + Clusters.AccessControl.Attributes.Acl][0]) != Clusters.AccessControl.Structs.AccessControlEntryStruct): self.logger.error(f"5: Received something other than data:{res}") return False # # Grant the second new controller just view privilege. Reading out the ACL cluster should return no data. # - await CommissioningBuildingBlocks.GrantPrivilege(adminCtrl=self.devCtrl, grantedCtrl=newControllers[1], privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, targetNodeId=nodeid) + await CommissioningBuildingBlocks.GrantPrivilege( + adminCtrl=self.devCtrl, + grantedCtrl=newControllers[1], + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, + targetNodeId=nodeid) res = await newControllers[1].ReadAttribute(nodeid=nodeid, attributes=[(0, Clusters.AccessControl.Attributes.Acl)]) if (res[0][Clusters.AccessControl][Clusters.AccessControl.Attributes.Acl].Reason.status != IM.Status.UnsupportedAccess): self.logger.error(f"6: Received data5 instead of an error:{res}") @@ -503,8 +569,13 @@ async def TestMultiControllerFabric(self, nodeid: int): # # Read the Basic cluster from the 2nd controller. This is possible with just view privileges. 
# - res = await newControllers[1].ReadAttribute(nodeid=nodeid, attributes=[(0, Clusters.BasicInformation.Attributes.ClusterRevision)]) - if (type(res[0][Clusters.BasicInformation][Clusters.BasicInformation.Attributes.ClusterRevision]) != Clusters.BasicInformation.Attributes.ClusterRevision.attribute_type.Type): + res = await newControllers[1].ReadAttribute(nodeid=nodeid, + attributes=[(0, Clusters.BasicInformation.Attributes.ClusterRevision)]) + if (type( + res[0][ + Clusters.BasicInformation][ + Clusters.BasicInformation.Attributes.ClusterRevision] + ) != Clusters.BasicInformation.Attributes.ClusterRevision.attribute_type.Type): self.logger.error(f"7: Received something other than data:{res}") return False @@ -554,9 +625,16 @@ async def TestAddUpdateRemoveFabric(self, nodeid: int): # TODO Read using old node ID and expect that it fails. - currentFabricIndexResponse = await tempDevCtrl.ReadAttribute(newNodeIdForUpdateNoc, [(Clusters.OperationalCredentials.Attributes.CurrentFabricIndex)]) - updatedNOCFabricIndex = currentFabricIndexResponse[0][Clusters.OperationalCredentials][Clusters.OperationalCredentials.Attributes.CurrentFabricIndex] - removeFabricResponse = await tempDevCtrl.SendCommand(newNodeIdForUpdateNoc, 0, Clusters.OperationalCredentials.Commands.RemoveFabric(updatedNOCFabricIndex)) + currentFabricIndexResponse = await tempDevCtrl.ReadAttribute( + newNodeIdForUpdateNoc, + [(Clusters.OperationalCredentials.Attributes.CurrentFabricIndex)] + ) + updatedNOCFabricIndex = currentFabricIndexResponse[0][Clusters.OperationalCredentials][ + Clusters.OperationalCredentials.Attributes.CurrentFabricIndex] + # Remove Fabric Response + await tempDevCtrl.SendCommand( + newNodeIdForUpdateNoc, 0, + Clusters.OperationalCredentials.Commands.RemoveFabric(updatedNOCFabricIndex)) if startOfTestFabricCount != await self._GetCommissonedFabricCount(nodeid): self.logger.error("Expected fabric count to be the same at the end of test as when it started") @@ -621,7 +699,8 @@ def 
OnValueChange(path: Attribute.TypedAttributePath, transaction: Attribute.Sub sub.Shutdown() if sawValueChange is False: - self.logger.error("Didn't see value change in time, likely because sub got terminated due to unexpected session eviction!") + self.logger.error( + "Didn't see value change in time, likely because sub got terminated due to unexpected session eviction!") return False # @@ -682,7 +761,12 @@ def OnValueChange(path: Attribute.TypedAttributePath, transaction: Attribute.Sub async def TestMultiFabric(self, ip: str, setuppin: int, nodeid: int): self.logger.info("Opening Commissioning Window") - await self.devCtrl.SendCommand(nodeid, 0, Clusters.AdministratorCommissioning.Commands.OpenBasicCommissioningWindow(180), timedRequestTimeoutMs=10000) + await self.devCtrl.SendCommand( + nodeid, + 0, + Clusters.AdministratorCommissioning.Commands.OpenBasicCommissioningWindow(180), + timedRequestTimeoutMs=10000 + ) self.logger.info("Creating 2nd Fabric Admin") self.fabricAdmin2 = self.certificateAuthority.NewFabricAdmin(vendorId=0xFFF1, fabricId=2) @@ -730,8 +814,16 @@ async def TestMultiFabric(self, ip: str, setuppin: int, nodeid: int): self.logger.error("Got back invalid nocList") return False - data1 = await self.devCtrl.ReadAttribute(nodeid, [(Clusters.OperationalCredentials.Attributes.CurrentFabricIndex)], fabricFiltered=False) - data2 = await self.devCtrl2.ReadAttribute(nodeid, [(Clusters.OperationalCredentials.Attributes.CurrentFabricIndex)], fabricFiltered=False) + data1 = await self.devCtrl.ReadAttribute( + nodeid, + [(Clusters.OperationalCredentials.Attributes.CurrentFabricIndex)], + fabricFiltered=False + ) + data2 = await self.devCtrl2.ReadAttribute( + nodeid, + [(Clusters.OperationalCredentials.Attributes.CurrentFabricIndex)], + fabricFiltered=False + ) # Read out current fabric from each fabric, and both should be different. 
self.currentFabric1 = data1[0][Clusters.OperationalCredentials][ @@ -863,12 +955,14 @@ def CompareUnfilteredData(accessingFabric, otherFabric, expectedData): if (item != expectedDefaultData): raise AssertionError("Got back mismatched data") - data = await self.devCtrl.ReadAttribute(nodeid, [(1, Clusters.UnitTesting.Attributes.ListFabricScoped)], fabricFiltered=False) + data = await self.devCtrl.ReadAttribute(nodeid, + [(1, Clusters.UnitTesting.Attributes.ListFabricScoped)], fabricFiltered=False) readListDataFabric = data[1][Clusters.UnitTesting][Clusters.UnitTesting.Attributes.ListFabricScoped] CompareUnfilteredData(self.currentFabric1, self.currentFabric2, expectedDataFabric1) - data = await self.devCtrl2.ReadAttribute(nodeid, [(1, Clusters.UnitTesting.Attributes.ListFabricScoped)], fabricFiltered=False) + data = await self.devCtrl2.ReadAttribute(nodeid, + [(1, Clusters.UnitTesting.Attributes.ListFabricScoped)], fabricFiltered=False) readListDataFabric = data[1][Clusters.UnitTesting][Clusters.UnitTesting.Attributes.ListFabricScoped] CompareUnfilteredData(self.currentFabric2, self.currentFabric1, expectedDataFabric2) @@ -928,7 +1022,11 @@ async def OnResubscriptionSucceeded(transaction): async with cv: cv.notify() - subscription = await self.devCtrl.ReadAttribute(nodeid, [(Clusters.BasicInformation.Attributes.ClusterRevision)], reportInterval=(0, 5)) + subscription = await self.devCtrl.ReadAttribute( + nodeid, + [(Clusters.BasicInformation.Attributes.ClusterRevision)], + reportInterval=(0, 5) + ) # # Register async callbacks that will fire when a re-sub is attempted or succeeds. 
@@ -1029,14 +1127,14 @@ def TestResolve(self, nodeid): while not addr: addr = self.devCtrl.GetAddressAndPort(nodeid) if time.time() - start > 10: - self.logger.exception(f"Timeout waiting for address...") + self.logger.exception("Timeout waiting for address...") break if not addr: time.sleep(0.2) if not addr: - self.logger.exception(f"Addr is missing...") + self.logger.exception("Addr is missing...") return False self.logger.info(f"Resolved address: {addr[0]}:{addr[1]}") return True @@ -1165,21 +1263,23 @@ def run(self): # is really wrong and bail out here with some information. if not updateCv.wait(10.0): self.logger.error( - f"Failed to receive subscription update") + "Failed to receive subscription update") break - # thread changes 5 times, and sleeps for 3 seconds in between. Add an additional 3 seconds of slack. Timeout is in seconds. + # thread changes 5 times, and sleeps for 3 seconds in between. + # Add an additional 3 seconds of slack. Timeout is in seconds. changeThread.join(18.0) # - # Clean-up by shutting down the sub. Otherwise, we're going to get callbacks through OnValueChange on what will soon become an invalid + # Clean-up by shutting down the sub. Otherwise, we're going to get callbacks through + # OnValueChange on what will soon become an invalid # execution context above. 
# subscription.Shutdown() if changeThread.is_alive(): # Thread join timed out - self.logger.error(f"Failed to join change thread") + self.logger.error("Failed to join change thread") return False return True if receivedUpdate == 5 else False @@ -1213,7 +1313,7 @@ def TestFabricScopedCommandDuringPase(self, nodeid: int): ''' status = None try: - response = asyncio.run(self.devCtrl.SendCommand( + asyncio.run(self.devCtrl.SendCommand( nodeid, 0, Clusters.OperationalCredentials.Commands.UpdateFabricLabel("roboto"))) except IM.InteractionModelError as ex: status = ex.status diff --git a/src/controller/python/test/test_scripts/cluster_objects.py b/src/controller/python/test/test_scripts/cluster_objects.py index 59b55a6a13bf86..80210bb8af32d8 100644 --- a/src/controller/python/test/test_scripts/cluster_objects.py +++ b/src/controller/python/test/test_scripts/cluster_objects.py @@ -57,7 +57,8 @@ def VerifyDecodeSuccess(values): f"Ignoring attribute decode failure for path {endpoint}/{attribute}") else: raise AssertionError( - f"Cannot decode value for path {endpoint}/{attribute}, got error: '{str(v.Reason)}', raw TLV data: '{v.TLVValue}'") + f"Cannot decode value for path {endpoint}/{attribute}, " + f"got error: '{str(v.Reason)}', raw TLV data: '{v.TLVValue}'") for endpoint in values: for cluster in values[endpoint]: @@ -104,7 +105,7 @@ async def TestCommandRoundTripWithBadEndpoint(cls, devCtrl): req = Clusters.OnOff.Commands.On() try: await devCtrl.SendCommand(nodeid=NODE_ID, endpoint=233, payload=req) - raise ValueError(f"Failure expected") + raise ValueError("Failure expected") except chip.interaction_model.InteractionModelError as ex: logger.info(f"Recevied {ex} from server.") return @@ -156,11 +157,16 @@ async def TestWriteRequest(cls, devCtrl): raise AssertionError("Write returned unexpected result.") logger.info("2: Write chunked list") - res = await devCtrl.WriteAttribute(nodeid=NODE_ID, - attributes=[(1, 
Clusters.UnitTesting.Attributes.ListLongOctetString([b"0123456789abcdef" * 32] * 5))]) + res = await devCtrl.WriteAttribute( + nodeid=NODE_ID, + attributes=[ + (1, Clusters.UnitTesting.Attributes.ListLongOctetString([b"0123456789abcdef" * 32] * 5)) + ] + ) expectedRes = [ AttributeStatus(Path=AttributePath( - EndpointId=1, Attribute=Clusters.UnitTesting.Attributes.ListLongOctetString), Status=chip.interaction_model.Status.Success), + EndpointId=1, + Attribute=Clusters.UnitTesting.Attributes.ListLongOctetString), Status=chip.interaction_model.Status.Success), ] logger.info(f"Received WriteResponse: {res}") @@ -198,15 +204,19 @@ def subUpdate(path: TypedAttributePath, transaction: SubscriptionTransaction): @ base.test_case async def TestSubscribeZeroMinInterval(cls, devCtrl): ''' - This validates receiving subscription reports for two attributes at a time in quick succession after issuing a command that results in attribute side-effects. - Specifically, it relies on the fact that the second attribute is changed in a different execution context than the first. This ensures that we pick-up the first - attribute change and generate a notification, and validating that shortly after that, we generate a second report for the second change. - - This is done using subscriptions with a min reporting interval of 0 to ensure timely notification of the above. An On() command is sent to the OnOff cluster + This validates receiving subscription reports for two attributes at a time in quick succession after + issuing a command that results in attribute side-effects. Specifically, it relies on the fact that the second attribute + is changed in a different execution context than the first. This ensures that we pick-up the first + attribute change and generate a notification, and validating that shortly after that, + we generate a second report for the second change. + + This is done using subscriptions with a min reporting interval of 0 to ensure timely notification of the above. 
+ An On() command is sent to the OnOff cluster which should simultaneously set the state to On as well as set the level to 254. ''' logger.info("Test Subscription With MinInterval of 0") - sub = await devCtrl.ReadAttribute(nodeid=NODE_ID, attributes=[Clusters.OnOff, Clusters.LevelControl], reportInterval=(0, 60)) + sub = await devCtrl.ReadAttribute(nodeid=NODE_ID, + attributes=[Clusters.OnOff, Clusters.LevelControl], reportInterval=(0, 60)) data = sub.GetAttributes() logger.info("Sending off command") @@ -297,17 +307,21 @@ async def TestReadAttributeRequests(cls, devCtrl): if res[1][Clusters.UnitTesting][Clusters.UnitTesting.Attributes.ListLongOctetString] != [b'0123456789abcdef' * 32] * 4: raise AssertionError("Unexpected read result") - logger.info("*: Getting current fabric index") - res = await devCtrl.ReadAttribute(nodeid=NODE_ID, attributes=[(0, Clusters.OperationalCredentials.Attributes.CurrentFabricIndex)]) - fabricIndex = res[0][Clusters.OperationalCredentials][Clusters.OperationalCredentials.Attributes.CurrentFabricIndex] - # Note: ListFabricScoped is an empty list for now. We should re-enable this test after we make it return expected data. 
+ # logger.info("*: Getting current fabric index") + # res = await devCtrl.ReadAttribute(nodeid=NODE_ID, + # attributes=[(0, Clusters.OperationalCredentials.Attributes.CurrentFabricIndex)]) + # fabricIndex = res[0][Clusters.OperationalCredentials][Clusters.OperationalCredentials.Attributes.CurrentFabricIndex] + # # logger.info("8: Read without fabric filter") - # res = await devCtrl.ReadAttribute(nodeid=NODE_ID, attributes=[(1, Clusters.UnitTesting.Attributes.ListFabricScoped)], fabricFiltered=False) + # res = await devCtrl.ReadAttribute(nodeid=NODE_ID, + # attributes=[(1, Clusters.UnitTesting.Attributes.ListFabricScoped)], + # fabricFiltered=False) # if len(res[1][Clusters.UnitTesting][Clusters.UnitTesting.Attributes.ListFabricScoped]) == 1: # raise AssertionError("Expect more elements in the response") # logger.info("9: Read with fabric filter") - # res = await devCtrl.ReadAttribute(nodeid=NODE_ID, attributes=[(1, Clusters.UnitTesting.Attributes.ListFabricScoped)], fabricFiltered=True) + # res = await devCtrl.ReadAttribute(nodeid=NODE_ID, + # attributes=[(1, Clusters.UnitTesting.Attributes.ListFabricScoped)], fabricFiltered=True) # if len(res[1][Clusters.UnitTesting][Clusters.UnitTesting.Attributes.ListFabricScoped]) != 1: # raise AssertionError("Expect exact one element in the response") # if res[1][Clusters.UnitTesting][Clusters.UnitTesting.Attributes.ListFabricScoped][0].fabricIndex != fabricIndex: @@ -317,9 +331,12 @@ async def TestReadAttributeRequests(cls, devCtrl): @ classmethod async def _TriggerEvent(cls, devCtrl): # We trigger sending an event a couple of times just to be safe. 
- await devCtrl.SendCommand(nodeid=NODE_ID, endpoint=1, payload=Clusters.UnitTesting.Commands.TestEmitTestEventRequest()) - await devCtrl.SendCommand(nodeid=NODE_ID, endpoint=1, payload=Clusters.UnitTesting.Commands.TestEmitTestEventRequest()) - return await devCtrl.SendCommand(nodeid=NODE_ID, endpoint=1, payload=Clusters.UnitTesting.Commands.TestEmitTestEventRequest()) + await devCtrl.SendCommand(nodeid=NODE_ID, + endpoint=1, payload=Clusters.UnitTesting.Commands.TestEmitTestEventRequest()) + await devCtrl.SendCommand(nodeid=NODE_ID, + endpoint=1, payload=Clusters.UnitTesting.Commands.TestEmitTestEventRequest()) + return await devCtrl.SendCommand(nodeid=NODE_ID, + endpoint=1, payload=Clusters.UnitTesting.Commands.TestEmitTestEventRequest()) @ classmethod async def _RetryForContent(cls, request, until, retryCount=10, intervalSeconds=1): @@ -351,20 +368,30 @@ def validate_got_expected_event(events): return False return True - await cls._RetryForContent(request=lambda: devCtrl.ReadEvent(nodeid=NODE_ID, events=req, eventNumberFilter=current_event_filter), until=validate_got_expected_event) + await cls._RetryForContent(request=lambda: devCtrl.ReadEvent( + nodeid=NODE_ID, + events=req, + eventNumberFilter=current_event_filter + ), until=validate_got_expected_event) def validate_got_no_event(events): return len(events) == 0 - await cls._RetryForContent(request=lambda: devCtrl.ReadEvent(nodeid=NODE_ID, events=req, eventNumberFilter=(current_event_filter + 1)), until=validate_got_no_event) + await cls._RetryForContent(request=lambda: devCtrl.ReadEvent( + nodeid=NODE_ID, + events=req, + eventNumberFilter=(current_event_filter + 1) + ), until=validate_got_no_event) @ classmethod @ base.test_case async def TestGenerateUndefinedFabricScopedEventRequests(cls, devCtrl): logger.info("Running TestGenerateUndefinedFabricScopedEventRequests") try: - res = await devCtrl.SendCommand(nodeid=NODE_ID, endpoint=1, 
payload=Clusters.UnitTesting.Commands.TestEmitTestFabricScopedEventRequest(arg1=0)) - raise ValueError(f"Unexpected Failure") + res = await devCtrl.SendCommand(nodeid=NODE_ID, + endpoint=1, + payload=Clusters.UnitTesting.Commands.TestEmitTestFabricScopedEventRequest(arg1=0)) + raise ValueError("Unexpected Failure") except chip.interaction_model.InteractionModelError as ex: logger.info(f"Recevied {ex} from server.") res = await devCtrl.ReadEvent(nodeid=NODE_ID, events=[ @@ -518,13 +545,15 @@ async def TestReadWriteAttributeRequestsWithVersion(cls, devCtrl): req = [ (0, Clusters.BasicInformation.Attributes.VendorName), ] - res = await devCtrl.ReadAttribute(nodeid=NODE_ID, attributes=req, dataVersionFilters=[(0, Clusters.BasicInformation, data_version)]) + res = await devCtrl.ReadAttribute(nodeid=NODE_ID, + attributes=req, dataVersionFilters=[(0, Clusters.BasicInformation, data_version)]) VerifyDecodeSuccess(res) new_data_version = res[0][Clusters.BasicInformation][DataVersion] if (data_version + 1) != new_data_version: raise AssertionError("Version mistmatch happens.") - res = await devCtrl.ReadAttribute(nodeid=NODE_ID, attributes=req, dataVersionFilters=[(0, Clusters.BasicInformation, new_data_version)]) + res = await devCtrl.ReadAttribute(nodeid=NODE_ID, + attributes=req, dataVersionFilters=[(0, Clusters.BasicInformation, new_data_version)]) VerifyDecodeSuccess(res) res = await devCtrl.WriteAttribute(nodeid=NODE_ID, @@ -590,7 +619,10 @@ def eventPathPossibilities(): logging.info( f"{testCount}: Reading mixed Attributes({attributes[0]}) Events({events[0]})") await cls._TriggerEvent(devCtrl) - res = await cls._RetryForContent(request=lambda: devCtrl.Read(nodeid=NODE_ID, attributes=attributes[1], events=events[1]), until=lambda res: res != 0) + res = await cls._RetryForContent(request=lambda: devCtrl.Read( + nodeid=NODE_ID, + attributes=attributes[1], + events=events[1]), until=lambda res: res != 0) VerifyDecodeSuccess(res.attributes) @ classmethod diff --git 
a/src/controller/python/test/test_scripts/example_python_commissioning_flow.py b/src/controller/python/test/test_scripts/example_python_commissioning_flow.py new file mode 100644 index 00000000000000..b10269257f2b08 --- /dev/null +++ b/src/controller/python/test/test_scripts/example_python_commissioning_flow.py @@ -0,0 +1,81 @@ +# +# Copyright (c) 2023 Project CHIP Authors +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import logging +import os +import random + +from chip import ChipDeviceCtrl +from chip import clusters as Clusters +from chip import commissioning +from chip.commissioning import commissioning_flow_blocks, pase + + +class ExampleCustomMatterCommissioningFlow(commissioning_flow_blocks.CommissioningFlowBlocks): + def __init__(self, devCtrl: ChipDeviceCtrl.ChipDeviceControllerBase, credential_provider: commissioning.CredentialProvider, logger: logging.Logger): + super().__init__(devCtrl=devCtrl, credential_provider=credential_provider, logger=logger) + self._logger = logger + + async def commission(self, parameter: commissioning.Parameters): + # The example uses PASE, however, the blocks uses a node_id, which supports both PASE and CASE. 
+ with pase.establish_session(devCtrl=self._devCtrl, parameter=parameter.pase_param) as device: + node_id = device.node_id + + self._logger.info("Sending ArmFailSafe to device") + await self.arm_failsafe(node_id=node_id, duration_seconds=parameter.failsafe_expiry_length_seconds) + + self._logger.info("Setting Regulatory Configuration") + await self.send_regulatory_config(parameter=parameter, node_id=node_id) + + self._logger.info("OperationalCredentials Commissioning") + case_nodeid = await self.operational_credentials_commissioning(parameter=parameter, node_id=node_id) + + if not parameter.commissionee_info.is_ethernet_device: + self._logger.info("Network Commissioning") + await self.network_commissioning(parameter=parameter, node_id=node_id) + else: + self._logger.info("Device is an ethernet device, network commissioning not required.") + + self._logger.info("Completing Commissioning") + await self.complete_commission(case_nodeid) + + self._logger.info("Commissioning Completed") + + +class ExampleCredentialProvider: + def __init__(self, devCtrl: ChipDeviceCtrl.ChipDeviceController): + self._devCtrl = devCtrl + + async def get_attestation_nonce(self) -> bytes: + return os.urandom(32) + + async def get_csr_nonce(self) -> bytes: + return os.urandom(32) + + async def get_commissionee_credentials(self, request: commissioning.GetCommissioneeCredentialsRequest) -> commissioning.GetCommissioneeCredentialsResponse: + node_id = random.randint(100000, 999999) + nocChain = self._devCtrl.IssueNOCChain(Clusters.OperationalCredentials.Commands.CSRResponse( + NOCSRElements=request.csr_elements, attestationSignature=request.attestation_signature), nodeId=node_id) + return commissioning.GetCommissioneeCredentialsResponse( + rcac=nocChain.rcacBytes, + noc=nocChain.nocBytes, + icac=nocChain.icacBytes, + ipk=nocChain.ipkBytes, + case_admin_node=self._devCtrl.nodeId, + admin_vendor_id=self._devCtrl.fabricAdmin.vendorId, + node_id=node_id, + fabric_id=self._devCtrl.fabricId) diff --git 
a/src/controller/python/test/test_scripts/mobile-device-test.py b/src/controller/python/test/test_scripts/mobile-device-test.py index e602194f29efbf..09f13ccb444843 100755 --- a/src/controller/python/test/test_scripts/mobile-device-test.py +++ b/src/controller/python/test/test_scripts/mobile-device-test.py @@ -195,19 +195,59 @@ def do_tests(controller_nodeid, device_nodeid, address, timeout, discriminator, @click.command() -@click.option("--controller-nodeid", default=TEST_CONTROLLER_NODE_ID, type=int, help="NodeId of the controller.") -@click.option("--device-nodeid", default=TEST_DEVICE_NODE_ID, type=int, help="NodeId of the device.") -@click.option("--address", "-a", default='', type=str, help="Skip commissionee discovery, commission the device with the IP directly.") -@click.option("--timeout", "-t", default=240, type=int, help="The program will return with timeout after specified seconds.") -@click.option("--discriminator", default=TEST_DISCRIMINATOR, type=int, help="Discriminator of the device.") -@click.option("--setup-pin", default=TEST_SETUPPIN, type=int, help="Setup pincode of the device.") -@click.option('--enable-test', default=['all'], type=str, multiple=True, help='The tests to be executed. By default, all tests will be executed, use this option to run a specific set of tests. Use --print-test-list for a list of appliable tests.') -@click.option('--disable-test', default=[], type=str, multiple=True, help='The tests to be excluded from the set of enabled tests. 
Use --print-test-list for a list of appliable tests.') -@click.option('--log-level', default='WARN', type=click.Choice(['ERROR', 'WARN', 'INFO', 'DEBUG']), help="The log level of the test.") -@click.option('--log-format', default=None, type=str, help="Override logging format") -@click.option('--print-test-list', is_flag=True, help="Print a list of test cases and test sets that can be toggled via --enable-test and --disable-test, then exit") -@click.option('--paa-trust-store-path', default='', type=str, help="Path that contains valid and trusted PAA Root Certificates.") -def run(controller_nodeid, device_nodeid, address, timeout, discriminator, setup_pin, enable_test, disable_test, log_level, log_format, print_test_list, paa_trust_store_path): +@click.option("--controller-nodeid", + default=TEST_CONTROLLER_NODE_ID, + type=int, + help="NodeId of the controller.") +@click.option("--device-nodeid", + default=TEST_DEVICE_NODE_ID, + type=int, + help="NodeId of the device.") +@click.option("--address", "-a", + default='', + type=str, + help="Skip commissionee discovery, commission the device with the IP directly.") +@click.option("--timeout", "-t", + default=240, + type=int, + help="The program will return with timeout after specified seconds.") +@click.option("--discriminator", + default=TEST_DISCRIMINATOR, + type=int, + help="Discriminator of the device.") +@click.option("--setup-pin", + default=TEST_SETUPPIN, + type=int, + help="Setup pincode of the device.") +@click.option('--enable-test', + default=['all'], + type=str, + multiple=True, + help='The tests to be executed. By default, all tests will be executed, use this option to run a ' + 'specific set of tests. Use --print-test-list for a list of appliable tests.') +@click.option('--disable-test', + default=[], + type=str, + multiple=True, + help='The tests to be excluded from the set of enabled tests. 
Use --print-test-list for a list of ' + 'appliable tests.') +@click.option('--log-level', + default='WARN', + type=click.Choice(['ERROR', 'WARN', 'INFO', 'DEBUG']), + help="The log level of the test.") +@click.option('--log-format', + default=None, + type=str, + help="Override logging format") +@click.option('--print-test-list', + is_flag=True, + help="Print a list of test cases and test sets that can be toggled via --enable-test and --disable-test, then exit") +@click.option('--paa-trust-store-path', + default='', + type=str, + help="Path that contains valid and trusted PAA Root Certificates.") +def run(controller_nodeid, device_nodeid, address, timeout, discriminator, setup_pin, enable_test, disable_test, log_level, + log_format, print_test_list, paa_trust_store_path): coloredlogs.install(level=log_level, fmt=log_format, logger=logger) if print_test_list: diff --git a/src/controller/python/test/test_scripts/network_commissioning.py b/src/controller/python/test/test_scripts/network_commissioning.py index 64c5510c4e9624..f21e3b4603564e 100644 --- a/src/controller/python/test/test_scripts/network_commissioning.py +++ b/src/controller/python/test/test_scripts/network_commissioning.py @@ -58,11 +58,16 @@ def __init__(self, devCtrl, nodeid): self._last_breadcrumb = random.randint(1, 1 << 48) async def must_verify_breadcrumb(self): - res = await self._devCtrl.ReadAttribute(nodeid=self._nodeid, attributes=[(0, Clusters.GeneralCommissioning.Attributes.Breadcrumb)], returnClusterObject=True) + res = await self._devCtrl.ReadAttribute( + nodeid=self._nodeid, + attributes=[(0, Clusters.GeneralCommissioning.Attributes.Breadcrumb)], + returnClusterObject=True + ) if self._last_breadcrumb is not None: if self._last_breadcrumb != res[0][Clusters.GeneralCommissioning].breadcrumb: raise AssertionError( - f"Breadcrumb attribute mismatch! Expect {self._last_breadcrumb} got {res[0][Clusters.GeneralCommissioning].breadcrumb}") + f"Breadcrumb attribute mismatch! 
Expect {self._last_breadcrumb} " + f"got {res[0][Clusters.GeneralCommissioning].breadcrumb}") def with_breadcrumb(self) -> int: self._last_breadcrumb += 1 @@ -78,9 +83,13 @@ def log_interface_basic_info(self, values): f"The feature map of this endpoint is {values.featureMap}.") async def readLastNetworkingStateAttributes(self, endpointId): - res = await self._devCtrl.ReadAttribute(nodeid=self._nodeid, attributes=[(endpointId, Clusters.NetworkCommissioning.Attributes.LastConnectErrorValue), - (endpointId, Clusters.NetworkCommissioning.Attributes.LastNetworkID), - (endpointId, Clusters.NetworkCommissioning.Attributes.LastNetworkingStatus)], returnClusterObject=True) + res = await self._devCtrl.ReadAttribute( + nodeid=self._nodeid, + attributes=[(endpointId, Clusters.NetworkCommissioning.Attributes.LastConnectErrorValue), + (endpointId, Clusters.NetworkCommissioning.Attributes.LastNetworkID), + (endpointId, Clusters.NetworkCommissioning.Attributes.LastNetworkingStatus)], + returnClusterObject=True + ) values = res[endpointId][Clusters.NetworkCommissioning] logger.info(f"Got values: {values}") return values @@ -91,7 +100,7 @@ async def test_negative(self, endpointId): try: logger.info( - f"1. Send ConnectNetwork command with a illegal network id") + "1. 
Send ConnectNetwork command with a illegal network id") req = Clusters.NetworkCommissioning.Commands.ConnectNetwork( networkID=b'0' * 254, breadcrumb=0) res = await self._devCtrl.SendCommand(nodeid=self._nodeid, endpoint=endpointId, payload=req) @@ -99,10 +108,10 @@ async def test_negative(self, endpointId): except chip.interaction_model.InteractionModelError as ex: logger.info(f"Received {ex} from server.") - logger.info(f"Finished negative test cases.") + logger.info("Finished negative test cases.") async def test_wifi(self, endpointId): - logger.info(f"Get basic information of the endpoint") + logger.info("Get basic information of the endpoint") res = await self._devCtrl.ReadAttribute(nodeid=self._nodeid, attributes=[ (endpointId, Clusters.NetworkCommissioning.Attributes.ConnectMaxTimeSeconds), @@ -115,7 +124,7 @@ async def test_wifi(self, endpointId): returnClusterObject=True) self.log_interface_basic_info( res[endpointId][Clusters.NetworkCommissioning]) - logger.info(f"Finished getting basic information of the endpoint") + logger.info("Finished getting basic information of the endpoint") if res[endpointId][Clusters.NetworkCommissioning].acceptedCommandList != [ Clusters.NetworkCommissioning.Commands.ScanNetworks.command_id, @@ -123,45 +132,54 @@ async def test_wifi(self, endpointId): Clusters.NetworkCommissioning.Commands.RemoveNetwork.command_id, Clusters.NetworkCommissioning.Commands.ConnectNetwork.command_id, Clusters.NetworkCommissioning.Commands.ReorderNetwork.command_id]: - raise AssertionError(f"Unexpected accepted command list for Thread interface") + raise AssertionError("Unexpected accepted command list for Thread interface") if res[endpointId][Clusters.NetworkCommissioning].generatedCommandList != [ Clusters.NetworkCommissioning.Commands.ScanNetworksResponse.command_id, Clusters.NetworkCommissioning.Commands.NetworkConfigResponse.command_id, Clusters.NetworkCommissioning.Commands.ConnectNetworkResponse.command_id]: - raise 
AssertionError(f"Unexpected generated command list for Thread interface") + raise AssertionError("Unexpected generated command list for Thread interface") # Read Last* attributes - logger.info(f"Read Last* attributes") + logger.info("Read Last* attributes") res = await self.readLastNetworkingStateAttributes(endpointId=endpointId) if (res.lastNetworkID != NullValue) or (res.lastNetworkingStatus != NullValue) or (res.lastConnectErrorValue != NullValue): raise AssertionError( - f"LastNetworkID, LastNetworkingStatus and LastConnectErrorValue should be Null") + "LastNetworkID, LastNetworkingStatus and LastConnectErrorValue should be Null") # Scan networks - logger.info(f"Scan networks") + logger.info("Scan networks") req = Clusters.NetworkCommissioning.Commands.ScanNetworks( ssid=b'', breadcrumb=self.with_breadcrumb()) interactionTimeoutMs = self._devCtrl.ComputeRoundTripTimeout(self._nodeid, upperLayerProcessingTimeoutMs=30000) - res = await self._devCtrl.SendCommand(nodeid=self._nodeid, endpoint=endpointId, payload=req, interactionTimeoutMs=interactionTimeoutMs) + res = await self._devCtrl.SendCommand( + nodeid=self._nodeid, + endpoint=endpointId, + payload=req, + interactionTimeoutMs=interactionTimeoutMs + ) logger.info(f"Received response: {res}") if res.networkingStatus != Clusters.NetworkCommissioning.Enums.NetworkCommissioningStatus.kSuccess: raise AssertionError(f"Unexpected result: {res.networkingStatus}") await self.must_verify_breadcrumb() # Arm the failsafe before making network config changes - logger.info(f"Arming the failsafe") + logger.info("Arming the failsafe") req = Clusters.GeneralCommissioning.Commands.ArmFailSafe(expiryLengthSeconds=900) res = await self._devCtrl.SendCommand(nodeid=self._nodeid, endpoint=endpointId, payload=req) logger.info(f"Received response: {res}") # Remove existing network - logger.info(f"Check network list") - res = await self._devCtrl.ReadAttribute(nodeid=self._nodeid, attributes=[(endpointId, 
Clusters.NetworkCommissioning.Attributes.Networks)], returnClusterObject=True) + logger.info("Check network list") + res = await self._devCtrl.ReadAttribute( + nodeid=self._nodeid, + attributes=[(endpointId, Clusters.NetworkCommissioning.Attributes.Networks)], + returnClusterObject=True + ) networkList = res[endpointId][Clusters.NetworkCommissioning].networks logger.info(f"Got network list: {networkList}") if len(networkList) != 0: - logger.info(f"Removing existing network") + logger.info("Removing existing network") req = Clusters.NetworkCommissioning.Commands.RemoveNetwork( networkID=networkList[0].networkID, breadcrumb=self.with_breadcrumb()) res = await self._devCtrl.SendCommand(nodeid=self._nodeid, endpoint=endpointId, payload=req) @@ -172,7 +190,7 @@ async def test_wifi(self, endpointId): await self.must_verify_breadcrumb() # Add first network - logger.info(f"Adding first test network") + logger.info("Adding first test network") req = Clusters.NetworkCommissioning.Commands.AddOrUpdateWiFiNetwork( ssid=TEST_WIFI_SSID.encode(), credentials=TEST_WIFI_PASS.encode(), breadcrumb=self.with_breadcrumb()) res = await self._devCtrl.SendCommand(nodeid=self._nodeid, endpoint=endpointId, payload=req) @@ -184,8 +202,12 @@ async def test_wifi(self, endpointId): f"Unexpected result: {res.networkIndex} (should be 0)") await self.must_verify_breadcrumb() - logger.info(f"Check network list") - res = await self._devCtrl.ReadAttribute(nodeid=self._nodeid, attributes=[(endpointId, Clusters.NetworkCommissioning.Attributes.Networks)], returnClusterObject=True) + logger.info("Check network list") + res = await self._devCtrl.ReadAttribute( + nodeid=self._nodeid, + attributes=[(endpointId, Clusters.NetworkCommissioning.Attributes.Networks)], + returnClusterObject=True + ) networkList = res[endpointId][Clusters.NetworkCommissioning].networks logger.info(f"Got network list: {networkList}") if len(networkList) != 1: @@ -195,28 +217,38 @@ async def test_wifi(self, endpointId): raise 
AssertionError( f"Unexpected result: first network ID should be 'TestSSID' got {networkList[0].networkID}") - logger.info(f"Connect to a network") + logger.info("Connect to a network") req = Clusters.NetworkCommissioning.Commands.ConnectNetwork( networkID=TEST_WIFI_SSID.encode(), breadcrumb=self.with_breadcrumb()) interactionTimeoutMs = self._devCtrl.ComputeRoundTripTimeout(self._nodeid, upperLayerProcessingTimeoutMs=30000) - res = await self._devCtrl.SendCommand(nodeid=self._nodeid, endpoint=endpointId, payload=req, interactionTimeoutMs=interactionTimeoutMs) + res = await self._devCtrl.SendCommand( + nodeid=self._nodeid, + endpoint=endpointId, + payload=req, + interactionTimeoutMs=interactionTimeoutMs + ) logger.info(f"Got response: {res}") if res.networkingStatus != Clusters.NetworkCommissioning.Enums.NetworkCommissioningStatus.kSuccess: raise AssertionError(f"Unexpected result: {res.networkingStatus}") - logger.info(f"Device connected to a network.") + logger.info("Device connected to a network.") await self.must_verify_breadcrumb() # Disarm the failsafe - logger.info(f"Disarming the failsafe") + logger.info("Disarming the failsafe") req = Clusters.GeneralCommissioning.Commands.CommissioningComplete() res = await self._devCtrl.SendCommand(nodeid=self._nodeid, endpoint=endpointId, payload=req) logger.info(f"Received response: {res}") - # Note: On Linux, when connecting to a connected network, it will return immediately, however, it will try a reconnect. This will make the below attribute read return false negative values. + # Note: On Linux, when connecting to a connected network, it will return immediately, however, it will try a reconnect. + # This will make the below attribute read return false negative values. 
await asyncio.sleep(5) - logger.info(f"Check network is connected") - res = await self._devCtrl.ReadAttribute(nodeid=self._nodeid, attributes=[(endpointId, Clusters.NetworkCommissioning.Attributes.Networks)], returnClusterObject=True) + logger.info("Check network is connected") + res = await self._devCtrl.ReadAttribute( + nodeid=self._nodeid, + attributes=[(endpointId, Clusters.NetworkCommissioning.Attributes.Networks)], + returnClusterObject=True + ) networkList = res[endpointId][Clusters.NetworkCommissioning].networks logger.info(f"Got network list: {networkList}") if len(networkList) != 1: @@ -227,17 +259,18 @@ async def test_wifi(self, endpointId): f"Unexpected result: first network ID should be 'TestSSID' got {networkList[0].networkID}") if not networkList[0].connected: raise AssertionError( - f"Unexpected result: network is not marked as connected") + "Unexpected result: network is not marked as connected") # Verify Last* attributes - logger.info(f"Read Last* attributes") + logger.info("Read Last* attributes") res = await self.readLastNetworkingStateAttributes(endpointId=endpointId) if (res.lastNetworkID == NullValue) or (res.lastNetworkingStatus == NullValue) or (res.lastConnectErrorValue != NullValue): raise AssertionError( - f"LastNetworkID, LastNetworkingStatus should not be Null, LastConnectErrorValue should be Null for a successful network provision.") + "LastNetworkID, LastNetworkingStatus should not be Null, " + "LastConnectErrorValue should be Null for a successful network provision.") async def test_thread(self, endpointId): - logger.info(f"Get basic information of the endpoint") + logger.info("Get basic information of the endpoint") res = await self._devCtrl.ReadAttribute(nodeid=self._nodeid, attributes=[ (endpointId, Clusters.NetworkCommissioning.Attributes.ConnectMaxTimeSeconds), @@ -257,47 +290,54 @@ async def test_thread(self, endpointId): Clusters.NetworkCommissioning.Commands.RemoveNetwork.command_id, 
Clusters.NetworkCommissioning.Commands.ConnectNetwork.command_id, Clusters.NetworkCommissioning.Commands.ReorderNetwork.command_id]: - raise AssertionError(f"Unexpected accepted command list for Thread interface") + raise AssertionError("Unexpected accepted command list for Thread interface") if res[endpointId][Clusters.NetworkCommissioning].generatedCommandList != [ Clusters.NetworkCommissioning.Commands.ScanNetworksResponse.command_id, Clusters.NetworkCommissioning.Commands.NetworkConfigResponse.command_id, Clusters.NetworkCommissioning.Commands.ConnectNetworkResponse.command_id]: - raise AssertionError(f"Unexpected generated command list for Thread interface") + raise AssertionError("Unexpected generated command list for Thread interface") - logger.info(f"Finished getting basic information of the endpoint") + logger.info("Finished getting basic information of the endpoint") # Read Last* attributes - logger.info(f"Read Last* attributes") + logger.info("Read Last* attributes") res = await self.readLastNetworkingStateAttributes(endpointId=endpointId) if (res.lastNetworkID != NullValue) or (res.lastNetworkingStatus != NullValue) or (res.lastConnectErrorValue != NullValue): raise AssertionError( - f"LastNetworkID, LastNetworkingStatus and LastConnectErrorValue should be Null") + "LastNetworkID, LastNetworkingStatus and LastConnectErrorValue should be Null") # Scan networks - logger.info(f"Scan networks") + logger.info("Scan networks") req = Clusters.NetworkCommissioning.Commands.ScanNetworks( ssid=b'', breadcrumb=self.with_breadcrumb()) interactionTimeoutMs = self._devCtrl.ComputeRoundTripTimeout(self._nodeid, upperLayerProcessingTimeoutMs=30000) - res = await self._devCtrl.SendCommand(nodeid=self._nodeid, endpoint=endpointId, payload=req, interactionTimeoutMs=interactionTimeoutMs) + res = await self._devCtrl.SendCommand(nodeid=self._nodeid, + endpoint=endpointId, + payload=req, + interactionTimeoutMs=interactionTimeoutMs) logger.info(f"Received response: {res}") if 
res.networkingStatus != Clusters.NetworkCommissioning.Enums.NetworkCommissioningStatus.kSuccess: raise AssertionError(f"Unexpected result: {res.networkingStatus}") await self.must_verify_breadcrumb() # Arm the failsafe before making network config changes - logger.info(f"Arming the failsafe") + logger.info("Arming the failsafe") req = Clusters.GeneralCommissioning.Commands.ArmFailSafe(expiryLengthSeconds=900) res = await self._devCtrl.SendCommand(nodeid=self._nodeid, endpoint=endpointId, payload=req) logger.info(f"Received response: {res}") # Remove existing network - logger.info(f"Check network list") - res = await self._devCtrl.ReadAttribute(nodeid=self._nodeid, attributes=[(endpointId, Clusters.NetworkCommissioning.Attributes.Networks)], returnClusterObject=True) + logger.info("Check network list") + res = await self._devCtrl.ReadAttribute( + nodeid=self._nodeid, + attributes=[(endpointId, Clusters.NetworkCommissioning.Attributes.Networks)], + returnClusterObject=True + ) networkList = res[endpointId][Clusters.NetworkCommissioning].networks logger.info(f"Got network list: {networkList}") if len(networkList) != 0: - logger.info(f"Removing existing network") + logger.info("Removing existing network") req = Clusters.NetworkCommissioning.Commands.RemoveNetwork( networkID=networkList[0].networkID, breadcrumb=self.with_breadcrumb()) res = await self._devCtrl.SendCommand(nodeid=self._nodeid, endpoint=endpointId, payload=req) @@ -308,7 +348,7 @@ async def test_thread(self, endpointId): await self.must_verify_breadcrumb() # Add first network - logger.info(f"Adding first test network") + logger.info("Adding first test network") req = Clusters.NetworkCommissioning.Commands.AddOrUpdateThreadNetwork( operationalDataset=TEST_THREAD_NETWORK_DATASET_TLVS[0], breadcrumb=self.with_breadcrumb()) res = await self._devCtrl.SendCommand(nodeid=self._nodeid, endpoint=endpointId, payload=req) @@ -320,8 +360,12 @@ async def test_thread(self, endpointId): f"Unexpected result: 
{res.networkIndex} (should be 0)") await self.must_verify_breadcrumb() - logger.info(f"Check network list") - res = await self._devCtrl.ReadAttribute(nodeid=self._nodeid, attributes=[(endpointId, Clusters.NetworkCommissioning.Attributes.Networks)], returnClusterObject=True) + logger.info("Check network list") + res = await self._devCtrl.ReadAttribute( + nodeid=self._nodeid, + attributes=[(endpointId, Clusters.NetworkCommissioning.Attributes.Networks)], + returnClusterObject=True + ) networkList = res[endpointId][Clusters.NetworkCommissioning].networks logger.info(f"Got network list: {networkList}") if len(networkList) != 1: @@ -331,32 +375,40 @@ async def test_thread(self, endpointId): raise AssertionError( f"Unexpected result: first network ID should be {TEST_THREAD_NETWORK_IDS[0]} got {networkList[0].networkID}") - logger.info(f"Connect to a network") + logger.info("Connect to a network") req = Clusters.NetworkCommissioning.Commands.ConnectNetwork( networkID=TEST_THREAD_NETWORK_IDS[0], breadcrumb=self.with_breadcrumb()) interactionTimeoutMs = self._devCtrl.ComputeRoundTripTimeout(self._nodeid, upperLayerProcessingTimeoutMs=30000) - res = await self._devCtrl.SendCommand(nodeid=self._nodeid, endpoint=endpointId, payload=req, interactionTimeoutMs=interactionTimeoutMs) + res = await self._devCtrl.SendCommand(nodeid=self._nodeid, + endpoint=endpointId, + payload=req, + interactionTimeoutMs=interactionTimeoutMs) logger.info(f"Got response: {res}") if res.networkingStatus != Clusters.NetworkCommissioning.Enums.NetworkCommissioningStatus.kSuccess: raise AssertionError(f"Unexpected result: {res.networkingStatus}") - logger.info(f"Device connected to a network.") + logger.info("Device connected to a network.") await self.must_verify_breadcrumb() # Disarm the failsafe - logger.info(f"Disarming the failsafe") + logger.info("Disarming the failsafe") req = Clusters.GeneralCommissioning.Commands.CommissioningComplete() res = await self._devCtrl.SendCommand(nodeid=self._nodeid, 
endpoint=endpointId, payload=req) logger.info(f"Received response: {res}") # Verify Last* attributes - logger.info(f"Read Last* attributes") + logger.info("Read Last* attributes") res = await self.readLastNetworkingStateAttributes(endpointId=endpointId) if (res.lastNetworkID == NullValue) or (res.lastNetworkingStatus == NullValue) or (res.lastConnectErrorValue != NullValue): raise AssertionError( - f"LastNetworkID, LastNetworkingStatus should not be Null, LastConnectErrorValue should be Null for a successful network provision.") - - logger.info(f"Check network list") - res = await self._devCtrl.ReadAttribute(nodeid=self._nodeid, attributes=[(endpointId, Clusters.NetworkCommissioning.Attributes.Networks)], returnClusterObject=True) + "LastNetworkID, LastNetworkingStatus should not be Null, " + "LastConnectErrorValue should be Null for a successful network provision.") + + logger.info("Check network list") + res = await self._devCtrl.ReadAttribute( + nodeid=self._nodeid, + attributes=[(endpointId, Clusters.NetworkCommissioning.Attributes.Networks)], + returnClusterObject=True + ) networkList = res[endpointId][Clusters.NetworkCommissioning].networks logger.info(f"Got network list: {networkList}") if len(networkList) != 1: @@ -367,16 +419,24 @@ async def test_thread(self, endpointId): f"Unexpected result: first network ID should be {TEST_THREAD_NETWORK_IDS[0]} got {networkList[0].networkID}") if not networkList[0].connected: raise AssertionError( - f"Unexpected result: network is not marked as connected") + "Unexpected result: network is not marked as connected") @base.test_case async def Test(self): - clusters = await self._devCtrl.ReadAttribute(nodeid=self._nodeid, attributes=[(Clusters.Descriptor.Attributes.ServerList)], returnClusterObject=True) + clusters = await self._devCtrl.ReadAttribute( + nodeid=self._nodeid, + attributes=[(Clusters.Descriptor.Attributes.ServerList)], + returnClusterObject=True + ) if Clusters.NetworkCommissioning.id not in 
clusters[0][Clusters.Descriptor].serverList: logger.info( - f"Network commissioning cluster {endpoint} is not enabled on this device.") + "Network commissioning cluster is not enabled on this device.") return - endpoints = await self._devCtrl.ReadAttribute(nodeid=self._nodeid, attributes=[(Clusters.NetworkCommissioning.Attributes.FeatureMap)], returnClusterObject=True) + endpoints = await self._devCtrl.ReadAttribute( + nodeid=self._nodeid, + attributes=[(Clusters.NetworkCommissioning.Attributes.FeatureMap)], + returnClusterObject=True + ) logger.info(endpoints) for endpoint, obj in endpoints.items(): clus = obj[Clusters.NetworkCommissioning] @@ -398,5 +458,5 @@ async def run(self): try: await self.Test() return True - except Exception as ex: + except Exception: return False diff --git a/src/controller/python/test/test_scripts/python_commissioning_flow_test.py b/src/controller/python/test/test_scripts/python_commissioning_flow_test.py new file mode 100755 index 00000000000000..2317a5571e7257 --- /dev/null +++ b/src/controller/python/test/test_scripts/python_commissioning_flow_test.py @@ -0,0 +1,186 @@ +#!/usr/bin/env python3 + +# +# Copyright (c) 2021 Project CHIP Authors +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import asyncio +# Commissioning test. 
+import os +import random +import sys +from optparse import OptionParser + +import example_python_commissioning_flow +from base import BaseTestHelper, TestFail, TestTimeout, logger +from chip import ChipDeviceCtrl +from chip import clusters as Clusters +from chip import commissioning +from chip.crypto import p256keypair + +# The thread network dataset tlv for testing, splited into T-L-V. + +TEST_THREAD_NETWORK_DATASET_TLV = "0e080000000000010000" + \ + "000300000c" + \ + "35060004001fffe0" + \ + "0208fedcba9876543210" + \ + "0708fd00000000001234" + \ + "0510ffeeddccbbaa99887766554433221100" + \ + "030e54657374696e674e6574776f726b" + \ + "0102d252" + \ + "041081cb3b2efa781cc778397497ff520fa50c0302a0ff" +# Network id, for the thread network, current a const value, will be changed to XPANID of the thread network. +TEST_THREAD_NETWORK_ID = "fedcba9876543210" +TEST_DISCRIMINATOR = 3840 + +ENDPOINT_ID = 0 +LIGHTING_ENDPOINT_ID = 1 +GROUP_ID = 0 + + +def main(): + optParser = OptionParser() + optParser.add_option( + "-t", + "--timeout", + action="store", + dest="testTimeout", + default=75, + type='int', + help="The program will return with timeout after specified seconds.", + metavar="", + ) + optParser.add_option( + "--bad-cert-issuer", + action="store_true", + dest="badCertIssuer", + default=False, + help="Simulate a bad certificate issuer, the commissioning should fail when sending OpCreds.", + ) + optParser.add_option( + "-d", + "--discriminator", + action="store", + dest="discriminator", + default='', + type='str', + help="The long discriminator of the device", + metavar="", + ) + optParser.add_option( + "--setup-payload", + action="store", + dest="setupPayload", + default='', + type='str', + help="Setup Payload (manual pairing code or QR code content)", + metavar="" + ) + optParser.add_option( + "--nodeid", + action="store", + dest="nodeid", + default=1, + type=int, + help="The Node ID issued to the device", + metavar="" + ) + optParser.add_option( + 
'--paa-trust-store-path', + dest="paaPath", + default='', + type='str', + help="Path that contains valid and trusted PAA Root Certificates." + ) + + (options, remainingArgs) = optParser.parse_args(sys.argv[1:]) + + timeoutTicker = TestTimeout(options.testTimeout) + timeoutTicker.start() + + test = BaseTestHelper( + nodeid=112233, paaTrustStorePath=options.paaPath, testCommissioner=True, keypair=p256keypair.TestP256Keypair()) + + class BadCredentialProvider: + def __init__(self, devCtrl: ChipDeviceCtrl.ChipDeviceController): + self._devCtrl = devCtrl + + async def get_attestation_nonce(self) -> bytes: + return os.urandom(32) + + async def get_csr_nonce(self) -> bytes: + return os.urandom(32) + + async def get_commissionee_credentials(self, request: commissioning.GetCommissioneeCredentialsRequest) -> commissioning.GetCommissioneeCredentialsResponse: + node_id = random.randint(100000, 999999) + nocChain = self._devCtrl.IssueNOCChain(Clusters.OperationalCredentials.Commands.CSRResponse( + NOCSRElements=request.csr_elements, attestationSignature=request.attestation_signature), nodeId=node_id) + return commissioning.GetCommissioneeCredentialsResponse( + rcac=nocChain.rcacBytes[1:], + noc=nocChain.nocBytes[1:], + icac=nocChain.icacBytes[1:], + ipk=nocChain.ipkBytes[1:], + case_admin_node=self._devCtrl.nodeId, + admin_vendor_id=self._devCtrl.fabricAdmin.vendorId, + node_id=node_id, + fabric_id=self._devCtrl.fabricId) + + flow = example_python_commissioning_flow.ExampleCustomMatterCommissioningFlow( + devCtrl=test.devCtrl, + credential_provider=BadCredentialProvider( + test.devCtrl) if options.badCertIssuer else example_python_commissioning_flow.ExampleCredentialProvider(test.devCtrl), + logger=logger) + + try: + asyncio.run(flow.commission(commissioning.Parameters( + pase_param=commissioning.PaseOverIPParameters( + long_discriminator=options.discriminator, + setup_pin=20202021, temporary_nodeid=options.nodeid + ), + regulatory_config=commissioning.RegulatoryConfig( + 
location_type=commissioning.RegulatoryLocationType.INDOOR_OUTDOOR, country_code='US'), + fabric_label="TestFabric", + commissionee_info=commissioning.CommissioneeInfo( + endpoints={}, + is_thread_device=True, + is_ethernet_device=False, + is_wifi_device=False, + ), + wifi_credentials=None, + thread_credentials=bytes.fromhex(TEST_THREAD_NETWORK_DATASET_TLV)))) + if options.badCertIssuer: + raise AssertionError("The commission is expected to fail. (BadCredentialProvider used)") + except Exception as ex: + if options.badCertIssuer: + logger.exception("Got exception and the test is expected to fail (BadCredentialProvider used)") + else: + raise ex + + timeoutTicker.stop() + + logger.info("Test finished") + + # TODO: Python device controller cannot be shutdown clean sometimes and will block on AsyncDNSResolverSockets shutdown. + # Call os._exit(0) to force close it. + os._exit(0) + + +if __name__ == "__main__": + try: + main() + except Exception as ex: + logger.exception(ex) + TestFail("Exception occurred when running tests.") diff --git a/src/controller/python/test/unit_tests/test_cluster_objects.py b/src/controller/python/test/unit_tests/test_cluster_objects.py index 926bc0f32d1b99..13b12db185270e 100644 --- a/src/controller/python/test/unit_tests/test_cluster_objects.py +++ b/src/controller/python/test/unit_tests/test_cluster_objects.py @@ -27,7 +27,9 @@ def _encode_attribute_and_then_decode_to_native(data, type: ClusterObjects.Clust return TLVReader(type.ToTLV(None, data)).get()['Any'] -def _encode_from_native_and_then_decode(data, cls: typing.Union[ClusterObjects.ClusterObject, ClusterObjects.ClusterAttributeDescriptor]): +def _encode_from_native_and_then_decode(data, + cls: typing.Union[ClusterObjects.ClusterObject, + ClusterObjects.ClusterAttributeDescriptor]): tlv = TLVWriter() tlv.put(None, data) return cls.FromTLV(bytes(tlv.encoding)) diff --git a/src/controller/python/test/unit_tests/test_tlv.py b/src/controller/python/test/unit_tests/test_tlv.py index 
ad37b53c190a69..74d7aefe0f07cc 100644 --- a/src/controller/python/test/unit_tests/test_tlv.py +++ b/src/controller/python/test/unit_tests/test_tlv.py @@ -149,9 +149,11 @@ def test_uint(self): def test_structure(self): test_cases = [ - (b'\x15\x36\x01\x15\x35\x01\x26\x00\xBF\xA2\x55\x16\x37\x01\x24\x02\x00\x24\x03\x28\x24\x04\x00\x18\x24\x02\x01\x18\x18\x18\x18', + (b'\x15\x36\x01\x15\x35\x01\x26\x00\xBF\xA2\x55\x16\x37\x01\x24' + b'\x02\x00\x24\x03\x28\x24\x04\x00\x18\x24\x02\x01\x18\x18\x18\x18', {1: [{1: {0: 374710975, 1: [0, 40, 0], 2: 1}}]}), - (b'\x156\x01\x155\x01&\x00\xBF\xA2U\x167\x01$\x02\x00$\x03($\x04\x01\x18,\x02\x18Nordic Semiconductor ASA\x18\x18\x18\x18', + (b'\x156\x01\x155\x01&\x00\xBF\xA2U\x167\x01$\x02\x00$\x03($\x04\x01' + b'\x18,\x02\x18Nordic Semiconductor ASA\x18\x18\x18\x18', {1: [{1: {0: 374710975, 1: [0, 40, 1], 2: 'Nordic Semiconductor ASA'}}]}), (b"\0256\001\0255\001&\000\031\346x\2077\001$\002\001$\003\006$\004\000\030(\002\030\030\030\030", {1: [{1: {0: 2272847385, 1: [1, 6, 0], 2: False}}]}) diff --git a/src/controller/tests/data_model/TestRead.cpp b/src/controller/tests/data_model/TestRead.cpp index 63983a9a5cb722..301febb1d3a895 100644 --- a/src/controller/tests/data_model/TestRead.cpp +++ b/src/controller/tests/data_model/TestRead.cpp @@ -2958,12 +2958,12 @@ class TestReadCallback : public app::ReadClient::Callback mLastError = CHIP_NO_ERROR; } - int32_t mAttributeCount = 0; - int32_t mOnReportEnd = 0; - int32_t mOnSubscriptionEstablishedCount = 0; - int32_t mOnDone = 0; - int32_t mOnError = 0; - CHIP_ERROR mLastError = CHIP_NO_ERROR; + uint32_t mAttributeCount = 0; + uint32_t mOnReportEnd = 0; + uint32_t mOnSubscriptionEstablishedCount = 0; + uint32_t mOnDone = 0; + uint32_t mOnError = 0; + CHIP_ERROR mLastError = CHIP_NO_ERROR; }; class TestPerpetualListReadCallback : public app::ReadClient::Callback @@ -2991,7 +2991,7 @@ class TestPerpetualListReadCallback : public app::ReadClient::Callback int32_t reportsReceived = 0; }; -void 
EstablishReadOrSubscriptions(nlTestSuite * apSuite, const SessionHandle & sessionHandle, int32_t numSubs, int32_t pathPerSub, +void EstablishReadOrSubscriptions(nlTestSuite * apSuite, const SessionHandle & sessionHandle, size_t numSubs, size_t pathPerSub, app::AttributePathParams path, app::ReadClient::InteractionType type, app::ReadClient::Callback * callback, std::vector> & readClients) { @@ -3006,7 +3006,7 @@ void EstablishReadOrSubscriptions(nlTestSuite * apSuite, const SessionHandle & s readParams.mKeepSubscriptions = true; } - for (int32_t i = 0; i < numSubs; i++) + for (uint32_t i = 0; i < numSubs; i++) { std::unique_ptr readClient = std::make_unique(app::InteractionModelEngine::GetInstance(), @@ -3066,9 +3066,9 @@ void TestReadInteraction::TestReadHandler_KillOverQuotaSubscriptions(nlTestSuite TestContext & ctx = *static_cast(apContext); auto sessionHandle = ctx.GetSessionBobToAlice(); - const int32_t kExpectedParallelSubs = + const auto kExpectedParallelSubs = app::InteractionModelEngine::kMinSupportedSubscriptionsPerFabric * ctx.GetFabricTable().FabricCount(); - const int32_t kExpectedParallelPaths = kExpectedParallelSubs * app::InteractionModelEngine::kMinSupportedPathsPerSubscription; + const auto kExpectedParallelPaths = kExpectedParallelSubs * app::InteractionModelEngine::kMinSupportedPathsPerSubscription; app::InteractionModelEngine::GetInstance()->RegisterReadHandlerAppCallback(&gTestReadInteraction); @@ -3117,14 +3117,14 @@ void TestReadInteraction::TestReadHandler_KillOverQuotaSubscriptions(nlTestSuite ctx.GetIOContext().DriveIOUntil(System::Clock::Seconds16(5), [&]() { return readCallback.mOnSubscriptionEstablishedCount == kExpectedParallelSubs + 1 && readCallback.mAttributeCount == - static_cast(kExpectedParallelSubs * app::InteractionModelEngine::kMinSupportedPathsPerSubscription + - app::InteractionModelEngine::kMinSupportedPathsPerSubscription + 1); + kExpectedParallelSubs * app::InteractionModelEngine::kMinSupportedPathsPerSubscription + + 
app::InteractionModelEngine::kMinSupportedPathsPerSubscription + 1; }); NL_TEST_ASSERT(apSuite, readCallback.mAttributeCount == - static_cast(kExpectedParallelSubs * app::InteractionModelEngine::kMinSupportedPathsPerSubscription + - app::InteractionModelEngine::kMinSupportedPathsPerSubscription + 1)); + kExpectedParallelSubs * app::InteractionModelEngine::kMinSupportedPathsPerSubscription + + app::InteractionModelEngine::kMinSupportedPathsPerSubscription + 1); NL_TEST_ASSERT(apSuite, readCallback.mOnSubscriptionEstablishedCount == kExpectedParallelSubs + 1); // We have set up the environment for testing the evicting logic. @@ -3132,8 +3132,8 @@ void TestReadInteraction::TestReadHandler_KillOverQuotaSubscriptions(nlTestSuite // subscriptions will require the eviction of existing subscriptions, OR potential rejection of the subscription if it exceeds // minimas. app::InteractionModelEngine::GetInstance()->SetForceHandlerQuota(true); - app::InteractionModelEngine::GetInstance()->SetHandlerCapacityForSubscriptions(kExpectedParallelSubs); - app::InteractionModelEngine::GetInstance()->SetPathPoolCapacityForSubscriptions(kExpectedParallelPaths); + app::InteractionModelEngine::GetInstance()->SetHandlerCapacityForSubscriptions(static_cast(kExpectedParallelSubs)); + app::InteractionModelEngine::GetInstance()->SetPathPoolCapacityForSubscriptions(static_cast(kExpectedParallelPaths)); // Part 1: Test per subscription minimas. // Rejection of the subscription that exceeds minimas. 
@@ -3291,9 +3291,9 @@ void TestReadInteraction::TestReadHandler_KillOldestSubscriptions(nlTestSuite * TestContext & ctx = *static_cast(apContext); auto sessionHandle = ctx.GetSessionBobToAlice(); - const int32_t kExpectedParallelSubs = + const auto kExpectedParallelSubs = app::InteractionModelEngine::kMinSupportedSubscriptionsPerFabric * ctx.GetFabricTable().FabricCount(); - const int32_t kExpectedParallelPaths = kExpectedParallelSubs * app::InteractionModelEngine::kMinSupportedPathsPerSubscription; + const auto kExpectedParallelPaths = kExpectedParallelSubs * app::InteractionModelEngine::kMinSupportedPathsPerSubscription; app::InteractionModelEngine::GetInstance()->RegisterReadHandlerAppCallback(&gTestReadInteraction); @@ -3301,8 +3301,8 @@ void TestReadInteraction::TestReadHandler_KillOldestSubscriptions(nlTestSuite * std::vector> readClients; app::InteractionModelEngine::GetInstance()->SetForceHandlerQuota(true); - app::InteractionModelEngine::GetInstance()->SetHandlerCapacityForSubscriptions(kExpectedParallelSubs); - app::InteractionModelEngine::GetInstance()->SetPathPoolCapacityForSubscriptions(kExpectedParallelPaths); + app::InteractionModelEngine::GetInstance()->SetHandlerCapacityForSubscriptions(static_cast(kExpectedParallelSubs)); + app::InteractionModelEngine::GetInstance()->SetPathPoolCapacityForSubscriptions(static_cast(kExpectedParallelPaths)); // This should just use all availbale resources. 
EstablishReadOrSubscriptions( @@ -3314,12 +3314,9 @@ void TestReadInteraction::TestReadHandler_KillOldestSubscriptions(nlTestSuite * NL_TEST_ASSERT(apSuite, readCallback.mAttributeCount == - kExpectedParallelSubs * - static_cast(app::InteractionModelEngine::kMinSupportedPathsPerSubscription)); + kExpectedParallelSubs * app::InteractionModelEngine::kMinSupportedPathsPerSubscription); NL_TEST_ASSERT(apSuite, readCallback.mOnSubscriptionEstablishedCount == kExpectedParallelSubs); - NL_TEST_ASSERT(apSuite, - app::InteractionModelEngine::GetInstance()->GetNumActiveReadHandlers() == - static_cast(kExpectedParallelSubs)); + NL_TEST_ASSERT(apSuite, app::InteractionModelEngine::GetInstance()->GetNumActiveReadHandlers() == kExpectedParallelSubs); // The following check will trigger the logic in im to kill the read handlers that uses more paths than the limit per fabric. { diff --git a/src/crypto/CHIPCryptoPALmbedTLS.cpp b/src/crypto/CHIPCryptoPALmbedTLS.cpp index 00b63db9b45c44..ae887e755c2330 100644 --- a/src/crypto/CHIPCryptoPALmbedTLS.cpp +++ b/src/crypto/CHIPCryptoPALmbedTLS.cpp @@ -1445,8 +1445,9 @@ CHIP_ERROR VerifyAttestationCertificateFormat(const ByteSpan & cert, Attestation { bool keyCertSignFlag = keyUsage & MBEDTLS_X509_KU_KEY_CERT_SIGN; bool crlSignFlag = keyUsage & MBEDTLS_X509_KU_CRL_SIGN; - bool otherFlags = - keyUsage & ~(MBEDTLS_X509_KU_CRL_SIGN | MBEDTLS_X509_KU_KEY_CERT_SIGN | MBEDTLS_X509_KU_DIGITAL_SIGNATURE); + bool otherFlags = keyUsage & + ~static_cast(MBEDTLS_X509_KU_CRL_SIGN | MBEDTLS_X509_KU_KEY_CERT_SIGN | + MBEDTLS_X509_KU_DIGITAL_SIGNATURE); VerifyOrExit(keyCertSignFlag && crlSignFlag && !otherFlags, error = CHIP_ERROR_INTERNAL); } } diff --git a/src/darwin/Framework/CHIP/MTRDevice.mm b/src/darwin/Framework/CHIP/MTRDevice.mm index 642a45ae622993..e61ae946392b19 100644 --- a/src/darwin/Framework/CHIP/MTRDevice.mm +++ b/src/darwin/Framework/CHIP/MTRDevice.mm @@ -254,6 +254,17 @@ - (void)invalidate os_unfair_lock_unlock(&self->_lock); } +- 
(void)nodeMayBeAdvertisingOperational +{ + // TODO: Figure out what to do with that information. If we're not waiting + // to subscribe/resubscribe, do nothing, otherwise perhaps trigger the + // subscribe/resubscribe immediately? We need to have much better tracking + // of our internal state for that, and may need to add something on + // ReadClient to cancel its outstanding timer and try to resubscribe + // immediately.... + MTR_LOG_DEFAULT("%@ saw new operational advertisement", self); +} + // assume lock is held - (void)_changeState:(MTRDeviceState)state { diff --git a/src/darwin/Framework/CHIP/MTRDeviceController.mm b/src/darwin/Framework/CHIP/MTRDeviceController.mm index 5dd543f74195d5..d91888ec30c341 100644 --- a/src/darwin/Framework/CHIP/MTRDeviceController.mm +++ b/src/darwin/Framework/CHIP/MTRDeviceController.mm @@ -48,10 +48,13 @@ #include #include #include +#include #include #include #include +#include + #import static NSString * const kErrorCommissionerInit = @"Init failure while initializing a commissioner"; @@ -82,7 +85,10 @@ typedef id (^SyncWorkQueueBlockWithReturnValue)(void); typedef BOOL (^SyncWorkQueueBlockWithBoolReturnValue)(void); -@interface MTRDeviceController () +@interface MTRDeviceController () { + // Atomic because it can be touched from multiple threads. + std::atomic _storedFabricIndex; +} // queue used to serialize all work performed by the MTRDeviceController @property (atomic, readonly) dispatch_queue_t chipWorkQueue; @@ -123,6 +129,8 @@ - (instancetype)initWithFactory:(MTRDeviceControllerFactory *)factory queue:(dis if ([self checkForInitError:(_operationalCredentialsDelegate != nullptr) logMsg:kErrorOperationalCredentialsInit]) { return nil; } + + _storedFabricIndex = chip::kUndefinedFabricIndex; } return self; } @@ -152,12 +160,15 @@ - (void)cleanupAfterStartup // in a very specific way that only MTRDeviceControllerFactory knows about. 
- (void)shutDownCppController { + assertChipStackLockedByCurrentThread(); + if (_cppCommissioner) { auto * commissionerToShutDown = _cppCommissioner; // Flag ourselves as not running before we start shutting down // _cppCommissioner, so we're not in a state where we claim to be // running but are actually partially shut down. _cppCommissioner = nullptr; + _storedFabricIndex = chip::kUndefinedFabricIndex; commissionerToShutDown->Shutdown(); delete commissionerToShutDown; if (_operationalCredentialsDelegate != nil) { @@ -345,6 +356,7 @@ - (BOOL)startup:(MTRDeviceControllerStartupParamsInternal *)startupParams return; } + self->_storedFabricIndex = fabricIdx; commissionerInitialized = YES; }); @@ -813,17 +825,26 @@ - (BOOL)syncRunOnWorkQueueWithBoolReturnValue:(SyncWorkQueueBlockWithBoolReturnV - (chip::FabricIndex)fabricIndex { + return _storedFabricIndex; +} + +- (nullable NSNumber *)compressedFabricID +{ + assertChipStackLockedByCurrentThread(); + if (!_cppCommissioner) { - return chip::kUndefinedFabricIndex; + return nil; } - return _cppCommissioner->GetFabricIndex(); + return @(_cppCommissioner->GetCompressedFabricId()); } - (CHIP_ERROR)isRunningOnFabric:(chip::FabricTable *)fabricTable fabricIndex:(chip::FabricIndex)fabricIndex isRunning:(BOOL *)isRunning { + assertChipStackLockedByCurrentThread(); + if (![self isRunning]) { *isRunning = NO; return CHIP_NO_ERROR; @@ -861,6 +882,22 @@ - (void)invalidateCASESessionForNode:(chip::NodeId)nodeID; [self syncRunOnWorkQueue:block error:nil]; } +- (void)operationalInstanceAdded:(chip::NodeId)nodeID +{ + // Don't use deviceForNodeID here, because we don't want to create the + // device if it does not already exist. 
+ os_unfair_lock_lock(&_deviceMapLock); + MTRDevice * device = self.nodeIDToDeviceMap[@(nodeID)]; + os_unfair_lock_unlock(&_deviceMapLock); + + if (device == nil) { + return; + } + + ChipLogProgress(Controller, "Notifying device about node 0x" ChipLogFormatX64 " advertising", ChipLogValueX64(nodeID)); + [device nodeMayBeAdvertisingOperational]; +} + @end /** diff --git a/src/darwin/Framework/CHIP/MTRDeviceControllerFactory.mm b/src/darwin/Framework/CHIP/MTRDeviceControllerFactory.mm index 9c96f7e98ad308..0e57fa215a131b 100644 --- a/src/darwin/Framework/CHIP/MTRDeviceControllerFactory.mm +++ b/src/darwin/Framework/CHIP/MTRDeviceControllerFactory.mm @@ -28,6 +28,7 @@ #import "MTRFramework.h" #import "MTRLogging_Internal.h" #import "MTROTAProviderDelegateBridge.h" +#import "MTROperationalBrowser.h" #import "MTRP256KeypairBridge.h" #import "MTRPersistentStorageDelegateBridge.h" #import "NSDataSpanConversion.h" @@ -81,6 +82,7 @@ @interface MTRDeviceControllerFactory () @property (readonly) NSMutableArray * controllers; @property (readonly) PersistentStorageOperationalKeystore * keystore; @property (readonly) Credentials::PersistentStorageOpCertStore * opCertStore; +@property (readonly) MTROperationalBrowser * operationalBrowser; @property () chip::Credentials::DeviceAttestationVerifier * deviceAttestationVerifier; - (BOOL)findMatchingFabric:(FabricTable &)fabricTable @@ -643,6 +645,9 @@ - (MTRDeviceController * _Nullable)createController // Bringing up the first controller. Start the event loop now. If we // fail to bring it up, its cleanup will stop the event loop again. 
chip::DeviceLayer::PlatformMgrImpl().StartEventLoopTask(); + dispatch_sync(_chipWorkQueue, ^{ + self->_operationalBrowser = new MTROperationalBrowser(self, self->_chipWorkQueue); + }); } // Add the controller to _controllers now, so if we fail partway through its @@ -742,6 +747,10 @@ - (void)controllerShuttingDown:(MTRDeviceController *)controller [_controllers removeObject:controller]; if ([_controllers count] == 0) { + dispatch_sync(_chipWorkQueue, ^{ + delete self->_operationalBrowser; + self->_operationalBrowser = nullptr; + }); // That was our last controller. Stop the event loop before it // shuts down, because shutdown of the last controller will tear // down most of the world. @@ -777,6 +786,21 @@ - (nullable MTRDeviceController *)runningControllerForFabricIndex:(chip::FabricI return nil; } +- (void)operationalInstanceAdded:(chip::PeerId &)operationalID +{ + for (MTRDeviceController * controller in _controllers) { + auto * compressedFabricId = controller.compressedFabricID; + if (compressedFabricId != nil && compressedFabricId.unsignedLongLongValue == operationalID.GetCompressedFabricId()) { + ChipLogProgress(Controller, "Notifying controller at fabric index %u about new operational node 0x" ChipLogFormatX64, + controller.fabricIndex, ChipLogValueX64(operationalID.GetNodeId())); + [controller operationalInstanceAdded:operationalID.GetNodeId()]; + } + + // Keep going: more than one controller might match a given compressed + // fabric id, though the chances are low. 
+ } +} + - (MTRPersistentStorageDelegateBridge *)storageDelegateBridge { return _persistentStorageDelegateBridge; diff --git a/src/darwin/Framework/CHIP/MTRDeviceControllerFactory_Internal.h b/src/darwin/Framework/CHIP/MTRDeviceControllerFactory_Internal.h index a92d00b1c1e8b3..91b5813c896fb8 100644 --- a/src/darwin/Framework/CHIP/MTRDeviceControllerFactory_Internal.h +++ b/src/darwin/Framework/CHIP/MTRDeviceControllerFactory_Internal.h @@ -24,6 +24,7 @@ #import "MTRDeviceControllerFactory.h" #include +#include class MTRPersistentStorageDelegateBridge; @@ -47,6 +48,12 @@ NS_ASSUME_NONNULL_BEGIN */ - (nullable MTRDeviceController *)runningControllerForFabricIndex:(chip::FabricIndex)fabricIndex; +/** + * Notify the controller factory that a new operational instance with the given + * compressed fabric id and node id has been observed. + */ +- (void)operationalInstanceAdded:(chip::PeerId &)operationalID; + @property (readonly) MTRPersistentStorageDelegateBridge * storageDelegateBridge; @property (readonly) chip::Credentials::GroupDataProvider * groupData; @property (readonly) chip::Credentials::DeviceAttestationVerifier * deviceAttestationVerifier; diff --git a/src/darwin/Framework/CHIP/MTRDeviceController_Internal.h b/src/darwin/Framework/CHIP/MTRDeviceController_Internal.h index e3e795ac15bb46..28beb9b8244a13 100644 --- a/src/darwin/Framework/CHIP/MTRDeviceController_Internal.h +++ b/src/darwin/Framework/CHIP/MTRDeviceController_Internal.h @@ -62,10 +62,17 @@ NS_ASSUME_NONNULL_BEGIN /** * Will return chip::kUndefinedFabricIndex if we do not have a fabric index. - * This property MUST be gotten from the Matter work queue. */ @property (readonly) chip::FabricIndex fabricIndex; +/** + * Will return the compressed fabric id of the fabric if the controller is + * running, else nil. + * + * This property MUST be gotten from the Matter work queue. + */ +@property (readonly, nullable) NSNumber * compressedFabricID; + /** * Init a newly created controller. 
* @@ -185,6 +192,12 @@ NS_ASSUME_NONNULL_BEGIN */ - (MTRBaseDevice *)baseDeviceForNodeID:(NSNumber *)nodeID; +/** + * Notify the controller that a new operational instance with the given node id + * and a compressed fabric id that matches this controller has been observed. + */ +- (void)operationalInstanceAdded:(chip::NodeId)nodeID; + #pragma mark - Device-specific data and SDK access // DeviceController will act as a central repository for this opaque dictionary that MTRDevice manages - (MTRDevice *)deviceForNodeID:(NSNumber *)nodeID; diff --git a/src/darwin/Framework/CHIP/MTRDevice_Internal.h b/src/darwin/Framework/CHIP/MTRDevice_Internal.h index 1cb11e83df748f..cccff5027e2cb2 100644 --- a/src/darwin/Framework/CHIP/MTRDevice_Internal.h +++ b/src/darwin/Framework/CHIP/MTRDevice_Internal.h @@ -36,6 +36,11 @@ typedef void (^MTRDevicePerformAsyncBlock)(MTRBaseDevice * baseDevice); // called by controller to clean up and shutdown - (void)invalidate; +// Called by controller when a new operational advertisement for what we think +// is this device's identity has been observed. This could have +// false-positives, for example due to compressed fabric id collisions. +- (void)nodeMayBeAdvertisingOperational; + @property (nonatomic, readonly) MTRDeviceController * deviceController; @property (nonatomic, readonly, copy) NSNumber * nodeID; // Queue used for various internal bookkeeping work. In general endWork calls diff --git a/src/darwin/Framework/CHIP/MTROperationalBrowser.h b/src/darwin/Framework/CHIP/MTROperationalBrowser.h new file mode 100644 index 00000000000000..0cdcbf6d869736 --- /dev/null +++ b/src/darwin/Framework/CHIP/MTROperationalBrowser.h @@ -0,0 +1,47 @@ +/** + * Copyright (c) 2023 Project CHIP Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#import +#import +#import + +class MTROperationalBrowser +{ +public: + // Should be created at a point when the factory starts up the event loop, + // and destroyed when the event loop is stopped. + MTROperationalBrowser(MTRDeviceControllerFactory * aFactory, dispatch_queue_t aQueue); + + ~MTROperationalBrowser(); + +private: + static void OnBrowse(DNSServiceRef aServiceRef, DNSServiceFlags aFlags, uint32_t aInterfaceId, DNSServiceErrorType aError, + const char * aName, const char * aType, const char * aDomain, void * aContext); + + void TryToStartBrowse(); + + MTRDeviceControllerFactory * const mDeviceControllerFactory; + dispatch_queue_t mQueue; + DNSServiceRef mBrowseRef; + + // If mInitialized is true, mBrowseRef is valid. + bool mInitialized = false; + + // If mIsDestroying is true, we're in our destructor, shutting things down. + bool mIsDestroying = false; +}; diff --git a/src/darwin/Framework/CHIP/MTROperationalBrowser.mm b/src/darwin/Framework/CHIP/MTROperationalBrowser.mm new file mode 100644 index 00000000000000..b547b35b842aa7 --- /dev/null +++ b/src/darwin/Framework/CHIP/MTROperationalBrowser.mm @@ -0,0 +1,112 @@ +/** + * Copyright (c) 2023 Project CHIP Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#import + +#import "MTRDeviceControllerFactory_Internal.h" +#import "MTROperationalBrowser.h" + +#include +#include +#include +#include + +namespace { +constexpr const char kLocalDot[] = "local."; +constexpr const char kOperationalType[] = "_matter._tcp"; +constexpr DNSServiceFlags kBrowseFlags = 0; +} + +MTROperationalBrowser::MTROperationalBrowser(MTRDeviceControllerFactory * aFactory, dispatch_queue_t aQueue) + : mDeviceControllerFactory(aFactory) + , mQueue(aQueue) +{ + // If we fail to start a browse, there's nothing our consumer would do + // differently, so we might as well do this in the constructor. 
+ TryToStartBrowse(); +} + +void MTROperationalBrowser::TryToStartBrowse() +{ + assertChipStackLockedByCurrentThread(); + + ChipLogProgress(Controller, "Trying to start operational browse"); + + auto err + = DNSServiceBrowse(&mBrowseRef, kBrowseFlags, kDNSServiceInterfaceIndexAny, kOperationalType, kLocalDot, OnBrowse, this); + if (err != kDNSServiceErr_NoError) { + ChipLogError(Controller, "Failed to start operational browse: %" PRId32, err); + return; + } + + err = DNSServiceSetDispatchQueue(mBrowseRef, mQueue); + if (err != kDNSServiceErr_NoError) { + ChipLogError(Controller, "Failed to set up dispatch queue properly"); + DNSServiceRefDeallocate(mBrowseRef); + return; + } + + mInitialized = true; +} + +void MTROperationalBrowser::OnBrowse(DNSServiceRef aServiceRef, DNSServiceFlags aFlags, uint32_t aInterfaceId, + DNSServiceErrorType aError, const char * aName, const char * aType, const char * aDomain, void * aContext) +{ + assertChipStackLockedByCurrentThread(); + + auto self = static_cast(aContext); + + // We only expect to get notified about our type/domain. + if (aError != kDNSServiceErr_NoError) { + ChipLogError(Controller, "Operational browse failure: %" PRId32, aError); + DNSServiceRefDeallocate(self->mBrowseRef); + self->mInitialized = false; + + // We shouldn't really get callbacks under our destructor, but guard + // against it just in case. + if (!self->mIsDestroying) { + // Try to start a new browse, so we have one going. + self->TryToStartBrowse(); + } + return; + } + + if (!(aFlags & kDNSServiceFlagsAdd)) { + // We only care about new things appearing. 
+ return; + } + + chip::PeerId peerId; + CHIP_ERROR err = chip::Dnssd::ExtractIdFromInstanceName(aName, &peerId); + if (err != CHIP_NO_ERROR) { + ChipLogError(Controller, "Invalid instance name: '%s'\n", aName); + return; + } + + ChipLogProgress(Controller, "Notifying controller factory about new operational instance: '%s'", aName); + [self->mDeviceControllerFactory operationalInstanceAdded:peerId]; +} + +MTROperationalBrowser::~MTROperationalBrowser() +{ + assertChipStackLockedByCurrentThread(); + + mIsDestroying = true; + + if (mInitialized) { + DNSServiceRefDeallocate(mBrowseRef); + } +} diff --git a/src/darwin/Framework/CHIP/templates/availability.yaml b/src/darwin/Framework/CHIP/templates/availability.yaml index f0b8f781aeec14..eb5db2f33878fb 100644 --- a/src/darwin/Framework/CHIP/templates/availability.yaml +++ b/src/darwin/Framework/CHIP/templates/availability.yaml @@ -3063,8 +3063,12 @@ - OTAChangeReasonEnum - OTAUpdateStateEnum TimeFormatLocalization: - - CalendarType - - HourFormat + # CalendarTypeEnum and HourFormatEnum were originally just named + # CalendarType and HourFormat, but we generate the same API for + # the names with/without "Enum" at the end, so the name can just + # change here. + - CalendarTypeEnum + - HourFormatEnum UnitLocalization: # TempUnitEnum was originally just named TempUnit, but # we generate the same API for both of those names, so the name @@ -3324,7 +3328,11 @@ - RollingBack - DelayedOnUserConsent TimeFormatLocalization: - CalendarType: + # CalendarTypeEnum and HourFormatEnum were originally just named + # CalendarType and HourFormat, but we generate the same API for + # the names with/without "Enum" at the end, so the name can just + # change here. 
+ CalendarTypeEnum: - Buddhist - Chinese - Coptic @@ -3337,7 +3345,7 @@ - Korean - Persian - Taiwanese - HourFormat: + HourFormatEnum: - 12hr - 24hr UnitLocalization: diff --git a/src/darwin/Framework/CHIP/zap-generated/MTRBaseClusters.mm b/src/darwin/Framework/CHIP/zap-generated/MTRBaseClusters.mm index 650d89774e861b..86a9e2d3c69d75 100644 --- a/src/darwin/Framework/CHIP/zap-generated/MTRBaseClusters.mm +++ b/src/darwin/Framework/CHIP/zap-generated/MTRBaseClusters.mm @@ -17797,7 +17797,8 @@ - (void)readAttributeHourFormatWithCompletion:(void (^)(NSNumber * _Nullable val { MTRReadParams * params = [[MTRReadParams alloc] init]; using TypeInfo = TimeFormatLocalization::Attributes::HourFormat::TypeInfo; - return MTRReadAttribute( + return MTRReadAttribute( params, completion, self.callbackQueue, self.device, self->_endpoint, TypeInfo::GetClusterId(), TypeInfo::GetAttributeId()); } @@ -17843,7 +17844,7 @@ - (void)subscribeAttributeHourFormatWithParams:(MTRSubscribeParams * _Nonnull)pa reportHandler:(void (^)(NSNumber * _Nullable value, NSError * _Nullable error))reportHandler { using TypeInfo = TimeFormatLocalization::Attributes::HourFormat::TypeInfo; - MTRSubscribeAttribute(params, subscriptionEstablished, reportHandler, self.callbackQueue, self.device, self->_endpoint, TypeInfo::GetClusterId(), TypeInfo::GetAttributeId()); } @@ -17853,9 +17854,9 @@ + (void)readAttributeHourFormatWithClusterStateCache:(MTRClusterStateCacheContai queue:(dispatch_queue_t)queue completion:(void (^)(NSNumber * _Nullable value, NSError * _Nullable error))completion { - auto * bridge = new MTRTimeFormatLocalizationClusterHourFormatAttributeCallbackBridge(queue, completion); + auto * bridge = new MTRTimeFormatLocalizationClusterHourFormatEnumAttributeCallbackBridge(queue, completion); std::move(*bridge).DispatchLocalAction(clusterStateCacheContainer.baseDevice, - ^(TimeFormatLocalizationClusterHourFormatAttributeCallback successCb, MTRErrorCallback failureCb) { + 
^(TimeFormatLocalizationClusterHourFormatEnumAttributeCallback successCb, MTRErrorCallback failureCb) { if (clusterStateCacheContainer.cppClusterStateCache) { chip::app::ConcreteAttributePath path; using TypeInfo = TimeFormatLocalization::Attributes::HourFormat::TypeInfo; @@ -17877,7 +17878,8 @@ - (void)readAttributeActiveCalendarTypeWithCompletion:(void (^)(NSNumber * _Null { MTRReadParams * params = [[MTRReadParams alloc] init]; using TypeInfo = TimeFormatLocalization::Attributes::ActiveCalendarType::TypeInfo; - return MTRReadAttribute( + return MTRReadAttribute( params, completion, self.callbackQueue, self.device, self->_endpoint, TypeInfo::GetClusterId(), TypeInfo::GetAttributeId()); } @@ -17924,7 +17926,7 @@ - (void)subscribeAttributeActiveCalendarTypeWithParams:(MTRSubscribeParams * _No (void (^)(NSNumber * _Nullable value, NSError * _Nullable error))reportHandler { using TypeInfo = TimeFormatLocalization::Attributes::ActiveCalendarType::TypeInfo; - MTRSubscribeAttribute(params, subscriptionEstablished, reportHandler, self.callbackQueue, self.device, self->_endpoint, TypeInfo::GetClusterId(), TypeInfo::GetAttributeId()); } @@ -17935,9 +17937,9 @@ + (void)readAttributeActiveCalendarTypeWithClusterStateCache:(MTRClusterStateCac completion: (void (^)(NSNumber * _Nullable value, NSError * _Nullable error))completion { - auto * bridge = new MTRTimeFormatLocalizationClusterCalendarTypeAttributeCallbackBridge(queue, completion); + auto * bridge = new MTRTimeFormatLocalizationClusterCalendarTypeEnumAttributeCallbackBridge(queue, completion); std::move(*bridge).DispatchLocalAction(clusterStateCacheContainer.baseDevice, - ^(TimeFormatLocalizationClusterCalendarTypeAttributeCallback successCb, MTRErrorCallback failureCb) { + ^(TimeFormatLocalizationClusterCalendarTypeEnumAttributeCallback successCb, MTRErrorCallback failureCb) { if (clusterStateCacheContainer.cppClusterStateCache) { chip::app::ConcreteAttributePath path; using TypeInfo = 
TimeFormatLocalization::Attributes::ActiveCalendarType::TypeInfo; diff --git a/src/darwin/Framework/CHIP/zap-generated/MTRCallbackBridge.h b/src/darwin/Framework/CHIP/zap-generated/MTRCallbackBridge.h index a4b1d9891a5ba0..f3a7deead776d0 100644 --- a/src/darwin/Framework/CHIP/zap-generated/MTRCallbackBridge.h +++ b/src/darwin/Framework/CHIP/zap-generated/MTRCallbackBridge.h @@ -215,14 +215,14 @@ typedef void (*OTASoftwareUpdateRequestorClusterOTAUpdateStateEnumAttributeCallb void *, chip::app::Clusters::OtaSoftwareUpdateRequestor::OTAUpdateStateEnum); typedef void (*NullableOTASoftwareUpdateRequestorClusterOTAUpdateStateEnumAttributeCallback)( void *, const chip::app::DataModel::Nullable &); -typedef void (*TimeFormatLocalizationClusterCalendarTypeAttributeCallback)( - void *, chip::app::Clusters::TimeFormatLocalization::CalendarType); -typedef void (*NullableTimeFormatLocalizationClusterCalendarTypeAttributeCallback)( - void *, const chip::app::DataModel::Nullable &); -typedef void (*TimeFormatLocalizationClusterHourFormatAttributeCallback)(void *, - chip::app::Clusters::TimeFormatLocalization::HourFormat); -typedef void (*NullableTimeFormatLocalizationClusterHourFormatAttributeCallback)( - void *, const chip::app::DataModel::Nullable &); +typedef void (*TimeFormatLocalizationClusterCalendarTypeEnumAttributeCallback)( + void *, chip::app::Clusters::TimeFormatLocalization::CalendarTypeEnum); +typedef void (*NullableTimeFormatLocalizationClusterCalendarTypeEnumAttributeCallback)( + void *, const chip::app::DataModel::Nullable &); +typedef void (*TimeFormatLocalizationClusterHourFormatEnumAttributeCallback)( + void *, chip::app::Clusters::TimeFormatLocalization::HourFormatEnum); +typedef void (*NullableTimeFormatLocalizationClusterHourFormatEnumAttributeCallback)( + void *, const chip::app::DataModel::Nullable &); typedef void (*UnitLocalizationClusterTempUnitEnumAttributeCallback)(void *, chip::app::Clusters::UnitLocalization::TempUnitEnum); typedef void 
(*NullableUnitLocalizationClusterTempUnitEnumAttributeCallback)( void *, const chip::app::DataModel::Nullable &); @@ -675,7 +675,8 @@ typedef void (*LocalizationConfigurationAcceptedCommandListListAttributeCallback typedef void (*LocalizationConfigurationAttributeListListAttributeCallback)( void * context, const chip::app::DataModel::DecodableList & data); typedef void (*TimeFormatLocalizationSupportedCalendarTypesListAttributeCallback)( - void * context, const chip::app::DataModel::DecodableList & data); + void * context, + const chip::app::DataModel::DecodableList & data); typedef void (*TimeFormatLocalizationGeneratedCommandListListAttributeCallback)( void * context, const chip::app::DataModel::DecodableList & data); typedef void (*TimeFormatLocalizationAcceptedCommandListListAttributeCallback)( @@ -3870,7 +3871,7 @@ class MTRTimeFormatLocalizationSupportedCalendarTypesListAttributeCallbackBridge static void OnSuccessFn(void * context, - const chip::app::DataModel::DecodableList & value); + const chip::app::DataModel::DecodableList & value); }; class MTRTimeFormatLocalizationSupportedCalendarTypesListAttributeCallbackSubscriptionBridge @@ -13250,137 +13251,140 @@ class MTRNullableOTASoftwareUpdateRequestorClusterOTAUpdateStateEnumAttributeCal MTRSubscriptionEstablishedHandler mEstablishedHandler; }; -class MTRTimeFormatLocalizationClusterCalendarTypeAttributeCallbackBridge - : public MTRCallbackBridge +class MTRTimeFormatLocalizationClusterCalendarTypeEnumAttributeCallbackBridge + : public MTRCallbackBridge { public: - MTRTimeFormatLocalizationClusterCalendarTypeAttributeCallbackBridge(dispatch_queue_t queue, ResponseHandler handler) : - MTRCallbackBridge(queue, handler, OnSuccessFn){}; + MTRTimeFormatLocalizationClusterCalendarTypeEnumAttributeCallbackBridge(dispatch_queue_t queue, ResponseHandler handler) : + MTRCallbackBridge(queue, handler, OnSuccessFn){}; - MTRTimeFormatLocalizationClusterCalendarTypeAttributeCallbackBridge(dispatch_queue_t queue, 
ResponseHandler handler, - MTRActionBlock action) : - MTRCallbackBridge(queue, handler, action, OnSuccessFn){}; + MTRTimeFormatLocalizationClusterCalendarTypeEnumAttributeCallbackBridge(dispatch_queue_t queue, ResponseHandler handler, + MTRActionBlock action) : + MTRCallbackBridge(queue, handler, action, OnSuccessFn){}; - static void OnSuccessFn(void * context, chip::app::Clusters::TimeFormatLocalization::CalendarType value); + static void OnSuccessFn(void * context, chip::app::Clusters::TimeFormatLocalization::CalendarTypeEnum value); }; -class MTRTimeFormatLocalizationClusterCalendarTypeAttributeCallbackSubscriptionBridge - : public MTRTimeFormatLocalizationClusterCalendarTypeAttributeCallbackBridge +class MTRTimeFormatLocalizationClusterCalendarTypeEnumAttributeCallbackSubscriptionBridge + : public MTRTimeFormatLocalizationClusterCalendarTypeEnumAttributeCallbackBridge { public: - MTRTimeFormatLocalizationClusterCalendarTypeAttributeCallbackSubscriptionBridge( + MTRTimeFormatLocalizationClusterCalendarTypeEnumAttributeCallbackSubscriptionBridge( dispatch_queue_t queue, ResponseHandler handler, MTRActionBlock action, MTRSubscriptionEstablishedHandler establishedHandler) : - MTRTimeFormatLocalizationClusterCalendarTypeAttributeCallbackBridge(queue, handler, action), + MTRTimeFormatLocalizationClusterCalendarTypeEnumAttributeCallbackBridge(queue, handler, action), mEstablishedHandler(establishedHandler) {} void OnSubscriptionEstablished(); - using MTRTimeFormatLocalizationClusterCalendarTypeAttributeCallbackBridge::KeepAliveOnCallback; - using MTRTimeFormatLocalizationClusterCalendarTypeAttributeCallbackBridge::OnDone; + using MTRTimeFormatLocalizationClusterCalendarTypeEnumAttributeCallbackBridge::KeepAliveOnCallback; + using MTRTimeFormatLocalizationClusterCalendarTypeEnumAttributeCallbackBridge::OnDone; private: MTRSubscriptionEstablishedHandler mEstablishedHandler; }; -class MTRNullableTimeFormatLocalizationClusterCalendarTypeAttributeCallbackBridge - : public 
MTRCallbackBridge +class MTRNullableTimeFormatLocalizationClusterCalendarTypeEnumAttributeCallbackBridge + : public MTRCallbackBridge { public: - MTRNullableTimeFormatLocalizationClusterCalendarTypeAttributeCallbackBridge(dispatch_queue_t queue, ResponseHandler handler) : - MTRCallbackBridge(queue, handler, OnSuccessFn){}; + MTRNullableTimeFormatLocalizationClusterCalendarTypeEnumAttributeCallbackBridge(dispatch_queue_t queue, + ResponseHandler handler) : + MTRCallbackBridge(queue, handler, OnSuccessFn){}; - MTRNullableTimeFormatLocalizationClusterCalendarTypeAttributeCallbackBridge(dispatch_queue_t queue, ResponseHandler handler, - MTRActionBlock action) : - MTRCallbackBridge(queue, handler, action, - OnSuccessFn){}; + MTRNullableTimeFormatLocalizationClusterCalendarTypeEnumAttributeCallbackBridge(dispatch_queue_t queue, ResponseHandler handler, + MTRActionBlock action) : + MTRCallbackBridge(queue, handler, action, + OnSuccessFn){}; static void OnSuccessFn(void * context, - const chip::app::DataModel::Nullable & value); + const chip::app::DataModel::Nullable & value); }; -class MTRNullableTimeFormatLocalizationClusterCalendarTypeAttributeCallbackSubscriptionBridge - : public MTRNullableTimeFormatLocalizationClusterCalendarTypeAttributeCallbackBridge +class MTRNullableTimeFormatLocalizationClusterCalendarTypeEnumAttributeCallbackSubscriptionBridge + : public MTRNullableTimeFormatLocalizationClusterCalendarTypeEnumAttributeCallbackBridge { public: - MTRNullableTimeFormatLocalizationClusterCalendarTypeAttributeCallbackSubscriptionBridge( + MTRNullableTimeFormatLocalizationClusterCalendarTypeEnumAttributeCallbackSubscriptionBridge( dispatch_queue_t queue, ResponseHandler handler, MTRActionBlock action, MTRSubscriptionEstablishedHandler establishedHandler) : - MTRNullableTimeFormatLocalizationClusterCalendarTypeAttributeCallbackBridge(queue, handler, action), + MTRNullableTimeFormatLocalizationClusterCalendarTypeEnumAttributeCallbackBridge(queue, handler, action), 
mEstablishedHandler(establishedHandler) {} void OnSubscriptionEstablished(); - using MTRNullableTimeFormatLocalizationClusterCalendarTypeAttributeCallbackBridge::KeepAliveOnCallback; - using MTRNullableTimeFormatLocalizationClusterCalendarTypeAttributeCallbackBridge::OnDone; + using MTRNullableTimeFormatLocalizationClusterCalendarTypeEnumAttributeCallbackBridge::KeepAliveOnCallback; + using MTRNullableTimeFormatLocalizationClusterCalendarTypeEnumAttributeCallbackBridge::OnDone; private: MTRSubscriptionEstablishedHandler mEstablishedHandler; }; -class MTRTimeFormatLocalizationClusterHourFormatAttributeCallbackBridge - : public MTRCallbackBridge +class MTRTimeFormatLocalizationClusterHourFormatEnumAttributeCallbackBridge + : public MTRCallbackBridge { public: - MTRTimeFormatLocalizationClusterHourFormatAttributeCallbackBridge(dispatch_queue_t queue, ResponseHandler handler) : - MTRCallbackBridge(queue, handler, OnSuccessFn){}; + MTRTimeFormatLocalizationClusterHourFormatEnumAttributeCallbackBridge(dispatch_queue_t queue, ResponseHandler handler) : + MTRCallbackBridge(queue, handler, OnSuccessFn){}; - MTRTimeFormatLocalizationClusterHourFormatAttributeCallbackBridge(dispatch_queue_t queue, ResponseHandler handler, - MTRActionBlock action) : - MTRCallbackBridge(queue, handler, action, OnSuccessFn){}; + MTRTimeFormatLocalizationClusterHourFormatEnumAttributeCallbackBridge(dispatch_queue_t queue, ResponseHandler handler, + MTRActionBlock action) : + MTRCallbackBridge(queue, handler, action, OnSuccessFn){}; - static void OnSuccessFn(void * context, chip::app::Clusters::TimeFormatLocalization::HourFormat value); + static void OnSuccessFn(void * context, chip::app::Clusters::TimeFormatLocalization::HourFormatEnum value); }; -class MTRTimeFormatLocalizationClusterHourFormatAttributeCallbackSubscriptionBridge - : public MTRTimeFormatLocalizationClusterHourFormatAttributeCallbackBridge +class MTRTimeFormatLocalizationClusterHourFormatEnumAttributeCallbackSubscriptionBridge + : 
public MTRTimeFormatLocalizationClusterHourFormatEnumAttributeCallbackBridge { public: - MTRTimeFormatLocalizationClusterHourFormatAttributeCallbackSubscriptionBridge( + MTRTimeFormatLocalizationClusterHourFormatEnumAttributeCallbackSubscriptionBridge( dispatch_queue_t queue, ResponseHandler handler, MTRActionBlock action, MTRSubscriptionEstablishedHandler establishedHandler) : - MTRTimeFormatLocalizationClusterHourFormatAttributeCallbackBridge(queue, handler, action), + MTRTimeFormatLocalizationClusterHourFormatEnumAttributeCallbackBridge(queue, handler, action), mEstablishedHandler(establishedHandler) {} void OnSubscriptionEstablished(); - using MTRTimeFormatLocalizationClusterHourFormatAttributeCallbackBridge::KeepAliveOnCallback; - using MTRTimeFormatLocalizationClusterHourFormatAttributeCallbackBridge::OnDone; + using MTRTimeFormatLocalizationClusterHourFormatEnumAttributeCallbackBridge::KeepAliveOnCallback; + using MTRTimeFormatLocalizationClusterHourFormatEnumAttributeCallbackBridge::OnDone; private: MTRSubscriptionEstablishedHandler mEstablishedHandler; }; -class MTRNullableTimeFormatLocalizationClusterHourFormatAttributeCallbackBridge - : public MTRCallbackBridge +class MTRNullableTimeFormatLocalizationClusterHourFormatEnumAttributeCallbackBridge + : public MTRCallbackBridge { public: - MTRNullableTimeFormatLocalizationClusterHourFormatAttributeCallbackBridge(dispatch_queue_t queue, ResponseHandler handler) : - MTRCallbackBridge(queue, handler, OnSuccessFn){}; + MTRNullableTimeFormatLocalizationClusterHourFormatEnumAttributeCallbackBridge(dispatch_queue_t queue, ResponseHandler handler) : + MTRCallbackBridge(queue, handler, OnSuccessFn){}; - MTRNullableTimeFormatLocalizationClusterHourFormatAttributeCallbackBridge(dispatch_queue_t queue, ResponseHandler handler, - MTRActionBlock action) : - MTRCallbackBridge(queue, handler, action, OnSuccessFn){}; + MTRNullableTimeFormatLocalizationClusterHourFormatEnumAttributeCallbackBridge(dispatch_queue_t queue, 
ResponseHandler handler, + MTRActionBlock action) : + MTRCallbackBridge(queue, handler, action, + OnSuccessFn){}; - static void OnSuccessFn(void * context, - const chip::app::DataModel::Nullable & value); + static void + OnSuccessFn(void * context, + const chip::app::DataModel::Nullable & value); }; -class MTRNullableTimeFormatLocalizationClusterHourFormatAttributeCallbackSubscriptionBridge - : public MTRNullableTimeFormatLocalizationClusterHourFormatAttributeCallbackBridge +class MTRNullableTimeFormatLocalizationClusterHourFormatEnumAttributeCallbackSubscriptionBridge + : public MTRNullableTimeFormatLocalizationClusterHourFormatEnumAttributeCallbackBridge { public: - MTRNullableTimeFormatLocalizationClusterHourFormatAttributeCallbackSubscriptionBridge( + MTRNullableTimeFormatLocalizationClusterHourFormatEnumAttributeCallbackSubscriptionBridge( dispatch_queue_t queue, ResponseHandler handler, MTRActionBlock action, MTRSubscriptionEstablishedHandler establishedHandler) : - MTRNullableTimeFormatLocalizationClusterHourFormatAttributeCallbackBridge(queue, handler, action), + MTRNullableTimeFormatLocalizationClusterHourFormatEnumAttributeCallbackBridge(queue, handler, action), mEstablishedHandler(establishedHandler) {} void OnSubscriptionEstablished(); - using MTRNullableTimeFormatLocalizationClusterHourFormatAttributeCallbackBridge::KeepAliveOnCallback; - using MTRNullableTimeFormatLocalizationClusterHourFormatAttributeCallbackBridge::OnDone; + using MTRNullableTimeFormatLocalizationClusterHourFormatEnumAttributeCallbackBridge::KeepAliveOnCallback; + using MTRNullableTimeFormatLocalizationClusterHourFormatEnumAttributeCallbackBridge::OnDone; private: MTRSubscriptionEstablishedHandler mEstablishedHandler; diff --git a/src/darwin/Framework/CHIP/zap-generated/MTRCallbackBridge.mm b/src/darwin/Framework/CHIP/zap-generated/MTRCallbackBridge.mm index c7806b542a1cdf..2dd1e11290d64c 100644 --- a/src/darwin/Framework/CHIP/zap-generated/MTRCallbackBridge.mm +++ 
b/src/darwin/Framework/CHIP/zap-generated/MTRCallbackBridge.mm @@ -2997,8 +2997,8 @@ } } -void MTRTimeFormatLocalizationSupportedCalendarTypesListAttributeCallbackBridge::OnSuccessFn( - void * context, const chip::app::DataModel::DecodableList & value) +void MTRTimeFormatLocalizationSupportedCalendarTypesListAttributeCallbackBridge::OnSuccessFn(void * context, + const chip::app::DataModel::DecodableList & value) { NSArray * _Nonnull objCValue; { // Scope for our temporary variables @@ -14220,15 +14220,15 @@ } } -void MTRTimeFormatLocalizationClusterCalendarTypeAttributeCallbackBridge::OnSuccessFn( - void * context, chip::app::Clusters::TimeFormatLocalization::CalendarType value) +void MTRTimeFormatLocalizationClusterCalendarTypeEnumAttributeCallbackBridge::OnSuccessFn( + void * context, chip::app::Clusters::TimeFormatLocalization::CalendarTypeEnum value) { NSNumber * _Nonnull objCValue; objCValue = [NSNumber numberWithUnsignedChar:chip::to_underlying(value)]; DispatchSuccess(context, objCValue); }; -void MTRTimeFormatLocalizationClusterCalendarTypeAttributeCallbackSubscriptionBridge::OnSubscriptionEstablished() +void MTRTimeFormatLocalizationClusterCalendarTypeEnumAttributeCallbackSubscriptionBridge::OnSubscriptionEstablished() { if (!mQueue) { return; @@ -14243,8 +14243,8 @@ } } -void MTRNullableTimeFormatLocalizationClusterCalendarTypeAttributeCallbackBridge::OnSuccessFn( - void * context, const chip::app::DataModel::Nullable & value) +void MTRNullableTimeFormatLocalizationClusterCalendarTypeEnumAttributeCallbackBridge::OnSuccessFn( + void * context, const chip::app::DataModel::Nullable & value) { NSNumber * _Nullable objCValue; if (value.IsNull()) { @@ -14255,7 +14255,7 @@ DispatchSuccess(context, objCValue); }; -void MTRNullableTimeFormatLocalizationClusterCalendarTypeAttributeCallbackSubscriptionBridge::OnSubscriptionEstablished() +void MTRNullableTimeFormatLocalizationClusterCalendarTypeEnumAttributeCallbackSubscriptionBridge::OnSubscriptionEstablished() { if 
(!mQueue) { return; @@ -14270,15 +14270,15 @@ } } -void MTRTimeFormatLocalizationClusterHourFormatAttributeCallbackBridge::OnSuccessFn( - void * context, chip::app::Clusters::TimeFormatLocalization::HourFormat value) +void MTRTimeFormatLocalizationClusterHourFormatEnumAttributeCallbackBridge::OnSuccessFn( + void * context, chip::app::Clusters::TimeFormatLocalization::HourFormatEnum value) { NSNumber * _Nonnull objCValue; objCValue = [NSNumber numberWithUnsignedChar:chip::to_underlying(value)]; DispatchSuccess(context, objCValue); }; -void MTRTimeFormatLocalizationClusterHourFormatAttributeCallbackSubscriptionBridge::OnSubscriptionEstablished() +void MTRTimeFormatLocalizationClusterHourFormatEnumAttributeCallbackSubscriptionBridge::OnSubscriptionEstablished() { if (!mQueue) { return; @@ -14293,8 +14293,8 @@ } } -void MTRNullableTimeFormatLocalizationClusterHourFormatAttributeCallbackBridge::OnSuccessFn( - void * context, const chip::app::DataModel::Nullable & value) +void MTRNullableTimeFormatLocalizationClusterHourFormatEnumAttributeCallbackBridge::OnSuccessFn( + void * context, const chip::app::DataModel::Nullable & value) { NSNumber * _Nullable objCValue; if (value.IsNull()) { @@ -14305,7 +14305,7 @@ DispatchSuccess(context, objCValue); }; -void MTRNullableTimeFormatLocalizationClusterHourFormatAttributeCallbackSubscriptionBridge::OnSubscriptionEstablished() +void MTRNullableTimeFormatLocalizationClusterHourFormatEnumAttributeCallbackSubscriptionBridge::OnSubscriptionEstablished() { if (!mQueue) { return; diff --git a/src/darwin/Framework/Matter.xcodeproj/project.pbxproj b/src/darwin/Framework/Matter.xcodeproj/project.pbxproj index 44b5e1cc9d9840..7033180257c717 100644 --- a/src/darwin/Framework/Matter.xcodeproj/project.pbxproj +++ b/src/darwin/Framework/Matter.xcodeproj/project.pbxproj @@ -126,6 +126,8 @@ 3DFCB32C29678C9500332B35 /* MTRConversion.h in Headers */ = {isa = PBXBuildFile; fileRef = 3DFCB32B29678C9500332B35 /* MTRConversion.h */; }; 
51029DF6293AA6100087AFB0 /* MTROperationalCertificateIssuer.mm in Sources */ = {isa = PBXBuildFile; fileRef = 51029DF5293AA6100087AFB0 /* MTROperationalCertificateIssuer.mm */; }; 510CECA8297F72970064E0B3 /* MTROperationalCertificateIssuerTests.m in Sources */ = {isa = PBXBuildFile; fileRef = 510CECA6297F72470064E0B3 /* MTROperationalCertificateIssuerTests.m */; }; + 5117DD3829A931AE00FFA1AA /* MTROperationalBrowser.mm in Sources */ = {isa = PBXBuildFile; fileRef = 5117DD3629A931AD00FFA1AA /* MTROperationalBrowser.mm */; }; + 5117DD3929A931AE00FFA1AA /* MTROperationalBrowser.h in Headers */ = {isa = PBXBuildFile; fileRef = 5117DD3729A931AE00FFA1AA /* MTROperationalBrowser.h */; }; 511913FB28C100EF009235E9 /* MTRBaseSubscriptionCallback.mm in Sources */ = {isa = PBXBuildFile; fileRef = 511913F928C100EF009235E9 /* MTRBaseSubscriptionCallback.mm */; }; 511913FC28C100EF009235E9 /* MTRBaseSubscriptionCallback.h in Headers */ = {isa = PBXBuildFile; fileRef = 511913FA28C100EF009235E9 /* MTRBaseSubscriptionCallback.h */; }; 5129BCFD26A9EE3300122DDF /* MTRError.h in Headers */ = {isa = PBXBuildFile; fileRef = 5129BCFC26A9EE3300122DDF /* MTRError.h */; settings = {ATTRIBUTES = (Public, ); }; }; @@ -401,6 +403,8 @@ 3DFCB32B29678C9500332B35 /* MTRConversion.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = MTRConversion.h; sourceTree = ""; }; 51029DF5293AA6100087AFB0 /* MTROperationalCertificateIssuer.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MTROperationalCertificateIssuer.mm; sourceTree = ""; }; 510CECA6297F72470064E0B3 /* MTROperationalCertificateIssuerTests.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = MTROperationalCertificateIssuerTests.m; sourceTree = ""; }; + 5117DD3629A931AD00FFA1AA /* MTROperationalBrowser.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MTROperationalBrowser.mm; 
sourceTree = ""; }; + 5117DD3729A931AE00FFA1AA /* MTROperationalBrowser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MTROperationalBrowser.h; sourceTree = ""; }; 511913F928C100EF009235E9 /* MTRBaseSubscriptionCallback.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MTRBaseSubscriptionCallback.mm; sourceTree = ""; }; 511913FA28C100EF009235E9 /* MTRBaseSubscriptionCallback.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MTRBaseSubscriptionCallback.h; sourceTree = ""; }; 5129BCFC26A9EE3300122DDF /* MTRError.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = MTRError.h; sourceTree = ""; }; @@ -998,6 +1002,8 @@ 997DED152695343400975E97 /* MTRThreadOperationalDataset.mm */, 3D843710294977000070D20A /* NSDataSpanConversion.h */, 3D84370E294977000070D20A /* NSStringSpanConversion.h */, + 5117DD3729A931AE00FFA1AA /* MTROperationalBrowser.h */, + 5117DD3629A931AD00FFA1AA /* MTROperationalBrowser.mm */, 3D69867E29382E58007314E7 /* Resources */, ); path = CHIP; @@ -1098,6 +1104,7 @@ 2CB7163B252E8A7B0026E2BB /* MTRDeviceControllerDelegateBridge.h in Headers */, 5ACDDD7A27CD129700EFD68A /* MTRClusterStateCacheContainer.h in Headers */, 5A6FEC9227B5669C00F25F42 /* MTRDeviceControllerOverXPC.h in Headers */, + 5117DD3929A931AE00FFA1AA /* MTROperationalBrowser.h in Headers */, 2C1B027B2641DB4E00780EF1 /* MTROperationalCredentialsDelegate.h in Headers */, 3D843717294979230070D20A /* MTRClusters_Internal.h in Headers */, 7596A85728788557004DAE0E /* MTRClusters.h in Headers */, @@ -1384,6 +1391,7 @@ 511913FB28C100EF009235E9 /* MTRBaseSubscriptionCallback.mm in Sources */, 5ACDDD7D27CD16D200EFD68A /* MTRClusterStateCacheContainer.mm in Sources */, 513DDB8A2761F6F900DAA01A /* MTRAttributeTLVValueDecoder.mm in Sources */, + 5117DD3829A931AE00FFA1AA /* MTROperationalBrowser.mm in Sources */, 2FD775552695557E00FF4B12 /* 
error-mapping.cpp in Sources */, 3D843757294AD25A0070D20A /* MTRCertificateInfo.mm in Sources */, 5A7947E427C0129600434CF2 /* MTRDeviceController+XPC.mm in Sources */, diff --git a/src/include/platform/BuildTime.h b/src/include/platform/BuildTime.h index 8015a534778a19..979f8c6bbd5e84 100644 --- a/src/include/platform/BuildTime.h +++ b/src/include/platform/BuildTime.h @@ -28,9 +28,9 @@ // Example of __TIME__ string: "21:06:19" #define COMPUTE_BUILD_YEAR(_date) \ - (((_date)[7] - '0') * 1000 + ((_date)[8] - '0') * 100 + ((_date)[9] - '0') * 10 + ((_date)[10] - '0')) + static_cast(((_date)[7] - '0') * 1000 + ((_date)[8] - '0') * 100 + ((_date)[9] - '0') * 10 + ((_date)[10] - '0')) -#define COMPUTE_BUILD_DAY(_date) ((((_date)[4] >= '0') ? ((_date)[4] - '0') * 10 : 0) + ((_date)[5] - '0')) +#define COMPUTE_BUILD_DAY(_date) static_cast((((_date)[4] >= '0') ? ((_date)[4] - '0') * 10 : 0) + ((_date)[5] - '0')) #define BUILD_MONTH_IS_JAN(_date) ((_date)[0] == 'J' && (_date)[1] == 'a') #define BUILD_MONTH_IS_FEB(_date) ((_date)[0] == 'F') @@ -63,9 +63,9 @@ ? 11 \ : (BUILD_MONTH_IS_DEC(_date)) ? 
12 : /* error default */ 99) -#define COMPUTE_BUILD_HOUR(_time) (((_time)[0] - '0') * 10 + (_time)[1] - '0') -#define COMPUTE_BUILD_MIN(_time) (((_time)[3] - '0') * 10 + (_time)[4] - '0') -#define COMPUTE_BUILD_SEC(_time) (((_time)[6] - '0') * 10 + (_time)[7] - '0') +#define COMPUTE_BUILD_HOUR(_time) static_cast(((_time)[0] - '0') * 10 + (_time)[1] - '0') +#define COMPUTE_BUILD_MIN(_time) static_cast(((_time)[3] - '0') * 10 + (_time)[4] - '0') +#define COMPUTE_BUILD_SEC(_time) static_cast(((_time)[6] - '0') * 10 + (_time)[7] - '0') #define BUILD_DATE_IS_BAD(_date) ((_date) == nullptr || strlen(_date) < strlen("Jan 01 2000") || (_date)[0] == '?') #define BUILD_TIME_IS_BAD(_time) ((_time) == nullptr || strlen(_time) < strlen("00:00:00") || (_time)[0] == '?') diff --git a/src/include/platform/DeviceInfoProvider.h b/src/include/platform/DeviceInfoProvider.h index b16ab39c91b36e..7b3206eb5c5aad 100644 --- a/src/include/platform/DeviceInfoProvider.h +++ b/src/include/platform/DeviceInfoProvider.h @@ -67,7 +67,7 @@ class DeviceInfoProvider using FixedLabelType = app::Clusters::FixedLabel::Structs::LabelStruct::Type; using UserLabelType = app::Clusters::UserLabel::Structs::LabelStruct::Type; - using CalendarType = app::Clusters::TimeFormatLocalization::CalendarType; + using CalendarType = app::Clusters::TimeFormatLocalization::CalendarTypeEnum; using FixedLabelIterator = Iterator; using UserLabelIterator = Iterator; diff --git a/src/lib/dnssd/Advertiser.h b/src/lib/dnssd/Advertiser.h index 710fc64bc4596f..45cd072c1db11c 100644 --- a/src/lib/dnssd/Advertiser.h +++ b/src/lib/dnssd/Advertiser.h @@ -63,7 +63,7 @@ class BaseAdvertisingParams mPort = port; return *reinterpret_cast(this); } - uint64_t GetPort() const { return mPort; } + uint16_t GetPort() const { return mPort; } Derived & SetInterfaceId(Inet::InterfaceId interfaceId) { diff --git a/src/lib/dnssd/Advertiser_ImplMinimalMdns.cpp b/src/lib/dnssd/Advertiser_ImplMinimalMdns.cpp index 47ee3b10e7d073..679c8bae7d3e48 
100644 --- a/src/lib/dnssd/Advertiser_ImplMinimalMdns.cpp +++ b/src/lib/dnssd/Advertiser_ImplMinimalMdns.cpp @@ -236,8 +236,9 @@ class AdvertiserMinMdns : public ServiceAdvertiser, "MRP retry interval idle value exceeds allowed range of 1 hour, using maximum available"); mrp.mIdleRetransTimeout = kMaxRetryInterval; } - size_t writtenCharactersNumber = snprintf(storage.sleepyIdleIntervalBuf, sizeof(storage.sleepyIdleIntervalBuf), - "SII=%" PRIu32, mrp.mIdleRetransTimeout.count()); + size_t writtenCharactersNumber = + static_cast(snprintf(storage.sleepyIdleIntervalBuf, sizeof(storage.sleepyIdleIntervalBuf), + "SII=%" PRIu32, mrp.mIdleRetransTimeout.count())); VerifyOrReturnError((writtenCharactersNumber > 0) && (writtenCharactersNumber < sizeof(storage.sleepyIdleIntervalBuf)), CHIP_ERROR_INVALID_STRING_LENGTH); @@ -252,8 +253,9 @@ class AdvertiserMinMdns : public ServiceAdvertiser, "MRP retry interval active value exceeds allowed range of 1 hour, using maximum available"); mrp.mActiveRetransTimeout = kMaxRetryInterval; } - size_t writtenCharactersNumber = snprintf(storage.sleepyActiveIntervalBuf, sizeof(storage.sleepyActiveIntervalBuf), - "SAI=%" PRIu32, mrp.mActiveRetransTimeout.count()); + size_t writtenCharactersNumber = + static_cast(snprintf(storage.sleepyActiveIntervalBuf, sizeof(storage.sleepyActiveIntervalBuf), + "SAI=%" PRIu32, mrp.mActiveRetransTimeout.count())); VerifyOrReturnError((writtenCharactersNumber > 0) && (writtenCharactersNumber < sizeof(storage.sleepyActiveIntervalBuf)), CHIP_ERROR_INVALID_STRING_LENGTH); @@ -262,8 +264,8 @@ class AdvertiserMinMdns : public ServiceAdvertiser, } if (params.GetTcpSupported().HasValue()) { - size_t writtenCharactersNumber = - snprintf(storage.tcpSupportedBuf, sizeof(storage.tcpSupportedBuf), "T=%d", params.GetTcpSupported().Value()); + size_t writtenCharactersNumber = static_cast( + snprintf(storage.tcpSupportedBuf, sizeof(storage.tcpSupportedBuf), "T=%d", params.GetTcpSupported().Value())); 
VerifyOrReturnError((writtenCharactersNumber > 0) && (writtenCharactersNumber < sizeof(storage.tcpSupportedBuf)), CHIP_ERROR_INVALID_STRING_LENGTH); txtFields[numTxtFields++] = storage.tcpSupportedBuf; @@ -290,7 +292,7 @@ class AdvertiserMinMdns : public ServiceAdvertiser, // current request handling const chip::Inet::IPPacketInfo * mCurrentSource = nullptr; - uint32_t mMessageId = 0; + uint16_t mMessageId = 0; const char * mEmptyTextEntries[1] = { "=", diff --git a/src/lib/dnssd/BUILD.gn b/src/lib/dnssd/BUILD.gn index e723dd54466600..b33b304f195df6 100644 --- a/src/lib/dnssd/BUILD.gn +++ b/src/lib/dnssd/BUILD.gn @@ -69,4 +69,6 @@ static_library("dnssd") { } else { assert(false, "Unknown Dnssd advertiser implementation.") } + + cflags = [ "-Wconversion" ] } diff --git a/src/lib/dnssd/Resolver.h b/src/lib/dnssd/Resolver.h index 9f4698a40db563..5f48378bd01d4e 100644 --- a/src/lib/dnssd/Resolver.h +++ b/src/lib/dnssd/Resolver.h @@ -149,13 +149,12 @@ struct CommissionNodeData uint16_t vendorId = 0; uint16_t productId = 0; uint8_t commissioningMode = 0; - // TODO: possibly 32-bit - see spec issue #3226 - uint16_t deviceType = 0; - char deviceName[kMaxDeviceNameLen + 1] = {}; - uint8_t rotatingId[kMaxRotatingIdLen] = {}; - size_t rotatingIdLen = 0; - uint16_t pairingHint = 0; - char pairingInstruction[kMaxPairingInstructionLen + 1] = {}; + uint32_t deviceType = 0; + char deviceName[kMaxDeviceNameLen + 1] = {}; + uint8_t rotatingId[kMaxRotatingIdLen] = {}; + size_t rotatingIdLen = 0; + uint16_t pairingHint = 0; + char pairingInstruction[kMaxPairingInstructionLen + 1] = {}; CommissionNodeData() {} @@ -190,7 +189,7 @@ struct CommissionNodeData } if (deviceType > 0) { - ChipLogDetail(Discovery, "\tDevice Type: %u", deviceType); + ChipLogDetail(Discovery, "\tDevice Type: %" PRIu32, deviceType); } if (longDiscriminator > 0) { diff --git a/src/lib/dnssd/ServiceNaming.cpp b/src/lib/dnssd/ServiceNaming.cpp index 16aedd8211707a..25bdfbf2cbf341 100644 --- 
a/src/lib/dnssd/ServiceNaming.cpp +++ b/src/lib/dnssd/ServiceNaming.cpp @@ -84,7 +84,7 @@ CHIP_ERROR MakeHostName(char * buffer, size_t bufferLen, const chip::ByteSpan & { ReturnErrorCodeIf(bufferLen < macOrEui64.size() * 2 + 1, CHIP_ERROR_BUFFER_TOO_SMALL); - size_t idx = 0; + int idx = 0; for (size_t i = 0; i < macOrEui64.size(); ++i) { idx += snprintf(buffer + idx, 3, "%02X", macOrEui64.data()[i]); @@ -94,7 +94,7 @@ CHIP_ERROR MakeHostName(char * buffer, size_t bufferLen, const chip::ByteSpan & CHIP_ERROR MakeServiceSubtype(char * buffer, size_t bufferLen, DiscoveryFilter subtype) { - size_t requiredSize; + int requiredSize; switch (subtype.type) { case DiscoveryFilterType::kShortDiscriminator: @@ -135,7 +135,7 @@ CHIP_ERROR MakeServiceSubtype(char * buffer, size_t bufferLen, DiscoveryFilter s break; case DiscoveryFilterType::kCompressedFabricId: requiredSize = snprintf(buffer, bufferLen, "_I"); - return Encoding::Uint64ToHex(subtype.code, &buffer[requiredSize], bufferLen - requiredSize, + return Encoding::Uint64ToHex(subtype.code, &buffer[requiredSize], bufferLen - static_cast(requiredSize), Encoding::HexFlags::kUppercaseAndNullTerminate); break; case DiscoveryFilterType::kInstanceName: @@ -146,12 +146,12 @@ CHIP_ERROR MakeServiceSubtype(char * buffer, size_t bufferLen, DiscoveryFilter s buffer[0] = '\0'; break; } - return (requiredSize <= (bufferLen - 1)) ? CHIP_NO_ERROR : CHIP_ERROR_NO_MEMORY; + return (static_cast(requiredSize) <= (bufferLen - 1)) ? CHIP_NO_ERROR : CHIP_ERROR_NO_MEMORY; } CHIP_ERROR MakeServiceTypeName(char * buffer, size_t bufferLen, DiscoveryFilter nameDesc, DiscoveryType type) { - size_t requiredSize; + int requiredSize; if (nameDesc.type == DiscoveryFilterType::kNone) { if (type == DiscoveryType::kCommissionableNode) @@ -192,7 +192,7 @@ CHIP_ERROR MakeServiceTypeName(char * buffer, size_t bufferLen, DiscoveryFilter } } - return (requiredSize <= (bufferLen - 1)) ? 
CHIP_NO_ERROR : CHIP_ERROR_NO_MEMORY; + return (static_cast(requiredSize) <= (bufferLen - 1)) ? CHIP_NO_ERROR : CHIP_ERROR_NO_MEMORY; } } // namespace Dnssd diff --git a/src/lib/dnssd/TxtFields.cpp b/src/lib/dnssd/TxtFields.cpp index 33840b9cd2ced5..a09efaaf5aa4b9 100644 --- a/src/lib/dnssd/TxtFields.cpp +++ b/src/lib/dnssd/TxtFields.cpp @@ -26,6 +26,7 @@ #include #include +#include #include #include #include @@ -49,9 +50,11 @@ bool IsKey(const ByteSpan & key, const char * desired) { return false; } + + auto desired_bytes = Uint8::from_const_char(desired); for (size_t i = 0; i < key.size(); ++i) { - if (SafeToLower(key.data()[i]) != SafeToLower(desired[i])) + if (SafeToLower(key.data()[i]) != SafeToLower(desired_bytes[i])) { return false; } @@ -104,8 +107,8 @@ bool MakeBoolFromAsciiDecimal(const ByteSpan & val) size_t GetPlusSignIdx(const ByteSpan & value) { - // Fist value is the vendor id, second (after the +) is the product. - for (int i = 0; i < static_cast(value.size()); ++i) + // First value is the vendor id, second (after the +) is the product. 
+ for (size_t i = 0; i < value.size(); ++i) { if (static_cast(value.data()[i]) == '+') { diff --git a/src/lib/dnssd/minimal_mdns/BUILD.gn b/src/lib/dnssd/minimal_mdns/BUILD.gn index 927b881f2aabfc..bf0ae8abef3ae4 100644 --- a/src/lib/dnssd/minimal_mdns/BUILD.gn +++ b/src/lib/dnssd/minimal_mdns/BUILD.gn @@ -92,6 +92,8 @@ static_library("default_policy") { ":address_policy", "${chip_root}/src/inet", ] + + cflags = [ "-Wconversion" ] } if (current_os == "linux" && chip_minmdns_default_policy == "libnl") { @@ -156,4 +158,6 @@ static_library("minimal_mdns") { } public_configs = [ ":config" ] + + cflags = [ "-Wconversion" ] } diff --git a/src/lib/dnssd/minimal_mdns/Parser.cpp b/src/lib/dnssd/minimal_mdns/Parser.cpp index 8bc3e9e1af3bb4..4cd341da977677 100644 --- a/src/lib/dnssd/minimal_mdns/Parser.cpp +++ b/src/lib/dnssd/minimal_mdns/Parser.cpp @@ -75,7 +75,7 @@ bool QueryData::Append(HeaderRef & hdr, RecordWriter & out) const out.WriteQName(GetName()) .Put16(static_cast(mType)) - .Put16(static_cast(mClass) | (mAnswerViaUnicast ? kQClassUnicastAnswerFlag : 0)); + .Put16(static_cast(static_cast(mClass) | (mAnswerViaUnicast ? 
kQClassUnicastAnswerFlag : 0))); if (!out.Fit()) { diff --git a/src/lib/dnssd/minimal_mdns/ResponseSender.cpp b/src/lib/dnssd/minimal_mdns/ResponseSender.cpp index ef530355c02ed3..722ca973902fb3 100644 --- a/src/lib/dnssd/minimal_mdns/ResponseSender.cpp +++ b/src/lib/dnssd/minimal_mdns/ResponseSender.cpp @@ -100,7 +100,7 @@ bool ResponseSender::HasQueryResponders() const return false; } -CHIP_ERROR ResponseSender::Respond(uint32_t messageId, const QueryData & query, const chip::Inet::IPPacketInfo * querySource, +CHIP_ERROR ResponseSender::Respond(uint16_t messageId, const QueryData & query, const chip::Inet::IPPacketInfo * querySource, const ResponseConfiguration & configuration) { mSendState.Reset(messageId, query, querySource); diff --git a/src/lib/dnssd/minimal_mdns/ResponseSender.h b/src/lib/dnssd/minimal_mdns/ResponseSender.h index cf1b80f5d97f65..636acaaf7acf14 100644 --- a/src/lib/dnssd/minimal_mdns/ResponseSender.h +++ b/src/lib/dnssd/minimal_mdns/ResponseSender.h @@ -53,7 +53,7 @@ class ResponseSendingState public: ResponseSendingState() {} - void Reset(uint32_t messageId, const QueryData & query, const chip::Inet::IPPacketInfo * packet) + void Reset(uint16_t messageId, const QueryData & query, const chip::Inet::IPPacketInfo * packet) { mMessageId = messageId; mQuery = &query; @@ -72,7 +72,7 @@ class ResponseSendingState } CHIP_ERROR GetError() const { return mSendError; } - uint32_t GetMessageId() const { return mMessageId; } + uint16_t GetMessageId() const { return mMessageId; } const QueryData * GetQuery() const { return mQuery; } @@ -91,7 +91,7 @@ class ResponseSendingState private: const QueryData * mQuery = nullptr; // query being replied to const chip::Inet::IPPacketInfo * mSource = nullptr; // Where to send the reply (if unicast) - uint32_t mMessageId = 0; // message id for the reply + uint16_t mMessageId = 0; // message id for the reply ResourceType mResourceType = ResourceType::kAnswer; // what is being sent right now CHIP_ERROR mSendError = 
CHIP_NO_ERROR; }; @@ -112,7 +112,7 @@ class ResponseSender : public ResponderDelegate bool HasQueryResponders() const; /// Send back the response to a particular query - CHIP_ERROR Respond(uint32_t messageId, const QueryData & query, const chip::Inet::IPPacketInfo * querySource, + CHIP_ERROR Respond(uint16_t messageId, const QueryData & query, const chip::Inet::IPPacketInfo * querySource, const ResponseConfiguration & configuration); // Implementation of ResponderDelegate diff --git a/src/lib/dnssd/minimal_mdns/core/QName.cpp b/src/lib/dnssd/minimal_mdns/core/QName.cpp index 94a15bb4e8bba8..2cc6488cfb1503 100644 --- a/src/lib/dnssd/minimal_mdns/core/QName.cpp +++ b/src/lib/dnssd/minimal_mdns/core/QName.cpp @@ -60,7 +60,7 @@ bool SerializedQNameIterator::Next(bool followIndirectPointers) return false; } - size_t offset = ((*mCurrentPosition & 0x3F) << 8) | *(mCurrentPosition + 1); + size_t offset = static_cast(((*mCurrentPosition & 0x3F) << 8) | *(mCurrentPosition + 1)); if (offset > mLookBehindMax) { // Potential infinite recursion. 
diff --git a/src/lib/dnssd/minimal_mdns/core/RecordWriter.cpp b/src/lib/dnssd/minimal_mdns/core/RecordWriter.cpp index c03bb32990506f..0249d80d6e35ff 100644 --- a/src/lib/dnssd/minimal_mdns/core/RecordWriter.cpp +++ b/src/lib/dnssd/minimal_mdns/core/RecordWriter.cpp @@ -135,7 +135,7 @@ void RecordWriter::RememberWrittenQnameOffset(size_t offset) { if (previousName == kInvalidOffset) { - previousName = offset; + previousName = static_cast(offset); return; } } diff --git a/src/lib/dnssd/minimal_mdns/core/RecordWriter.h b/src/lib/dnssd/minimal_mdns/core/RecordWriter.h index 0356f7e4b2643e..35e563fe870ec2 100644 --- a/src/lib/dnssd/minimal_mdns/core/RecordWriter.h +++ b/src/lib/dnssd/minimal_mdns/core/RecordWriter.h @@ -110,7 +110,7 @@ class RecordWriter { if (previous == name) { - return chip::Optional::Value(previous.OffsetInCurrentValidData()); + return chip::MakeOptional(static_cast(previous.OffsetInCurrentValidData())); } if (!previous.Next()) diff --git a/src/messaging/tests/echo/echo_requester.cpp b/src/messaging/tests/echo/echo_requester.cpp index 58b827d7914d53..656f8d6e78e8a6 100644 --- a/src/messaging/tests/echo/echo_requester.cpp +++ b/src/messaging/tests/echo/echo_requester.cpp @@ -184,7 +184,7 @@ void HandleEchoResponseReceived(chip::Messaging::ExchangeContext * ec, chip::Sys gEchoRespCount++; printf("Echo Response: %" PRIu64 "/%" PRIu64 "(%.2f%%) len=%u time=%.3fs\n", gEchoRespCount, gEchoCount, - static_cast(gEchoRespCount) * 100 / gEchoCount, payload->DataLength(), + static_cast(gEchoRespCount) * 100 / static_cast(gEchoCount), payload->DataLength(), static_cast(chip::System::Clock::Milliseconds32(transitTime).count()) / 1000); } diff --git a/src/platform/Darwin/BUILD.gn b/src/platform/Darwin/BUILD.gn index 1a23fc1d1ea991..5188068b7a3036 100644 --- a/src/platform/Darwin/BUILD.gn +++ b/src/platform/Darwin/BUILD.gn @@ -36,7 +36,10 @@ config("darwin_config") { ] } - cflags = [ "-fobjc-arc" ] + cflags = [ + "-fobjc-arc", + "-Wconversion", + ] } 
static_library("Darwin") { @@ -140,6 +143,9 @@ static_library("logging") { ] configs += [ "${chip_root}/src:includes" ] - cflags = [ "-fobjc-arc" ] + cflags = [ + "-fobjc-arc", + "-Wconversion", + ] frameworks = [ "Foundation.framework" ] } diff --git a/src/platform/Darwin/BleConnectionDelegateImpl.mm b/src/platform/Darwin/BleConnectionDelegateImpl.mm index 6858594620257c..004a33bfb0e963 100644 --- a/src/platform/Darwin/BleConnectionDelegateImpl.mm +++ b/src/platform/Darwin/BleConnectionDelegateImpl.mm @@ -175,7 +175,8 @@ - (void)resetTimer { auto timeout = [self hasDiscriminator] ? kScanningWithDiscriminatorTimeoutInSeconds : kScanningWithoutDiscriminatorTimeoutInSeconds; - dispatch_source_set_timer(_timer, dispatch_walltime(nullptr, timeout * NSEC_PER_SEC), DISPATCH_TIME_FOREVER, 5 * NSEC_PER_SEC); + dispatch_source_set_timer( + _timer, dispatch_walltime(nullptr, static_cast(timeout * NSEC_PER_SEC)), DISPATCH_TIME_FOREVER, 5 * NSEC_PER_SEC); } // All our callback dispatch must happen on _chipWorkQueue diff --git a/src/platform/Darwin/ConnectivityManagerImpl.cpp b/src/platform/Darwin/ConnectivityManagerImpl.cpp index e78ca7dc5be8c0..e0a7711e18bbfb 100644 --- a/src/platform/Darwin/ConnectivityManagerImpl.cpp +++ b/src/platform/Darwin/ConnectivityManagerImpl.cpp @@ -24,6 +24,7 @@ #include #include +#include #include #include @@ -80,6 +81,10 @@ CHIP_ERROR ConnectivityManagerImpl::GetEthernetInterfaceName(char * outName, siz { CHIP_ERROR err = CHIP_ERROR_NOT_IMPLEMENTED; #if TARGET_OS_OSX + if (!CanCastTo(maxLen)) + { + return CHIP_ERROR_INVALID_ARGUMENT; + } CFArrayRef interfaces = SCNetworkInterfaceCopyAll(); VerifyOrReturnError(interfaces != nullptr, CHIP_ERROR_INTERNAL); @@ -102,7 +107,7 @@ CHIP_ERROR ConnectivityManagerImpl::GetEthernetInterfaceName(char * outName, siz continue; } - if (!CFStringGetCString(interfaceName, outName, maxLen, kCFStringEncodingUTF8)) + if (!CFStringGetCString(interfaceName, outName, static_cast(maxLen), kCFStringEncodingUTF8)) { 
continue; } diff --git a/src/platform/Darwin/DnssdHostNameRegistrar.h b/src/platform/Darwin/DnssdHostNameRegistrar.h index ae6b7efa0f937b..c3f44884767b14 100644 --- a/src/platform/Darwin/DnssdHostNameRegistrar.h +++ b/src/platform/Darwin/DnssdHostNameRegistrar.h @@ -51,7 +51,8 @@ namespace Dnssd { auto interfaceAddress = static_cast(&interface.second); auto interfaceAddressLen = sizeof(interface.second); - LogErrorOnFailure(RegisterInterface(interfaceId, type, interfaceAddress, interfaceAddressLen)); + LogErrorOnFailure( + RegisterInterface(interfaceId, type, interfaceAddress, static_cast(interfaceAddressLen))); } } diff --git a/src/platform/Darwin/WiFi/NetworkCommissioningWiFiDriver.mm b/src/platform/Darwin/WiFi/NetworkCommissioningWiFiDriver.mm index a393e79996f9c9..b1b9a763efe1c6 100644 --- a/src/platform/Darwin/WiFi/NetworkCommissioningWiFiDriver.mm +++ b/src/platform/Darwin/WiFi/NetworkCommissioningWiFiDriver.mm @@ -15,6 +15,7 @@ * limitations under the License. */ +#include #include #include #include @@ -72,18 +73,23 @@ bool Next(WiFiScanResponse & scanResponse) override void CopyNetworkInformationTo(WiFiScanResponse & destination, CWNetwork * source) { destination.security = GetWiFiSecurity(source); - destination.ssidLen = [source.ssid length]; - destination.channel = source.wlanChannel.channelNumber; + destination.channel = static_cast(source.wlanChannel.channelNumber); destination.wiFiBand = GetWiFiBand(source.wlanChannel); - destination.rssi = source.rssiValue; - CopyStringTo(destination.ssid, source.ssid, DeviceLayer::Internal::kMaxWiFiSSIDLength); - CopyStringTo(destination.bssid, source.bssid, DeviceLayer::Internal::kWiFiBSSIDLength); - } - - void CopyStringTo(uint8_t * destination, NSString * source, size_t maxLength) - { - NSData * data = [source dataUsingEncoding:NSUTF8StringEncoding]; - memcpy(destination, [data bytes], std::min([data length], maxLength)); + destination.rssi = static_cast(source.rssiValue); + + NSData * ssidData = source.ssidData; 
+ destination.ssidLen = static_cast(std::min(ssidData.length, DeviceLayer::Internal::kMaxWiFiSSIDLength)); + memcpy(destination.ssid, ssidData.bytes, destination.ssidLen); + + // source.bssid looks like "00:00:00:00:00:00" if it's not nil. + NSString * bssid = source.bssid; + // 3 chars per byte, except the last byte. + if (bssid.length == 3 * sizeof(destination.bssid) - 1) { + const char * chars = bssid.UTF8String; + for (size_t i = 0; i < sizeof(destination.bssid); ++i) { + Encoding::HexToBytes(&chars[3 * i], 2, &destination.bssid[i], 1); + } + } } WiFiSecurity GetWiFiSecurity(CWNetwork * network) diff --git a/src/platform/ESP32/BUILD.gn b/src/platform/ESP32/BUILD.gn index 59ab8d1dcafb1c..29d3464f6fb824 100644 --- a/src/platform/ESP32/BUILD.gn +++ b/src/platform/ESP32/BUILD.gn @@ -123,4 +123,6 @@ static_library("ESP32") { "ESP32DeviceInfoProvider.h", ] } + + cflags = [ "-Wconversion" ] } diff --git a/src/platform/ESP32/DiagnosticDataProviderImpl.cpp b/src/platform/ESP32/DiagnosticDataProviderImpl.cpp index e71a6a2386a954..4ac4bebca424ec 100644 --- a/src/platform/ESP32/DiagnosticDataProviderImpl.cpp +++ b/src/platform/ESP32/DiagnosticDataProviderImpl.cpp @@ -236,7 +236,18 @@ CHIP_ERROR DiagnosticDataProviderImpl::GetNetworkInterfaces(NetworkInterface ** ifp->Ipv4AddressSpans[0] = ByteSpan(ifp->Ipv4AddressesBuffer[0], kMaxIPv4AddrSize); ifp->IPv4Addresses = chip::app::DataModel::List(ifp->Ipv4AddressSpans, 1); } - ipv6_addr_count = esp_netif_get_all_ip6(ifa, ip6_addr); + + static_assert(kMaxIPv6AddrCount <= UINT8_MAX, "Count might not fit in ipv6_addr_count"); + static_assert(ArraySize(ip6_addr) >= LWIP_IPV6_NUM_ADDRESSES, "Not enough space for our addresses."); + auto addr_count = esp_netif_get_all_ip6(ifa, ip6_addr); + if (addr_count < 0) + { + ipv6_addr_count = 0; + } + else + { + ipv6_addr_count = static_cast(min(addr_count, static_cast(kMaxIPv6AddrCount))); + } for (uint8_t idx = 0; idx < ipv6_addr_count; ++idx) { memcpy(ifp->Ipv6AddressesBuffer[idx], 
ip6_addr[idx].addr, kMaxIPv6AddrSize); diff --git a/src/platform/ESP32/DnssdImpl.h b/src/platform/ESP32/DnssdImpl.h index 2c3ac69e42b5d6..7f84eebb0fcbd9 100644 --- a/src/platform/ESP32/DnssdImpl.h +++ b/src/platform/ESP32/DnssdImpl.h @@ -104,7 +104,7 @@ struct ResolveContext : public GenericContext ResolveContext(DnssdService * service, Inet::InterfaceId ifId, mdns_search_once_t * searchHandle, DnssdResolveCallback cb, void * cbCtx) { - Platform::CopyString(mType, type); + Platform::CopyString(mType, service->mType); Platform::CopyString(mInstanceName, service->mName); mContextType = ContextType::Resolve; mProtocol = service->mProtocol; diff --git a/src/platform/ESP32/ESP32DeviceInfoProvider.cpp b/src/platform/ESP32/ESP32DeviceInfoProvider.cpp index bacae0f9449395..6cc130b8aa21c9 100644 --- a/src/platform/ESP32/ESP32DeviceInfoProvider.cpp +++ b/src/platform/ESP32/ESP32DeviceInfoProvider.cpp @@ -241,7 +241,7 @@ ESP32DeviceInfoProvider::SupportedCalendarTypesIteratorImpl::SupportedCalendarTy size_t ESP32DeviceInfoProvider::SupportedCalendarTypesIteratorImpl::Count() { size_t count = 0; - for (uint8_t i = 0; i < to_underlying(CalendarType::kUnknownEnumValue); i++) + for (uint8_t i = 0; i < to_underlying(CalendarTypeEnum::kUnknownEnumValue); i++) { if (mSupportedCalendarTypes & (1 << i)) { @@ -254,7 +254,7 @@ size_t ESP32DeviceInfoProvider::SupportedCalendarTypesIteratorImpl::Count() bool ESP32DeviceInfoProvider::SupportedCalendarTypesIteratorImpl::Next(CalendarType & output) { - while (mIndex < to_underlying(CalendarType::kUnknownEnumValue)) + while (mIndex < to_underlying(CalendarTypeEnum::kUnknownEnumValue)) { if (mSupportedCalendarTypes & (1 << mIndex)) { diff --git a/src/platform/ESP32/NetworkCommissioningDriver.cpp b/src/platform/ESP32/NetworkCommissioningDriver.cpp index c549b1b556d035..148704eeeeef2a 100644 --- a/src/platform/ESP32/NetworkCommissioningDriver.cpp +++ b/src/platform/ESP32/NetworkCommissioningDriver.cpp @@ -47,7 +47,9 @@ CHIP_ERROR 
GetConfiguredNetwork(Network & network) { return chip::DeviceLayer::Internal::ESP32Utils::MapError(err); } - uint8_t length = strnlen(reinterpret_cast(ap_info.ssid), DeviceLayer::Internal::kMaxWiFiSSIDLength); + static_assert(chip::DeviceLayer::Internal::kMaxWiFiSSIDLength <= UINT8_MAX, "SSID length might not fit in length"); + uint8_t length = + static_cast(strnlen(reinterpret_cast(ap_info.ssid), DeviceLayer::Internal::kMaxWiFiSSIDLength)); if (length > sizeof(network.networkID)) { return CHIP_ERROR_INTERNAL; @@ -75,8 +77,17 @@ CHIP_ERROR ESPWiFiDriver::Init(NetworkStatusChangeCallback * networkStatusChange { return CHIP_NO_ERROR; } - mSavedNetwork.credentialsLen = credentialsLen; - mSavedNetwork.ssidLen = ssidLen; + if (!CanCastTo(credentialsLen)) + { + return CHIP_ERROR_INCORRECT_STATE; + } + mSavedNetwork.credentialsLen = static_cast(credentialsLen); + + if (!CanCastTo(ssidLen)) + { + return CHIP_ERROR_INCORRECT_STATE; + } + mSavedNetwork.ssidLen = static_cast(ssidLen); mStagingNetwork = mSavedNetwork; mpScanCallback = nullptr; @@ -359,9 +370,14 @@ void ESPWiFiDriver::OnNetworkStatusChange() Status::kSuccess, MakeOptional(ByteSpan(configuredNetwork.networkID, configuredNetwork.networkIDLen)), NullOptional); return; } + + // The disconnect reason for networking status changes is allowed to have + // manufacturer-specific values, which is why it's an int32_t, even though + // we just store a uint16_t value in it. 
+ int32_t lastDisconnectReason = GetLastDisconnectReason(); mpStatusChangeCallback->OnNetworkingStatusChange( Status::kUnknownError, MakeOptional(ByteSpan(configuredNetwork.networkID, configuredNetwork.networkIDLen)), - MakeOptional(GetLastDisconnectReason())); + MakeOptional(lastDisconnectReason)); } void ESPWiFiDriver::ScanNetworks(ByteSpan ssid, WiFiDriver::ScanCallback * callback) @@ -386,7 +402,7 @@ CHIP_ERROR ESPWiFiDriver::SetLastDisconnectReason(const ChipDeviceEvent * event) return CHIP_NO_ERROR; } -int32_t ESPWiFiDriver::GetLastDisconnectReason() +uint16_t ESPWiFiDriver::GetLastDisconnectReason() { return mLastDisconnectedReason; } diff --git a/src/platform/ESP32/NetworkCommissioningDriver.h b/src/platform/ESP32/NetworkCommissioningDriver.h index d2c6bb160e745d..c6175938d76478 100644 --- a/src/platform/ESP32/NetworkCommissioningDriver.h +++ b/src/platform/ESP32/NetworkCommissioningDriver.h @@ -41,8 +41,9 @@ class ESPScanResponseIterator : public Iterator } item.security.SetRaw(mpScanResults[mIternum].authmode); - item.ssidLen = - strnlen(reinterpret_cast(mpScanResults[mIternum].ssid), chip::DeviceLayer::Internal::kMaxWiFiSSIDLength); + static_assert(chip::DeviceLayer::Internal::kMaxWiFiSSIDLength <= UINT8_MAX, "SSID length might not fit in item.ssidLen"); + item.ssidLen = static_cast( + strnlen(reinterpret_cast(mpScanResults[mIternum].ssid), chip::DeviceLayer::Internal::kMaxWiFiSSIDLength)); item.channel = mpScanResults[mIternum].primary; item.wiFiBand = chip::DeviceLayer::NetworkCommissioning::WiFiBand::k2g4; item.rssi = mpScanResults[mIternum].rssi; @@ -115,7 +116,7 @@ class ESPWiFiDriver final : public WiFiDriver void OnNetworkStatusChange(); CHIP_ERROR SetLastDisconnectReason(const ChipDeviceEvent * event); - int32_t GetLastDisconnectReason(); + uint16_t GetLastDisconnectReason(); static ESPWiFiDriver & GetInstance() { @@ -132,7 +133,7 @@ class ESPWiFiDriver final : public WiFiDriver ScanCallback * mpScanCallback; ConnectCallback * mpConnectCallback; 
NetworkStatusChangeCallback * mpStatusChangeCallback = nullptr; - int32_t mLastDisconnectedReason; + uint16_t mLastDisconnectedReason; }; } // namespace NetworkCommissioning diff --git a/src/platform/ESP32/route_hook/ESP32RouteHook.c b/src/platform/ESP32/route_hook/ESP32RouteHook.c index c3883c86856107..a63df118619ed3 100644 --- a/src/platform/ESP32/route_hook/ESP32RouteHook.c +++ b/src/platform/ESP32/route_hook/ESP32RouteHook.c @@ -57,12 +57,12 @@ static void ra_recv_handler(struct netif * netif, const uint8_t * icmp_payload, return; } icmp_payload += sizeof(struct ra_header); - payload_len -= sizeof(struct ra_header); + payload_len = (uint16_t)(payload_len - sizeof(struct ra_header)); while (payload_len >= 2) { uint8_t opt_type = icmp_payload[0]; - uint8_t opt_len = icmp_payload[1] << 3; + uint8_t opt_len = (uint8_t)(icmp_payload[1] << 3); if (opt_type == ND6_OPTION_TYPE_ROUTE_INFO && opt_len >= sizeof(route_option_t) - sizeof(ip6_addr_p_t) && !is_self_address(netif, src_addr) && payload_len >= opt_len) @@ -75,9 +75,9 @@ static void ra_recv_handler(struct netif * netif, const uint8_t * icmp_payload, { break; } - uint8_t prefix_len_bytes = (route_option.prefix_length + 7) / 8; - int8_t preference = -2 * ((route_option.preference >> 4) & 1) + (((route_option.preference) >> 3) & 1); - uint8_t rio_data_len = opt_len - sizeof(route_option) + sizeof(ip6_addr_p_t); + uint8_t prefix_len_bytes = (uint8_t)((route_option.prefix_length + 7) / 8); + int8_t preference = (int8_t)(-2 * ((route_option.preference >> 4) & 1) + (((route_option.preference) >> 3) & 1)); + uint8_t rio_data_len = (uint8_t)(opt_len - sizeof(route_option) + sizeof(ip6_addr_p_t)); ESP_LOGI(TAG, "Received RIO"); if (rio_data_len >= prefix_len_bytes) @@ -101,7 +101,7 @@ static void ra_recv_handler(struct netif * netif, const uint8_t * icmp_payload, } } icmp_payload += opt_len; - payload_len -= opt_len; + payload_len = (uint16_t)(payload_len - opt_len); } } @@ -136,7 +136,7 @@ static uint8_t 
icmp6_raw_recv_handler(void * arg, struct raw_pcb * pcb, struct p return 0; } - icmp_payload_len = p->tot_len - sizeof(struct ip6_hdr); + icmp_payload_len = (uint16_t)(p->tot_len - sizeof(struct ip6_hdr)); icmp_payload = p->payload + sizeof(struct ip6_hdr); icmp6_header = (struct icmp6_hdr *) icmp_payload; @@ -155,7 +155,12 @@ esp_err_t esp_route_hook_init(esp_netif_t * netif) esp_err_t ret = ESP_OK; ESP_RETURN_ON_FALSE(netif != NULL, ESP_ERR_INVALID_ARG, TAG, "Invalid network interface"); - lwip_netif = netif_get_by_index(esp_netif_get_netif_impl_index(netif)); + int netif_idx = esp_netif_get_netif_impl_index(netif); + if (netif_idx < 0 || netif_idx > UINT8_MAX) + { + return ESP_ERR_INVALID_SIZE; + } + lwip_netif = netif_get_by_index((uint8_t) netif_idx); ESP_RETURN_ON_FALSE(lwip_netif != NULL, ESP_ERR_INVALID_ARG, TAG, "Invalid network interface"); for (esp_route_hook_t * iter = s_hooks; iter != NULL; iter++) diff --git a/src/platform/Linux/BLEManagerImpl.cpp b/src/platform/Linux/BLEManagerImpl.cpp index 2c2a8794c66105..c9ae6ae36bf771 100644 --- a/src/platform/Linux/BLEManagerImpl.cpp +++ b/src/platform/Linux/BLEManagerImpl.cpp @@ -790,7 +790,7 @@ void BLEManagerImpl::OnDeviceScanned(BluezDevice1 * device, const chip::Ble::Chi } else { - // Internal consistency eerror + // Internal consistency error ChipLogError(Ble, "Unknown discovery type. 
Ignoring scanned device."); return; } @@ -804,15 +804,24 @@ void BLEManagerImpl::OnDeviceScanned(BluezDevice1 * device, const chip::Ble::Chi void BLEManagerImpl::OnScanComplete() { - if (mBLEScanConfig.mBleScanState != BleScanState::kScanForDiscriminator && - mBLEScanConfig.mBleScanState != BleScanState::kScanForAddress) + switch (mBLEScanConfig.mBleScanState) { + case BleScanState::kNotScanning: ChipLogProgress(Ble, "Scan complete notification without an active scan."); - return; + break; + case BleScanState::kScanForAddress: + case BleScanState::kScanForDiscriminator: + mBLEScanConfig.mBleScanState = BleScanState::kNotScanning; + ChipLogProgress(Ble, "Scan complete. No matching device found."); + break; + case BleScanState::kConnecting: + break; } +} - BleConnectionDelegate::OnConnectionError(mBLEScanConfig.mAppState, CHIP_ERROR_TIMEOUT); - mBLEScanConfig.mBleScanState = BleScanState::kNotScanning; +void BLEManagerImpl::OnScanError(CHIP_ERROR err) +{ + ChipLogError(Ble, "BLE scan error: %" CHIP_ERROR_FORMAT, err.Format()); } } // namespace Internal diff --git a/src/platform/Linux/BLEManagerImpl.h b/src/platform/Linux/BLEManagerImpl.h index 8c0d06b066cbd8..ef7cb53ede7a50 100644 --- a/src/platform/Linux/BLEManagerImpl.h +++ b/src/platform/Linux/BLEManagerImpl.h @@ -91,6 +91,7 @@ class BLEManagerImpl final : public BLEManager, public: CHIP_ERROR ConfigureBle(uint32_t aAdapterId, bool aIsCentral); + void OnScanError(CHIP_ERROR error) override; // Driven by BlueZ IO static void HandleNewConnection(BLE_CONNECTION_OBJECT conId); diff --git a/src/platform/Linux/NetworkCommissioningWiFiDriver.cpp b/src/platform/Linux/NetworkCommissioningWiFiDriver.cpp index fa7b8322cff5ce..d1d40dbe3e3864 100644 --- a/src/platform/Linux/NetworkCommissioningWiFiDriver.cpp +++ b/src/platform/Linux/NetworkCommissioningWiFiDriver.cpp @@ -156,7 +156,7 @@ void LinuxWiFiDriver::ConnectNetwork(ByteSpan networkId, ConnectCallback * callb VerifyOrExit(NetworkMatch(mStagingNetwork, networkId), 
networkingStatus = Status::kNetworkIDNotFound); - ChipLogProgress(NetworkProvisioning, "LinuxNetworkCommissioningDelegate: SSID: %s", + ChipLogProgress(NetworkProvisioning, "LinuxWiFiDriver: SSID: %.*s", static_cast(networkId.size()), StringOrNullMarker((char *) networkId.data())); err = ConnectivityMgrImpl().ConnectWiFiNetworkAsync(ByteSpan(mStagingNetwork.ssid, mStagingNetwork.ssidLen), diff --git a/src/platform/Linux/bluez/ChipDeviceScanner.cpp b/src/platform/Linux/bluez/ChipDeviceScanner.cpp index 46e17ce5b17089..04c522ba34aa66 100644 --- a/src/platform/Linux/bluez/ChipDeviceScanner.cpp +++ b/src/platform/Linux/bluez/ChipDeviceScanner.cpp @@ -150,7 +150,9 @@ CHIP_ERROR ChipDeviceScanner::StartScan(System::Clock::Timeout timeout) void ChipDeviceScanner::TimerExpiredCallback(chip::System::Layer * layer, void * appState) { - static_cast(appState)->StopScan(); + ChipDeviceScanner * chipDeviceScanner = static_cast(appState); + chipDeviceScanner->mDelegate->OnScanError(CHIP_ERROR_TIMEOUT); + chipDeviceScanner->StopScan(); } CHIP_ERROR ChipDeviceScanner::StopScan() diff --git a/src/platform/Linux/bluez/ChipDeviceScanner.h b/src/platform/Linux/bluez/ChipDeviceScanner.h index bbe84ac07268ba..f81f923dfb3b47 100644 --- a/src/platform/Linux/bluez/ChipDeviceScanner.h +++ b/src/platform/Linux/bluez/ChipDeviceScanner.h @@ -44,6 +44,9 @@ class ChipDeviceScannerDelegate // Called when a scan was completed (stopped or timed out) virtual void OnScanComplete() = 0; + + // Call on scan error + virtual void OnScanError(CHIP_ERROR) = 0; }; /// Allows scanning for CHIP devices diff --git a/src/platform/OpenThread/GenericThreadStackManagerImpl_OpenThread.cpp b/src/platform/OpenThread/GenericThreadStackManagerImpl_OpenThread.cpp index 3f0410c0cd0b40..3489213552e2f4 100644 --- a/src/platform/OpenThread/GenericThreadStackManagerImpl_OpenThread.cpp +++ b/src/platform/OpenThread/GenericThreadStackManagerImpl_OpenThread.cpp @@ -482,7 +482,8 @@ void 
GenericThreadStackManagerImpl_OpenThread::_OnNetworkScanFinished scanResponse.lqi = aResult->mLqi; scanResponse.extendedAddress = Encoding::BigEndian::Get64(aResult->mExtAddress.m8); scanResponse.extendedPanId = Encoding::BigEndian::Get64(aResult->mExtendedPanId.m8); - scanResponse.networkNameLen = strnlen(aResult->mNetworkName.m8, OT_NETWORK_NAME_MAX_SIZE); + static_assert(OT_NETWORK_NAME_MAX_SIZE <= UINT8_MAX, "Network name length won't fit"); + scanResponse.networkNameLen = static_cast(strnlen(aResult->mNetworkName.m8, OT_NETWORK_NAME_MAX_SIZE)); memcpy(scanResponse.networkName, aResult->mNetworkName.m8, scanResponse.networkNameLen); mScanResponseIter.Add(&scanResponse); @@ -1168,7 +1169,7 @@ CHIP_ERROR GenericThreadStackManagerImpl_OpenThread::_WriteThreadNetw } else { - lastRssi.SetNonNull(((neighInfo.mLastRssi > 0) ? 0 : neighInfo.mLastRssi)); + lastRssi.SetNonNull(min(static_cast(0), neighInfo.mLastRssi)); } neighborTable.averageRssi = averageRssi; diff --git a/src/platform/Tizen/BLEManagerImpl.cpp b/src/platform/Tizen/BLEManagerImpl.cpp index c33326f659a9ee..54527546da655a 100644 --- a/src/platform/Tizen/BLEManagerImpl.cpp +++ b/src/platform/Tizen/BLEManagerImpl.cpp @@ -515,18 +515,26 @@ void BLEManagerImpl::OnChipDeviceScanned(void * device, const Ble::ChipBLEDevice ConnectHandler(deviceInfo->remote_address); } -void BLEManagerImpl::OnChipScanComplete() +void BLEManagerImpl::OnScanComplete() { - if (mBLEScanConfig.mBleScanState != BleScanState::kScanForDiscriminator && - mBLEScanConfig.mBleScanState != BleScanState::kScanForAddress) + switch (mBLEScanConfig.mBleScanState) { - ChipLogProgress(DeviceLayer, "Scan complete notification without an active scan."); - return; + case BleScanState::kNotScanning: + ChipLogProgress(Ble, "Scan complete notification without an active scan."); + break; + case BleScanState::kScanForAddress: + case BleScanState::kScanForDiscriminator: + mBLEScanConfig.mBleScanState = BleScanState::kNotScanning; + ChipLogProgress(Ble, "Scan 
complete. No matching device found."); + break; + case BleScanState::kConnecting: + break; } +} - ChipLogError(DeviceLayer, "Scan Completed with Timeout: Notify Upstream."); - BleConnectionDelegate::OnConnectionError(mBLEScanConfig.mAppState, CHIP_ERROR_TIMEOUT); - mBLEScanConfig.mBleScanState = BleScanState::kNotScanning; +void BLEManagerImpl::OnScanError(CHIP_ERROR err) +{ + ChipLogDetail(Ble, "BLE scan error: %" CHIP_ERROR_FORMAT, err.Format()); } int BLEManagerImpl::RegisterGATTServer() diff --git a/src/platform/Tizen/BLEManagerImpl.h b/src/platform/Tizen/BLEManagerImpl.h index 7723d2dcf62221..a685a85205de65 100644 --- a/src/platform/Tizen/BLEManagerImpl.h +++ b/src/platform/Tizen/BLEManagerImpl.h @@ -138,7 +138,8 @@ class BLEManagerImpl final : public BLEManager, // ===== Members that implement virtual methods on ChipDeviceScannerDelegate void OnChipDeviceScanned(void * device, const Ble::ChipBLEDeviceIdentificationInfo & info) override; - void OnChipScanComplete() override; + void OnScanComplete() override; + void OnScanError(CHIP_ERROR err) override; // ===== Members for internal use by the following friends. 
diff --git a/src/platform/Tizen/ChipDeviceScanner.cpp b/src/platform/Tizen/ChipDeviceScanner.cpp index 204895347cb45c..2fb48203a1ca1e 100644 --- a/src/platform/Tizen/ChipDeviceScanner.cpp +++ b/src/platform/Tizen/ChipDeviceScanner.cpp @@ -242,7 +242,7 @@ CHIP_ERROR ChipDeviceScanner::StopChipScan() UnRegisterScanFilter(); // Report to Impl class - mDelegate->OnChipScanComplete(); + mDelegate->OnScanComplete(); mIsScanning = false; diff --git a/src/platform/Tizen/ChipDeviceScanner.h b/src/platform/Tizen/ChipDeviceScanner.h index f95f4782ece551..ac8049fdd1ecdd 100644 --- a/src/platform/Tizen/ChipDeviceScanner.h +++ b/src/platform/Tizen/ChipDeviceScanner.h @@ -65,7 +65,10 @@ class ChipDeviceScannerDelegate virtual void OnChipDeviceScanned(void * device, const chip::Ble::ChipBLEDeviceIdentificationInfo & info) = 0; // Called when a scan was completed (stopped or timed out) - virtual void OnChipScanComplete(void) = 0; + virtual void OnScanComplete(void) = 0; + + // Called on scan error + virtual void OnScanError(CHIP_ERROR err) = 0; }; /// Allows scanning for CHIP devices diff --git a/src/platform/Zephyr/BUILD.gn b/src/platform/Zephyr/BUILD.gn index 61834d8b12eb52..748bc6e2419994 100644 --- a/src/platform/Zephyr/BUILD.gn +++ b/src/platform/Zephyr/BUILD.gn @@ -76,4 +76,6 @@ static_library("Zephyr") { if (chip_malloc_sys_heap) { sources += [ "SysHeapMalloc.cpp" ] } + + cflags = [ "-Wconversion" ] } diff --git a/src/platform/Zephyr/DiagnosticDataProviderImpl.cpp b/src/platform/Zephyr/DiagnosticDataProviderImpl.cpp index a735d3e0162b79..e7fc5b38428ba8 100644 --- a/src/platform/Zephyr/DiagnosticDataProviderImpl.cpp +++ b/src/platform/Zephyr/DiagnosticDataProviderImpl.cpp @@ -226,7 +226,7 @@ CHIP_ERROR DiagnosticDataProviderImpl::GetTotalOperationalHours(uint32_t & total ReturnErrorOnFailure(ConfigurationMgr().GetTotalOperationalHours(reinterpret_cast(totalHours))); - totalOperationalHours = totalHours + deltaTime < UINT32_MAX ? 
totalHours + deltaTime : UINT32_MAX; + totalOperationalHours = static_cast(totalHours + deltaTime < UINT32_MAX ? totalHours + deltaTime : UINT32_MAX); return CHIP_NO_ERROR; } diff --git a/src/platform/Zephyr/PlatformManagerImpl.cpp b/src/platform/Zephyr/PlatformManagerImpl.cpp index 6cd42773227ed2..0e5aac4bda8dda 100644 --- a/src/platform/Zephyr/PlatformManagerImpl.cpp +++ b/src/platform/Zephyr/PlatformManagerImpl.cpp @@ -27,6 +27,7 @@ #include +#include #include #include #include @@ -48,11 +49,20 @@ static k_timer sOperationalHoursSavingTimer; static int app_entropy_source(void * data, unsigned char * output, size_t len, size_t * olen) { const struct device * entropy = DEVICE_DT_GET(DT_CHOSEN(zephyr_entropy)); - int ret = entropy_get_entropy(entropy, output, len); + uint16_t clampedLen; + if (CanCastTo(len)) + { + clampedLen = static_cast(len); + } + else + { + clampedLen = UINT16_MAX; + } + int ret = entropy_get_entropy(entropy, output, clampedLen); if (ret == 0) { - *olen = len; + *olen = clampedLen; } else { @@ -85,7 +95,7 @@ void PlatformManagerImpl::UpdateOperationalHours(intptr_t arg) if (ConfigurationMgr().GetTotalOperationalHours(reinterpret_cast(totalOperationalHours)) == CHIP_NO_ERROR) { ConfigurationMgr().StoreTotalOperationalHours( - totalOperationalHours + deltaTime < UINT32_MAX ? totalOperationalHours + deltaTime : UINT32_MAX); + static_cast(totalOperationalHours + deltaTime < UINT32_MAX ? 
totalOperationalHours + deltaTime : UINT32_MAX)); sInstance.mSavedOperationalHoursSinceBoot = upTimeH; } else diff --git a/src/platform/android/CommissionableDataProviderImpl.cpp b/src/platform/android/CommissionableDataProviderImpl.cpp index 1c4e9d3b4a336f..8524dbcbecfa88 100644 --- a/src/platform/android/CommissionableDataProviderImpl.cpp +++ b/src/platform/android/CommissionableDataProviderImpl.cpp @@ -34,6 +34,10 @@ using namespace chip::Crypto; namespace { +#ifndef CHIP_DEVICE_CONFIG_USE_TEST_SPAKE2P_ITERATION_COUNT +#define CHIP_DEVICE_CONFIG_USE_TEST_SPAKE2P_ITERATION_COUNT 1000 +#endif + CHIP_ERROR GeneratePaseSalt(std::vector & spake2pSaltVector) { constexpr size_t kSaltLen = kSpake2p_Max_PBKDF_Salt_Length; diff --git a/src/platform/nrfconnect/BUILD.gn b/src/platform/nrfconnect/BUILD.gn index f4a79e118b8da7..40c6f8cac3d292 100644 --- a/src/platform/nrfconnect/BUILD.gn +++ b/src/platform/nrfconnect/BUILD.gn @@ -114,4 +114,6 @@ static_library("nrfconnect") { if (chip_malloc_sys_heap) { sources += [ "../Zephyr/SysHeapMalloc.cpp" ] } + + cflags = [ "-Wconversion" ] } diff --git a/src/platform/nrfconnect/OTAImageProcessorImpl.cpp b/src/platform/nrfconnect/OTAImageProcessorImpl.cpp index b86439f469656c..9d024991f122a0 100644 --- a/src/platform/nrfconnect/OTAImageProcessorImpl.cpp +++ b/src/platform/nrfconnect/OTAImageProcessorImpl.cpp @@ -164,8 +164,16 @@ CHIP_ERROR OTAImageProcessorImpl::ProcessBlock(ByteSpan & aBlock) if (error == CHIP_NO_ERROR) { // DFU target library buffers data internally, so do not clone the block data. 
- error = System::MapErrorZephyr(dfu_multi_image_write(mParams.downloadedBytes, aBlock.data(), aBlock.size())); - mParams.downloadedBytes += aBlock.size(); + if (mParams.downloadedBytes > std::numeric_limits::max()) + { + error = CHIP_ERROR_BUFFER_TOO_SMALL; + } + else + { + error = System::MapErrorZephyr( + dfu_multi_image_write(static_cast(mParams.downloadedBytes), aBlock.data(), aBlock.size())); + mParams.downloadedBytes += aBlock.size(); + } } // Report the result back to the downloader asynchronously. diff --git a/src/platform/nrfconnect/wifi/NrfWiFiDriver.cpp b/src/platform/nrfconnect/wifi/NrfWiFiDriver.cpp index e8e5d10d73ae97..bfb284f3b84851 100644 --- a/src/platform/nrfconnect/wifi/NrfWiFiDriver.cpp +++ b/src/platform/nrfconnect/wifi/NrfWiFiDriver.cpp @@ -48,7 +48,7 @@ bool NrfWiFiDriver::WiFiNetworkIterator::Next(Network & item) } memcpy(item.networkID, mDriver->mStagingNetwork.ssid, mDriver->mStagingNetwork.ssidLen); - item.networkIDLen = mDriver->mStagingNetwork.ssidLen; + item.networkIDLen = static_cast(mDriver->mStagingNetwork.ssidLen); item.connected = false; mExhausted = true; diff --git a/src/platform/nrfconnect/wifi/WiFiManager.cpp b/src/platform/nrfconnect/wifi/WiFiManager.cpp index a47655c0fa9624..6a4585a57723f8 100644 --- a/src/platform/nrfconnect/wifi/WiFiManager.cpp +++ b/src/platform/nrfconnect/wifi/WiFiManager.cpp @@ -252,8 +252,8 @@ CHIP_ERROR WiFiManager::GetWiFiInfo(WiFiInfo & info) const info.mBssId = ByteSpan(mac_string_buf, sizeof(mac_string_buf)); info.mSecurityType = static_cast(status.security); info.mWiFiVersion = static_cast(status.link_mode); - info.mRssi = status.rssi; - info.mChannel = status.channel; + info.mRssi = static_cast(status.rssi); + info.mChannel = static_cast(status.channel); info.mSsidLen = status.ssid_len; memcpy(info.mSsid, status.ssid, status.ssid_len); @@ -291,12 +291,12 @@ void WiFiManager::ScanResultHandler(uint8_t * data) if (scanResult->rssi > Instance().mWiFiParams.mRssi) { 
Instance().ClearStationProvisioningData(); - Instance().mWiFiParams.mParams.ssid_length = Instance().mWantedNetwork.ssidLen; + Instance().mWiFiParams.mParams.ssid_length = static_cast(Instance().mWantedNetwork.ssidLen); Instance().mWiFiParams.mParams.ssid = Instance().mWantedNetwork.ssid; // Fallback to the WIFI_SECURITY_TYPE_PSK if the security is unknown Instance().mWiFiParams.mParams.security = scanResult->security <= WIFI_SECURITY_TYPE_MAX ? scanResult->security : WIFI_SECURITY_TYPE_PSK; - Instance().mWiFiParams.mParams.psk_length = Instance().mWantedNetwork.passLen; + Instance().mWiFiParams.mParams.psk_length = static_cast(Instance().mWantedNetwork.passLen); // If the security is none, WiFi driver expects the psk to be nullptr if (Instance().mWiFiParams.mParams.security == WIFI_SECURITY_TYPE_NONE) diff --git a/src/platform/nxp/mw320/DeviceInfoProviderImpl.cpp b/src/platform/nxp/mw320/DeviceInfoProviderImpl.cpp index 0c8731b6782e9f..0fb5570b294e82 100644 --- a/src/platform/nxp/mw320/DeviceInfoProviderImpl.cpp +++ b/src/platform/nxp/mw320/DeviceInfoProviderImpl.cpp @@ -321,40 +321,40 @@ bool DeviceInfoProviderImpl::SupportedCalendarTypesIteratorImpl::Next(CalendarTy switch (mIndex) { case 0: - output = app::Clusters::TimeFormatLocalization::CalendarType::kBuddhist; + output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kBuddhist; break; case 1: - output = app::Clusters::TimeFormatLocalization::CalendarType::kChinese; + output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kChinese; break; case 2: - output = app::Clusters::TimeFormatLocalization::CalendarType::kCoptic; + output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kCoptic; break; case 3: - output = app::Clusters::TimeFormatLocalization::CalendarType::kEthiopian; + output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kEthiopian; break; case 4: - output = app::Clusters::TimeFormatLocalization::CalendarType::kGregorian; + output = 
app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kGregorian; break; case 5: - output = app::Clusters::TimeFormatLocalization::CalendarType::kHebrew; + output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kHebrew; break; case 6: - output = app::Clusters::TimeFormatLocalization::CalendarType::kIndian; + output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kIndian; break; case 7: - output = app::Clusters::TimeFormatLocalization::CalendarType::kJapanese; + output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kJapanese; break; case 8: - output = app::Clusters::TimeFormatLocalization::CalendarType::kKorean; + output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kKorean; break; case 9: - output = app::Clusters::TimeFormatLocalization::CalendarType::kPersian; + output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kPersian; break; case 10: - output = app::Clusters::TimeFormatLocalization::CalendarType::kTaiwanese; + output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kTaiwanese; break; case 11: - output = app::Clusters::TimeFormatLocalization::CalendarType::kIslamic; + output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kIslamic; break; default: err = CHIP_ERROR_PERSISTED_STORAGE_VALUE_NOT_FOUND; diff --git a/src/platform/nxp/mw320/NetworkCommissioningWiFiDriver.cpp b/src/platform/nxp/mw320/NetworkCommissioningWiFiDriver.cpp index 10b361c8fbd35d..c096a47dd0e785 100644 --- a/src/platform/nxp/mw320/NetworkCommissioningWiFiDriver.cpp +++ b/src/platform/nxp/mw320/NetworkCommissioningWiFiDriver.cpp @@ -160,7 +160,7 @@ void Mw320WiFiDriver::ConnectNetwork(ByteSpan networkId, ConnectCallback * callb VerifyOrExit(NetworkMatch(mStagingNetwork, networkId), networkingStatus = Status::kNetworkIDNotFound); - ChipLogProgress(NetworkProvisioning, "LinuxNetworkCommissioningDelegate: SSID: %s", networkId.data()); + ChipLogProgress(NetworkProvisioning, "Mw320WiFiDriver: SSID: %.*s", 
static_cast(networkId.size()), networkId.data()); err = ConnectivityMgrImpl().ConnectWiFiNetworkAsync(ByteSpan(mStagingNetwork.ssid, mStagingNetwork.ssidLen), ByteSpan(mStagingNetwork.credentials, mStagingNetwork.credentialsLen), diff --git a/src/platform/silabs/SiWx917/BUILD.gn b/src/platform/silabs/SiWx917/BUILD.gn index 7f361f313cd771..d53d04894869ce 100644 --- a/src/platform/silabs/SiWx917/BUILD.gn +++ b/src/platform/silabs/SiWx917/BUILD.gn @@ -27,6 +27,9 @@ if (chip_enable_openthread) { import("//build_overrides/openthread.gni") } +if (chip_crypto == "platform") { + import("//build_overrides/mbedtls.gni") +} static_library("SiWx917") { sources = [ "${silabs_platform_dir}/BLEManagerImpl.h", @@ -67,6 +70,15 @@ static_library("SiWx917") { public_deps = [ "${chip_root}/src/platform:platform_base" ] + # Add platform crypto implementation + if (chip_crypto == "platform") { + sources += [ "CHIPCryptoPALTinyCrypt.cpp" ] + public_deps += [ + "${chip_root}/src/crypto", + "${mbedtls_root}:mbedtls", + ] + } + if (chip_enable_wifi) { sources += [ "${silabs_platform_dir}/ConnectivityManagerImpl_WIFI.cpp", diff --git a/src/platform/silabs/SiWx917/CHIPCryptoPALTinyCrypt.cpp b/src/platform/silabs/SiWx917/CHIPCryptoPALTinyCrypt.cpp new file mode 100644 index 00000000000000..9f88a1937f8eb7 --- /dev/null +++ b/src/platform/silabs/SiWx917/CHIPCryptoPALTinyCrypt.cpp @@ -0,0 +1,1754 @@ +/* + * + * Copyright (c) 2020-2022 Project CHIP Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file + * mbedTLS based implementation of CHIP crypto primitives + */ + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(MBEDTLS_X509_CRT_PARSE_C) +#include +#endif // defined(MBEDTLS_X509_CRT_PARSE_C) +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace chip { +namespace Crypto { + +#define MAX_ERROR_STR_LEN 128 +#define NUM_BYTES_IN_SHA256_HASH 32 + +// In mbedTLS 3.0.0 direct access to structure fields was replaced with using MBEDTLS_PRIVATE macro. +#if (MBEDTLS_VERSION_NUMBER >= 0x03000000) +#define CHIP_CRYPTO_PAL_PRIVATE(x) MBEDTLS_PRIVATE(x) +#else +#define CHIP_CRYPTO_PAL_PRIVATE(x) x +#endif + +#if (MBEDTLS_VERSION_NUMBER >= 0x03000000 && MBEDTLS_VERSION_NUMBER < 0x03010000) +#define CHIP_CRYPTO_PAL_PRIVATE_X509(x) MBEDTLS_PRIVATE(x) +#else +#define CHIP_CRYPTO_PAL_PRIVATE_X509(x) x +#endif + +typedef struct +{ + bool mInitialized; + bool mDRBGSeeded; + mbedtls_ctr_drbg_context mDRBGCtxt; + mbedtls_entropy_context mEntropy; +} EntropyContext; + +static EntropyContext gsEntropyContext; + +static void _log_mbedTLS_error(int error_code) +{ + if (error_code != 0 && error_code != UECC_SUCCESS) + { +#if defined(MBEDTLS_ERROR_C) + char error_str[MAX_ERROR_STR_LEN]; + mbedtls_strerror(error_code, error_str, sizeof(error_str)); + ChipLogError(Crypto, "mbedTLS error: %s", error_str); +#else + // Error codes defined in 16-bit negative hex numbers. 
Ease lookup by printing likewise + ChipLogError(Crypto, "mbedTLS error: -0x%04X", -static_cast(error_code)); +#endif + } +} + +static bool _isValidTagLength(size_t tag_length) +{ + if (tag_length == 8 || tag_length == 12 || tag_length == 16) + { + return true; + } + return false; +} + +CHIP_ERROR AES_CCM_encrypt(const uint8_t * plaintext, size_t plaintext_length, const uint8_t * aad, size_t aad_length, + const Aes128KeyHandle & key, const uint8_t * nonce, size_t nonce_length, uint8_t * ciphertext, + uint8_t * tag, size_t tag_length) +{ + CHIP_ERROR error = CHIP_NO_ERROR; + int result = 1; + + mbedtls_ccm_context context; + mbedtls_ccm_init(&context); + + VerifyOrExit(plaintext != nullptr || plaintext_length == 0, error = CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrExit(ciphertext != nullptr || plaintext_length == 0, error = CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrExit(nonce != nullptr, error = CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrExit(nonce_length > 0, error = CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrExit(tag != nullptr, error = CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrExit(_isValidTagLength(tag_length), error = CHIP_ERROR_INVALID_ARGUMENT); + if (aad_length > 0) + { + VerifyOrExit(aad != nullptr, error = CHIP_ERROR_INVALID_ARGUMENT); + } + + // multiplying by 8 to convert key from bits to byte + result = mbedtls_ccm_setkey(&context, MBEDTLS_CIPHER_ID_AES, key.As(), sizeof(Aes128KeyByteArray) * 8); + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + + // Encrypt + result = mbedtls_ccm_encrypt_and_tag(&context, plaintext_length, Uint8::to_const_uchar(nonce), nonce_length, + Uint8::to_const_uchar(aad), aad_length, Uint8::to_const_uchar(plaintext), + Uint8::to_uchar(ciphertext), Uint8::to_uchar(tag), tag_length); + _log_mbedTLS_error(result); + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + +exit: + mbedtls_ccm_free(&context); + return error; +} + +CHIP_ERROR AES_CCM_decrypt(const uint8_t * ciphertext, size_t ciphertext_len, const uint8_t * aad, size_t 
aad_len, + const uint8_t * tag, size_t tag_length, const Aes128KeyHandle & key, const uint8_t * nonce, + size_t nonce_length, uint8_t * plaintext) +{ + CHIP_ERROR error = CHIP_NO_ERROR; + int result = 1; + + mbedtls_ccm_context context; + mbedtls_ccm_init(&context); + + VerifyOrExit(plaintext != nullptr || ciphertext_len == 0, error = CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrExit(ciphertext != nullptr || ciphertext_len == 0, error = CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrExit(tag != nullptr, error = CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrExit(_isValidTagLength(tag_length), error = CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrExit(nonce != nullptr, error = CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrExit(nonce_length > 0, error = CHIP_ERROR_INVALID_ARGUMENT); + if (aad_len > 0) + { + VerifyOrExit(aad != nullptr, error = CHIP_ERROR_INVALID_ARGUMENT); + } + + // multiplying by 8 to convert key from bits to byte + result = mbedtls_ccm_setkey(&context, MBEDTLS_CIPHER_ID_AES, key.As(), sizeof(Aes128KeyByteArray) * 8); + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + + // Decrypt + result = mbedtls_ccm_auth_decrypt(&context, ciphertext_len, Uint8::to_const_uchar(nonce), nonce_length, + Uint8::to_const_uchar(aad), aad_len, Uint8::to_const_uchar(ciphertext), + Uint8::to_uchar(plaintext), Uint8::to_const_uchar(tag), tag_length); + _log_mbedTLS_error(result); + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + +exit: + mbedtls_ccm_free(&context); + return error; +} + +CHIP_ERROR Hash_SHA256(const uint8_t * data, const size_t data_length, uint8_t * out_buffer) +{ + // zero data length hash is supported. 
+ VerifyOrReturnError(data != nullptr, CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrReturnError(out_buffer != nullptr, CHIP_ERROR_INVALID_ARGUMENT); + +#if (MBEDTLS_VERSION_NUMBER >= 0x03000000) + const int result = mbedtls_sha256(Uint8::to_const_uchar(data), data_length, Uint8::to_uchar(out_buffer), 0); +#else + const int result = mbedtls_sha256_ret(Uint8::to_const_uchar(data), data_length, Uint8::to_uchar(out_buffer), 0); +#endif + + VerifyOrReturnError(result == 0, CHIP_ERROR_INTERNAL); + + return CHIP_NO_ERROR; +} + +CHIP_ERROR Hash_SHA1(const uint8_t * data, const size_t data_length, uint8_t * out_buffer) +{ + // zero data length hash is supported. + VerifyOrReturnError(out_buffer != nullptr, CHIP_ERROR_INVALID_ARGUMENT); + +#if (MBEDTLS_VERSION_NUMBER >= 0x03000000) + const int result = mbedtls_sha1(Uint8::to_const_uchar(data), data_length, Uint8::to_uchar(out_buffer)); +#else + const int result = mbedtls_sha1_ret(Uint8::to_const_uchar(data), data_length, Uint8::to_uchar(out_buffer)); +#endif + + VerifyOrReturnError(result == 0, CHIP_ERROR_INTERNAL); + + return CHIP_NO_ERROR; +} + +static_assert(kMAX_Hash_SHA256_Context_Size >= sizeof(mbedtls_sha256_context), + "kMAX_Hash_SHA256_Context_Size is too small for the size of underlying mbedtls_sha256_context"); + +static inline mbedtls_sha256_context * to_inner_hash_sha256_context(HashSHA256OpaqueContext * context) +{ + return SafePointerCast(context); +} + +Hash_SHA256_stream::Hash_SHA256_stream(void) +{ + mbedtls_sha256_context * context = to_inner_hash_sha256_context(&mContext); + mbedtls_sha256_init(context); +} + +Hash_SHA256_stream::~Hash_SHA256_stream(void) +{ + mbedtls_sha256_context * context = to_inner_hash_sha256_context(&mContext); + mbedtls_sha256_free(context); + Clear(); +} + +CHIP_ERROR Hash_SHA256_stream::Begin(void) +{ + mbedtls_sha256_context * const context = to_inner_hash_sha256_context(&mContext); + +#if (MBEDTLS_VERSION_NUMBER >= 0x03000000) + const int result = mbedtls_sha256_starts(context, 0); 
+#else + const int result = mbedtls_sha256_starts_ret(context, 0); +#endif + + VerifyOrReturnError(result == 0, CHIP_ERROR_INTERNAL); + + return CHIP_NO_ERROR; +} + +CHIP_ERROR Hash_SHA256_stream::AddData(const ByteSpan data) +{ + mbedtls_sha256_context * const context = to_inner_hash_sha256_context(&mContext); + +#if (MBEDTLS_VERSION_NUMBER >= 0x03000000) + const int result = mbedtls_sha256_update(context, Uint8::to_const_uchar(data.data()), data.size()); +#else + const int result = mbedtls_sha256_update_ret(context, Uint8::to_const_uchar(data.data()), data.size()); +#endif + + VerifyOrReturnError(result == 0, CHIP_ERROR_INTERNAL); + + return CHIP_NO_ERROR; +} + +CHIP_ERROR Hash_SHA256_stream::GetDigest(MutableByteSpan & out_buffer) +{ + mbedtls_sha256_context * context = to_inner_hash_sha256_context(&mContext); + + // Back-up context as we are about to finalize the hash to extract digest. + mbedtls_sha256_context previous_ctx; + mbedtls_sha256_init(&previous_ctx); + mbedtls_sha256_clone(&previous_ctx, context); + + // Pad + compute digest, then finalize context. It is restored next line to continue. + CHIP_ERROR result = Finish(out_buffer); + + // Restore context prior to finalization. 
+ mbedtls_sha256_clone(context, &previous_ctx); + mbedtls_sha256_free(&previous_ctx); + + return result; +} + +CHIP_ERROR Hash_SHA256_stream::Finish(MutableByteSpan & out_buffer) +{ + VerifyOrReturnError(out_buffer.size() >= kSHA256_Hash_Length, CHIP_ERROR_BUFFER_TOO_SMALL); + mbedtls_sha256_context * const context = to_inner_hash_sha256_context(&mContext); + +#if (MBEDTLS_VERSION_NUMBER >= 0x03000000) + const int result = mbedtls_sha256_finish(context, Uint8::to_uchar(out_buffer.data())); +#else + const int result = mbedtls_sha256_finish_ret(context, Uint8::to_uchar(out_buffer.data())); +#endif + + VerifyOrReturnError(result == 0, CHIP_ERROR_INTERNAL); + out_buffer = out_buffer.SubSpan(0, kSHA256_Hash_Length); + + return CHIP_NO_ERROR; +} + +void Hash_SHA256_stream::Clear(void) +{ + mbedtls_platform_zeroize(this, sizeof(*this)); +} + +CHIP_ERROR HKDF_sha::HKDF_SHA256(const uint8_t * secret, const size_t secret_length, const uint8_t * salt, const size_t salt_length, + const uint8_t * info, const size_t info_length, uint8_t * out_buffer, size_t out_length) +{ + VerifyOrReturnError(secret != nullptr, CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrReturnError(secret_length > 0, CHIP_ERROR_INVALID_ARGUMENT); + + // Salt is optional + if (salt_length > 0) + { + VerifyOrReturnError(salt != nullptr, CHIP_ERROR_INVALID_ARGUMENT); + } + + VerifyOrReturnError(info_length > 0, CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrReturnError(info != nullptr, CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrReturnError(out_length > 0, CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrReturnError(out_buffer != nullptr, CHIP_ERROR_INVALID_ARGUMENT); + + const mbedtls_md_info_t * const md = mbedtls_md_info_from_type(MBEDTLS_MD_SHA256); + VerifyOrReturnError(md != nullptr, CHIP_ERROR_INTERNAL); + + const int result = mbedtls_hkdf(md, Uint8::to_const_uchar(salt), salt_length, Uint8::to_const_uchar(secret), secret_length, + Uint8::to_const_uchar(info), info_length, Uint8::to_uchar(out_buffer), out_length); + 
_log_mbedTLS_error(result); + VerifyOrReturnError(result == 0, CHIP_ERROR_INTERNAL); + + return CHIP_NO_ERROR; +} + +CHIP_ERROR HMAC_sha::HMAC_SHA256(const uint8_t * key, size_t key_length, const uint8_t * message, size_t message_length, + uint8_t * out_buffer, size_t out_length) +{ + VerifyOrReturnError(key != nullptr, CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrReturnError(key_length > 0, CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrReturnError(message != nullptr, CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrReturnError(message_length > 0, CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrReturnError(out_length >= kSHA256_Hash_Length, CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrReturnError(out_buffer != nullptr, CHIP_ERROR_INVALID_ARGUMENT); + + const mbedtls_md_info_t * const md = mbedtls_md_info_from_type(MBEDTLS_MD_SHA256); + VerifyOrReturnError(md != nullptr, CHIP_ERROR_INTERNAL); + + const int result = + mbedtls_md_hmac(md, Uint8::to_const_uchar(key), key_length, Uint8::to_const_uchar(message), message_length, out_buffer); + + _log_mbedTLS_error(result); + VerifyOrReturnError(result == 0, CHIP_ERROR_INTERNAL); + + return CHIP_NO_ERROR; +} + +CHIP_ERROR PBKDF2_sha256::pbkdf2_sha256(const uint8_t * password, size_t plen, const uint8_t * salt, size_t slen, + unsigned int iteration_count, uint32_t key_length, uint8_t * output) +{ + CHIP_ERROR error = CHIP_NO_ERROR; + int result = 0; + const mbedtls_md_info_t * md_info; + mbedtls_md_context_t md_ctxt; + constexpr int use_hmac = 1; + + bool free_md_ctxt = false; + + VerifyOrExit(password != nullptr, error = CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrExit(plen > 0, error = CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrExit(salt != nullptr, error = CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrExit(slen >= kSpake2p_Min_PBKDF_Salt_Length, error = CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrExit(slen <= kSpake2p_Max_PBKDF_Salt_Length, error = CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrExit(key_length > 0, error = CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrExit(output != 
nullptr, error = CHIP_ERROR_INVALID_ARGUMENT); + + md_info = mbedtls_md_info_from_type(MBEDTLS_MD_SHA256); + VerifyOrExit(md_info != nullptr, error = CHIP_ERROR_INTERNAL); + + mbedtls_md_init(&md_ctxt); + free_md_ctxt = true; + + result = mbedtls_md_setup(&md_ctxt, md_info, use_hmac); + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + + result = mbedtls_pkcs5_pbkdf2_hmac(&md_ctxt, Uint8::to_const_uchar(password), plen, Uint8::to_const_uchar(salt), slen, + iteration_count, key_length, Uint8::to_uchar(output)); + + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + +exit: + _log_mbedTLS_error(result); + + if (free_md_ctxt) + { + mbedtls_md_free(&md_ctxt); + } + + return error; +} + +static EntropyContext * get_entropy_context() +{ + if (!gsEntropyContext.mInitialized) + { + mbedtls_entropy_init(&gsEntropyContext.mEntropy); + mbedtls_ctr_drbg_init(&gsEntropyContext.mDRBGCtxt); + + gsEntropyContext.mInitialized = true; + } + + return &gsEntropyContext; +} + +static mbedtls_ctr_drbg_context * get_drbg_context() +{ + EntropyContext * const context = get_entropy_context(); + + mbedtls_ctr_drbg_context * const drbgCtxt = &context->mDRBGCtxt; + + if (!context->mDRBGSeeded) + { + const int status = mbedtls_ctr_drbg_seed(drbgCtxt, mbedtls_entropy_func, &context->mEntropy, nullptr, 0); + if (status != 0) + { + _log_mbedTLS_error(status); + return nullptr; + } + + context->mDRBGSeeded = true; + } + + return drbgCtxt; +} + +CHIP_ERROR add_entropy_source(entropy_source fn_source, void * p_source, size_t threshold) +{ + VerifyOrReturnError(fn_source != nullptr, CHIP_ERROR_INVALID_ARGUMENT); + + EntropyContext * const entropy_ctxt = get_entropy_context(); + VerifyOrReturnError(entropy_ctxt != nullptr, CHIP_ERROR_INTERNAL); + + const int result = + mbedtls_entropy_add_source(&entropy_ctxt->mEntropy, fn_source, p_source, threshold, MBEDTLS_ENTROPY_SOURCE_STRONG); + VerifyOrReturnError(result == 0, CHIP_ERROR_INTERNAL); + return CHIP_NO_ERROR; +} + +CHIP_ERROR 
DRBG_get_bytes(uint8_t * out_buffer, const size_t out_length) +{ + VerifyOrReturnError(out_buffer != nullptr, CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrReturnError(out_length > 0, CHIP_ERROR_INVALID_ARGUMENT); + + mbedtls_ctr_drbg_context * const drbg_ctxt = get_drbg_context(); + VerifyOrReturnError(drbg_ctxt != nullptr, CHIP_ERROR_INTERNAL); + + const int result = mbedtls_ctr_drbg_random(drbg_ctxt, Uint8::to_uchar(out_buffer), out_length); + VerifyOrReturnError(result == 0, CHIP_ERROR_INTERNAL); + + return CHIP_NO_ERROR; +} + +static int CryptoRNG(void * ctxt, uint8_t * out_buffer, size_t out_length) +{ + return (chip::Crypto::DRBG_get_bytes(out_buffer, out_length) == CHIP_NO_ERROR) ? 0 : 1; +} + +mbedtls_ecp_group_id MapECPGroupId(SupportedECPKeyTypes keyType) +{ + switch (keyType) + { + case SupportedECPKeyTypes::ECP256R1: + return MBEDTLS_ECP_DP_SECP256R1; + default: + return MBEDTLS_ECP_DP_NONE; + } +} + +static inline mbedtls_uecc_keypair * to_keypair(P256KeypairContext * context) +{ + return SafePointerCast(context); +} + +static inline const mbedtls_uecc_keypair * to_const_keypair(const P256KeypairContext * context) +{ + return SafePointerCast(context); +} + +CHIP_ERROR P256Keypair::ECDSA_sign_msg(const uint8_t * msg, const size_t msg_length, P256ECDSASignature & out_signature) const +{ + VerifyOrReturnError(mInitialized, CHIP_ERROR_WELL_UNINITIALIZED); + VerifyOrReturnError((msg != nullptr) && (msg_length > 0), CHIP_ERROR_INVALID_ARGUMENT); + + uint8_t digest[kSHA256_Hash_Length]; + memset(&digest[0], 0, sizeof(digest)); + ReturnErrorOnFailure(Hash_SHA256(msg, msg_length, &digest[0])); + + CHIP_ERROR error = CHIP_NO_ERROR; + int result = UECC_FAILURE; + + const mbedtls_uecc_keypair * keypair = to_const_keypair(&mKeypair); + + result = uECC_sign(keypair->private_key, digest, sizeof(digest), out_signature.Bytes()); + + VerifyOrExit(result == UECC_SUCCESS, error = CHIP_ERROR_INTERNAL); + VerifyOrExit(out_signature.SetLength(kP256_ECDSA_Signature_Length_Raw) == 
CHIP_NO_ERROR, error = CHIP_ERROR_INTERNAL); + + keypair = nullptr; + +exit: + return error; +} + +CHIP_ERROR P256PublicKey::ECDSA_validate_msg_signature(const uint8_t * msg, const size_t msg_length, + const P256ECDSASignature & signature) const +{ +#if defined(MBEDTLS_ECDSA_C) + VerifyOrReturnError((msg != nullptr) && (msg_length > 0), CHIP_ERROR_INVALID_ARGUMENT); + + uint8_t digest[kSHA256_Hash_Length]; + memset(&digest[0], 0, sizeof(digest)); + ReturnErrorOnFailure(Hash_SHA256(msg, msg_length, &digest[0])); + + return ECDSA_validate_hash_signature(&digest[0], sizeof(digest), signature); +#else + return CHIP_ERROR_NOT_IMPLEMENTED; +#endif +} + +CHIP_ERROR P256PublicKey::ECDSA_validate_hash_signature(const uint8_t * hash, const size_t hash_length, + const P256ECDSASignature & signature) const +{ + VerifyOrReturnError(hash != nullptr, CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrReturnError(hash_length == kSHA256_Hash_Length, CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrReturnError(signature.Length() == kP256_ECDSA_Signature_Length_Raw, CHIP_ERROR_INVALID_ARGUMENT); + + CHIP_ERROR error = CHIP_NO_ERROR; + int result = UECC_FAILURE; + + const uint8_t * public_key = *this; + + // Fully padded raw uncompressed points expected, first byte is always 0x04 i.e uncompressed + result = uECC_verify(public_key + 1, hash, hash_length, Uint8::to_const_uchar(signature.ConstBytes())); + VerifyOrExit(result == UECC_SUCCESS, error = CHIP_ERROR_INVALID_SIGNATURE); + +exit: + return error; +} + +CHIP_ERROR P256Keypair::ECDH_derive_secret(const P256PublicKey & remote_public_key, P256ECDHDerivedSecret & out_secret) const +{ +#if defined(MBEDTLS_ECDH_C) + CHIP_ERROR error = CHIP_NO_ERROR; + int result = 0; + size_t secret_length = (out_secret.Length() == 0) ? 
out_secret.Capacity() : out_secret.Length(); + + const mbedtls_uecc_keypair * keypair = to_const_keypair(&mKeypair); + + VerifyOrExit(mInitialized, error = CHIP_ERROR_WELL_UNINITIALIZED); + + // Fully padded raw uncompressed points expected, first byte is always 0x04 i.e uncompressed + result = uECC_shared_secret(remote_public_key.ConstBytes() + 1, keypair->private_key, out_secret.Bytes()); + VerifyOrExit(result == UECC_SUCCESS, error = CHIP_ERROR_INTERNAL); + + SuccessOrExit(out_secret.SetLength(secret_length)); + +exit: + keypair = nullptr; + _log_mbedTLS_error(result); + return error; +#else + return CHIP_ERROR_NOT_IMPLEMENTED; +#endif +} + +void ClearSecretData(uint8_t * buf, size_t len) +{ + mbedtls_platform_zeroize(buf, len); +} + +// THE BELOW IS FROM `third_party/openthread/repo/third_party/mbedtls/repo/library/constant_time.c` since +// mbedtls_ct_memcmp is not available on Linux somehow :( +int mbedtls_ct_memcmp_copy(const void * a, const void * b, size_t n) +{ + size_t i; + volatile const unsigned char * A = (volatile const unsigned char *) a; + volatile const unsigned char * B = (volatile const unsigned char *) b; + volatile unsigned char diff = 0; + + for (i = 0; i < n; i++) + { + /* Read volatile data in order before computing diff. + * This avoids IAR compiler warning: + * 'the order of volatile accesses is undefined ..' 
*/ + unsigned char x = A[i], y = B[i]; + diff |= x ^ y; + } + + return ((int) diff); +} + +bool IsBufferContentEqualConstantTime(const void * a, const void * b, size_t n) +{ + return mbedtls_ct_memcmp_copy(a, b, n) == 0; +} + +CHIP_ERROR P256Keypair::Initialize(ECPKeyTarget key_target) +{ + CHIP_ERROR error = CHIP_NO_ERROR; + int result = UECC_FAILURE; + + Clear(); + + mbedtls_uecc_keypair * keypair = to_keypair(&mKeypair); + + result = uECC_make_key(keypair->public_key, keypair->private_key); + VerifyOrExit(result == UECC_SUCCESS, error = CHIP_ERROR_INTERNAL); + + // Fully padded raw uncompressed points expected, first byte is always 0x04 i.e uncompressed + Uint8::to_uchar(mPublicKey)[0] = 0x04; + memcpy(Uint8::to_uchar(mPublicKey) + 1, keypair->public_key, 2 * NUM_ECC_BYTES); + + keypair = nullptr; + mInitialized = true; + +exit: + _log_mbedTLS_error(result); + return error; +} + +CHIP_ERROR P256Keypair::Serialize(P256SerializedKeypair & output) const +{ + const mbedtls_uecc_keypair * keypair = to_const_keypair(&mKeypair); + size_t len = output.Length() == 0 ? 
output.Capacity() : output.Length(); + Encoding::BufferWriter bbuf(output.Bytes(), len); + uint8_t privkey[kP256_PrivateKey_Length]; + CHIP_ERROR error = CHIP_NO_ERROR; + int result = 0; + + bbuf.Put(mPublicKey, mPublicKey.Length()); + + VerifyOrExit(bbuf.Available() == sizeof(privkey), error = CHIP_ERROR_INTERNAL); + VerifyOrExit(sizeof(keypair->private_key) <= bbuf.Available(), error = CHIP_ERROR_INTERNAL); + + memcpy(privkey, keypair->private_key, sizeof(privkey)); + + bbuf.Put(privkey, sizeof(privkey)); + VerifyOrExit(bbuf.Fit(), error = CHIP_ERROR_BUFFER_TOO_SMALL); + + output.SetLength(bbuf.Needed()); + +exit: + memset(privkey, 0, sizeof(privkey)); + _log_mbedTLS_error(result); + return error; +} + +CHIP_ERROR P256Keypair::Deserialize(P256SerializedKeypair & input) +{ + int result = 0; + CHIP_ERROR error = CHIP_NO_ERROR; + Encoding::BufferWriter bbuf(mPublicKey, mPublicKey.Length()); + + Clear(); + + mbedtls_uecc_keypair * keypair = to_keypair(&mKeypair); + + // Fully padded raw uncompressed points expected, first byte is always 0x04 i.e uncompressed + memcpy(keypair->public_key, input.ConstBytes() + 1, 2 * NUM_ECC_BYTES); + memcpy(keypair->private_key, input.ConstBytes() + mPublicKey.Length(), NUM_ECC_BYTES); + + keypair = nullptr; + + VerifyOrExit(input.Length() == mPublicKey.Length() + kP256_PrivateKey_Length, error = CHIP_ERROR_INVALID_ARGUMENT); + bbuf.Put(input.ConstBytes(), mPublicKey.Length()); + VerifyOrExit(bbuf.Fit(), error = CHIP_ERROR_NO_MEMORY); + + mInitialized = true; + + _log_mbedTLS_error(result); + +exit: + return error; +} + +void P256Keypair::Clear() +{ + if (mInitialized) + { + mbedtls_uecc_keypair * keypair = to_keypair(&mKeypair); + memset(keypair, 0, sizeof(mbedtls_uecc_keypair)); + mInitialized = false; + } +} + +P256Keypair::~P256Keypair() +{ + Clear(); +} + +CHIP_ERROR P256Keypair::NewCertificateSigningRequest(uint8_t * out_csr, size_t & csr_length) const +{ + CHIP_ERROR error = CHIP_NO_ERROR; + int result = 0; + size_t out_length; 
+ + mbedtls_x509write_csr csr; + mbedtls_x509write_csr_init(&csr); + + mbedtls_pk_context pk; + pk.CHIP_CRYPTO_PAL_PRIVATE(pk_info) = mbedtls_pk_info_from_type(MBEDTLS_PK_ECKEY); + pk.CHIP_CRYPTO_PAL_PRIVATE(pk_ctx) = to_keypair(&mKeypair); + VerifyOrExit(pk.CHIP_CRYPTO_PAL_PRIVATE(pk_info) != nullptr, error = CHIP_ERROR_INTERNAL); + + VerifyOrExit(mInitialized, error = CHIP_ERROR_WELL_UNINITIALIZED); + + mbedtls_x509write_csr_set_key(&csr, &pk); + + mbedtls_x509write_csr_set_md_alg(&csr, MBEDTLS_MD_SHA256); + + // TODO: mbedTLS CSR parser fails if the subject name is not set (or if empty). + // CHIP Spec doesn't specify the subject name that can be used. + // Figure out the correct value and update this code. + result = mbedtls_x509write_csr_set_subject_name(&csr, "O=CSR"); + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + + result = mbedtls_x509write_csr_der(&csr, out_csr, csr_length, CryptoRNG, nullptr); + VerifyOrExit(result > 0, error = CHIP_ERROR_INTERNAL); + VerifyOrExit(CanCastTo(result), error = CHIP_ERROR_INTERNAL); + + out_length = static_cast(result); + result = 0; + VerifyOrExit(out_length <= csr_length, error = CHIP_ERROR_INTERNAL); + + if (csr_length != out_length) + { + // mbedTLS API writes the CSR at the end of the provided buffer. + // Let's move it to the start of the buffer. + size_t offset = csr_length - out_length; + memmove(out_csr, &out_csr[offset], out_length); + } + + csr_length = out_length; + +exit: + mbedtls_x509write_csr_free(&csr); + + _log_mbedTLS_error(result); + return error; +} + +CHIP_ERROR VerifyCertificateSigningRequest(const uint8_t * csr_buf, size_t csr_length, P256PublicKey & pubkey) +{ +#if defined(MBEDTLS_X509_CSR_PARSE_C) + ReturnErrorOnFailure(VerifyCertificateSigningRequestFormat(csr_buf, csr_length)); + + // TODO: For some embedded targets, mbedTLS library doesn't have mbedtls_x509_csr_parse_der, and mbedtls_x509_csr_parse_free. + // Taking a step back, embedded targets likely will not process CSR requests. 
Adding this action item to reevaluate + // this if there's a need for this processing for embedded targets. + CHIP_ERROR error = CHIP_NO_ERROR; + size_t pubkey_size = 0; + + mbedtls_ecp_keypair * keypair = nullptr; + + P256ECDSASignature signature; + MutableByteSpan out_raw_sig_span(signature.Bytes(), signature.Capacity()); + + mbedtls_x509_csr csr; + mbedtls_x509_csr_init(&csr); + + int result = mbedtls_x509_csr_parse_der(&csr, csr_buf, csr_length); + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + + // Verify the signature algorithm and public key type + VerifyOrExit(csr.CHIP_CRYPTO_PAL_PRIVATE(sig_md) == MBEDTLS_MD_SHA256, error = CHIP_ERROR_UNSUPPORTED_SIGNATURE_TYPE); + VerifyOrExit(csr.CHIP_CRYPTO_PAL_PRIVATE(sig_pk) == MBEDTLS_PK_ECDSA, error = CHIP_ERROR_WRONG_KEY_TYPE); + + keypair = mbedtls_pk_ec(csr.CHIP_CRYPTO_PAL_PRIVATE_X509(pk)); + + // Copy the public key from the CSR + result = mbedtls_ecp_point_write_binary(&keypair->CHIP_CRYPTO_PAL_PRIVATE(grp), &keypair->CHIP_CRYPTO_PAL_PRIVATE(Q), + MBEDTLS_ECP_PF_UNCOMPRESSED, &pubkey_size, Uint8::to_uchar(pubkey), pubkey.Length()); + + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + VerifyOrExit(pubkey_size == pubkey.Length(), error = CHIP_ERROR_INTERNAL); + + // Convert DER signature to raw signature + error = EcdsaAsn1SignatureToRaw(kP256_FE_Length, + ByteSpan{ csr.CHIP_CRYPTO_PAL_PRIVATE(sig).CHIP_CRYPTO_PAL_PRIVATE_X509(p), + csr.CHIP_CRYPTO_PAL_PRIVATE(sig).CHIP_CRYPTO_PAL_PRIVATE_X509(len) }, + out_raw_sig_span); + + VerifyOrExit(error == CHIP_NO_ERROR, error = CHIP_ERROR_INVALID_ARGUMENT); + VerifyOrExit(out_raw_sig_span.size() == (kP256_FE_Length * 2), error = CHIP_ERROR_INTERNAL); + signature.SetLength(out_raw_sig_span.size()); + + // Verify the signature using the public key + error = pubkey.ECDSA_validate_msg_signature(csr.CHIP_CRYPTO_PAL_PRIVATE_X509(cri).CHIP_CRYPTO_PAL_PRIVATE_X509(p), + csr.CHIP_CRYPTO_PAL_PRIVATE_X509(cri).CHIP_CRYPTO_PAL_PRIVATE_X509(len), signature); + + 
SuccessOrExit(error); + +exit: + mbedtls_x509_csr_free(&csr); + _log_mbedTLS_error(result); + return error; +#else + ChipLogError(Crypto, "MBEDTLS_X509_CSR_PARSE_C is not enabled. CSR cannot be parsed"); + return CHIP_ERROR_UNSUPPORTED_CHIP_FEATURE; +#endif +} + +typedef struct Spake2p_Context +{ + const mbedtls_md_info_t * md_info; + uECC_word_t M[2 * NUM_ECC_WORDS]; + uECC_word_t N[2 * NUM_ECC_WORDS]; + uECC_word_t X[2 * NUM_ECC_WORDS]; + uECC_word_t Y[2 * NUM_ECC_WORDS]; + uECC_word_t L[2 * NUM_ECC_WORDS]; + uECC_word_t Z[2 * NUM_ECC_WORDS]; + uECC_word_t V[2 * NUM_ECC_WORDS]; + + uECC_word_t w0[NUM_ECC_WORDS]; + uECC_word_t w1[NUM_ECC_WORDS]; + uECC_word_t xy[NUM_ECC_WORDS]; + uECC_word_t tempbn[NUM_ECC_WORDS]; +} Spake2p_Context; + +static inline Spake2p_Context * to_inner_spake2p_context(Spake2pOpaqueContext * context) +{ + return SafePointerCast(context); +} + +CHIP_ERROR Spake2p_P256_SHA256_HKDF_HMAC::InitInternal(void) +{ + CHIP_ERROR error = CHIP_NO_ERROR; + int result = 0; + + Spake2p_Context * context = to_inner_spake2p_context(&mSpake2pContext); + + memset(context, 0, sizeof(Spake2p_Context)); + + M = context->M; + N = context->N; + X = context->X; + Y = context->Y; + L = context->L; + V = context->V; + Z = context->Z; + + w0 = context->w0; + w1 = context->w1; + xy = context->xy; + tempbn = context->tempbn; + + G = curve_G; + + return error; + +exit: + _log_mbedTLS_error(result); + Clear(); + return error; +} + +void Spake2p_P256_SHA256_HKDF_HMAC::Clear() +{ + VerifyOrReturn(state != CHIP_SPAKE2P_STATE::PREINIT); + + Spake2p_Context * context = to_inner_spake2p_context(&mSpake2pContext); + memset(&context->M, 0, 2 * NUM_ECC_WORDS * sizeof(uECC_word_t)); + memset(&context->N, 0, 2 * NUM_ECC_WORDS * sizeof(uECC_word_t)); + memset(&context->X, 0, 2 * NUM_ECC_WORDS * sizeof(uECC_word_t)); + memset(&context->Y, 0, 2 * NUM_ECC_WORDS * sizeof(uECC_word_t)); + memset(&context->L, 0, 2 * NUM_ECC_WORDS * sizeof(uECC_word_t)); + memset(&context->Z, 0, 2 * 
NUM_ECC_WORDS * sizeof(uECC_word_t)); + memset(&context->V, 0, 2 * NUM_ECC_WORDS * sizeof(uECC_word_t)); + + memset(&context->w0, 0, NUM_ECC_WORDS * sizeof(uECC_word_t)); + memset(&context->w1, 0, NUM_ECC_WORDS * sizeof(uECC_word_t)); + memset(&context->xy, 0, NUM_ECC_WORDS * sizeof(uECC_word_t)); + memset(&context->tempbn, 0, NUM_ECC_WORDS * sizeof(uECC_word_t)); + + G = NULL; + state = CHIP_SPAKE2P_STATE::PREINIT; +} + +CHIP_ERROR Spake2p_P256_SHA256_HKDF_HMAC::Mac(const uint8_t * key, size_t key_len, const uint8_t * in, size_t in_len, + MutableByteSpan & out_span) +{ + HMAC_sha hmac; + VerifyOrReturnError(out_span.size() >= kSHA256_Hash_Length, CHIP_ERROR_BUFFER_TOO_SMALL); + ReturnErrorOnFailure(hmac.HMAC_SHA256(key, key_len, in, in_len, out_span.data(), kSHA256_Hash_Length)); + out_span = out_span.SubSpan(0, kSHA256_Hash_Length); + return CHIP_NO_ERROR; +} + +CHIP_ERROR Spake2p_P256_SHA256_HKDF_HMAC::MacVerify(const uint8_t * key, size_t key_len, const uint8_t * mac, size_t mac_len, + const uint8_t * in, size_t in_len) +{ + CHIP_ERROR error = CHIP_NO_ERROR; + int result = 0; + + uint8_t computed_mac[kSHA256_Hash_Length]; + MutableByteSpan computed_mac_span{ computed_mac }; + VerifyOrExit(mac_len == kSHA256_Hash_Length, error = CHIP_ERROR_INVALID_ARGUMENT); + + SuccessOrExit(error = Mac(key, key_len, in, in_len, computed_mac_span)); + VerifyOrExit(computed_mac_span.size() == mac_len, error = CHIP_ERROR_INTERNAL); + + VerifyOrExit(IsBufferContentEqualConstantTime(mac, computed_mac, kSHA256_Hash_Length), error = CHIP_ERROR_INTERNAL); + +exit: + _log_mbedTLS_error(result); + return error; +} + +CHIP_ERROR Spake2p_P256_SHA256_HKDF_HMAC::FELoad(const uint8_t * in, size_t in_len, void * fe) +{ + CHIP_ERROR error = CHIP_NO_ERROR; + int result = 0; + + uECC_word_t tmp[2 * NUM_ECC_WORDS] = { 0 }; + uECC_vli_bytesToNative(tmp, in, NUM_ECC_BYTES); + + uECC_vli_mmod((uECC_word_t *) fe, tmp, curve_n); + +exit: + _log_mbedTLS_error(result); + return error; +} + +CHIP_ERROR 
Spake2p_P256_SHA256_HKDF_HMAC::FEWrite(const void * fe, uint8_t * out, size_t out_len) +{ + uECC_vli_nativeToBytes(out, NUM_ECC_BYTES, (const unsigned int *) fe); + + return CHIP_NO_ERROR; +} + +CHIP_ERROR Spake2p_P256_SHA256_HKDF_HMAC::FEGenerate(void * fe) +{ + CHIP_ERROR error = CHIP_NO_ERROR; + int result = 0; + + mbedtls_uecc_keypair keypair; + + result = UECC_FAILURE; + + result = uECC_make_key(keypair.public_key, keypair.private_key); + VerifyOrExit(result == UECC_SUCCESS, error = CHIP_ERROR_INTERNAL); + + uECC_vli_bytesToNative((uECC_word_t *) fe, keypair.private_key, NUM_ECC_BYTES); + +exit: + _log_mbedTLS_error(result); + return error; +} + +CHIP_ERROR Spake2p_P256_SHA256_HKDF_HMAC::FEMul(void * fer, const void * fe1, const void * fe2) +{ + CHIP_ERROR error = CHIP_NO_ERROR; + int result = 0; + + uECC_vli_modMult((uECC_word_t *) fer, (const uECC_word_t *) fe1, (const uECC_word_t *) fe2, (const uECC_word_t *) curve_n); + +exit: + _log_mbedTLS_error(result); + return error; +} + +CHIP_ERROR Spake2p_P256_SHA256_HKDF_HMAC::PointLoad(const uint8_t * in, size_t in_len, void * R) +{ + uint8_t tmp[2 * NUM_ECC_BYTES]; + + // Fully padded raw uncompressed points expected, first byte is always 0x04 i.e uncompressed + memcpy(tmp, in + 1, 2 * NUM_ECC_BYTES); + + uECC_vli_bytesToNative((uECC_word_t *) R, tmp, NUM_ECC_BYTES); + uECC_vli_bytesToNative((uECC_word_t *) R + NUM_ECC_WORDS, tmp + NUM_ECC_BYTES, NUM_ECC_BYTES); + + return CHIP_NO_ERROR; +} + +CHIP_ERROR Spake2p_P256_SHA256_HKDF_HMAC::PointWrite(const void * R, uint8_t * out, size_t out_len) +{ + memset(out, 0, out_len); + + // Fully padded raw uncompressed points expected, first byte is always 0x04 i.e uncompressed + out[0] = 0x04; + uECC_vli_nativeToBytes(out + 1, NUM_ECC_BYTES, (uECC_word_t *) R); + uECC_vli_nativeToBytes(out + NUM_ECC_BYTES + 1, NUM_ECC_BYTES, (uECC_word_t *) R + NUM_ECC_WORDS); + + return CHIP_NO_ERROR; +} + +CHIP_ERROR Spake2p_P256_SHA256_HKDF_HMAC::PointMul(void * R, const void * P1, 
const void * fe1) +{ + + if (EccPoint_mult_safer((uECC_word_t *) R, (const uECC_word_t *) P1, (const uECC_word_t *) fe1) != UECC_SUCCESS) + { + return CHIP_ERROR_INTERNAL; + } + + return CHIP_NO_ERROR; +} + +CHIP_ERROR Spake2p_P256_SHA256_HKDF_HMAC::PointAddMul(void * R, const void * P1, const void * fe1, const void * P2, + const void * fe2) +{ + uECC_word_t R1[2 * NUM_ECC_WORDS]; + uECC_word_t R2[2 * NUM_ECC_WORDS]; + uECC_word_t z[NUM_ECC_WORDS]; + uint8_t ret = UECC_SUCCESS; + + if (EccPoint_mult_safer(R1, (const uECC_word_t *) P1, (const uECC_word_t *) fe1) != UECC_SUCCESS) + { + return CHIP_ERROR_INTERNAL; + } + + if (EccPoint_mult_safer(R2, (const uECC_word_t *) P2, (const uECC_word_t *) fe2) != UECC_SUCCESS) + { + return CHIP_ERROR_INTERNAL; + } + + uECC_vli_modSub(z, R2, R1, curve_p); + XYcZ_add(R1, R1 + NUM_ECC_WORDS, R2, R2 + NUM_ECC_WORDS); + uECC_vli_modInv(z, z, curve_p); + apply_z(R2, R2 + NUM_ECC_WORDS, z); + + memcpy((uECC_word_t *) R, R2, 2 * NUM_ECC_BYTES); + + return CHIP_NO_ERROR; +} + +CHIP_ERROR Spake2p_P256_SHA256_HKDF_HMAC::PointInvert(void * R) +{ + uECC_word_t tmp[NUM_ECC_WORDS] = { 0 }; + + uECC_vli_sub(tmp, curve_p, (uECC_word_t *) R + NUM_ECC_WORDS); + memcpy((uECC_word_t *) R + NUM_ECC_WORDS, tmp, NUM_ECC_BYTES); + + return CHIP_NO_ERROR; +} + +CHIP_ERROR Spake2p_P256_SHA256_HKDF_HMAC::PointCofactorMul(void * R) +{ + return CHIP_NO_ERROR; +} + +CHIP_ERROR Spake2p_P256_SHA256_HKDF_HMAC::ComputeL(uint8_t * Lout, size_t * L_len, const uint8_t * w1in, size_t w1in_len) +{ + CHIP_ERROR error = CHIP_NO_ERROR; + int result = 0; + + result = UECC_SUCCESS; + uECC_word_t tmp[2 * NUM_ECC_WORDS]; + uECC_word_t w1_bn[NUM_ECC_WORDS]; + uECC_word_t L_tmp[2 * NUM_ECC_WORDS]; + + uECC_vli_bytesToNative(tmp, w1in, NUM_ECC_BYTES); + + uECC_vli_mmod(w1_bn, tmp, curve_n); + + result = EccPoint_mult_safer(L_tmp, curve_G, w1_bn); + VerifyOrExit(result == UECC_SUCCESS, error = CHIP_ERROR_INTERNAL); + + // Fully padded raw uncompressed points expected, first 
byte is always 0x04 i.e uncompressed + Lout[0] = 0x04; + uECC_vli_nativeToBytes(Lout + 1, NUM_ECC_BYTES, L_tmp); + uECC_vli_nativeToBytes(Lout + NUM_ECC_BYTES + 1, NUM_ECC_BYTES, L_tmp + NUM_ECC_WORDS); + +exit: + _log_mbedTLS_error(result); + + return error; +} + +CHIP_ERROR Spake2p_P256_SHA256_HKDF_HMAC::PointIsValid(void * R) +{ + if (uECC_valid_point((const uECC_word_t *) R) != 0) + { + return CHIP_ERROR_INTERNAL; + } + + return CHIP_NO_ERROR; +} + +namespace { + +#if defined(MBEDTLS_X509_CRT_PARSE_C) +bool IsTimeGreaterThanEqual(const mbedtls_x509_time * const timeA, const mbedtls_x509_time * const timeB) +{ + + // checks if two values are different and if yes, then returns first > second. +#define RETURN_STRICTLY_GREATER_IF_DIFFERENT(component) \ + { \ + auto valueA = timeA->CHIP_CRYPTO_PAL_PRIVATE_X509(component); \ + auto valueB = timeB->CHIP_CRYPTO_PAL_PRIVATE_X509(component); \ + \ + if (valueA != valueB) \ + { \ + return valueA > valueB; \ + } \ + } + + RETURN_STRICTLY_GREATER_IF_DIFFERENT(year); + RETURN_STRICTLY_GREATER_IF_DIFFERENT(mon); + RETURN_STRICTLY_GREATER_IF_DIFFERENT(day); + RETURN_STRICTLY_GREATER_IF_DIFFERENT(hour); + RETURN_STRICTLY_GREATER_IF_DIFFERENT(min); + RETURN_STRICTLY_GREATER_IF_DIFFERENT(sec); + + // all above are equal + return true; +} + +CHIP_ERROR IsCertificateValidAtIssuance(const mbedtls_x509_crt * candidateCertificate, const mbedtls_x509_crt * issuerCertificate) +{ + mbedtls_x509_time candidateNotBeforeTime = candidateCertificate->CHIP_CRYPTO_PAL_PRIVATE_X509(valid_from); + mbedtls_x509_time issuerNotBeforeTime = issuerCertificate->CHIP_CRYPTO_PAL_PRIVATE_X509(valid_from); + mbedtls_x509_time issuerNotAfterTime = issuerCertificate->CHIP_CRYPTO_PAL_PRIVATE_X509(valid_to); + + // check if candidateCertificate is issued at or after issuerCertificate's notBefore timestamp + VerifyOrReturnError(IsTimeGreaterThanEqual(&candidateNotBeforeTime, &issuerNotBeforeTime), CHIP_ERROR_CERT_EXPIRED); + + // check if candidateCertificate 
is issued at or before issuerCertificate's notAfter timestamp + VerifyOrReturnError(IsTimeGreaterThanEqual(&issuerNotAfterTime, &candidateNotBeforeTime), CHIP_ERROR_CERT_EXPIRED); + + return CHIP_NO_ERROR; +} + +int CallbackForCustomValidityCheck(void * data, mbedtls_x509_crt * crt, int depth, uint32_t * flags) +{ + mbedtls_x509_crt * leafCert = reinterpret_cast(data); + mbedtls_x509_crt * issuerCert = crt; + + // Ignore any time validy error performed by the standard mbedTLS code. + *flags &= ~(static_cast(MBEDTLS_X509_BADCERT_EXPIRED | MBEDTLS_X509_BADCERT_FUTURE)); + + // Verify that the leaf certificate has a notBefore time valid within the validity period of the issuerCertificate. + // Note that this callback is invoked for each certificate in the chain. + if (IsCertificateValidAtIssuance(leafCert, issuerCert) != CHIP_NO_ERROR) + { + return MBEDTLS_ERR_X509_INVALID_DATE; + } + + return 0; +} + +constexpr uint8_t sOID_AttributeType_CommonName[] = { 0x55, 0x04, 0x03 }; +constexpr uint8_t sOID_AttributeType_MatterVendorId[] = { 0x2B, 0x06, 0x01, 0x04, 0x01, 0x82, 0xA2, 0x7C, 0x02, 0x01 }; +constexpr uint8_t sOID_AttributeType_MatterProductId[] = { 0x2B, 0x06, 0x01, 0x04, 0x01, 0x82, 0xA2, 0x7C, 0x02, 0x02 }; +constexpr uint8_t sOID_SigAlgo_ECDSAWithSHA256[] = { 0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x04, 0x03, 0x02 }; +constexpr uint8_t sOID_Extension_BasicConstraints[] = { 0x55, 0x1D, 0x13 }; +constexpr uint8_t sOID_Extension_KeyUsage[] = { 0x55, 0x1D, 0x0F }; +constexpr uint8_t sOID_Extension_SubjectKeyIdentifier[] = { 0x55, 0x1D, 0x0E }; +constexpr uint8_t sOID_Extension_AuthorityKeyIdentifier[] = { 0x55, 0x1D, 0x23 }; + +/** + * Compares an mbedtls_asn1_buf structure (oidBuf) to a reference OID represented as uint8_t array (oid). 
+ */ +#define OID_CMP(oid, oidBuf) \ + ((MBEDTLS_ASN1_OID == (oidBuf).CHIP_CRYPTO_PAL_PRIVATE_X509(tag)) && \ + (sizeof(oid) == (oidBuf).CHIP_CRYPTO_PAL_PRIVATE_X509(len)) && \ + (memcmp((oid), (oidBuf).CHIP_CRYPTO_PAL_PRIVATE_X509(p), (oidBuf).CHIP_CRYPTO_PAL_PRIVATE_X509(len)) == 0)) + +#endif // defined(MBEDTLS_X509_CRT_PARSE_C) + +} // anonymous namespace + +CHIP_ERROR VerifyAttestationCertificateFormat(const ByteSpan & cert, AttestationCertType certType) +{ +#if defined(MBEDTLS_X509_CRT_PARSE_C) + CHIP_ERROR error = CHIP_NO_ERROR; + int result = 0; + mbedtls_x509_crt mbed_cert; + unsigned char * p = nullptr; + const unsigned char * end = nullptr; + size_t len = 0; + bool extBasicPresent = false; + bool extKeyUsagePresent = false; + + VerifyOrReturnError(!cert.empty(), CHIP_ERROR_INVALID_ARGUMENT); + + mbedtls_x509_crt_init(&mbed_cert); + + result = mbedtls_x509_crt_parse(&mbed_cert, Uint8::to_const_uchar(cert.data()), cert.size()); + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + + // "version" value is 1 higher than the actual encoded value. + VerifyOrExit(mbed_cert.CHIP_CRYPTO_PAL_PRIVATE_X509(version) - 1 == 2, error = CHIP_ERROR_INTERNAL); + + // Verify signature algorithms is ECDSA with SHA256. + VerifyOrExit(OID_CMP(sOID_SigAlgo_ECDSAWithSHA256, mbed_cert.CHIP_CRYPTO_PAL_PRIVATE_X509(sig_oid)), + error = CHIP_ERROR_INTERNAL); + + // Verify public key presence and format. 
+ { + Crypto::P256PublicKey pubkey; + SuccessOrExit(error = ExtractPubkeyFromX509Cert(cert, pubkey)); + } + + p = mbed_cert.CHIP_CRYPTO_PAL_PRIVATE_X509(v3_ext).CHIP_CRYPTO_PAL_PRIVATE_X509(p); + end = p + mbed_cert.CHIP_CRYPTO_PAL_PRIVATE_X509(v3_ext).CHIP_CRYPTO_PAL_PRIVATE_X509(len); + result = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE); + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + + while (p < end) + { + mbedtls_x509_buf extOID = { 0, 0, nullptr }; + int extCritical = 0; + + result = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE); + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + + /* Get extension ID */ + result = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_OID); + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + + extOID.CHIP_CRYPTO_PAL_PRIVATE_X509(tag) = MBEDTLS_ASN1_OID; + extOID.CHIP_CRYPTO_PAL_PRIVATE_X509(len) = len; + extOID.CHIP_CRYPTO_PAL_PRIVATE_X509(p) = p; + p += len; + + /* Get optional critical */ + result = mbedtls_asn1_get_bool(&p, end, &extCritical); + VerifyOrExit(result == 0 || result == MBEDTLS_ERR_ASN1_UNEXPECTED_TAG, error = CHIP_ERROR_INTERNAL); + + /* Data should be octet string type */ + result = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_OCTET_STRING); + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + + if (OID_CMP(sOID_Extension_BasicConstraints, extOID)) + { + int isCA = 0; + int pathLen = -1; + unsigned char * seqStart = p; + + VerifyOrExit(extCritical, error = CHIP_ERROR_INTERNAL); + extBasicPresent = true; + + result = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE); + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + if (len > 0) + { + result = mbedtls_asn1_get_bool(&p, end, &isCA); + VerifyOrExit(result == 0 || result == MBEDTLS_ERR_ASN1_UNEXPECTED_TAG, error = CHIP_ERROR_INTERNAL); + + if (p != seqStart + len) + { + result = 
mbedtls_asn1_get_int(&p, end, &pathLen); + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + } + } + + if (certType == AttestationCertType::kDAC) + { + VerifyOrExit(!isCA && pathLen == -1, error = CHIP_ERROR_INTERNAL); + } + else if (certType == AttestationCertType::kPAI) + { + VerifyOrExit(isCA && pathLen == 0, error = CHIP_ERROR_INTERNAL); + } + else + { + VerifyOrExit(isCA && (pathLen == -1 || pathLen == 0 || pathLen == 1), error = CHIP_ERROR_INTERNAL); + } + } + else if (OID_CMP(sOID_Extension_KeyUsage, extOID)) + { + mbedtls_x509_bitstring bs = { 0, 0, nullptr }; + unsigned int keyUsage = 0; + + VerifyOrExit(extCritical, error = CHIP_ERROR_INTERNAL); + extKeyUsagePresent = true; + + result = mbedtls_asn1_get_bitstring(&p, p + len, &bs); + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + + for (size_t i = 0; i < bs.CHIP_CRYPTO_PAL_PRIVATE_X509(len) && i < sizeof(unsigned int); i++) + { + keyUsage |= static_cast(bs.CHIP_CRYPTO_PAL_PRIVATE_X509(p)[i]) << (8 * i); + } + + if (certType == AttestationCertType::kDAC) + { + // SHALL only have the digitalSignature bit set. + VerifyOrExit(keyUsage == MBEDTLS_X509_KU_DIGITAL_SIGNATURE, error = CHIP_ERROR_INTERNAL); + } + else + { + bool keyCertSignFlag = keyUsage & MBEDTLS_X509_KU_KEY_CERT_SIGN; + bool crlSignFlag = keyUsage & MBEDTLS_X509_KU_CRL_SIGN; + bool otherFlags = + keyUsage & ~(MBEDTLS_X509_KU_CRL_SIGN | MBEDTLS_X509_KU_KEY_CERT_SIGN | MBEDTLS_X509_KU_DIGITAL_SIGNATURE); + VerifyOrExit(keyCertSignFlag && crlSignFlag && !otherFlags, error = CHIP_ERROR_INTERNAL); + } + } + else + { + p += len; + } + } + + // Verify basic and key usage extensions are present. + VerifyOrExit(extBasicPresent && extKeyUsagePresent, error = CHIP_ERROR_INTERNAL); + + // Verify that SKID and AKID extensions are present. 
+ { + uint8_t kidBuf[kSubjectKeyIdentifierLength]; + MutableByteSpan kid(kidBuf); + SuccessOrExit(error = ExtractSKIDFromX509Cert(cert, kid)); + if (certType == AttestationCertType::kDAC || certType == AttestationCertType::kPAI) + { + // Mandatory extension for DAC and PAI certs. + SuccessOrExit(error = ExtractAKIDFromX509Cert(cert, kid)); + } + } + +exit: + _log_mbedTLS_error(result); + mbedtls_x509_crt_free(&mbed_cert); + +#else + (void) cert; + (void) certType; + CHIP_ERROR error = CHIP_ERROR_NOT_IMPLEMENTED; +#endif // defined(MBEDTLS_X509_CRT_PARSE_C) + + return error; +} + +CHIP_ERROR ValidateCertificateChain(const uint8_t * rootCertificate, size_t rootCertificateLen, const uint8_t * caCertificate, + size_t caCertificateLen, const uint8_t * leafCertificate, size_t leafCertificateLen, + CertificateChainValidationResult & result) +{ +#if defined(MBEDTLS_X509_CRT_PARSE_C) + CHIP_ERROR error = CHIP_NO_ERROR; + mbedtls_x509_crt certChain; + mbedtls_x509_crt rootCert; + int mbedResult; + uint32_t flags = 0; + + result = CertificateChainValidationResult::kInternalFrameworkError; + + VerifyOrReturnError(rootCertificate != nullptr && rootCertificateLen != 0, + (result = CertificateChainValidationResult::kRootArgumentInvalid, CHIP_ERROR_INVALID_ARGUMENT)); + VerifyOrReturnError(leafCertificate != nullptr && leafCertificateLen != 0, + (result = CertificateChainValidationResult::kLeafArgumentInvalid, CHIP_ERROR_INVALID_ARGUMENT)); + + mbedtls_x509_crt_init(&certChain); + mbedtls_x509_crt_init(&rootCert); + + /* Start of chain */ + mbedResult = mbedtls_x509_crt_parse(&certChain, Uint8::to_const_uchar(leafCertificate), leafCertificateLen); + VerifyOrExit(mbedResult == 0, (result = CertificateChainValidationResult::kLeafFormatInvalid, error = CHIP_ERROR_INTERNAL)); + + /* Add the intermediate to the chain, if present */ + if (caCertificate != nullptr && caCertificateLen > 0) + { + mbedResult = mbedtls_x509_crt_parse(&certChain, Uint8::to_const_uchar(caCertificate), 
caCertificateLen); + VerifyOrExit(mbedResult == 0, (result = CertificateChainValidationResult::kICAFormatInvalid, error = CHIP_ERROR_INTERNAL)); + } + + /* Parse the root cert */ + mbedResult = mbedtls_x509_crt_parse(&rootCert, Uint8::to_const_uchar(rootCertificate), rootCertificateLen); + VerifyOrExit(mbedResult == 0, (result = CertificateChainValidationResult::kRootFormatInvalid, error = CHIP_ERROR_INTERNAL)); + + /* Verify the chain against the root */ + mbedResult = + mbedtls_x509_crt_verify(&certChain, &rootCert, nullptr, nullptr, &flags, CallbackForCustomValidityCheck, &certChain); + + switch (mbedResult) + { + case 0: + VerifyOrExit(flags == 0, (result = CertificateChainValidationResult::kInternalFrameworkError, error = CHIP_ERROR_INTERNAL)); + result = CertificateChainValidationResult::kSuccess; + break; + case MBEDTLS_ERR_X509_INVALID_DATE: + case MBEDTLS_ERR_X509_CERT_VERIFY_FAILED: + result = CertificateChainValidationResult::kChainInvalid; + error = CHIP_ERROR_CERT_NOT_TRUSTED; + break; + default: + SuccessOrExit((result = CertificateChainValidationResult::kInternalFrameworkError, error = CHIP_ERROR_INTERNAL)); + } + +exit: + _log_mbedTLS_error(mbedResult); + mbedtls_x509_crt_free(&certChain); + mbedtls_x509_crt_free(&rootCert); + +#else + (void) rootCertificate; + (void) rootCertificateLen; + (void) caCertificate; + (void) caCertificateLen; + (void) leafCertificate; + (void) leafCertificateLen; + (void) result; + CHIP_ERROR error = CHIP_ERROR_NOT_IMPLEMENTED; +#endif // defined(MBEDTLS_X509_CRT_PARSE_C) + + return error; +} + +CHIP_ERROR IsCertificateValidAtIssuance(const ByteSpan & candidateCertificate, const ByteSpan & issuerCertificate) +{ +#if defined(MBEDTLS_X509_CRT_PARSE_C) + CHIP_ERROR error = CHIP_NO_ERROR; + mbedtls_x509_crt mbedCandidateCertificate; + mbedtls_x509_crt mbedIssuerCertificate; + int result; + + VerifyOrReturnError(!candidateCertificate.empty() && !issuerCertificate.empty(), CHIP_ERROR_INVALID_ARGUMENT); + + 
mbedtls_x509_crt_init(&mbedCandidateCertificate); + mbedtls_x509_crt_init(&mbedIssuerCertificate); + + result = mbedtls_x509_crt_parse(&mbedCandidateCertificate, Uint8::to_const_uchar(candidateCertificate.data()), + candidateCertificate.size()); + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + + result = + mbedtls_x509_crt_parse(&mbedIssuerCertificate, Uint8::to_const_uchar(issuerCertificate.data()), issuerCertificate.size()); + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + + // Verify that the candidateCertificate has a notBefore time valid within the validity period of the issuerCertificate. + SuccessOrExit(error = IsCertificateValidAtIssuance(&mbedCandidateCertificate, &mbedIssuerCertificate)); + +exit: + _log_mbedTLS_error(result); + mbedtls_x509_crt_free(&mbedCandidateCertificate); + mbedtls_x509_crt_free(&mbedIssuerCertificate); + +#else + (void) candidateCertificate; + (void) issuerCertificate; + CHIP_ERROR error = CHIP_ERROR_NOT_IMPLEMENTED; +#endif // defined(MBEDTLS_X509_CRT_PARSE_C) + + return error; +} + +CHIP_ERROR IsCertificateValidAtCurrentTime(const ByteSpan & certificate) +{ +#if defined(MBEDTLS_X509_CRT_PARSE_C) + CHIP_ERROR error = CHIP_NO_ERROR; + mbedtls_x509_crt mbedCertificate; + int result; + + VerifyOrReturnError(!certificate.empty(), CHIP_ERROR_INVALID_ARGUMENT); + + mbedtls_x509_crt_init(&mbedCertificate); + + result = mbedtls_x509_crt_parse(&mbedCertificate, Uint8::to_const_uchar(certificate.data()), certificate.size()); + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + + // check if certificate's notBefore timestamp is earlier than or equal to current time. + result = mbedtls_x509_time_is_past(&mbedCertificate.CHIP_CRYPTO_PAL_PRIVATE_X509(valid_from)); + VerifyOrExit(result == 1, error = CHIP_ERROR_CERT_EXPIRED); + + // check if certificate's notAfter timestamp is later than current time. 
+ result = mbedtls_x509_time_is_future(&mbedCertificate.CHIP_CRYPTO_PAL_PRIVATE_X509(valid_to)); + VerifyOrExit(result == 1, error = CHIP_ERROR_CERT_EXPIRED); + +exit: + _log_mbedTLS_error(result); + mbedtls_x509_crt_free(&mbedCertificate); + +#else + (void) certificate; + CHIP_ERROR error = CHIP_ERROR_NOT_IMPLEMENTED; +#endif // defined(MBEDTLS_X509_CRT_PARSE_C) + + return error; +} + +CHIP_ERROR ExtractPubkeyFromX509Cert(const ByteSpan & certificate, Crypto::P256PublicKey & pubkey) +{ +#if defined(MBEDTLS_X509_CRT_PARSE_C) + CHIP_ERROR error = CHIP_NO_ERROR; + mbedtls_x509_crt mbed_cert; + mbedtls_uecc_keypair * keypair = nullptr; + size_t pubkey_size = 0; + + mbedtls_x509_crt_init(&mbed_cert); + + int result = mbedtls_x509_crt_parse(&mbed_cert, Uint8::to_const_uchar(certificate.data()), certificate.size()); + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + + VerifyOrExit(mbedtls_pk_get_type(&(mbed_cert.CHIP_CRYPTO_PAL_PRIVATE_X509(pk))) == MBEDTLS_PK_ECKEY, + error = CHIP_ERROR_INVALID_ARGUMENT); + + keypair = mbedtls_pk_uecc(mbed_cert.CHIP_CRYPTO_PAL_PRIVATE_X509(pk)); + Uint8::to_uchar(pubkey)[0] = 0x04; // uncompressed type + memcpy(Uint8::to_uchar(pubkey) + 1, keypair->public_key, 2 * NUM_ECC_BYTES); + + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + +exit: + _log_mbedTLS_error(result); + mbedtls_x509_crt_free(&mbed_cert); + +#else + (void) certificate; + (void) pubkey; + CHIP_ERROR error = CHIP_ERROR_NOT_IMPLEMENTED; +#endif // defined(MBEDTLS_X509_CRT_PARSE_C) + + return error; +} + +namespace { + +CHIP_ERROR ExtractKIDFromX509Cert(bool extractSKID, const ByteSpan & certificate, MutableByteSpan & kid) +{ +#if defined(MBEDTLS_X509_CRT_PARSE_C) + CHIP_ERROR error = CHIP_ERROR_NOT_FOUND; + mbedtls_x509_crt mbed_cert; + unsigned char * p = nullptr; + const unsigned char * end = nullptr; + size_t len = 0; + + mbedtls_x509_crt_init(&mbed_cert); + + int result = mbedtls_x509_crt_parse(&mbed_cert, Uint8::to_const_uchar(certificate.data()), 
certificate.size()); + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + + // TODO: The mbedTLS team is working on supporting SKID and AKID extensions processing. + // Once it is supported, this code should be updated. + + p = mbed_cert.CHIP_CRYPTO_PAL_PRIVATE_X509(v3_ext).CHIP_CRYPTO_PAL_PRIVATE_X509(p); + end = mbed_cert.CHIP_CRYPTO_PAL_PRIVATE_X509(v3_ext).CHIP_CRYPTO_PAL_PRIVATE_X509(p) + + mbed_cert.CHIP_CRYPTO_PAL_PRIVATE_X509(v3_ext).CHIP_CRYPTO_PAL_PRIVATE_X509(len); + result = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE); + VerifyOrExit(result == 0, error = CHIP_ERROR_WRONG_CERT_TYPE); + + while (p < end) + { + result = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE); + VerifyOrExit(result == 0, error = CHIP_ERROR_WRONG_CERT_TYPE); + result = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_OID); + VerifyOrExit(result == 0, error = CHIP_ERROR_WRONG_CERT_TYPE); + + mbedtls_x509_buf extOID = { MBEDTLS_ASN1_OID, len, p }; + bool extractCurrentExtSKID = extractSKID && OID_CMP(sOID_Extension_SubjectKeyIdentifier, extOID); + bool extractCurrentExtAKID = !extractSKID && OID_CMP(sOID_Extension_AuthorityKeyIdentifier, extOID); + p += len; + + int is_critical = 0; + result = mbedtls_asn1_get_bool(&p, end, &is_critical); + VerifyOrExit(result == 0 || result == MBEDTLS_ERR_ASN1_UNEXPECTED_TAG, error = CHIP_ERROR_WRONG_CERT_TYPE); + + result = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_OCTET_STRING); + VerifyOrExit(result == 0, error = CHIP_ERROR_WRONG_CERT_TYPE); + + if (extractCurrentExtSKID || extractCurrentExtAKID) + { + if (extractCurrentExtSKID) + { + result = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_OCTET_STRING); + VerifyOrExit(result == 0, error = CHIP_ERROR_WRONG_CERT_TYPE); + } + else + { + result = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE); + VerifyOrExit(result == 0, error = CHIP_ERROR_WRONG_CERT_TYPE); + result = 
mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_CONTEXT_SPECIFIC); + VerifyOrExit(result == 0, error = CHIP_ERROR_WRONG_CERT_TYPE); + // Other optional fields, authorityCertIssuer and authorityCertSerialNumber, + // will be skipped if present. + } + VerifyOrExit(len == kSubjectKeyIdentifierLength, error = CHIP_ERROR_WRONG_CERT_TYPE); + VerifyOrExit(len <= kid.size(), error = CHIP_ERROR_BUFFER_TOO_SMALL); + memcpy(kid.data(), p, len); + if (kid.size() > len) + { + kid.reduce_size(len); + } + ExitNow(error = CHIP_NO_ERROR); + break; + } + p += len; + } + +exit: + _log_mbedTLS_error(result); + mbedtls_x509_crt_free(&mbed_cert); + +#else + (void) certificate; + (void) kid; + CHIP_ERROR error = CHIP_ERROR_NOT_IMPLEMENTED; +#endif // defined(MBEDTLS_X509_CRT_PARSE_C) + + return error; +} + +} // namespace + +CHIP_ERROR ExtractSKIDFromX509Cert(const ByteSpan & certificate, MutableByteSpan & skid) +{ + return ExtractKIDFromX509Cert(true, certificate, skid); +} + +CHIP_ERROR ExtractAKIDFromX509Cert(const ByteSpan & certificate, MutableByteSpan & akid) +{ + return ExtractKIDFromX509Cert(false, certificate, akid); +} + +CHIP_ERROR ExtractVIDPIDFromX509Cert(const ByteSpan & certificate, AttestationCertVidPid & vidpid) +{ +#if defined(MBEDTLS_X509_CRT_PARSE_C) + CHIP_ERROR error = CHIP_NO_ERROR; + mbedtls_x509_crt mbed_cert; + mbedtls_asn1_named_data * dnIterator = nullptr; + AttestationCertVidPid vidpidFromCN; + + mbedtls_x509_crt_init(&mbed_cert); + + int result = mbedtls_x509_crt_parse(&mbed_cert, Uint8::to_const_uchar(certificate.data()), certificate.size()); + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + + for (dnIterator = &mbed_cert.CHIP_CRYPTO_PAL_PRIVATE_X509(subject); dnIterator != nullptr; + dnIterator = dnIterator->CHIP_CRYPTO_PAL_PRIVATE_X509(next)) + { + DNAttrType attrType = DNAttrType::kUnspecified; + if (OID_CMP(sOID_AttributeType_CommonName, dnIterator->CHIP_CRYPTO_PAL_PRIVATE_X509(oid))) + { + attrType = DNAttrType::kCommonName; + } + else if 
(OID_CMP(sOID_AttributeType_MatterVendorId, dnIterator->CHIP_CRYPTO_PAL_PRIVATE_X509(oid))) + { + attrType = DNAttrType::kMatterVID; + } + else if (OID_CMP(sOID_AttributeType_MatterProductId, dnIterator->CHIP_CRYPTO_PAL_PRIVATE_X509(oid))) + { + attrType = DNAttrType::kMatterPID; + } + + size_t val_len = dnIterator->CHIP_CRYPTO_PAL_PRIVATE_X509(val).CHIP_CRYPTO_PAL_PRIVATE_X509(len); + uint8_t * val_p = dnIterator->CHIP_CRYPTO_PAL_PRIVATE_X509(val).CHIP_CRYPTO_PAL_PRIVATE_X509(p); + error = ExtractVIDPIDFromAttributeString(attrType, ByteSpan(val_p, val_len), vidpid, vidpidFromCN); + SuccessOrExit(error); + } + + // If Matter Attributes were not found use values extracted from the CN Attribute, + // which might be uninitialized as well. + if (!vidpid.Initialized()) + { + vidpid = vidpidFromCN; + } + +exit: + _log_mbedTLS_error(result); + mbedtls_x509_crt_free(&mbed_cert); + +#else + (void) certificate; + (void) vidpid; + CHIP_ERROR error = CHIP_ERROR_NOT_IMPLEMENTED; +#endif // defined(MBEDTLS_X509_CRT_PARSE_C) + + return error; +} + +namespace { +#if defined(MBEDTLS_X509_CRT_PARSE_C) +CHIP_ERROR ExtractRawSubjectFromX509Cert(const ByteSpan & certificate, MutableByteSpan & subject) +{ + CHIP_ERROR error = CHIP_NO_ERROR; + int result = 0; + uint8_t * p = nullptr; + size_t len = 0; + mbedtls_x509_crt mbedCertificate; + + ReturnErrorCodeIf(certificate.empty(), CHIP_ERROR_INVALID_ARGUMENT); + + mbedtls_x509_crt_init(&mbedCertificate); + result = mbedtls_x509_crt_parse(&mbedCertificate, Uint8::to_const_uchar(certificate.data()), certificate.size()); + VerifyOrExit(result == 0, error = CHIP_ERROR_INTERNAL); + + len = mbedCertificate.CHIP_CRYPTO_PAL_PRIVATE_X509(subject_raw).CHIP_CRYPTO_PAL_PRIVATE_X509(len); + p = mbedCertificate.CHIP_CRYPTO_PAL_PRIVATE_X509(subject_raw).CHIP_CRYPTO_PAL_PRIVATE_X509(p); + + VerifyOrExit(len <= subject.size(), error = CHIP_ERROR_BUFFER_TOO_SMALL); + memcpy(subject.data(), p, len); + subject.reduce_size(len); + +exit: + 
_log_mbedTLS_error(result); + mbedtls_x509_crt_free(&mbedCertificate); + + return error; +} +#endif // defined(MBEDTLS_X509_CRT_PARSE_C) +} // namespace + +CHIP_ERROR ReplaceCertIfResignedCertFound(const ByteSpan & referenceCertificate, const ByteSpan * candidateCertificates, + size_t candidateCertificatesCount, ByteSpan & outCertificate) +{ +#if defined(MBEDTLS_X509_CRT_PARSE_C) + constexpr size_t kMaxCertificateSubjectLength = 150; + uint8_t referenceSubjectBuf[kMaxCertificateSubjectLength]; + uint8_t referenceSKIDBuf[kSubjectKeyIdentifierLength]; + MutableByteSpan referenceSubject(referenceSubjectBuf); + MutableByteSpan referenceSKID(referenceSKIDBuf); + + outCertificate = referenceCertificate; + + ReturnErrorCodeIf(candidateCertificates == nullptr || candidateCertificatesCount == 0, CHIP_NO_ERROR); + + ReturnErrorOnFailure(ExtractRawSubjectFromX509Cert(referenceCertificate, referenceSubject)); + ReturnErrorOnFailure(ExtractSKIDFromX509Cert(referenceCertificate, referenceSKID)); + + for (size_t i = 0; i < candidateCertificatesCount; i++) + { + const ByteSpan candidateCertificate = candidateCertificates[i]; + uint8_t candidateSubjectBuf[kMaxCertificateSubjectLength]; + uint8_t candidateSKIDBuf[kSubjectKeyIdentifierLength]; + MutableByteSpan candidateSubject(candidateSubjectBuf); + MutableByteSpan candidateSKID(candidateSKIDBuf); + + ReturnErrorOnFailure(ExtractRawSubjectFromX509Cert(candidateCertificate, candidateSubject)); + ReturnErrorOnFailure(ExtractSKIDFromX509Cert(candidateCertificate, candidateSKID)); + + if (referenceSKID.data_equal(candidateSKID) && referenceSubject.data_equal(candidateSubject)) + { + outCertificate = candidateCertificate; + return CHIP_NO_ERROR; + } + } + + return CHIP_NO_ERROR; +#else + (void) referenceCertificate; + (void) candidateCertificates; + (void) candidateCertificatesCount; + (void) outCertificate; + return CHIP_ERROR_NOT_IMPLEMENTED; +#endif // defined(MBEDTLS_X509_CRT_PARSE_C) +} + +} // namespace Crypto +} // namespace chip 
diff --git a/src/platform/silabs/SiWx917/wifi_args.gni b/src/platform/silabs/SiWx917/wifi_args.gni index 44f07f37985d88..d63fb5e1d89a2c 100644 --- a/src/platform/silabs/SiWx917/wifi_args.gni +++ b/src/platform/silabs/SiWx917/wifi_args.gni @@ -25,7 +25,7 @@ arm_platform_config = "${efr32_sdk_build_root}/efr32_arm.gni" mbedtls_target = "${efr32_sdk_build_root}:efr32_sdk" -chip_crypto = "tinycrypt" +chip_crypto = "platform" # Transitional CommissionableDataProvider not used anymore # examples/platform/efr32/EFR32DeviceDataProvider is now used. diff --git a/src/platform/webos/BLEManagerImpl.cpp b/src/platform/webos/BLEManagerImpl.cpp index 4aef02746ebc5a..3ea67b6ad1c486 100644 --- a/src/platform/webos/BLEManagerImpl.cpp +++ b/src/platform/webos/BLEManagerImpl.cpp @@ -917,31 +917,26 @@ void BLEManagerImpl::NotifyBLEPeripheralAdvStopComplete(bool aIsSuccess, void * PlatformMgr().PostEventOrDie(&event); } -void BLEManagerImpl::OnChipScanComplete() -{ - if (mBLEScanConfig.mBleScanState != BleScanState::kScanForDiscriminator && - mBLEScanConfig.mBleScanState != BleScanState::kScanForAddress) - { - ChipLogProgress(DeviceLayer, "Scan complete notification without an active scan."); - return; - } - - ChipLogError(DeviceLayer, "Scan Completed with Timeout: Notify Upstream."); - BleConnectionDelegate::OnConnectionError(mBLEScanConfig.mAppState, CHIP_ERROR_TIMEOUT); - mBLEScanConfig.mBleScanState = BleScanState::kNotScanning; -} - void BLEManagerImpl::OnScanComplete() { - if (mBLEScanConfig.mBleScanState != BleScanState::kScanForDiscriminator && - mBLEScanConfig.mBleScanState != BleScanState::kScanForAddress) + switch (mBLEScanConfig.mBleScanState) { + case BleScanState::kNotScanning: ChipLogProgress(Ble, "Scan complete notification without an active scan."); - return; + break; + case BleScanState::kScanForAddress: + case BleScanState::kScanForDiscriminator: + mBLEScanConfig.mBleScanState = BleScanState::kNotScanning; + ChipLogProgress(Ble, "Scan complete. 
No matching device found."); + break; + case BleScanState::kConnecting: + break; } +} - BleConnectionDelegate::OnConnectionError(mBLEScanConfig.mAppState, CHIP_ERROR_TIMEOUT); - mBLEScanConfig.mBleScanState = BleScanState::kNotScanning; +void BLEManagerImpl::OnScanError(CHIP_ERROR err) +{ + ChipLogDetail(Ble, "BLE scan error: %" CHIP_ERROR_FORMAT, err.Format()); } bool BLEManagerImpl::gattGetServiceCb(LSHandle * sh, LSMessage * message, void * userData) diff --git a/src/platform/webos/BLEManagerImpl.h b/src/platform/webos/BLEManagerImpl.h index a9a8a0ac88312d..2ab9a5844e4fdc 100644 --- a/src/platform/webos/BLEManagerImpl.h +++ b/src/platform/webos/BLEManagerImpl.h @@ -135,10 +135,9 @@ class BLEManagerImpl final : public BLEManager, CHIP_ERROR CancelConnection() override; // ===== Members that implement virtual methods on ChipDeviceScannerDelegate - void OnScanComplete() override; void OnChipDeviceScanned(char * address) override; - void OnChipScanComplete() override; - + void OnScanComplete() override; + void OnScanError(CHIP_ERROR err) override; // ===== Members for internal use by the following friends. 
friend BLEManager & BLEMgr(); diff --git a/src/platform/webos/ChipDeviceScanner.cpp b/src/platform/webos/ChipDeviceScanner.cpp index 7e3a4b71b9f0a9..c79112b820bec3 100644 --- a/src/platform/webos/ChipDeviceScanner.cpp +++ b/src/platform/webos/ChipDeviceScanner.cpp @@ -336,7 +336,7 @@ CHIP_ERROR ChipDeviceScanner::StopChipScan() ChipLogProgress(DeviceLayer, "CHIP Scanner Async Thread Quit Done..Wait for Thread Windup...!"); // Report to Impl class - mDelegate->OnChipScanComplete(); + mDelegate->OnScanComplete(); mIsScanning = false; diff --git a/src/platform/webos/ChipDeviceScanner.h b/src/platform/webos/ChipDeviceScanner.h index d3481204c642a5..084d45d9ac0201 100644 --- a/src/platform/webos/ChipDeviceScanner.h +++ b/src/platform/webos/ChipDeviceScanner.h @@ -43,8 +43,10 @@ class ChipDeviceScannerDelegate virtual void OnChipDeviceScanned(char * address) = 0; // Called when a scan was completed (stopped or timed out) - virtual void OnScanComplete() = 0; - virtual void OnChipScanComplete() = 0; + virtual void OnScanComplete() = 0; + + // Called on scan error + virtual void OnScanError(CHIP_ERROR err) = 0; }; /// Allows scanning for CHIP devices diff --git a/src/platform/webos/NetworkCommissioningWiFiDriver.cpp b/src/platform/webos/NetworkCommissioningWiFiDriver.cpp index aa3d7159b3267f..deee11b66e494d 100644 --- a/src/platform/webos/NetworkCommissioningWiFiDriver.cpp +++ b/src/platform/webos/NetworkCommissioningWiFiDriver.cpp @@ -156,7 +156,7 @@ void LinuxWiFiDriver::ConnectNetwork(ByteSpan networkId, ConnectCallback * callb VerifyOrExit(NetworkMatch(mStagingNetwork, networkId), networkingStatus = Status::kNetworkIDNotFound); - ChipLogProgress(NetworkProvisioning, "LinuxNetworkCommissioningDelegate: SSID: %s", networkId.data()); + ChipLogProgress(NetworkProvisioning, "LinuxWiFiDriver: SSID: %.*s", static_cast(networkId.size()), networkId.data()); err = ConnectivityMgrImpl().ConnectWiFiNetworkAsync(ByteSpan(mStagingNetwork.ssid, mStagingNetwork.ssidLen), 
ByteSpan(mStagingNetwork.credentials, mStagingNetwork.credentialsLen), diff --git a/src/protocols/secure_channel/CASEServer.cpp b/src/protocols/secure_channel/CASEServer.cpp index 221ca831eb5dc9..b0d74694a5196b 100644 --- a/src/protocols/secure_channel/CASEServer.cpp +++ b/src/protocols/secure_channel/CASEServer.cpp @@ -73,7 +73,14 @@ CHIP_ERROR CASEServer::OnUnsolicitedMessageReceived(const PayloadHeader & payloa CHIP_ERROR CASEServer::OnMessageReceived(Messaging::ExchangeContext * ec, const PayloadHeader & payloadHeader, System::PacketBufferHandle && payload) { - ChipLogProgress(Inet, "CASE Server received Sigma1 message. Starting handshake. EC %p", ec); + if (!ec->GetSessionHandle()->IsUnauthenticatedSession()) + { + ChipLogError(Inet, "CASE Server received Sigma1 message %s EC %p", "over encrypted session. Ignoring.", ec); + return CHIP_ERROR_INCORRECT_STATE; + } + + ChipLogProgress(Inet, "CASE Server received Sigma1 message %s EC %p", ". Starting handshake.", ec); + CHIP_ERROR err = InitCASEHandshake(ec); SuccessOrExit(err); diff --git a/src/protocols/secure_channel/PASESession.cpp b/src/protocols/secure_channel/PASESession.cpp index 9be51e42ed90e2..603c7ac51d1fdc 100644 --- a/src/protocols/secure_channel/PASESession.cpp +++ b/src/protocols/secure_channel/PASESession.cpp @@ -794,6 +794,13 @@ CHIP_ERROR PASESession::ValidateReceivedMessage(ExchangeContext * exchange, cons { mExchangeCtxt = exchange; } + + if (!mExchangeCtxt->GetSessionHandle()->IsUnauthenticatedSession()) + { + ChipLogError(SecureChannel, "PASESession received PBKDFParamRequest over encrypted session. 
Ignoring."); + return CHIP_ERROR_INCORRECT_STATE; + } + mExchangeCtxt->UseSuggestedResponseTimeout(kExpectedHighProcessingTime); VerifyOrReturnError(!msg.IsNull(), CHIP_ERROR_INVALID_ARGUMENT); diff --git a/src/python_testing/TC_ACE_1_3.py b/src/python_testing/TC_ACE_1_3.py index dd3639d7c0b476..2d81b78230fe5f 100644 --- a/src/python_testing/TC_ACE_1_3.py +++ b/src/python_testing/TC_ACE_1_3.py @@ -18,7 +18,7 @@ import logging import chip.clusters as Clusters -from chip.interaction_model import InteractionModelError, Status +from chip.interaction_model import Status from matter_testing_support import MatterBaseTest, async_test_body, default_matter_test_main from mobly import asserts @@ -43,7 +43,8 @@ async def read_descriptor_expect_success(self, th): async def read_descriptor_expect_unsupported_access(self, th): cluster = Clusters.Objects.Descriptor attribute = Clusters.Descriptor.Attributes.DeviceTypeList - await self.read_single_attribute_expect_error(dev_ctrl=th, endpoint=0, cluster=cluster, attribute=attribute, error=Status.UnsupportedAccess) + await self.read_single_attribute_expect_error( + dev_ctrl=th, endpoint=0, cluster=cluster, attribute=attribute, error=Status.UnsupportedAccess) @async_test_body async def test_TC_ACE_1_3(self): @@ -60,6 +61,8 @@ async def test_TC_ACE_1_3(self): self.print_step(1, "Commissioning, already done") TH0 = self.default_controller + # _ = TH0 Hack for flake8 F841 local variable 'TH0' is assigned to but never used + _ = TH0 fabric_admin = self.certificate_authority_manager.activeCaList[0].adminList[0] TH0_nodeid = self.matter_test_config.controller_node_id @@ -78,14 +81,16 @@ async def test_TC_ACE_1_3(self): catTags=[cat1v1, cat2v2]) self.print_step(2, "TH0 writes ACL all view on PIXIT.ACE.TESTENDPOINT") - TH0_admin_acl = Clusters.AccessControl.Structs.AccessControlEntryStruct(privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kAdminister, - 
authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, - subjects=[TH0_nodeid], - targets=[Clusters.AccessControl.Structs.Target(endpoint=0, cluster=0x001f)]) - all_view = Clusters.AccessControl.Structs.AccessControlEntryStruct(privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, - authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, - subjects=[], - targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) + TH0_admin_acl = Clusters.AccessControl.Structs.AccessControlEntryStruct( + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kAdminister, + authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, + subjects=[TH0_nodeid], + targets=[Clusters.AccessControl.Structs.Target(endpoint=0, cluster=0x001f)]) + all_view = Clusters.AccessControl.Structs.AccessControlEntryStruct( + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, + authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, + subjects=[], + targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) acl = [TH0_admin_acl, all_view] await self.write_acl(acl) @@ -99,10 +104,11 @@ async def test_TC_ACE_1_3(self): await self.read_descriptor_expect_success(TH3) self.print_step(6, "TH0 writes ACL TH1 view on EP0") - th1_view = Clusters.AccessControl.Structs.AccessControlEntryStruct(privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, - authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, - subjects=[TH1_nodeid], - targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) + th1_view = Clusters.AccessControl.Structs.AccessControlEntryStruct( + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, + authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, + subjects=[TH1_nodeid], + targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) acl = [TH0_admin_acl, 
th1_view] await self.write_acl(acl) self.print_step(7, "TH1 reads EP0 descriptor - expect SUCCESS") @@ -115,10 +121,11 @@ async def test_TC_ACE_1_3(self): await self.read_descriptor_expect_unsupported_access(TH3) self.print_step(10, "TH0 writes ACL TH2 view on EP0") - th2_view = Clusters.AccessControl.Structs.AccessControlEntryStruct(privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, - authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, - subjects=[TH2_nodeid], - targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) + th2_view = Clusters.AccessControl.Structs.AccessControlEntryStruct( + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, + authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, + subjects=[TH2_nodeid], + targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) acl = [TH0_admin_acl, th2_view] await self.write_acl(acl) @@ -132,10 +139,11 @@ async def test_TC_ACE_1_3(self): await self.read_descriptor_expect_unsupported_access(TH3) self.print_step(14, "TH0 writes ACL TH3 view on EP0") - th3_view = Clusters.AccessControl.Structs.AccessControlEntryStruct(privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, - authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, - subjects=[TH3_nodeid], - targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) + th3_view = Clusters.AccessControl.Structs.AccessControlEntryStruct( + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, + authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, + subjects=[TH3_nodeid], + targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) acl = [TH0_admin_acl, th3_view] await self.write_acl(acl) @@ -149,10 +157,11 @@ async def test_TC_ACE_1_3(self): await self.read_descriptor_expect_success(TH3) self.print_step(18, "TH0 writes ACL TH1 TH2 view on EP0") - th12_view = 
Clusters.AccessControl.Structs.AccessControlEntryStruct(privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, - authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, - subjects=[TH1_nodeid, TH2_nodeid], - targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) + th12_view = Clusters.AccessControl.Structs.AccessControlEntryStruct( + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, + authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, + subjects=[TH1_nodeid, TH2_nodeid], + targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) acl = [TH0_admin_acl, th12_view] await self.write_acl(acl) @@ -166,10 +175,11 @@ async def test_TC_ACE_1_3(self): await self.read_descriptor_expect_unsupported_access(TH3) self.print_step(22, "TH0 writes ACL TH1 TH3 view on EP0") - th13_view = Clusters.AccessControl.Structs.AccessControlEntryStruct(privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, - authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, - subjects=[TH1_nodeid, TH3_nodeid], - targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) + th13_view = Clusters.AccessControl.Structs.AccessControlEntryStruct( + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, + authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, + subjects=[TH1_nodeid, TH3_nodeid], + targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) acl = [TH0_admin_acl, th13_view] await self.write_acl(acl) @@ -183,10 +193,11 @@ async def test_TC_ACE_1_3(self): await self.read_descriptor_expect_success(TH3) self.print_step(26, "TH0 writes ACL TH2 TH3 view on EP0") - th23_view = Clusters.AccessControl.Structs.AccessControlEntryStruct(privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, - authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, - subjects=[TH2_nodeid, 
TH3_nodeid], - targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) + th23_view = Clusters.AccessControl.Structs.AccessControlEntryStruct( + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, + authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, + subjects=[TH2_nodeid, TH3_nodeid], + targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) acl = [TH0_admin_acl, th23_view] await self.write_acl(acl) @@ -200,10 +211,11 @@ async def test_TC_ACE_1_3(self): await self.read_descriptor_expect_success(TH3) self.print_step(30, "TH0 writes ACL TH1 TH2 TH3 view on EP0") - th123_view = Clusters.AccessControl.Structs.AccessControlEntryStruct(privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, - authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, - subjects=[TH1_nodeid, TH2_nodeid, TH3_nodeid], - targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) + th123_view = Clusters.AccessControl.Structs.AccessControlEntryStruct( + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, + authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, + subjects=[TH1_nodeid, TH2_nodeid, TH3_nodeid], + targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) acl = [TH0_admin_acl, th123_view] await self.write_acl(acl) @@ -217,10 +229,11 @@ async def test_TC_ACE_1_3(self): await self.read_descriptor_expect_success(TH3) self.print_step(34, "TH0 writes ACL cat1v1 view on EP0") - cat1v1_view = Clusters.AccessControl.Structs.AccessControlEntryStruct(privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, - authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, - subjects=[acl_subject(cat1v1)], - targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) + cat1v1_view = Clusters.AccessControl.Structs.AccessControlEntryStruct( + 
privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, + authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, + subjects=[acl_subject(cat1v1)], + targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) acl = [TH0_admin_acl, cat1v1_view] await self.write_acl(acl) @@ -234,10 +247,11 @@ async def test_TC_ACE_1_3(self): await self.read_descriptor_expect_success(TH3) self.print_step(38, "TH0 writes ACL cat1v2 view on EP0") - cat1v2_view = Clusters.AccessControl.Structs.AccessControlEntryStruct(privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, - authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, - subjects=[acl_subject(cat1v2)], - targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) + cat1v2_view = Clusters.AccessControl.Structs.AccessControlEntryStruct( + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, + authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, + subjects=[acl_subject(cat1v2)], + targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) acl = [TH0_admin_acl, cat1v2_view] await self.write_acl(acl) @@ -252,10 +266,11 @@ async def test_TC_ACE_1_3(self): await self.read_descriptor_expect_unsupported_access(TH3) self.print_step(42, "TH0 writes ACL cat1v3 view on EP0") - cat1v3_view = Clusters.AccessControl.Structs.AccessControlEntryStruct(privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, - authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, - subjects=[acl_subject(cat1v3)], - targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) + cat1v3_view = Clusters.AccessControl.Structs.AccessControlEntryStruct( + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, + authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, + subjects=[acl_subject(cat1v3)], + 
targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) acl = [TH0_admin_acl, cat1v3_view] await self.write_acl(acl) @@ -270,10 +285,11 @@ async def test_TC_ACE_1_3(self): await self.read_descriptor_expect_unsupported_access(TH3) self.print_step(46, "TH0 writes ACL cat2v1 view on EP0") - cat2v1_view = Clusters.AccessControl.Structs.AccessControlEntryStruct(privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, - authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, - subjects=[acl_subject(cat2v1)], - targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) + cat2v1_view = Clusters.AccessControl.Structs.AccessControlEntryStruct( + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, + authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, + subjects=[acl_subject(cat2v1)], + targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) acl = [TH0_admin_acl, cat2v1_view] await self.write_acl(acl) @@ -288,10 +304,11 @@ async def test_TC_ACE_1_3(self): await self.read_descriptor_expect_success(TH3) self.print_step(50, "TH0 writes ACL cat2v2 view on EP0") - cat2v2_view = Clusters.AccessControl.Structs.AccessControlEntryStruct(privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, - authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, - subjects=[acl_subject(cat2v2)], - targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) + cat2v2_view = Clusters.AccessControl.Structs.AccessControlEntryStruct( + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, + authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, + subjects=[acl_subject(cat2v2)], + targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) acl = [TH0_admin_acl, cat2v2_view] await self.write_acl(acl) @@ -306,10 +323,11 @@ async def test_TC_ACE_1_3(self): await self.read_descriptor_expect_success(TH3) 
self.print_step(54, "TH0 writes ACL cat2v3 view on EP0") - cat2v3_view = Clusters.AccessControl.Structs.AccessControlEntryStruct(privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, - authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, - subjects=[acl_subject(cat2v3)], - targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) + cat2v3_view = Clusters.AccessControl.Structs.AccessControlEntryStruct( + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, + authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, + subjects=[acl_subject(cat2v3)], + targets=[Clusters.AccessControl.Structs.Target(endpoint=0)]) acl = [TH0_admin_acl, cat2v3_view] await self.write_acl(acl) diff --git a/src/python_testing/TC_ACE_1_4.py b/src/python_testing/TC_ACE_1_4.py index c7e179ef2300ab..8091ee38926b32 100644 --- a/src/python_testing/TC_ACE_1_4.py +++ b/src/python_testing/TC_ACE_1_4.py @@ -15,12 +15,10 @@ # limitations under the License. 
# -import logging import sys import chip.clusters as Clusters -import chip.clusters.Objects -from chip.interaction_model import InteractionModelError, Status +from chip.interaction_model import Status from matter_testing_support import MatterBaseTest, async_test_body, default_matter_test_main from mobly import asserts @@ -53,13 +51,15 @@ async def read_descriptor_expect_success(self, endpoint: int) -> None: async def read_descriptor_expect_unsupported_access(self, endpoint: int) -> None: cluster = Clusters.Objects.Descriptor attribute = Clusters.Descriptor.Attributes.DeviceTypeList - await self.read_single_attribute_expect_error(endpoint=endpoint, cluster=cluster, attribute=attribute, error=Status.UnsupportedAccess) + await self.read_single_attribute_expect_error( + endpoint=endpoint, cluster=cluster, attribute=attribute, error=Status.UnsupportedAccess) async def read_appcluster_expect_success(self) -> None: await self.read_single_attribute_check_success(endpoint=self.endpoint, cluster=self.cluster, attribute=self.attribute) async def read_appcluster_expect_unsupported_access(self) -> None: - await self.read_single_attribute_expect_error(endpoint=self.endpoint, cluster=self.cluster, attribute=self.attribute, error=Status.UnsupportedAccess) + await self.read_single_attribute_expect_error( + endpoint=self.endpoint, cluster=self.cluster, attribute=self.attribute, error=Status.UnsupportedAccess) async def read_wildcard_endpoint(self, attribute: object) -> object: return await self.default_controller.ReadAttribute(self.dut_node_id, [(attribute)]) @@ -80,13 +80,17 @@ def check_read_success(self, results: object, endpoint: int, cluster: object, at async def test_TC_ACE_1_4(self): # TODO: Guard these on the PICS asserts.assert_true('PIXIT.ACE.APPENDPOINT' in self.matter_test_config.global_test_params, - "PIXIT.ACE.APPENDPOINT must be included on the command line in the --int-arg flag as PIXIT.ACE.APPENDPOINT:") + "PIXIT.ACE.APPENDPOINT must be included on the command line 
in " + "the --int-arg flag as PIXIT.ACE.APPENDPOINT:") asserts.assert_true('PIXIT.ACE.APPCLUSTER' in self.matter_test_config.global_test_params, - "PIXIT.ACE.APPCLUSTER must be included on the command line in the --string-arg flag as PIXIT.ACE.APPCLUSTER:") + "PIXIT.ACE.APPCLUSTER must be included on the command line in " + "the --string-arg flag as PIXIT.ACE.APPCLUSTER:") asserts.assert_true('PIXIT.ACE.APPATTRIBUTE' in self.matter_test_config.global_test_params, - "PIXIT.ACE.APPATTRIBUTE must be included on the command line in the --string-arg flag as PIXIT.ACE.APPATTRIBUTE:") + "PIXIT.ACE.APPATTRIBUTE must be included on the command line in " + "the --string-arg flag as PIXIT.ACE.APPATTRIBUTE:") asserts.assert_true('PIXIT.ACE.APPDEVTYPEID' in self.matter_test_config.global_test_params, - "PIXIT.ACE.APPDEVTYPEID must be included on the command line in the --int-arg flag as PIXIT.ACE.APPDEVTYPEID:") + "PIXIT.ACE.APPDEVTYPEID must be included on the command line in " + "the --int-arg flag as PIXIT.ACE.APPDEVTYPEID:") cluster_str = self.matter_test_config.global_test_params['PIXIT.ACE.APPCLUSTER'] attribute_str = self.matter_test_config.global_test_params['PIXIT.ACE.APPATTRIBUTE'] @@ -102,14 +106,16 @@ async def test_TC_ACE_1_4(self): self.print_step(1, "Commissioning, already done") self.print_step(2, "TH1 writes ACL all clusters view on all endpoints") - admin_acl = Clusters.AccessControl.Structs.AccessControlEntryStruct(privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kAdminister, - authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, - subjects=[], - targets=[Clusters.AccessControl.Structs.Target(endpoint=0, cluster=Clusters.AccessControl.id)]) - all_view = Clusters.AccessControl.Structs.AccessControlEntryStruct(privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, - authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, - subjects=[], - targets=[]) + admin_acl = 
Clusters.AccessControl.Structs.AccessControlEntryStruct( + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kAdminister, + authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, + subjects=[], + targets=[Clusters.AccessControl.Structs.Target(endpoint=0, cluster=Clusters.AccessControl.id)]) + all_view = Clusters.AccessControl.Structs.AccessControlEntryStruct( + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, + authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, + subjects=[], + targets=[]) acl = [admin_acl, all_view] await self.write_acl(acl) diff --git a/src/python_testing/TC_CGEN_2_4.py b/src/python_testing/TC_CGEN_2_4.py index ae9f912798b6a2..9240caff0e1afe 100644 --- a/src/python_testing/TC_CGEN_2_4.py +++ b/src/python_testing/TC_CGEN_2_4.py @@ -15,19 +15,13 @@ # limitations under the License. # -import asyncio import logging -import queue import time -from threading import Event import chip.CertificateAuthority import chip.clusters as Clusters import chip.FabricAdmin from chip import ChipDeviceCtrl -from chip.clusters.Attribute import SubscriptionTransaction, TypedAttributePath -from chip.interaction_model import InteractionModelError -from chip.utils import CommissioningBuildingBlocks from matter_testing_support import MatterBaseTest, async_test_body, default_matter_test_main from mobly import asserts @@ -45,15 +39,18 @@ def OpenCommissioningWindow(self) -> int: logging.exception('Error running OpenCommissioningWindow %s', e) asserts.assert_true(False, 'Failed to open commissioning window') - async def CommissionToStageSendCompleteAndCleanup(self, stage: int, expectedErrorPart: chip.native.ErrorSDKPart, expectedErrCode: int): + async def CommissionToStageSendCompleteAndCleanup( + self, stage: int, expectedErrorPart: chip.native.ErrorSDKPart, expectedErrCode: int): logging.info("-----------------Fail on step {}-------------------------".format(stage)) pin, code = 
self.OpenCommissioningWindow() self.th2.ResetTestCommissioner() - # This will run the commissioning up to the point where stage x is run and the response is sent before the test commissioner simulates a failure + # This will run the commissioning up to the point where stage x is run and the + # response is sent before the test commissioner simulates a failure self.th2.SetTestCommissionerPrematureCompleteAfter(stage) success, errcode = self.th2.CommissionOnNetwork( - nodeId=self.dut_node_id, setupPinCode=pin, filterType=ChipDeviceCtrl.DiscoveryFilterType.LONG_DISCRIMINATOR, filter=self.matter_test_config.discriminator) + nodeId=self.dut_node_id, setupPinCode=pin, + filterType=ChipDeviceCtrl.DiscoveryFilterType.LONG_DISCRIMINATOR, filter=self.matter_test_config.discriminator) logging.info('Commissioning complete done. Successful? {}, errorcode = {}'.format(success, errcode)) asserts.assert_false(success, 'Commissioning complete did not error as expected') asserts.assert_true(errcode.sdk_part == expectedErrorPart, 'Unexpected error type returned from CommissioningComplete') @@ -92,7 +89,8 @@ async def test_TC_CGEN_2_4(self): logging.info('Step 16 - TH2 fully commissions the DUT') self.th2.ResetTestCommissioner() success, errcode = self.th2.CommissionOnNetwork( - nodeId=self.dut_node_id, setupPinCode=pin, filterType=ChipDeviceCtrl.DiscoveryFilterType.LONG_DISCRIMINATOR, filter=self.matter_test_config.discriminator) + nodeId=self.dut_node_id, setupPinCode=pin, + filterType=ChipDeviceCtrl.DiscoveryFilterType.LONG_DISCRIMINATOR, filter=self.matter_test_config.discriminator) logging.info('Commissioning complete done. Successful? 
{}, errorcode = {}'.format(success, errcode)) logging.info('Step 17 - TH1 sends an arm failsafe') @@ -109,11 +107,15 @@ async def test_TC_CGEN_2_4(self): newloc = Clusters.GeneralCommissioning.Enums.RegulatoryLocationType.kIndoor else: # TODO: figure out how to use the extender - #newloc = MatterIntEnum.extend_enum_if_value_doesnt_exist(Clusters.GeneralCommissioning.Enums.RegulatoryLocationType, 3) - newlog = cap + # newloc = MatterIntEnum.extend_enum_if_value_doesnt_exist( + # Clusters.GeneralCommissioning.Enums.RegulatoryLocationType, 3) + newloc = cap + + _ = newloc logging.info('Step 19 Send SetRgulatoryConfig with incorrect location') - #cmd = Clusters.GeneralCommissioning.Commands.SetRegulatoryConfig(newRegulatoryConfig=newloc, countryCode="XX", breadcrumb=0) + # cmd = Clusters.GeneralCommissioning.Commands.SetRegulatoryConfig( + # newRegulatoryConfig=newloc, countryCode="XX", breadcrumb=0) # try: # await self.th1.SendCommand(nodeid=self.dut_node_id, endpoint=0, payload=cmd) # except InteractionModelError as ex: diff --git a/src/python_testing/TC_DA_1_7.py b/src/python_testing/TC_DA_1_7.py index b16faccbd339bc..4b4dd66aa65866 100644 --- a/src/python_testing/TC_DA_1_7.py +++ b/src/python_testing/TC_DA_1_7.py @@ -21,7 +21,6 @@ from typing import Optional import chip.clusters as Clusters -from chip.interaction_model import Status from cryptography.exceptions import InvalidSignature from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives.asymmetric import ec @@ -79,13 +78,15 @@ async def test_TC_DA_1_7(self): dev_ctrl = self.default_controller logging.info("Step 2: Get PAI of DUT1 with certificate chain request") - result = await dev_ctrl.SendCommand(self.dut_node_id, 0, Clusters.OperationalCredentials.Commands.CertificateChainRequest(2)) + result = await dev_ctrl.SendCommand(self.dut_node_id, 0, + Clusters.OperationalCredentials.Commands.CertificateChainRequest(2)) pai_1 = result.certificate asserts.assert_less_equal(len(pai_1), 
600, "PAI cert must be at most 600 bytes") self.record_data({"pai_1": hex_from_bytes(pai_1)}) logging.info("Step 3: Get DAC of DUT1 with certificate chain request") - result = await dev_ctrl.SendCommand(self.dut_node_id, 0, Clusters.OperationalCredentials.Commands.CertificateChainRequest(1)) + result = await dev_ctrl.SendCommand(self.dut_node_id, 0, + Clusters.OperationalCredentials.Commands.CertificateChainRequest(1)) dac_1 = result.certificate asserts.assert_less_equal(len(dac_1), 600, "DAC cert must be at most 600 bytes") self.record_data({"dac_1": hex_from_bytes(dac_1)}) diff --git a/src/python_testing/TC_RR_1_1.py b/src/python_testing/TC_RR_1_1.py index 754b9dbf62a819..4a42498eb0146c 100644 --- a/src/python_testing/TC_RR_1_1.py +++ b/src/python_testing/TC_RR_1_1.py @@ -21,16 +21,9 @@ import queue import random import time -from binascii import hexlify -from threading import Event from typing import Any, Dict, List, Set -import chip.CertificateAuthority import chip.clusters as Clusters -import chip.FabricAdmin -from chip import ChipDeviceCtrl -from chip.clusters.Attribute import AttributeStatus, SubscriptionTransaction, TypedAttributePath -from chip.clusters.Types import NullValue from chip.interaction_model import Status as StatusEnum from chip.utils import CommissioningBuildingBlocks from matter_testing_support import MatterBaseTest, async_test_body, default_matter_test_main @@ -113,7 +106,10 @@ async def test_TC_RR_1_1(self): # TODO: Shall we also verify SupportedFabrics attribute, and the CapabilityMinima attribute? 
logging.info("Pre-conditions: validate CapabilityMinima.CaseSessionsPerFabric >= 3") - capability_minima = await self.read_single_attribute(dev_ctrl, node_id=self.dut_node_id, endpoint=0, attribute=Clusters.BasicInformation.Attributes.CapabilityMinima) + capability_minima = await self.read_single_attribute(dev_ctrl, + node_id=self.dut_node_id, + endpoint=0, + attribute=Clusters.BasicInformation.Attributes.CapabilityMinima) asserts.assert_greater_equal(capability_minima.caseSessionsPerFabric, 3) # Step 1: Commission 5 fabrics with maximized NOC chains. 1a and 1b have already been completed at this time. @@ -127,13 +123,21 @@ async def test_TC_RR_1_1(self): client_list.append(dev_ctrl) if num_controllers_per_fabric > 1: - new_controllers = await CommissioningBuildingBlocks.CreateControllersOnFabric(fabricAdmin=dev_ctrl.fabricAdmin, adminDevCtrl=dev_ctrl, controllerNodeIds=node_ids, privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kAdminister, targetNodeId=self.dut_node_id, catTags=[0x0001_0001]) + new_controllers = await CommissioningBuildingBlocks.CreateControllersOnFabric( + fabricAdmin=dev_ctrl.fabricAdmin, + adminDevCtrl=dev_ctrl, + controllerNodeIds=node_ids, + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kAdminister, + targetNodeId=self.dut_node_id, catTags=[0x0001_0001] + ) for controller in new_controllers: controller.name = all_names.pop(0) client_list.extend(new_controllers) # Step 1c - Ensure there are no leftover fabrics from another process. - commissioned_fabric_count: int = await self.read_single_attribute(dev_ctrl, node_id=self.dut_node_id, endpoint=0, attribute=Clusters.OperationalCredentials.Attributes.CommissionedFabrics) + commissioned_fabric_count: int = await self.read_single_attribute( + dev_ctrl, node_id=self.dut_node_id, + endpoint=0, attribute=Clusters.OperationalCredentials.Attributes.CommissionedFabrics) # Insert a fabric to self-test the next step. 
# This is not hidden behind a flag to avoid potential undetected bugs. @@ -144,23 +148,32 @@ async def test_TC_RR_1_1(self): new_admin_ctrl = new_fabric_admin.NewController(nodeId=dev_ctrl.nodeId, catTags=[0x0001_0001]) new_admin_ctrl.name = "THTF" - await CommissioningBuildingBlocks.AddNOCForNewFabricFromExisting(commissionerDevCtrl=dev_ctrl, newFabricDevCtrl=new_admin_ctrl, existingNodeId=self.dut_node_id, newNodeId=self.dut_node_id) + await CommissioningBuildingBlocks.AddNOCForNewFabricFromExisting( + commissionerDevCtrl=dev_ctrl, newFabricDevCtrl=new_admin_ctrl, + existingNodeId=self.dut_node_id, newNodeId=self.dut_node_id) - commissioned_fabric_count = await self.read_single_attribute(dev_ctrl, node_id=self.dut_node_id, endpoint=0, attribute=Clusters.OperationalCredentials.Attributes.CommissionedFabrics) + commissioned_fabric_count = await self.read_single_attribute( + dev_ctrl, node_id=self.dut_node_id, + endpoint=0, attribute=Clusters.OperationalCredentials.Attributes.CommissionedFabrics) asserts.assert_not_equal(commissioned_fabric_count, 1, "TH Error: failed to add fabric for testing TH.") # Step 1c - perform removal. if commissioned_fabric_count > 1: logging.info("Removing extra fabrics from device.") - fabrics: List[Clusters.OperationalCredentials.Structs.FabricDescriptorStruct] = await self.read_single_attribute(dev_ctrl, node_id=self.dut_node_id, endpoint=0, attribute=Clusters.OperationalCredentials.Attributes.Fabrics, fabricFiltered=False) + fabrics: List[Clusters.OperationalCredentials.Structs.FabricDescriptorStruct] = await self.read_single_attribute( + dev_ctrl, node_id=self.dut_node_id, endpoint=0, + attribute=Clusters.OperationalCredentials.Attributes.Fabrics, fabricFiltered=False) for fabric in fabrics: if fabric.fabricID == dev_ctrl.fabricId: continue # This is not the initial client's fabric, so remove it. 
- await dev_ctrl.SendCommand(self.dut_node_id, 0, Clusters.OperationalCredentials.Commands.RemoveFabric(fabricIndex=fabric.fabricIndex)) + await dev_ctrl.SendCommand( + self.dut_node_id, 0, Clusters.OperationalCredentials.Commands.RemoveFabric(fabricIndex=fabric.fabricIndex)) - commissioned_fabric_count = await self.read_single_attribute(dev_ctrl, node_id=self.dut_node_id, endpoint=0, attribute=Clusters.OperationalCredentials.Attributes.CommissionedFabrics) + commissioned_fabric_count = await self.read_single_attribute( + dev_ctrl, node_id=self.dut_node_id, + endpoint=0, attribute=Clusters.OperationalCredentials.Attributes.CommissionedFabrics) asserts.assert_equal(commissioned_fabric_count, 1, "Failed to remove extra fabrics from DUT.") # Prepare clients for subsequent fabrics (step 1d) @@ -173,11 +186,20 @@ async def test_TC_RR_1_1(self): new_admin_ctrl = new_fabric_admin.NewController(nodeId=dev_ctrl.nodeId, catTags=[0x0001_0001]) new_admin_ctrl.name = all_names.pop(0) client_list.append(new_admin_ctrl) - await CommissioningBuildingBlocks.AddNOCForNewFabricFromExisting(commissionerDevCtrl=dev_ctrl, newFabricDevCtrl=new_admin_ctrl, existingNodeId=self.dut_node_id, newNodeId=self.dut_node_id) + await CommissioningBuildingBlocks.AddNOCForNewFabricFromExisting(commissionerDevCtrl=dev_ctrl, + newFabricDevCtrl=new_admin_ctrl, + existingNodeId=self.dut_node_id, + newNodeId=self.dut_node_id) if num_controllers_per_fabric > 1: - new_controllers = await CommissioningBuildingBlocks.CreateControllersOnFabric(fabricAdmin=new_fabric_admin, adminDevCtrl=new_admin_ctrl, - controllerNodeIds=node_ids, privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kAdminister, targetNodeId=self.dut_node_id, catTags=[0x0001_0001]) + new_controllers = await CommissioningBuildingBlocks.CreateControllersOnFabric( + fabricAdmin=new_fabric_admin, + adminDevCtrl=new_admin_ctrl, + controllerNodeIds=node_ids, + 
privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kAdminister, + targetNodeId=self.dut_node_id, + catTags=[0x0001_0001] + ) for controller in new_controllers: controller.name = all_names.pop(0) @@ -186,10 +208,14 @@ async def test_TC_RR_1_1(self): asserts.assert_equal(len(client_list), num_fabrics_to_commission * num_controllers_per_fabric, "Must have the right number of clients") - commissioned_fabric_count = await self.read_single_attribute(dev_ctrl, node_id=self.dut_node_id, endpoint=0, attribute=Clusters.OperationalCredentials.Attributes.CommissionedFabrics) + commissioned_fabric_count = await self.read_single_attribute( + dev_ctrl, node_id=self.dut_node_id, + endpoint=0, attribute=Clusters.OperationalCredentials.Attributes.CommissionedFabrics) asserts.assert_equal(commissioned_fabric_count, num_fabrics_to_commission, "Must have the right number of fabrics commissioned.") - fabric_table: List[Clusters.OperationalCredentials.Structs.FabricDescriptorStruct] = await self.read_single_attribute(dev_ctrl, node_id=self.dut_node_id, endpoint=0, attribute=Clusters.OperationalCredentials.Attributes.Fabrics, fabricFiltered=False) + fabric_table: List[Clusters.OperationalCredentials.Structs.FabricDescriptorStruct] = await self.read_single_attribute( + dev_ctrl, node_id=self.dut_node_id, + endpoint=0, attribute=Clusters.OperationalCredentials.Attributes.Fabrics, fabricFiltered=False) client_by_name = {client.name: client for client in client_list} local_session_id_by_client_name = {client.name: client.GetConnectedDeviceSync( @@ -211,15 +237,22 @@ async def test_TC_RR_1_1(self): await client.SendCommand(self.dut_node_id, 0, Clusters.OperationalCredentials.Commands.UpdateFabricLabel(label)) # Read back - fabric_metadata = await self.read_single_attribute(client, node_id=self.dut_node_id, endpoint=0, attribute=Clusters.OperationalCredentials.Attributes.Fabrics) + fabric_metadata = await self.read_single_attribute(client, + node_id=self.dut_node_id, + 
endpoint=0, + attribute=Clusters.OperationalCredentials.Attributes.Fabrics) print(fabric_metadata) asserts.assert_equal(fabric_metadata[0].label, label, "Fabrics[x].label must match what was written") # Before subscribing, set the NodeLabel to "Before Subscriptions" logging.info(f"Step 2b: Set BasicInformation.NodeLabel to {BEFORE_LABEL}") - await client_list[0].WriteAttribute(self.dut_node_id, [(0, Clusters.BasicInformation.Attributes.NodeLabel(value=BEFORE_LABEL))]) + await client_list[0].WriteAttribute(self.dut_node_id, + [(0, Clusters.BasicInformation.Attributes.NodeLabel(value=BEFORE_LABEL))]) - node_label = await self.read_single_attribute(client, node_id=self.dut_node_id, endpoint=0, attribute=Clusters.BasicInformation.Attributes.NodeLabel) + node_label = await self.read_single_attribute(client, + node_id=self.dut_node_id, + endpoint=0, + attribute=Clusters.BasicInformation.Attributes.NodeLabel) asserts.assert_equal(node_label, BEFORE_LABEL, "NodeLabel must match what was written") # Step 3: Add 4 Access Control entries on DUT with a list of 4 Subjects and 3 Targets with the following parameters (...) 
@@ -236,7 +269,8 @@ async def test_TC_RR_1_1(self): await client.WriteAttribute(self.dut_node_id, [(0, Clusters.AccessControl.Attributes.Acl(acl))]) logging.info(f"Step 3b: Validating ACL entry for fabric {fabric.fabricIndex}") - acl_readback = await self.read_single_attribute(client, node_id=self.dut_node_id, endpoint=0, attribute=Clusters.AccessControl.Attributes.Acl) + acl_readback = await self.read_single_attribute( + client, node_id=self.dut_node_id, endpoint=0, attribute=Clusters.AccessControl.Attributes.Acl) fabric_index = 9999 for entry in acl_readback: asserts.assert_equal(entry.fabricIndex, fabric.fabricIndex, "Fabric Index of response entries must match") @@ -264,8 +298,12 @@ async def test_TC_RR_1_1(self): for sub_idx, client in enumerate(client_list): logging.info("Establishing subscription %d/%d from controller node %s" % (sub_idx + 1, len(client_list), client.name)) - sub = await client.ReadAttribute(nodeid=self.dut_node_id, attributes=subscription_contents, - reportInterval=(min_report_interval_sec, max_report_interval_sec), keepSubscriptions=False) + sub = await client.ReadAttribute( + nodeid=self.dut_node_id, + attributes=subscription_contents, + reportInterval=(min_report_interval_sec, max_report_interval_sec), + keepSubscriptions=False + ) self._subscriptions.append(sub) attribute_handler = AttributeChangeAccumulator( @@ -308,7 +346,8 @@ async def test_TC_RR_1_1(self): logging.info( "Step 7: Change attribute with one client, await all attributes changed successfully without loss of subscriptions") await asyncio.sleep(1) - await client_list[0].WriteAttribute(self.dut_node_id, [(0, Clusters.BasicInformation.Attributes.NodeLabel(value=AFTER_LABEL))]) + await client_list[0].WriteAttribute(self.dut_node_id, + [(0, Clusters.BasicInformation.Attributes.NodeLabel(value=AFTER_LABEL))]) all_changes = {client.name: False for client in client_list} @@ -366,11 +405,16 @@ async def test_TC_RR_1_1(self): for sub_idx, client in enumerate(client_list): 
logging.info("Reading NodeLabel (%d/%d) from controller node %s" % (sub_idx + 1, len(client_list), client.name)) - label_readback = await self.read_single_attribute(client, node_id=self.dut_node_id, endpoint=0, attribute=Clusters.BasicInformation.Attributes.NodeLabel) + label_readback = await self.read_single_attribute(client, + node_id=self.dut_node_id, + endpoint=0, + attribute=Clusters.BasicInformation.Attributes.NodeLabel) asserts.assert_equal(label_readback, AFTER_LABEL) - # On each client, read back the local session id for the CASE session to the DUT and ensure it's the same as that of the session established right at the - # beginning of the test. In tandem with checking that the number of sessions to the DUT is exactly one, this ensures we have not established any new CASE + # On each client, read back the local session id for the CASE session to the DUT and ensure + # it's the same as that of the session established right at the beginning of the test. + # In tandem with checking that the number of sessions to the DUT is exactly one, + # this ensures we have not established any new CASE # sessions in this test. if check_local_session_id_unchanged: logging.info("Step 8b: Validate that the local CASE session ID hasn't changed") @@ -383,7 +427,8 @@ async def test_TC_RR_1_1(self): if (beginning_session_id != end_session_id): logging.error( - f"Test ended with a different session ID created from what we had before for {client.name} (total sessions = {total_sessions})") + f"Test ended with a different session ID created from what we had before for {client.name} " + f"(total sessions = {total_sessions})") num_failed_clients = num_failed_clients + 1 elif (total_sessions != 1): logging.error(f"Test ended with more than 1 session for {client.name}") @@ -406,20 +451,22 @@ async def test_TC_RR_1_1(self): # The test for Step 10 and all of Steps 11 to 14 are only performed if Groups cluster instances are found. 
if counted_groups_clusters > 0: - indicated_max_groups_per_fabric: int = await self.read_single_attribute(dev_ctrl, - node_id=self.dut_node_id, - endpoint=0, - attribute=Clusters.GroupKeyManagement.Attributes.MaxGroupsPerFabric) + indicated_max_groups_per_fabric: int = await self.read_single_attribute( + dev_ctrl, + node_id=self.dut_node_id, + endpoint=0, + attribute=Clusters.GroupKeyManagement.Attributes.MaxGroupsPerFabric) if indicated_max_groups_per_fabric < 4 * counted_groups_clusters: - asserts.fail(f"Failed Step 10: MaxGroupsPerFabric < 4 * counted_groups_clusters") + asserts.fail("Failed Step 10: MaxGroupsPerFabric < 4 * counted_groups_clusters") # Step 11: Confirm MaxGroupKeysPerFabric meets the minimum requirement of 3. - indicated_max_group_keys_per_fabric: int = await self.read_single_attribute(dev_ctrl, - node_id=self.dut_node_id, - endpoint=0, - attribute=Clusters.GroupKeyManagement.Attributes.MaxGroupKeysPerFabric) + indicated_max_group_keys_per_fabric: int = await self.read_single_attribute( + dev_ctrl, + node_id=self.dut_node_id, + endpoint=0, + attribute=Clusters.GroupKeyManagement.Attributes.MaxGroupKeysPerFabric) if indicated_max_group_keys_per_fabric < 3: - asserts.fail(f"Failed Step 11: MaxGroupKeysPerFabric < 3") + asserts.fail("Failed Step 11: MaxGroupKeysPerFabric < 3") # Create a list of per-fabric clients to use for filling group resources accross all fabrics. fabric_unique_clients: List[Any] = [] @@ -431,7 +478,8 @@ async def test_TC_RR_1_1(self): fabric_unique_clients.append(client_by_name[client_name]) # Step 12: Write and verify indicated_max_group_keys_per_fabric group keys to all fabrics. 
- group_keys: List[List[Clusters.GroupKeyManagement.Structs.GroupKeySetStruct]] = await self.fill_and_validate_group_key_sets( + group_keys: List[List[ + Clusters.GroupKeyManagement.Structs.GroupKeySetStruct]] = await self.fill_and_validate_group_key_sets( num_fabrics_to_commission, fabric_unique_clients, indicated_max_group_keys_per_fabric) # Step 13: Write and verify indicated_max_groups_per_fabric group/key mappings for all fabrics. @@ -443,11 +491,14 @@ async def test_TC_RR_1_1(self): group_key_idx: int = group_idx % len(group_keys[fabric_list_idx]) group_key_map[fabric_list_idx][group_id] = group_keys[fabric_list_idx][group_key_idx].groupKeySetID - await self.fill_and_validate_group_key_map(num_fabrics_to_commission, fabric_unique_clients, group_key_map, fabric_table) + await self.fill_and_validate_group_key_map( + num_fabrics_to_commission, fabric_unique_clients, group_key_map, fabric_table) # Step 14: Add all the groups to the discovered groups-supporting endpoints and verify GroupTable - group_table_written: List[Dict[int, Clusters.GroupKeyManagement.Structs.GroupInfoMapStruct]] = await self.add_all_groups( - num_fabrics_to_commission, fabric_unique_clients, group_key_map, groups_cluster_endpoints, indicated_max_groups_per_fabric, fabric_table) + group_table_written: List[ + Dict[int, Clusters.GroupKeyManagement.Structs.GroupInfoMapStruct]] = await self.add_all_groups( + num_fabrics_to_commission, fabric_unique_clients, group_key_map, + groups_cluster_endpoints, indicated_max_groups_per_fabric, fabric_table) await self.validate_group_table(num_fabrics_to_commission, fabric_unique_clients, group_table_written, fabric_table) # Read heap watermarks after the test @@ -476,11 +527,15 @@ async def fill_user_label_list(self, dev_ctrl, target_node_id): for cluster in clusters: if cluster == Clusters.UserLabel: logging.info("Step 9a: Filling UserLabel cluster on endpoint %d" % endpoint_id) - statuses = await dev_ctrl.WriteAttribute(target_node_id, [(endpoint_id, 
Clusters.UserLabel.Attributes.LabelList(labels))]) + statuses = await dev_ctrl.WriteAttribute(target_node_id, + [(endpoint_id, Clusters.UserLabel.Attributes.LabelList(labels))]) asserts.assert_equal(statuses[0].Status, StatusEnum.Success, "Label write must succeed") logging.info("Step 9b: Validate UserLabel cluster contents after write on endpoint %d" % endpoint_id) - read_back_labels = await self.read_single_attribute(dev_ctrl, node_id=target_node_id, endpoint=endpoint_id, attribute=Clusters.UserLabel.Attributes.LabelList) + read_back_labels = await self.read_single_attribute(dev_ctrl, + node_id=target_node_id, + endpoint=endpoint_id, + attribute=Clusters.UserLabel.Attributes.LabelList) print(read_back_labels) asserts.assert_equal(read_back_labels, labels, "LabelList attribute must match what was written") @@ -488,7 +543,8 @@ async def fill_user_label_list(self, dev_ctrl, target_node_id): async def fill_and_validate_group_key_sets(self, fabrics: int, clients: List[Any], - keys_per_fabric: int) -> List[List[Clusters.GroupKeyManagement.Structs.GroupKeySetStruct]]: + keys_per_fabric: int) -> List[List[ + Clusters.GroupKeyManagement.Structs.GroupKeySetStruct]]: # Step 12: Write indicated_max_group_keys_per_fabric group keys to all fabrics. 
group_keys: List[List[Clusters.GroupKeyManagement.Structs.GroupKeySetStruct]] = [[] for _ in range(fabrics)] for client_idx in range(fabrics): @@ -500,7 +556,8 @@ async def fill_and_validate_group_key_sets(self, logging.info("Step 12: Setting group key on fabric %d at index '%d'" % (client_idx+1, group_key_cluster_idx)) group_keys[client_idx].append(self.build_group_key(client_idx, group_key_cluster_idx, keys_per_fabric)) - await client.SendCommand(self.dut_node_id, 0, Clusters.GroupKeyManagement.Commands.KeySetWrite(group_keys[client_idx][group_key_list_idx])) + await client.SendCommand(self.dut_node_id, 0, Clusters.GroupKeyManagement.Commands.KeySetWrite( + group_keys[client_idx][group_key_list_idx])) # Step 12 verification: After all the key sets were written, read all the information back. for client_idx in range(fabrics): @@ -516,7 +573,8 @@ async def fill_and_validate_group_key_sets(self, ipk_group_key_id: Set[int] = set(read_group_key_ids) - set(known_group_key_ids) asserts.assert_equal(keys_per_fabric, len(read_group_key_ids), - "KeySetReadAllIndicesResponse length does not match the key support indicated: %d." % (keys_per_fabric)) + "KeySetReadAllIndicesResponse length does " + "not match the key support indicated: %d." % (keys_per_fabric)) asserts.assert_equal(len(ipk_group_key_id), 1, "Read more than 1 key ID that did not match written values after IPK (only expected 1 for IPK).") @@ -527,7 +585,8 @@ async def fill_and_validate_group_key_map(self, fabrics: int, clients: List[Any], group_key_map: List[Dict[int, int]], - fabric_table: List[Clusters.OperationalCredentials.Structs.FabricDescriptorStruct]) -> None: + fabric_table: List[ + Clusters.OperationalCredentials.Structs.FabricDescriptorStruct]) -> None: # Step 13: Write and verify indicated_max_groups_per_fabric group/key mappings for all fabrics. 
mapping_structs: List[List[Clusters.GroupKeyManagement.Structs.GroupKeyMapStruct]] = [[] for _ in range(fabrics)] for client_idx in range(fabrics): @@ -535,12 +594,14 @@ async def fill_and_validate_group_key_map(self, fabric_idx: int = fabric_table[client_idx].fabricIndex for group in group_key_map[client_idx]: - mapping_structs[client_idx].append(Clusters.GroupKeyManagement.Structs.GroupKeyMapStruct(groupId=group, - groupKeySetID=group_key_map[client_idx][group], - fabricIndex=fabric_idx)) + mapping_structs[client_idx].append(Clusters.GroupKeyManagement.Structs.GroupKeyMapStruct( + groupId=group, + groupKeySetID=group_key_map[client_idx][group], + fabricIndex=fabric_idx)) logging.info("Step 13: Setting group key map on fabric %d" % (fabric_idx)) - await client.WriteAttribute(self.dut_node_id, [(0, Clusters.GroupKeyManagement.Attributes.GroupKeyMap(mapping_structs[client_idx]))]) + await client.WriteAttribute( + self.dut_node_id, [(0, Clusters.GroupKeyManagement.Attributes.GroupKeyMap(mapping_structs[client_idx]))]) # Step 13 verification: After all the group key maps were written, read all the information back. 
for client_idx in range(fabrics): @@ -548,7 +609,8 @@ async def fill_and_validate_group_key_map(self, fabric_idx: int = fabric_table[client_idx].fabricIndex logging.info("Step 13: Reading group key map on fabric %d" % (fabric_idx)) - group_key_map_readback = await self.read_single_attribute(client, node_id=self.dut_node_id, endpoint=0, attribute=Clusters.GroupKeyManagement.Attributes.GroupKeyMap) + group_key_map_readback = await self.read_single_attribute( + client, node_id=self.dut_node_id, endpoint=0, attribute=Clusters.GroupKeyManagement.Attributes.GroupKeyMap) found_entry: int = 0 for read_entry in group_key_map_readback: @@ -570,7 +632,9 @@ async def add_all_groups(self, group_key_map: List[Dict[int, int]], group_endpoints: Dict[int, Any], groups_per_fabric: int, - fabric_table: List[Clusters.OperationalCredentials.Structs.FabricDescriptorStruct]) -> List[Dict[int, Clusters.GroupKeyManagement.Structs.GroupInfoMapStruct]]: + fabric_table: List[ + Clusters.OperationalCredentials.Structs.FabricDescriptorStruct]) -> List[ + Dict[int, Clusters.GroupKeyManagement.Structs.GroupInfoMapStruct]]: # Step 14: Add indicated_max_groups_per_fabric to each fabric through the Groups clusters on supporting endpoints. 
written_group_table_map: List[Dict[int, Clusters.GroupKeyManagement.Structs.GroupInfoMapStruct]] = [ {} for _ in range(fabrics)] @@ -598,12 +662,13 @@ async def add_all_groups(self, group_name: str = self.random_string(16) if name_supported else "" command: Clusters.Groups.Commands.AddGroup = Clusters.Groups.Commands.AddGroup( groupID=group_id, groupName=group_name) - written_group_table_map[client_idx][group_id] = Clusters.GroupKeyManagement.Structs.GroupInfoMapStruct(groupId=group_id, - groupName=group_name, - fabricIndex=fabric_idx, - endpoints=[endpoint_id]) - add_response: Clusters.Groups.Commands.AddGroupResponse = await client.SendCommand(self.dut_node_id, endpoint_id, command, - responseType=Clusters.Groups.Commands.AddGroupResponse) + written_group_table_map[client_idx][group_id] = Clusters.GroupKeyManagement.Structs.GroupInfoMapStruct( + groupId=group_id, + groupName=group_name, + fabricIndex=fabric_idx, + endpoints=[endpoint_id]) + add_response: Clusters.Groups.Commands.AddGroupResponse = await client.SendCommand( + self.dut_node_id, endpoint_id, command, responseType=Clusters.Groups.Commands.AddGroupResponse) asserts.assert_equal(StatusEnum.Success, add_response.status) asserts.assert_equal(group_id, add_response.groupID) @@ -650,22 +715,34 @@ def build_acl(self): # - Privilege field: Administer (5) # - AuthMode field: CASE (2) # - Subjects field: [0xFFFF_FFFD_0001_0001, 0x2000_0000_0000_0001, 0x2000_0000_0000_0002, 0x2000_0000_0000_0003] - # - Targets field: [{Endpoint: 0}, {Cluster: 0xFFF1_FC00, DeviceType: 0xFFF1_FC30}, {Cluster: 0xFFF1_FC00, DeviceType: 0xFFF1_FC31}] + # - Targets field: [ + # {Endpoint: 0}, + # {Cluster: 0xFFF1_FC00, DeviceType: 0xFFF1_FC30}, + # {Cluster: 0xFFF1_FC00, DeviceType: 0xFFF1_FC31} + # ] # . 
struct # - Privilege field: Manage (4) # - AuthMode field: CASE (2) # - Subjects field: [0x1000_0000_0000_0001, 0x1000_0000_0000_0002, 0x1000_0000_0000_0003, 0x1000_0000_0000_0004] - # - Targets field: [{Cluster: 0xFFF1_FC00, DeviceType: 0xFFF1_FC20}, {Cluster: 0xFFF1_FC01, DeviceType: 0xFFF1_FC21}, {Cluster: 0xFFF1_FC02, DeviceType: 0xFFF1_FC22}] + # - Targets field: [ + # {Cluster: 0xFFF1_FC00, DeviceType: 0xFFF1_FC20}, + # {Cluster: 0xFFF1_FC01, DeviceType: 0xFFF1_FC21}, + # {Cluster: 0xFFF1_FC02, DeviceType: 0xFFF1_FC22} + # ] # . struct # - Privilege field: Operate (3) # - AuthMode field: CASE (2) # - Subjects field: [0x3000_0000_0000_0001, 0x3000_0000_0000_0002, 0x3000_0000_0000_0003, 0x3000_0000_0000_0004] - # - Targets field: [{Cluster: 0xFFF1_FC40, DeviceType: 0xFFF1_FC20}, {Cluster: 0xFFF1_FC41, DeviceType: 0xFFF1_FC21}, {Cluster: 0xFFF1_FC02, DeviceType: 0xFFF1_FC42}] + # - Targets field: [{Cluster: 0xFFF1_FC40, DeviceType: 0xFFF1_FC20}, + # {Cluster: 0xFFF1_FC41, DeviceType: 0xFFF1_FC21}, + # {Cluster: 0xFFF1_FC02, DeviceType: 0xFFF1_FC42}] # . 
struct # - Privilege field: View (1) # - AuthMode field: CASE (2) # - Subjects field: [0x4000_0000_0000_0001, 0x4000_0000_0000_0002, 0x4000_0000_0000_0003, 0x4000_0000_0000_0004] - # - Targets field: [{Cluster: 0xFFF1_FC80, DeviceType: 0xFFF1_FC20}, {Cluster: 0xFFF1_FC81, DeviceType: 0xFFF1_FC21}, {Cluster: 0xFFF1_FC82, DeviceType: 0xFFF1_FC22}] + # - Targets field: [{Cluster: 0xFFF1_FC80, DeviceType: 0xFFF1_FC20}, + # {Cluster: 0xFFF1_FC81, DeviceType: 0xFFF1_FC21}, + # {Cluster: 0xFFF1_FC82, DeviceType: 0xFFF1_FC22}] # Administer ACL entry admin_subjects = [0xFFFF_FFFD_0001_0001, 0x2000_0000_0000_0001, 0x2000_0000_0000_0002, 0x2000_0000_0000_0003] @@ -675,10 +752,12 @@ def build_acl(self): Clusters.AccessControl.Structs.Target(cluster=0xFFF1_FC00, deviceType=0xFFF1_BC30), Clusters.AccessControl.Structs.Target(cluster=0xFFF1_FC01, deviceType=0xFFF1_BC31) ] - admin_acl_entry = Clusters.AccessControl.Structs.AccessControlEntryStruct(privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kAdminister, - authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, - subjects=admin_subjects, - targets=admin_targets) + admin_acl_entry = Clusters.AccessControl.Structs.AccessControlEntryStruct( + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kAdminister, + authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, + subjects=admin_subjects, + targets=admin_targets + ) acl.append(admin_acl_entry) # Manage ACL entry @@ -689,10 +768,12 @@ def build_acl(self): Clusters.AccessControl.Structs.Target(cluster=0xFFF1_FC02, deviceType=0xFFF1_BC22) ] - manage_acl_entry = Clusters.AccessControl.Structs.AccessControlEntryStruct(privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kManage, - authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, - subjects=manage_subjects, - targets=manage_targets) + manage_acl_entry = Clusters.AccessControl.Structs.AccessControlEntryStruct( + 
privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kManage, + authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, + subjects=manage_subjects, + targets=manage_targets + ) acl.append(manage_acl_entry) # Operate ACL entry @@ -703,10 +784,12 @@ def build_acl(self): Clusters.AccessControl.Structs.Target(cluster=0xFFF1_FC42, deviceType=0xFFF1_BC42) ] - operate_acl_entry = Clusters.AccessControl.Structs.AccessControlEntryStruct(privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kOperate, - authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, - subjects=operate_subjects, - targets=operate_targets) + operate_acl_entry = Clusters.AccessControl.Structs.AccessControlEntryStruct( + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kOperate, + authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, + subjects=operate_subjects, + targets=operate_targets + ) acl.append(operate_acl_entry) # View ACL entry @@ -717,15 +800,17 @@ def build_acl(self): Clusters.AccessControl.Structs.Target(cluster=0xFFF1_FC82, deviceType=0xFFF1_BC22) ] - view_acl_entry = Clusters.AccessControl.Structs.AccessControlEntryStruct(privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, - authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, - subjects=view_subjects, - targets=view_targets) + view_acl_entry = Clusters.AccessControl.Structs.AccessControlEntryStruct( + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kView, + authMode=Clusters.AccessControl.Enums.AccessControlEntryAuthModeEnum.kCase, + subjects=view_subjects, + targets=view_targets) acl.append(view_acl_entry) return acl - def build_group_key(self, fabric_index: int, group_key_index: int, keys_per_fabric: int) -> Clusters.GroupKeyManagement.Structs.GroupKeySetStruct: + def build_group_key(self, fabric_index: int, + group_key_index: int, keys_per_fabric: 
int) -> Clusters.GroupKeyManagement.Structs.GroupKeySetStruct: asserts.assert_not_equal(group_key_index, 0, "TH Internal Error: IPK key set index (0) should not be re-generated.") # groupKeySetID is definted as uint16 in the Matter specification. @@ -735,14 +820,15 @@ def build_group_key(self, fabric_index: int, group_key_index: int, keys_per_fabr set_id: int = fabric_index*keys_per_fabric + group_key_index asserts.assert_less_equal( set_id, 0xFFFF, "Invalid Key Set ID. This may be a limitation of the test harness, not the device under test.") - return Clusters.GroupKeyManagement.Structs.GroupKeySetStruct(groupKeySetID=set_id, - groupKeySecurityPolicy=Clusters.GroupKeyManagement.Enums.GroupKeySecurityPolicyEnum.kTrustFirst, - epochKey0=self.random_string(16).encode(), - epochStartTime0=(set_id * 4), - epochKey1=self.random_string(16).encode(), - epochStartTime1=(set_id * 4 + 1), - epochKey2=self.random_string(16).encode(), - epochStartTime2=(set_id * 4 + 2)) + return Clusters.GroupKeyManagement.Structs.GroupKeySetStruct( + groupKeySetID=set_id, + groupKeySecurityPolicy=Clusters.GroupKeyManagement.Enums.GroupKeySecurityPolicyEnum.kTrustFirst, + epochKey0=self.random_string(16).encode(), + epochStartTime0=(set_id * 4), + epochKey1=self.random_string(16).encode(), + epochStartTime1=(set_id * 4 + 1), + epochKey2=self.random_string(16).encode(), + epochStartTime2=(set_id * 4 + 2)) async def read_heap_statistics(self, dev_ctrl): diagnostics_contents = [ @@ -759,8 +845,10 @@ async def read_heap_statistics(self, dev_ctrl): for attribute in diagnostics_contents: asserts.assert_true(attribute in swdiag_info[0][Clusters.SoftwareDiagnostics], "Must have read back attribute %s" % (attribute.__name__)) - high_watermark = swdiag_info[0][Clusters.SoftwareDiagnostics][Clusters.SoftwareDiagnostics.Attributes.CurrentHeapHighWatermark] - current_usage = swdiag_info[0][Clusters.SoftwareDiagnostics][Clusters.SoftwareDiagnostics.Attributes.CurrentHeapUsed] + high_watermark = 
swdiag_info[0][Clusters.SoftwareDiagnostics][ + Clusters.SoftwareDiagnostics.Attributes.CurrentHeapHighWatermark] + current_usage = swdiag_info[0][Clusters.SoftwareDiagnostics][ + Clusters.SoftwareDiagnostics.Attributes.CurrentHeapUsed] return high_watermark, current_usage diff --git a/src/python_testing/TC_SC_3_6.py b/src/python_testing/TC_SC_3_6.py index 55d34baffee24b..a6994cbf288539 100644 --- a/src/python_testing/TC_SC_3_6.py +++ b/src/python_testing/TC_SC_3_6.py @@ -21,9 +21,8 @@ import time from threading import Event -import chip.CertificateAuthority import chip.clusters as Clusters -import chip.FabricAdmin +from chip.clusters import ClusterObjects as ClustersObjects from chip.clusters.Attribute import SubscriptionTransaction, TypedAttributePath from chip.utils import CommissioningBuildingBlocks from matter_testing_support import MatterBaseTest, async_test_body, default_matter_test_main @@ -36,7 +35,7 @@ class AttributeChangeAccumulator: - def __init__(self, name: str, expected_attribute: Clusters.ClusterAttributeDescriptor, output: queue.Queue): + def __init__(self, name: str, expected_attribute: ClustersObjects.ClusterAttributeDescriptor, output: queue.Queue): self._name = name self._output = output self._expected_attribute = expected_attribute @@ -116,7 +115,12 @@ async def test_TC_SC_3_6(self): logging.info("Pre-conditions: validate CapabilityMinima.CaseSessionsPerFabric >= 3") - capability_minima = await self.read_single_attribute(dev_ctrl, node_id=self.dut_node_id, endpoint=0, attribute=Clusters.BasicInformation.Attributes.CapabilityMinima) + capability_minima = await self.read_single_attribute( + dev_ctrl, + node_id=self.dut_node_id, + endpoint=0, + attribute=Clusters.BasicInformation.Attributes.CapabilityMinima + ) asserts.assert_greater_equal(capability_minima.caseSessionsPerFabric, 3) logging.info("Pre-conditions: use existing fabric to configure new fabrics so that total is %d fabrics" % @@ -130,7 +134,13 @@ async def test_TC_SC_3_6(self): 
client_list.append(dev_ctrl) if num_controllers_per_fabric > 1: - new_controllers = await CommissioningBuildingBlocks.CreateControllersOnFabric(fabricAdmin=dev_ctrl.fabricAdmin, adminDevCtrl=dev_ctrl, controllerNodeIds=node_ids, privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kAdminister, targetNodeId=self.dut_node_id) + new_controllers = await CommissioningBuildingBlocks.CreateControllersOnFabric( + fabricAdmin=dev_ctrl.fabricAdmin, + adminDevCtrl=dev_ctrl, + controllerNodeIds=node_ids, + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kAdminister, + targetNodeId=self.dut_node_id + ) for controller in new_controllers: controller.name = all_names.pop(0) client_list.extend(new_controllers) @@ -144,11 +154,21 @@ async def test_TC_SC_3_6(self): new_admin_ctrl = new_fabric_admin.NewController(nodeId=dev_ctrl.nodeId) new_admin_ctrl.name = all_names.pop(0) client_list.append(new_admin_ctrl) - await CommissioningBuildingBlocks.AddNOCForNewFabricFromExisting(commissionerDevCtrl=dev_ctrl, newFabricDevCtrl=new_admin_ctrl, existingNodeId=self.dut_node_id, newNodeId=self.dut_node_id) + await CommissioningBuildingBlocks.AddNOCForNewFabricFromExisting( + commissionerDevCtrl=dev_ctrl, + newFabricDevCtrl=new_admin_ctrl, + existingNodeId=self.dut_node_id, + newNodeId=self.dut_node_id + ) if num_controllers_per_fabric > 1: - new_controllers = await CommissioningBuildingBlocks.CreateControllersOnFabric(fabricAdmin=new_fabric_admin, adminDevCtrl=new_admin_ctrl, - controllerNodeIds=node_ids, privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kAdminister, targetNodeId=self.dut_node_id) + new_controllers = await CommissioningBuildingBlocks.CreateControllersOnFabric( + fabricAdmin=new_fabric_admin, + adminDevCtrl=new_admin_ctrl, + controllerNodeIds=node_ids, + privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kAdminister, + targetNodeId=self.dut_node_id + ) for controller in new_controllers: 
controller.name = all_names.pop(0) @@ -159,7 +179,8 @@ async def test_TC_SC_3_6(self): # Before subscribing, set the NodeLabel to "Before Subscriptions" logging.info("Pre-conditions: writing initial value of NodeLabel, so that we can control for change of attribute detection") - await client_list[0].WriteAttribute(self.dut_node_id, [(0, Clusters.BasicInformation.Attributes.NodeLabel(value=BEFORE_LABEL))]) + await client_list[0].WriteAttribute(self.dut_node_id, + [(0, Clusters.BasicInformation.Attributes.NodeLabel(value=BEFORE_LABEL))]) # Subscribe with all clients to NodeLabel attribute sub_handlers = [] @@ -170,8 +191,12 @@ async def test_TC_SC_3_6(self): for sub_idx, client in enumerate(client_list): logging.info("Establishing subscription %d/%d from controller node %s" % (sub_idx + 1, len(client_list), client.name)) - sub = await client.ReadAttribute(nodeid=self.dut_node_id, attributes=[(0, Clusters.BasicInformation.Attributes.NodeLabel)], - reportInterval=(min_report_interval_sec, max_report_interval_sec), keepSubscriptions=False) + sub = await client.ReadAttribute( + nodeid=self.dut_node_id, + attributes=[(0, Clusters.BasicInformation.Attributes.NodeLabel)], + reportInterval=(min_report_interval_sec, max_report_interval_sec), + keepSubscriptions=False + ) self._subscriptions.append(sub) attribute_handler = AttributeChangeAccumulator( @@ -190,7 +215,8 @@ async def test_TC_SC_3_6(self): logging.info( "Step 1 (second part): Change attribute with one client, await all attributes changed within time") await asyncio.sleep(1) - await client_list[0].WriteAttribute(self.dut_node_id, [(0, Clusters.BasicInformation.Attributes.NodeLabel(value=AFTER_LABEL))]) + await client_list[0].WriteAttribute(self.dut_node_id, + [(0, Clusters.BasicInformation.Attributes.NodeLabel(value=AFTER_LABEL))]) all_changes = {client.name: False for client in client_list} diff --git a/src/python_testing/TC_TestEventTrigger.py b/src/python_testing/TC_TestEventTrigger.py index 
495baa72aae7d0..a27e2f78760879 100644 --- a/src/python_testing/TC_TestEventTrigger.py +++ b/src/python_testing/TC_TestEventTrigger.py @@ -15,11 +15,8 @@ # limitations under the License. # -import logging -from imaplib import Commands - import chip.clusters as Clusters -from chip.interaction_model import InteractionModelError, Status +from chip.interaction_model import InteractionModelError from matter_testing_support import MatterBaseTest, async_test_body, default_matter_test_main from mobly import asserts @@ -40,35 +37,78 @@ class TestEventTrigger(MatterBaseTest): @async_test_body async def test_all_zeros_key(self): dev_ctrl = self.default_controller - with asserts.assert_raises_regex(InteractionModelError, "ConstraintError", "All-zero TestEventTrigger key must return ConstraintError"): - await dev_ctrl.SendCommand(self.dut_node_id, endpoint=0, payload=Clusters.GeneralDiagnostics.Commands.TestEventTrigger(enableKey=kAllZerosKey, eventTrigger=kValidEventTrigger)) + with asserts.assert_raises_regex(InteractionModelError, + "ConstraintError", "All-zero TestEventTrigger key must return ConstraintError"): + await dev_ctrl.SendCommand( + self.dut_node_id, + endpoint=0, + payload=Clusters.GeneralDiagnostics.Commands.TestEventTrigger(enableKey=kAllZerosKey, + eventTrigger=kValidEventTrigger) + ) @async_test_body async def test_incorrect_key(self): dev_ctrl = self.default_controller - test_event_triggers_enabled = await self.read_single_attribute(dev_ctrl, self.dut_node_id, endpoint=0, attribute=Clusters.GeneralDiagnostics.Attributes.TestEventTriggersEnabled) + test_event_triggers_enabled = await self.read_single_attribute( + dev_ctrl, + self.dut_node_id, + endpoint=0, + attribute=Clusters.GeneralDiagnostics.Attributes.TestEventTriggersEnabled + ) asserts.assert_true(test_event_triggers_enabled, "This test expects Test Event Triggers are Enabled") - with asserts.assert_raises_regex(InteractionModelError, "ConstraintError", "Bad TestEventTrigger key must return 
ConstraintError"): - await dev_ctrl.SendCommand(self.dut_node_id, endpoint=0, payload=Clusters.GeneralDiagnostics.Commands.TestEventTrigger(enableKey=kBadKey, eventTrigger=kValidEventTrigger)) + with asserts.assert_raises_regex(InteractionModelError, + "ConstraintError", "Bad TestEventTrigger key must return ConstraintError"): + await dev_ctrl.SendCommand( + self.dut_node_id, + endpoint=0, + payload=Clusters.GeneralDiagnostics.Commands.TestEventTrigger(enableKey=kBadKey, + eventTrigger=kValidEventTrigger) + ) @async_test_body async def test_correct_key_valid_code(self): dev_ctrl = self.default_controller - test_event_triggers_enabled = await self.read_single_attribute(dev_ctrl, self.dut_node_id, endpoint=0, attribute=Clusters.GeneralDiagnostics.Attributes.TestEventTriggersEnabled) + test_event_triggers_enabled = await self.read_single_attribute( + dev_ctrl, + self.dut_node_id, + endpoint=0, + attribute=Clusters.GeneralDiagnostics.Attributes.TestEventTriggersEnabled + ) asserts.assert_true(test_event_triggers_enabled, "This test expects Test Event Triggers are Enabled") # No response to command --> Success yields "None". 
- asserts.assert_is_none(await dev_ctrl.SendCommand(self.dut_node_id, endpoint=0, payload=Clusters.GeneralDiagnostics.Commands.TestEventTrigger(enableKey=kExpectedKey, eventTrigger=kValidEventTrigger))) + asserts.assert_is_none( + await dev_ctrl.SendCommand( + self.dut_node_id, + endpoint=0, + payload=Clusters.GeneralDiagnostics.Commands.TestEventTrigger(enableKey=kExpectedKey, + eventTrigger=kValidEventTrigger) + ) + ) @async_test_body async def test_correct_key_invalid_code(self): dev_ctrl = self.default_controller - test_event_triggers_enabled = await self.read_single_attribute(dev_ctrl, self.dut_node_id, endpoint=0, attribute=Clusters.GeneralDiagnostics.Attributes.TestEventTriggersEnabled) + test_event_triggers_enabled = await self.read_single_attribute( + dev_ctrl, + self.dut_node_id, + endpoint=0, + attribute=Clusters.GeneralDiagnostics.Attributes.TestEventTriggersEnabled + ) asserts.assert_true(test_event_triggers_enabled, "This test expects Test Event Triggers are Enabled") - with asserts.assert_raises_regex(InteractionModelError, "InvalidCommand", "Unsupported EventTrigger must return InvalidCommand"): - await dev_ctrl.SendCommand(self.dut_node_id, endpoint=0, payload=Clusters.GeneralDiagnostics.Commands.TestEventTrigger(enableKey=kExpectedKey, eventTrigger=kInvalidEventTrigger)) + with asserts.assert_raises_regex(InteractionModelError, + "InvalidCommand", + "Unsupported EventTrigger must return InvalidCommand"): + await dev_ctrl.SendCommand( + self.dut_node_id, + endpoint=0, + payload=Clusters.GeneralDiagnostics.Commands.TestEventTrigger( + enableKey=kExpectedKey, + eventTrigger=kInvalidEventTrigger + ) + ) if __name__ == "__main__": diff --git a/src/python_testing/hello_test.py b/src/python_testing/hello_test.py index 70d4bbf97ddcd8..d7bad4c2dc193d 100644 --- a/src/python_testing/hello_test.py +++ b/src/python_testing/hello_test.py @@ -27,7 +27,12 @@ class HelloTest(MatterBaseTest): @async_test_body async def test_names_as_expected(self): dev_ctrl = 
self.default_controller - vendor_name = await self.read_single_attribute(dev_ctrl, self.dut_node_id, 0, Clusters.BasicInformation.Attributes.VendorName) + vendor_name = await self.read_single_attribute( + dev_ctrl, + self.dut_node_id, + 0, + Clusters.BasicInformation.Attributes.VendorName + ) logging.info("Found VendorName: %s" % (vendor_name)) asserts.assert_equal(vendor_name, "TEST_VENDOR", "VendorName must be TEST_VENDOR!") @@ -35,7 +40,12 @@ async def test_names_as_expected(self): @async_test_body async def test_failure_on_wrong_endpoint(self): dev_ctrl = self.default_controller - result = await self.read_single_attribute(dev_ctrl, self.dut_node_id, 9999, Clusters.BasicInformation.Attributes.ProductName) + result = await self.read_single_attribute( + dev_ctrl, + self.dut_node_id, + 9999, + Clusters.BasicInformation.Attributes.ProductName + ) asserts.assert_true(isinstance(result, Clusters.Attribute.ValueDecodeFailure), "Should fail to read on endpoint 9999") asserts.assert_equal(result.Reason.status, Status.UnsupportedEndpoint, "Failure reason should be UnsupportedEndpoint") diff --git a/src/python_testing/matter_testing_support.py b/src/python_testing/matter_testing_support.py index 76f1adb72d61b6..46c205ed0b3cc3 100644 --- a/src/python_testing/matter_testing_support.py +++ b/src/python_testing/matter_testing_support.py @@ -40,11 +40,10 @@ import chip.clusters as Clusters import chip.logging import chip.native -from chip.ChipStack import * +from chip.ChipStack import ChipStack from chip.interaction_model import InteractionModelError, Status from chip.storage import PersistentStorage -from chip.utils import CommissioningBuildingBlocks -from mobly import asserts, base_test, logger, signals, utils +from mobly import asserts, base_test, signals, utils from mobly.config_parser import ENV_MOBLY_LOGPATH, TestRunConfig from mobly.test_runner import TestRunner @@ -180,7 +179,9 @@ def _init_stack(self, already_initialized: bool, **kwargs): if already_initialized: 
self._chip_stack = builtins.chipStack self._logger.warn( - "Re-using existing ChipStack object found in current interpreter: storage path %s will be ignored!" % (self._config.storage_path)) + "Re-using existing ChipStack object found in current interpreter: " + "storage path %s will be ignored!" % (self._config.storage_path) + ) # TODO: Warn that storage will not follow what we set in config else: self._chip_stack = ChipStack(**kwargs) @@ -265,12 +266,15 @@ def certificate_authority_manager(self) -> chip.CertificateAuthority.Certificate def dut_node_id(self) -> int: return self.matter_test_config.dut_node_id - async def read_single_attribute(self, dev_ctrl: ChipDeviceCtrl, node_id: int, endpoint: int, attribute: object, fabricFiltered: bool = True) -> object: + async def read_single_attribute( + self, dev_ctrl: ChipDeviceCtrl, node_id: int, endpoint: int, attribute: object, fabricFiltered: bool = True) -> object: result = await dev_ctrl.ReadAttribute(node_id, [(endpoint, attribute)], fabricFiltered=fabricFiltered) data = result[endpoint] return list(data.values())[0][attribute] - async def read_single_attribute_check_success(self, cluster: object, attribute: object, dev_ctrl: ChipDeviceCtrl = None, node_id: int = None, endpoint: int = 0) -> object: + async def read_single_attribute_check_success( + self, cluster: object, attribute: object, + dev_ctrl: ChipDeviceCtrl = None, node_id: int = None, endpoint: int = 0) -> object: if dev_ctrl is None: dev_ctrl = self.default_controller if node_id is None: @@ -283,7 +287,9 @@ async def read_single_attribute_check_success(self, cluster: object, attribute: asserts.assert_false(isinstance(attr_ret, Clusters.Attribute.ValueDecodeFailure), err_msg) return attr_ret - async def read_single_attribute_expect_error(self, cluster: object, attribute: object, error: Status, dev_ctrl: ChipDeviceCtrl = None, node_id: int = None, endpoint: int = 0) -> object: + async def read_single_attribute_expect_error( + self, cluster: object, 
attribute: object, + error: Status, dev_ctrl: ChipDeviceCtrl = None, node_id: int = None, endpoint: int = 0) -> object: if dev_ctrl is None: dev_ctrl = self.default_controller if node_id is None: @@ -586,7 +592,8 @@ def parse_matter_test_args(argv: List[str]) -> MatterTestConfig: help='NodeID to use for initial/default controller (default: %d)' % _DEFAULT_CONTROLLER_NODE_ID) basic_group.add_argument('-n', '--dut-node-id', type=int_decimal_or_hex, metavar='NODE_ID', default=_DEFAULT_DUT_NODE_ID, - help='Node ID for primary DUT communication, and NodeID to assign if commissioning (default: %d)' % _DEFAULT_DUT_NODE_ID) + help='Node ID for primary DUT communication, ' + 'and NodeID to assign if commissioning (default: %d)' % _DEFAULT_DUT_NODE_ID) commission_group = parser.add_argument_group(title="Commissioning", description="Arguments to commission a node") @@ -616,9 +623,11 @@ def parse_matter_test_args(argv: List[str]) -> MatterTestConfig: help='Thread operational dataset as a hex string for ble-thread commissioning') commission_group.add_argument('--admin-vendor-id', action="store", type=int_decimal_or_hex, default=_DEFAULT_ADMIN_VENDOR_ID, - metavar="VENDOR_ID", help="VendorID to use during commissioning (default 0x%04X)" % _DEFAULT_ADMIN_VENDOR_ID) + metavar="VENDOR_ID", + help="VendorID to use during commissioning (default 0x%04X)" % _DEFAULT_ADMIN_VENDOR_ID) commission_group.add_argument('--case-admin-subject', action="store", type=int_decimal_or_hex, - metavar="CASE_ADMIN_SUBJECT", help="Set the CASE admin subject to an explicit value (default to commissioner Node ID)") + metavar="CASE_ADMIN_SUBJECT", + help="Set the CASE admin subject to an explicit value (default to commissioner Node ID)") commission_group.add_argument('--commission-only', action="store_true", default=False, help="If true, test exits after commissioning without running subsequent tests") @@ -638,7 +647,8 @@ def parse_matter_test_args(argv: List[str]) -> MatterTestConfig: 
fabric_group.add_argument('-r', '--root-index', type=root_index, metavar='ROOT_INDEX_OR_NAME', default=_DEFAULT_TRUST_ROOT_INDEX, - help='Root of trust under which to operate/commission for single-fabric basic usage. alpha/beta/gamma are aliases for 1/2/3. Default (%d)' % _DEFAULT_TRUST_ROOT_INDEX) + help='Root of trust under which to operate/commission for single-fabric basic usage. ' + 'alpha/beta/gamma are aliases for 1/2/3. Default (%d)' % _DEFAULT_TRUST_ROOT_INDEX) fabric_group.add_argument('-c', '--chip-tool-credentials-path', type=pathlib.Path, metavar='PATH', @@ -696,14 +706,33 @@ def _commission_device(self) -> bool: # TODO: support by manual code and QR if conf.commissioning_method == "on-network": - return dev_ctrl.CommissionOnNetwork(nodeId=conf.dut_node_id, setupPinCode=conf.setup_passcode, filterType=DiscoveryFilterType.LONG_DISCRIMINATOR, filter=conf.discriminator) + return dev_ctrl.CommissionOnNetwork( + nodeId=conf.dut_node_id, + setupPinCode=conf.setup_passcode, + filterType=DiscoveryFilterType.LONG_DISCRIMINATOR, + filter=conf.discriminator + ) elif conf.commissioning_method == "ble-wifi": - return dev_ctrl.CommissionWiFi(conf.discriminator, conf.setup_passcode, conf.dut_node_id, conf.wifi_ssid, conf.wifi_passphrase) + return dev_ctrl.CommissionWiFi( + conf.discriminator, + conf.setup_passcode, + conf.dut_node_id, + conf.wifi_ssid, + conf.wifi_passphrase + ) elif conf.commissioning_method == "ble-thread": - return dev_ctrl.CommissionThread(conf.discriminator, conf.setup_passcode, conf.dut_node_id, conf.thread_operational_dataset) + return dev_ctrl.CommissionThread( + conf.discriminator, + conf.setup_passcode, + conf.dut_node_id, + conf.thread_operational_dataset + ) elif conf.commissioning_method == "on-network-ip": logging.warning("==== USING A DIRECT IP COMMISSIONING METHOD NOT SUPPORTED IN THE LONG TERM ====") - return dev_ctrl.CommissionIP(ipaddr=conf.commissionee_ip_address_just_for_testing, setupPinCode=conf.setup_passcode, 
nodeid=conf.dut_node_id) + return dev_ctrl.CommissionIP( + ipaddr=conf.commissionee_ip_address_just_for_testing, + setupPinCode=conf.setup_passcode, nodeid=conf.dut_node_id + ) else: raise ValueError("Invalid commissioning method %s!" % conf.commissioning_method) @@ -751,8 +780,11 @@ def default_matter_test_main(argv=None, **kwargs): # TODO: Steer to right FabricAdmin! # TODO: If CASE Admin Subject is a CAT tag range, then make sure to issue NOC with that CAT tag - default_controller = stack.certificate_authorities[0].adminList[0].NewController(nodeId=matter_test_config.controller_node_id, - paaTrustStorePath=str(matter_test_config.paa_trust_store_path), catTags=matter_test_config.controller_cat_tags) + default_controller = stack.certificate_authorities[0].adminList[0].NewController( + nodeId=matter_test_config.controller_node_id, + paaTrustStorePath=str(matter_test_config.paa_trust_store_path), + catTags=matter_test_config.controller_cat_tags + ) test_config.user_params["default_controller"] = stash_globally(default_controller) test_config.user_params["matter_test_config"] = stash_globally(matter_test_config) diff --git a/src/setup_payload/tests/run_python_setup_payload_gen_test.py b/src/setup_payload/tests/run_python_setup_payload_gen_test.py index ae8b3a14e53ca3..b52000c1a6fd98 100644 --- a/src/setup_payload/tests/run_python_setup_payload_gen_test.py +++ b/src/setup_payload/tests/run_python_setup_payload_gen_test.py @@ -15,14 +15,13 @@ # limitations under the License. 
import os -import random import re import subprocess import sys CHIP_TOPDIR = os.path.dirname(os.path.realpath(__file__))[:-len(os.path.join('src', 'setup_payload', 'tests'))] sys.path.insert(0, os.path.join(CHIP_TOPDIR, 'src', 'setup_payload', 'python')) -from generate_setup_payload import INVALID_PASSCODES, CommissioningFlow, SetupPayload # noqa: E402 +from generate_setup_payload import CommissioningFlow, SetupPayload # noqa: E402 def payload_param_dict(): diff --git a/src/test_driver/efr32/py/nl_test_runner/nl_test_runner.py b/src/test_driver/efr32/py/nl_test_runner/nl_test_runner.py index 01c124bb6bf2d9..b4f096afed100d 100644 --- a/src/test_driver/efr32/py/nl_test_runner/nl_test_runner.py +++ b/src/test_driver/efr32/py/nl_test_runner/nl_test_runner.py @@ -90,7 +90,7 @@ def read(): return serial_device.read(8192) def runner(client) -> int: """ Run the tests""" def on_error_callback(call_object, error): - raise Exception("Error running test RPC: {}".format(status)) + raise Exception("Error running test RPC: {}".format(error)) rpc = client.client.channel(1).rpcs.chip.rpc.NlTest.Run invoke = rpc.invoke(rpc.request(), on_error=on_error_callback) diff --git a/src/test_driver/esp32/run_qemu_image.py b/src/test_driver/esp32/run_qemu_image.py index 6d684c95ce7df1..90e06191b121ea 100755 --- a/src/test_driver/esp32/run_qemu_image.py +++ b/src/test_driver/esp32/run_qemu_image.py @@ -17,7 +17,6 @@ import os import re import subprocess -import sys import click import coloredlogs @@ -147,7 +146,7 @@ def main(log_level, no_log_timestamps, image, file_image_list, qemu, verbose): print("========== TEST OUTPUT END ============") logging.info("Image %s PASSED", path) - except: + except Exception: # make sure output is visible in stdout print("========== TEST OUTPUT BEGIN ============") print(output) diff --git a/src/test_driver/linux-cirque/PythonCommissioningTest.py b/src/test_driver/linux-cirque/PythonCommissioningTest.py new file mode 100755 index 00000000000000..052474a48c8f47 
--- /dev/null +++ b/src/test_driver/linux-cirque/PythonCommissioningTest.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python3 +""" +Copyright (c) 2021 Project CHIP Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import logging +import os +import pprint +import sys +import time + +from helper.CHIPTestBase import CHIPVirtualHome + +logger = logging.getLogger('MobileDeviceTest') +logger.setLevel(logging.INFO) + +sh = logging.StreamHandler() +sh.setFormatter( + logging.Formatter( + '%(asctime)s [%(name)s] %(levelname)s %(message)s')) +logger.addHandler(sh) + +CHIP_PORT = 5540 + +CIRQUE_URL = "http://localhost:5000" +CHIP_REPO = os.path.join(os.path.abspath( + os.path.dirname(__file__)), "..", "..", "..") +TEST_EXTPANID = "fedcba9876543210" +TEST_DISCRIMINATOR = 3840 +TEST_DISCRIMINATOR2 = 3584 +MATTER_DEVELOPMENT_PAA_ROOT_CERTS = "credentials/development/paa-root-certs" + +DEVICE_CONFIG = { + 'device0': { + 'type': 'MobileDevice', + 'base_image': 'connectedhomeip/chip-cirque-device-base', + 'capability': ['TrafficControl', 'Mount'], + 'rcp_mode': True, + 'docker_network': 'Ipv6', + 'traffic_control': {'latencyMs': 100}, + "mount_pairs": [[CHIP_REPO, CHIP_REPO]], + }, + 'device1': { + 'type': 'CHIPEndDevice', + 'base_image': 'connectedhomeip/chip-cirque-device-base', + 'capability': ['Thread', 'TrafficControl', 'Mount'], + 'rcp_mode': True, + 'docker_network': 'Ipv6', + 'traffic_control': {'latencyMs': 100}, + "mount_pairs": [[CHIP_REPO, CHIP_REPO]], + }, + 'device2': { + 'type': 
'CHIPEndDevice', + 'base_image': 'connectedhomeip/chip-cirque-device-base', + 'capability': ['Thread', 'TrafficControl', 'Mount'], + 'rcp_mode': True, + 'docker_network': 'Ipv6', + 'traffic_control': {'latencyMs': 100}, + "mount_pairs": [[CHIP_REPO, CHIP_REPO]], + } +} + + +class TestCommissioner(CHIPVirtualHome): + def __init__(self, device_config): + super().__init__(CIRQUE_URL, device_config) + self.logger = logger + + def setup(self): + self.initialize_home() + + def test_routine(self): + self.run_controller_test() + + def run_controller_test(self): + servers = [{ + "ip": device['description']['ipv6_addr'], + "id": device['id'] + } for device in self.non_ap_devices + if device['type'] == 'CHIPEndDevice'] + req_ids = [device['id'] for device in self.non_ap_devices + if device['type'] == 'MobileDevice'] + + servers[0]['discriminator'] = TEST_DISCRIMINATOR + servers[0]['nodeid'] = 1 + servers[1]['discriminator'] = TEST_DISCRIMINATOR2 + servers[1]['nodeid'] = 2 + + for server in servers: + self.execute_device_cmd(server['id'], "CHIPCirqueDaemon.py -- run gdb -return-child-result -q -ex \"set pagination off\" -ex run -ex \"bt 25\" --args {} --thread --discriminator {}".format( + os.path.join(CHIP_REPO, "out/debug/standalone/chip-all-clusters-app"), server['discriminator'])) + + self.reset_thread_devices([server['id'] for server in servers]) + + req_device_id = req_ids[0] + + self.execute_device_cmd(req_device_id, "pip3 install {}".format(os.path.join( + CHIP_REPO, "out/debug/linux_x64_gcc/controller/python/chip_clusters-0.0-py3-none-any.whl"))) + self.execute_device_cmd(req_device_id, "pip3 install {}".format(os.path.join( + CHIP_REPO, "out/debug/linux_x64_gcc/controller/python/chip_core-0.0-cp37-abi3-linux_x86_64.whl"))) + self.execute_device_cmd(req_device_id, "pip3 install {}".format(os.path.join( + CHIP_REPO, "out/debug/linux_x64_gcc/controller/python/chip_repl-0.0-py3-none-any.whl"))) + + command = "gdb -return-child-result -q -ex run -ex bt --args python3 {} 
-t 150 -d {} --paa-trust-store-path {} --nodeid {}".format( + os.path.join( + CHIP_REPO, "src/controller/python/test/test_scripts/python_commissioning_flow_test.py"), + TEST_DISCRIMINATOR, + os.path.join(CHIP_REPO, MATTER_DEVELOPMENT_PAA_ROOT_CERTS), + servers[0]['nodeid']) + ret = self.execute_device_cmd(req_device_id, command) + + self.assertEqual(ret['return_code'], '0', + "Test failed: non-zero return code") + + command = "gdb -return-child-result -q -ex run -ex bt --args python3 {} -t 150 -d {} --paa-trust-store-path {} --nodeid {} --bad-cert-issuer".format( + os.path.join( + CHIP_REPO, "src/controller/python/test/test_scripts/python_commissioning_flow_test.py"), + TEST_DISCRIMINATOR2, + os.path.join(CHIP_REPO, MATTER_DEVELOPMENT_PAA_ROOT_CERTS), + servers[1]['nodeid']) + ret = self.execute_device_cmd(req_device_id, command) + + self.assertEqual(ret['return_code'], '0', + "Test failed: non-zero return code") + + +if __name__ == "__main__": + sys.exit(TestCommissioner(DEVICE_CONFIG).run_test()) diff --git a/src/test_driver/tizen/.gn b/src/test_driver/tizen/.gn new file mode 100644 index 00000000000000..fa6b2fc9621e28 --- /dev/null +++ b/src/test_driver/tizen/.gn @@ -0,0 +1,25 @@ +# Copyright (c) 2020 Project CHIP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import("//build_overrides/build.gni") + +# The location of the build configuration file. +buildconfig = "${build_root}/config/BUILDCONFIG.gn" + +# CHIP uses angle bracket includes. 
+check_system_includes = true + +default_args = { + target_os = "tizen" +} diff --git a/src/test_driver/tizen/BUILD.gn b/src/test_driver/tizen/BUILD.gn new file mode 100644 index 00000000000000..9d7c863d3ce7ca --- /dev/null +++ b/src/test_driver/tizen/BUILD.gn @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Project CHIP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +group("check") { + testonly = true + deps = [ "integration_tests/lighting-app:check" ] +} diff --git a/src/test_driver/tizen/README.md b/src/test_driver/tizen/README.md new file mode 100644 index 00000000000000..b4e8ed39ad5312 --- /dev/null +++ b/src/test_driver/tizen/README.md @@ -0,0 +1,54 @@ +# CHIP Tests on QEMU + +Tizen runs mostly on ARM architecture. In order to run tests on Tizen, we need +to use QEMU. This document describes how to build and run CHIP tests on QEMU. + +## Obtaining Tizen QEMU Docker Image + +All tools and dependencies required to build and run tests on Tizen on QEMU are +included in the `chip-build-tizen-qemu` docker image. One can pull the docker +image from hub.docker.com or build it locally using the provided Dockerfile in +`integrations/docker/images/chip-build-tizen-qemu` directory. + +```sh +# Pull the image from hub.docker.com +docker pull connectedhomeip/chip-build-tizen-qemu:latest +``` + +## Building and Running Tests on QEMU + +All steps described below should be done inside the docker container. 
+ +```sh +docker run -it --rm --name chip-tizen-qemu \ + connectedhomeip/chip-build-tizen-qemu:latest /bin/bash +``` + +### Clone the connectedhomeip repository + +```sh +git clone https://github.com/project-chip/connectedhomeip.git +``` + +### Activate the environment + +```sh +cd connectedhomeip +source scripts/activate.sh +``` + +### Generate and run test target + +As for now, Tizen QEMU-based test driver does not support BLE. In order to +disable BLE, one needs to pass `chip_config_network_layer_ble=false` to the args +argument of the `gn gen` command. + +```sh +# Generate test target +gn gen --check --fail-on-unused-args \ + --root="$PWD/src/test_driver/tizen" \ + --args="target_os=\"tizen\" target_cpu=\"arm\" chip_config_network_layer_ble=false" \ + out/tizen-check +# Run Tizen QEMU-based tests +ninja -C out/tizen-check check +``` diff --git a/src/test_driver/tizen/build_overrides b/src/test_driver/tizen/build_overrides new file mode 120000 index 00000000000000..f2758328a72777 --- /dev/null +++ b/src/test_driver/tizen/build_overrides @@ -0,0 +1 @@ +../../../examples/build_overrides \ No newline at end of file diff --git a/src/test_driver/tizen/integration_tests/lighting-app/BUILD.gn b/src/test_driver/tizen/integration_tests/lighting-app/BUILD.gn new file mode 100644 index 00000000000000..36fb2c340a23ec --- /dev/null +++ b/src/test_driver/tizen/integration_tests/lighting-app/BUILD.gn @@ -0,0 +1,46 @@ +# Copyright (c) 2020 Project CHIP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import("//build_overrides/chip.gni") +import("//build_overrides/tizen.gni") + +import("${tizen_sdk_build_root}/tizen_sdk.gni") + +tizen_qemu_mkisofs("test-runner") { + runner = "runner.sh" + + # Build applications used in the test. + deps = [ + "${chip_root}/examples/chip-tool:chip-tool", + "${chip_root}/examples/lighting-app/tizen:chip-lighting-app:tpk", + ] + + # Use artifacts created by the dependencies. + assets = [ + rebase_path("${root_build_dir}/chip-tool"), + rebase_path( + "${root_build_dir}/org.tizen.matter.example.lighting/out/org.tizen.matter.example.lighting-1.0.0.tpk"), + ] +} + +tizen_qemu_run("check") { + # Enable network support, so Tizen can obtain current date/time from the + # network. Correct date/time is required for the commissioning process - + # attestation will fail otherwise. + virtio_net = true + + deps = [ ":test-runner" ] + mkisofs_outputs = get_target_outputs(":test-runner") + iso_image = rebase_path(mkisofs_outputs[0]) +} diff --git a/src/test_driver/tizen/integration_tests/lighting-app/runner.sh b/src/test_driver/tizen/integration_tests/lighting-app/runner.sh new file mode 100755 index 00000000000000..c214123e5c8c92 --- /dev/null +++ b/src/test_driver/tizen/integration_tests/lighting-app/runner.sh @@ -0,0 +1,34 @@ +#!/bin/sh + +# +# Copyright (c) 2021 Project CHIP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +set -e + +# Print CHIP logs on stdout +dlogutil CHIP & + +# Install lighting Matter app +pkgcmd -i -t tpk -p /mnt/chip/org.tizen.matter.* +# Launch lighting Matter app +app_launcher -s org.tizen.matter.example.lighting + +# TEST: pair app using network commissioning +/mnt/chip/chip-tool pairing onnetwork 1 20202021 +# TEST: turn on light +/mnt/chip/chip-tool onoff on 1 1 +# TEST: turn off light +/mnt/chip/chip-tool onoff off 1 1 diff --git a/src/test_driver/tizen/third_party/connectedhomeip b/src/test_driver/tizen/third_party/connectedhomeip new file mode 120000 index 00000000000000..c866b86874994d --- /dev/null +++ b/src/test_driver/tizen/third_party/connectedhomeip @@ -0,0 +1 @@ +../../../.. \ No newline at end of file diff --git a/src/tools/chip-cert/gen_com_dut_test_vectors.py b/src/tools/chip-cert/gen_com_dut_test_vectors.py index bdbc7ca4cdd8ab..d8f22aa2c7093c 100755 --- a/src/tools/chip-cert/gen_com_dut_test_vectors.py +++ b/src/tools/chip-cert/gen_com_dut_test_vectors.py @@ -123,7 +123,8 @@ class CertType(Enum): "is_success_case": 'false', }, { - "description": "Certificate Basic Constraint extension PathLen field presence is wrong (present for DAC not present for PAI)", + "description": "Certificate Basic Constraint extension PathLen field presence is wrong " + "(present for DAC not present for PAI)", "test_folder": 'ext_basic_pathlen_presence_wrong', "error_flag": 'ext-basic-pathlen-presence-wrong', "is_success_case": 'false', @@ -165,7 +166,8 @@ class CertType(Enum): "is_success_case": 'false', }, { - "description": "Certificate Key Usage extension diginalSignature field is wrong (not present for DAC and present for PAI, which is OK as optional)", + "description": "Certificate Key Usage extension diginalSignature field is wrong " + "(not present for DAC and present for PAI, which is OK as optional)", "test_folder": 'ext_key_usage_dig_sig_wrong', "error_flag": 'ext-key-usage-dig-sig', "is_success_case": 'false', @@ -229,7 +231,8 @@ class 
CertType(Enum): "is_success_case": 'true', }, { - "description": 'Fallback VID and PID encoding example from spec: valid example showing that order or separators are not considered at all for the overall validity of the embedded fields', + "description": 'Fallback VID and PID encoding example from spec: valid example showing that ' + 'order or separators are not considered at all for the overall validity of the embedded fields', "common_name": 'Mpid:00B1,ACME Matter Devel DAC 5CDA9899,Mvid:FFF1', "test_folder": 'vidpid_fallback_encoding_03', "is_success_case": 'true', @@ -241,31 +244,36 @@ class CertType(Enum): "is_success_case": 'true', }, { - "description": 'Fallback VID and PID encoding example from spec: valid, but highly discouraged, since embedding of substrings within other substrings may be confusing to human readers', + "description": 'Fallback VID and PID encoding example from spec: valid, but highly discouraged, ' + 'since embedding of substrings within other substrings may be confusing to human readers', "common_name": 'Mvid:FFF1ACME Matter Devel DAC 5CDAMpid:00B19899', "test_folder": 'vidpid_fallback_encoding_05', "is_success_case": 'true', }, { - "description": 'Fallback VID and PID encoding example from spec: invalid, since substring following Mvid: is not exactly 4 uppercase hexadecimal digits', + "description": 'Fallback VID and PID encoding example from spec: invalid, ' + 'since substring following Mvid: is not exactly 4 uppercase hexadecimal digits', "common_name": 'ACME Matter Devel DAC 5CDA9899 Mvid:FF1 Mpid:00B1', "test_folder": 'vidpid_fallback_encoding_06', "is_success_case": 'false', }, { - "description": 'Fallback VID and PID encoding example from spec: invalid, since substring following Mvid: is not exactly 4 uppercase hexadecimal digits', + "description": 'Fallback VID and PID encoding example from spec: invalid, ' + 'since substring following Mvid: is not exactly 4 uppercase hexadecimal digits', "common_name": 'ACME Matter Devel DAC 
5CDA9899 Mvid:fff1 Mpid:00B1', "test_folder": 'vidpid_fallback_encoding_07', "is_success_case": 'false', }, { - "description": 'Fallback VID and PID encoding example from spec: invalid, since substring following Mpid: is not exactly 4 uppercase hexadecimal digits', + "description": 'Fallback VID and PID encoding example from spec: invalid, ' + 'since substring following Mpid: is not exactly 4 uppercase hexadecimal digits', "common_name": 'ACME Matter Devel DAC 5CDA9899 Mvid:FFF1 Mpid:B1', "test_folder": 'vidpid_fallback_encoding_08', "is_success_case": 'false', }, { - "description": 'Fallback VID and PID encoding example from spec: invalid, since substring following Mpid: is not exactly 4 uppercase hexadecimal digits', + "description": 'Fallback VID and PID encoding example from spec: invalid, ' + 'since substring following Mpid: is not exactly 4 uppercase hexadecimal digits', "common_name": 'ACME Matter Devel DAC 5CDA9899 Mpid: Mvid:FFF1', "test_folder": 'vidpid_fallback_encoding_09', "is_success_case": 'false', @@ -303,7 +311,8 @@ class CertType(Enum): }, # Examples with both fallback encoding in the common name and using Matter specific OIDs { - "description": 'Mix of Fallback and Matter OID encoding for VID and PID: valid, Matter OIDs are used and wrong values in the common-name are ignored', + "description": 'Mix of Fallback and Matter OID encoding for VID and PID: valid, ' + 'Matter OIDs are used and wrong values in the common-name are ignored', "common_name": 'ACME Matter Devel DAC 5CDA9899 Mvid:FFF2 Mpid:00B2', "vid": 0xFFF1, "pid": 0x00B1, @@ -311,7 +320,8 @@ class CertType(Enum): "is_success_case": 'true', }, { - "description": 'Mix of Fallback and Matter OID encoding for VID and PID: wrong, Correct values encoded in the common-name are ignored', + "description": 'Mix of Fallback and Matter OID encoding for VID and PID: wrong, ' + 'Correct values encoded in the common-name are ignored', "common_name": 'ACME Matter Devel DAC 5CDA9899 Mvid:FFF1 Mpid:00B1', 
"vid": 0xFFF2, "pid": 0x00B2, @@ -319,7 +329,8 @@ class CertType(Enum): "is_success_case": 'false', }, { - "description": 'Mix of Fallback and Matter OID encoding for VID and PID: invalid, PID is using Matter OID then VID must also use Matter OID', + "description": 'Mix of Fallback and Matter OID encoding for VID and PID: invalid, ' + 'PID is using Matter OID then VID must also use Matter OID', "common_name": 'Mvid:FFF1', "pid": 0x00B1, "test_folder": 'vidpid_fallback_encoding_17', @@ -413,7 +424,8 @@ class CertType(Enum): "is_success_case": 'false', }, { - "description": "The device_type_id field doesn't match the device_type_id value in the DCL entries associated with the VID and PID.", + "description": "The device_type_id field doesn't match the device_type_id value in the DCL entries " + "associated with the VID and PID.", "test_folder": 'device_type_id_mismatch', "error_flag": 'device-type-id-mismatch', "is_success_case": 'false', @@ -467,13 +479,15 @@ class CertType(Enum): "is_success_case": 'false', }, { - "description": 'The version_number field matches the VID and PID used in a DeviceSoftwareVersionModel entry in the DCL matching the certification record associated with the product presenting this CD.', + "description": 'The version_number field matches the VID and PID used in a DeviceSoftwareVersionModel ' + 'entry in the DCL matching the certification record associated with the product presenting this CD.', "test_folder": 'version_number_match', "error_flag": 'no-error', "is_success_case": 'true', }, { - "description": "The version_number field doesn't match the VID and PID used in a DeviceSoftwareVersionModel entry in the DCL matching the certification record associated with the product presenting this CD.", + "description": "The version_number field doesn't match the VID and PID used in a DeviceSoftwareVersionModel " + "entry in the DCL matching the certification record associated with the product presenting this CD.", "test_folder": 
'version_number_wrong', "error_flag": 'version-number-wrong', "is_success_case": 'false', @@ -509,19 +523,22 @@ class CertType(Enum): "is_success_case": 'false', }, { - "description": 'The dac_origin_vendor_id and dac_origin_product_id fields present and contain the VID and PID values that match the VID and PID found in the DAC Subject DN.', + "description": 'The dac_origin_vendor_id and dac_origin_product_id fields present and contain ' + 'the VID and PID values that match the VID and PID found in the DAC Subject DN.', "test_folder": 'dac_origin_vid_pid_present_match', "error_flag": 'dac-origin-vid-pid-present', "is_success_case": 'true', }, { - "description": "The dac_origin_vendor_id and dac_origin_product_id fields present and the VID value doesn't match the VID found in the DAC Subject DN.", + "description": "The dac_origin_vendor_id and dac_origin_product_id fields present and the VID value " + "doesn't match the VID found in the DAC Subject DN.", "test_folder": 'dac_origin_vid_pid_present_vid_mismatch', "error_flag": 'dac-origin-vid-mismatch', "is_success_case": 'false', }, { - "description": "The dac_origin_vendor_id and dac_origin_product_id fields present and the PID value doesn't match the PID found in the DAC Subject DN.", + "description": "The dac_origin_vendor_id and dac_origin_product_id fields present and the PID value " + "doesn't match the PID found in the DAC Subject DN.", "test_folder": 'dac_origin_vid_pid_present_pid_mismatch', "error_flag": 'dac-origin-pid-mismatch', "is_success_case": 'false', @@ -663,7 +680,8 @@ def __init__(self, cert_type: CertType, paa_path, test_case_out_dir): class DevCertBuilder: - def __init__(self, cert_type: CertType, error_type: str, paa_path: str, test_case_out_dir: str, chip_cert: str, vid: int, pid: int, custom_cn_attribute: str, valid_from: str): + def __init__(self, cert_type: CertType, error_type: str, paa_path: str, test_case_out_dir: str, chip_cert: str, vid: int, + pid: int, custom_cn_attribute: str, 
valid_from: str): self.vid = vid self.pid = pid self.cert_type = cert_type @@ -710,8 +728,9 @@ def make_certs_and_keys(self) -> None: else: return - cmd = self.chipcert + ' gen-att-cert ' + type_flag + error_type_flag + ' -c "' + subject_name + '" -C ' + self.signer.cert_pem + ' -K ' + \ - self.signer.key_pem + vid_flag + pid_flag + validity_flags + ' -o ' + self.own.cert_pem + ' -O ' + self.own.key_pem + cmd = self.chipcert + ' gen-att-cert ' + type_flag + error_type_flag + ' -c "' + subject_name + '" -C ' + \ + self.signer.cert_pem + ' -K ' + self.signer.key_pem + vid_flag + pid_flag + \ + validity_flags + ' -o ' + self.own.cert_pem + ' -O ' + self.own.key_pem subprocess.run(cmd, shell=True) cmd = 'openssl x509 -inform pem -in ' + self.own.cert_pem + \ ' -out ' + self.own.cert_der + ' -outform DER' @@ -762,7 +781,11 @@ def generate_test_case_vector_json(test_case_out_dir: str, test_cert: str, test_ json_dict["description"] = test_cert.upper() + " Test Vector: " + test_case["description"] if "is_success_case" in test_case: # These test cases are expected to fail when error injected in DAC but expected to pass when error injected in PAI - if (test_cert == 'pai') and (test_case["test_folder"] in ['ext_basic_pathlen0', 'vidpid_fallback_encoding_08', 'vidpid_fallback_encoding_09', 'ext_key_usage_dig_sig_wrong']): + if (test_cert == 'pai') and (test_case["test_folder"] in ['ext_basic_pathlen0', + 'vidpid_fallback_encoding_08', + 'vidpid_fallback_encoding_09', + 'ext_key_usage_dig_sig_wrong' + ]): json_dict["is_success_case"] = "true" else: json_dict["is_success_case"] = test_case["is_success_case"] @@ -944,13 +967,18 @@ def main(): if test_case["error_flag"] == 'dac-origin-pid-present' or test_case["error_flag"] == 'dac-origin-vid-pid-present': dac_origin_flag += ' -r 0x{:X}'.format(pid) - if test_case["error_flag"] == 'authorized-paa-list-count0' or test_case["error_flag"] == 'authorized-paa-list-count1-valid' or test_case["error_flag"] == 
'authorized-paa-list-count2-valid' or test_case["error_flag"] == 'authorized-paa-list-count3-invalid' or test_case["error_flag"] == 'authorized-paa-list-count10-valid' or test_case["error_flag"] == 'authorized-paa-list-count10-invalid': + if test_case["error_flag"] == 'authorized-paa-list-count0' or test_case["error_flag"] == 'authorized-paa-list-count1-valid'\ + or test_case["error_flag"] == 'authorized-paa-list-count2-valid'\ + or test_case["error_flag"] == 'authorized-paa-list-count3-invalid'\ + or test_case["error_flag"] == 'authorized-paa-list-count10-valid'\ + or test_case["error_flag"] == 'authorized-paa-list-count10-invalid': authorized_paa_flag = ' -a ' + args.paapath + 'Cert.pem' else: authorized_paa_flag = '' - cmd = chipcert + ' gen-cd -I -E ' + test_case["error_flag"] + ' -K ' + cd_key + ' -C ' + cd_cert + ' -O ' + test_case_out_dir + '/cd.der' + \ - ' -f 1 ' + vid_flag + pid_flag + dac_origin_flag + authorized_paa_flag + ' -d 0x1234 -c "ZIG20141ZB330001-24" -l 0 -i 0 -n 9876 -t 0' + cmd = chipcert + ' gen-cd -I -E ' + test_case["error_flag"] + ' -K ' + cd_key + ' -C ' + cd_cert + ' -O ' + \ + test_case_out_dir + '/cd.der' + ' -f 1 ' + vid_flag + pid_flag + dac_origin_flag + authorized_paa_flag + \ + ' -d 0x1234 -c "ZIG20141ZB330001-24" -l 0 -i 0 -n 9876 -t 0' subprocess.run(cmd, shell=True) # Generate Test Case Data Container in JSON Format diff --git a/src/tools/chip-cert/gen_op_cert_test_vectors.py b/src/tools/chip-cert/gen_op_cert_test_vectors.py index d1a074f1bed234..4f1e2d39565b7d 100755 --- a/src/tools/chip-cert/gen_op_cert_test_vectors.py +++ b/src/tools/chip-cert/gen_op_cert_test_vectors.py @@ -376,7 +376,8 @@ class CertFormat(Enum): "is_get_cert_type_expected_to_fail": False, }, { - "description": "Certificate Key Usage extension diginalSignature field is wrong (not present for NOC and present for ICAC/RCAC)", + "description": "Certificate Key Usage extension diginalSignature field is wrong " + "(not present for NOC and present for 
ICAC/RCAC)", "test_name": 'Ext-KeyUsage-DigSig-Wrong', "error_flag": 'ext-key-usage-dig-sig', "is_chip_to_x509_expected_to_fail": False, @@ -512,7 +513,8 @@ def __init__(self, cert_type: CertType, cert_form: CertFormat, test_case_out_dir class OpCertBuilder: - def __init__(self, cert_type: CertType, cert_form: CertFormat, signer_cert: str, signer_key: str, error_type: str, test_name: str, test_case_out_dir: str, chip_cert: str): + def __init__(self, cert_type: CertType, cert_form: CertFormat, signer_cert: str, signer_key: str, error_type: str, + test_name: str, test_case_out_dir: str, chip_cert: str): self.cert_type = cert_type self.cert_form = cert_form self.error_type = error_type @@ -616,7 +618,8 @@ def main(): for test_case in DER_CERT_ERROR_TEST_CASES: for cert_type in [CertType.NOC, CertType.ICAC, CertType.RCAC]: # The following error cases are applicable only for NOC - if (test_case["error_flag"] == 'subject-node-id-invalid' or test_case["error_flag"] == 'ext-basic-pathlen-presence-wrong') and cert_type != CertType.NOC: + if (test_case["error_flag"] == 'subject-node-id-invalid' or + test_case["error_flag"] == 'ext-basic-pathlen-presence-wrong') and cert_type != CertType.NOC: break if cert_type == CertType.NOC: @@ -658,7 +661,8 @@ def main(): break # The following error cases are applicable only for NOC - if (test_case["error_flag"] == 'subject-node-id-invalid' or test_case["error_flag"] == 'subject-fabric-id-missing') and cert_type != CertType.NOC: + if (test_case["error_flag"] == 'subject-node-id-invalid' + or test_case["error_flag"] == 'subject-fabric-id-missing') and cert_type != CertType.NOC: break if cert_type == CertType.NOC: @@ -685,7 +689,8 @@ def main(): if test_case["is_validate_chip_rcac_expected_to_fail"]: c_validate_chip_rcac_error_cases += builder.add_cert_to_error_cases() validate_chip_rcac_error_cases_count += 1 - if test_case["is_get_cert_type_expected_to_fail"] and not (test_case["error_flag"] == 'subject-cat-twice' and cert_type == 
CertType.NOC): + if test_case["is_get_cert_type_expected_to_fail"] and not (test_case["error_flag"] == 'subject-cat-twice' + and cert_type == CertType.NOC): c_get_cert_type_error_cases += builder.add_cert_to_error_cases() get_cert_type_error_cases_count += 1 diff --git a/src/transport/Session.h b/src/transport/Session.h index a05c27f1f7ca3b..29ebf593a9b5e3 100644 --- a/src/transport/Session.h +++ b/src/transport/Session.h @@ -222,6 +222,8 @@ class Session bool IsSecureSession() const { return GetSessionType() == SessionType::kSecure; } + bool IsUnauthenticatedSession() const { return GetSessionType() == SessionType::kUnauthenticated; } + void DispatchSessionEvent(SessionDelegate::Event event) { // Holders might remove themselves when notified. diff --git a/third_party/boringssl/repo/BUILD.gn b/third_party/boringssl/repo/BUILD.gn index cb613d28454edd..634be1687232a8 100644 --- a/third_party/boringssl/repo/BUILD.gn +++ b/third_party/boringssl/repo/BUILD.gn @@ -20,7 +20,10 @@ import("BUILD.generated.gni") config("boringssl_config") { include_dirs = [ "src/include" ] - cflags = [ "-Wno-unused-variable" ] + cflags = [ + "-Wno-unused-variable", + "-Wno-conversion", + ] if (is_clang) { cflags += [ "-Wno-shorten-64-to-32" ] diff --git a/third_party/editline/BUILD.gn b/third_party/editline/BUILD.gn index 900333b14d510e..ec2cdd4a2e3bb2 100644 --- a/third_party/editline/BUILD.gn +++ b/third_party/editline/BUILD.gn @@ -18,8 +18,10 @@ import("${build_root}/config/compiler/compiler.gni") config("editline_config") { include_dirs = [ "repo/include" ] + cflags = [ "-Wno-conversion" ] + if (is_clang) { - cflags = [ "-Wno-shorten-64-to-32" ] + cflags += [ "-Wno-shorten-64-to-32" ] } } diff --git a/third_party/java_deps/BUILD.gn b/third_party/java_deps/BUILD.gn index ffcd48dfe5fe3c..d265a43b33a6fe 100644 --- a/third_party/java_deps/BUILD.gn +++ b/third_party/java_deps/BUILD.gn @@ -24,3 +24,7 @@ java_prebuilt("annotation") { java_prebuilt("json") { jar_path = 
"artifacts/json-20220924.jar" } + +java_prebuilt("kotlin-stdlib") { + jar_path = "artifacts/kotlin-stdlib-1.8.10.jar" +} diff --git a/third_party/java_deps/set_up_java_deps.sh b/third_party/java_deps/set_up_java_deps.sh index e0210b3bfacdff..6098e956718055 100755 --- a/third_party/java_deps/set_up_java_deps.sh +++ b/third_party/java_deps/set_up_java_deps.sh @@ -19,3 +19,4 @@ mkdir -p third_party/java_deps/artifacts curl --fail --location --silent --show-error https://repo1.maven.org/maven2/com/google/code/findbugs/jsr305/3.0.2/jsr305-3.0.2.jar -o third_party/java_deps/artifacts/jsr305-3.0.2.jar curl --fail --location --silent --show-error https://repo1.maven.org/maven2/org/json/json/20220924/json-20220924.jar -o third_party/java_deps/artifacts/json-20220924.jar +curl --fail --location --silent --show-error https://repo1.maven.org/maven2/org/jetbrains/kotlin/kotlin-stdlib/1.8.10/kotlin-stdlib-1.8.10.jar -o third_party/java_deps/artifacts/kotlin-stdlib-1.8.10.jar diff --git a/third_party/mbedtls/mbedtls.gni b/third_party/mbedtls/mbedtls.gni index 2c65b2491ecdc2..e865e9c6584ce9 100644 --- a/third_party/mbedtls/mbedtls.gni +++ b/third_party/mbedtls/mbedtls.gni @@ -28,6 +28,7 @@ template("mbedtls_target") { "-Wno-string-concatenation", "-Wno-unused-but-set-parameter", "-Wno-format-nonliteral", # Because of mbedtls_debug_print_msg + "-Wno-conversion", # Lots of -Wconversion warnings, sadly. 
] if (is_clang) { diff --git a/third_party/mbedtls/repo b/third_party/mbedtls/repo index 908e810098a216..b361e04207831f 160000 --- a/third_party/mbedtls/repo +++ b/third_party/mbedtls/repo @@ -1 +1 @@ -Subproject commit 908e810098a216a448f1065c509c3dac8a6b5821 +Subproject commit b361e04207831f753a29d6036361d4447aaaa3bc diff --git a/third_party/nlunit-test/BUILD.gn b/third_party/nlunit-test/BUILD.gn index 48f81f0a395301..d6907714acd446 100644 --- a/third_party/nlunit-test/BUILD.gn +++ b/third_party/nlunit-test/BUILD.gn @@ -18,8 +18,10 @@ import("${build_root}/config/compiler/compiler.gni") config("nlunit-test_config") { include_dirs = [ "repo/src" ] + cflags = [ "-Wno-conversion" ] + if (is_clang) { - cflags = [ "-Wno-shorten-64-to-32" ] + cflags += [ "-Wno-shorten-64-to-32" ] } } diff --git a/third_party/openthread/repo b/third_party/openthread/repo index afbb2d57998ad8..7f0b5fac4b9e95 160000 --- a/third_party/openthread/repo +++ b/third_party/openthread/repo @@ -1 +1 @@ -Subproject commit afbb2d57998ad8d150bab909c1784d6ea9160209 +Subproject commit 7f0b5fac4b9e95dfe96a2d61a4469c9fe8ec4802 diff --git a/third_party/silabs/efr32_sdk.gni b/third_party/silabs/efr32_sdk.gni index e51dc3e79eae6f..cb43cb73826bf8 100644 --- a/third_party/silabs/efr32_sdk.gni +++ b/third_party/silabs/efr32_sdk.gni @@ -76,7 +76,6 @@ template("efr32_sdk") { "${efr32_sdk_root}", "${efr32_sdk_root}/util/plugin/security_manager/", "${efr32_sdk_root}/hardware/kit/common/bsp", - "${efr32_sdk_root}/app/common/util/app_assert/", "${efr32_sdk_root}/hardware/board/inc", "${efr32_sdk_root}/hardware/driver/memlcd/inc", "${efr32_sdk_root}/hardware/driver/memlcd/src/ls013b7dh03", @@ -129,8 +128,6 @@ template("efr32_sdk") { "${efr32_sdk_root}/platform/base/hal/plugin/psstore", "${efr32_sdk_root}/platform/base/hal/plugin/antenna", "${efr32_sdk_root}/protocol/bluetooth/inc/", - "${efr32_sdk_root}/app/bluetooth/common/in_place_ota_dfu/", - "${efr32_sdk_root}/app/bluetooth/common/in_place_ota_dfu/config/", 
"${efr32_sdk_root}/util/plugin/plugin-common/fem-control", "${efr32_sdk_root}/util/silicon_labs/silabs_core/graphics", "${efr32_sdk_root}/util/silicon_labs/silabs_core/memory_manager", @@ -348,10 +345,7 @@ template("efr32_sdk") { ] if (!chip_enable_ble_rs911x) { - libs += [ - "${sdk_support_root}/protocol/bluetooth/lib/EFR32MG12P/GCC/binapploader.o", - "${sdk_support_root}/protocol/bluetooth/lib/EFR32MG12P/GCC/libbluetooth.a", - ] + libs += [ "${sdk_support_root}/protocol/bluetooth/lib/EFR32MG12P/GCC/libbluetooth.a" ] } defines += [ "EFR32MG12" ] @@ -366,7 +360,6 @@ template("efr32_sdk") { ] libs += [ - "${sdk_support_root}/protocol/bluetooth/lib/EFR32MG21/GCC/binapploader.o", "${sdk_support_root}/protocol/bluetooth/lib/EFR32MG21/GCC/libbluetooth.a", "${sdk_support_root}/platform/radio/rail_lib/autogen/librail_release/librail_multiprotocol_efr32xg21_gcc_release.a", "${sdk_support_root}/platform/emdrv/nvm3/lib/libnvm3_CM33_gcc.a", @@ -388,7 +381,6 @@ template("efr32_sdk") { ] libs += [ - "${sdk_support_root}/protocol/bluetooth/lib/EFR32MG24/GCC/libapploader.a", "${sdk_support_root}/protocol/bluetooth/lib/EFR32MG24/GCC/libbluetooth.a", "${sdk_support_root}/platform/radio/rail_lib/autogen/librail_release/librail_multiprotocol_efr32xg24_gcc_release.a", "${sdk_support_root}/platform/emdrv/nvm3/lib/libnvm3_CM33_gcc.a", @@ -411,7 +403,6 @@ template("efr32_sdk") { ] libs += [ - "${sdk_support_root}/protocol/bluetooth/lib/EFR32MG24/GCC/libapploader.a", "${sdk_support_root}/protocol/bluetooth/lib/EFR32MG24/GCC/libbluetooth.a", "${sdk_support_root}/platform/radio/rail_lib/autogen/librail_release/librail_multiprotocol_module_efr32xg24_gcc_release.a", "${sdk_support_root}/platform/emdrv/nvm3/lib/libnvm3_CM33_gcc.a", @@ -489,7 +480,6 @@ template("efr32_sdk") { source_set(sdk_target_name) { sources = [ "${chip_root}/third_party/mbedtls/repo/include/mbedtls/platform.h", - "${efr32_sdk_root}/app/bluetooth/common/in_place_ota_dfu/sl_bt_in_place_ota_dfu.c", 
"${efr32_sdk_root}/hardware/board/src/sl_board_control_gpio.c", "${efr32_sdk_root}/hardware/board/src/sl_board_init.c", "${efr32_sdk_root}/platform/CMSIS/RTOS2/Source/os_systick.c", @@ -781,7 +771,6 @@ template("efr32_sdk") { "${efr32_sdk_root}/platform/service/device_init/src/sl_device_init_emu_s1.c", "${efr32_sdk_root}/platform/service/device_init/src/sl_device_init_hfxo_s1.c", "${efr32_sdk_root}/platform/service/device_init/src/sl_device_init_lfxo_s1.c", - "${efr32_sdk_root}/protocol/bluetooth/src/sl_apploader_util_s1.c", "${efr32_sdk_root}/util/third_party/freertos/kernel/portable/GCC/ARM_CM4F/port.c", ] } else if (silabs_family == "efr32mg21") { @@ -859,7 +848,6 @@ template("efr32_sdk") { "${efr32_sdk_root}/platform/service/device_init/src/sl_device_init_hfxo_s2.c", "${efr32_sdk_root}/platform/service/device_init/src/sl_device_init_lfxo_s2.c", "${efr32_sdk_root}/platform/service/hfxo_manager/src/sl_hfxo_manager_hal_s2.c", - "${efr32_sdk_root}/protocol/bluetooth/src/sl_apploader_util_s2.c", "${efr32_sdk_root}/util/third_party/freertos/kernel/portable/GCC/ARM_CM33_NTZ/non_secure/port.c", "${efr32_sdk_root}/util/third_party/freertos/kernel/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c", ] @@ -898,7 +886,6 @@ template("efr32_sdk") { "${efr32_sdk_root}/platform/service/device_init/src/sl_device_init_hfxo_mgm24.c", "${efr32_sdk_root}/platform/service/device_init/src/sl_device_init_lfxo_s2.c", "${efr32_sdk_root}/platform/service/hfxo_manager/src/sl_hfxo_manager_hal_s2.c", - "${efr32_sdk_root}/protocol/bluetooth/src/sl_apploader_util_s2.c", "${efr32_sdk_root}/util/third_party/freertos/kernel/portable/GCC/ARM_CM33_NTZ/non_secure/port.c", "${efr32_sdk_root}/util/third_party/freertos/kernel/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c", ] diff --git a/third_party/silabs/matter_support b/third_party/silabs/matter_support index de4c8e3075e9ce..9d8b890d978d18 160000 --- a/third_party/silabs/matter_support +++ b/third_party/silabs/matter_support @@ -1 +1 @@ -Subproject 
commit de4c8e3075e9ce4b7487c351a7c042b490fb8d3e +Subproject commit 9d8b890d978d18157c7e6fbe6e2ee0d5bdae9a11 diff --git a/third_party/tizen/tizen_sdk.gni b/third_party/tizen/tizen_sdk.gni index 0ac79b3fb5ba3a..32bb5539a483f6 100644 --- a/third_party/tizen/tizen_sdk.gni +++ b/third_party/tizen/tizen_sdk.gni @@ -110,20 +110,22 @@ template("tizen_sdk_package") { "It is required to specify a `sign_security_profile` which " + "should be used for signing TPK package.") - # Output directory where packaging will occur. We need a separate directory - # for this, because Tizen Studio CLI scans "res" (resources), "shared" and - # "lib" directories for items to pack. In our case it could include in the - # TPK package libraries available in ${root_out_dir}/lib directory. - tizen_package_dir = "${root_build_dir}/package" - tizen_package_out_dir = "${tizen_package_dir}/out" - # Extract data from Tizen XML manifest. manifest = exec_script(tizen_manifest_parser, [ rebase_path(invoker.manifest, root_build_dir) ], "json") manifest_package = manifest["package"] + manifest_package_name = manifest_package["name"] + manifest_package_version = manifest_package["version"] manifest_apps = manifest["apps"] + # Output directory where packaging will occur. We need a separate directory + # for this, because Tizen Studio CLI scans "res" (resources), "shared" and + # "lib" directories for items to pack. In our case it could include in the + # TPK package libraries available in ${root_out_dir}/lib directory. + tizen_package_dir = "${root_build_dir}/${manifest_package_name}" + tizen_package_out_dir = "${tizen_package_dir}/out" + # Copy Tizen manifest from the source directory. 
copy("${target_name}:manifest") { sources = [ invoker.manifest ] @@ -146,10 +148,10 @@ template("tizen_sdk_package") { } } - tpk = manifest_package["name"] + "-" + manifest_package["version"] + ".tpk" + tpk = "${manifest_package_name}-${manifest_package_version}.tpk" tizen_sdk(target_name) { deps = invoker.deps + dependencies - outputs = [ "${tizen_package_out_dir}/" + tpk ] + outputs = [ "${tizen_package_out_dir}/${tpk}" ] project_build_dir = tizen_package_dir args = [ "package", @@ -225,7 +227,7 @@ template("tizen_qemu_run") { "--image-iso=" + invoker.iso_image, "--output=" + rebase_path(output_log_file), ] - if (defined(invoker.virtio_net)) { + if (defined(invoker.virtio_net) && invoker.virtio_net) { args += [ "--virtio-net" ] } diff --git a/zzz_generated/app-common/app-common/zap-generated/attributes/Accessors.cpp b/zzz_generated/app-common/app-common/zap-generated/attributes/Accessors.cpp index 944a428f8ce1e0..ebd102aded5e53 100644 --- a/zzz_generated/app-common/app-common/zap-generated/attributes/Accessors.cpp +++ b/zzz_generated/app-common/app-common/zap-generated/attributes/Accessors.cpp @@ -3357,9 +3357,9 @@ namespace Attributes { namespace HourFormat { -EmberAfStatus Get(chip::EndpointId endpoint, chip::app::Clusters::TimeFormatLocalization::HourFormat * value) +EmberAfStatus Get(chip::EndpointId endpoint, chip::app::Clusters::TimeFormatLocalization::HourFormatEnum * value) { - using Traits = NumericAttributeTraits; + using Traits = NumericAttributeTraits; Traits::StorageType temp; uint8_t * readable = Traits::ToAttributeStoreRepresentation(temp); EmberAfStatus status = emberAfReadAttribute(endpoint, Clusters::TimeFormatLocalization::Id, Id, readable, sizeof(temp)); @@ -3371,9 +3371,9 @@ EmberAfStatus Get(chip::EndpointId endpoint, chip::app::Clusters::TimeFormatLoca *value = Traits::StorageToWorking(temp); return status; } -EmberAfStatus Set(chip::EndpointId endpoint, chip::app::Clusters::TimeFormatLocalization::HourFormat value) +EmberAfStatus 
Set(chip::EndpointId endpoint, chip::app::Clusters::TimeFormatLocalization::HourFormatEnum value) { - using Traits = NumericAttributeTraits; + using Traits = NumericAttributeTraits; if (!Traits::CanRepresentValue(/* isNullable = */ false, value)) { return EMBER_ZCL_STATUS_CONSTRAINT_ERROR; @@ -3388,9 +3388,9 @@ EmberAfStatus Set(chip::EndpointId endpoint, chip::app::Clusters::TimeFormatLoca namespace ActiveCalendarType { -EmberAfStatus Get(chip::EndpointId endpoint, chip::app::Clusters::TimeFormatLocalization::CalendarType * value) +EmberAfStatus Get(chip::EndpointId endpoint, chip::app::Clusters::TimeFormatLocalization::CalendarTypeEnum * value) { - using Traits = NumericAttributeTraits; + using Traits = NumericAttributeTraits; Traits::StorageType temp; uint8_t * readable = Traits::ToAttributeStoreRepresentation(temp); EmberAfStatus status = emberAfReadAttribute(endpoint, Clusters::TimeFormatLocalization::Id, Id, readable, sizeof(temp)); @@ -3402,9 +3402,9 @@ EmberAfStatus Get(chip::EndpointId endpoint, chip::app::Clusters::TimeFormatLoca *value = Traits::StorageToWorking(temp); return status; } -EmberAfStatus Set(chip::EndpointId endpoint, chip::app::Clusters::TimeFormatLocalization::CalendarType value) +EmberAfStatus Set(chip::EndpointId endpoint, chip::app::Clusters::TimeFormatLocalization::CalendarTypeEnum value) { - using Traits = NumericAttributeTraits; + using Traits = NumericAttributeTraits; if (!Traits::CanRepresentValue(/* isNullable = */ false, value)) { return EMBER_ZCL_STATUS_CONSTRAINT_ERROR; diff --git a/zzz_generated/app-common/app-common/zap-generated/attributes/Accessors.h b/zzz_generated/app-common/app-common/zap-generated/attributes/Accessors.h index 655c55a2d24f99..510d8bd4cc582b 100644 --- a/zzz_generated/app-common/app-common/zap-generated/attributes/Accessors.h +++ b/zzz_generated/app-common/app-common/zap-generated/attributes/Accessors.h @@ -645,13 +645,14 @@ namespace TimeFormatLocalization { namespace Attributes { namespace HourFormat { 
-EmberAfStatus Get(chip::EndpointId endpoint, chip::app::Clusters::TimeFormatLocalization::HourFormat * value); // HourFormat -EmberAfStatus Set(chip::EndpointId endpoint, chip::app::Clusters::TimeFormatLocalization::HourFormat value); +EmberAfStatus Get(chip::EndpointId endpoint, chip::app::Clusters::TimeFormatLocalization::HourFormatEnum * value); // HourFormatEnum +EmberAfStatus Set(chip::EndpointId endpoint, chip::app::Clusters::TimeFormatLocalization::HourFormatEnum value); } // namespace HourFormat namespace ActiveCalendarType { -EmberAfStatus Get(chip::EndpointId endpoint, chip::app::Clusters::TimeFormatLocalization::CalendarType * value); // CalendarType -EmberAfStatus Set(chip::EndpointId endpoint, chip::app::Clusters::TimeFormatLocalization::CalendarType value); +EmberAfStatus Get(chip::EndpointId endpoint, + chip::app::Clusters::TimeFormatLocalization::CalendarTypeEnum * value); // CalendarTypeEnum +EmberAfStatus Set(chip::EndpointId endpoint, chip::app::Clusters::TimeFormatLocalization::CalendarTypeEnum value); } // namespace ActiveCalendarType namespace FeatureMap { diff --git a/zzz_generated/app-common/app-common/zap-generated/cluster-enums-check.h b/zzz_generated/app-common/app-common/zap-generated/cluster-enums-check.h index 8fe45e0441c1b9..5e979cb27f78a9 100644 --- a/zzz_generated/app-common/app-common/zap-generated/cluster-enums-check.h +++ b/zzz_generated/app-common/app-common/zap-generated/cluster-enums-check.h @@ -374,9 +374,9 @@ static auto __attribute__((unused)) EnsureKnownEnumValue(OtaSoftwareUpdateReques } } -static auto __attribute__((unused)) EnsureKnownEnumValue(TimeFormatLocalization::CalendarType val) +static auto __attribute__((unused)) EnsureKnownEnumValue(TimeFormatLocalization::CalendarTypeEnum val) { - using EnumType = TimeFormatLocalization::CalendarType; + using EnumType = TimeFormatLocalization::CalendarTypeEnum; switch (val) { case EnumType::kBuddhist: @@ -396,9 +396,9 @@ static auto __attribute__((unused)) 
EnsureKnownEnumValue(TimeFormatLocalization: return static_cast(12); } } -static auto __attribute__((unused)) EnsureKnownEnumValue(TimeFormatLocalization::HourFormat val) +static auto __attribute__((unused)) EnsureKnownEnumValue(TimeFormatLocalization::HourFormatEnum val) { - using EnumType = TimeFormatLocalization::HourFormat; + using EnumType = TimeFormatLocalization::HourFormatEnum; switch (val) { case EnumType::k12hr: diff --git a/zzz_generated/app-common/app-common/zap-generated/cluster-enums.h b/zzz_generated/app-common/app-common/zap-generated/cluster-enums.h index 3475c05ea9947d..df02a6b4a9add0 100644 --- a/zzz_generated/app-common/app-common/zap-generated/cluster-enums.h +++ b/zzz_generated/app-common/app-common/zap-generated/cluster-enums.h @@ -470,8 +470,8 @@ namespace LocalizationConfiguration {} // namespace LocalizationConfiguration namespace TimeFormatLocalization { -// Enum for CalendarType -enum class CalendarType : uint8_t +// Enum for CalendarTypeEnum +enum class CalendarTypeEnum : uint8_t { kBuddhist = 0x00, kChinese = 0x01, @@ -492,8 +492,8 @@ enum class CalendarType : uint8_t kUnknownEnumValue = 12, }; -// Enum for HourFormat -enum class HourFormat : uint8_t +// Enum for HourFormatEnum +enum class HourFormatEnum : uint8_t { k12hr = 0x00, k24hr = 0x01, diff --git a/zzz_generated/app-common/app-common/zap-generated/cluster-objects.h b/zzz_generated/app-common/app-common/zap-generated/cluster-objects.h index afabb3027bd84b..42a11b64f7bd3c 100644 --- a/zzz_generated/app-common/app-common/zap-generated/cluster-objects.h +++ b/zzz_generated/app-common/app-common/zap-generated/cluster-objects.h @@ -5751,9 +5751,9 @@ namespace Attributes { namespace HourFormat { struct TypeInfo { - using Type = chip::app::Clusters::TimeFormatLocalization::HourFormat; - using DecodableType = chip::app::Clusters::TimeFormatLocalization::HourFormat; - using DecodableArgType = chip::app::Clusters::TimeFormatLocalization::HourFormat; + using Type = 
chip::app::Clusters::TimeFormatLocalization::HourFormatEnum; + using DecodableType = chip::app::Clusters::TimeFormatLocalization::HourFormatEnum; + using DecodableArgType = chip::app::Clusters::TimeFormatLocalization::HourFormatEnum; static constexpr ClusterId GetClusterId() { return Clusters::TimeFormatLocalization::Id; } static constexpr AttributeId GetAttributeId() { return Attributes::HourFormat::Id; } @@ -5763,9 +5763,9 @@ struct TypeInfo namespace ActiveCalendarType { struct TypeInfo { - using Type = chip::app::Clusters::TimeFormatLocalization::CalendarType; - using DecodableType = chip::app::Clusters::TimeFormatLocalization::CalendarType; - using DecodableArgType = chip::app::Clusters::TimeFormatLocalization::CalendarType; + using Type = chip::app::Clusters::TimeFormatLocalization::CalendarTypeEnum; + using DecodableType = chip::app::Clusters::TimeFormatLocalization::CalendarTypeEnum; + using DecodableArgType = chip::app::Clusters::TimeFormatLocalization::CalendarTypeEnum; static constexpr ClusterId GetClusterId() { return Clusters::TimeFormatLocalization::Id; } static constexpr AttributeId GetAttributeId() { return Attributes::ActiveCalendarType::Id; } @@ -5775,9 +5775,10 @@ struct TypeInfo namespace SupportedCalendarTypes { struct TypeInfo { - using Type = chip::app::DataModel::List; - using DecodableType = chip::app::DataModel::DecodableList; - using DecodableArgType = const chip::app::DataModel::DecodableList &; + using Type = chip::app::DataModel::List; + using DecodableType = chip::app::DataModel::DecodableList; + using DecodableArgType = + const chip::app::DataModel::DecodableList &; static constexpr ClusterId GetClusterId() { return Clusters::TimeFormatLocalization::Id; } static constexpr AttributeId GetAttributeId() { return Attributes::SupportedCalendarTypes::Id; } @@ -5830,9 +5831,9 @@ struct TypeInfo CHIP_ERROR Decode(TLV::TLVReader & reader, const ConcreteAttributePath & path); Attributes::HourFormat::TypeInfo::DecodableType hourFormat = - 
static_cast(0); + static_cast(0); Attributes::ActiveCalendarType::TypeInfo::DecodableType activeCalendarType = - static_cast(0); + static_cast(0); Attributes::SupportedCalendarTypes::TypeInfo::DecodableType supportedCalendarTypes; Attributes::GeneratedCommandList::TypeInfo::DecodableType generatedCommandList; Attributes::AcceptedCommandList::TypeInfo::DecodableType acceptedCommandList; diff --git a/zzz_generated/app-common/app-common/zap-generated/enums.h b/zzz_generated/app-common/app-common/zap-generated/enums.h index db52df2fdd3394..97e4e536571461 100644 --- a/zzz_generated/app-common/app-common/zap-generated/enums.h +++ b/zzz_generated/app-common/app-common/zap-generated/enums.h @@ -251,9 +251,3 @@ enum EmberAfStepMode : uint8_t #define EMBER_AF_COLOR_LOOP_UPDATE_FLAGS_UPDATE_TIME_OFFSET (2) #define EMBER_AF_COLOR_LOOP_UPDATE_FLAGS_UPDATE_START_HUE (8) #define EMBER_AF_COLOR_LOOP_UPDATE_FLAGS_UPDATE_START_HUE_OFFSET (3) -#define EMBER_AF_LEVEL_CONTROL_FEATURE_ON_OFF (1) -#define EMBER_AF_LEVEL_CONTROL_FEATURE_ON_OFF_OFFSET (0) -#define EMBER_AF_LEVEL_CONTROL_FEATURE_LIGHTING (2) -#define EMBER_AF_LEVEL_CONTROL_FEATURE_LIGHTING_OFFSET (1) -#define EMBER_AF_LEVEL_CONTROL_FEATURE_FREQUENCY (4) -#define EMBER_AF_LEVEL_CONTROL_FEATURE_FREQUENCY_OFFSET (2) diff --git a/zzz_generated/chip-tool/zap-generated/cluster/Commands.h b/zzz_generated/chip-tool/zap-generated/cluster/Commands.h index 9a559a63ffea5f..98e822796ecc4a 100644 --- a/zzz_generated/chip-tool/zap-generated/cluster/Commands.h +++ b/zzz_generated/chip-tool/zap-generated/cluster/Commands.h @@ -9677,13 +9677,13 @@ void registerClusterTimeFormatLocalization(Commands & commands, CredentialIssuer make_unique(Id, "feature-map", Attributes::FeatureMap::Id, credsIssuerConfig), // make_unique(Id, "cluster-revision", Attributes::ClusterRevision::Id, credsIssuerConfig), // make_unique>(Id, credsIssuerConfig), // - make_unique>( + make_unique>( Id, "hour-format", 0, UINT8_MAX, Attributes::HourFormat::Id, 
WriteCommandType::kWrite, credsIssuerConfig), // - make_unique>( + make_unique>( Id, "active-calendar-type", 0, UINT8_MAX, Attributes::ActiveCalendarType::Id, WriteCommandType::kWrite, credsIssuerConfig), // - make_unique< - WriteAttributeAsComplex>>( + make_unique>>( Id, "supported-calendar-types", Attributes::SupportedCalendarTypes::Id, WriteCommandType::kForceWrite, credsIssuerConfig), // make_unique>>( diff --git a/zzz_generated/chip-tool/zap-generated/cluster/logging/DataModelLogger.cpp b/zzz_generated/chip-tool/zap-generated/cluster/logging/DataModelLogger.cpp index 5af013702ebcee..578944136f8fdd 100644 --- a/zzz_generated/chip-tool/zap-generated/cluster/logging/DataModelLogger.cpp +++ b/zzz_generated/chip-tool/zap-generated/cluster/logging/DataModelLogger.cpp @@ -5194,17 +5194,17 @@ CHIP_ERROR DataModelLogger::LogAttribute(const chip::app::ConcreteDataAttributeP switch (path.mAttributeId) { case TimeFormatLocalization::Attributes::HourFormat::Id: { - chip::app::Clusters::TimeFormatLocalization::HourFormat value; + chip::app::Clusters::TimeFormatLocalization::HourFormatEnum value; ReturnErrorOnFailure(chip::app::DataModel::Decode(*data, value)); return DataModelLogger::LogValue("HourFormat", 1, value); } case TimeFormatLocalization::Attributes::ActiveCalendarType::Id: { - chip::app::Clusters::TimeFormatLocalization::CalendarType value; + chip::app::Clusters::TimeFormatLocalization::CalendarTypeEnum value; ReturnErrorOnFailure(chip::app::DataModel::Decode(*data, value)); return DataModelLogger::LogValue("ActiveCalendarType", 1, value); } case TimeFormatLocalization::Attributes::SupportedCalendarTypes::Id: { - chip::app::DataModel::DecodableList value; + chip::app::DataModel::DecodableList value; ReturnErrorOnFailure(chip::app::DataModel::Decode(*data, value)); return DataModelLogger::LogValue("SupportedCalendarTypes", 1, value); } diff --git a/zzz_generated/darwin/controller-clusters/zap-generated/CHIPClientCallbacks.h 
b/zzz_generated/darwin/controller-clusters/zap-generated/CHIPClientCallbacks.h index a05cc25bea55f2..ef6cbd56ee6145 100644 --- a/zzz_generated/darwin/controller-clusters/zap-generated/CHIPClientCallbacks.h +++ b/zzz_generated/darwin/controller-clusters/zap-generated/CHIPClientCallbacks.h @@ -182,7 +182,8 @@ typedef void (*LocalizationConfigurationEventListListAttributeCallback)( typedef void (*LocalizationConfigurationAttributeListListAttributeCallback)( void * context, const chip::app::DataModel::DecodableList & data); typedef void (*TimeFormatLocalizationSupportedCalendarTypesListAttributeCallback)( - void * context, const chip::app::DataModel::DecodableList & data); + void * context, + const chip::app::DataModel::DecodableList & data); typedef void (*TimeFormatLocalizationGeneratedCommandListListAttributeCallback)( void * context, const chip::app::DataModel::DecodableList & data); typedef void (*TimeFormatLocalizationAcceptedCommandListListAttributeCallback)(