diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index d9dcc4b8..9bf87ada 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -99,7 +99,7 @@ jobs: -Dsonar.python.coverage.reportPaths=.tox/report/tmp/coverage.xml -Dsonar.python.xunit.reportPath=.tox/py311-test/tmp/xunit-result.xml - compat-test-python3-mac: + compat-test-python3-windows-and-mac: strategy: matrix: python3-version: ['11', '12', '13'] @@ -179,7 +179,7 @@ jobs: cmake --build --preset build-${{ matrix.compiler }}-${{ matrix.architecture }}-${{ matrix.language }}-debugasan cmake --build --preset build-${{ matrix.compiler }}-${{ matrix.architecture }}-${{ matrix.language }}-release - language-verification-c-cpp-clang-native-extra: + language-verification-c-clang-native-extra: runs-on: ubuntu-latest needs: test container: ghcr.io/opencyphal/toolshed:ts22.4.10 @@ -190,14 +190,31 @@ jobs: - name: verify working-directory: verification run: | - cmake -DNUNAVUT_EXTRA_GENERATOR_ARGS="--enable-override-variable-array-capacity;--embed-auditing-info" --preset config-clang-native-cpp-20 - cmake --build --preset build-clang-native-cpp-20-debugcov --target cov_all cmake -DNUNAVUT_EXTRA_GENERATOR_ARGS="--enable-override-variable-array-capacity;--embed-auditing-info" --preset config-clang-native-c-11 cmake --build --preset build-clang-native-c-11-debugcov --target cov_all - name: upload-verification-coverage-reports uses: actions/upload-artifact@v4 with: - name: verification-coverage-reports + name: verification-c-coverage-reports + path: verification/build/DebugCov/coverage/* + + language-verification-cpp-clang-native-extra: + runs-on: ubuntu-latest + needs: test + container: ghcr.io/opencyphal/toolshed:ts22.4.10 + steps: + - uses: actions/checkout@v4 + with: + submodules: true + - name: verify + working-directory: verification + run: | + cmake -DNUNAVUT_EXTRA_GENERATOR_ARGS="--enable-override-variable-array-capacity;--embed-auditing-info" --preset config-clang-native-cpp-20 
+ cmake --build --preset build-clang-native-cpp-20-debugcov --target cov_all + - name: upload-verification-coverage-reports + uses: actions/upload-artifact@v4 + with: + name: verification-cpp-coverage-reports path: verification/build/DebugCov/coverage/* language-verification-python: diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 1d28a009..5defb86f 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -36,6 +36,9 @@ recommend the following environment for vscode:: tox devenv -e local source venv/bin/activate +On Windows that last line is instead:: + + ./venv/Scripts/activate cmake ================================================ diff --git a/src/nunavut/_generators.py b/src/nunavut/_generators.py index dba5be35..8accbe13 100644 --- a/src/nunavut/_generators.py +++ b/src/nunavut/_generators.py @@ -10,8 +10,6 @@ """ import abc -import multiprocessing -import multiprocessing.pool from dataclasses import dataclass from pathlib import Path from typing import Any, Dict, Iterable, List, Mapping, Optional, Type, Union @@ -297,6 +295,7 @@ def generate_all( resource_types: int = ResourceType.ANY.value, embed_auditing_info: bool = False, dry_run: bool = False, + jobs: int = 0, no_overwrite: bool = False, allow_unregulated_fixed_port_id: bool = False, omit_dependencies: bool = False, @@ -389,6 +388,9 @@ def generate_all( :param bool dry_run: If True then no files will be generated/written but all logic will be exercised with commensurate logging and errors. + :param int jobs: + The number of parallel jobs to use when generating code. If 1 then no parallelism is used. If 0 then the + number of jobs is determined by the number of CPUs available. :param bool no_overwrite: If True then generated files will not be allowed to overwrite existing files under the `outdir` path causing errors. 
@@ -424,6 +426,7 @@ def generate_all( resource_types, embed_auditing_info, dry_run, + jobs, no_overwrite, allow_unregulated_fixed_port_id, omit_dependencies, @@ -441,6 +444,7 @@ def generate_all_for_language( resource_types: int = ResourceType.ANY.value, embed_auditing_info: bool = False, dry_run: bool = False, + jobs: int = 0, no_overwrite: bool = False, allow_unregulated_fixed_port_id: bool = False, omit_dependencies: bool = False, @@ -465,6 +469,9 @@ def generate_all_for_language( source will be embedded in the generated files at the cost of build reproducibility. :param dry_run: If True then no files will be generated/written but all logic will be exercised with commensurate logging and errors. + :param int jobs: + The number of parallel jobs to use when generating code. If 1 then no parallelism is used. If 0 then the + number of jobs is determined by the number of CPUs available. :param no_overwrite: If True then generated files will not be allowed to overwrite existing files under the `outdir` path causing errors. 
:param allow_unregulated_fixed_port_id: If True then errors will become warning when using fixed port identifiers @@ -485,6 +492,7 @@ def generate_all_for_language( language_context, target_files, root_namespace_directories_or_names, + jobs, allow_unregulated_fixed_port_id=allow_unregulated_fixed_port_id, omit_dependencies=omit_dependencies, ) diff --git a/src/nunavut/_namespace.py b/src/nunavut/_namespace.py index 95fa2656..5841f92f 100644 --- a/src/nunavut/_namespace.py +++ b/src/nunavut/_namespace.py @@ -455,6 +455,8 @@ def read_files( index: "Namespace", dsdl_files: Union[Path, str, Iterable[Union[Path, str]]], root_namespace_directories_or_names: Optional[Union[Path, str, Iterable[Union[Path, str]]]], + jobs: int = 0, + job_timeout_seconds: float = 0, lookup_directories: Optional[Union[Path, str, Iterable[Union[Path, str]]]] = None, print_output_handler: Optional[Callable[[Path, int, str], None]] = None, allow_unregulated_fixed_port_id: bool = False, @@ -466,6 +468,10 @@ def read_files( :param Namespace index: The index namespace to add the new namespaces and types to. :param Path | str | Iterable[Path | str] dsdl_files: The dsdl files to read. :param Path | str | Iterable[Path | str] root_namespace_directories_or_names: See :meth:`pydsdl.read_files`. + :param int jobs: The number of parallel jobs to allow when reading multiple files. 0 Indicates no limit and 1 + diasallows all parallelism. + :param float job_timeout_seconds: Maximum time in fractional seconds any one read file job is allowed to take + before timing out. 0 disables timeouts. :param Path | str | Iterable[Path | str] lookup_directories: See :meth:`pydsdl.read_files`. :param Callable[[Path, int, str], None] print_output_handler: A callback to handle print output. :param bool allow_unregulated_fixed_port_id: Allow unregulated fixed port ids. 
@@ -481,32 +487,53 @@ def read_files( already_read: set[Path] = set() - running_lookups: list[multiprocessing.pool.AsyncResult] = [] - with multiprocessing.pool.Pool() as pool: + if jobs == 1: + # Don't use multiprocessing when jobs is 1. while fileset: next_file = fileset.pop() - running_lookups.append( - pool.apply_async( - pydsdl.read_files, - args=( - next_file, - root_namespace_directories_or_names, - lookup_directories, - print_output_handler, - allow_unregulated_fixed_port_id, - ), - ) + target_type, dependent_types = pydsdl.read_files( + next_file, + root_namespace_directories_or_names, + lookup_directories, + print_output_handler, + allow_unregulated_fixed_port_id, ) already_read.add(next_file) # TODO: canonical paths for keying here? - if not fileset: - for lookup in running_lookups: - target_type, dependent_types = lookup.get() - Namespace.add_types(index, (target_type[0], dependent_types)) - if not omit_dependencies: - for dependent_type in dependent_types: - if dependent_type.source_file_path not in already_read: - fileset.add(dependent_type.source_file_path) - running_lookups.clear() + Namespace.add_types(index, (target_type[0], dependent_types)) + if not omit_dependencies: + for dependent_type in dependent_types: + if dependent_type.source_file_path not in already_read: + fileset.add(dependent_type.source_file_path) + else: + running_lookups: list[multiprocessing.pool.AsyncResult] = [] + with multiprocessing.pool.Pool(processes=None if jobs == 0 else jobs) as pool: + while fileset: + next_file = fileset.pop() + running_lookups.append( + pool.apply_async( + pydsdl.read_files, + args=( + next_file, + root_namespace_directories_or_names, + lookup_directories, + print_output_handler, + allow_unregulated_fixed_port_id, + ), + ) + ) + already_read.add(next_file) # TODO: canonical paths for keying here? 
+ if not fileset: + for lookup in running_lookups: + if job_timeout_seconds <= 0: + target_type, dependent_types = lookup.get() + else: + target_type, dependent_types = lookup.get(timeout=job_timeout_seconds) + Namespace.add_types(index, (target_type[0], dependent_types)) + if not omit_dependencies: + for dependent_type in dependent_types: + if dependent_type.source_file_path not in already_read: + fileset.add(dependent_type.source_file_path) + running_lookups.clear() return index @@ -518,6 +545,8 @@ def _( lctx: LanguageContext, dsdl_files: Optional[Union[Path, str, Iterable[Union[Path, str]]]], root_namespace_directories_or_names: Optional[Union[Path, str, Iterable[Union[Path, str]]]], + jobs: int = 0, + job_timeout_seconds: float = 0, lookup_directories: Optional[Union[Path, str, Iterable[Union[Path, str]]]] = None, print_output_handler: Optional[Callable[[Path, int, str], None]] = None, allow_unregulated_fixed_port_id: bool = False, @@ -530,6 +559,10 @@ def _( :param LanguageContext lctx: The language context to use when building the namespace. :param Path | str | Iterable[Path | str] dsdl_files: The dsdl files to read. :param Path | str | Iterable[Path | str] root_namespace_directories_or_names: See :meth:`pydsdl.read_files`. + :param int jobs: The number of parallel jobs to allow when reading multiple files. 0 Indicates no limit and 1 + diasallows all parallelism. + :param float job_timeout_seconds: Maximum time in fractional seconds any one read file job is allowed to take + before timing out. 0 disables timeouts. :param Path | str | Iterable[Path | str] lookup_directories: See :meth:`pydsdl.read_files`. :param Callable[[Path, int, str], None] print_output_handler: A callback to handle print output. :param bool allow_unregulated_fixed_port_id: Allow unregulated fixed port ids. 
@@ -539,6 +572,8 @@ def _( Namespace.Identity(output_path, lctx), dsdl_files, root_namespace_directories_or_names, + jobs, + job_timeout_seconds, lookup_directories, print_output_handler, allow_unregulated_fixed_port_id, @@ -553,6 +588,8 @@ def _( lctx: LanguageContext, dsdl_files: Optional[Union[Path, str, Iterable[Union[Path, str]]]], root_namespace_directories_or_names: Optional[Union[Path, str, Iterable[Union[Path, str]]]], + jobs: int = 0, + job_timeout_seconds: float = 0, lookup_directories: Optional[Union[Path, str, Iterable[Union[Path, str]]]] = None, print_output_handler: Optional[Callable[[Path, int, str], None]] = None, allow_unregulated_fixed_port_id: bool = False, @@ -565,6 +602,10 @@ def _( :param LanguageContext lctx: The language context to use when building the namespace. :param Path | str | Iterable[Path | str] dsdl_files: The dsdl files to read. :param Path | str | Iterable[Path | str] root_namespace_directories_or_names: See :meth:`pydsdl.read_files`. + :param int The number of parallel jobs to allow when reading multiple files. 0 Indicates no limit and 1 + diasallows all parallelism. + :param float job_timeout_seconds: Maximum time in fractional seconds any one read file job is allowed to take + before timing out. 0 disables timeouts. :param Path | str | Iterable[Path | str] lookup_directories: See :meth:`pydsdl.read_files`. :param Callable[[Path, int, str], None] print_output_handler: A callback to handle print output. :param bool allow_unregulated_fixed_port_id: Allow unregulated fixed port ids. 
@@ -574,6 +615,8 @@ def _( Namespace.Identity(Path(output_path), lctx), dsdl_files, root_namespace_directories_or_names, + jobs, + job_timeout_seconds, lookup_directories, print_output_handler, allow_unregulated_fixed_port_id, diff --git a/src/nunavut/cli/__init__.py b/src/nunavut/cli/__init__.py index 7623ae80..d13d34f4 100644 --- a/src/nunavut/cli/__init__.py +++ b/src/nunavut/cli/__init__.py @@ -522,6 +522,27 @@ def extension_type(raw_arg: str) -> str: run_mode_group.add_argument("--dry-run", "-d", action="store_true", help="If True then no files will be generated.") + run_mode_group.add_argument( + "--jobs", + "-j", + type=int, + default=0, + help=textwrap.dedent( + """ + + Limits the number of subprocesses nnvg can use to parallelize type discovery + and code generation. + + If set to 0 then the number of jobs will be set to the number of CPUs available + on the system. + + If set to 1 then no subprocesses will be used and all work will be done in th + main process. + + """ + ).lstrip(), + ) + run_mode_group.add_argument( "--list-outputs", action="store_true", diff --git a/test/gentest_nnvg/test_nnvg.py b/test/gentest_nnvg/test_nnvg.py index c9065290..25f4fb7e 100644 --- a/test/gentest_nnvg/test_nnvg.py +++ b/test/gentest_nnvg/test_nnvg.py @@ -52,6 +52,31 @@ def test_realgen_using_nnvg(gen_paths: Any, run_nnvg: Callable) -> None: assert note.exists() +@pytest.mark.parametrize("jobs", [0, 1, 2]) +def test_realgen_using_nnvg_jobs(gen_paths: Any, run_nnvg_main: Callable, jobs: int) -> None: + """ + Sanity test that nnvg can generate code from known types. 
+ """ + public_regulated_data_types = gen_paths.root_dir / Path("submodules") / Path("public_regulated_data_types") + + nnvg_args0 = [ + "--outdir", + gen_paths.out_dir.as_posix(), + "-j", + str(jobs), + "-l", + "c", + "--lookup-dir", + (public_regulated_data_types / Path("uavcan")).as_posix(), + (public_regulated_data_types / Path("uavcan", "node", "430.GetInfo.1.0.dsdl")).as_posix(), + ] + + run_nnvg_main(gen_paths, nnvg_args0) + + get_info = gen_paths.out_dir / Path("uavcan") / Path("node") / Path("GetInfo_1_0").with_suffix(".h") + assert get_info.exists() + + def test_DSDL_INCLUDE_PATH(gen_paths: Any, run_nnvg_main: Callable) -> None: """ Verify that the DSDL_INCLUDE_PATH environment variable is used by nnvg. diff --git a/verification/CMakeLists.txt b/verification/CMakeLists.txt index 83dcdfcb..d7ef29a8 100644 --- a/verification/CMakeLists.txt +++ b/verification/CMakeLists.txt @@ -62,7 +62,7 @@ find_package(Nunavut 3.0 REQUIRED) # # We generate coverage reports. Please look at them (It wasn't easy to get this to work). # -find_package(lcov REQUIRED) +find_package(verification-coverage REQUIRED) find_package(genhtml REQUIRED) # @@ -161,18 +161,11 @@ endif() # We generate individual test binaires so we can record which test generated # what coverage. We also allow test authors to generate coverage reports for # just one test allowing for faster iteration. -add_custom_target( - lcov_zero - ${LCOV} - ${NUNAVUT_GOV_TOOL_ARG} - --zerocounters - --directory ${CMAKE_CURRENT_BINARY_DIR} - COMMENT "Resetting coverage counters." 
-) -set(ALL_TESTS "") -set(ALL_TESTS_WITH_LCOV "") -set(ALL_TEST_COVERAGE "") +set(ALL_TEST_RUNS "") +set(ALL_TEST_INFO_FILES "") +# Don't allow tests to run in parallel +set_property(GLOBAL PROPERTY JOB_POOLS coverage_test_runs=1) function(runTestCpp) set(options "") @@ -191,9 +184,10 @@ function(runTestCpp) set(NATIVE_TEST "${CMAKE_CURRENT_SOURCE_DIR}/cpp/suite/${runTestCpp_TEST_FILE}") get_filename_component(NATIVE_TEST_NAME ${NATIVE_TEST} NAME_WE) - define_native_unit_test(FRAMEWORK "gtest" - TEST_NAME ${NATIVE_TEST_NAME} - TEST_SOURCE ${NATIVE_TEST} + define_native_unit_test( + FRAMEWORK "gtest" + NAME ${NATIVE_TEST_NAME} + SOURCE ${NATIVE_TEST} OUTDIR ${NUNAVUT_VERIFICATIONS_BINARY_DIR} DSDL_TARGETS ${runTestCpp_LINK} @@ -212,16 +206,23 @@ function(runTestCpp) target_compile_options(${NATIVE_TEST_NAME} PRIVATE "-Wno-deprecated-declarations") target_link_libraries(${NATIVE_TEST_NAME} PUBLIC o1heap) target_include_directories(${NATIVE_TEST_NAME} PUBLIC "${NUNAVUT_SUBMODULES_DIR}/CETL/include") - define_native_test_run(TEST_NAME ${NATIVE_TEST_NAME} OUTDIR ${NUNAVUT_VERIFICATIONS_BINARY_DIR}) - define_native_test_run_with_lcov(${NATIVE_TEST_NAME} ${NUNAVUT_VERIFICATIONS_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/\\*) + define_native_test_run( + NAME ${NATIVE_TEST_NAME} + OUTDIR ${NUNAVUT_VERIFICATIONS_BINARY_DIR} + OUT_CUSTOM_TARGET LOCAL_TEST_RUN_TARGET + ) + define_coverage_native_test_run( + NAME ${NATIVE_TEST_NAME} + JOB_POOL coverage_test_runs + OUTDIR ${NUNAVUT_VERIFICATIONS_BINARY_DIR} + SOURCE_FILTER_DIR ${CMAKE_CURRENT_SOURCE_DIR}/\\* + OUT_INFO_FILE LOCAL_TEST_INFO_FILE + ) define_native_test_coverage(${NATIVE_TEST_NAME} ${NUNAVUT_VERIFICATIONS_BINARY_DIR}) - list(APPEND ALL_TESTS "run_${NATIVE_TEST_NAME}") - list(APPEND ALL_TESTS_WITH_LCOV "run_${NATIVE_TEST_NAME}_with_lcov") - list(APPEND ALL_TEST_COVERAGE "--add-tracefile") - list(APPEND ALL_TEST_COVERAGE "${NUNAVUT_VERIFICATIONS_BINARY_DIR}/coverage.${NATIVE_TEST_NAME}.filtered.info") - set(ALL_TESTS 
${ALL_TESTS} PARENT_SCOPE) - set(ALL_TESTS_WITH_LCOV ${ALL_TESTS_WITH_LCOV} PARENT_SCOPE) - set(ALL_TEST_COVERAGE ${ALL_TEST_COVERAGE} PARENT_SCOPE) + list(APPEND ALL_TEST_RUNS "${LOCAL_TEST_RUN_TARGET}") + list(APPEND ALL_TEST_INFO_FILES "${LOCAL_TEST_INFO_FILE}") + set(ALL_TEST_RUNS ${ALL_TEST_RUNS} PARENT_SCOPE) + set(ALL_TEST_INFO_FILES ${ALL_TEST_INFO_FILES} PARENT_SCOPE) endfunction() if(LOCAL_NUNAVUT_VERIFICATION_TARGET_LANG STREQUAL "cpp") @@ -252,23 +253,31 @@ function(runTestC) set(NATIVE_TEST "${CMAKE_CURRENT_SOURCE_DIR}/c/suite/${runTestC_TEST_FILE}") get_filename_component(NATIVE_TEST_NAME ${NATIVE_TEST} NAME_WE) - define_native_unit_test(FRAMEWORK ${runTestC_FRAMEWORK} - TEST_NAME ${NATIVE_TEST_NAME} - TEST_SOURCE ${NATIVE_TEST} + define_native_unit_test( + FRAMEWORK ${runTestC_FRAMEWORK} + NAME ${NATIVE_TEST_NAME} + SOURCE ${NATIVE_TEST} OUTDIR ${NUNAVUT_VERIFICATIONS_BINARY_DIR} DSDL_TARGETS ${runTestC_LINK} ) - define_native_test_run(TEST_NAME ${NATIVE_TEST_NAME} OUTDIR ${NUNAVUT_VERIFICATIONS_BINARY_DIR}) - define_native_test_run_with_lcov(${NATIVE_TEST_NAME} ${NUNAVUT_VERIFICATIONS_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/\\*) + define_native_test_run( + NAME ${NATIVE_TEST_NAME} + OUTDIR ${NUNAVUT_VERIFICATIONS_BINARY_DIR} + OUT_CUSTOM_TARGET LOCAL_TEST_RUN_TARGET + ) + define_coverage_native_test_run( + NAME ${NATIVE_TEST_NAME} + JOB_POOL coverage_test_runs + OUTDIR ${NUNAVUT_VERIFICATIONS_BINARY_DIR} + SOURCE_FILTER_DIR ${CMAKE_CURRENT_SOURCE_DIR}/\\* + OUT_INFO_FILE LOCAL_TEST_INFO_FILE + ) define_native_test_coverage(${NATIVE_TEST_NAME} ${NUNAVUT_VERIFICATIONS_BINARY_DIR}) - list(APPEND ALL_TESTS "run_${NATIVE_TEST_NAME}") - list(APPEND ALL_TESTS_WITH_LCOV "run_${NATIVE_TEST_NAME}_with_lcov") - list(APPEND ALL_TEST_COVERAGE "--add-tracefile") - list(APPEND ALL_TEST_COVERAGE "${NUNAVUT_VERIFICATIONS_BINARY_DIR}/coverage.${NATIVE_TEST_NAME}.filtered.info") - set(ALL_TESTS ${ALL_TESTS} PARENT_SCOPE) - set(ALL_TESTS_WITH_LCOV 
${ALL_TESTS_WITH_LCOV} PARENT_SCOPE) - set(ALL_TEST_COVERAGE ${ALL_TEST_COVERAGE} PARENT_SCOPE) + list(APPEND ALL_TEST_RUNS "${LOCAL_TEST_RUN_TARGET}") + list(APPEND ALL_TEST_INFO_FILES "${LOCAL_TEST_INFO_FILE}") + set(ALL_TEST_RUNS ${ALL_TEST_RUNS} PARENT_SCOPE) + set(ALL_TEST_INFO_FILES ${ALL_TEST_INFO_FILES} PARENT_SCOPE) endfunction() if(LOCAL_NUNAVUT_VERIFICATION_TARGET_LANG STREQUAL "c") @@ -284,70 +293,31 @@ endif() # +---------------------------------------------------------------------------+ # Finally, we setup an overall report. the coverage.info should be uploaded # to a coverage reporting service as part of the CI pipeline. -add_custom_command( - OUTPUT ${NUNAVUT_VERIFICATIONS_BINARY_DIR}/coverage.all.info - COMMAND - ${LCOV} - ${NUNAVUT_GOV_TOOL_ARG} - --rc lcov_branch_coverage=1 - ${ALL_TEST_COVERAGE} - --output-file ${NUNAVUT_VERIFICATIONS_BINARY_DIR}/coverage.all.info - DEPENDS ${ALL_TESTS_WITH_LCOV} +define_coverage_summary( + INFO_FILES ${ALL_TEST_INFO_FILES} + OUTDIR ${NUNAVUT_VERIFICATIONS_BINARY_DIR} + OUT_INFO_FILE LOCAL_INFO_SUMMARY ) -add_custom_command( - OUTPUT ${NUNAVUT_VERIFICATIONS_BINARY_DIR}/coverage.info - COMMAND - ${LCOV} - ${NUNAVUT_GOV_TOOL_ARG} - --rc lcov_branch_coverage=1 - --extract ${NUNAVUT_VERIFICATIONS_BINARY_DIR}/coverage.all.info - ${LOCAL_PROJECT_ROOT}/\\* - --output-file ${NUNAVUT_VERIFICATIONS_BINARY_DIR}/coverage.info - DEPENDS ${NUNAVUT_VERIFICATIONS_BINARY_DIR}/coverage.all.info -) - -add_custom_target( - cov_info - DEPENDS ${NUNAVUT_VERIFICATIONS_BINARY_DIR}/coverage.info -) +# This just gives us a utility to clean things up manually. You don't need to hook it up to anything. 
+define_coverage_zero_all(OUTDIR ${NUNAVUT_VERIFICATIONS_BINARY_DIR}) add_custom_target( cov_all - ${GENHTML} --title "${PROJECT_NAME} native test coverage" - --output-directory ${NUNAVUT_VERIFICATIONS_BINARY_DIR}/coverage/all - --demangle-cpp - --sort - --num-spaces 4 - --function-coverage - --branch-coverage - --legend - --highlight - --show-details - ${NUNAVUT_VERIFICATIONS_BINARY_DIR}/coverage.info - DEPENDS ${NUNAVUT_VERIFICATIONS_BINARY_DIR}/coverage.info + ${GENHTML} + --title "${PROJECT_NAME} native test coverage" + --output-directory ${NUNAVUT_VERIFICATIONS_BINARY_DIR}/coverage/all + --demangle-cpp + --sort + --num-spaces 4 + --function-coverage + --branch-coverage + --legend + --highlight + --show-details + ${LOCAL_INFO_SUMMARY} + DEPENDS ${LOCAL_INFO_SUMMARY} COMMENT "Build and run all tests and generate an overall html coverage report." ) -add_custom_target( - test_all - DEPENDS - ${ALL_TESTS} -) - -add_custom_target( - cov_all_archive - COMMAND - ${CMAKE_COMMAND} - -E tar - "cfv" - "coverage_all.zip" - --format=zip - "${NUNAVUT_VERIFICATIONS_BINARY_DIR}/coverage/all" - DEPENDS - cov_all - BYPRODUCTS - "${NUNAVUT_VERIFICATIONS_BINARY_DIR}/coverage_all.zip" - COMMENT - "Build and run all tests and generate an overall html coverage report as a zip archive." -) +add_custom_target(test_all DEPENDS ${ALL_TEST_RUNS}) diff --git a/verification/cmake/modules/Findlcov.cmake b/verification/cmake/modules/Findlcov.cmake deleted file mode 100644 index a7c3834e..00000000 --- a/verification/cmake/modules/Findlcov.cmake +++ /dev/null @@ -1,120 +0,0 @@ -# -# Find lcov and deal with clang weirdness. -# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# - -find_program(LCOV lcov) - -if(LCOV) - - # +---------------------------------------------------------------------------+ - # What follows are some gymnastics to allow coverage reports to be generated - # using either gcc or clang but resulting in the same .info format. 
The - # consistent output is needed to ensure we can merge and compare coverage data - # regardless of the compiler used to create the tests. - - set(NUNAVUT_GOV_TOOL_ARG ) - - if (NUNAVUT_USE_LLVM_COV) - # Try to find llvm coverage. If we don't find it - # we'll simply omit the tool arg and hope that lcov - # can figure it out. - # We also add some hints to help on osx. You may need to install llvm from - # homebrew since it doesn't look like it comes with xcode. - find_program(LLVM_COV - NAMES - llvm-cov - llvm-cov-6.0 - HINTS - /usr/local/opt/llvm/bin - ) - - if (LLVM_COV) - message(STATUS "Generating an llvm-cov wrapper to enable lcov report generation from clang output.") - # Thanks to http://logan.tw/posts/2015/04/28/check-code-coverage-with-clang-and-lcov/ - file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_FILES_DIRECTORY}/gcov_tool.sh "#!/usr/bin/env bash\nexec ${LLVM_COV} gcov \"$@\"\n") - file(COPY ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_FILES_DIRECTORY}/gcov_tool.sh - DESTINATION ${CMAKE_CURRENT_BINARY_DIR} - NO_SOURCE_PERMISSIONS - FILE_PERMISSIONS OWNER_READ - OWNER_WRITE - OWNER_EXECUTE - GROUP_READ - GROUP_EXECUTE - WORLD_READ - WORLD_EXECUTE) - set(NUNAVUT_GOV_TOOL_ARG "--gcov-tool" "${CMAKE_CURRENT_BINARY_DIR}/gcov_tool.sh") - else() - message(WARNING "llvm-cov was not found but we are compiling using clang. The coverage report build step may fail.") - endif() - endif() - # +---------------------------------------------------------------------------+ - - # - # function: define_native_test_run_with_lcov - creates a makefile target that will build and - # run individual unit tests. This also properly sets up the coverage counters. - # - # param: ARG_TEST_NAME string - The name of the test to run. A target will be created - # with the name run_${ARG_TEST_NAME}_with_lcov - # param: ARG_OUTDIR path - The path where the test binaries live. - # param: ARG_SOURCE_FILTER_DIR pattern - pattern for paths to include (exclusively) in the coverage - # data. 
- # - function(define_native_test_run_with_lcov ARG_TEST_NAME ARG_OUTDIR ARG_SOURCE_FILTER_DIR) - message(STATUS "Adding test ${ARG_TEST_NAME} for source ${ARG_SOURCE_FILTER_DIR}") - add_custom_command( - COMMAND # Reset coverage data - ${LCOV} - ${NUNAVUT_GOV_TOOL_ARG} - --zerocounters - --directory ${CMAKE_CURRENT_BINARY_DIR} - COMMAND # Generate initial "zero coverage" data. - ${LCOV} - ${NUNAVUT_GOV_TOOL_ARG} - --rc lcov_branch_coverage=1 - --capture - --initial - --directory ${CMAKE_CURRENT_BINARY_DIR} - --output-file ${NUNAVUT_VERIFICATIONS_BINARY_DIR}/coverage.baseline.info - COMMAND - ${ARG_OUTDIR}/${ARG_TEST_NAME} - COMMAND # Generate coverage from tests. - ${LCOV} - ${NUNAVUT_GOV_TOOL_ARG} - --rc lcov_branch_coverage=1 - --capture - --directory ${CMAKE_CURRENT_BINARY_DIR} - --test-name ${ARG_TEST_NAME} - --output-file ${ARG_OUTDIR}/coverage.${ARG_TEST_NAME}.test.info - COMMAND # Combine all the test runs with the baseline - ${LCOV} - ${NUNAVUT_GOV_TOOL_ARG} - --rc lcov_branch_coverage=1 - --add-tracefile ${NUNAVUT_VERIFICATIONS_BINARY_DIR}/coverage.baseline.info - --add-tracefile ${ARG_OUTDIR}/coverage.${ARG_TEST_NAME}.test.info - --output-file ${ARG_OUTDIR}/coverage.${ARG_TEST_NAME}.info - COMMAND # Filter only the interesting data - ${LCOV} - ${NUNAVUT_GOV_TOOL_ARG} - --rc lcov_branch_coverage=1 - --extract ${ARG_OUTDIR}/coverage.${ARG_TEST_NAME}.info - ${ARG_SOURCE_FILTER_DIR} - --output-file ${ARG_OUTDIR}/coverage.${ARG_TEST_NAME}.filtered.info - OUTPUT ${ARG_OUTDIR}/coverage.${ARG_TEST_NAME}.filtered.info - DEPENDS ${ARG_TEST_NAME} - ) - - add_custom_target( - run_${ARG_TEST_NAME}_with_lcov - DEPENDS ${ARG_OUTDIR}/coverage.${ARG_TEST_NAME}.filtered.info - ) - - endfunction() - -endif() - -include(FindPackageHandleStandardArgs) - -find_package_handle_standard_args(lcov - LCOV_FOUND -) diff --git a/verification/cmake/modules/Findverification-coverage.cmake b/verification/cmake/modules/Findverification-coverage.cmake new file mode 100644 index 
00000000..00fe599b --- /dev/null +++ b/verification/cmake/modules/Findverification-coverage.cmake @@ -0,0 +1,304 @@ +# +# Copyright (C) OpenCyphal Development Team +# Copyright Amazon.com Inc. or its affiliates. +# SPDX-License-Identifier: MIT +# + +find_program(LCOV lcov) + +if(LCOV) + + # +---------------------------------------------------------------------------+ + # What follows are some gymnastics to allow coverage reports to be generated + # using either gcc or clang but resulting in the same .info format. The + # consistent output is needed to ensure we can merge and compare coverage data + # regardless of the compiler used to create the tests. + + set(VERIFICATION_COVERAGE_GOV_TOOL_ARG ) + + if (VERIFICATION_COVERAGE_USE_LLVM_COV) + # Try to find llvm coverage. If we don't find it + # we'll simply omit the tool arg and hope that lcov + # can figure it out. + # We also add some hints to help on osx. You may need to install llvm from + # homebrew since it doesn't look like it comes with xcode. + find_program(LLVM_COV + NAMES + llvm-cov + llvm-cov-6.0 + HINTS + /usr/local/opt/llvm/bin + ) + + if (LLVM_COV) + message(STATUS "Generating an llvm-cov wrapper to enable lcov report generation from clang output.") + # Thanks to http://logan.tw/posts/2015/04/28/check-code-coverage-with-clang-and-lcov/ + file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_FILES_DIRECTORY}/gcov_tool.sh "#!/usr/bin/env bash\nexec ${LLVM_COV} gcov \"$@\"\n") + file(COPY ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_FILES_DIRECTORY}/gcov_tool.sh + DESTINATION ${CMAKE_CURRENT_BINARY_DIR} + NO_SOURCE_PERMISSIONS + FILE_PERMISSIONS OWNER_READ + OWNER_WRITE + OWNER_EXECUTE + GROUP_READ + GROUP_EXECUTE + WORLD_READ + WORLD_EXECUTE) + set(VERIFICATION_COVERAGE_GOV_TOOL_ARG "--gcov-tool" "${CMAKE_CURRENT_BINARY_DIR}/gcov_tool.sh") + else() + message(WARNING "llvm-cov was not found but we are compiling using clang. 
The coverage report build step may fail.") + endif() + endif() + # +---------------------------------------------------------------------------+ + + # + # function: define_coverage_native_test_run - creates a makefile target that will build and + # run individual unit tests. This also properly sets up the coverage counters. + # + # param: NAME string - The name of the test to run. + # param: JOB_POOL optional[string] - The name of a Ninja job pool to add the custom command to. + # param: BASE_DIR optional[path] (default: CMAKE_CURRENT_BINARY_DIR) + # - The root path under which object files for the test can be found. As these are + # normally found under ${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles its best to use + # CMAKE_CURRENT_BINARY_DIR as this value which is the default. This can, however + # cause problems when using Ninja Multi-Config. + # param: OUTDIR path - The path where the test binaries live and under which the info files will be + # generated. + # param: SOURCE_FILTER_DIR pattern - pattern for paths to include (exclusively) in the coverage + # data. For example, test/foo/* + # param: OUT_CUSTOM_TARGET - If set, this is the name of a local variable set in the calling (parent) scope + # that contains the name of the custom target (i.e. add_custom_target) defined by + # this method that will run the test and lcov to generate an info file. + # param: OUT_INFO_FILE - If set, this is the name of a local variable set in the calling (parent) scope + # that will contain the info file generated with coverage data from the test run. 
+ # + function(define_coverage_native_test_run) + + # +-[input]----------------------------------------------------------------+ + set(options) + set(singleValueArgs + NAME + JOB_POOL + OUTDIR + SOURCE_FILTER_DIR + BASE_DIR + OUT_CUSTOM_TARGET + OUT_INFO_FILE + ) + set(multiValueArgs) + cmake_parse_arguments(PARSE_ARGV 0 ARG "${options}" "${singleValueArgs}" "${multiValueArgs}") + + if (NOT ARG_BASE_DIR) + set(ARG_BASE_DIR ${CMAKE_CURRENT_BINARY_DIR}) + endif() + + # +-[body]-----------------------------------------------------------------+ + message(STATUS "Adding test ${ARG_NAME} for source ${ARG_SOURCE_FILTER_DIR} (${ARG_OUTDIR}/${ARG_NAME})") + + set(LOCAL_INFO_FILE "${ARG_OUTDIR}/coverage.${ARG_NAME}.filtered.info") + set(LOCAL_RUN_TARGET "run_${ARG_NAME}_with_lcov") + if (ARG_JOB_POOL) + list(APPEND LOCAL_JOB_POOL_ARG "JOB_POOL" ${ARG_JOB_POOL}) + else() + set(LOCAL_JOB_POOL_ARG) + endif() + + add_custom_command( + WORKING_DIRECTORY ${ARG_BASE_DIR} + ${LOCAL_JOB_POOL_ARG} + COMMAND # Reset coverage data + ${LCOV} + ${VERIFICATION_COVERAGE_GOV_TOOL_ARG} + --zerocounters + --directory ${ARG_BASE_DIR} + COMMAND # Generate initial "zero coverage" data. + ${LCOV} + ${VERIFICATION_COVERAGE_GOV_TOOL_ARG} + --rc lcov_branch_coverage=1 + --capture + --initial + --directory ${ARG_BASE_DIR} + --output-file ${ARG_OUTDIR}/coverage.baseline.info + COMMAND + ${ARG_OUTDIR}/${ARG_NAME} + COMMAND # Generate coverage from tests. 
+ ${LCOV} + ${VERIFICATION_COVERAGE_GOV_TOOL_ARG} + --rc lcov_branch_coverage=1 + --capture + --directory ${ARG_BASE_DIR} + --test-name ${ARG_NAME} + --output-file ${ARG_OUTDIR}/coverage.${ARG_NAME}.test.info + COMMAND # Combine all the test runs with the baseline + ${LCOV} + ${VERIFICATION_COVERAGE_GOV_TOOL_ARG} + --rc lcov_branch_coverage=1 + --add-tracefile ${ARG_OUTDIR}/coverage.baseline.info + --add-tracefile ${ARG_OUTDIR}/coverage.${ARG_NAME}.test.info + --output-file ${ARG_OUTDIR}/coverage.${ARG_NAME}.info + COMMAND # Filter only the interesting data + ${LCOV} + ${VERIFICATION_COVERAGE_GOV_TOOL_ARG} + --rc lcov_branch_coverage=1 + --extract ${ARG_OUTDIR}/coverage.${ARG_NAME}.info + ${ARG_SOURCE_FILTER_DIR} + --output-file ${LOCAL_INFO_FILE} + OUTPUT ${LOCAL_INFO_FILE} + DEPENDS ${ARG_NAME} + ) + + add_custom_target(${LOCAL_RUN_TARGET} DEPENDS ${LOCAL_INFO_FILE}) + + # +-[OUT]---------------------------------------------------------------------+ + + if (ARG_OUT_CUSTOM_TARGET) + set(${ARG_OUT_CUSTOM_TARGET} "${LOCAL_RUN_TARGET}" PARENT_SCOPE) + endif() + + if (ARG_OUT_INFO_FILE) + set(${ARG_OUT_INFO_FILE} ${LOCAL_INFO_FILE} PARENT_SCOPE) + endif() + + endfunction() + + # + # function: define_coverage_summary - Runs lcov over info files generated by test runs defined by calls to + # define_coverage_native_test_run and creates a single, summarized coverage info file. + # + # Example Usage:: + # + # define_coverage_native_test_run( + # NAME my_test + # OUTDIR ${CMAKE_CURRENT_BINARY_DIR}/$ + # SOURCE_FILTER_DIR ${CMAKE_CURRENT_SOURCE_DIR}/\\* + # OUT_INFO_FILE LOCAL_INFO_FILE + # ) + # + # list(APPEND ALL_INFO_FILES ${LOCAL_INFO_FILE}) + # + # # add other tests and append to LOCAL_INFO_FILE list. 
+ # + # define_coverage_summary( + # INFO_FILES ${ALL_INFO_FILES} + # OUTDIR ${CMAKE_CURRENT_BINARY_DIR}/$ + # OUT_INFO_FILE LOCAL_SUMMARY_INFO_FILE + # ) + # # ${LOCAL_SUMMARY_INFO_FILE} can be used as input to genhtml or uploaded to a coverage index service like + # # coveralls.io + # + # param: INFO_FILES list[path] - A list of info files to include in the summary. + # param: OUTDIR path - The path where the info files live. + # param: OUT_CUSTOM_TARGET - If set, this is the name of a local variable set in the calling (parent) scope + # that contains the name of a custom target (i.e. add_custom_target) defined by + # this method that runs the info summary rule. + # param: OUT_INFO_FILE - If set, this is the name of a local variable set in the calling (parent) scope + # that will contain the info file generated with coverage data from the test run. + # + function(define_coverage_summary) + + # +-[inputs]---------------------------------------------------------------+ + set(options) + set(singleValueArgs + OUTDIR + OUT_INFO_FILE + OUT_CUSTOM_TARGET + ) + set(multiValueArgs + INFO_FILES + ) + cmake_parse_arguments(PARSE_ARGV 0 ARG "${options}" "${singleValueArgs}" "${multiValueArgs}") + + # +-[body]-----------------------------------------------------------------+ + + set(LOCAL_ALL_INFO_FILE "${ARG_OUTDIR}/coverage.all.info") + set(LOCAL_FILTERED_INFO_FILE "${ARG_OUTDIR}/coverage.info") + list(REMOVE_DUPLICATES ARG_INFO_FILES) + set(LOCAL_ADD_TRACEFILE_ARGS) + foreach(LOCAL_INFO_FILE ${ARG_INFO_FILES}) + list(APPEND LOCAL_ADD_TRACEFILE_ARGS --add-tracefile ${LOCAL_INFO_FILE}) + endforeach() + + add_custom_command( + OUTPUT ${LOCAL_ALL_INFO_FILE} + COMMAND + ${LCOV} + ${VERIFICATION_COVERAGE_GOV_TOOL_ARG} + --rc lcov_branch_coverage=1 + ${LOCAL_ADD_TRACEFILE_ARGS} + --output-file ${LOCAL_ALL_INFO_FILE} + DEPENDS ${ARG_INFO_FILES} + ) + + add_custom_command( + OUTPUT ${LOCAL_FILTERED_INFO_FILE} + COMMAND + ${LCOV} + ${VERIFICATION_COVERAGE_GOV_TOOL_ARG} + --rc 
lcov_branch_coverage=1 + --extract ${LOCAL_ALL_INFO_FILE} + ${LOCAL_PROJECT_ROOT}/\\* + --output-file ${LOCAL_FILTERED_INFO_FILE} + DEPENDS ${LOCAL_ALL_INFO_FILE} + ) + + add_custom_target( + cov_info + DEPENDS ${LOCAL_FILTERED_INFO_FILE} + ) + + # +-[outputs]--------------------------------------------------------------+ + if (ARG_OUT_CUSTOM_TARGET) + set(${ARG_OUT_CUSTOM_TARGET} "cov_info" PARENT_SCOPE) + endif() + + if (ARG_OUT_INFO_FILE) + set(${ARG_OUT_INFO_FILE} ${LOCAL_FILTERED_INFO_FILE} PARENT_SCOPE) + endif() + + endfunction() + + + + # + # function: define_coverage_zero_all - Defines a custom rule to zero out all counters under a given directory. + # + # param: OUTDIR path - The directory under which coverage counters are reset. + # param: OUT_CUSTOM_TARGET - If set, this is the name of a local variable set in the calling (parent) scope + # that contains the name of a custom target (i.e. add_custom_target) defined by + # this method that runs the counter-zeroing rule. + # + function(define_coverage_zero_all) + # +-[inputs]---------------------------------------------------------------+ + set(options) + set(singleValueArgs + OUTDIR + OUT_CUSTOM_TARGET + ) + set(multiValueArgs) + cmake_parse_arguments(PARSE_ARGV 0 ARG "${options}" "${singleValueArgs}" "${multiValueArgs}") + + # +-[body]-----------------------------------------------------------------+ + + add_custom_target( + cov_zero + ${LCOV} + ${VERIFICATION_COVERAGE_GOV_TOOL_ARG} + --zerocounters + --directory ${ARG_OUTDIR} + COMMENT "Resetting coverage counters under ${ARG_OUTDIR}" + ) + + # +-[outputs]--------------------------------------------------------------+ + if (ARG_OUT_CUSTOM_TARGET) + set(${ARG_OUT_CUSTOM_TARGET} "cov_zero" PARENT_SCOPE) + endif() + + endfunction() + +endif() + +include(FindPackageHandleStandardArgs) + +find_package_handle_standard_args(verification-coverage + LCOV_FOUND +) diff --git a/verification/cmake/toolchains/clang-native.cmake b/verification/cmake/toolchains/clang-native.cmake index 
f6e704b7..55d1701f 100644 --- a/verification/cmake/toolchains/clang-native.cmake +++ b/verification/cmake/toolchains/clang-native.cmake @@ -8,4 +8,4 @@ set(CMAKE_C_COMPILER clang CACHE FILEPATH "C compiler") set(CMAKE_CXX_COMPILER clang++ CACHE FILEPATH "C++ compiler") set(CMAKE_ASM_COMPILER clang CACHE FILEPATH "assembler") -set(NUNAVUT_USE_LLVM_COV ON CACHE BOOL "Enable gcov compatibility with lcov coverage tools.") +set(VERIFICATION_COVERAGE_USE_LLVM_COV ON CACHE BOOL "Enable gcov compatibility with lcov coverage tools.") diff --git a/verification/cmake/toolchains/gcc-native.cmake b/verification/cmake/toolchains/gcc-native.cmake index 10293003..235d44be 100644 --- a/verification/cmake/toolchains/gcc-native.cmake +++ b/verification/cmake/toolchains/gcc-native.cmake @@ -7,4 +7,4 @@ set(CMAKE_C_COMPILER gcc CACHE FILEPATH "C compiler") set(CMAKE_CXX_COMPILER g++ CACHE FILEPATH "C++ compiler") set(CMAKE_ASM_COMPILER gcc CACHE FILEPATH "assembler") -set(NUNAVUT_USE_LLVM_COV OFF CACHE BOOL "Enable gcov compatibility with lcov coverage tools.") +set(VERIFICATION_COVERAGE_USE_LLVM_COV OFF CACHE BOOL "Enable gcov compatibility with lcov coverage tools.") diff --git a/verification/cmake/utils.cmake b/verification/cmake/utils.cmake index c673fed0..d933cc15 100644 --- a/verification/cmake/utils.cmake +++ b/verification/cmake/utils.cmake @@ -12,8 +12,8 @@ # to the "all" target to build a gtest binary for the given test source. # # param: FRAMEWORK [gtest|unity] - The name of the test framework to use. -# param: TEST_NAME string - The name to give the test target. -# param: TEST_SOURCE List[path] - A list of source files to compile into +# param: NAME string - The name to give the test target. +# param: SOURCE List[path] - A list of source files to compile into # the test binary. # param: OUTDIR path - A path to output test binaries and coverage data under. # param: DSDL_TARGETS List[str] - Zero to many targets that generate types under test. 
@@ -22,8 +22,8 @@ function(define_native_unit_test) # +--[ INPUTS ]-----------------------------------------------------------+ set(options "") - set(monoValues FRAMEWORK TEST_NAME OUTDIR) - set(multiValues TEST_SOURCE DSDL_TARGETS) + set(monoValues FRAMEWORK NAME OUTDIR) + set(multiValues SOURCE DSDL_TARGETS) cmake_parse_arguments( ARG @@ -34,44 +34,44 @@ function(define_native_unit_test) ) # +--[ BODY ]------------------------------------------------------------+ - add_executable(${ARG_TEST_NAME} ${ARG_TEST_SOURCE}) + add_executable(${ARG_NAME} ${ARG_SOURCE}) if (ARG_DSDL_TARGETS) - add_dependencies(${ARG_TEST_NAME} ${ARG_DSDL_TARGETS}) - target_link_libraries(${ARG_TEST_NAME} PUBLIC ${ARG_DSDL_TARGETS}) + add_dependencies(${ARG_NAME} ${ARG_DSDL_TARGETS}) + target_link_libraries(${ARG_NAME} PUBLIC ${ARG_DSDL_TARGETS}) endif() if (${ARG_FRAMEWORK} STREQUAL "gtest") - target_link_libraries(${ARG_TEST_NAME} PUBLIC gmock_main) + target_link_libraries(${ARG_NAME} PUBLIC gmock_main) elseif (${ARG_FRAMEWORK} STREQUAL "unity") - target_link_libraries(${ARG_TEST_NAME} PUBLIC unity_core) + target_link_libraries(${ARG_NAME} PUBLIC unity_core) elseif (${ARG_FRAMEWORK} STREQUAL "none") - message(STATUS "${ARG_TEST_NAME}: No test framework") - target_compile_options(${ARG_TEST_NAME} PRIVATE "$<$:-fanalyzer>") - target_compile_options(${ARG_TEST_NAME} PRIVATE "$<$:-fanalyzer-checker=taint>") + message(STATUS "${ARG_NAME}: No test framework") + target_compile_options(${ARG_NAME} PRIVATE "$<$:-fanalyzer>") + target_compile_options(${ARG_NAME} PRIVATE "$<$:-fanalyzer-checker=taint>") else() message(FATAL_ERROR "${ARG_FRAMEWORK} isn't a supported unit test framework. 
Currently we support gtest and unity.") endif() - set_target_properties(${ARG_TEST_NAME} + set_target_properties(${ARG_NAME} PROPERTIES RUNTIME_OUTPUT_DIRECTORY "${ARG_OUTDIR}" ) - add_custom_command(OUTPUT ${ARG_OUTDIR}/${ARG_TEST_NAME}-disassembly.S - DEPENDS ${ARG_TEST_NAME} - COMMAND ${CMAKE_OBJDUMP} -d ${ARG_OUTDIR}/${ARG_TEST_NAME} + add_custom_command(OUTPUT ${ARG_OUTDIR}/${ARG_NAME}-disassembly.S + DEPENDS ${ARG_NAME} + COMMAND ${CMAKE_OBJDUMP} -d ${ARG_OUTDIR}/${ARG_NAME} --demangle --disassemble-zeroes --disassembler-options=reg-names-std --syms --special-syms --all-headers - --wide > ${ARG_OUTDIR}/${ARG_TEST_NAME}-disassembly.S - COMMENT "Creating disassembly from ${ARG_TEST_NAME}" + --wide > ${ARG_OUTDIR}/${ARG_NAME}-disassembly.S + COMMENT "Creating disassembly from ${ARG_NAME}" ) - add_custom_target(${ARG_TEST_NAME}-disassembly DEPENDS ${ARG_OUTDIR}/${ARG_TEST_NAME}-disassembly.S) + add_custom_target(${ARG_NAME}-disassembly DEPENDS ${ARG_OUTDIR}/${ARG_NAME}-disassembly.S) endfunction() @@ -80,14 +80,15 @@ endfunction() # function: define_native_test_run - creates a makefile target that will build and # run individual unit tests. # -# param: TEST_NAME string - The name of the test to run. A target will be created -# with the name run_${ARG_TEST_NAME} -# param: OUTDIR path - The path where the test binaries live. +# param: NAME string - The name of the test to run. +# param: OUTDIR path - The path where the test binaries live. +# param: OUT_CUSTOM_TARGET - A variable to set in the parent scope with the name of the custom target +# defined by this function. 
# function(define_native_test_run) # +--[ INPUTS ]-----------------------------------------------------------+ set(options "") - set(monoValues TEST_NAME OUTDIR) + set(monoValues NAME OUTDIR OUT_CUSTOM_TARGET) set(multiValues "") cmake_parse_arguments( @@ -98,15 +99,19 @@ function(define_native_test_run) ${ARGN} ) - # +--[ BODY ]------------------------------------------------------------+ + # +--[ BODY ]-------------------------------------------------------------+ add_custom_target( - run_${ARG_TEST_NAME} + run_${ARG_NAME} COMMAND - ${ARG_OUTDIR}/${ARG_TEST_NAME} + ${ARG_OUTDIR}/${ARG_NAME} DEPENDS - ${ARG_TEST_NAME} + ${ARG_NAME} ) + # +--[ OUTPUTS ]----------------------------------------------------------+ + + set(${ARG_OUT_CUSTOM_TARGET} "run_${ARG_NAME}" PARENT_SCOPE) + endfunction() #