Allow building of tests in build_release script #1555

Merged · 2 commits · Feb 20, 2016
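This PR teaches workspace_tools/build_release.py to build the test projects alongside the release libraries. A new --build-tests option takes a comma-separated list of directories, resolved relative to libraries/tests, and hands every TEST_MAP entry found under them to SingleTestRunner in build-only mode; a companion -L/--list-config option dumps the release's platform/toolchain table as JSON. A rough usage sketch based on the options added below (the platform and test-directory names are illustrative, not taken from the diff):

    # Dump the official release configuration as JSON
    python workspace_tools/build_release.py -L

    # Build the release for one platform and also build the tests found
    # under libraries/tests/mbed (hypothetical directory), writing a
    # JUnit-style build report
    python workspace_tools/build_release.py -p LPC1768 --build-tests=mbed --report-build build_report.xml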
133 changes: 105 additions & 28 deletions workspace_tools/build_release.py
@@ -17,8 +17,9 @@
 """
 import sys
 from time import time
-from os.path import join, abspath, dirname
+from os.path import join, abspath, dirname, normpath
 from optparse import OptionParser
+import json
 
 # Be sure that the tools directory is in the search path
 ROOT = abspath(join(dirname(__file__), ".."))
@@ -28,6 +29,10 @@
 from workspace_tools.build_api import write_build_report
 from workspace_tools.targets import TARGET_MAP
 from workspace_tools.test_exporters import ReportExporter, ResultExporterType
+from workspace_tools.test_api import SingleTestRunner
+from workspace_tools.test_api import singletest_in_cli_mode
+from workspace_tools.paths import TEST_DIR
+from workspace_tools.tests import TEST_MAP
 
 OFFICIAL_MBED_LIBRARY_BUILD = (
     ('LPC11U24', ('ARM', 'uARM', 'GCC_ARM', 'IAR')),
@@ -153,51 +158,123 @@
 
     parser.add_option("-p", "--platforms", dest="platforms", default="", help="Build only for the platform names separated by comma")
 
+    parser.add_option("-L", "--list-config", action="store_true", dest="list_config",
+                      default=False, help="List the platforms and toolchains in the release in JSON")
+
     parser.add_option("", "--report-build", dest="report_build_file_name", help="Output the build results to a junit xml file")
 
+    parser.add_option("", "--build-tests", dest="build_tests", help="Build all tests in the given directories (relative to /libraries/tests)")
+
-    options, args = parser.parse_args()
-    start = time()
-    report = {}
-    properties = {}
-
-    platforms = None
-    if options.platforms != "":
-        platforms = set(options.platforms.split(","))
+    options, args = parser.parse_args()
 
-    for target_name, toolchain_list in OFFICIAL_MBED_LIBRARY_BUILD:
-        if platforms is not None and not target_name in platforms:
-            print("Excluding %s from release" % target_name)
-            continue
-
-        if options.official_only:
-            toolchains = (getattr(TARGET_MAP[target_name], 'default_toolchain', 'ARM'),)
-        else:
-            toolchains = toolchain_list
-
-        if options.toolchains:
-            print "Only building using the following toolchains: %s" % (options.toolchains)
-            toolchainSet = set(toolchains)
-            toolchains = toolchainSet.intersection(set((options.toolchains).split(',')))
+    if options.list_config:
+        print json.dumps(OFFICIAL_MBED_LIBRARY_BUILD, indent=4)
+        sys.exit()
 
-        for toolchain in toolchains:
-            id = "%s::%s" % (target_name, toolchain)
+    start = time()
+    build_report = {}
+    build_properties = {}
 
-            try:
-                built_mbed_lib = build_mbed_libs(TARGET_MAP[target_name], toolchain, verbose=options.verbose, jobs=options.jobs, report=report, properties=properties)
+    platforms = None
+    if options.platforms != "":
+        platforms = set(options.platforms.split(","))
 
-            except Exception, e:
-                print str(e)
+    if options.build_tests:
+        # Get all paths
+        directories = options.build_tests.split(',')
+        for i in range(len(directories)):
+            directories[i] = normpath(join(TEST_DIR, directories[i]))
+
+        test_names = []
+
+        for test_id in TEST_MAP.keys():
+            # Prevents tests with multiple source dirs from being checked
+            if isinstance(TEST_MAP[test_id].source_dir, basestring):
+                test_path = normpath(TEST_MAP[test_id].source_dir)
+                for directory in directories:
+                    if directory in test_path:
+                        test_names.append(test_id)
+
+        mut_counter = 1
+        mut = {}
+        test_spec = {
+            "targets": {}
+        }
+
+        for target_name, toolchain_list in OFFICIAL_MBED_LIBRARY_BUILD:
+            toolchains = None
+            if platforms is not None and not target_name in platforms:
+                print("Excluding %s from release" % target_name)
+                continue
+
+            if options.official_only:
+                toolchains = (getattr(TARGET_MAP[target_name], 'default_toolchain', 'ARM'),)
+            else:
+                toolchains = toolchain_list
+
+            if options.toolchains:
+                print "Only building using the following toolchains: %s" % (options.toolchains)
+                toolchainSet = set(toolchains)
+                toolchains = toolchainSet.intersection(set((options.toolchains).split(',')))
+
+            mut[str(mut_counter)] = {
+                "mcu": target_name
+            }
+
+            mut_counter += 1
+
+            test_spec["targets"][target_name] = toolchains
+
+        single_test = SingleTestRunner(_muts=mut,
+                                       _opts_report_build_file_name=options.report_build_file_name,
+                                       _test_spec=test_spec,
+                                       _opts_test_by_names=",".join(test_names),
+                                       _opts_verbose=options.verbose,
+                                       _opts_only_build_tests=True,
+                                       _opts_suppress_summary=True,
+                                       _opts_jobs=options.jobs,
+                                       _opts_include_non_automated=True,
+                                       _opts_build_report=build_report,
+                                       _opts_build_properties=build_properties)
+        # Runs test suite in CLI mode
+        test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, new_build_report, new_build_properties = single_test.execute()
+    else:
+        for target_name, toolchain_list in OFFICIAL_MBED_LIBRARY_BUILD:
+            if platforms is not None and not target_name in platforms:
+                print("Excluding %s from release" % target_name)
+                continue
+
+            if options.official_only:
+                toolchains = (getattr(TARGET_MAP[target_name], 'default_toolchain', 'ARM'),)
+            else:
+                toolchains = toolchain_list
+
+            if options.toolchains:
+                print "Only building using the following toolchains: %s" % (options.toolchains)
+                toolchainSet = set(toolchains)
+                toolchains = toolchainSet.intersection(set((options.toolchains).split(',')))
+
+            for toolchain in toolchains:
+                id = "%s::%s" % (target_name, toolchain)
+
+                try:
+                    built_mbed_lib = build_mbed_libs(TARGET_MAP[target_name], toolchain, verbose=options.verbose, jobs=options.jobs, report=build_report, properties=build_properties)
+
+                except Exception, e:
+                    print str(e)
 
     # Write summary of the builds
     if options.report_build_file_name:
         file_report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
-        file_report_exporter.report_to_file(report, options.report_build_file_name, test_suite_properties=properties)
+        file_report_exporter.report_to_file(build_report, options.report_build_file_name, test_suite_properties=build_properties)
 
     print "\n\nCompleted in: (%.2f)s" % (time() - start)
 
     print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
-    status = print_report_exporter.report(report)
+    status = print_report_exporter.report(build_report)
 
     if not status:
         sys.exit(1)
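A note on the --build-tests branch above: it fabricates a minimal MUT (mbed-under-test) map and test spec so SingleTestRunner has the structures it expects, even though nothing is flashed or run (_opts_only_build_tests=True). With two platforms in the release, the structures would look roughly like this (the second target and its toolchain set are hypothetical):

    mut = {
        "1": {"mcu": "LPC11U24"},
        "2": {"mcu": "LPC1768"},  # hypothetical second target
    }
    test_spec = {
        "targets": {
            "LPC11U24": ("ARM", "uARM", "GCC_ARM", "IAR"),
            "LPC1768": ("ARM", "GCC_ARM"),  # toolchains left after the optional toolchain filter
        }
    }

execute() then compiles each selected test for every target/toolchain pair and accumulates results into the build_report/build_properties dictionaries that the script later feeds to ReportExporter.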
28 changes: 19 additions & 9 deletions workspace_tools/test_api.py
@@ -162,6 +162,8 @@ def __init__(self,
                  _opts_report_html_file_name=None,
                  _opts_report_junit_file_name=None,
                  _opts_report_build_file_name=None,
+                 _opts_build_report={},
+                 _opts_build_properties={},
                  _test_spec={},
                  _opts_goanna_for_mbed_sdk=None,
                  _opts_goanna_for_tests=None,
@@ -185,7 +187,8 @@
                  _opts_waterfall_test=None,
                  _opts_consolidate_waterfall_test=None,
                  _opts_extend_test_timeout=None,
-                 _opts_auto_detect=None):
+                 _opts_auto_detect=None,
+                 _opts_include_non_automated=False):
         """ Let's try hard to init this object
         """
         from colorama import init
@@ -241,6 +244,10 @@
         self.opts_extend_test_timeout = _opts_extend_test_timeout
         self.opts_clean = _clean
         self.opts_auto_detect = _opts_auto_detect
+        self.opts_include_non_automated = _opts_include_non_automated
+
+        self.build_report = _opts_build_report
+        self.build_properties = _opts_build_properties
 
         # File / screen logger initialization
         self.logger = CLITestLogger(file_name=self.opts_log_file_name) # Default test logger
@@ -382,7 +389,7 @@ def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_rep
             self.db_logger.update_build_id_info(self.db_logger_build_id, _extra=json.dumps(self.dump_options()))
             self.db_logger.disconnect();
 
-        valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids)
+        valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids, self.opts_include_non_automated)
         skipped_test_map_keys = self.get_skipped_tests(test_map_keys, valid_test_map_keys)
 
         for skipped_test_id in skipped_test_map_keys:
@@ -560,8 +567,6 @@ def execute(self):
         if self.opts_shuffle_test_seed is not None and self.is_shuffle_seed_float():
             self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND)
 
-        build_report = {}
-        build_properties = {}
 
         if self.opts_parallel_test_exec:
             ###################################################################
@@ -575,7 +580,7 @@
             # get information about available MUTs (per target).
             for target, toolchains in self.test_spec['targets'].iteritems():
                 self.test_suite_properties_ext[target] = {}
-                t = threading.Thread(target=self.execute_thread_slice, args = (q, target, toolchains, clean, test_ids, build_report, build_properties))
+                t = threading.Thread(target=self.execute_thread_slice, args = (q, target, toolchains, clean, test_ids, self.build_report, self.build_properties))
                 t.daemon = True
                 t.start()
                 execute_threads.append(t)
@@ -588,7 +593,7 @@
                 if target not in self.test_suite_properties_ext:
                     self.test_suite_properties_ext[target] = {}
 
-                self.execute_thread_slice(q, target, toolchains, clean, test_ids, build_report, build_properties)
+                self.execute_thread_slice(q, target, toolchains, clean, test_ids, self.build_report, self.build_properties)
                 q.get()
 
         if self.db_logger:
@@ -597,9 +602,9 @@
             self.db_logger.update_build_id_info(self.db_logger_build_id, _status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED)
             self.db_logger.disconnect();
 
-        return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext, build_report, build_properties
+        return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext, self.build_report, self.build_properties
 
-    def get_valid_tests(self, test_map_keys, target, toolchain, test_ids):
+    def get_valid_tests(self, test_map_keys, target, toolchain, test_ids, include_non_automated):
         valid_test_map_keys = []
 
         for test_id in test_map_keys:
@@ -626,7 +631,12 @@ def get_valid_tests(self, test_map_keys, target, toolchain, test_ids):
                     print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral test skipped for target %s'% (target))
                 continue
 
-            if test.automated and test.is_supported(target, toolchain):
+            if not include_non_automated and not test.automated:
+                if self.opts_verbose_skipped_tests:
+                    print self.logger.log_line(self.logger.LogType.INFO, 'Non automated test skipped for target %s'% (target))
+                continue
+
+            if test.is_supported(target, toolchain):
                 if test.peripherals is None and self.opts_only_build_tests:
                     # When users are using 'build only flag' and test do not have
                     # specified peripherals we can allow test building by default
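The test_api.py changes are mostly plumbing: build_report and build_properties stop being locals in execute() and become constructor arguments, so a caller such as build_release.py can pass in dictionaries it owns and read the accumulated results back (the {} defaults in the signature mean callers who want the results should pass their own dicts, as build_release.py does). The behavioural change sits in get_valid_tests: a test previously had to be marked automated to be considered at all; now non-automated tests are skipped only when the caller has not opted in (build_release.py opts in with _opts_include_non_automated=True). A minimal sketch of the new rule, using a hypothetical Test stub rather than real TEST_MAP entries:

    class Test(object):
        def __init__(self, automated, supported):
            self.automated = automated
            self.supported = supported

        def is_supported(self, target, toolchain):
            return self.supported

    def is_valid(test, target, toolchain, include_non_automated):
        # Old rule: test.automated and test.is_supported(target, toolchain)
        # New rule: automation only filters when the caller excludes
        # non-automated tests
        if not include_non_automated and not test.automated:
            return False
        return test.is_supported(target, toolchain)

    assert is_valid(Test(False, True), "LPC11U24", "ARM", True)       # buildable now
    assert not is_valid(Test(False, True), "LPC11U24", "ARM", False)  # old behaviour kept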