mirror of https://github.com/ARMmbed/mbed-os.git

Reorganized test_api.py to build all required libraries (RTOS, Ethernet, etc) for tests up front, before any tests are built

parent 4b0926fcdb
commit cb4e13c1e3
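The core of the change is a two-pass scheme inside execute_thread_slice: a first pass over the selected tests collects the union of libraries they depend on, those libraries are built once per target/toolchain pair, and only then are the individual tests built. A minimal sketch of the idea in the codebase's Python 2, reusing the TEST_MAP, LIBRARIES and build_lib names that appear in the diff below (the build_lib call is abbreviated; the real runner also passes the mapped target object plus clean/verbose/jobs options, so treat this as illustrative rather than the exact code):

# Sketch: pre-build pass that collects and builds required libraries once
from workspace_tools.tests import TEST_MAP          # test_id -> test definition
from workspace_tools.libraries import LIBRARIES     # descriptors with 'id' and 'build_dir'
from workspace_tools.build_api import build_lib

def build_libraries_for_tests(valid_test_ids, target, toolchain):
    # Pass 1: union of library ids needed by any selected test
    required = set()
    for test_id in valid_test_ids:
        test = TEST_MAP[test_id]
        for lib in LIBRARIES:
            if lib['build_dir'] in test.dependencies:
                required.add(lib['id'])

    # Pass 2: build each required library exactly once, not once per test
    for lib_id in sorted(required):
        build_lib(lib_id, target, toolchain)
    return required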
@@ -209,6 +209,7 @@ if __name__ == '__main__':
        _opts_log_file_name=opts.log_file_name,
        _opts_report_html_file_name=opts.report_html_file_name,
        _opts_report_junit_file_name=opts.report_junit_file_name,
        _opts_report_jenkins_file_name=opts.report_jenkins_file_name,
        _test_spec=test_spec,
        _opts_goanna_for_mbed_sdk=opts.goanna_for_mbed_sdk,
        _opts_goanna_for_tests=opts.goanna_for_tests,

@@ -48,6 +48,7 @@ from workspace_tools.targets import TARGET_MAP
from workspace_tools.test_db import BaseDBAccess
from workspace_tools.build_api import build_project, build_mbed_libs, build_lib
from workspace_tools.build_api import get_target_supported_toolchains
from workspace_tools.build_api import write_build_report
from workspace_tools.libraries import LIBRARIES, LIBRARY_MAP
from workspace_tools.toolchains import TOOLCHAIN_BIN_PATH
from workspace_tools.test_exporters import ReportExporter, ResultExporterType

@@ -153,6 +154,7 @@ class SingleTestRunner(object):
                 _opts_log_file_name=None,
                 _opts_report_html_file_name=None,
                 _opts_report_junit_file_name=None,
                 _opts_report_jenkins_file_name=None,
                 _test_spec={},
                 _opts_goanna_for_mbed_sdk=None,
                 _opts_goanna_for_tests=None,

@@ -205,6 +207,7 @@ class SingleTestRunner(object):
        self.opts_log_file_name = _opts_log_file_name
        self.opts_report_html_file_name = _opts_report_html_file_name
        self.opts_report_junit_file_name = _opts_report_junit_file_name
        self.opts_report_jenkins_file_name = _opts_report_jenkins_file_name
        self.opts_goanna_for_mbed_sdk = _opts_goanna_for_mbed_sdk
        self.opts_goanna_for_tests = _opts_goanna_for_tests
        self.opts_shuffle_test_order = _opts_shuffle_test_order

@@ -294,8 +297,17 @@ class SingleTestRunner(object):
    test_summary_ext = {}
    execute_thread_slice_lock = Lock()

    def execute_thread_slice(self, q, target, toolchains, clean, test_ids):
    def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report):
        for toolchain in toolchains:
            # Toolchain specific build successes and failures
            build_report[toolchain] = {
                "mbed_failure": False,
                "library_failure": False,
                "library_build_successes": [],
                "library_build_failures": [],
                "test_build_successes": [],
                "test_build_failures": []
            }
            # print target, toolchain
            # Test suite properties returned to external tools like CI
            test_suite_properties = {}
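Each toolchain slot in build_report starts from the skeleton above and is filled in as the builds run. By the end of a thread slice one entry might look like the following (toolchain, library and test names here are purely illustrative):

# Hypothetical contents of one toolchain entry after a build pass
build_report["GCC_ARM"] = {
    "mbed_failure": False,
    "library_failure": False,
    "library_build_successes": ["rtos", "eth"],
    "library_build_failures": [],
    "test_build_successes": ["RTOS_1", "NET_1"],
    "test_build_failures": ["NET_2"]
}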
@@ -306,6 +318,7 @@ class SingleTestRunner(object):
            test_suite_properties['toolchain'] = toolchain
            test_suite_properties['shuffle_random_seed'] = self.shuffle_random_seed

            # print '=== %s::%s ===' % (target, toolchain)
            # Let's build our test
            if target not in TARGET_MAP:

@@ -328,9 +341,9 @@ class SingleTestRunner(object):
                    continue
                except ToolException:
                    print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain))
                    build_report[toolchain]["mbed_failure"] = True
                    #return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
                    q.put(target + '_'.join(toolchains))
                    return
                    continue

            build_dir = join(BUILD_DIR, "test", target, toolchain)

@@ -340,6 +353,7 @@ class SingleTestRunner(object):

            # Enumerate through all tests and shuffle test order if requested
            test_map_keys = sorted(TEST_MAP.keys())

            if self.opts_shuffle_test_order:
                random.shuffle(test_map_keys, self.shuffle_random_func)
                # Update database with shuffle seed if applicable
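In Python 2 (which this codebase targets), random.shuffle accepts an optional zero-argument function returning a float in [0.0, 1.0); supplying a function that always returns the same rounded seed makes the shuffle reproducible from the seed recorded in the logs. A small self-contained illustration of that mechanism (test ids are examples):

import random

SHUFFLE_SEED_ROUND = 10   # rounding precision, mirroring SingleTestRunner
seed = round(0.4170220047, SHUFFLE_SEED_ROUND)

test_map_keys = sorted(['MBED_A1', 'MBED_A2', 'RTOS_1', 'NET_1'])
random.shuffle(test_map_keys, lambda: seed)   # same seed -> same order every run
print test_map_keys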
@@ -358,61 +372,29 @@ class SingleTestRunner(object):
                    self.db_logger.update_build_id_info(self.db_logger_build_id, _extra=json.dumps(self.dump_options()))
                    self.db_logger.disconnect();

            for test_id in test_map_keys:
            valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids)
            skipped_test_map_keys = self.get_skipped_tests(test_map_keys, valid_test_map_keys)

            for skipped_test_id in skipped_test_map_keys:
                test_suite_properties['skipped'].append(skipped_test_id)

            # First pass through all tests and determine which libraries need to be built
            libraries = set()
            for test_id in valid_test_map_keys:
                test = TEST_MAP[test_id]
                if self.opts_test_by_names and test_id not in self.opts_test_by_names.split(','):
                    continue

                if test_ids and test_id not in test_ids:
                    continue
                # Detect which lib should be added to test
                # Some libs, like RTOS or ETH, have to be compiled
                for lib in LIBRARIES:
                    if lib['build_dir'] in test.dependencies:
                        libraries.add(lib['id'])

                if self.opts_test_only_peripheral and not test.peripherals:
                    if self.opts_verbose_skipped_tests:
                        print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target))
                    test_suite_properties['skipped'].append(test_id)
                    continue

                if self.opts_peripheral_by_names and test.peripherals and not len([i for i in test.peripherals if i in self.opts_peripheral_by_names.split(',')]):
                    # We will skip tests not forced with -p option
                    if self.opts_verbose_skipped_tests:
                        print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target))
                    test_suite_properties['skipped'].append(test_id)
                    continue

                if self.opts_test_only_common and test.peripherals:
                    if self.opts_verbose_skipped_tests:
                        print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral test skipped for target %s'% (target))
                    test_suite_properties['skipped'].append(test_id)
                    continue

                if test.automated and test.is_supported(target, toolchain):
                    if test.peripherals is None and self.opts_only_build_tests:
                        # When users are using the 'build only' flag and the test does not
                        # specify peripherals, we can allow test building by default
                        pass
                    elif self.opts_peripheral_by_names and test_id not in self.opts_peripheral_by_names.split(','):
                        # If we force a peripheral with option -p we expect the test
                        # to pass even if the peripheral is not in the MUTs file.
                        pass
                    elif not self.is_peripherals_available(target, test.peripherals):
                        if self.opts_verbose_skipped_tests:
                            if test.peripherals:
                                print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral %s test skipped for target %s'% (",".join(test.peripherals), target))
                            else:
                                print self.logger.log_line(self.logger.LogType.INFO, 'Test %s skipped for target %s'% (test_id, target))
                        test_suite_properties['skipped'].append(test_id)
                        continue

                build_project_options = ["analyze"] if self.opts_goanna_for_tests else None
                clean_project_options = True if self.opts_goanna_for_tests or clean or self.opts_clean else None

                # Detect which lib should be added to test
                # Some libs, like RTOS or ETH, have to be compiled
                libraries = []
                for lib in LIBRARIES:
                    if lib['build_dir'] in test.dependencies:
                        libraries.append(lib['id'])
                # Build libs for test
            # Build all required libraries
            for lib_id in libraries:
                try:
                    build_lib(lib_id,
@@ -424,9 +406,16 @@ class SingleTestRunner(object):
                              jobs=self.opts_jobs)
                except ToolException:
                    print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building library %s'% (lib_id))
                    build_report[toolchain]["library_failure"] = True
                    build_report[toolchain]["library_build_failures"].append(lib_id)
                    #return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
                    q.put(target + '_'.join(toolchains))
                    return
                    continue

                build_report[toolchain]["library_build_successes"].append(lib_id)

            for test_id in valid_test_map_keys:
                test = TEST_MAP[test_id]

                test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries)
@@ -461,10 +450,14 @@ class SingleTestRunner(object):
                                          jobs=self.opts_jobs)
                except ToolException:
                    project_name_str = project_name if project_name is not None else test_id
                    print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building project %s'% (project_name_str))
                    build_report[toolchain]["test_build_failures"].append(test_id)
                    # return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
                    q.put(target + '_'.join(toolchains))
                    return
                    continue

                build_report[toolchain]["test_build_successes"].append(test_id)

                if self.opts_only_build_tests:
                    # With this option we are skipping testing phase
                    continue

@@ -500,6 +493,7 @@ class SingleTestRunner(object):

            test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
            self.test_suite_properties_ext[target][toolchain] = test_suite_properties

            # return self.test_summary, self.shuffle_random_seed, test_summary_ext, self.test_suite_properties_ext
            q.put(target + '_'.join(toolchains))
            return
@@ -514,6 +508,8 @@ class SingleTestRunner(object):
        if self.opts_shuffle_test_seed is not None and self.is_shuffle_seed_float():
            self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND)

        build_reports = []

        if self.opts_parallel_test_exec:
            ###################################################################
            # Experimental, parallel test execution per singletest instance.
@@ -526,7 +522,9 @@ class SingleTestRunner(object):
            # get information about available MUTs (per target).
            for target, toolchains in self.test_spec['targets'].iteritems():
                self.test_suite_properties_ext[target] = {}
                t = threading.Thread(target=self.execute_thread_slice, args = (q, target, toolchains, clean, test_ids))
                cur_build_report = {}
                t = threading.Thread(target=self.execute_thread_slice, args = (q, target, toolchains, clean, test_ids, cur_build_report))
                build_reports.append({ "target": target, "report": cur_build_report})
                t.daemon = True
                t.start()
                execute_threads.append(t)
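The pattern here is worth noting: each thread receives its own empty cur_build_report dict, so it can write results without any locking, while the Queue is used purely as a completion signal. A stripped-down, runnable illustration of the same pattern (Python 2; target names hypothetical):

import threading
from Queue import Queue

def slice_worker(q, target, report):
    report['built'] = ['%s_rtos' % target]   # each thread fills only its own dict
    q.put(target)                            # signal that this slice is done

q = Queue()
build_reports = []
for target in ['LPC1768', 'K64F']:
    cur_build_report = {}
    t = threading.Thread(target=slice_worker, args=(q, target, cur_build_report))
    build_reports.append({'target': target, 'report': cur_build_report})
    t.daemon = True
    t.start()

for _ in range(2):
    q.get()    # block until every worker has reported in

print build_reports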
@@ -538,16 +536,107 @@ class SingleTestRunner(object):
            for target, toolchains in self.test_spec['targets'].iteritems():
                if target not in self.test_suite_properties_ext:
                    self.test_suite_properties_ext[target] = {}
                self.execute_thread_slice(q, target, toolchains, clean, test_ids)

                cur_build_report = {}
                self.execute_thread_slice(q, target, toolchains, clean, test_ids, cur_build_report)
                build_reports.append({ "target": target, "report": cur_build_report})
                q.get()

        print build_reports

        build_report = []

        for target_build_report in build_reports:
            cur_report = {
                "target": target_build_report["target"],
                "successes": [],
                "failures": []
            }

            for toolchain in sorted(target_build_report["report"], key=target_build_report["report"].get):
                print "%s - %s" % (target_build_report["target"], toolchain)
                report = target_build_report["report"][toolchain]

                if report["mbed_failure"]:
                    cur_report["failures"].append("mbed::%s" % (toolchain))
                elif report["library_failure"]:
                    for library in report["library_build_failures"]:
                        cur_report["failures"].append("Library::%s::%s" % (library, toolchain))
                else:
                    cur_report["successes"].append("All Libraries::%s" % (toolchain))

                    if len(report["test_build_failures"]) > 0:
                        for successful_test in report["test_build_successes"]:
                            cur_report["successes"].append("Test::%s::%s" % (toolchain, successful_test))

                        for failed_test in report["test_build_failures"]:
                            cur_report["failures"].append("Test::%s::%s" % (toolchain, failed_test))
                    else:
                        cur_report["successes"].append("All Tests::%s" % (toolchain))

            build_report.append(cur_report)

        if self.db_logger:
            self.db_logger.reconnect();
            if self.db_logger.is_connected():
                self.db_logger.update_build_id_info(self.db_logger_build_id, _status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED)
            self.db_logger.disconnect();

        return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
        return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext, build_report

    def get_valid_tests(self, test_map_keys, target, toolchain, test_ids):
        valid_test_map_keys = []

        for test_id in test_map_keys:
            test = TEST_MAP[test_id]
            if self.opts_test_by_names and test_id not in self.opts_test_by_names.split(','):
                continue

            if test_ids and test_id not in test_ids:
                continue

            if self.opts_test_only_peripheral and not test.peripherals:
                if self.opts_verbose_skipped_tests:
                    print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target))
                continue

            if self.opts_peripheral_by_names and test.peripherals and not len([i for i in test.peripherals if i in self.opts_peripheral_by_names.split(',')]):
                # We will skip tests not forced with -p option
                if self.opts_verbose_skipped_tests:
                    print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target))
                continue

            if self.opts_test_only_common and test.peripherals:
                if self.opts_verbose_skipped_tests:
                    print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral test skipped for target %s'% (target))
                continue

            if test.automated and test.is_supported(target, toolchain):
                if test.peripherals is None and self.opts_only_build_tests:
                    # When users are using the 'build only' flag and the test does not
                    # specify peripherals, we can allow test building by default
                    pass
                elif self.opts_peripheral_by_names and test_id not in self.opts_peripheral_by_names.split(','):
                    # If we force a peripheral with option -p we expect the test
                    # to pass even if the peripheral is not in the MUTs file.
                    pass
                elif not self.is_peripherals_available(target, test.peripherals):
                    if self.opts_verbose_skipped_tests:
                        if test.peripherals:
                            print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral %s test skipped for target %s'% (",".join(test.peripherals), target))
                        else:
                            print self.logger.log_line(self.logger.LogType.INFO, 'Test %s skipped for target %s'% (test_id, target))
                    continue

            # The test has made it through all the filters, so add it to the valid tests list
            valid_test_map_keys.append(test_id)

        return valid_test_map_keys

    def get_skipped_tests(self, all_test_map_keys, valid_test_map_keys):
        # NOTE: This will not preserve order
        return list(set(all_test_map_keys) - set(valid_test_map_keys))
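As the NOTE says, the set difference in get_skipped_tests does not preserve the original ordering (and also drops duplicates); callers that need a stable order can sort the result. For example:

all_keys = ['MBED_A1', 'MBED_A2', 'RTOS_1', 'NET_1']
valid_keys = ['RTOS_1', 'MBED_A1']
skipped = sorted(set(all_keys) - set(valid_keys))
print skipped    # ['MBED_A2', 'NET_1']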
    def generate_test_summary_by_target(self, test_summary, shuffle_seed=None):
        """ Prints well-formed summary with results (SQL table like)
@@ -1314,7 +1403,7 @@ def singletest_in_cli_mode(single_test):
    """
    start = time()
    # Execute tests depending on options and filter applied
    test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext = single_test.execute()
    test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report = single_test.execute()
    elapsed_time = time() - start

    # Human readable summary

@@ -1333,9 +1422,12 @@ def singletest_in_cli_mode(single_test):
        report_exporter = ReportExporter(ResultExporterType.HTML)
        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
    if single_test.opts_report_junit_file_name:
        # Export results in form of HTML report to separate file
        # Export results in form of JUnit XML report to separate file
        report_exporter = ReportExporter(ResultExporterType.JUNIT)
        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
    if single_test.opts_report_jenkins_file_name:
        # Export build results as Jenkins XML report to separate file
        write_build_report(build_report, single_test.opts_report_jenkins_file_name)


class TestLogger():
@@ -1706,6 +1798,10 @@ def get_default_test_options_parser():
                      dest='report_junit_file_name',
                      help='You can log test suite results in form of JUnit compliant XML report')

    parser.add_option("", "--report-jenkins",
                      dest="report_jenkins_file_name",
                      help="Output the build results to an XML file that is readable by Jenkins")

    parser.add_option('', '--verbose-skipped',
                      dest='verbose_skipped_tests',
                      default=False,
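With the new option wired into the parser, a CI job can request the Jenkins-readable build report alongside the existing exports. A hypothetical invocation (singletest.py is the usual front end for this parser; the test spec and MUTs file names are placeholders):

python workspace_tools/singletest.py -i test_spec.json -M muts_all.json --report-jenkins=build_report.xml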