Merge pull request #1555 from bridadan/release-build-tests

Allow building of tests in build_release script
Martin Kojtal 2016-02-20 08:03:08 +00:00
commit ba89da08bc
2 changed files with 119 additions and 32 deletions
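The headline change: build_release.py can now compile the test suite alongside the mbed libraries. A new --build-tests option takes a comma-separated list of directories relative to libraries/tests, resolves them against TEST_MAP, and hands the matching test IDs to SingleTestRunner in build-only mode (a hypothetical invocation: python workspace_tools/build_release.py -p LPC1768 --build-tests mbed/basic,rtos --report-build report.xml). Below is a minimal sketch of the directory-to-test-ID filtering the new code performs; TEST_DIR, the test IDs, and the paths are illustrative stand-ins for workspace_tools.paths.TEST_DIR and workspace_tools.tests.TEST_MAP (whose entries are test objects, reduced here to plain path strings):

    # Stand-ins: the real TEST_MAP maps test ids to objects whose source_dir
    # may also be a list (such tests are skipped by the isinstance check in
    # the diff below); plain strings are enough to show the matching rule.
    from os.path import join, normpath

    TEST_DIR = "/repo/libraries/tests"
    TEST_MAP = {
        "MBED_A1": join(TEST_DIR, "mbed", "basic"),
        "MBED_A2": join(TEST_DIR, "mbed", "semihost"),
        "RTOS_1": join(TEST_DIR, "rtos", "mbed", "basic"),
    }

    def tests_under(build_tests_opt):
        # Normalize each requested directory the way the diff does ...
        directories = [normpath(join(TEST_DIR, d)) for d in build_tests_opt.split(",")]
        names = []
        for test_id, source_dir in sorted(TEST_MAP.items()):
            test_path = normpath(source_dir)
            # ... then keep a test if any requested directory appears in its
            # normalized source path (the same substring containment check).
            if any(d in test_path for d in directories):
                names.append(test_id)
        return names

    print(tests_under("mbed"))       # -> ['MBED_A1', 'MBED_A2']
    print(tests_under("rtos/mbed"))  # -> ['RTOS_1']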

workspace_tools/build_release.py

@@ -17,8 +17,9 @@ limitations under the License.
 """
 import sys
 from time import time
-from os.path import join, abspath, dirname
+from os.path import join, abspath, dirname, normpath
 from optparse import OptionParser
 import json
 # Be sure that the tools directory is in the search path
 ROOT = abspath(join(dirname(__file__), ".."))
@@ -28,6 +29,10 @@ from workspace_tools.build_api import build_mbed_libs
 from workspace_tools.build_api import write_build_report
 from workspace_tools.targets import TARGET_MAP
 from workspace_tools.test_exporters import ReportExporter, ResultExporterType
+from workspace_tools.test_api import SingleTestRunner
+from workspace_tools.test_api import singletest_in_cli_mode
+from workspace_tools.paths import TEST_DIR
+from workspace_tools.tests import TEST_MAP
 
 OFFICIAL_MBED_LIBRARY_BUILD = (
     ('LPC11U24', ('ARM', 'uARM', 'GCC_ARM', 'IAR')),
@@ -153,51 +158,123 @@ if __name__ == '__main__':
     parser.add_option("-p", "--platforms", dest="platforms", default="", help="Build only for the platform names separated by comma")
 
     parser.add_option("-L", "--list-config", action="store_true", dest="list_config",
                       default=False, help="List the platforms and toolchains in the release in JSON")
 
     parser.add_option("", "--report-build", dest="report_build_file_name", help="Output the build results to a junit xml file")
 
+    parser.add_option("", "--build-tests", dest="build_tests", help="Build all tests in the given directories (relative to /libraries/tests)")
+
     options, args = parser.parse_args()
 
     if options.list_config:
         print json.dumps(OFFICIAL_MBED_LIBRARY_BUILD, indent=4)
         sys.exit()
 
     start = time()
-    report = {}
-    properties = {}
+    build_report = {}
+    build_properties = {}
 
     platforms = None
     if options.platforms != "":
         platforms = set(options.platforms.split(","))
 
-    for target_name, toolchain_list in OFFICIAL_MBED_LIBRARY_BUILD:
-        if platforms is not None and not target_name in platforms:
-            print("Excluding %s from release" % target_name)
-            continue
+    if options.build_tests:
+        # Get all paths
+        directories = options.build_tests.split(',')
+        for i in range(len(directories)):
+            directories[i] = normpath(join(TEST_DIR, directories[i]))
 
-        if options.official_only:
-            toolchains = (getattr(TARGET_MAP[target_name], 'default_toolchain', 'ARM'),)
-        else:
-            toolchains = toolchain_list
+        test_names = []
 
-        if options.toolchains:
-            print "Only building using the following toolchains: %s" % (options.toolchains)
-            toolchainSet = set(toolchains)
-            toolchains = toolchainSet.intersection(set((options.toolchains).split(',')))
+        for test_id in TEST_MAP.keys():
+            # Prevents tests with multiple source dirs from being checked
+            if isinstance( TEST_MAP[test_id].source_dir, basestring):
+                test_path = normpath(TEST_MAP[test_id].source_dir)
+                for directory in directories:
+                    if directory in test_path:
+                        test_names.append(test_id)
 
-        for toolchain in toolchains:
-            id = "%s::%s" % (target_name, toolchain)
+        mut_counter = 1
+        mut = {}
+        test_spec = {
+            "targets": {}
+        }
 
-            try:
-                built_mbed_lib = build_mbed_libs(TARGET_MAP[target_name], toolchain, verbose=options.verbose, jobs=options.jobs, report=report, properties=properties)
+        for target_name, toolchain_list in OFFICIAL_MBED_LIBRARY_BUILD:
+            toolchains = None
+            if platforms is not None and not target_name in platforms:
+                print("Excluding %s from release" % target_name)
+                continue
 
-            except Exception, e:
-                print str(e)
+            if options.official_only:
+                toolchains = (getattr(TARGET_MAP[target_name], 'default_toolchain', 'ARM'),)
+            else:
+                toolchains = toolchain_list
+
+            if options.toolchains:
+                print "Only building using the following toolchains: %s" % (options.toolchains)
+                toolchainSet = set(toolchains)
+                toolchains = toolchainSet.intersection(set((options.toolchains).split(',')))
+
+            mut[str(mut_counter)] = {
+                "mcu": target_name
+            }
+
+            mut_counter += 1
+
+            test_spec["targets"][target_name] = toolchains
+
+        single_test = SingleTestRunner(_muts=mut,
+                                       _opts_report_build_file_name=options.report_build_file_name,
+                                       _test_spec=test_spec,
+                                       _opts_test_by_names=",".join(test_names),
+                                       _opts_verbose=options.verbose,
+                                       _opts_only_build_tests=True,
+                                       _opts_suppress_summary=True,
+                                       _opts_jobs=options.jobs,
+                                       _opts_include_non_automated=True,
+                                       _opts_build_report=build_report,
+                                       _opts_build_properties=build_properties)
+
+        # Runs test suite in CLI mode
+        test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, new_build_report, new_build_properties = single_test.execute()
+    else:
+        for target_name, toolchain_list in OFFICIAL_MBED_LIBRARY_BUILD:
+            if platforms is not None and not target_name in platforms:
+                print("Excluding %s from release" % target_name)
+                continue
+
+            if options.official_only:
+                toolchains = (getattr(TARGET_MAP[target_name], 'default_toolchain', 'ARM'),)
+            else:
+                toolchains = toolchain_list
+
+            if options.toolchains:
+                print "Only building using the following toolchains: %s" % (options.toolchains)
+                toolchainSet = set(toolchains)
+                toolchains = toolchainSet.intersection(set((options.toolchains).split(',')))
+
+            for toolchain in toolchains:
+                id = "%s::%s" % (target_name, toolchain)
+
+                try:
+                    built_mbed_lib = build_mbed_libs(TARGET_MAP[target_name], toolchain, verbose=options.verbose, jobs=options.jobs, report=build_report, properties=build_properties)
+
+                except Exception, e:
+                    print str(e)
 
     # Write summary of the builds
     if options.report_build_file_name:
         file_report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
-        file_report_exporter.report_to_file(report, options.report_build_file_name, test_suite_properties=properties)
+        file_report_exporter.report_to_file(build_report, options.report_build_file_name, test_suite_properties=build_properties)
 
     print "\n\nCompleted in: (%.2f)s" % (time() - start)
 
     print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
-    status = print_report_exporter.report(report)
+    status = print_report_exporter.report(build_report)
 
     if not status:
         sys.exit(1)
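How the new branch drives the build: rather than duplicating the build machinery, it fabricates the two inputs SingleTestRunner normally gets from real hardware setups, a numbered "MUT" (mbed under test) map and a test_spec of target-to-toolchain assignments, then runs the suite with _opts_only_build_tests=True so everything is compiled but nothing is flashed or executed. A reduced sketch of that data assembly (the release table is a stand-in for OFFICIAL_MBED_LIBRARY_BUILD; the second row is illustrative):

    # Caller-side assembly of the two structures handed to SingleTestRunner.
    RELEASE = (
        ("LPC11U24", ("ARM", "uARM", "GCC_ARM", "IAR")),
        ("LPC1768", ("ARM", "uARM", "GCC_ARM", "IAR")),   # illustrative row
    )

    mut = {}                        # "1", "2", ... -> {"mcu": target}
    test_spec = {"targets": {}}     # target -> toolchains to build with

    for mut_counter, (target_name, toolchains) in enumerate(RELEASE, 1):
        mut[str(mut_counter)] = {"mcu": target_name}
        test_spec["targets"][target_name] = toolchains

    print(mut["1"])                      # -> {'mcu': 'LPC11U24'}
    print(sorted(test_spec["targets"]))  # -> ['LPC11U24', 'LPC1768']

Note that execute() also returns the report (bound above to new_build_report/new_build_properties), but because the script passed its own build_report/build_properties in, those same dictionaries feed the JUnit export at the bottom of the file.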

workspace_tools/test_api.py

@@ -162,6 +162,8 @@ class SingleTestRunner(object):
                  _opts_report_html_file_name=None,
                  _opts_report_junit_file_name=None,
                  _opts_report_build_file_name=None,
+                 _opts_build_report={},
+                 _opts_build_properties={},
                  _test_spec={},
                  _opts_goanna_for_mbed_sdk=None,
                  _opts_goanna_for_tests=None,
@@ -185,7 +187,8 @@ class SingleTestRunner(object):
                  _opts_waterfall_test=None,
                  _opts_consolidate_waterfall_test=None,
                  _opts_extend_test_timeout=None,
-                 _opts_auto_detect=None):
+                 _opts_auto_detect=None,
+                 _opts_include_non_automated=False):
         """ Let's try hard to init this object
         """
         from colorama import init
@@ -241,6 +244,10 @@ class SingleTestRunner(object):
         self.opts_extend_test_timeout = _opts_extend_test_timeout
         self.opts_clean = _clean
         self.opts_auto_detect = _opts_auto_detect
+        self.opts_include_non_automated = _opts_include_non_automated
+
+        self.build_report = _opts_build_report
+        self.build_properties = _opts_build_properties
 
         # File / screen logger initialization
         self.logger = CLITestLogger(file_name=self.opts_log_file_name) # Default test logger
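The pattern on this side of the change: the report dictionaries are no longer created inside execute() (see the removal further down); the caller owns them, the runner keeps references on self, and every execute_thread_slice call mutates the same objects. A toy version of that caller-owned-report pattern, assuming nothing about SingleTestRunner beyond what this diff shows (the toy deliberately avoids the mutable-default pitfall that _opts_build_report={} / _opts_build_properties={} carry: Python evaluates those {} defaults once, so instances that don't pass their own dict would share one):

    class Runner(object):
        """Toy stand-in: caller owns the report, runner mutates it in place."""
        def __init__(self, build_report=None):
            # Fresh dict per instance unless the caller supplies one.
            self.build_report = build_report if build_report is not None else {}

        def execute(self, target, toolchain):
            self.build_report.setdefault(target, []).append(toolchain)
            return self.build_report   # same object the caller handed in

    shared = {}
    Runner(shared).execute("LPC1768", "GCC_ARM")
    Runner(shared).execute("LPC1768", "ARM")
    print(shared)   # -> {'LPC1768': ['GCC_ARM', 'ARM']}, results outlive the runner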
@@ -382,7 +389,7 @@ class SingleTestRunner(object):
                     self.db_logger.update_build_id_info(self.db_logger_build_id, _extra=json.dumps(self.dump_options()))
                     self.db_logger.disconnect();
 
-            valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids)
+            valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids, self.opts_include_non_automated)
             skipped_test_map_keys = self.get_skipped_tests(test_map_keys, valid_test_map_keys)
 
             for skipped_test_id in skipped_test_map_keys:
@@ -560,8 +567,6 @@ class SingleTestRunner(object):
         if self.opts_shuffle_test_seed is not None and self.is_shuffle_seed_float():
             self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND)
 
-        build_report = {}
-        build_properties = {}
 
         if self.opts_parallel_test_exec:
             ###################################################################
@@ -575,7 +580,7 @@ class SingleTestRunner(object):
             # get information about available MUTs (per target).
             for target, toolchains in self.test_spec['targets'].iteritems():
                 self.test_suite_properties_ext[target] = {}
-                t = threading.Thread(target=self.execute_thread_slice, args = (q, target, toolchains, clean, test_ids, build_report, build_properties))
+                t = threading.Thread(target=self.execute_thread_slice, args = (q, target, toolchains, clean, test_ids, self.build_report, self.build_properties))
                 t.daemon = True
                 t.start()
                 execute_threads.append(t)
@@ -588,7 +593,7 @@ class SingleTestRunner(object):
                 if target not in self.test_suite_properties_ext:
                     self.test_suite_properties_ext[target] = {}
 
-                self.execute_thread_slice(q, target, toolchains, clean, test_ids, build_report, build_properties)
+                self.execute_thread_slice(q, target, toolchains, clean, test_ids, self.build_report, self.build_properties)
                 q.get()
 
         if self.db_logger:
@@ -597,9 +602,9 @@ class SingleTestRunner(object):
             self.db_logger.update_build_id_info(self.db_logger_build_id, _status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED)
             self.db_logger.disconnect();
 
-        return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext, build_report, build_properties
+        return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext, self.build_report, self.build_properties
 
-    def get_valid_tests(self, test_map_keys, target, toolchain, test_ids):
+    def get_valid_tests(self, test_map_keys, target, toolchain, test_ids, include_non_automated):
         valid_test_map_keys = []
 
         for test_id in test_map_keys:
@@ -626,7 +631,12 @@ class SingleTestRunner(object):
                 print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral test skipped for target %s'% (target))
                 continue
 
-            if test.automated and test.is_supported(target, toolchain):
+            if not include_non_automated and not test.automated:
+                if self.opts_verbose_skipped_tests:
+                    print self.logger.log_line(self.logger.LogType.INFO, 'Non automated test skipped for target %s'% (target))
+                continue
+
+            if test.is_supported(target, toolchain):
                 if test.peripherals is None and self.opts_only_build_tests:
                     # When users are using 'build only flag' and test do not have
                     # specified peripherals we can allow test building by default
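The last hunk is the behavioral core of _opts_include_non_automated: the old single condition (test.automated and test.is_supported(...)) silently dropped non-automated tests, whereas they now get their own skip path, taken only when include_non_automated is False, with the support check made separately. build_release.py passes True, so release builds also compile tests that would never run in an automated session. The predicate in isolation, with a minimal stand-in for TEST_MAP entries:

    class Test(object):
        """Minimal stand-in for a TEST_MAP entry."""
        def __init__(self, automated, supported):
            self.automated = automated
            self._supported = supported

        def is_supported(self, target, toolchain):
            return self._supported

    def is_valid(test, target, toolchain, include_non_automated):
        # New shape of the get_valid_tests check, isolated from logging.
        if not include_non_automated and not test.automated:
            return False   # skipped (logged when opts_verbose_skipped_tests is set)
        return test.is_supported(target, toolchain)

    manual = Test(automated=False, supported=True)
    print(is_valid(manual, "LPC1768", "GCC_ARM", False))  # -> False (old behavior)
    print(is_valid(manual, "LPC1768", "GCC_ARM", True))   # -> True  (release builds)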