Merge pull request #1040 from bridadan/ci

Continuous Integration Features
Martin Kojtal 2015-04-21 08:54:42 +01:00
commit 289b8064e0
12 changed files with 466 additions and 140 deletions

View File

@@ -1,8 +1,9 @@
---
python:
- "2.7"
script: "python workspace_tools/build_travis.py"
install:
- "sudo $TRAVIS_BUILD_DIR/travis/install_dependencies.sh > /dev/null"
- sudo pip install colorama
- sudo pip install prettytable
- sudo pip install jinja2
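
The three pip packages above match the Python-level imports the build and reporting scripts rely on, with jinja2 newly added for the HTML report templates. A minimal local sanity check, a sketch that is not part of this commit:

import importlib

# Fails with ImportError naming the missing package if the
# CI dependencies installed above are not available locally.
for module_name in ("colorama", "prettytable", "jinja2"):
    importlib.import_module(module_name)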

View File

@@ -29,6 +29,8 @@ from workspace_tools.paths import MBED_TARGETS_PATH, MBED_LIBRARIES, MBED_API, M
from workspace_tools.targets import TARGET_NAMES, TARGET_MAP
from workspace_tools.libraries import Library
from workspace_tools.toolchains import TOOLCHAIN_CLASSES
from jinja2 import FileSystemLoader
from jinja2.environment import Environment
def build_project(src_path, build_path, target, toolchain_name,
@@ -529,3 +531,20 @@ def print_build_results(result_list, build_name):
result += "\n".join([" * %s" % f for f in result_list])
result += "\n"
return result
def write_build_report(build_report, template_filename, filename):
build_report_failing = []
build_report_passing = []
for report in build_report:
if len(report["failing"]) > 0:
build_report_failing.append(report)
else:
build_report_passing.append(report)
env = Environment(extensions=['jinja2.ext.with_'])
env.loader = FileSystemLoader('ci_templates')
template = env.get_template(template_filename)
with open(filename, 'w+') as f:
f.write(template.render(failing_builds=build_report_failing, passing_builds=build_report_passing))
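
A hedged usage sketch of write_build_report: the list shape mirrors what build_release.py and test_api.py assemble below (one dict per target with passing/failing/skipped toolchain entries); the targets, toolchains, and output filename are hypothetical, and the call assumes the working directory contains ci_templates/:

# Illustrative input only; real reports are assembled by the callers below.
example_build_report = [
    {"target": "LPC1768", "passing": [{"toolchain": "ARM"}], "failing": [], "skipped": []},
    {"target": "K64F", "passing": [], "failing": [{"toolchain": "GCC_ARM"}], "skipped": []},
]

# Renders ci_templates/library_build/report.html into build_report.html
write_build_report(example_build_report, 'library_build/report.html', 'build_report.html')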

View File

@@ -24,6 +24,7 @@ ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
from workspace_tools.build_api import build_mbed_libs
from workspace_tools.build_api import write_build_report
from workspace_tools.targets import TARGET_MAP
OFFICIAL_MBED_LIBRARY_BUILD = (
@@ -98,31 +99,67 @@ if __name__ == '__main__':
                      default=1, help="Number of concurrent jobs (default 1). Use 0 for auto based on host machine's number of CPUs")
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                      default=False, help="Verbose diagnostic output")
    parser.add_option("-t", "--toolchains", dest="toolchains", help="Use toolchains names separated by comma")
    parser.add_option("", "--report-build", dest="report_build_file_name", help="Output the build results to an html file")

    options, args = parser.parse_args()

    start = time()
    failures = []
    successes = []
    skips = []
    build_report = []

    for target_name, toolchain_list in OFFICIAL_MBED_LIBRARY_BUILD:
        if options.official_only:
            toolchains = (getattr(TARGET_MAP[target_name], 'default_toolchain', 'ARM'),)
        else:
            toolchains = toolchain_list

        if options.toolchains:
            print "Only building using the following toolchains: %s" % (options.toolchains)
            toolchainSet = set(toolchains)
            # Build only the toolchains that are both available and requested
            toolchains = toolchainSet & set((options.toolchains).split(','))

        cur_target_build_report = { "target": target_name, "passing": [], "failing": [], "skipped": []}

        for toolchain in toolchains:
            id = "%s::%s" % (target_name, toolchain)
            try:
                built_mbed_lib = build_mbed_libs(TARGET_MAP[target_name], toolchain, verbose=options.verbose, jobs=options.jobs)

                if built_mbed_lib:
                    successes.append(id)
                    cur_target_build_report["passing"].append({ "toolchain": toolchain })
                else:
                    skips.append(id)
                    cur_target_build_report["skipped"].append({ "toolchain": toolchain })

            except Exception, e:
                failures.append(id)
                cur_target_build_report["failing"].append({ "toolchain": toolchain })
                print e

        if len(toolchains) > 0:
            build_report.append(cur_target_build_report)

    # Write summary of the builds
    if options.report_build_file_name:
        write_build_report(build_report, 'library_build/report.html', options.report_build_file_name)

    print "\n\nCompleted in: (%.2f)s" % (time() - start)

    if successes:
        print "\n\nBuild successes:"
        print "\n".join([" * %s" % s for s in successes])

    if skips:
        print "\n\nBuild skips:"
        print "\n".join([" * %s" % s for s in skips])

    if failures:
        print "\n\nBuild failures:"
        print "\n".join([" * %s" % f for f in failures])

View File

@@ -0,0 +1,31 @@
<div class="toggleshow{% if report.failing|length == 0 %} toggleshow-hide{% endif %}">
<h3>
<a href="#" class="toggleshow-title">
<span class="toggleshow-arrow"></span>
{% if report.failing|length > 0 %}
<span class="redbold">[FAIL]</span>
{% else %}
<span class="greenbold">[PASS]</span>
{% endif %}
{{report.target}} - Passing: {{report.passing|length}}, Failing: {{report.failing|length}}, Skipped: {{report.skipped|length}}
</a>
</h3>
<div class="toggleshow-body">
<h4 class="redbold">Failing</h4>
{% with build = report.failing %}
{% include 'library_build/build_report_table.html' %}
{% endwith %}
<h4 class="greenbold">Passing</h4>
{% with build = report.passing %}
{% include 'library_build/build_report_table.html' %}
{% endwith %}
<h4>Skipped</h4>
{% with build = report.skipped %}
{% include 'library_build/build_report_table.html' %}
{% endwith %}
</div>
</div>
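
Note: this partial expects a report variable in its render context; the report.html template below supplies it once per target via its for-loops over failing_builds and passing_builds.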

View File

@@ -0,0 +1,10 @@
<table class="sortable pane bigtable stripped-odd">
<tr>
<th>Toolchain</th>
</tr>
{% for run in build %}
<tr>
<td>{{run.toolchain}}</td>
</tr>
{% endfor %}
</table>

View File

@@ -0,0 +1,11 @@
<h2>{{failing_builds|length}} Failing Builds</h2>
{% for report in failing_builds %}
{% include 'library_build/build_report.html' %}
{% endfor %}
<h2>{{passing_builds|length}} Passing Builds</h2>
{% for report in passing_builds %}
{% include 'library_build/build_report.html' %}
{% endfor %}
{% include 'scripts.js' %}

View File

@@ -0,0 +1,53 @@
<script>
var elements = document.querySelectorAll(".toggleshow"),
    hideClass = 'toggleshow-hide';

for (var i = 0; i < elements.length; i++) {
    var arrow = elements[i].querySelector(".toggleshow-arrow");

    // Initial hide/show based on class
    // Update arrow as well
    if (containsClass(elements[i], hideClass)) {
        toggleDisplay(elements[i]);
        changeArrow(arrow, false);
    } else {
        changeArrow(arrow, true);
    }

    // Add click handler
    addClick(elements[i], toggleDisplay);
}

function containsClass(element, className) {
    var eleClassName = ' ' + element.className + ' ';
    return eleClassName.indexOf(' ' + className + ' ') > -1;
}

function toggleDisplay(parentElement) {
    var body = parentElement.querySelector(".toggleshow-body"),
        arrow = parentElement.querySelector(".toggleshow-arrow");

    if (body.style.display == 'block' || body.style.display == '') {
        body.style.display = 'none';
        changeArrow(arrow, false);
    } else {
        body.style.display = 'block';
        changeArrow(arrow, true);
    }
}

function changeArrow(element, visible) {
    if (visible) {
        element.innerHTML = '&#9650;';
    } else {
        element.innerHTML = '&#9660;';
    }
}

function addClick(parentElement, func) {
    parentElement.querySelector(".toggleshow-title").addEventListener("click", function(e) {
        func(parentElement);
        e.preventDefault();
        return false;
    });
}
</script>
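
Note: scripts.js is not referenced as an external asset; both report.html templates inline it with the Jinja2 include directive, so the generated report is a single self-contained HTML file.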

View File

@@ -0,0 +1,31 @@
<div class="toggleshow{% if report.failing|length == 0 %} toggleshow-hide{% endif %}">
<h3>
<a href="#" class="toggleshow-title">
<span class="toggleshow-arrow"></span>
{% if report.failing|length > 0 %}
<span class="redbold">[FAIL]</span>
{% else %}
<span class="greenbold">[PASS]</span>
{% endif %}
{{report.target}} - Passing: {{report.passing|length}}, Failing: {{report.failing|length}}, Skipped: {{report.skipped|length}}
</a>
</h3>
<div class="toggleshow-body">
<h4 class="redbold">Failing</h4>
{% with build = report.failing %}
{% include 'tests_build/build_report_table.html' %}
{% endwith %}
<h4 class="greenbold">Passing</h4>
{% with build = report.passing %}
{% include 'tests_build/build_report_table.html' %}
{% endwith %}
<h4>Skipped</h4>
{% with build = report.skipped %}
{% include 'tests_build/build_report_table.html' %}
{% endwith %}
</div>
</div>

View File

@@ -0,0 +1,12 @@
<table class="sortable pane bigtable stripped-odd">
<tr>
<th>Toolchain</th>
<th>Project</th>
</tr>
{% for run in build %}
<tr>
<td>{{run.toolchain}}</td>
<td>{{run.project}}</td>
</tr>
{% endfor %}
</table>

View File

@@ -0,0 +1,11 @@
<h2>{{failing_builds|length}} Failing Builds</h2>
{% for report in failing_builds %}
{% include 'tests_build/build_report.html' %}
{% endfor %}
<h2>{{passing_builds|length}} Passing Builds</h2>
{% for report in passing_builds %}
{% include 'tests_build/build_report.html' %}
{% endfor %}
{% include 'scripts.js' %}

View File

@@ -209,6 +209,7 @@ if __name__ == '__main__':
                                   _opts_log_file_name=opts.log_file_name,
                                   _opts_report_html_file_name=opts.report_html_file_name,
                                   _opts_report_junit_file_name=opts.report_junit_file_name,
                                   _opts_report_build_file_name=opts.report_build_file_name,
                                   _test_spec=test_spec,
                                   _opts_goanna_for_mbed_sdk=opts.goanna_for_mbed_sdk,
                                   _opts_goanna_for_tests=opts.goanna_for_tests,

View File

@@ -48,6 +48,7 @@ from workspace_tools.targets import TARGET_MAP
from workspace_tools.test_db import BaseDBAccess
from workspace_tools.build_api import build_project, build_mbed_libs, build_lib
from workspace_tools.build_api import get_target_supported_toolchains
from workspace_tools.build_api import write_build_report
from workspace_tools.libraries import LIBRARIES, LIBRARY_MAP
from workspace_tools.toolchains import TOOLCHAIN_BIN_PATH
from workspace_tools.test_exporters import ReportExporter, ResultExporterType
@@ -153,6 +154,7 @@ class SingleTestRunner(object):
                 _opts_log_file_name=None,
                 _opts_report_html_file_name=None,
                 _opts_report_junit_file_name=None,
                 _opts_report_build_file_name=None,
                 _test_spec={},
                 _opts_goanna_for_mbed_sdk=None,
                 _opts_goanna_for_tests=None,
@@ -205,6 +207,7 @@ class SingleTestRunner(object):
        self.opts_log_file_name = _opts_log_file_name
        self.opts_report_html_file_name = _opts_report_html_file_name
        self.opts_report_junit_file_name = _opts_report_junit_file_name
        self.opts_report_build_file_name = _opts_report_build_file_name
        self.opts_goanna_for_mbed_sdk = _opts_goanna_for_mbed_sdk
        self.opts_goanna_for_tests = _opts_goanna_for_tests
        self.opts_shuffle_test_order = _opts_shuffle_test_order
@@ -294,8 +297,17 @@ class SingleTestRunner(object):
    test_summary_ext = {}
    execute_thread_slice_lock = Lock()

    def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report):
        for toolchain in toolchains:
            # Toolchain specific build successes and failures.
            # This per-toolchain dict is aggregated into the final build_report
            # in execute() below and ultimately rendered by write_build_report.
            build_report[toolchain] = {
                "mbed_failure": False,
                "library_failure": False,
                "library_build_passing": [],
                "library_build_failing": [],
                "test_build_passing": [],
                "test_build_failing": []
            }

            # print target, toolchain
            # Test suite properties returned to external tools like CI
            test_suite_properties = {}
@@ -306,6 +318,7 @@ class SingleTestRunner(object):
            test_suite_properties['toolchain'] = toolchain
            test_suite_properties['shuffle_random_seed'] = self.shuffle_random_seed

            # print '=== %s::%s ===' % (target, toolchain)
            # Let's build our test
            if target not in TARGET_MAP:
@@ -328,9 +341,9 @@ class SingleTestRunner(object):
                    continue
                except ToolException:
                    print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain))
                    build_report[toolchain]["mbed_failure"] = True
                    #return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
                    continue

            build_dir = join(BUILD_DIR, "test", target, toolchain)
@@ -340,6 +353,7 @@ class SingleTestRunner(object):
            # Enumerate through all tests and shuffle test order if requested
            test_map_keys = sorted(TEST_MAP.keys())

            if self.opts_shuffle_test_order:
                random.shuffle(test_map_keys, self.shuffle_random_func)

            # Update database with shuffle seed if applicable
@@ -358,148 +372,130 @@ class SingleTestRunner(object):
                    self.db_logger.update_build_id_info(self.db_logger_build_id, _extra=json.dumps(self.dump_options()))
                    self.db_logger.disconnect();

            valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids)
            skipped_test_map_keys = self.get_skipped_tests(test_map_keys, valid_test_map_keys)

            for skipped_test_id in skipped_test_map_keys:
                test_suite_properties['skipped'].append(skipped_test_id)

            # First pass through all tests and determine which libraries need to be built
            libraries = set()
            for test_id in valid_test_map_keys:
                test = TEST_MAP[test_id]

                # Detect which lib should be added to test
                # Some libs have to be compiled, like RTOS or ETH
                for lib in LIBRARIES:
                    if lib['build_dir'] in test.dependencies:
                        libraries.add(lib['id'])

            build_project_options = ["analyze"] if self.opts_goanna_for_tests else None
            clean_project_options = True if self.opts_goanna_for_tests or clean or self.opts_clean else None

            # Build all required libraries
            for lib_id in libraries:
                try:
                    build_lib(lib_id,
                              T,
                              toolchain,
                              options=build_project_options,
                              verbose=self.opts_verbose,
                              clean=clean_mbed_libs_options,
                              jobs=self.opts_jobs)
                    build_report[toolchain]["library_build_passing"].append(lib_id)
                except ToolException:
                    print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building library %s'% (lib_id))
                    build_report[toolchain]["library_failure"] = True
                    build_report[toolchain]["library_build_failing"].append(lib_id)
                    #return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
                    continue

            # Second pass: build the tests themselves
            for test_id in valid_test_map_keys:
                test = TEST_MAP[test_id]
                test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries)

                # TODO: move these 2 loops below to a separate function
                INC_DIRS = []
                for lib_id in libraries:
                    if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
                        INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])

                MACROS = []
                for lib_id in libraries:
                    if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
                        MACROS.extend(LIBRARY_MAP[lib_id]['macros'])

                MACROS.append('TEST_SUITE_TARGET_NAME="%s"'% target)
                MACROS.append('TEST_SUITE_TEST_ID="%s"'% test_id)
                test_uuid = uuid.uuid4()
                MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))

                project_name = self.opts_firmware_global_name if self.opts_firmware_global_name else None
                try:
                    path = build_project(test.source_dir,
                                         join(build_dir, test_id),
                                         T,
                                         toolchain,
                                         test.dependencies,
                                         options=build_project_options,
                                         clean=clean_project_options,
                                         verbose=self.opts_verbose,
                                         name=project_name,
                                         macros=MACROS,
                                         inc_dirs=INC_DIRS,
                                         jobs=self.opts_jobs)
                    build_report[toolchain]["test_build_passing"].append(test_id)
                except ToolException:
                    project_name_str = project_name if project_name is not None else test_id
                    print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building project %s'% (project_name_str))
                    build_report[toolchain]["test_build_failing"].append(test_id)
                    # return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
                    continue

                if self.opts_only_build_tests:
                    # With this option we are skipping testing phase
                    continue

                # Test duration can be increased by global value
                test_duration = test.duration
                if self.opts_extend_test_timeout is not None:
                    test_duration += self.opts_extend_test_timeout

                # For an automated test the duration acts as a timeout after
                # which the test gets interrupted
                test_spec = self.shape_test_request(target, path, test_id, test_duration)
                test_loops = self.get_test_loop_count(test_id)

                test_suite_properties['test.duration.%s.%s.%s'% (target, toolchain, test_id)] = test_duration
                test_suite_properties['test.loops.%s.%s.%s'% (target, toolchain, test_id)] = test_loops
                test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path

                # read MUTs, test specification and perform tests
                single_test_result, detailed_test_results = self.handle(test_spec, target, toolchain, test_loops=test_loops)

                # Append test results to global test summary
                if single_test_result is not None:
                    self.test_summary.append(single_test_result)

                # Prepare extended test results data structure (it can be used to generate detailed test report)
                if toolchain not in self.test_summary_ext:
                    self.test_summary_ext[toolchain] = {}  # test_summary_ext : toolchain
                if target not in self.test_summary_ext[toolchain]:
                    self.test_summary_ext[toolchain][target] = {}  # test_summary_ext : toolchain : target
                if test_id not in self.test_summary_ext[toolchain][target]:
                    self.test_summary_ext[toolchain][target][test_id] = detailed_test_results  # test_summary_ext : toolchain : target : test_id

            test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
            self.test_suite_properties_ext[target][toolchain] = test_suite_properties

        # return self.test_summary, self.shuffle_random_seed, test_summary_ext, self.test_suite_properties_ext
        q.put(target + '_'.join(toolchains))
        return
@@ -514,6 +510,8 @@ class SingleTestRunner(object):
        if self.opts_shuffle_test_seed is not None and self.is_shuffle_seed_float():
            self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND)

        build_reports = []

        if self.opts_parallel_test_exec:
            ###################################################################
            # Experimental, parallel test execution per singletest instance.
@@ -526,7 +524,9 @@ class SingleTestRunner(object):
            # get information about available MUTs (per target).
            for target, toolchains in self.test_spec['targets'].iteritems():
                self.test_suite_properties_ext[target] = {}
                cur_build_report = {}
                t = threading.Thread(target=self.execute_thread_slice, args = (q, target, toolchains, clean, test_ids, cur_build_report))
                build_reports.append({ "target": target, "report": cur_build_report})
                t.daemon = True
                t.start()
                execute_threads.append(t)
@@ -538,16 +538,118 @@ class SingleTestRunner(object):
            for target, toolchains in self.test_spec['targets'].iteritems():
                if target not in self.test_suite_properties_ext:
                    self.test_suite_properties_ext[target] = {}

                cur_build_report = {}
                self.execute_thread_slice(q, target, toolchains, clean, test_ids, cur_build_report)
                build_reports.append({ "target": target, "report": cur_build_report})
                q.get()

        build_report = []

        for target_build_report in build_reports:
            cur_report = {
                "target": target_build_report["target"],
                "passing": [],
                "failing": []
            }

            for toolchain in sorted(target_build_report["report"], key=target_build_report["report"].get):
                print "%s - %s" % (target_build_report["target"], toolchain)
                report = target_build_report["report"][toolchain]

                if report["mbed_failure"]:
                    cur_report["failing"].append({
                        "toolchain": toolchain,
                        "project": "mbed library"
                    })
                else:
                    for failing_library in report["library_build_failing"]:
                        cur_report["failing"].append({
                            "toolchain": toolchain,
                            "project": "Library::%s" % (failing_library)
                        })

                    for passing_library in report["library_build_passing"]:
                        cur_report["passing"].append({
                            "toolchain": toolchain,
                            "project": "Library::%s" % (passing_library)
                        })

                    for passing_test in report["test_build_passing"]:
                        cur_report["passing"].append({
                            "toolchain": toolchain,
                            "project": "Test::%s" % (passing_test)
                        })

                    for failing_test in report["test_build_failing"]:
                        cur_report["failing"].append({
                            "toolchain": toolchain,
                            "project": "Test::%s" % (failing_test)
                        })

            build_report.append(cur_report)

        if self.db_logger:
            self.db_logger.reconnect();
            if self.db_logger.is_connected():
                self.db_logger.update_build_id_info(self.db_logger_build_id, _status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED)
                self.db_logger.disconnect();

        return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext, build_report
    def get_valid_tests(self, test_map_keys, target, toolchain, test_ids):
        valid_test_map_keys = []

        for test_id in test_map_keys:
            test = TEST_MAP[test_id]

            if self.opts_test_by_names and test_id not in self.opts_test_by_names.split(','):
                continue

            if test_ids and test_id not in test_ids:
                continue

            if self.opts_test_only_peripheral and not test.peripherals:
                if self.opts_verbose_skipped_tests:
                    print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target))
                continue

            if self.opts_peripheral_by_names and test.peripherals and not len([i for i in test.peripherals if i in self.opts_peripheral_by_names.split(',')]):
                # We will skip tests not forced with -p option
                if self.opts_verbose_skipped_tests:
                    print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target))
                continue

            if self.opts_test_only_common and test.peripherals:
                if self.opts_verbose_skipped_tests:
                    print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral test skipped for target %s'% (target))
                continue

            if test.automated and test.is_supported(target, toolchain):
                if test.peripherals is None and self.opts_only_build_tests:
                    # When users are using the 'build only' flag and a test does not have
                    # specified peripherals, we can allow the test to build by default
                    pass
                elif self.opts_peripheral_by_names and test_id not in self.opts_peripheral_by_names.split(','):
                    # If we force peripheral with option -p we expect test
                    # to pass even if peripheral is not in MUTs file.
                    pass
                elif not self.is_peripherals_available(target, test.peripherals):
                    if self.opts_verbose_skipped_tests:
                        if test.peripherals:
                            print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral %s test skipped for target %s'% (",".join(test.peripherals), target))
                        else:
                            print self.logger.log_line(self.logger.LogType.INFO, 'Test %s skipped for target %s'% (test_id, target))
                    continue

            # The test has made it through all the filters, so add it to the valid tests list
            valid_test_map_keys.append(test_id)

        return valid_test_map_keys

    def get_skipped_tests(self, all_test_map_keys, valid_test_map_keys):
        # NOTE: This will not preserve order
        return list(set(all_test_map_keys) - set(valid_test_map_keys))
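
As the NOTE says, the set difference is concise but discards the original key order. If stable report ordering ever mattered, an order-preserving variant is a one-liner; this is a sketch of an alternative, not what this commit does:

def get_skipped_tests_ordered(all_test_map_keys, valid_test_map_keys):
    # Same result as the set difference above, but keeps the order of all_test_map_keys
    valid = set(valid_test_map_keys)
    return [test_id for test_id in all_test_map_keys if test_id not in valid]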
    def generate_test_summary_by_target(self, test_summary, shuffle_seed=None):
        """ Prints well-formed summary with results (SQL table like)
@@ -1314,7 +1416,7 @@ def singletest_in_cli_mode(single_test):
"""
start = time()
# Execute tests depending on options and filter applied
test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext = single_test.execute()
test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report = single_test.execute()
elapsed_time = time() - start
# Human readable summary
@@ -1333,9 +1435,12 @@ def singletest_in_cli_mode(single_test):
        report_exporter = ReportExporter(ResultExporterType.HTML)
        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)

    if single_test.opts_report_junit_file_name:
        # Export results in form of JUnit compliant XML report to separate file
        report_exporter = ReportExporter(ResultExporterType.JUNIT)
        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)

    if single_test.opts_report_build_file_name:
        # Export build results as HTML report to separate file
        write_build_report(build_report, 'tests_build/report.html', single_test.opts_report_build_file_name)
class TestLogger():
@@ -1706,6 +1811,10 @@ def get_default_test_options_parser():
                      dest='report_junit_file_name',
                      help='You can log test suite results in form of JUnit compliant XML report')

    parser.add_option("", "--report-build",
                      dest="report_build_file_name",
                      help="Output the build results to an html file")

    parser.add_option('', '--verbose-skipped',
                      dest='verbose_skipped_tests',
                      default=False,