Merged build_release report format into test junit format

pull/1423/head
Brian Daniels 2015-10-29 15:48:25 -05:00
parent 228a61d49b
commit 6ee94dee1e
3 changed files with 105 additions and 67 deletions

View File

@@ -27,6 +27,7 @@ sys.path.insert(0, ROOT)
from workspace_tools.build_api import build_mbed_libs
from workspace_tools.build_api import write_build_report
from workspace_tools.targets import TARGET_MAP
from workspace_tools.test_exporters import ReportExporter, ResultExporterType
OFFICIAL_MBED_LIBRARY_BUILD = (
('LPC11U24', ('ARM', 'uARM', 'GCC_ARM', 'IAR')),
@@ -145,11 +146,13 @@ if __name__ == '__main__':
options, args = parser.parse_args()
id_name = "MBED"
start = time()
failures = []
successes = []
skips = []
build_report = []
report = {}
properties = {}
platforms = None
if options.platforms != "":
@@ -170,34 +173,63 @@ if __name__ == '__main__':
toolchainSet = set(toolchains)
toolchains = toolchainSet and set((options.toolchains).split(','))
cur_target_build_report = { "target": target_name, "passing": [], "failing": [], "skipped": []}
for toolchain in toolchains:
if not target_name in report:
report[target_name] = {}
if not toolchain in report[target_name]:
report[target_name][toolchain] = {}
if not id_name in report[target_name][toolchain]:
report[target_name][toolchain][id_name] = []
if not target_name in properties:
properties[target_name] = {}
if not toolchain in properties[target_name]:
properties[target_name][toolchain] = {}
properties[target_name][toolchain]["target"] = target_name
properties[target_name][toolchain]["toolchain"] = toolchain
id = "%s::%s" % (target_name, toolchain)
start = time()
cur_result = {}
cur_result["toolchain_name"] = toolchain
cur_result["target_name"] = target_name
cur_result["id"] = id_name
cur_result["description"] = "mbed SDK"
try:
built_mbed_lib = build_mbed_libs(TARGET_MAP[target_name], toolchain, verbose=options.verbose, jobs=options.jobs)
end = time()
cur_result["elapsed_time"] = end - start
cur_result["output"] = ""
if built_mbed_lib:
successes.append(id)
cur_target_build_report["passing"].append({ "toolchain": toolchain })
cur_result["result"] = "OK"
else:
skips.append(id)
cur_target_build_report["skipped"].append({ "toolchain": toolchain })
cur_result["result"] = "SKIP"
except Exception, e:
failures.append(id)
cur_target_build_report["failing"].append({ "toolchain": toolchain })
print e
exc_type, exc_value, exc_tb = sys.exc_info()
end = time()
cur_result["result"] = "FAIL"
cur_result["output"] = str(e)
cur_result["elapsed_time"] = end - start
print str(e)
if len(toolchains) > 0:
build_report.append(cur_target_build_report)
cur_result_wrap = { 0: cur_result }
report[target_name][toolchain][id_name].append(cur_result_wrap)
# Write summary of the builds
if options.report_build_file_name:
write_build_report(build_report, 'library_build/report.html', options.report_build_file_name)
report_exporter = ReportExporter(ResultExporterType.JUNIT)
report_exporter.report_to_file(report, options.report_build_file_name, test_suite_properties=properties)
print "\n\nCompleted in: (%.2f)s" % (time() - start)

View File

@@ -456,10 +456,10 @@ class SingleTestRunner(object):
MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))
# Prepare extended test results data structure (it can be used to generate detailed test report)
if toolchain not in self.test_summary_ext:
self.test_summary_ext[toolchain] = {} # test_summary_ext : toolchain
if target not in self.test_summary_ext[toolchain]:
self.test_summary_ext[toolchain][target] = {} # test_summary_ext : toolchain : target
if target not in self.test_summary_ext:
self.test_summary_ext[target] = {} # test_summary_ext : toolchain
if toolchain not in self.test_summary_ext[target]:
self.test_summary_ext[target][toolchain] = {} # test_summary_ext : toolchain : target
tt_test_id = "%s::%s::%s" % (toolchain, target, test_id) # For logging only
@@ -491,17 +491,17 @@ class SingleTestRunner(object):
)
# Add detailed test result to test summary structure
if test_id not in self.test_summary_ext[toolchain][target]:
self.test_summary_ext[toolchain][target][test_id] = []
if test_id not in self.test_summary_ext[target][toolchain]:
self.test_summary_ext[target][toolchain][test_id] = []
self.test_summary_ext[toolchain][target][test_id].append({ 0: {
'single_test_result' : self.TEST_RESULT_BUILD_FAILED,
'single_test_output' : '',
self.test_summary_ext[target][toolchain][test_id].append({ 0: {
'result' : self.TEST_RESULT_BUILD_FAILED,
'output' : '',
'target_name' : target,
'target_name_unique': target,
'toolchain_name' : toolchain,
'test_id' : test_id,
'test_description' : 'Toolchain build failed',
'id' : test_id,
'description' : 'Toolchain build failed',
'elapsed_time' : 0,
'duration' : 0,
'copy_method' : None
@@ -543,9 +543,9 @@ class SingleTestRunner(object):
self.test_summary.append(single_test_result)
# Add detailed test result to test summary structure
if target not in self.test_summary_ext[toolchain][target]:
if test_id not in self.test_summary_ext[toolchain][target]:
self.test_summary_ext[toolchain][target][test_id] = []
if target not in self.test_summary_ext[target][toolchain]:
if test_id not in self.test_summary_ext[target][toolchain]:
self.test_summary_ext[target][toolchain][test_id] = []
append_test_result = detailed_test_results
@@ -554,7 +554,7 @@ class SingleTestRunner(object):
if self.opts_waterfall_test and self.opts_consolidate_waterfall_test:
append_test_result = {0: detailed_test_results[len(detailed_test_results) - 1]}
self.test_summary_ext[toolchain][target][test_id].append(append_test_result)
self.test_summary_ext[target][toolchain][test_id].append(append_test_result)
test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
self.test_suite_properties_ext[target][toolchain] = test_suite_properties
@@ -945,13 +945,13 @@ class SingleTestRunner(object):
elapsed_time = single_testduration # Time of single test case execution after reset
detailed_test_results[test_index] = {
'single_test_result' : single_test_result,
'single_test_output' : single_test_output,
'result' : single_test_result,
'output' : single_test_output,
'target_name' : target_name,
'target_name_unique' : target_name_unique,
'toolchain_name' : toolchain_name,
'test_id' : test_id,
'test_description' : test_description,
'id' : test_id,
'description' : test_description,
'elapsed_time' : round(elapsed_time, 2),
'duration' : single_timeout,
'copy_method' : _copy_method,

View File

@@ -35,11 +35,11 @@ class ReportExporter():
u'uARM': { u'LPC1768': { 'MBED_2': { 0: { 'copy_method': 'shutils.copy()',
'duration': 20,
'elapsed_time': 1.7929999828338623,
'single_test_output': 'Host test instrumentation on ...\r\n',
'single_test_result': 'OK',
'output': 'Host test instrumentation on ...\r\n',
'result': 'OK',
'target_name': u'LPC1768',
'test_description': 'stdio',
'test_id': u'MBED_2',
'description': 'stdio',
'id': u'MBED_2',
'toolchain_name': u'uARM'}},
"""
CSS_STYLE = """<style>
@@ -111,8 +111,8 @@ class ReportExporter():
'OTHER': 'LightGray',
}
tooltip_name = self.get_tooltip_name(test['toolchain_name'], test['target_name'], test['test_id'], test_no)
background_color = RESULT_COLORS[test['single_test_result'] if test['single_test_result'] in RESULT_COLORS else 'OTHER']
tooltip_name = self.get_tooltip_name(test['toolchain_name'], test['target_name'], test['id'], test_no)
background_color = RESULT_COLORS[test['result'] if test['result'] in RESULT_COLORS else 'OTHER']
result_div_style = "background-color: %s"% background_color
result = """<div class="name" style="%s" onmouseover="show(%s)" onmouseout="hide(%s)">
@@ -130,12 +130,12 @@ class ReportExporter():
"""% (result_div_style,
tooltip_name,
tooltip_name,
test['single_test_result'],
test['result'],
tooltip_name,
test['target_name_unique'],
test['test_description'],
test['description'],
test['elapsed_time'],
test['single_test_output'].replace('\n', '<br />'))
test['output'].replace('\n', '<br />'))
return result
def get_result_tree(self, test_results):
@@ -160,11 +160,11 @@ class ReportExporter():
We need this to create complete list of all test ran.
"""
result = []
toolchains = test_result_ext.keys()
for toolchain in toolchains:
targets = test_result_ext[toolchain].keys()
for target in targets:
tests = test_result_ext[toolchain][target].keys()
targets = test_result_ext.keys()
for target in targets:
toolchains = test_result_ext[target].keys()
for toolchain in toolchains:
tests = test_result_ext[target][toolchain].keys()
result.extend(tests)
return sorted(list(set(result)))
@@ -185,15 +185,15 @@ class ReportExporter():
"""% (self.CSS_STYLE, self.JAVASCRIPT)
unique_test_ids = self.get_all_unique_test_ids(test_result_ext)
toolchains = sorted(test_result_ext.keys())
targets = sorted(test_result_ext.keys())
result += '<table><tr>'
for toolchain in toolchains:
targets = sorted(test_result_ext[toolchain].keys())
for target in targets:
toolchains = sorted(test_result_ext[target].keys())
for target in targets:
result += '<td></td>'
result += '<td></td>'
tests = sorted(test_result_ext[toolchain][target].keys())
tests = sorted(test_result_ext[target][toolchain].keys())
for test in unique_test_ids:
result += """<td align="center">%s</td>"""% test
result += """</tr>
@@ -203,7 +203,7 @@ class ReportExporter():
"""% (toolchain, target)
for test in unique_test_ids:
test_result = self.get_result_tree(test_result_ext[toolchain][target][test]) if test in tests else ''
test_result = self.get_result_tree(test_result_ext[target][toolchain][test]) if test in tests else ''
result += '<td>%s</td>'% (test_result)
result += '</tr>'
@@ -246,34 +246,40 @@ class ReportExporter():
test_suites = []
test_cases = []
toolchains = sorted(test_result_ext.keys())
for toolchain in toolchains:
targets = sorted(test_result_ext[toolchain].keys())
for target in targets:
targets = sorted(test_result_ext.keys())
for target in targets:
toolchains = sorted(test_result_ext[target].keys())
for toolchain in toolchains:
test_cases = []
tests = sorted(test_result_ext[toolchain][target].keys())
tests = sorted(test_result_ext[target][toolchain].keys())
for test in tests:
test_results = test_result_ext[toolchain][target][test]
test_results = test_result_ext[target][toolchain][test]
for test_res in test_results:
test_ids = sorted(test_res.keys())
for test_no in test_ids:
test_result = test_res[test_no]
name = test_result['test_description']
classname = 'test.%s.%s.%s'% (target, toolchain, test_result['test_id'])
name = test_result['description']
classname = 'test.%s.%s.%s'% (target, toolchain, test_result['id'])
elapsed_sec = test_result['elapsed_time']
_stdout = test_result['single_test_output']
_stderr = test_result['target_name_unique']
_stdout = test_result['output']
if 'target_name_unique' in test_result:
_stderr = test_result['target_name_unique']
else:
_stderr = test_result['target_name']
# Test case
tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)
# Test case extra failure / error info
if test_result['single_test_result'] == 'FAIL':
message = test_result['single_test_result']
if test_result['result'] == 'FAIL':
message = test_result['result']
tc.add_failure_info(message, _stdout)
elif test_result['single_test_result'] != 'OK':
message = test_result['single_test_result']
elif test_result['result'] != 'OK':
message = test_result['result']
tc.add_error_info(message, _stdout)
test_cases.append(tc)
ts = TestSuite("test.suite.%s.%s"% (target, toolchain), test_cases, properties=test_suite_properties[target][toolchain])
test_suites.append(ts)
return TestSuite.to_xml_string(test_suites)