mirror of https://github.com/ARMmbed/mbed-os.git
Merge pull request #1094 from PrzemekWirkus/devel_add_multiple_same_targets
Tools: Added support for testing of multiple targets of the same type (board type)
commit d2cab9778f
@@ -106,6 +106,7 @@ class HostTestPluginBase:
         """ Runs command from command line.
         """
         result = True
         ret = 0
         try:
             ret = call(cmd, shell=shell)
             if ret:
@@ -51,10 +51,11 @@ class HostTestPluginCopyMethod_Shell(HostTestPluginBase):
         if capabilitity == 'shell':
             if os.name == 'nt': capabilitity = 'copy'
             elif os.name == 'posix': capabilitity = 'cp'
-        if capabilitity == 'cp' or capabilitity == 'copy' or capabilitity == 'copy':
+        if capabilitity == 'cp' or capabilitity == 'copy' or capabilitity == 'xcopy':
             copy_method = capabilitity
             cmd = [copy_method, image_path, destination_path]
-            result = self.run_command(cmd)
+            shell = not capabilitity == 'cp'
+            result = self.run_command(cmd, shell=shell)
         return result
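Note: the copy plugin now threads a shell flag into run_command(). The reasoning: 'copy' (and 'xcopy') are cmd.exe built-ins on Windows and only exist inside a shell, while POSIX 'cp' is a real executable that can be spawned directly. A minimal standalone sketch of the same dispatch (copy_image is a hypothetical helper, not part of the diff):

    import os
    from subprocess import call

    def copy_image(image_path, destination_path):
        # 'copy' is a cmd.exe built-in (needs shell=True);
        # 'cp' is a regular binary (no shell needed).
        method = 'copy' if os.name == 'nt' else 'cp'
        cmd = [method, image_path, destination_path]
        shell = method != 'cp'
        return call(cmd, shell=shell) == 0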
@@ -85,7 +85,7 @@ def get_version():
     """ Returns test script version
     """
     single_test_version_major = 1
-    single_test_version_minor = 4
+    single_test_version_minor = 5
     return (single_test_version_major, single_test_version_minor)
@@ -144,9 +144,9 @@ if __name__ == '__main__':
         if get_module_avail('mbed_lstools'):
             mbeds = mbed_lstools.create()
-            muts_list = mbeds.list_mbeds()
+            muts_list = mbeds.list_mbeds_ext() if hasattr(mbeds, 'list_mbeds_ext') else mbeds.list_mbeds()
             for mut in muts_list:
-                print "MBEDLS: Detected %s, port: %s, mounted: %s"% (mut['platform_name'],
+                print "MBEDLS: Detected %s, port: %s, mounted: %s"% (mut['platform_name_unique'] if 'platform_name_unique' in mut else mut['platform_name'],
                                                                      mut['serial_port'],
                                                                      mut['mount_point'])
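Note: probing for list_mbeds_ext() with hasattr() keeps the script compatible with older mbed-ls releases that only expose list_mbeds(); the extended listing is what carries 'platform_name_unique'. The same duck-typing guard, as a standalone sketch (list_muts is a hypothetical wrapper):

    def list_muts(mbeds):
        # Prefer the extended listing when the installed mbed-ls has it,
        # otherwise fall back to the plain one.
        lister = getattr(mbeds, 'list_mbeds_ext', mbeds.list_mbeds)
        return lister()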
@@ -518,24 +518,30 @@ class SingleTestRunner(object):
                     test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path

                     # read MUTs, test specification and perform tests
-                    handle_result = self.handle(test_spec, target, toolchain, test_loops=test_loops)
-                    if handle_result:
-                        single_test_result, detailed_test_results = handle_result
-                    else:
-                        continue
+                    handle_results = self.handle(test_spec, target, toolchain, test_loops=test_loops)
+
+                    if handle_results is None:
+                        continue
+
+                    for handle_result in handle_results:
+                        if handle_result:
+                            single_test_result, detailed_test_results = handle_result
+                        else:
+                            continue

-                    # Append test results to global test summary
-                    if single_test_result is not None:
-                        self.test_summary.append(single_test_result)
+                        # Append test results to global test summary
+                        if single_test_result is not None:
+                            self.test_summary.append(single_test_result)

-                    # Add detailed test result to test summary structure
-                    if target not in self.test_summary_ext[toolchain][target]:
-                        self.test_summary_ext[toolchain][target][test_id] = detailed_test_results # test_summary_ext : toolchain : target : test_it
+                        # Add detailed test result to test summary structure
+                        if target not in self.test_summary_ext[toolchain][target]:
+                            if test_id not in self.test_summary_ext[toolchain][target]:
+                                self.test_summary_ext[toolchain][target][test_id] = []
+                            self.test_summary_ext[toolchain][target][test_id].append(detailed_test_results)

             test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
             self.test_suite_properties_ext[target][toolchain] = test_suite_properties

         # return self.test_summary, self.shuffle_random_seed, test_summary_ext, self.test_suite_properties_ext
         q.put(target + '_'.join(toolchains))
         return
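Note: because one (toolchain, target, test_id) cell can now collect a result from every connected board of that type, the leaf of test_summary_ext changes from a single dict to a list of dicts. A sketch of the resulting shape (keys taken from the diff, values illustrative):

    test_summary_ext = {
        'ARM': {                          # toolchain
            'K64F': {                     # target
                'MBED_A1': [              # test_id -> one entry per MUT
                    {'single_test_result': 'OK',
                     'target_name_unique': 'K64F[725A]'},
                    {'single_test_result': 'FAIL',
                     'target_name_unique': 'K64F[31C0]'},
                ],
            },
        },
    }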
@@ -821,12 +827,9 @@ class SingleTestRunner(object):
             result = False
         return result, resutl_msg

-    def handle(self, test_spec, target_name, toolchain_name, test_loops=1):
-        """ Function determines MUT's mbed disk/port and copies binary to
-            target.
-            Test is being invoked afterwards.
+    def handle_mut(self, mut, data, target_name, toolchain_name, test_loops=1):
+        """ Test is being invoked for given MUT.
         """
-        data = json.loads(test_spec)
         # Get test information, image and test timeout
         test_id = data['test_id']
         test = TEST_MAP[test_id]
@@ -834,13 +837,6 @@ class SingleTestRunner(object):
         image = data["image"]
         duration = data.get("duration", 10)

-        # Find a suitable MUT:
-        mut = None
-        for id, m in self.muts.iteritems():
-            if m['mcu'] == data['mcu']:
-                mut = m
-                break
-
         if mut is None:
             print "Error: No Mbed available: MUT[%s]" % data['mcu']
             return None
@@ -852,6 +848,7 @@ class SingleTestRunner(object):
             return None

         target_by_mcu = TARGET_MAP[mut['mcu']]
+        target_name_unique = mut['mcu_unique'] if 'mcu_unique' in mut else mut['mcu']
         # Some extra stuff can be declared in MUTs structure
         reset_type = mut.get('reset_type') # reboot.txt, reset.txt, shutdown.txt
         reset_tout = mut.get('reset_tout') # COPY_IMAGE -> RESET_PROC -> SLEEP(RESET_TOUT)
@@ -911,6 +908,7 @@ class SingleTestRunner(object):
                 'single_test_result' : single_test_result,
                 'single_test_output' : single_test_output,
                 'target_name' : target_name,
+                'target_name_unique' : target_name_unique,
                 'toolchain_name' : toolchain_name,
                 'test_id' : test_id,
                 'test_description' : test_description,
@@ -919,7 +917,7 @@ class SingleTestRunner(object):
                 'copy_method' : _copy_method,
             }

-            print self.print_test_result(single_test_result, target_name, toolchain_name,
+            print self.print_test_result(single_test_result, target_name_unique, toolchain_name,
                                          test_id, test_description, elapsed_time, single_timeout)

             # Update database entries for ongoing test
@@ -944,7 +942,7 @@ class SingleTestRunner(object):
             self.db_logger.disconnect()

         return (self.shape_global_test_loop_result(test_all_result),
-                target_name,
+                target_name_unique,
                 toolchain_name,
                 test_id,
                 test_description,
@@ -952,6 +950,23 @@ class SingleTestRunner(object):
                 single_timeout,
                 self.shape_test_loop_ok_result_count(test_all_result)), detailed_test_results

+    def handle(self, test_spec, target_name, toolchain_name, test_loops=1):
+        """ Function determines MUT's mbed disk/port and copies binary to
+            target.
+        """
+        handle_results = []
+        data = json.loads(test_spec)
+
+        # Find a suitable MUT:
+        mut = None
+        for id, m in self.muts.iteritems():
+            if m['mcu'] == data['mcu']:
+                mut = m
+                handle_result = self.handle_mut(mut, data, target_name, toolchain_name, test_loops=test_loops)
+                handle_results.append(handle_result)
+
+        return handle_results
+
     def print_test_result(self, test_result, target_name, toolchain_name,
                           test_id, test_description, elapsed_time, duration):
         """ Use specific convention to print test result and related data
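Note: handle() is now a thin dispatcher. It scans self.muts for every entry whose 'mcu' matches the test spec (the old early break is gone) and calls handle_mut() once per match, so a rig with three boards of one type runs the test three times. Callers therefore iterate over the returned list, e.g. (a sketch, assuming a SingleTestRunner instance named runner):

    results = runner.handle(test_spec, target, toolchain, test_loops=1)
    for res in results:
        if res:
            single_test_result, detailed_test_results = res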
@@ -1497,7 +1512,6 @@ def singletest_in_cli_mode(single_test):
         # Export build results as html report to sparate file
         write_build_report(build_report, 'tests_build/report.html', single_test.opts_report_build_file_name)


 class TestLogger():
     """ Super-class for logging and printing ongoing events for test suite pass
     """
@@ -1627,10 +1641,13 @@ def get_autodetected_MUTS(mbeds_list, platform_name_filter=None):
     # mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:', 'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}]
     index = 1
     for mut in mbeds_list:
-        m = {'mcu' : mut['platform_name'],
-             'port' : mut['serial_port'],
-             'disk' : mut['mount_point'],
-             'peripherals' : [] # No peripheral detection
+        # For mcu_unique - we are assigning 'platform_name_unique' value from mbedls output (if its existing)
+        # if not we are creating our own unique value (last few chars from platform's target_id).
+        m = {'mcu': mut['platform_name'],
+             'mcu_unique' : mut['platform_name_unique'] if 'platform_name_unique' in mut else "%s[%s]" % (mut['platform_name'], mut['target_id'][-4:]),
+             'port': mut['serial_port'],
+             'disk': mut['mount_point'],
+             'peripherals': [] # No peripheral detection
             }
         if index not in result:
             result[index] = {}
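Note: when mbed-ls does not supply 'platform_name_unique', the code derives its own unique MUT name from the last four characters of the board's target_id. A standalone sketch of that naming rule (unique_mcu_name is a hypothetical helper):

    def unique_mcu_name(mut):
        # e.g. {'platform_name': 'NUCLEO_F302R8',
        #       'target_id': '07050200623B61125D5EF72A'}
        # -> 'NUCLEO_F302R8[F72A]'
        if 'platform_name_unique' in mut:
            return mut['platform_name_unique']
        return "%s[%s]" % (mut['platform_name'], mut['target_id'][-4:])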
@@ -21,7 +21,8 @@ from workspace_tools.utils import construct_enum


 ResultExporterType = construct_enum(HTML='Html_Exporter',
-                                    JUNIT='JUnit_Exporter')
+                                    JUNIT='JUnit_Exporter',
+                                    BUILD='Build_Exporter')


 class ReportExporter():
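Note: ResultExporterType gains a BUILD member for the build-report exporter. construct_enum (imported from workspace_tools.utils) builds a plain attribute-bag class from keyword arguments; a minimal stand-in, assuming that implementation, would be:

    def construct_enum(**enums):
        # Each keyword becomes a class attribute, e.g.
        # ResultExporterType.JUNIT == 'JUnit_Exporter'
        return type('Enum', (), enums)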
@@ -91,23 +92,27 @@ class ReportExporter():
         """ Generate simple unique tool-tip name which can be used.
             For example as HTML <div> section id attribute.
         """
-        return "target_test_%s_%s_%s_%d"% (toolchain.lower(), target.lower(), test_id.lower(), loop_no)
+        return "target_test_%s_%s_%s_%s"% (toolchain.lower(), target.lower(), test_id.lower(), loop_no)

     def get_result_div_sections(self, test, test_no):
-        """ Generates separate <dvi> sections which contains test results output.
+        """ Generates separate <DIV> sections which contains test results output.
         """

-        RESULT_COLORS = {'OK' : 'LimeGreen',
-                         'FAIL' : 'Orange',
-                         'ERROR' : 'LightCoral',}
+        RESULT_COLORS = {'OK': 'LimeGreen',
+                         'FAIL': 'Orange',
+                         'ERROR': 'LightCoral',
+                         'OTHER': 'LightGray',
+                        }

         tooltip_name = self.get_tooltip_name(test['toolchain_name'], test['target_name'], test['test_id'], test_no)
-        background_color = RESULT_COLORS[test['single_test_result'] if test['single_test_result'] in RESULT_COLORS else 'ERROR']
+        background_color = RESULT_COLORS[test['single_test_result'] if test['single_test_result'] in RESULT_COLORS else 'OTHER']
         result_div_style = "background-color: %s"% background_color

         result = """<div class="name" style="%s" onmouseover="show(%s)" onmouseout="hide(%s)">
                        <center>%s</center>
                        <div class = "tooltip" id= "%s">
                        <b>%s</b><br />
                        <hr />
                        <b>%s</b> in <b>%.2f sec</b><br />
                        <hr />
                        <small>
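Note: the explicit 'OTHER' entry means verdicts outside OK/FAIL/ERROR get a neutral gray instead of being painted as errors. The conditional-expression lookup in the diff is equivalent to a dict .get() with a default:

    background_color = RESULT_COLORS.get(test['single_test_result'],
                                         RESULT_COLORS['OTHER'])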
@@ -120,6 +125,7 @@ class ReportExporter():
                    tooltip_name,
                    test['single_test_result'],
                    tooltip_name,
+                   test['target_name_unique'],
                    test['test_description'],
                    test['elapsed_time'],
                    test['single_test_output'].replace('\n', '<br />'))
@@ -130,14 +136,16 @@ class ReportExporter():
             we will show it in a column to see all results.
             This function produces HTML table with corresponding results.
         """
-        result = '<table>'
-        test_ids = sorted(test_results.keys())
-        for test_no in test_ids:
-            test = test_results[test_no]
-            result += """<tr>
-                          <td valign="top">%s</td>
-                         </tr>"""% self.get_result_div_sections(test, test_no)
-        result += '</table>'
+        result = ''
+        for i, test_result in enumerate(test_results):
+            result += '<table>'
+            test_ids = sorted(test_result.keys())
+            for test_no in test_ids:
+                test = test_result[test_no]
+                result += """<tr>
+                              <td valign="top">%s</td>
+                             </tr>"""% self.get_result_div_sections(test, "%d_%d" % (test_no, i))
+            result += '</table>'
         return result

     def get_all_unique_test_ids(self, test_result_ext):
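Note: with test_results now a list (one dict per MUT), the exporter emits one HTML table per board and suffixes the tooltip id with the list index so ids stay unique across boards; this is also why get_tooltip_name switched its format specifier from %d to %s (the loop number is now a composite string, no longer an integer). For example, assuming illustrative toolchain/target/test names:

    suffix = "%d_%d" % (0, 1)   # loop 0 on the second board -> '0_1'
    tooltip_id = "target_test_%s_%s_%s_%s" % ('uarm', 'k64f', 'mbed_a1', suffix)
    # -> 'target_test_uarm_k64f_mbed_a1_0_1'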
@@ -158,7 +166,7 @@ class ReportExporter():
     #

     def exporter_html(self, test_result_ext, test_suite_properties=None):
-        """ Export test results in proprietary html format.
+        """ Export test results in proprietary HTML format.
         """
         result = """<html>
                     <head>
@@ -211,25 +219,26 @@ class ReportExporter():
                 tests = sorted(test_result_ext[toolchain][target].keys())
                 for test in tests:
                     test_results = test_result_ext[toolchain][target][test]
-                    test_ids = sorted(test_results.keys())
-                    for test_no in test_ids:
-                        test_result = test_results[test_no]
-                        name = test_result['test_description']
-                        classname = 'test.%s.%s.%s'% (target, toolchain, test_result['test_id'])
-                        elapsed_sec = test_result['elapsed_time']
-                        _stdout = test_result['single_test_output']
-                        _stderr = ''
-                        # Test case
-                        tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)
-                        # Test case extra failure / error info
-                        if test_result['single_test_result'] == 'FAIL':
-                            message = test_result['single_test_result']
-                            tc.add_failure_info(message, _stdout)
-                        elif test_result['single_test_result'] != 'OK':
-                            message = test_result['single_test_result']
-                            tc.add_error_info(message, _stdout)
+                    for test_res in test_results:
+                        test_ids = sorted(test_res.keys())
+                        for test_no in test_ids:
+                            test_result = test_res[test_no]
+                            name = test_result['test_description']
+                            classname = 'test.%s.%s.%s'% (target, toolchain, test_result['test_id'])
+                            elapsed_sec = test_result['elapsed_time']
+                            _stdout = test_result['single_test_output']
+                            _stderr = test_result['target_name_unique']
+                            # Test case
+                            tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)
+                            # Test case extra failure / error info
+                            if test_result['single_test_result'] == 'FAIL':
+                                message = test_result['single_test_result']
+                                tc.add_failure_info(message, _stdout)
+                            elif test_result['single_test_result'] != 'OK':
+                                message = test_result['single_test_result']
+                                tc.add_error_info(message, _stdout)

-                        test_cases.append(tc)
+                            test_cases.append(tc)
         ts = TestSuite("test.suite.%s.%s"% (target, toolchain), test_cases, properties=test_suite_properties[target][toolchain])
         test_suites.append(ts)
     return TestSuite.to_xml_string(test_suites)
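Note: routing target_name_unique through the TestCase's stderr field is a pragmatic way to surface which physical board produced each JUnit entry. A minimal sketch of this use of the junit-xml package (assuming junit_xml is installed; illustrative values, names per its TestCase/TestSuite API):

    from junit_xml import TestSuite, TestCase

    tc = TestCase('MBED_A1', 'test.K64F.ARM.MBED_A1', 1.23,
                  'test output...', 'K64F[F72A]')   # stderr <- unique board name
    tc.add_failure_info('FAIL', 'test output...')   # only for failed runs
    ts = TestSuite('test.suite.K64F.ARM', [tc])
    print TestSuite.to_xml_string([ts])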