Remove autoformatting

pull/7745/head
Olli-Pekka Puolitaival 2018-08-15 08:56:26 +03:00
parent 3b3bed2406
commit bf1781b005
1 changed file with 173 additions and 194 deletions
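The hunks below mostly toggle two surface conventions in tools/test_api.py: spacing before the string-format `%` operator and hand-aligned colons in dict literals. The snippet that follows is an illustrative sketch only -- the variable names and values are invented for this note; only the two spacing styles are taken from the diff itself.

# Illustrative sketch: names and values invented, styles taken from the hunks below.
TEST_RESULT_OK, TEST_RESULT_FAIL = "OK", "FAIL"
elapsed_time = 1.23

# Autoformatted (PEP 8) style that this commit removes:
mapping_pep8 = {"success": TEST_RESULT_OK,
                "failure": TEST_RESULT_FAIL}
print("Completed in %.2f sec" % (elapsed_time))

# Hand-aligned original style that this commit restores:
mapping_orig = {"success" : TEST_RESULT_OK,
                "failure" : TEST_RESULT_FAIL}
print("Completed in %.2f sec"% (elapsed_time))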


@@ -31,13 +31,11 @@ import datetime
 import threading
 import ctypes
 import functools
-import subprocess
 from colorama import Fore, Back, Style
 from prettytable import PrettyTable
 from copy import copy, deepcopy
 from time import sleep, time
 try:
     from Queue import Queue, Empty
 except ImportError:
@@ -116,7 +114,6 @@ class ProcessObserver(Thread):
 class SingleTestExecutor(threading.Thread):
     """ Example: Single test class in separate thread usage
     """
     def __init__(self, single_test):
         self.single_test = single_test
         threading.Thread.__init__(self)
@@ -137,7 +134,7 @@ class SingleTestExecutor(threading.Thread):
         # table shows text x toolchain test result matrix
         print(self.single_test.generate_test_summary_by_target(
             test_summary, shuffle_seed))
-        print("Completed in %.2f sec" % (elapsed_time))
+        print("Completed in %.2f sec"% (elapsed_time))
 class SingleTestRunner(object):
@@ -160,26 +157,26 @@ class SingleTestRunner(object):
     TEST_RESULT_NOT_SUPPORTED = "NOT_SUPPORTED"
     GLOBAL_LOOPS_COUNT = 1 # How many times each test should be repeated
     TEST_LOOPS_LIST = [] # We redefine no.of loops per test_id
     TEST_LOOPS_DICT = {} # TEST_LOOPS_LIST in dict format: { test_id : test_loop_count}
     muts = {} # MUTs descriptor (from external file)
     test_spec = {} # Test specification (from external file)
     # mbed test suite -> SingleTestRunner
-    TEST_RESULT_MAPPING = {"success": TEST_RESULT_OK,
-                           "failure": TEST_RESULT_FAIL,
-                           "error": TEST_RESULT_ERROR,
-                           "ioerr_copy": TEST_RESULT_IOERR_COPY,
-                           "ioerr_disk": TEST_RESULT_IOERR_DISK,
-                           "ioerr_serial": TEST_RESULT_IOERR_SERIAL,
-                           "timeout": TEST_RESULT_TIMEOUT,
-                           "no_image": TEST_RESULT_NO_IMAGE,
-                           "end": TEST_RESULT_UNDEF,
-                           "mbed_assert": TEST_RESULT_MBED_ASSERT,
-                           "build_failed": TEST_RESULT_BUILD_FAILED,
-                           "not_supproted": TEST_RESULT_NOT_SUPPORTED
+    TEST_RESULT_MAPPING = {"success" : TEST_RESULT_OK,
+                           "failure" : TEST_RESULT_FAIL,
+                           "error" : TEST_RESULT_ERROR,
+                           "ioerr_copy" : TEST_RESULT_IOERR_COPY,
+                           "ioerr_disk" : TEST_RESULT_IOERR_DISK,
+                           "ioerr_serial" : TEST_RESULT_IOERR_SERIAL,
+                           "timeout" : TEST_RESULT_TIMEOUT,
+                           "no_image" : TEST_RESULT_NO_IMAGE,
+                           "end" : TEST_RESULT_UNDEF,
+                           "mbed_assert" : TEST_RESULT_MBED_ASSERT,
+                           "build_failed" : TEST_RESULT_BUILD_FAILED,
+                           "not_supproted" : TEST_RESULT_NOT_SUPPORTED
                            }
     def __init__(self,
                  _global_loops_count=1,
@@ -289,18 +286,17 @@ class SingleTestRunner(object):
         # Database related initializations
         self.db_logger = factory_db_logger(self.opts_db_url)
         self.db_logger_build_id = None # Build ID (database index of build_id table)
         # Let's connect to database to set up credentials and confirm database is ready
         if self.db_logger:
             self.db_logger.connect_url(self.opts_db_url) # Save db access info inside db_logger object
             if self.db_logger.is_connected():
                 # Get hostname and uname so we can use it as build description
                 # when creating new build_id in external database
                 (_hostname, _uname) = self.db_logger.get_hostname()
                 _host_location = os.path.dirname(os.path.abspath(__file__))
                 build_id_type = None if self.opts_only_build_tests is None else self.db_logger.BUILD_ID_TYPE_BUILD_ONLY
-                self.db_logger_build_id = self.db_logger.get_next_build_id(_hostname, desc=_uname,
-                                                                           location=_host_location, type=build_id_type)
+                self.db_logger_build_id = self.db_logger.get_next_build_id(_hostname, desc=_uname, location=_host_location, type=build_id_type)
                 self.db_logger.disconnect()
     def dump_options(self):
@@ -311,23 +307,23 @@ class SingleTestRunner(object):
         or
         data_str = json.dumps(self.dump_options())
         """
-        result = {"db_url": str(self.opts_db_url),
-                  "log_file_name": str(self.opts_log_file_name),
-                  "shuffle_test_order": str(self.opts_shuffle_test_order),
-                  "shuffle_test_seed": str(self.opts_shuffle_test_seed),
-                  "test_by_names": str(self.opts_test_by_names),
-                  "peripheral_by_names": str(self.opts_peripheral_by_names),
-                  "test_only_peripheral": str(self.opts_test_only_peripheral),
-                  "test_only_common": str(self.opts_test_only_common),
-                  "verbose": str(self.opts_verbose),
-                  "firmware_global_name": str(self.opts_firmware_global_name),
-                  "only_build_tests": str(self.opts_only_build_tests),
-                  "copy_method": str(self.opts_copy_method),
-                  "mut_reset_type": str(self.opts_mut_reset_type),
-                  "jobs": str(self.opts_jobs),
-                  "extend_test_timeout": str(self.opts_extend_test_timeout),
-                  "_dummy": ''
+        result = {"db_url" : str(self.opts_db_url),
+                  "log_file_name" : str(self.opts_log_file_name),
+                  "shuffle_test_order" : str(self.opts_shuffle_test_order),
+                  "shuffle_test_seed" : str(self.opts_shuffle_test_seed),
+                  "test_by_names" : str(self.opts_test_by_names),
+                  "peripheral_by_names" : str(self.opts_peripheral_by_names),
+                  "test_only_peripheral" : str(self.opts_test_only_peripheral),
+                  "test_only_common" : str(self.opts_test_only_common),
+                  "verbose" : str(self.opts_verbose),
+                  "firmware_global_name" : str(self.opts_firmware_global_name),
+                  "only_build_tests" : str(self.opts_only_build_tests),
+                  "copy_method" : str(self.opts_copy_method),
+                  "mut_reset_type" : str(self.opts_mut_reset_type),
+                  "jobs" : str(self.opts_jobs),
+                  "extend_test_timeout" : str(self.opts_extend_test_timeout),
+                  "_dummy" : ''
                   }
         return result
     def shuffle_random_func(self):
@@ -369,6 +365,7 @@ class SingleTestRunner(object):
             'shuffle_random_seed': self.shuffle_random_seed
         }
         # print '=== %s::%s ===' % (target, toolchain)
         # Let's build our test
         if target not in TARGET_MAP:
@@ -398,7 +395,7 @@ class SingleTestRunner(object):
                 print(self.logger.log_line(
                     self.logger.LogType.NOTIF,
                     'Skipped tests for %s target. Toolchain %s is not '
-                    'supported for this target' % (T.name, toolchain)))
+                    'supported for this target'% (T.name, toolchain)))
                 continue
             except ToolException:
@@ -441,13 +438,13 @@ class SingleTestRunner(object):
                     _extra=json.dumps(self.dump_options()))
                 self.db_logger.disconnect();
-        valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids,
-                                                   self.opts_include_non_automated)
+        valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids, self.opts_include_non_automated)
         skipped_test_map_keys = self.get_skipped_tests(test_map_keys, valid_test_map_keys)
         for skipped_test_id in skipped_test_map_keys:
             test_suite_properties['skipped'].append(skipped_test_id)
         # First pass through all tests and determine which libraries need to be built
         libraries = []
         for test_id in valid_test_map_keys:
@@ -459,6 +456,7 @@ class SingleTestRunner(object):
                 if lib['build_dir'] in test.dependencies and lib['id'] not in libraries:
                     libraries.append(lib['id'])
         clean_project_options = True if self.opts_goanna_for_tests or clean or self.opts_clean else None
         # Build all required libraries
@@ -480,10 +478,11 @@ class SingleTestRunner(object):
                     'There were errors while building library %s' % lib_id))
                 continue
         for test_id in valid_test_map_keys:
             test = TEST_MAP[test_id]
-            test_suite_properties['test.libs.%s.%s.%s' % (target, toolchain, test_id)] = ', '.join(libraries)
+            test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries)
             # TODO: move this 2 below loops to separate function
             INC_DIRS = []
@@ -495,18 +494,18 @@ class SingleTestRunner(object):
             for lib_id in libraries:
                 if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
                     MACROS.extend(LIBRARY_MAP[lib_id]['macros'])
-            MACROS.append('TEST_SUITE_TARGET_NAME="%s"' % target)
-            MACROS.append('TEST_SUITE_TEST_ID="%s"' % test_id)
+            MACROS.append('TEST_SUITE_TARGET_NAME="%s"'% target)
+            MACROS.append('TEST_SUITE_TEST_ID="%s"'% test_id)
             test_uuid = uuid.uuid4()
-            MACROS.append('TEST_SUITE_UUID="%s"' % str(test_uuid))
+            MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))
             # Prepare extended test results data structure (it can be used to generate detailed test report)
             if target not in self.test_summary_ext:
                 self.test_summary_ext[target] = {} # test_summary_ext : toolchain
             if toolchain not in self.test_summary_ext[target]:
                 self.test_summary_ext[target][toolchain] = {} # test_summary_ext : toolchain : target
             tt_test_id = "%s::%s::%s" % (toolchain, target, test_id) # For logging only
             project_name = self.opts_firmware_global_name if self.opts_firmware_global_name else None
             try:
@@ -524,6 +523,7 @@ class SingleTestRunner(object):
             except Exception as e:
                 project_name_str = project_name if project_name is not None else test_id
                 test_result = self.TEST_RESULT_FAIL
                 if isinstance(e, ToolException):
@@ -538,6 +538,7 @@ class SingleTestRunner(object):
                         'Project %s is not supported' % project_name_str))
                     test_result = self.TEST_RESULT_NOT_SUPPORTED
                 # Append test results to global test summary
                 self.test_summary.append(
                     (test_result, target, toolchain, test_id,
@@ -548,17 +549,17 @@ class SingleTestRunner(object):
                 if test_id not in self.test_summary_ext[target][toolchain]:
                     self.test_summary_ext[target][toolchain][test_id] = []
-                self.test_summary_ext[target][toolchain][test_id].append({0: {
-                    'result': test_result,
-                    'output': '',
-                    'target_name': target,
+                self.test_summary_ext[target][toolchain][test_id].append({ 0: {
+                    'result' : test_result,
+                    'output' : '',
+                    'target_name' : target,
                     'target_name_unique': target,
-                    'toolchain_name': toolchain,
-                    'id': test_id,
-                    'description': test.get_description(),
-                    'elapsed_time': 0,
-                    'duration': 0,
-                    'copy_method': None
+                    'toolchain_name' : toolchain,
+                    'id' : test_id,
+                    'description' : test.get_description(),
+                    'elapsed_time' : 0,
+                    'duration' : 0,
+                    'copy_method' : None
                 }})
                 continue
@@ -576,9 +577,9 @@ class SingleTestRunner(object):
             test_spec = self.shape_test_request(target, path, test_id, test_duration)
             test_loops = self.get_test_loop_count(test_id)
-            test_suite_properties['test.duration.%s.%s.%s' % (target, toolchain, test_id)] = test_duration
-            test_suite_properties['test.loops.%s.%s.%s' % (target, toolchain, test_id)] = test_loops
-            test_suite_properties['test.path.%s.%s.%s' % (target, toolchain, test_id)] = path
+            test_suite_properties['test.duration.%s.%s.%s'% (target, toolchain, test_id)] = test_duration
+            test_suite_properties['test.loops.%s.%s.%s'% (target, toolchain, test_id)] = test_loops
+            test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path
             # read MUTs, test specification and perform tests
             handle_results = self.handle(test_spec, target, toolchain, test_loops=test_loops)
@@ -626,11 +627,12 @@ class SingleTestRunner(object):
         if self.opts_shuffle_test_seed is not None and self.is_shuffle_seed_float():
             self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND)
         if self.opts_parallel_test_exec:
             ###################################################################
             # Experimental, parallel test execution per singletest instance.
             ###################################################################
             execute_threads = [] # Threads used to build mbed SDL, libs, test cases and execute tests
             # Note: We are building here in parallel for each target separately!
             # So we are not building the same thing multiple times and compilers
             # in separate threads do not collide.
@@ -638,29 +640,26 @@ class SingleTestRunner(object):
             # get information about available MUTs (per target).
             for target, toolchains in self.test_spec['targets'].items():
                 self.test_suite_properties_ext[target] = {}
-                t = threading.Thread(target=self.execute_thread_slice, args=(
-                    q, target, toolchains, clean, test_ids, self.build_report, self.build_properties))
+                t = threading.Thread(target=self.execute_thread_slice, args = (q, target, toolchains, clean, test_ids, self.build_report, self.build_properties))
                 t.daemon = True
                 t.start()
                 execute_threads.append(t)
             for t in execute_threads:
                 q.get() # t.join() would block some threads because we should not wait in any order for thread end
         else:
             # Serialized (not parallel) test execution
             for target, toolchains in self.test_spec['targets'].items():
                 if target not in self.test_suite_properties_ext:
                     self.test_suite_properties_ext[target] = {}
-                self.execute_thread_slice(q, target, toolchains, clean, test_ids, self.build_report,
-                                          self.build_properties)
+                self.execute_thread_slice(q, target, toolchains, clean, test_ids, self.build_report, self.build_properties)
                 q.get()
         if self.db_logger:
             self.db_logger.reconnect();
             if self.db_logger.is_connected():
-                self.db_logger.update_build_id_info(self.db_logger_build_id,
-                                                    _status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED)
+                self.db_logger.update_build_id_info(self.db_logger_build_id, _status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED)
             self.db_logger.disconnect();
         return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext, self.build_report, self.build_properties
@@ -684,8 +683,8 @@ class SingleTestRunner(object):
                 continue
             if (self.opts_peripheral_by_names and test.peripherals and
                     not any((i in self.opts_peripheral_by_names)
                             for i in test.peripherals)):
                 # We will skip tests not forced with -p option
                 if self.opts_verbose_skipped_tests:
                     print(self.logger.log_line(
@@ -756,7 +755,7 @@ class SingleTestRunner(object):
         result = "Test summary:\n"
         for target in unique_targets:
             result_dict = {} # test : { toolchain : result }
             unique_target_toolchains = []
             for test in test_summary:
                 if test[TARGET_INDEX] == target:
@@ -770,7 +769,7 @@ class SingleTestRunner(object):
             pt = PrettyTable(pt_cols)
             for col in pt_cols:
                 pt.align[col] = "l"
             pt.padding_width = 1 # One space between column edges and contents (default)
             for test in unique_tests:
                 if test in result_dict:
@@ -782,40 +781,40 @@ class SingleTestRunner(object):
                         row.append(test_results[toolchain])
                 pt.add_row(row)
             result += pt.get_string()
-            shuffle_seed_text = "Shuffle Seed: %.*f" % (self.SHUFFLE_SEED_ROUND,
+            shuffle_seed_text = "Shuffle Seed: %.*f"% (self.SHUFFLE_SEED_ROUND,
                                                         shuffle_seed if shuffle_seed else self.shuffle_random_seed)
-            result += "\n%s" % (shuffle_seed_text if self.opts_shuffle_test_order else '')
+            result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
         return result
     def generate_test_summary(self, test_summary, shuffle_seed=None):
         """ Prints well-formed summary with results (SQL table like)
             table shows target x test results matrix across
         """
         success_code = 0 # Success code that can be leter returned to
         result = "Test summary:\n"
         # Pretty table package is used to print results
         pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
                           "Elapsed Time (sec)", "Timeout (sec)", "Loops"])
         pt.align["Result"] = "l" # Left align
         pt.align["Target"] = "l" # Left align
         pt.align["Toolchain"] = "l" # Left align
         pt.align["Test ID"] = "l" # Left align
         pt.align["Test Description"] = "l" # Left align
         pt.padding_width = 1 # One space between column edges and contents (default)
-        result_dict = {self.TEST_RESULT_OK: 0,
-                       self.TEST_RESULT_FAIL: 0,
-                       self.TEST_RESULT_ERROR: 0,
-                       self.TEST_RESULT_UNDEF: 0,
-                       self.TEST_RESULT_IOERR_COPY: 0,
-                       self.TEST_RESULT_IOERR_DISK: 0,
-                       self.TEST_RESULT_IOERR_SERIAL: 0,
-                       self.TEST_RESULT_NO_IMAGE: 0,
-                       self.TEST_RESULT_TIMEOUT: 0,
-                       self.TEST_RESULT_MBED_ASSERT: 0,
-                       self.TEST_RESULT_BUILD_FAILED: 0,
-                       self.TEST_RESULT_NOT_SUPPORTED: 0
+        result_dict = {self.TEST_RESULT_OK : 0,
+                       self.TEST_RESULT_FAIL : 0,
+                       self.TEST_RESULT_ERROR : 0,
+                       self.TEST_RESULT_UNDEF : 0,
+                       self.TEST_RESULT_IOERR_COPY : 0,
+                       self.TEST_RESULT_IOERR_DISK : 0,
+                       self.TEST_RESULT_IOERR_SERIAL : 0,
+                       self.TEST_RESULT_NO_IMAGE : 0,
+                       self.TEST_RESULT_TIMEOUT : 0,
+                       self.TEST_RESULT_MBED_ASSERT : 0,
+                       self.TEST_RESULT_BUILD_FAILED : 0,
+                       self.TEST_RESULT_NOT_SUPPORTED : 0
                        }
         for test in test_summary:
             if test[0] in result_dict:
@@ -825,11 +824,10 @@ class SingleTestRunner(object):
         result += "\n"
         # Print result count
-        result += "Result: " + ' / '.join(
-            ['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.items()])
-        shuffle_seed_text = "Shuffle Seed: %.*f\n" % (self.SHUFFLE_SEED_ROUND,
-                                                      shuffle_seed if shuffle_seed else self.shuffle_random_seed)
-        result += "\n%s" % (shuffle_seed_text if self.opts_shuffle_test_order else '')
+        result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.items()])
+        shuffle_seed_text = "Shuffle Seed: %.*f\n"% (self.SHUFFLE_SEED_ROUND,
+                                                     shuffle_seed if shuffle_seed else self.shuffle_random_seed)
+        result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
         return result
     def test_loop_list_to_dict(self, test_loops_str):
@@ -885,7 +883,7 @@ class SingleTestRunner(object):
             return None
         mcu = mut['mcu']
         copy_method = mut.get('copy_method') # Available board configuration selection e.g. core selection etc.
         if self.db_logger:
             self.db_logger.reconnect()
@@ -941,14 +939,13 @@ class SingleTestRunner(object):
             # Host test execution
             start_host_exec_time = time()
             single_test_result = self.TEST_RESULT_UNDEF # single test run result
             _copy_method = selected_copy_method
             if not exists(image_path):
                 single_test_result = self.TEST_RESULT_NO_IMAGE
                 elapsed_time = 0
-                single_test_output = self.logger.log_line(self.logger.LogType.ERROR,
-                                                          'Image file does not exist: %s' % image_path)
+                single_test_output = self.logger.log_line(self.logger.LogType.ERROR, 'Image file does not exist: %s'% image_path)
                 print(single_test_output)
             else:
                 # Host test execution
@@ -968,20 +965,20 @@ class SingleTestRunner(object):
                 # Store test result
                 test_all_result.append(single_test_result)
                 total_elapsed_time = time() - start_host_exec_time # Test time with copy (flashing) / reset
                 elapsed_time = single_testduration # TIme of single test case execution after reset
                 detailed_test_results[test_index] = {
-                    'result': single_test_result,
-                    'output': single_test_output,
-                    'target_name': target_name,
-                    'target_name_unique': target_name_unique,
-                    'toolchain_name': toolchain_name,
-                    'id': test_id,
-                    'description': test_description,
-                    'elapsed_time': round(elapsed_time, 2),
-                    'duration': single_timeout,
-                    'copy_method': _copy_method,
+                    'result' : single_test_result,
+                    'output' : single_test_output,
+                    'target_name' : target_name,
+                    'target_name_unique' : target_name_unique,
+                    'toolchain_name' : toolchain_name,
+                    'id' : test_id,
+                    'description' : test_description,
+                    'elapsed_time' : round(elapsed_time, 2),
+                    'duration' : single_timeout,
+                    'copy_method' : _copy_method,
                 }
                 print(self.print_test_result(
@@ -1009,8 +1006,7 @@ class SingleTestRunner(object):
         if self.db_logger:
             self.db_logger.disconnect()
-        return (self.shape_global_test_loop_result(test_all_result,
-                                                   self.opts_waterfall_test and self.opts_consolidate_waterfall_test),
+        return (self.shape_global_test_loop_result(test_all_result, self.opts_waterfall_test and self.opts_consolidate_waterfall_test),
                 target_name_unique,
                 toolchain_name,
                 test_id,
@@ -1048,7 +1044,7 @@ class SingleTestRunner(object):
         tokens.append(test_description)
         separator = "::"
         time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
-        result = separator.join(tokens) + " [" + test_result + "]" + time_info
+        result = separator.join(tokens) + " [" + test_result +"]" + time_info
         return Fore.MAGENTA + result + Fore.RESET
     def shape_test_loop_ok_result_count(self, test_all_result):
@@ -1056,7 +1052,7 @@ class SingleTestRunner(object):
         """
         test_loop_count = len(test_all_result)
         test_loop_ok_result = test_all_result.count(self.TEST_RESULT_OK)
-        return "%d/%d" % (test_loop_ok_result, test_loop_count)
+        return "%d/%d"% (test_loop_ok_result, test_loop_count)
     def shape_global_test_loop_result(self, test_all_result, waterfall_and_consolidate):
         """ Reformats list of results to simple string
@@ -1110,16 +1106,16 @@ class SingleTestRunner(object):
                 Returns string
             """
             result = None
-            if re.search("HOST: Property '%s'" % property_name, line) is not None:
-                property = re.search("HOST: Property '%s' = '([\w\d _]+)'" % property_name, line)
+            if re.search("HOST: Property '%s'"% property_name, line) is not None:
+                property = re.search("HOST: Property '%s' = '([\w\d _]+)'"% property_name, line)
                 if property is not None and len(property.groups()) == 1:
                     result = property.groups()[0]
             return result
         cmd = ["python",
-               '%s.py' % name,
+               '%s.py'% name,
                '-d', disk,
-               '-f', '"%s"' % image_path,
+               '-f', '"%s"'% image_path,
                '-p', port,
                '-t', str(duration),
               '-C', str(program_cycle_s)]
@@ -1143,7 +1139,7 @@ class SingleTestRunner(object):
         proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
         obs = ProcessObserver(proc)
         update_once_flag = {} # Stores flags checking if some auto-parameter was already set
         line = ''
         output = []
         start_time = time()
@@ -1182,7 +1178,7 @@ class SingleTestRunner(object):
                 else:
                     line += c
         end_time = time()
         testcase_duration = end_time - start_time # Test case duration from reset to {end}
         c = get_char_from_queue(obs)
@@ -1259,11 +1255,11 @@ def show_json_file_format_error(json_spec_filename, line, column):
     with open(json_spec_filename) as data_file:
         line_no = 1
         for json_line in data_file:
             if line_no + 5 >= line: # Print last few lines before error
-                print('Line %d:\t' % line_no + json_line)
+                print('Line %d:\t'%line_no + json_line)
                 if line_no == line:
-                    print('%s\t%s^'(' ' * len('Line %d:' % line_no),
+                    print('%s\t%s^' (' ' * len('Line %d:' % line_no),
                                     '-' * (column - 1)))
                     break
             line_no += 1
@@ -1311,7 +1307,7 @@ def get_json_data_from_file(json_spec_filename, verbose=False):
             show_json_file_format_error(json_spec_filename, line, column)
     except IOError as fileopen_error_msg:
-        print('JSON file %s not opened. Reason: %s\n' %
+        print('JSON file %s not opened. Reason: %s\n'%
               (json_spec_filename, fileopen_error_msg))
     if verbose and result:
         pp = pprint.PrettyPrinter(indent=4)
@@ -1384,7 +1380,7 @@ def print_test_configuration_from_json(json_data, join_delim=", "):
         target_supported_toolchains = get_target_supported_toolchains(target)
         if not target_supported_toolchains:
             target_supported_toolchains = []
-        target_name = target if target in TARGET_MAP else "%s*" % target
+        target_name = target if target in TARGET_MAP else "%s*"% target
         row = [target_name]
         toolchains = targets[target]
@@ -1415,27 +1411,27 @@ def print_test_configuration_from_json(json_data, join_delim=", "):
         pt.add_row(row)
     # generate result string
     result = pt.get_string() # Test specification table
     if toolchain_conflicts or toolchain_path_conflicts:
         result += "\n"
         result += "Toolchain conflicts:\n"
         for target in toolchain_conflicts:
             if target not in TARGET_MAP:
-                result += "\t* Target %s unknown\n" % (target)
+                result += "\t* Target %s unknown\n"% (target)
             conflict_target_list = join_delim.join(toolchain_conflicts[target])
             sufix = 's' if len(toolchain_conflicts[target]) > 1 else ''
-            result += "\t* Target %s does not support %s toolchain%s\n" % (target, conflict_target_list, sufix)
+            result += "\t* Target %s does not support %s toolchain%s\n"% (target, conflict_target_list, sufix)
         for toolchain in toolchain_path_conflicts:
             # Let's check toolchain configuration
             if toolchain in TOOLCHAIN_PATHS:
                 toolchain_path = TOOLCHAIN_PATHS[toolchain]
                 if not os.path.isdir(toolchain_path):
-                    result += "\t# Toolchain %s path not found: %s\n" % (toolchain, toolchain_path)
+                    result += "\t# Toolchain %s path not found: %s\n"% (toolchain, toolchain_path)
     return result
-def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=',', platform_filter=None):
+def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=',',platform_filter=None):
     """ Generates table summary with all test cases and additional test cases
         information using pretty print functionality. Allows test suite user to
         see test cases
@@ -1466,7 +1462,7 @@ def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=','
     counter_all = 0
     counter_automated = 0
     pt.padding_width = 1 # One space between column edges and contents (default)
     for test_id in sorted(TEST_MAP.keys()):
         if platform_filter is not None:
@@ -1520,8 +1516,7 @@ def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=','
         pt.align['percent [%]'] = "r"
         for unique_id in unique_test_id:
             # print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
-            percent_progress = round(
-                100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
+            percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
             str_progress = progress_bar(percent_progress, 75)
             row = [unique_id,
                    counter_dict_test_id_types[unique_id],
@@ -1538,7 +1533,7 @@ def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=','
 def progress_bar(percent_progress, saturation=0):
     """ This function creates progress bar with optional simple saturation mark
     """
     step = int(percent_progress / 2) # Scale by to (scale: 1 - 50)
     str_progress = '#' * step + '.' * int(50 - step)
     c = '!' if str_progress[38] == '.' else '|'
     if saturation > 0:
@@ -1578,32 +1573,26 @@ def singletest_in_cli_mode(single_test):
     if single_test.opts_report_html_file_name:
         # Export results in form of HTML report to separate file
         report_exporter = ReportExporter(ResultExporterType.HTML)
-        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name,
-                                       test_suite_properties=test_suite_properties_ext)
+        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
     if single_test.opts_report_junit_file_name:
         # Export results in form of JUnit XML report to separate file
         report_exporter = ReportExporter(ResultExporterType.JUNIT)
-        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name,
-                                       test_suite_properties=test_suite_properties_ext)
+        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
    if single_test.opts_report_text_file_name:
         # Export results in form of a text file
         report_exporter = ReportExporter(ResultExporterType.TEXT)
-        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_text_file_name,
-                                       test_suite_properties=test_suite_properties_ext)
+        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_text_file_name, test_suite_properties=test_suite_properties_ext)
     if single_test.opts_report_build_file_name:
         # Export build results as html report to sparate file
         report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
-        report_exporter.report_to_file(build_report, single_test.opts_report_build_file_name,
-                                       test_suite_properties=build_properties)
+        report_exporter.report_to_file(build_report, single_test.opts_report_build_file_name, test_suite_properties=build_properties)
     # Returns True if no build failures of the test projects or their dependencies
     return status
 class TestLogger():
     """ Super-class for logging and printing ongoing events for test suite pass
     """
     def __init__(self, store_log=True):
         """ We can control if logger actually stores log in memory
             or just handled all log entries immediately
@@ -1619,18 +1608,18 @@ class TestLogger():
                                               ERROR='Error',
                                               EXCEPT='Exception')
         self.LogToFileAttr = construct_enum(CREATE=1, # Create or overwrite existing log file
                                             APPEND=2) # Append to existing log file
     def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
         """ Log one line of text
         """
         log_timestamp = time()
-        log_entry = {'log_type': LogType,
-                     'log_timestamp': log_timestamp,
-                     'log_line': log_line,
-                     '_future': None
+        log_entry = {'log_type' : LogType,
+                     'log_timestamp' : log_timestamp,
+                     'log_line' : log_line,
+                     '_future' : None
                      }
         # Store log in memory
         if self.store_log:
             self.log.append(log_entry)
@@ -1640,20 +1629,18 @@ class TestLogger():
 class CLITestLogger(TestLogger):
     """ Logger used with CLI (Command line interface) test suite. Logs on screen and to file if needed
     """
     def __init__(self, store_log=True, file_name=None):
         TestLogger.__init__(self)
         self.log_file_name = file_name
-        # self.TIMESTAMP_FORMAT = '%y-%m-%d %H:%M:%S' # Full date and time
+        #self.TIMESTAMP_FORMAT = '%y-%m-%d %H:%M:%S' # Full date and time
         self.TIMESTAMP_FORMAT = '%H:%M:%S' # Time only
     def log_print(self, log_entry, timestamp=True):
         """ Prints on screen formatted log entry
         """
         ts = log_entry['log_timestamp']
-        timestamp_str = datetime.datetime.fromtimestamp(ts).strftime(
-            "[%s] " % self.TIMESTAMP_FORMAT) if timestamp else ''
-        log_line_str = "%(log_type)s: %(log_line)s" % (log_entry)
+        timestamp_str = datetime.datetime.fromtimestamp(ts).strftime("[%s] "% self.TIMESTAMP_FORMAT) if timestamp else ''
+        log_line_str = "%(log_type)s: %(log_line)s"% (log_entry)
         return timestamp_str + log_line_str
     def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
@@ -1691,7 +1678,7 @@ def detect_database_verbose(db_url):
     if result is not None:
         # Parsing passed
         (db_type, username, password, host, db_name) = result
-        # print "DB type '%s', user name '%s', password '%s', host '%s', db name '%s'"% result
+        #print "DB type '%s', user name '%s', password '%s', host '%s', db name '%s'"% result
         # Let's try to connect
         db_ = factory_db_logger(db_url)
         if db_ is not None:
@@ -1715,12 +1702,11 @@ def get_module_avail(module_name):
     """
     return module_name in sys.modules.keys()
 def get_autodetected_MUTS_list(platform_name_filter=None):
     oldError = None
     if os.name == 'nt':
         # Disable Windows error box temporarily
-        oldError = ctypes.windll.kernel32.SetErrorMode(1) # note that SEM_FAILCRITICALERRORS = 1
+        oldError = ctypes.windll.kernel32.SetErrorMode(1) #note that SEM_FAILCRITICALERRORS = 1
     mbeds = mbed_lstools.create()
     detect_muts_list = mbeds.list_mbeds()
@@ -1730,7 +1716,6 @@ def get_autodetected_MUTS_list(platform_name_filter=None):
     return get_autodetected_MUTS(detect_muts_list, platform_name_filter=platform_name_filter)
 def get_autodetected_MUTS(mbeds_list, platform_name_filter=None):
     """ Function detects all connected to host mbed-enabled devices and generates artificial MUTS file.
         If function fails to auto-detect devices it will return empty dictionary.
@@ -1742,7 +1727,7 @@ def get_autodetected_MUTS(mbeds_list, platform_name_filter=None):
     @param mbeds_list list of mbeds captured from mbed_lstools
     @param platform_name You can filter 'platform_name' with list of filtered targets from 'platform_name_filter'
     """
     result = {} # Should be in muts_all.json format
     # Align mbeds_list from mbed_lstools to MUT file format (JSON dictionary with muts)
     # mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:', 'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}]
     index = 1
@@ -1755,11 +1740,10 @@ def get_autodetected_MUTS(mbeds_list, platform_name_filter=None):
         # For mcu_unique - we are assigning 'platform_name_unique' value from mbedls output (if its existing)
         # if not we are creating our own unique value (last few chars from platform's target_id).
         m = {'mcu': mut['platform_name'],
-             'mcu_unique': mut['platform_name_unique'] if 'platform_name_unique' in mut else "%s[%s]" % (
-                 mut['platform_name'], mut['target_id'][-4:]),
+             'mcu_unique' : mut['platform_name_unique'] if 'platform_name_unique' in mut else "%s[%s]" % (mut['platform_name'], mut['target_id'][-4:]),
             'port': mut['serial_port'],
             'disk': mut['mount_point'],
             'peripherals': [] # No peripheral detection
             }
         if index not in result:
             result[index] = {}
@@ -1780,7 +1764,7 @@ def get_autodetected_TEST_SPEC(mbeds_list,
     use_supported_toolchains - if True add all supported toolchains to test_spec
     toolchain_filter - if [...list of toolchains...] add from all toolchains only those in filter to test_spec
     """
-    result = {'targets': {}}
+    result = {'targets': {} }
     for mut in mbeds_list:
         mcu = mut['mcu']
@@ -1838,7 +1822,7 @@ def get_default_test_options_parser():
     toolchain_list = list(TOOLCHAINS) + ["DEFAULT", "ALL"]
     parser.add_argument('--tc',
                         dest='toolchains_filter',
                         type=argparse_many(argparse_uppercase_type(toolchain_list, "toolchains")),
                         help="Toolchain filter for --auto argument. Use toolchains names separated by comma, 'default' or 'all' to select toolchains")
     test_scopes = ','.join(["'%s'" % n for n in get_available_oper_test_scopes()])
@@ -1874,9 +1858,9 @@ def get_default_test_options_parser():
                         help='Runs only test enumerated it this switch. Use comma to separate test case names')
     parser.add_argument('-p', '--peripheral-by-names',
                         dest='peripheral_by_names',
                         type=argparse_many(str),
                         help='Forces discovery of particular peripherals. Use comma to separate peripheral names')
     copy_methods = host_tests_plugins.get_plugin_caps('CopyMethod')
     copy_methods_str = "Plugin support: " + ', '.join(copy_methods)
@@ -1884,7 +1868,7 @@ def get_default_test_options_parser():
     parser.add_argument('-c', '--copy-method',
                         dest='copy_method',
                         type=argparse_uppercase_type(copy_methods, "flash method"),
-                        help="Select binary copy (flash) method. Default is Python's shutil.copy() method. %s" % copy_methods_str)
+                        help="Select binary copy (flash) method. Default is Python's shutil.copy() method. %s"% copy_methods_str)
     reset_methods = host_tests_plugins.get_plugin_caps('ResetMethod')
     reset_methods_str = "Plugin support: " + ', '.join(reset_methods)
@@ -1893,7 +1877,7 @@ def get_default_test_options_parser():
                         dest='mut_reset_type',
                         default=None,
                         type=argparse_uppercase_type(reset_methods, "reset method"),
-                        help='Extra reset method used to reset MUT by host test script. %s' % reset_methods_str)
+                        help='Extra reset method used to reset MUT by host test script. %s'% reset_methods_str)
     parser.add_argument('-g', '--goanna-for-tests',
                         dest='goanna_for_tests',
@@ -2057,20 +2041,18 @@ def get_default_test_options_parser():
                         help="Depth level for static memory report")
     return parser
 def test_path_to_name(path, base):
     """Change all slashes in a path into hyphens
     This creates a unique cross-platform test name based on the path
     This can eventually be overriden by a to-be-determined meta-data mechanism"""
     name_parts = []
-    head, tail = os.path.split(relpath(path, base))
+    head, tail = os.path.split(relpath(path,base))
     while (tail and tail != "."):
         name_parts.insert(0, tail)
         head, tail = os.path.split(head)
     return "-".join(name_parts).lower()
 def get_test_config(config_name, target_name):
     """Finds the path to a test configuration file
     config_name: path to a custom configuration file OR mbed OS interface "ethernet, wifi_odin, etc"
@@ -2169,7 +2151,6 @@ def print_tests(tests, format="list", sort=True):
         print("Unknown format '%s'" % format)
         sys.exit(1)
 def norm_relative_path(path, start):
     """This function will create a normalized, relative path. It mimics the
     python os.path.relpath function, but also normalizes a Windows-syle path
@@ -2326,16 +2307,16 @@ def build_tests(tests, base_source_paths, build_path, target, toolchain_name,
             # Set the overall result to a failure if a build failure occurred
             if ('reason' in worker_result and
                     not worker_result['reason'] and
                     not isinstance(worker_result['reason'], NotSupportedException)):
                 result = False
                 break
             # Adding binary path to test build result
             if ('result' in worker_result and
                     worker_result['result'] and
                     'bin_file' in worker_result):
                 bin_file = norm_relative_path(worker_result['bin_file'], execution_directory)
                 test_key = 'test_apps' if 'test_apps-' in worker_result['kwargs']['project_id'] else 'tests'
@@ -2382,6 +2363,4 @@ def build_tests(tests, base_source_paths, build_path, target, toolchain_name,
 def test_spec_from_test_builds(test_builds):
     return {
         "builds": test_builds
     }