mirror of https://github.com/ARMmbed/mbed-os.git
Remove autoformatting
parent 3b3bed2406
commit bf1781b005
@@ -31,13 +31,11 @@ import datetime
 import threading
 import ctypes
 import functools
-import subprocess
 from colorama import Fore, Back, Style
 from prettytable import PrettyTable
 from copy import copy, deepcopy

 from time import sleep, time

 try:
 from Queue import Queue, Empty
 except ImportError:
@@ -116,7 +114,6 @@ class ProcessObserver(Thread):
 class SingleTestExecutor(threading.Thread):
 """ Example: Single test class in separate thread usage
 """
-
 def __init__(self, single_test):
 self.single_test = single_test
 threading.Thread.__init__(self)
@@ -299,8 +296,7 @@ class SingleTestRunner(object):
 (_hostname, _uname) = self.db_logger.get_hostname()
 _host_location = os.path.dirname(os.path.abspath(__file__))
 build_id_type = None if self.opts_only_build_tests is None else self.db_logger.BUILD_ID_TYPE_BUILD_ONLY
-self.db_logger_build_id = self.db_logger.get_next_build_id(_hostname, desc=_uname,
-location=_host_location, type=build_id_type)
+self.db_logger_build_id = self.db_logger.get_next_build_id(_hostname, desc=_uname, location=_host_location, type=build_id_type)
 self.db_logger.disconnect()

 def dump_options(self):
@@ -369,6 +365,7 @@ class SingleTestRunner(object):
 'shuffle_random_seed': self.shuffle_random_seed
 }

+
 # print '=== %s::%s ===' % (target, toolchain)
 # Let's build our test
 if target not in TARGET_MAP:
@@ -441,13 +438,13 @@ class SingleTestRunner(object):
 _extra=json.dumps(self.dump_options()))
 self.db_logger.disconnect();

-valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids,
-self.opts_include_non_automated)
+valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids, self.opts_include_non_automated)
 skipped_test_map_keys = self.get_skipped_tests(test_map_keys, valid_test_map_keys)

 for skipped_test_id in skipped_test_map_keys:
 test_suite_properties['skipped'].append(skipped_test_id)

+
 # First pass through all tests and determine which libraries need to be built
 libraries = []
 for test_id in valid_test_map_keys:
@@ -459,6 +456,7 @@ class SingleTestRunner(object):
 if lib['build_dir'] in test.dependencies and lib['id'] not in libraries:
 libraries.append(lib['id'])

+
 clean_project_options = True if self.opts_goanna_for_tests or clean or self.opts_clean else None

 # Build all required libraries
@@ -480,6 +478,7 @@ class SingleTestRunner(object):
 'There were errors while building library %s' % lib_id))
 continue

+
 for test_id in valid_test_map_keys:
 test = TEST_MAP[test_id]

@@ -524,6 +523,7 @@ class SingleTestRunner(object):
 except Exception as e:
 project_name_str = project_name if project_name is not None else test_id

+
 test_result = self.TEST_RESULT_FAIL

 if isinstance(e, ToolException):
@@ -538,6 +538,7 @@ class SingleTestRunner(object):
 'Project %s is not supported' % project_name_str))
 test_result = self.TEST_RESULT_NOT_SUPPORTED

+
 # Append test results to global test summary
 self.test_summary.append(
 (test_result, target, toolchain, test_id,
@@ -626,6 +627,7 @@ class SingleTestRunner(object):
 if self.opts_shuffle_test_seed is not None and self.is_shuffle_seed_float():
 self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND)

+
 if self.opts_parallel_test_exec:
 ###################################################################
 # Experimental, parallel test execution per singletest instance.
@@ -638,8 +640,7 @@ class SingleTestRunner(object):
 # get information about available MUTs (per target).
 for target, toolchains in self.test_spec['targets'].items():
 self.test_suite_properties_ext[target] = {}
-t = threading.Thread(target=self.execute_thread_slice, args=(
-q, target, toolchains, clean, test_ids, self.build_report, self.build_properties))
+t = threading.Thread(target=self.execute_thread_slice, args = (q, target, toolchains, clean, test_ids, self.build_report, self.build_properties))
 t.daemon = True
 t.start()
 execute_threads.append(t)
@@ -652,15 +653,13 @@ class SingleTestRunner(object):
 if target not in self.test_suite_properties_ext:
 self.test_suite_properties_ext[target] = {}

-self.execute_thread_slice(q, target, toolchains, clean, test_ids, self.build_report,
-self.build_properties)
+self.execute_thread_slice(q, target, toolchains, clean, test_ids, self.build_report, self.build_properties)
 q.get()

 if self.db_logger:
 self.db_logger.reconnect();
 if self.db_logger.is_connected():
-self.db_logger.update_build_id_info(self.db_logger_build_id,
-_status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED)
+self.db_logger.update_build_id_info(self.db_logger_build_id, _status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED)
 self.db_logger.disconnect();

 return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext, self.build_report, self.build_properties
@@ -825,8 +824,7 @@ class SingleTestRunner(object):
 result += "\n"

 # Print result count
-result += "Result: " + ' / '.join(
-['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.items()])
+result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.items()])
 shuffle_seed_text = "Shuffle Seed: %.*f\n"% (self.SHUFFLE_SEED_ROUND,
 shuffle_seed if shuffle_seed else self.shuffle_random_seed)
 result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
@@ -947,8 +945,7 @@ class SingleTestRunner(object):
 if not exists(image_path):
 single_test_result = self.TEST_RESULT_NO_IMAGE
 elapsed_time = 0
-single_test_output = self.logger.log_line(self.logger.LogType.ERROR,
-'Image file does not exist: %s' % image_path)
+single_test_output = self.logger.log_line(self.logger.LogType.ERROR, 'Image file does not exist: %s'% image_path)
 print(single_test_output)
 else:
 # Host test execution
@@ -1009,8 +1006,7 @@ class SingleTestRunner(object):
 if self.db_logger:
 self.db_logger.disconnect()

-return (self.shape_global_test_loop_result(test_all_result,
-self.opts_waterfall_test and self.opts_consolidate_waterfall_test),
+return (self.shape_global_test_loop_result(test_all_result, self.opts_waterfall_test and self.opts_consolidate_waterfall_test),
 target_name_unique,
 toolchain_name,
 test_id,
@@ -1520,8 +1516,7 @@ def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=','
 pt.align['percent [%]'] = "r"
 for unique_id in unique_test_id:
 # print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
-percent_progress = round(
-100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
+percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
 str_progress = progress_bar(percent_progress, 75)
 row = [unique_id,
 counter_dict_test_id_types[unique_id],
@@ -1578,32 +1573,26 @@ def singletest_in_cli_mode(single_test):
 if single_test.opts_report_html_file_name:
 # Export results in form of HTML report to separate file
 report_exporter = ReportExporter(ResultExporterType.HTML)
-report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name,
-test_suite_properties=test_suite_properties_ext)
+report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
 if single_test.opts_report_junit_file_name:
 # Export results in form of JUnit XML report to separate file
 report_exporter = ReportExporter(ResultExporterType.JUNIT)
-report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name,
-test_suite_properties=test_suite_properties_ext)
+report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
 if single_test.opts_report_text_file_name:
 # Export results in form of a text file
 report_exporter = ReportExporter(ResultExporterType.TEXT)
-report_exporter.report_to_file(test_summary_ext, single_test.opts_report_text_file_name,
-test_suite_properties=test_suite_properties_ext)
+report_exporter.report_to_file(test_summary_ext, single_test.opts_report_text_file_name, test_suite_properties=test_suite_properties_ext)
 if single_test.opts_report_build_file_name:
 # Export build results as html report to sparate file
 report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
-report_exporter.report_to_file(build_report, single_test.opts_report_build_file_name,
-test_suite_properties=build_properties)
+report_exporter.report_to_file(build_report, single_test.opts_report_build_file_name, test_suite_properties=build_properties)

 # Returns True if no build failures of the test projects or their dependencies
 return status


 class TestLogger():
 """ Super-class for logging and printing ongoing events for test suite pass
 """

 def __init__(self, store_log=True):
 """ We can control if logger actually stores log in memory
 or just handled all log entries immediately
@@ -1640,7 +1629,6 @@ class TestLogger():
 class CLITestLogger(TestLogger):
 """ Logger used with CLI (Command line interface) test suite. Logs on screen and to file if needed
 """
-
 def __init__(self, store_log=True, file_name=None):
 TestLogger.__init__(self)
 self.log_file_name = file_name
@@ -1651,8 +1639,7 @@ class CLITestLogger(TestLogger):
 """ Prints on screen formatted log entry
 """
 ts = log_entry['log_timestamp']
-timestamp_str = datetime.datetime.fromtimestamp(ts).strftime(
-"[%s] " % self.TIMESTAMP_FORMAT) if timestamp else ''
+timestamp_str = datetime.datetime.fromtimestamp(ts).strftime("[%s] "% self.TIMESTAMP_FORMAT) if timestamp else ''
 log_line_str = "%(log_type)s: %(log_line)s"% (log_entry)
 return timestamp_str + log_line_str

@@ -1715,7 +1702,6 @@ def get_module_avail(module_name):
 """
 return module_name in sys.modules.keys()

-
 def get_autodetected_MUTS_list(platform_name_filter=None):
 oldError = None
 if os.name == 'nt':
@@ -1730,7 +1716,6 @@ def get_autodetected_MUTS_list(platform_name_filter=None):

 return get_autodetected_MUTS(detect_muts_list, platform_name_filter=platform_name_filter)

-
 def get_autodetected_MUTS(mbeds_list, platform_name_filter=None):
 """ Function detects all connected to host mbed-enabled devices and generates artificial MUTS file.
 If function fails to auto-detect devices it will return empty dictionary.
@@ -1755,8 +1740,7 @@ def get_autodetected_MUTS(mbeds_list, platform_name_filter=None):
 # For mcu_unique - we are assigning 'platform_name_unique' value from mbedls output (if its existing)
 # if not we are creating our own unique value (last few chars from platform's target_id).
 m = {'mcu': mut['platform_name'],
-'mcu_unique': mut['platform_name_unique'] if 'platform_name_unique' in mut else "%s[%s]" % (
-mut['platform_name'], mut['target_id'][-4:]),
+'mcu_unique' : mut['platform_name_unique'] if 'platform_name_unique' in mut else "%s[%s]" % (mut['platform_name'], mut['target_id'][-4:]),
 'port': mut['serial_port'],
 'disk': mut['mount_point'],
 'peripherals': [] # No peripheral detection
@@ -2057,7 +2041,6 @@ def get_default_test_options_parser():
 help="Depth level for static memory report")
 return parser

-
 def test_path_to_name(path, base):
 """Change all slashes in a path into hyphens
 This creates a unique cross-platform test name based on the path
@@ -2070,7 +2053,6 @@ def test_path_to_name(path, base):

 return "-".join(name_parts).lower()

-
 def get_test_config(config_name, target_name):
 """Finds the path to a test configuration file
 config_name: path to a custom configuration file OR mbed OS interface "ethernet, wifi_odin, etc"
@@ -2169,7 +2151,6 @@ def print_tests(tests, format="list", sort=True):
 print("Unknown format '%s'" % format)
 sys.exit(1)

-
 def norm_relative_path(path, start):
 """This function will create a normalized, relative path. It mimics the
 python os.path.relpath function, but also normalizes a Windows-syle path
@@ -2383,5 +2364,3 @@ def test_spec_from_test_builds(test_builds):
 return {
 "builds": test_builds
 }
-
-