mirror of https://github.com/ARMmbed/mbed-os.git
Remove autoformatting
parent 3b3bed2406
commit bf1781b005
@@ -31,13 +31,11 @@ import datetime
import threading
import ctypes
import functools
import subprocess
from colorama import Fore, Back, Style
from prettytable import PrettyTable
from copy import copy, deepcopy

from time import sleep, time

try:
from Queue import Queue, Empty
except ImportError:
@@ -116,7 +114,6 @@ class ProcessObserver(Thread):
class SingleTestExecutor(threading.Thread):
""" Example: Single test class in separate thread usage
"""

def __init__(self, single_test):
self.single_test = single_test
threading.Thread.__init__(self)
@@ -299,8 +296,7 @@ class SingleTestRunner(object):
(_hostname, _uname) = self.db_logger.get_hostname()
_host_location = os.path.dirname(os.path.abspath(__file__))
build_id_type = None if self.opts_only_build_tests is None else self.db_logger.BUILD_ID_TYPE_BUILD_ONLY
self.db_logger_build_id = self.db_logger.get_next_build_id(_hostname, desc=_uname,
location=_host_location, type=build_id_type)
self.db_logger_build_id = self.db_logger.get_next_build_id(_hostname, desc=_uname, location=_host_location, type=build_id_type)
self.db_logger.disconnect()

def dump_options(self):
@@ -369,6 +365,7 @@ class SingleTestRunner(object):
'shuffle_random_seed': self.shuffle_random_seed
}


# print '=== %s::%s ===' % (target, toolchain)
# Let's build our test
if target not in TARGET_MAP:
@@ -441,13 +438,13 @@ class SingleTestRunner(object):
_extra=json.dumps(self.dump_options()))
self.db_logger.disconnect();

valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids,
self.opts_include_non_automated)
valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids, self.opts_include_non_automated)
skipped_test_map_keys = self.get_skipped_tests(test_map_keys, valid_test_map_keys)

for skipped_test_id in skipped_test_map_keys:
test_suite_properties['skipped'].append(skipped_test_id)


# First pass through all tests and determine which libraries need to be built
libraries = []
for test_id in valid_test_map_keys:
@@ -459,6 +456,7 @@ class SingleTestRunner(object):
if lib['build_dir'] in test.dependencies and lib['id'] not in libraries:
libraries.append(lib['id'])


clean_project_options = True if self.opts_goanna_for_tests or clean or self.opts_clean else None

# Build all required libraries
@@ -480,6 +478,7 @@ class SingleTestRunner(object):
'There were errors while building library %s' % lib_id))
continue


for test_id in valid_test_map_keys:
test = TEST_MAP[test_id]

@@ -524,6 +523,7 @@ class SingleTestRunner(object):
except Exception as e:
project_name_str = project_name if project_name is not None else test_id


test_result = self.TEST_RESULT_FAIL

if isinstance(e, ToolException):
@@ -538,6 +538,7 @@ class SingleTestRunner(object):
'Project %s is not supported' % project_name_str))
test_result = self.TEST_RESULT_NOT_SUPPORTED


# Append test results to global test summary
self.test_summary.append(
(test_result, target, toolchain, test_id,
@@ -626,6 +627,7 @@ class SingleTestRunner(object):
if self.opts_shuffle_test_seed is not None and self.is_shuffle_seed_float():
self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND)


if self.opts_parallel_test_exec:
###################################################################
# Experimental, parallel test execution per singletest instance.
@@ -638,8 +640,7 @@ class SingleTestRunner(object):
# get information about available MUTs (per target).
for target, toolchains in self.test_spec['targets'].items():
self.test_suite_properties_ext[target] = {}
t = threading.Thread(target=self.execute_thread_slice, args=(
q, target, toolchains, clean, test_ids, self.build_report, self.build_properties))
t = threading.Thread(target=self.execute_thread_slice, args = (q, target, toolchains, clean, test_ids, self.build_report, self.build_properties))
t.daemon = True
t.start()
execute_threads.append(t)
@@ -652,15 +653,13 @@ class SingleTestRunner(object):
if target not in self.test_suite_properties_ext:
self.test_suite_properties_ext[target] = {}

self.execute_thread_slice(q, target, toolchains, clean, test_ids, self.build_report,
self.build_properties)
self.execute_thread_slice(q, target, toolchains, clean, test_ids, self.build_report, self.build_properties)
q.get()

if self.db_logger:
self.db_logger.reconnect();
if self.db_logger.is_connected():
self.db_logger.update_build_id_info(self.db_logger_build_id,
_status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED)
self.db_logger.update_build_id_info(self.db_logger_build_id, _status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED)
self.db_logger.disconnect();

return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext, self.build_report, self.build_properties
@@ -825,8 +824,7 @@ class SingleTestRunner(object):
result += "\n"

# Print result count
result += "Result: " + ' / '.join(
['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.items()])
result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.items()])
shuffle_seed_text = "Shuffle Seed: %.*f\n"% (self.SHUFFLE_SEED_ROUND,
shuffle_seed if shuffle_seed else self.shuffle_random_seed)
result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
@@ -947,8 +945,7 @@ class SingleTestRunner(object):
if not exists(image_path):
single_test_result = self.TEST_RESULT_NO_IMAGE
elapsed_time = 0
single_test_output = self.logger.log_line(self.logger.LogType.ERROR,
'Image file does not exist: %s' % image_path)
single_test_output = self.logger.log_line(self.logger.LogType.ERROR, 'Image file does not exist: %s'% image_path)
print(single_test_output)
else:
# Host test execution
@@ -1009,8 +1006,7 @@ class SingleTestRunner(object):
if self.db_logger:
self.db_logger.disconnect()

return (self.shape_global_test_loop_result(test_all_result,
self.opts_waterfall_test and self.opts_consolidate_waterfall_test),
return (self.shape_global_test_loop_result(test_all_result, self.opts_waterfall_test and self.opts_consolidate_waterfall_test),
target_name_unique,
toolchain_name,
test_id,
@@ -1520,8 +1516,7 @@ def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=','
pt.align['percent [%]'] = "r"
for unique_id in unique_test_id:
# print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
percent_progress = round(
100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
str_progress = progress_bar(percent_progress, 75)
row = [unique_id,
counter_dict_test_id_types[unique_id],
@@ -1578,32 +1573,26 @@ def singletest_in_cli_mode(single_test):
if single_test.opts_report_html_file_name:
# Export results in form of HTML report to separate file
report_exporter = ReportExporter(ResultExporterType.HTML)
report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name,
test_suite_properties=test_suite_properties_ext)
report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
if single_test.opts_report_junit_file_name:
# Export results in form of JUnit XML report to separate file
report_exporter = ReportExporter(ResultExporterType.JUNIT)
report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name,
test_suite_properties=test_suite_properties_ext)
report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
if single_test.opts_report_text_file_name:
# Export results in form of a text file
report_exporter = ReportExporter(ResultExporterType.TEXT)
report_exporter.report_to_file(test_summary_ext, single_test.opts_report_text_file_name,
test_suite_properties=test_suite_properties_ext)
report_exporter.report_to_file(test_summary_ext, single_test.opts_report_text_file_name, test_suite_properties=test_suite_properties_ext)
if single_test.opts_report_build_file_name:
# Export build results as html report to sparate file
report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
report_exporter.report_to_file(build_report, single_test.opts_report_build_file_name,
test_suite_properties=build_properties)
report_exporter.report_to_file(build_report, single_test.opts_report_build_file_name, test_suite_properties=build_properties)

# Returns True if no build failures of the test projects or their dependencies
return status


class TestLogger():
""" Super-class for logging and printing ongoing events for test suite pass
"""

def __init__(self, store_log=True):
""" We can control if logger actually stores log in memory
or just handled all log entries immediately
@@ -1640,7 +1629,6 @@ class TestLogger():
class CLITestLogger(TestLogger):
""" Logger used with CLI (Command line interface) test suite. Logs on screen and to file if needed
"""

def __init__(self, store_log=True, file_name=None):
TestLogger.__init__(self)
self.log_file_name = file_name
@@ -1651,8 +1639,7 @@ class CLITestLogger(TestLogger):
""" Prints on screen formatted log entry
"""
ts = log_entry['log_timestamp']
timestamp_str = datetime.datetime.fromtimestamp(ts).strftime(
"[%s] " % self.TIMESTAMP_FORMAT) if timestamp else ''
timestamp_str = datetime.datetime.fromtimestamp(ts).strftime("[%s] "% self.TIMESTAMP_FORMAT) if timestamp else ''
log_line_str = "%(log_type)s: %(log_line)s"% (log_entry)
return timestamp_str + log_line_str

@@ -1715,7 +1702,6 @@ def get_module_avail(module_name):
"""
return module_name in sys.modules.keys()


def get_autodetected_MUTS_list(platform_name_filter=None):
oldError = None
if os.name == 'nt':
@@ -1730,7 +1716,6 @@ def get_autodetected_MUTS_list(platform_name_filter=None):

return get_autodetected_MUTS(detect_muts_list, platform_name_filter=platform_name_filter)


def get_autodetected_MUTS(mbeds_list, platform_name_filter=None):
""" Function detects all connected to host mbed-enabled devices and generates artificial MUTS file.
If function fails to auto-detect devices it will return empty dictionary.
@@ -1755,8 +1740,7 @@ def get_autodetected_MUTS(mbeds_list, platform_name_filter=None):
# For mcu_unique - we are assigning 'platform_name_unique' value from mbedls output (if its existing)
# if not we are creating our own unique value (last few chars from platform's target_id).
m = {'mcu': mut['platform_name'],
'mcu_unique': mut['platform_name_unique'] if 'platform_name_unique' in mut else "%s[%s]" % (
mut['platform_name'], mut['target_id'][-4:]),
'mcu_unique' : mut['platform_name_unique'] if 'platform_name_unique' in mut else "%s[%s]" % (mut['platform_name'], mut['target_id'][-4:]),
'port': mut['serial_port'],
'disk': mut['mount_point'],
'peripherals': [] # No peripheral detection
@@ -2057,7 +2041,6 @@ def get_default_test_options_parser():
help="Depth level for static memory report")
return parser


def test_path_to_name(path, base):
"""Change all slashes in a path into hyphens
This creates a unique cross-platform test name based on the path
@@ -2070,7 +2053,6 @@ def test_path_to_name(path, base):

return "-".join(name_parts).lower()


def get_test_config(config_name, target_name):
"""Finds the path to a test configuration file
config_name: path to a custom configuration file OR mbed OS interface "ethernet, wifi_odin, etc"
@@ -2169,7 +2151,6 @@ def print_tests(tests, format="list", sort=True):
print("Unknown format '%s'" % format)
sys.exit(1)


def norm_relative_path(path, start):
"""This function will create a normalized, relative path. It mimics the
python os.path.relpath function, but also normalizes a Windows-syle path
@@ -2383,5 +2364,3 @@ def test_spec_from_test_builds(test_builds):
return {
"builds": test_builds
}