Reorganized test_api.py to build all required libraries (RTOS, Ethernet, etc.) for tests up front, before any tests are built

pull/1040/head
Brian Daniels 2015-04-13 17:07:21 -05:00
parent 4b0926fcdb
commit cb4e13c1e3
2 changed files with 232 additions and 135 deletions
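The heart of the change shows up in the test_api.py hunks below: instead of rebuilding dependency libraries inside the per-test loop, the runner now makes a first pass over the valid tests to collect the set of required libraries, builds each one once per target/toolchain, and only then builds the tests. A standalone sketch of that pattern, using simplified stand-ins for TEST_MAP, LIBRARIES, build_lib and build_project rather than the real workspace_tools objects:

# Minimal sketch of the "collect dependencies first, build once" reorganization.
# TEST_MAP, LIBRARIES, build_lib and build_project are simplified stand-ins,
# not the actual workspace_tools implementations.
LIBRARIES = [
    {"id": "rtos", "build_dir": "rtos"},
    {"id": "eth", "build_dir": "eth"},
]
TEST_MAP = {
    "RTOS_1": {"dependencies": ["rtos"]},
    "NET_1": {"dependencies": ["rtos", "eth"]},
    "MBED_A1": {"dependencies": []},
}

def build_lib(lib_id, target, toolchain):
    print("building library %s for %s/%s" % (lib_id, target, toolchain))

def build_project(test_id, target, toolchain):
    print("building test %s for %s/%s" % (test_id, target, toolchain))

def build_all(valid_test_ids, target, toolchain):
    # First pass: determine which libraries any of the tests need.
    libraries = set()
    for test_id in valid_test_ids:
        for lib in LIBRARIES:
            if lib["build_dir"] in TEST_MAP[test_id]["dependencies"]:
                libraries.add(lib["id"])

    # Build each required library exactly once, up front.
    for lib_id in sorted(libraries):
        build_lib(lib_id, target, toolchain)

    # Second pass: build the tests themselves.
    for test_id in valid_test_ids:
        build_project(test_id, target, toolchain)

build_all(["RTOS_1", "NET_1", "MBED_A1"], "LPC1768", "ARM")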


@@ -209,6 +209,7 @@ if __name__ == '__main__':
         _opts_log_file_name=opts.log_file_name,
         _opts_report_html_file_name=opts.report_html_file_name,
         _opts_report_junit_file_name=opts.report_junit_file_name,
+        _opts_report_jenkins_file_name=opts.report_jenkins_file_name,
         _test_spec=test_spec,
         _opts_goanna_for_mbed_sdk=opts.goanna_for_mbed_sdk,
         _opts_goanna_for_tests=opts.goanna_for_tests,


@@ -48,6 +48,7 @@ from workspace_tools.targets import TARGET_MAP
 from workspace_tools.test_db import BaseDBAccess
 from workspace_tools.build_api import build_project, build_mbed_libs, build_lib
 from workspace_tools.build_api import get_target_supported_toolchains
+from workspace_tools.build_api import write_build_report
 from workspace_tools.libraries import LIBRARIES, LIBRARY_MAP
 from workspace_tools.toolchains import TOOLCHAIN_BIN_PATH
 from workspace_tools.test_exporters import ReportExporter, ResultExporterType
@@ -153,6 +154,7 @@ class SingleTestRunner(object):
                  _opts_log_file_name=None,
                  _opts_report_html_file_name=None,
                  _opts_report_junit_file_name=None,
+                 _opts_report_jenkins_file_name=None,
                  _test_spec={},
                  _opts_goanna_for_mbed_sdk=None,
                  _opts_goanna_for_tests=None,
@@ -205,6 +207,7 @@ class SingleTestRunner(object):
         self.opts_log_file_name = _opts_log_file_name
         self.opts_report_html_file_name = _opts_report_html_file_name
         self.opts_report_junit_file_name = _opts_report_junit_file_name
+        self.opts_report_jenkins_file_name = _opts_report_jenkins_file_name
         self.opts_goanna_for_mbed_sdk = _opts_goanna_for_mbed_sdk
         self.opts_goanna_for_tests = _opts_goanna_for_tests
         self.opts_shuffle_test_order = _opts_shuffle_test_order
@@ -294,8 +297,17 @@ class SingleTestRunner(object):
     test_summary_ext = {}
     execute_thread_slice_lock = Lock()

-    def execute_thread_slice(self, q, target, toolchains, clean, test_ids):
+    def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report):
         for toolchain in toolchains:
+            # Toolchain specific build successes and failures
+            build_report[toolchain] = {
+                "mbed_failure": False,
+                "library_failure": False,
+                "library_build_successes": [],
+                "library_build_failures": [],
+                "test_build_successes": [],
+                "test_build_failures": []
+            }
             # print target, toolchain
             # Test suite properties returned to external tools like CI
             test_suite_properties = {}
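Each thread slice now seeds build_report[toolchain] with the dictionary shown in this hunk and fills it in as the mbed SDK, libraries and tests are built. As a rough illustration, one populated entry could look like the example below after a run (the target, library and test names are invented for the example, not taken from this change):

# Illustrative contents of build_report for one toolchain after a run;
# the library and test names below are invented for the example.
build_report = {
    "ARM": {
        "mbed_failure": False,
        "library_failure": True,
        "library_build_successes": ["rtos"],
        "library_build_failures": ["eth"],
        "test_build_successes": ["MBED_A1", "RTOS_1"],
        "test_build_failures": ["NET_1"],
    }
}

# A consumer can then decide whether the whole slice was clean:
slice_ok = not any(
    report["mbed_failure"] or report["library_failure"] or report["test_build_failures"]
    for report in build_report.values()
)
print(slice_ok)  # False in this example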
@@ -306,6 +318,7 @@ class SingleTestRunner(object):
             test_suite_properties['toolchain'] = toolchain
             test_suite_properties['shuffle_random_seed'] = self.shuffle_random_seed
             # print '=== %s::%s ===' % (target, toolchain)
+
             # Let's build our test
             if target not in TARGET_MAP:
@@ -328,9 +341,9 @@ class SingleTestRunner(object):
                     continue
             except ToolException:
                 print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain))
+                build_report[toolchain]["mbed_failure"] = True
                 #return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
-                q.put(target + '_'.join(toolchains))
-                return
+                continue

             build_dir = join(BUILD_DIR, "test", target, toolchain)
@@ -340,6 +353,7 @@ class SingleTestRunner(object):
             # Enumerate through all tests and shuffle test order if requested
             test_map_keys = sorted(TEST_MAP.keys())
+
             if self.opts_shuffle_test_order:
                 random.shuffle(test_map_keys, self.shuffle_random_func)
                 # Update database with shuffle seed f applicable
@@ -358,148 +372,128 @@ class SingleTestRunner(object):
                         self.db_logger.update_build_id_info(self.db_logger_build_id, _extra=json.dumps(self.dump_options()))
                         self.db_logger.disconnect();

-            for test_id in test_map_keys:
-                test = TEST_MAP[test_id]
-                if self.opts_test_by_names and test_id not in self.opts_test_by_names.split(','):
-                    continue
-                if test_ids and test_id not in test_ids:
-                    continue
-                if self.opts_test_only_peripheral and not test.peripherals:
-                    if self.opts_verbose_skipped_tests:
-                        print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target))
-                    test_suite_properties['skipped'].append(test_id)
-                    continue
-                if self.opts_peripheral_by_names and test.peripherals and not len([i for i in test.peripherals if i in self.opts_peripheral_by_names.split(',')]):
-                    # We will skip tests not forced with -p option
-                    if self.opts_verbose_skipped_tests:
-                        print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target))
-                    test_suite_properties['skipped'].append(test_id)
-                    continue
-                if self.opts_test_only_common and test.peripherals:
-                    if self.opts_verbose_skipped_tests:
-                        print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral test skipped for target %s'% (target))
-                    test_suite_properties['skipped'].append(test_id)
-                    continue
-                if test.automated and test.is_supported(target, toolchain):
-                    if test.peripherals is None and self.opts_only_build_tests:
-                        # When users are using 'build only flag' and test do not have
-                        # specified peripherals we can allow test building by default
-                        pass
-                    elif self.opts_peripheral_by_names and test_id not in self.opts_peripheral_by_names.split(','):
-                        # If we force peripheral with option -p we expect test
-                        # to pass even if peripheral is not in MUTs file.
-                        pass
-                    elif not self.is_peripherals_available(target, test.peripherals):
-                        if self.opts_verbose_skipped_tests:
-                            if test.peripherals:
-                                print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral %s test skipped for target %s'% (",".join(test.peripherals), target))
-                            else:
-                                print self.logger.log_line(self.logger.LogType.INFO, 'Test %s skipped for target %s'% (test_id, target))
-                        test_suite_properties['skipped'].append(test_id)
-                        continue
-                    build_project_options = ["analyze"] if self.opts_goanna_for_tests else None
-                    clean_project_options = True if self.opts_goanna_for_tests or clean or self.opts_clean else None
-                    # Detect which lib should be added to test
-                    # Some libs have to compiled like RTOS or ETH
-                    libraries = []
-                    for lib in LIBRARIES:
-                        if lib['build_dir'] in test.dependencies:
-                            libraries.append(lib['id'])
-                    # Build libs for test
-                    for lib_id in libraries:
-                        try:
-                            build_lib(lib_id,
-                                      T,
-                                      toolchain,
-                                      options=build_project_options,
-                                      verbose=self.opts_verbose,
-                                      clean=clean_mbed_libs_options,
-                                      jobs=self.opts_jobs)
-                        except ToolException:
-                            print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building library %s'% (lib_id))
-                            #return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
-                            q.put(target + '_'.join(toolchains))
-                            return
-                    test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries)
-                    # TODO: move this 2 below loops to separate function
-                    INC_DIRS = []
-                    for lib_id in libraries:
-                        if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
-                            INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])
-                    MACROS = []
-                    for lib_id in libraries:
-                        if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
-                            MACROS.extend(LIBRARY_MAP[lib_id]['macros'])
-                    MACROS.append('TEST_SUITE_TARGET_NAME="%s"'% target)
-                    MACROS.append('TEST_SUITE_TEST_ID="%s"'% test_id)
-                    test_uuid = uuid.uuid4()
-                    MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))
-                    project_name = self.opts_firmware_global_name if self.opts_firmware_global_name else None
-                    try:
-                        path = build_project(test.source_dir,
-                                             join(build_dir, test_id),
-                                             T,
-                                             toolchain,
-                                             test.dependencies,
-                                             options=build_project_options,
-                                             clean=clean_project_options,
-                                             verbose=self.opts_verbose,
-                                             name=project_name,
-                                             macros=MACROS,
-                                             inc_dirs=INC_DIRS,
-                                             jobs=self.opts_jobs)
-                    except ToolException:
-                        project_name_str = project_name if project_name is not None else test_id
-                        print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building project %s'% (project_name_str))
-                        # return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
-                        q.put(target + '_'.join(toolchains))
-                        return
-                    if self.opts_only_build_tests:
-                        # With this option we are skipping testing phase
-                        continue
-                    # Test duration can be increased by global value
-                    test_duration = test.duration
-                    if self.opts_extend_test_timeout is not None:
-                        test_duration += self.opts_extend_test_timeout
-                    # For an automated test the duration act as a timeout after
-                    # which the test gets interrupted
-                    test_spec = self.shape_test_request(target, path, test_id, test_duration)
-                    test_loops = self.get_test_loop_count(test_id)
-                    test_suite_properties['test.duration.%s.%s.%s'% (target, toolchain, test_id)] = test_duration
-                    test_suite_properties['test.loops.%s.%s.%s'% (target, toolchain, test_id)] = test_loops
-                    test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path
-                    # read MUTs, test specification and perform tests
-                    single_test_result, detailed_test_results = self.handle(test_spec, target, toolchain, test_loops=test_loops)
-                    # Append test results to global test summary
-                    if single_test_result is not None:
-                        self.test_summary.append(single_test_result)
-                    # Prepare extended test results data structure (it can be used to generate detailed test report)
-                    if toolchain not in self.test_summary_ext:
-                        self.test_summary_ext[toolchain] = {}    # test_summary_ext : toolchain
-                    if target not in self.test_summary_ext[toolchain]:
-                        self.test_summary_ext[toolchain][target] = {}    # test_summary_ext : toolchain : target
-                    if target not in self.test_summary_ext[toolchain][target]:
-                        self.test_summary_ext[toolchain][target][test_id] = detailed_test_results    # test_summary_ext : toolchain : target : test_it
+            valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids)
+            skipped_test_map_keys = self.get_skipped_tests(test_map_keys, valid_test_map_keys)
+
+            for skipped_test_id in skipped_test_map_keys:
+                test_suite_properties['skipped'].append(skipped_test_id)
+
+            # First pass through all tests and determine which libraries need to be built
+            libraries = set()
+            for test_id in valid_test_map_keys:
+                test = TEST_MAP[test_id]
+
+                # Detect which lib should be added to test
+                # Some libs have to compiled like RTOS or ETH
+                for lib in LIBRARIES:
+                    if lib['build_dir'] in test.dependencies:
+                        libraries.add(lib['id'])
+
+            build_project_options = ["analyze"] if self.opts_goanna_for_tests else None
+            clean_project_options = True if self.opts_goanna_for_tests or clean or self.opts_clean else None
+
+            # Build all required libraries
+            for lib_id in libraries:
+                try:
+                    build_lib(lib_id,
+                              T,
+                              toolchain,
+                              options=build_project_options,
+                              verbose=self.opts_verbose,
+                              clean=clean_mbed_libs_options,
+                              jobs=self.opts_jobs)
+                except ToolException:
+                    print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building library %s'% (lib_id))
+                    build_report[toolchain]["library_failure"] = True
+                    build_report[toolchain]["library_build_failures"].append(lib_id)
+                    #return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
+                    continue
+
+                build_report[toolchain]["library_build_successes"].append(lib_id)
+
+            for test_id in valid_test_map_keys:
+                test = TEST_MAP[test_id]
+
+                test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries)
+
+                # TODO: move this 2 below loops to separate function
+                INC_DIRS = []
+                for lib_id in libraries:
+                    if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
+                        INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])
+
+                MACROS = []
+                for lib_id in libraries:
+                    if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
+                        MACROS.extend(LIBRARY_MAP[lib_id]['macros'])
+
+                MACROS.append('TEST_SUITE_TARGET_NAME="%s"'% target)
+                MACROS.append('TEST_SUITE_TEST_ID="%s"'% test_id)
+                test_uuid = uuid.uuid4()
+                MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))
+
+                project_name = self.opts_firmware_global_name if self.opts_firmware_global_name else None
+                try:
+                    path = build_project(test.source_dir,
+                                         join(build_dir, test_id),
+                                         T,
+                                         toolchain,
+                                         test.dependencies,
+                                         options=build_project_options,
+                                         clean=clean_project_options,
+                                         verbose=self.opts_verbose,
+                                         name=project_name,
+                                         macros=MACROS,
+                                         inc_dirs=INC_DIRS,
+                                         jobs=self.opts_jobs)
+                except ToolException:
+                    project_name_str = project_name if project_name is not None else test_id
+                    print "DIS BE MAH ERRRRR: %s" % (str(ToolException))
+                    print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building project %s'% (project_name_str))
+                    build_report[toolchain]["test_build_failures"].append(test_id)
+                    # return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
+                    continue
+
+                build_report[toolchain]["test_build_successes"].append(test_id)
+
+                if self.opts_only_build_tests:
+                    # With this option we are skipping testing phase
+                    continue
+
+                # Test duration can be increased by global value
+                test_duration = test.duration
+                if self.opts_extend_test_timeout is not None:
+                    test_duration += self.opts_extend_test_timeout
+
+                # For an automated test the duration act as a timeout after
+                # which the test gets interrupted
+                test_spec = self.shape_test_request(target, path, test_id, test_duration)
+                test_loops = self.get_test_loop_count(test_id)
+
+                test_suite_properties['test.duration.%s.%s.%s'% (target, toolchain, test_id)] = test_duration
+                test_suite_properties['test.loops.%s.%s.%s'% (target, toolchain, test_id)] = test_loops
+                test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path
+
+                # read MUTs, test specification and perform tests
+                single_test_result, detailed_test_results = self.handle(test_spec, target, toolchain, test_loops=test_loops)
+
+                # Append test results to global test summary
+                if single_test_result is not None:
+                    self.test_summary.append(single_test_result)
+
+                # Prepare extended test results data structure (it can be used to generate detailed test report)
+                if toolchain not in self.test_summary_ext:
+                    self.test_summary_ext[toolchain] = {}    # test_summary_ext : toolchain
+                if target not in self.test_summary_ext[toolchain]:
+                    self.test_summary_ext[toolchain][target] = {}    # test_summary_ext : toolchain : target
+                if target not in self.test_summary_ext[toolchain][target]:
+                    self.test_summary_ext[toolchain][target][test_id] = detailed_test_results    # test_summary_ext : toolchain : target : test_it

             test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
             self.test_suite_properties_ext[target][toolchain] = test_suite_properties

         # return self.test_summary, self.shuffle_random_seed, test_summary_ext, self.test_suite_properties_ext
         q.put(target + '_'.join(toolchains))
         return
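One behavioural consequence of this hunk: a failed library or test build no longer aborts the whole thread slice with q.put()/return; the failure is recorded in build_report and the loop moves on to the next item. A small generic sketch of that record-and-continue pattern (build_one and its error type are placeholders for illustration, not workspace_tools code):

# Record-and-continue error handling: keep building the remaining items and
# collect failures instead of returning on the first error.
def build_one(item):
    # Placeholder build step used only for this sketch.
    if item == "bad":
        raise RuntimeError("build failed")
    return "artifact-for-%s" % item

def build_many(items):
    report = {"successes": [], "failures": []}
    for item in items:
        try:
            build_one(item)
        except RuntimeError:
            report["failures"].append(item)
            continue
        report["successes"].append(item)
    return report

print(build_many(["a", "bad", "b"]))  # failures: ['bad'], successes: ['a', 'b']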
@@ -514,6 +508,8 @@ class SingleTestRunner(object):
         if self.opts_shuffle_test_seed is not None and self.is_shuffle_seed_float():
             self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND)

+        build_reports = []
+
         if self.opts_parallel_test_exec:
             ###################################################################
             # Experimental, parallel test execution per singletest instance.
@@ -526,7 +522,9 @@ class SingleTestRunner(object):
             # get information about available MUTs (per target).
             for target, toolchains in self.test_spec['targets'].iteritems():
                 self.test_suite_properties_ext[target] = {}
-                t = threading.Thread(target=self.execute_thread_slice, args = (q, target, toolchains, clean, test_ids))
+                cur_build_report = {}
+                t = threading.Thread(target=self.execute_thread_slice, args = (q, target, toolchains, clean, test_ids, cur_build_report))
+                build_reports.append({ "target": target, "report": cur_build_report})
                 t.daemon = True
                 t.start()
                 execute_threads.append(t)
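In the parallel path each worker thread is handed its own mutable cur_build_report dictionary, and a reference to it is stored in build_reports before the thread starts, so the main thread can read the results once the workers finish. A self-contained sketch of that pattern (the worker body and target names are invented for illustration):

import threading

# Each thread fills in the dict it was given; the caller keeps references
# to all of them and reads the results after join().
def worker(target, report):
    report["built"] = ["lib_a", "lib_b"]
    report["failed"] = []

build_reports = []
threads = []
for target in ["LPC1768", "K64F"]:
    cur_report = {}
    t = threading.Thread(target=worker, args=(target, cur_report))
    build_reports.append({"target": target, "report": cur_report})
    threads.append(t)
    t.start()

for t in threads:
    t.join()

print(build_reports)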
@@ -538,16 +536,107 @@ class SingleTestRunner(object):
             for target, toolchains in self.test_spec['targets'].iteritems():
                 if target not in self.test_suite_properties_ext:
                     self.test_suite_properties_ext[target] = {}
-                self.execute_thread_slice(q, target, toolchains, clean, test_ids)
+
+                cur_build_report = {}
+                self.execute_thread_slice(q, target, toolchains, clean, test_ids, cur_build_report)
+                build_reports.append({ "target": target, "report": cur_build_report})
                 q.get()

+        print build_reports
+
+        build_report = []
+
+        for target_build_report in build_reports:
+            cur_report = {
+                "target": target_build_report["target"],
+                "successes": [],
+                "failures": []
+            }
+
+            for toolchain in sorted(target_build_report["report"], key=target_build_report["report"].get):
+                print "%s - %s" % (target_build_report["target"], toolchain)
+                report = target_build_report["report"][toolchain]
+
+                if report["mbed_failure"]:
+                    cur_report["failures"].append("mbed::%s" % (toolchain))
+                elif report["library_failure"]:
+                    for library in report["library_build_failures"]:
+                        cur_report["failures"].append("Library::%s::%s" % (library, toolchain))
+                else:
+                    cur_report["successes"].append("All Libraries::%s" % (toolchain))
+
+                if len(report["test_build_failures"]) > 0:
+                    for successful_test in report["test_build_successes"]:
+                        cur_report["successes"].append("Test::%s::%s" % (toolchain, successful_test))
+                    for failed_test in report["test_build_failures"]:
+                        cur_report["failures"].append("Test::%s::%s" % (toolchain, failed_test))
+                else:
+                    cur_report["successes"].append("All Tests::%s" % (toolchain))
+
+            build_report.append(cur_report)
+
         if self.db_logger:
             self.db_logger.reconnect();
             if self.db_logger.is_connected():
                 self.db_logger.update_build_id_info(self.db_logger_build_id, _status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED)
                 self.db_logger.disconnect();

-        return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
+        return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext, build_report
+
+    def get_valid_tests(self, test_map_keys, target, toolchain, test_ids):
+        valid_test_map_keys = []
+
+        for test_id in test_map_keys:
+            test = TEST_MAP[test_id]
+            if self.opts_test_by_names and test_id not in self.opts_test_by_names.split(','):
+                continue
+
+            if test_ids and test_id not in test_ids:
+                continue
+
+            if self.opts_test_only_peripheral and not test.peripherals:
+                if self.opts_verbose_skipped_tests:
+                    print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target))
+                continue
+
+            if self.opts_peripheral_by_names and test.peripherals and not len([i for i in test.peripherals if i in self.opts_peripheral_by_names.split(',')]):
+                # We will skip tests not forced with -p option
+                if self.opts_verbose_skipped_tests:
+                    print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target))
+                continue
+
+            if self.opts_test_only_common and test.peripherals:
+                if self.opts_verbose_skipped_tests:
+                    print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral test skipped for target %s'% (target))
+                continue
+
+            if test.automated and test.is_supported(target, toolchain):
+                if test.peripherals is None and self.opts_only_build_tests:
+                    # When users are using 'build only flag' and test do not have
+                    # specified peripherals we can allow test building by default
+                    pass
+                elif self.opts_peripheral_by_names and test_id not in self.opts_peripheral_by_names.split(','):
+                    # If we force peripheral with option -p we expect test
+                    # to pass even if peripheral is not in MUTs file.
+                    pass
+                elif not self.is_peripherals_available(target, test.peripherals):
+                    if self.opts_verbose_skipped_tests:
+                        if test.peripherals:
+                            print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral %s test skipped for target %s'% (",".join(test.peripherals), target))
+                        else:
+                            print self.logger.log_line(self.logger.LogType.INFO, 'Test %s skipped for target %s'% (test_id, target))
+                    continue
+
+                # The test has made it through all the filters, so add it to the valid tests list
+                valid_test_map_keys.append(test_id)
+
+        return valid_test_map_keys
+
+    def get_skipped_tests(self, all_test_map_keys, valid_test_map_keys):
+        # NOTE: This will not preserve order
+        return list(set(all_test_map_keys) - set(valid_test_map_keys))

     def generate_test_summary_by_target(self, test_summary, shuffle_seed=None):
         """ Prints well-formed summary with results (SQL table like)
@@ -1314,7 +1403,7 @@ def singletest_in_cli_mode(single_test):
     """
     start = time()
     # Execute tests depending on options and filter applied
-    test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext = single_test.execute()
+    test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report = single_test.execute()
     elapsed_time = time() - start

     # Human readable summary
@@ -1333,9 +1422,12 @@ def singletest_in_cli_mode(single_test):
         report_exporter = ReportExporter(ResultExporterType.HTML)
         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
     if single_test.opts_report_junit_file_name:
-        # Export results in form of HTML report to separate file
+        # Export results in form of JUnit XML report to separate file
         report_exporter = ReportExporter(ResultExporterType.JUNIT)
         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
+    if single_test.opts_report_jenkins_file_name:
+        # Export build results as Jenkins XML report ti sparate file
+        write_build_report(build_report, single_test.opts_report_jenkins_file_name)

 class TestLogger():
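write_build_report itself lives in workspace_tools.build_api and its implementation is not part of this diff. Purely as an illustration of the kind of JUnit-style XML that Jenkins consumes, a hypothetical minimal writer for the build_report structure might look like the sketch below (the function name, attributes and sample data are assumptions made for the example, not the real API):

# Hypothetical minimal writer for the build_report structure, shown only to
# illustrate JUnit-style XML output; not the actual write_build_report.
import xml.etree.ElementTree as ET

def write_build_report_sketch(build_report, file_name):
    testsuites = ET.Element("testsuites")
    for target_report in build_report:
        suite = ET.SubElement(testsuites, "testsuite",
                              name=target_report["target"],
                              failures=str(len(target_report["failures"])),
                              tests=str(len(target_report["successes"]) +
                                        len(target_report["failures"])))
        for name in target_report["successes"]:
            ET.SubElement(suite, "testcase", name=name)
        for name in target_report["failures"]:
            case = ET.SubElement(suite, "testcase", name=name)
            ET.SubElement(case, "failure", message="build failed")
    ET.ElementTree(testsuites).write(file_name)

write_build_report_sketch(
    [{"target": "LPC1768", "successes": ["All Tests::ARM"], "failures": []}],
    "build_report.xml")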
@@ -1706,6 +1798,10 @@ def get_default_test_options_parser():
                       dest='report_junit_file_name',
                       help='You can log test suite results in form of JUnit compliant XML report')

+    parser.add_option("", "--report-jenkins",
+                      dest="report_jenkins_file_name",
+                      help="Output the build results to an xml file that is readable by Jenkins")
+
     parser.add_option('', '--verbose-skipped',
                       dest='verbose_skipped_tests',
                       default=False,