From 373e8c40c0c3cf21bb7d40d474784a8e070d7c48 Mon Sep 17 00:00:00 2001 From: Przemek Wirkus Date: Thu, 30 Apr 2015 19:39:30 +0100 Subject: [PATCH 1/2] Fixed test build failure, now BUILD_FAILURE is one of possible results. Added build report after build completion. Fixed result from execute() capture when result is NoneType value --- workspace_tools/test_api.py | 90 ++++++++++++++++++++++++++++--------- 1 file changed, 69 insertions(+), 21 deletions(-) diff --git a/workspace_tools/test_api.py b/workspace_tools/test_api.py index bca2da5270..dfeccb6bfb 100644 --- a/workspace_tools/test_api.py +++ b/workspace_tools/test_api.py @@ -49,6 +49,7 @@ from workspace_tools.test_db import BaseDBAccess from workspace_tools.build_api import build_project, build_mbed_libs, build_lib from workspace_tools.build_api import get_target_supported_toolchains from workspace_tools.build_api import write_build_report +from workspace_tools.build_api import print_build_results from workspace_tools.libraries import LIBRARIES, LIBRARY_MAP from workspace_tools.toolchains import TOOLCHAIN_BIN_PATH from workspace_tools.test_exporters import ReportExporter, ResultExporterType @@ -124,6 +125,7 @@ class SingleTestRunner(object): TEST_RESULT_TIMEOUT = "TIMEOUT" TEST_RESULT_NO_IMAGE = "NO_IMAGE" TEST_RESULT_MBED_ASSERT = "MBED_ASSERT" + TEST_RESULT_BUILD_FAILED = "BUILD_FAILED" GLOBAL_LOOPS_COUNT = 1 # How many times each test should be repeated TEST_LOOPS_LIST = [] # We redefine no.of loops per test_id @@ -142,7 +144,8 @@ class SingleTestRunner(object): "timeout" : TEST_RESULT_TIMEOUT, "no_image" : TEST_RESULT_NO_IMAGE, "end" : TEST_RESULT_UNDEF, - "mbed_assert" : TEST_RESULT_MBED_ASSERT + "mbed_assert" : TEST_RESULT_MBED_ASSERT, + "build_failed" : TEST_RESULT_BUILD_FAILED } def __init__(self, @@ -182,6 +185,11 @@ class SingleTestRunner(object): from colorama import init init() + # Build results + build_failures = [] + build_successes = [] + build_skipped = [] + PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}" self.RE_DETECT_TESTCASE_RESULT = re.compile(PATTERN) # Settings related to test loops counters @@ -299,6 +307,8 @@ class SingleTestRunner(object): def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report): for toolchain in toolchains: + tt_id = "%s::%s" % (toolchain, target) + # Toolchain specific build successes and failures build_report[toolchain] = { "mbed_failure": False, @@ -310,13 +320,14 @@ class SingleTestRunner(object): } # print target, toolchain # Test suite properties returned to external tools like CI - test_suite_properties = {} - test_suite_properties['jobs'] = self.opts_jobs - test_suite_properties['clean'] = clean - test_suite_properties['target'] = target - test_suite_properties['test_ids'] = ', '.join(test_ids) - test_suite_properties['toolchain'] = toolchain - test_suite_properties['shuffle_random_seed'] = self.shuffle_random_seed + test_suite_properties = { + 'jobs': self.opts_jobs, + 'clean': clean, + 'target': target, + 'test_ids': ', '.join(test_ids), + 'toolchain': toolchain, + 'shuffle_random_seed': self.shuffle_random_seed + } # print '=== %s::%s ===' % (target, toolchain) @@ -329,6 +340,7 @@ class SingleTestRunner(object): build_mbed_libs_options = ["analyze"] if self.opts_goanna_for_mbed_sdk else None clean_mbed_libs_options = True if self.opts_goanna_for_mbed_sdk or clean or self.opts_clean else None + try: build_mbed_libs_result = build_mbed_libs(T, toolchain, @@ -337,12 +349,15 @@ class SingleTestRunner(object): jobs=self.opts_jobs) if not 
build_mbed_libs_result: + self.build_skipped.append(tt_id) print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Toolchain %s is not yet supported for this target'% (T.name, toolchain)) continue + else: + self.build_successes.append(tt_id) except ToolException: - print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain)) + self.build_failures.append(tt_id) build_report[toolchain]["mbed_failure"] = True - #return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext + print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain)) continue build_dir = join(BUILD_DIR, "test", target, toolchain) @@ -415,8 +430,6 @@ class SingleTestRunner(object): continue - - for test_id in valid_test_map_keys: test = TEST_MAP[test_id] @@ -437,6 +450,12 @@ class SingleTestRunner(object): test_uuid = uuid.uuid4() MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid)) + # Prepare extended test results data structure (it can be used to generate detailed test report) + if toolchain not in self.test_summary_ext: + self.test_summary_ext[toolchain] = {} # test_summary_ext : toolchain + if target not in self.test_summary_ext[toolchain]: + self.test_summary_ext[toolchain][target] = {} # test_summary_ext : toolchain : target + project_name = self.opts_firmware_global_name if self.opts_firmware_global_name else None try: path = build_project(test.source_dir, @@ -457,7 +476,25 @@ class SingleTestRunner(object): project_name_str = project_name if project_name is not None else test_id print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building project %s'% (project_name_str)) build_report[toolchain]["test_build_failing"].append(test_id) - # return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext + + # Append test results to global test summary + self.test_summary.append( + (self.TEST_RESULT_BUILD_FAILED, target, toolchain, test_id, 'Toolchain build failed', 0, 0, '-') + ) + + # Add detailed test result to test summary structure + if target not in self.test_summary_ext[toolchain][target]: + self.test_summary_ext[toolchain][target][test_id] = { 0: { + 'single_test_result' : self.TEST_RESULT_BUILD_FAILED, + 'single_test_output' : '', + 'target_name' : target, + 'toolchain_name' : toolchain, + 'test_id' : test_id, + 'test_description' : 'Toolchain build failed', + 'elapsed_time' : 0, + 'duration' : 0, + 'copy_method' : None + }} continue if self.opts_only_build_tests: @@ -479,17 +516,17 @@ class SingleTestRunner(object): test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path # read MUTs, test specification and perform tests - single_test_result, detailed_test_results = self.handle(test_spec, target, toolchain, test_loops=test_loops) + handle_result = self.handle(test_spec, target, toolchain, test_loops=test_loops) + if handle_result: + single_test_result, detailed_test_results = handle_result + else: + continue # Append test results to global test summary if single_test_result is not None: self.test_summary.append(single_test_result) - # Prepare extended test results data structure (it can be used to generate detailed test report) - if toolchain not in self.test_summary_ext: - self.test_summary_ext[toolchain] = {} # test_summary_ext : toolchain - if target not in self.test_summary_ext[toolchain]: - 
self.test_summary_ext[toolchain][target] = {} # test_summary_ext : toolchain : target + # Add detailed test result to test summary structure if target not in self.test_summary_ext[toolchain][target]: self.test_summary_ext[toolchain][target][test_id] = detailed_test_results # test_summary_ext : toolchain : target : test_it @@ -511,6 +548,9 @@ class SingleTestRunner(object): self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND) build_reports = [] + self.build_failures = [] + self.build_successes = [] + self.build_skipped = [] if self.opts_parallel_test_exec: ################################################################### @@ -554,7 +594,6 @@ class SingleTestRunner(object): } for toolchain in sorted(target_build_report["report"], key=target_build_report["report"].get): - print "%s - %s" % (target_build_report["target"], toolchain) report = target_build_report["report"][toolchain] if report["mbed_failure"]: @@ -703,6 +742,7 @@ class SingleTestRunner(object): """ Prints well-formed summary with results (SQL table like) table shows target x test results matrix across """ + success_code = 0 # Success code that can be leter returned to result = "Test summary:\n" # Pretty table package is used to print results pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description", @@ -723,7 +763,8 @@ class SingleTestRunner(object): self.TEST_RESULT_IOERR_SERIAL : 0, self.TEST_RESULT_NO_IMAGE : 0, self.TEST_RESULT_TIMEOUT : 0, - self.TEST_RESULT_MBED_ASSERT : 0 + self.TEST_RESULT_MBED_ASSERT : 0, + self.TEST_RESULT_BUILD_FAILED : 0 } for test in test_summary: @@ -1413,6 +1454,8 @@ def progress_bar(percent_progress, saturation=0): def singletest_in_cli_mode(single_test): """ Runs SingleTestRunner object in CLI (Command line interface) mode + + @return returns success code (0 == success) for building and running tests """ start = time() # Execute tests depending on options and filter applied @@ -1427,7 +1470,12 @@ def singletest_in_cli_mode(single_test): # prints well-formed summary with results (SQL table like) # table shows text x toolchain test result matrix print single_test.generate_test_summary_by_target(test_summary, shuffle_seed) + print print "Completed in %.2f sec"% (elapsed_time) + print + print print_build_results(single_test.build_successes, "Build successes:"), + print print_build_results(single_test.build_skipped, "Build skipped:"), + print print_build_results(single_test.build_failures, "Build failures:"), # Store extra reports in files if single_test.opts_report_html_file_name: From 9a720be3d4f71c1c71bbba5641d99cf9d1c562db Mon Sep 17 00:00:00 2001 From: Przemek Wirkus Date: Sat, 2 May 2015 23:08:00 +0100 Subject: [PATCH 2/2] Small improvements to reporting build failures --- workspace_tools/build.py | 9 ++++++--- workspace_tools/build_api.py | 2 +- workspace_tools/test_api.py | 17 ++++++++++++----- 3 files changed, 19 insertions(+), 9 deletions(-) diff --git a/workspace_tools/build.py b/workspace_tools/build.py index f402cb0222..98d2fd52df 100755 --- a/workspace_tools/build.py +++ b/workspace_tools/build.py @@ -254,9 +254,12 @@ if __name__ == '__main__': print "Completed in: (%.2f)s" % (time() - start) print - print print_build_results(successes, "Build successes:"), - print print_build_results(skipped, "Build skipped:"), - print print_build_results(failures, "Build failures:"), + for report, report_name in [(successes, "Build successes:"), + (skipped, "Build skipped:"), + (failures, "Build failures:"), + ]: + if report: + print 
print_build_results(report, report_name), if failures: sys.exit(1) diff --git a/workspace_tools/build_api.py b/workspace_tools/build_api.py index 0acea152cf..dbcba3c0c8 100644 --- a/workspace_tools/build_api.py +++ b/workspace_tools/build_api.py @@ -526,7 +526,7 @@ def static_analysis_scan_library(src_paths, build_path, target, toolchain_name, def print_build_results(result_list, build_name): """ Generate result string for build results """ result = "" - if result_list: + if len(result_list) > 0: result += build_name + "\n" result += "\n".join([" * %s" % f for f in result_list]) result += "\n" diff --git a/workspace_tools/test_api.py b/workspace_tools/test_api.py index dfeccb6bfb..c88134eb28 100644 --- a/workspace_tools/test_api.py +++ b/workspace_tools/test_api.py @@ -426,7 +426,6 @@ class SingleTestRunner(object): print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building library %s'% (lib_id)) build_report[toolchain]["library_failure"] = True build_report[toolchain]["library_build_failing"].append(lib_id) - #return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext continue @@ -456,6 +455,8 @@ class SingleTestRunner(object): if target not in self.test_summary_ext[toolchain]: self.test_summary_ext[toolchain][target] = {} # test_summary_ext : toolchain : target + tt_test_id = "%s::%s::%s" % (toolchain, target, test_id) # For logging only + project_name = self.opts_firmware_global_name if self.opts_firmware_global_name else None try: path = build_project(test.source_dir, @@ -476,6 +477,7 @@ class SingleTestRunner(object): project_name_str = project_name if project_name is not None else test_id print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building project %s'% (project_name_str)) build_report[toolchain]["test_build_failing"].append(test_id) + self.build_failures.append(tt_test_id) # Append test results to global test summary self.test_summary.append( @@ -1470,12 +1472,17 @@ def singletest_in_cli_mode(single_test): # prints well-formed summary with results (SQL table like) # table shows text x toolchain test result matrix print single_test.generate_test_summary_by_target(test_summary, shuffle_seed) - print + print "Completed in %.2f sec"% (elapsed_time) print - print print_build_results(single_test.build_successes, "Build successes:"), - print print_build_results(single_test.build_skipped, "Build skipped:"), - print print_build_results(single_test.build_failures, "Build failures:"), + # Write summary of the builds + + for report, report_name in [(single_test.build_successes, "Build successes:"), + (single_test.build_skipped, "Build skipped:"), + (single_test.build_failures, "Build failures:"), + ]: + if report: + print print_build_results(report, report_name) # Store extra reports in files if single_test.opts_report_html_file_name:
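
The first commit's main behavioural change is that a failed test build is no longer silent: the test is recorded with a `BUILD_FAILED` result in both the flat `test_summary` list and the nested `test_summary_ext` structure, so it still appears in the summary table and in the exported reports even though it never ran. The sketch below illustrates that bookkeeping outside of `SingleTestRunner`; the dictionary shape and field names are copied from the patch, but the `record_build_failure()` helper and the sample target/toolchain/test names are hypothetical, and `setdefault()` condenses the patch's explicit `if toolchain not in ...` checks.

```python
# Illustrative sketch only: field names mirror the patch, but this helper
# and the sample data are fabricated and are not part of workspace_tools.

TEST_RESULT_BUILD_FAILED = "BUILD_FAILED"

test_summary = []        # flat rows shown in the PrettyTable summary
test_summary_ext = {}    # toolchain -> target -> test_id -> {loop_no: details}


def record_build_failure(toolchain, target, test_id):
    """Record a failed test build so it still shows up in every report."""
    # Flat row: (result, target, toolchain, test_id, description,
    #            elapsed_time, duration, copy_method)
    test_summary.append((TEST_RESULT_BUILD_FAILED, target, toolchain, test_id,
                         'Toolchain build failed', 0, 0, '-'))

    # Nested structure consumed by the report exporters; loop number 0 is
    # used because the test binary was never produced, so no loop ever ran.
    test_summary_ext.setdefault(toolchain, {}).setdefault(target, {})[test_id] = {
        0: {
            'single_test_result': TEST_RESULT_BUILD_FAILED,
            'single_test_output': '',
            'target_name': target,
            'toolchain_name': toolchain,
            'test_id': test_id,
            'test_description': 'Toolchain build failed',
            'elapsed_time': 0,
            'duration': 0,
            'copy_method': None,
        }
    }


if __name__ == "__main__":
    record_build_failure("GCC_ARM", "K64F", "MBED_A1")
    print(test_summary)
    print(test_summary_ext)
```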
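
Both commits then converge on the same end-of-run reporting pattern: every `toolchain::target` (or `toolchain::target::test_id`) outcome is appended to one of three lists, and `print_build_results()` from `workspace_tools/build_api.py` turns each non-empty list into a titled, bulleted block. The self-contained sketch below reproduces that pattern under stated assumptions: `try_build()` and the sample target/toolchain names are invented stand-ins for `build_mbed_libs()`/`build_project()`, while `print_build_results()` and the `tt_id` key format follow the patch.

```python
# Minimal sketch of the build-result bookkeeping these commits introduce.
# print_build_results mirrors the helper in workspace_tools/build_api.py;
# try_build is a hypothetical stand-in for build_mbed_libs()/build_project().

class ToolException(Exception):
    """Stand-in for the toolchain build error raised by workspace_tools."""
    pass


def print_build_results(result_list, build_name):
    """Format a list of build identifiers as a titled, bulleted block."""
    result = ""
    if len(result_list) > 0:
        result += build_name + "\n"
        result += "\n".join(["  * %s" % f for f in result_list])
        result += "\n"
    return result


def try_build(target, toolchain):
    """Hypothetical build step: False means skipped, ToolException means failed."""
    if toolchain == "ARM" and target == "NUCLEO_F401RE":
        raise ToolException("link error")
    return toolchain != "IAR"          # pretend IAR is not yet supported


def build_all(targets, toolchains):
    successes, skipped, failures = [], [], []
    for target in targets:
        for toolchain in toolchains:
            tt_id = "%s::%s" % (toolchain, target)   # same key format as the patch
            try:
                if try_build(target, toolchain):
                    successes.append(tt_id)
                else:
                    skipped.append(tt_id)
            except ToolException:
                failures.append(tt_id)

    # Print only the non-empty categories, as build.py and test_api.py now do.
    for report, report_name in [(successes, "Build successes:"),
                                (skipped, "Build skipped:"),
                                (failures, "Build failures:")]:
        if report:
            print(print_build_results(report, report_name))
    return failures


if __name__ == "__main__":
    failed = build_all(["K64F", "NUCLEO_F401RE"], ["ARM", "GCC_ARM", "IAR"])
    # Mirror build.py's behaviour of signalling failure through the exit code.
    raise SystemExit(1 if failed else 0)
```

Having the helper return a formatted string rather than print directly is what lets `build.py` and `test_api.py` share it, and the `if report:` guard keeps empty categories out of the CI log.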