"""
mbed SDK
Copyright (c) 2011-2014 ARM Limited

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
"""
import os
import re
import sys
import json
import uuid
import pprint
import random
import argparse
import datetime
import threading
import ctypes
from types import ListType
from colorama import Fore, Back, Style
from prettytable import PrettyTable
from copy import copy

from time import sleep, time
from Queue import Queue, Empty
from os.path import join, exists, basename, relpath
from threading import Thread, Lock
from multiprocessing import Pool, cpu_count
from subprocess import Popen, PIPE
# Imports related to mbed build api
from tools.tests import TESTS
from tools.tests import TEST_MAP
from tools.paths import BUILD_DIR
from tools.paths import HOST_TESTS
from tools.utils import ToolException
from tools.utils import NotSupportedException
from tools.utils import construct_enum
from tools.memap import MemapParser
from tools.targets import TARGET_MAP
import tools.test_configs as TestConfig
from tools.test_db import BaseDBAccess
from tools.build_api import build_project, build_mbed_libs, build_lib
from tools.build_api import get_target_supported_toolchains
from tools.build_api import write_build_report
from tools.build_api import prep_report
from tools.build_api import prep_properties
from tools.build_api import create_result
from tools.build_api import add_result_to_report
from tools.build_api import prepare_toolchain
from tools.build_api import scan_resources
from tools.build_api import get_config
from tools.libraries import LIBRARIES, LIBRARY_MAP
from tools.options import extract_profile
from tools.toolchains import TOOLCHAIN_PATHS
from tools.toolchains import TOOLCHAINS
from tools.test_exporters import ReportExporter, ResultExporterType
from tools.utils import argparse_filestring_type
from tools.utils import argparse_uppercase_type
from tools.utils import argparse_lowercase_type
from tools.utils import argparse_many
from tools.utils import get_path_depth
import tools.host_tests.host_tests_plugins as host_tests_plugins
# mbed_lstools is optional; auto-detection features degrade gracefully without it
try:
    import mbed_lstools
    from tools.compliance.ioper_runner import get_available_oper_test_scopes
except ImportError:
    pass
class ProcessObserver(Thread):
    def __init__(self, proc):
        Thread.__init__(self)
        self.proc = proc
        self.queue = Queue()
        self.daemon = True
        self.active = True
        self.start()

    def run(self):
        while self.active:
            c = self.proc.stdout.read(1)
            self.queue.put(c)

    def stop(self):
        self.active = False
        try:
            self.proc.terminate()
        except Exception, _:
            pass
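
# Illustrative sketch (not part of the test flow): ProcessObserver drains a
# child process's stdout one character at a time through a queue, so callers
# can poll without blocking. The command below is a hypothetical stand-in.
#
#   proc = Popen(["python", "--version"], stdout=PIPE)
#   obs = ProcessObserver(proc)
#   try:
#       c = obs.queue.get(block=True, timeout=0.5)  # one char, or raises Empty
#   except Empty:
#       c = None
#   obs.stop()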
class SingleTestExecutor(threading.Thread):
    """Example: Single test class in separate thread usage
    """
    def __init__(self, single_test):
        self.single_test = single_test
        threading.Thread.__init__(self)

    def run(self):
        start = time()
        # Execute tests depending on options and filter applied
        test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = self.single_test.execute()
        elapsed_time = time() - start

        # Human readable summary
        if not self.single_test.opts_suppress_summary:
            # prints well-formed summary with results (SQL table like)
            print self.single_test.generate_test_summary(test_summary, shuffle_seed)
        if self.single_test.opts_test_x_toolchain_summary:
            # prints well-formed summary with results (SQL table like)
            # table shows test x toolchain test result matrix
            print self.single_test.generate_test_summary_by_target(test_summary, shuffle_seed)

        print "Completed in %.2f sec" % (elapsed_time)
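
# Illustrative usage sketch (assumes a configured SingleTestRunner instance
# named `single_test`): run the whole suite on a worker thread.
#
#   executor = SingleTestExecutor(single_test)
#   executor.start()
#   executor.join()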
class SingleTestRunner(object):
    """Object wrapper for single test run which may involve multiple MUTs
    """
    RE_DETECT_TESTCASE_RESULT = None

    # Return codes for test script
    TEST_RESULT_OK = "OK"
    TEST_RESULT_FAIL = "FAIL"
    TEST_RESULT_ERROR = "ERROR"
    TEST_RESULT_UNDEF = "UNDEF"
    TEST_RESULT_IOERR_COPY = "IOERR_COPY"
    TEST_RESULT_IOERR_DISK = "IOERR_DISK"
    TEST_RESULT_IOERR_SERIAL = "IOERR_SERIAL"
    TEST_RESULT_TIMEOUT = "TIMEOUT"
    TEST_RESULT_NO_IMAGE = "NO_IMAGE"
    TEST_RESULT_MBED_ASSERT = "MBED_ASSERT"
    TEST_RESULT_BUILD_FAILED = "BUILD_FAILED"
    TEST_RESULT_NOT_SUPPORTED = "NOT_SUPPORTED"

    GLOBAL_LOOPS_COUNT = 1  # How many times each test should be repeated
    TEST_LOOPS_LIST = []    # We redefine no. of loops per test_id
    TEST_LOOPS_DICT = {}    # TEST_LOOPS_LIST in dict format: {test_id: test_loop_count}

    muts = {}       # MUTs descriptor (from external file)
    test_spec = {}  # Test specification (from external file)

    # mbed test suite -> SingleTestRunner
    TEST_RESULT_MAPPING = {"success": TEST_RESULT_OK,
                           "failure": TEST_RESULT_FAIL,
                           "error": TEST_RESULT_ERROR,
                           "ioerr_copy": TEST_RESULT_IOERR_COPY,
                           "ioerr_disk": TEST_RESULT_IOERR_DISK,
                           "ioerr_serial": TEST_RESULT_IOERR_SERIAL,
                           "timeout": TEST_RESULT_TIMEOUT,
                           "no_image": TEST_RESULT_NO_IMAGE,
                           "end": TEST_RESULT_UNDEF,
                           "mbed_assert": TEST_RESULT_MBED_ASSERT,
                           "build_failed": TEST_RESULT_BUILD_FAILED,
                           "not_supproted": TEST_RESULT_NOT_SUPPORTED  # (sic) key left as-is to match the tag emitted by the test framework
                          }
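
    # Illustrative note: __init__ compiles these keys into RE_DETECT_TESTCASE_RESULT,
    # which matches tags such as "{success}" or "{timeout}" printed on the MUT's
    # serial output; e.g. "{failure}" maps to TEST_RESULT_FAIL.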
    def __init__(self,
                 _global_loops_count=1,
                 _test_loops_list=None,
                 _muts={},
                 _clean=False,
                 _parser=None,
                 _opts=None,
                 _opts_db_url=None,
                 _opts_log_file_name=None,
                 _opts_report_html_file_name=None,
                 _opts_report_junit_file_name=None,
                 _opts_report_build_file_name=None,
                 _opts_report_text_file_name=None,
                 _opts_build_report={},
                 _opts_build_properties={},
                 _test_spec={},
                 _opts_goanna_for_mbed_sdk=None,
                 _opts_goanna_for_tests=None,
                 _opts_shuffle_test_order=False,
                 _opts_shuffle_test_seed=None,
                 _opts_test_by_names=None,
                 _opts_peripheral_by_names=None,
                 _opts_test_only_peripheral=False,
                 _opts_test_only_common=False,
                 _opts_verbose_skipped_tests=False,
                 _opts_verbose_test_result_only=False,
                 _opts_verbose=False,
                 _opts_firmware_global_name=None,
                 _opts_only_build_tests=False,
                 _opts_parallel_test_exec=False,
                 _opts_suppress_summary=False,
                 _opts_test_x_toolchain_summary=False,
                 _opts_copy_method=None,
                 _opts_mut_reset_type=None,
                 _opts_jobs=None,
                 _opts_waterfall_test=None,
                 _opts_consolidate_waterfall_test=None,
                 _opts_extend_test_timeout=None,
                 _opts_auto_detect=None,
                 _opts_include_non_automated=False):
        """Let's try hard to init this object
        """
        from colorama import init
        init()
        PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
        self.RE_DETECT_TESTCASE_RESULT = re.compile(PATTERN)

        # Settings related to test loops counters
        try:
            _global_loops_count = int(_global_loops_count)
        except (ValueError, TypeError):
            _global_loops_count = 1
        if _global_loops_count < 1:
            _global_loops_count = 1

        self.GLOBAL_LOOPS_COUNT = _global_loops_count
        self.TEST_LOOPS_LIST = _test_loops_list if _test_loops_list else []
        self.TEST_LOOPS_DICT = self.test_loop_list_to_dict(_test_loops_list)

        self.shuffle_random_seed = 0.0
        self.SHUFFLE_SEED_ROUND = 10

        # MUT list and test specification storage
        self.muts = _muts
        self.test_spec = _test_spec

        # Settings passed e.g. from command line
        self.opts_db_url = _opts_db_url
        self.opts_log_file_name = _opts_log_file_name
        self.opts_report_html_file_name = _opts_report_html_file_name
        self.opts_report_junit_file_name = _opts_report_junit_file_name
        self.opts_report_build_file_name = _opts_report_build_file_name
        self.opts_report_text_file_name = _opts_report_text_file_name
        self.opts_goanna_for_mbed_sdk = _opts_goanna_for_mbed_sdk
        self.opts_goanna_for_tests = _opts_goanna_for_tests
        self.opts_shuffle_test_order = _opts_shuffle_test_order
        self.opts_shuffle_test_seed = _opts_shuffle_test_seed
        self.opts_test_by_names = _opts_test_by_names
        self.opts_peripheral_by_names = _opts_peripheral_by_names
        self.opts_test_only_peripheral = _opts_test_only_peripheral
        self.opts_test_only_common = _opts_test_only_common
        self.opts_verbose_skipped_tests = _opts_verbose_skipped_tests
        self.opts_verbose_test_result_only = _opts_verbose_test_result_only
        self.opts_verbose = _opts_verbose
        self.opts_firmware_global_name = _opts_firmware_global_name
        self.opts_only_build_tests = _opts_only_build_tests
        self.opts_parallel_test_exec = _opts_parallel_test_exec
        self.opts_suppress_summary = _opts_suppress_summary
        self.opts_test_x_toolchain_summary = _opts_test_x_toolchain_summary
        self.opts_copy_method = _opts_copy_method
        self.opts_mut_reset_type = _opts_mut_reset_type
        self.opts_jobs = _opts_jobs if _opts_jobs is not None else 1
        self.opts_waterfall_test = _opts_waterfall_test
        self.opts_consolidate_waterfall_test = _opts_consolidate_waterfall_test
        self.opts_extend_test_timeout = _opts_extend_test_timeout
        self.opts_clean = _clean
        self.opts_parser = _parser
        self.opts = _opts
        self.opts_auto_detect = _opts_auto_detect
        self.opts_include_non_automated = _opts_include_non_automated

        self.build_report = _opts_build_report
        self.build_properties = _opts_build_properties
        # File / screen logger initialization
        self.logger = CLITestLogger(file_name=self.opts_log_file_name)  # Default test logger

        # Database related initializations
        self.db_logger = factory_db_logger(self.opts_db_url)
        self.db_logger_build_id = None  # Build ID (database index of build_id table)
        # Let's connect to database to set up credentials and confirm database is ready
        if self.db_logger:
            self.db_logger.connect_url(self.opts_db_url)  # Save db access info inside db_logger object
            if self.db_logger.is_connected():
                # Get hostname and uname so we can use it as build description
                # when creating new build_id in external database
                (_hostname, _uname) = self.db_logger.get_hostname()
                _host_location = os.path.dirname(os.path.abspath(__file__))
                build_id_type = None if self.opts_only_build_tests is None else self.db_logger.BUILD_ID_TYPE_BUILD_ONLY
                self.db_logger_build_id = self.db_logger.get_next_build_id(_hostname, desc=_uname, location=_host_location, type=build_id_type)
                self.db_logger.disconnect()
    def dump_options(self):
        """Returns a data structure with common settings passed to SingleTestRunner.
        It can be used for example to fill _extra fields in database storing test suite single run data
        Example:
            data = self.dump_options()
        or
            data_str = json.dumps(self.dump_options())
        """
        result = {"db_url": str(self.opts_db_url),
                  "log_file_name": str(self.opts_log_file_name),
                  "shuffle_test_order": str(self.opts_shuffle_test_order),
                  "shuffle_test_seed": str(self.opts_shuffle_test_seed),
                  "test_by_names": str(self.opts_test_by_names),
                  "peripheral_by_names": str(self.opts_peripheral_by_names),
                  "test_only_peripheral": str(self.opts_test_only_peripheral),
                  "test_only_common": str(self.opts_test_only_common),
                  "verbose": str(self.opts_verbose),
                  "firmware_global_name": str(self.opts_firmware_global_name),
                  "only_build_tests": str(self.opts_only_build_tests),
                  "copy_method": str(self.opts_copy_method),
                  "mut_reset_type": str(self.opts_mut_reset_type),
                  "jobs": str(self.opts_jobs),
                  "extend_test_timeout": str(self.opts_extend_test_timeout),
                  "_dummy": ''
                 }
        return result
    def shuffle_random_func(self):
        return self.shuffle_random_seed

    def is_shuffle_seed_float(self):
        """Returns true if the shuffle seed can be converted to float
        """
        result = True
        try:
            float(self.shuffle_random_seed)
        except ValueError:
            result = False
        return result

    # This will store target / toolchain specific properties
    test_suite_properties_ext = {}  # target : toolchain
    # Here we store test results
    test_summary = []
    # Here we store test results in extended data structure
    test_summary_ext = {}
    execute_thread_slice_lock = Lock()
    def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report, build_properties):
        for toolchain in toolchains:
            tt_id = "%s::%s" % (toolchain, target)

            # Let's build our test; skip targets we do not know about
            # (membership is checked before TARGET_MAP is indexed)
            if target not in TARGET_MAP:
                print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Target platform not found' % (target))
                continue

            T = TARGET_MAP[target]

            # Test suite properties returned to external tools like CI
            test_suite_properties = {
                'jobs': self.opts_jobs,
                'clean': clean,
                'target': target,
                'vendor': T.extra_labels[0],
                'test_ids': ', '.join(test_ids),
                'toolchain': toolchain,
                'shuffle_random_seed': self.shuffle_random_seed
            }
            clean_mbed_libs_options = True if self.opts_goanna_for_mbed_sdk or clean or self.opts_clean else None

            profile = extract_profile(self.opts_parser, self.opts, toolchain)
            stats_depth = self.opts.stats_depth or 2

            try:
                build_mbed_libs_result = build_mbed_libs(T,
                                                         toolchain,
                                                         clean=clean_mbed_libs_options,
                                                         verbose=self.opts_verbose,
                                                         jobs=self.opts_jobs,
                                                         report=build_report,
                                                         properties=build_properties,
                                                         build_profile=profile)

                if not build_mbed_libs_result:
                    print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Toolchain %s is not yet supported for this target' % (T.name, toolchain))
                    continue
            except ToolException:
                print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building MBED libs for %s using %s' % (target, toolchain))
                continue

            build_dir = join(BUILD_DIR, "test", target, toolchain)

            test_suite_properties['build_mbed_libs_result'] = build_mbed_libs_result
            test_suite_properties['build_dir'] = build_dir
            test_suite_properties['skipped'] = []
            # Enumerate through all tests and shuffle test order if requested
            test_map_keys = sorted(TEST_MAP.keys())

            if self.opts_shuffle_test_order:
                random.shuffle(test_map_keys, self.shuffle_random_func)
                # Update database with shuffle seed if applicable
                if self.db_logger:
                    self.db_logger.reconnect()
                    if self.db_logger.is_connected():
                        self.db_logger.update_build_id_info(self.db_logger_build_id, _shuffle_seed=self.shuffle_random_func())
                        self.db_logger.disconnect()

            if self.db_logger:
                self.db_logger.reconnect()
                if self.db_logger.is_connected():
                    # Update MUTs and Test Specification in database
                    self.db_logger.update_build_id_info(self.db_logger_build_id, _muts=self.muts, _test_spec=self.test_spec)
                    # Update Extra information in database (some options passed to test suite)
                    self.db_logger.update_build_id_info(self.db_logger_build_id, _extra=json.dumps(self.dump_options()))
                    self.db_logger.disconnect()
            valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids, self.opts_include_non_automated)
            skipped_test_map_keys = self.get_skipped_tests(test_map_keys, valid_test_map_keys)

            for skipped_test_id in skipped_test_map_keys:
                test_suite_properties['skipped'].append(skipped_test_id)

            # First pass through all tests and determine which libraries need to be built
            libraries = []
            for test_id in valid_test_map_keys:
                test = TEST_MAP[test_id]

                # Detect which lib should be added to test
                # Some libs have to be compiled, like RTOS or ETH
                for lib in LIBRARIES:
                    if lib['build_dir'] in test.dependencies and lib['id'] not in libraries:
                        libraries.append(lib['id'])

            clean_project_options = True if self.opts_goanna_for_tests or clean or self.opts_clean else None

            # Build all required libraries
            for lib_id in libraries:
                try:
                    build_lib(lib_id,
                              T,
                              toolchain,
                              verbose=self.opts_verbose,
                              clean=clean_mbed_libs_options,
                              jobs=self.opts_jobs,
                              report=build_report,
                              properties=build_properties,
                              build_profile=profile)
                except ToolException:
                    print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building library %s' % (lib_id))
                    continue
            for test_id in valid_test_map_keys:
                test = TEST_MAP[test_id]

                test_suite_properties['test.libs.%s.%s.%s' % (target, toolchain, test_id)] = ', '.join(libraries)

                # TODO: move these two loops below to a separate function
                INC_DIRS = []
                for lib_id in libraries:
                    if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
                        INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])

                MACROS = []
                for lib_id in libraries:
                    if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
                        MACROS.extend(LIBRARY_MAP[lib_id]['macros'])
                MACROS.append('TEST_SUITE_TARGET_NAME="%s"' % target)
                MACROS.append('TEST_SUITE_TEST_ID="%s"' % test_id)
                test_uuid = uuid.uuid4()
                MACROS.append('TEST_SUITE_UUID="%s"' % str(test_uuid))

                # Prepare extended test results data structure (it can be used to generate detailed test report)
                if target not in self.test_summary_ext:
                    self.test_summary_ext[target] = {}  # test_summary_ext : toolchain
                if toolchain not in self.test_summary_ext[target]:
                    self.test_summary_ext[target][toolchain] = {}  # test_summary_ext : toolchain : target

                tt_test_id = "%s::%s::%s" % (toolchain, target, test_id)  # For logging only

                project_name = self.opts_firmware_global_name if self.opts_firmware_global_name else None
                try:
                    path = build_project(test.source_dir, join(build_dir, test_id), T,
                                         toolchain, test.dependencies, clean=clean_project_options,
                                         verbose=self.opts_verbose, name=project_name, macros=MACROS,
                                         inc_dirs=INC_DIRS, jobs=self.opts_jobs, report=build_report,
                                         properties=build_properties, project_id=test_id,
                                         project_description=test.get_description(),
                                         build_profile=profile, stats_depth=stats_depth)
                except Exception, e:
                    project_name_str = project_name if project_name is not None else test_id

                    test_result = self.TEST_RESULT_FAIL
                    if isinstance(e, ToolException):
                        print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building project %s' % (project_name_str))
                        test_result = self.TEST_RESULT_BUILD_FAILED
                    elif isinstance(e, NotSupportedException):
                        print self.logger.log_line(self.logger.LogType.INFO, 'The project %s is not supported' % (project_name_str))
                        test_result = self.TEST_RESULT_NOT_SUPPORTED

                    # Append test results to global test summary
                    self.test_summary.append(
                        (test_result, target, toolchain, test_id, test.get_description(), 0, 0, '-')
                    )

                    # Add detailed test result to test summary structure
                    if test_id not in self.test_summary_ext[target][toolchain]:
                        self.test_summary_ext[target][toolchain][test_id] = []

                    self.test_summary_ext[target][toolchain][test_id].append({0: {
                        'result': test_result,
                        'output': '',
                        'target_name': target,
                        'target_name_unique': target,
                        'toolchain_name': toolchain,
                        'id': test_id,
                        'description': test.get_description(),
                        'elapsed_time': 0,
                        'duration': 0,
                        'copy_method': None
                    }})
                    continue

                if self.opts_only_build_tests:
                    # With this option we are skipping testing phase
                    continue
                # Test duration can be increased by global value
                test_duration = test.duration
                if self.opts_extend_test_timeout is not None:
                    test_duration += self.opts_extend_test_timeout

                # For an automated test the duration acts as a timeout after
                # which the test gets interrupted
                test_spec = self.shape_test_request(target, path, test_id, test_duration)
                test_loops = self.get_test_loop_count(test_id)

                test_suite_properties['test.duration.%s.%s.%s' % (target, toolchain, test_id)] = test_duration
                test_suite_properties['test.loops.%s.%s.%s' % (target, toolchain, test_id)] = test_loops
                test_suite_properties['test.path.%s.%s.%s' % (target, toolchain, test_id)] = path

                # read MUTs, test specification and perform tests
                handle_results = self.handle(test_spec, target, toolchain, test_loops=test_loops)

                if handle_results is None:
                    continue

                for handle_result in handle_results:
                    if handle_result:
                        single_test_result, detailed_test_results = handle_result
                    else:
                        continue

                    # Append test results to global test summary
                    if single_test_result is not None:
                        self.test_summary.append(single_test_result)

                    # Add detailed test result to test summary structure
                    if test_id not in self.test_summary_ext[target][toolchain]:
                        self.test_summary_ext[target][toolchain][test_id] = []

                    append_test_result = detailed_test_results

                    # If waterfall and consolidate-waterfall options are enabled,
                    # only include the last test result in the report.
                    if self.opts_waterfall_test and self.opts_consolidate_waterfall_test:
                        append_test_result = {0: detailed_test_results[len(detailed_test_results) - 1]}

                    self.test_summary_ext[target][toolchain][test_id].append(append_test_result)

            test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
            self.test_suite_properties_ext[target][toolchain] = test_suite_properties

        q.put(target + '_'.join(toolchains))
        return
    def execute(self):
        clean = self.test_spec.get('clean', False)
        test_ids = self.test_spec.get('test_ids', [])
        q = Queue()

        # Generate seed for shuffle if seed is not provided on the command line
        self.shuffle_random_seed = round(random.random(), self.SHUFFLE_SEED_ROUND)
        if self.opts_shuffle_test_seed is not None and self.is_shuffle_seed_float():
            self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND)

        if self.opts_parallel_test_exec:
            ###################################################################
            # Experimental, parallel test execution per singletest instance.
            ###################################################################
            execute_threads = []  # Threads used to build mbed SDK, libs, test cases and execute tests
            # Note: We are building here in parallel for each target separately!
            # So we are not building the same thing multiple times and compilers
            # in separate threads do not collide.
            # Inside execute_thread_slice() the function handle() will be called to
            # get information about available MUTs (per target).
            for target, toolchains in self.test_spec['targets'].iteritems():
                self.test_suite_properties_ext[target] = {}
                t = threading.Thread(target=self.execute_thread_slice, args=(q, target, toolchains, clean, test_ids, self.build_report, self.build_properties))
                t.daemon = True
                t.start()
                execute_threads.append(t)

            for t in execute_threads:
                q.get()  # Used instead of t.join() so we do not block on threads in a fixed order
        else:
            # Serialized (not parallel) test execution
            for target, toolchains in self.test_spec['targets'].iteritems():
                if target not in self.test_suite_properties_ext:
                    self.test_suite_properties_ext[target] = {}

                self.execute_thread_slice(q, target, toolchains, clean, test_ids, self.build_report, self.build_properties)
                q.get()

        if self.db_logger:
            self.db_logger.reconnect()
            if self.db_logger.is_connected():
                self.db_logger.update_build_id_info(self.db_logger_build_id, _status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED)
                self.db_logger.disconnect()

        return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext, self.build_report, self.build_properties
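
    # Illustrative sketch (hypothetical values): the 'targets' entry of the
    # test specification consumed by execute() maps a target name to the
    # toolchains to test with, e.g.:
    #
    #   self.test_spec = {
    #       "clean": False,
    #       "test_ids": [],
    #       "targets": {
    #           "LPC1768": ["ARM", "GCC_ARM"],
    #           "K64F": ["GCC_ARM"]
    #       }
    #   }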
    def get_valid_tests(self, test_map_keys, target, toolchain, test_ids, include_non_automated):
        valid_test_map_keys = []

        for test_id in test_map_keys:
            test = TEST_MAP[test_id]
            if self.opts_test_by_names and test_id not in self.opts_test_by_names:
                continue

            if test_ids and test_id not in test_ids:
                continue

            if self.opts_test_only_peripheral and not test.peripherals:
                if self.opts_verbose_skipped_tests:
                    print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s' % (target))
                continue

            if self.opts_peripheral_by_names and test.peripherals and not len([i for i in test.peripherals if i in self.opts_peripheral_by_names]):
                # We will skip tests not forced with -p option
                if self.opts_verbose_skipped_tests:
                    print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s' % (target))
                continue

            if self.opts_test_only_common and test.peripherals:
                if self.opts_verbose_skipped_tests:
                    print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral test skipped for target %s' % (target))
                continue

            if not include_non_automated and not test.automated:
                if self.opts_verbose_skipped_tests:
                    print self.logger.log_line(self.logger.LogType.INFO, 'Non automated test skipped for target %s' % (target))
                continue

            if test.is_supported(target, toolchain):
                if test.peripherals is None and self.opts_only_build_tests:
                    # When users are using 'build only flag' and test do not have
                    # specified peripherals we can allow test building by default
                    pass
                elif self.opts_peripheral_by_names and test_id not in self.opts_peripheral_by_names:
                    # If we force peripheral with option -p we expect test
                    # to pass even if peripheral is not in MUTs file.
                    pass
                elif not self.is_peripherals_available(target, test.peripherals):
                    if self.opts_verbose_skipped_tests:
                        if test.peripherals:
                            print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral %s test skipped for target %s' % (",".join(test.peripherals), target))
                        else:
                            print self.logger.log_line(self.logger.LogType.INFO, 'Test %s skipped for target %s' % (test_id, target))
                    continue

                # The test has made it through all the filters, so add it to the valid tests list
                valid_test_map_keys.append(test_id)

        return valid_test_map_keys

    def get_skipped_tests(self, all_test_map_keys, valid_test_map_keys):
        # NOTE: This will not preserve order
        return list(set(all_test_map_keys) - set(valid_test_map_keys))
    def generate_test_summary_by_target(self, test_summary, shuffle_seed=None):
        """Prints well-formed summary with results (SQL table like)
        table shows test x toolchain test result matrix
        """
        RESULT_INDEX = 0
        TARGET_INDEX = 1
        TOOLCHAIN_INDEX = 2
        TEST_INDEX = 3
        DESC_INDEX = 4

        unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX)
        unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX)
        unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
        unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX)

        result = "Test summary:\n"
        for target in unique_targets:
            result_dict = {}  # test : { toolchain : result }
            unique_target_toolchains = []
            for test in test_summary:
                if test[TARGET_INDEX] == target:
                    if test[TOOLCHAIN_INDEX] not in unique_target_toolchains:
                        unique_target_toolchains.append(test[TOOLCHAIN_INDEX])
                    if test[TEST_INDEX] not in result_dict:
                        result_dict[test[TEST_INDEX]] = {}
                    result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]

            pt_cols = ["Target", "Test ID", "Test Description"] + unique_target_toolchains
            pt = PrettyTable(pt_cols)
            for col in pt_cols:
                pt.align[col] = "l"
            pt.padding_width = 1  # One space between column edges and contents (default)

            for test in unique_tests:
                if test in result_dict:
                    test_results = result_dict[test]
                    if test in unique_test_desc:
                        row = [target, test, unique_test_desc[test]]
                        # Iterate over this target's toolchains so the row length
                        # always matches the table header
                        for toolchain in unique_target_toolchains:
                            row.append(test_results.get(toolchain, '-'))
                        pt.add_row(row)
            result += pt.get_string()
            shuffle_seed_text = "Shuffle Seed: %.*f" % (self.SHUFFLE_SEED_ROUND,
                                                        shuffle_seed if shuffle_seed else self.shuffle_random_seed)
            result += "\n%s" % (shuffle_seed_text if self.opts_shuffle_test_order else '')
        return result
    def generate_test_summary(self, test_summary, shuffle_seed=None):
        """Prints well-formed summary with results (SQL table like)
        table shows target x test result matrix
        """
        success_code = 0  # Success code that can be later returned to the caller

        result = "Test summary:\n"
        # Pretty table package is used to print results
        pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
                          "Elapsed Time (sec)", "Timeout (sec)", "Loops"])
        pt.align["Result"] = "l"  # Left align
        pt.align["Target"] = "l"  # Left align
        pt.align["Toolchain"] = "l"  # Left align
        pt.align["Test ID"] = "l"  # Left align
        pt.align["Test Description"] = "l"  # Left align
        pt.padding_width = 1  # One space between column edges and contents (default)

        result_dict = {self.TEST_RESULT_OK: 0,
                       self.TEST_RESULT_FAIL: 0,
                       self.TEST_RESULT_ERROR: 0,
                       self.TEST_RESULT_UNDEF: 0,
                       self.TEST_RESULT_IOERR_COPY: 0,
                       self.TEST_RESULT_IOERR_DISK: 0,
                       self.TEST_RESULT_IOERR_SERIAL: 0,
                       self.TEST_RESULT_NO_IMAGE: 0,
                       self.TEST_RESULT_TIMEOUT: 0,
                       self.TEST_RESULT_MBED_ASSERT: 0,
                       self.TEST_RESULT_BUILD_FAILED: 0,
                       self.TEST_RESULT_NOT_SUPPORTED: 0
                      }

        for test in test_summary:
            if test[0] in result_dict:
                result_dict[test[0]] += 1
            pt.add_row(test)
        result += pt.get_string()
        result += "\n"

        # Print result count
        result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.iteritems()])
        shuffle_seed_text = "Shuffle Seed: %.*f\n" % (self.SHUFFLE_SEED_ROUND,
                                                      shuffle_seed if shuffle_seed else self.shuffle_random_seed)
        result += "\n%s" % (shuffle_seed_text if self.opts_shuffle_test_order else '')
        return result
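
    # Illustrative fragment of the rendered summary (hypothetical values,
    # some columns elided):
    #
    #   | Result | Target | Toolchain | Test ID | Test Description | ... | Loops |
    #   | OK     | K64F   | GCC_ARM   | MBED_A1 | Basic            | ... | 1/1   |
    #
    #   Result: 1 OK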
    def test_loop_list_to_dict(self, test_loops_str):
        """Transforms a list of 'test_id=loops' entries into a dictionary {test_id: test_id_loops_count}
        """
        result = {}
        if test_loops_str:
            test_loops = test_loops_str
            for test_loop in test_loops:
                test_loop_count = test_loop.split('=')
                if len(test_loop_count) == 2:
                    _test_id, _test_loops = test_loop_count
                    try:
                        _test_loops = int(_test_loops)
                    except ValueError:
                        continue
                    result[_test_id] = _test_loops
        return result
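
    # Illustrative example (hypothetical test IDs):
    #   self.test_loop_list_to_dict(["MBED_A1=5", "MBED_A2=3"])
    #   returns {"MBED_A1": 5, "MBED_A2": 3}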
    def get_test_loop_count(self, test_id):
        """This function returns no. of loops per test (deduced by test_id).
        If test is not in list of redefined loop counts it will use default value.
        """
        result = self.GLOBAL_LOOPS_COUNT
        if test_id in self.TEST_LOOPS_DICT:
            result = self.TEST_LOOPS_DICT[test_id]
        return result

    def delete_file(self, file_path):
        """Remove file from the system
        """
        result = True
        result_msg = ""
        try:
            os.remove(file_path)
        except Exception, e:
            result_msg = e
            result = False
        return result, result_msg
    def handle_mut(self, mut, data, target_name, toolchain_name, test_loops=1):
        """Test is being invoked for given MUT.
        """
        # Get test information, image and test timeout
        test_id = data['test_id']
        test = TEST_MAP[test_id]
        test_description = TEST_MAP[test_id].get_description()
        image = data["image"]
        duration = data.get("duration", 10)

        if mut is None:
            print "Error: No Mbed available: MUT[%s]" % data['mcu']
            return None

        mcu = mut['mcu']
        copy_method = mut.get('copy_method')  # Available board configuration selection e.g. core selection etc.

        if self.db_logger:
            self.db_logger.reconnect()

        selected_copy_method = self.opts_copy_method if copy_method is None else copy_method

        # Tests can be looped so test results must be stored for the same test
        test_all_result = []
        # Test results for one test run a few times
        detailed_test_results = {}  # { Loop_number: { results ... } }

        for test_index in range(test_loops):
            # If mbedls is available and we are auto detecting MUT info,
            # update MUT info (mounting may have changed)
            if get_module_avail('mbed_lstools') and self.opts_auto_detect:
                platform_name_filter = [mcu]
                muts_list = {}
                found = False

                for i in range(0, 60):
                    print('Looking for %s with MBEDLS' % mcu)
                    muts_list = get_autodetected_MUTS_list(platform_name_filter=platform_name_filter)

                    if 1 not in muts_list:
                        sleep(3)
                    else:
                        found = True
                        break

                if not found:
                    print "Error: mbed not found with MBEDLS: %s" % data['mcu']
                    return None
                else:
                    mut = muts_list[1]

            disk = mut.get('disk')
            port = mut.get('port')

            if disk is None or port is None:
                return None

            target_by_mcu = TARGET_MAP[mut['mcu']]
            target_name_unique = mut['mcu_unique'] if 'mcu_unique' in mut else mut['mcu']
            # Some extra stuff can be declared in MUTs structure
            reset_type = mut.get('reset_type')  # reboot.txt, reset.txt, shutdown.txt
            reset_tout = mut.get('reset_tout')  # COPY_IMAGE -> RESET_PROC -> SLEEP(RESET_TOUT)

            # When the build and test system were separate, this was relative to a
            # base network folder base path: join(NETWORK_BASE_PATH, )
            image_path = image

            # Host test execution
            start_host_exec_time = time()

            single_test_result = self.TEST_RESULT_UNDEF  # single test run result
            _copy_method = selected_copy_method

            if not exists(image_path):
                single_test_result = self.TEST_RESULT_NO_IMAGE
                elapsed_time = 0
                # Default timing values; no host test was run for a missing image
                single_testduration = 0
                single_timeout = duration
                single_test_output = self.logger.log_line(self.logger.LogType.ERROR, 'Image file does not exist: %s' % image_path)
                print single_test_output
            else:
                # Host test execution
                start_host_exec_time = time()

                host_test_verbose = self.opts_verbose_test_result_only or self.opts_verbose
                host_test_reset = self.opts_mut_reset_type if reset_type is None else reset_type
                host_test_result = self.run_host_test(test.host_test,
                                                      image_path, disk, port, duration,
                                                      micro=target_name,
                                                      verbose=host_test_verbose,
                                                      reset=host_test_reset,
                                                      reset_tout=reset_tout,
                                                      copy_method=selected_copy_method,
                                                      program_cycle_s=target_by_mcu.program_cycle_s)
                single_test_result, single_test_output, single_testduration, single_timeout = host_test_result

            # Store test result
            test_all_result.append(single_test_result)
            total_elapsed_time = time() - start_host_exec_time  # Test time with copy (flashing) / reset
            elapsed_time = single_testduration  # Time of single test case execution after reset

            detailed_test_results[test_index] = {
                'result': single_test_result,
                'output': single_test_output,
                'target_name': target_name,
                'target_name_unique': target_name_unique,
                'toolchain_name': toolchain_name,
                'id': test_id,
                'description': test_description,
                'elapsed_time': round(elapsed_time, 2),
                'duration': single_timeout,
                'copy_method': _copy_method,
            }

            print self.print_test_result(single_test_result, target_name_unique, toolchain_name,
                                         test_id, test_description, elapsed_time, single_timeout)

            # Update database entries for ongoing test
            if self.db_logger and self.db_logger.is_connected():
                test_type = 'SingleTest'
                self.db_logger.insert_test_entry(self.db_logger_build_id,
                                                 target_name,
                                                 toolchain_name,
                                                 test_type,
                                                 test_id,
                                                 single_test_result,
                                                 single_test_output,
                                                 elapsed_time,
                                                 single_timeout,
                                                 test_index)

            # If we perform waterfall test we test until we get OK and we stop testing
            if self.opts_waterfall_test and single_test_result == self.TEST_RESULT_OK:
                break

        if self.db_logger:
            self.db_logger.disconnect()

        return (self.shape_global_test_loop_result(test_all_result, self.opts_waterfall_test and self.opts_consolidate_waterfall_test),
                target_name_unique,
                toolchain_name,
                test_id,
                test_description,
                round(elapsed_time, 2),
                single_timeout,
                self.shape_test_loop_ok_result_count(test_all_result)), detailed_test_results
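
    # Illustrative sketch (hypothetical values): a single MUT descriptor, as
    # loaded from the external MUTs file, in the shape handle_mut() expects:
    #
    #   mut = {
    #       "mcu": "LPC1768",
    #       "port": "COM4",
    #       "disk": "J:\\",
    #       "peripherals": ["TMP102", "digital_loop"]
    #   }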
    def handle(self, test_spec, target_name, toolchain_name, test_loops=1):
        """Function determines MUT's mbed disk/port and copies binary to
        target.
        """
        handle_results = []
        data = json.loads(test_spec)

        # Find a suitable MUT:
        mut = None
        for id, m in self.muts.iteritems():
            if m['mcu'] == data['mcu']:
                mut = m
                handle_result = self.handle_mut(mut, data, target_name, toolchain_name, test_loops=test_loops)
                handle_results.append(handle_result)

        return handle_results
    def print_test_result(self, test_result, target_name, toolchain_name,
                          test_id, test_description, elapsed_time, duration):
        """Use specific convention to print test result and related data
        """
        tokens = []
        tokens.append("TargetTest")
        tokens.append(target_name)
        tokens.append(toolchain_name)
        tokens.append(test_id)
        tokens.append(test_description)
        separator = "::"

        time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
        result = separator.join(tokens) + " [" + test_result + "]" + time_info
        return Fore.MAGENTA + result + Fore.RESET

    def shape_test_loop_ok_result_count(self, test_all_result):
        """Reformats list of results to simple string
        """
        test_loop_count = len(test_all_result)
        test_loop_ok_result = test_all_result.count(self.TEST_RESULT_OK)
        return "%d/%d" % (test_loop_ok_result, test_loop_count)
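
    # Example line produced by print_test_result() (hypothetical values):
    #   TargetTest::K64F::GCC_ARM::MBED_A1::Basic [OK] in 2.31 of 20 sec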
    def shape_global_test_loop_result(self, test_all_result, waterfall_and_consolidate):
        """Reformats list of results to simple string
        """
        result = self.TEST_RESULT_FAIL

        if all(test_all_result[0] == res for res in test_all_result):
            result = test_all_result[0]
        elif waterfall_and_consolidate and any(res == self.TEST_RESULT_OK for res in test_all_result):
            result = self.TEST_RESULT_OK

        return result
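
    # Illustrative examples:
    #   ["OK", "OK"]                          -> "OK"   (all loops agree)
    #   ["FAIL", "OK"]                        -> "FAIL" (mixed, no consolidation)
    #   ["FAIL", "OK"], waterfall+consolidate -> "OK"   (any OK wins)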
    def run_host_test(self, name, image_path, disk, port, duration,
                      micro=None, reset=None, reset_tout=None,
                      verbose=False, copy_method=None, program_cycle_s=None):
        """Function creates new process with host test configured with particular test case.
        Function is also polling for serial port activity from process to catch all data
        printed by test runner and host test during test execution
        """
        def get_char_from_queue(obs):
            """Get character from queue in a safe way
            """
            try:
                c = obs.queue.get(block=True, timeout=0.5)
            except Empty, _:
                c = None
            return c

        def filter_queue_char(c):
            """Filters out non ASCII characters from serial port
            """
            if ord(c) not in range(128):
                c = ' '
            return c

        def get_test_result(output):
            """Parse test 'output' data
            """
            result = self.TEST_RESULT_TIMEOUT
            for line in "".join(output).splitlines():
                search_result = self.RE_DETECT_TESTCASE_RESULT.search(line)
                if search_result and len(search_result.groups()):
                    result = self.TEST_RESULT_MAPPING[search_result.groups(0)[0]]
                    break
            return result

        def get_auto_property_value(property_name, line):
            """Scans auto detection line from MUT and returns scanned parameter 'property_name'
            Returns string
            """
            result = None
            if re.search("HOST: Property '%s'" % property_name, line) is not None:
                property = re.search("HOST: Property '%s' = '([\w\d _]+)'" % property_name, line)
                if property is not None and len(property.groups()) == 1:
                    result = property.groups()[0]
            return result
        # print "{%s} port:%s disk:%s" % (name, port, disk),
        cmd = ["python",
               '%s.py' % name,
               '-d', disk,
               '-f', '"%s"' % image_path,
               '-p', port,
               '-t', str(duration),
               '-C', str(program_cycle_s)]

        if get_module_avail('mbed_lstools') and self.opts_auto_detect:
            cmd += ['--auto']

        # Add extra parameters to host_test
        if copy_method is not None:
            cmd += ["-c", copy_method]
        if micro is not None:
            cmd += ["-m", micro]
        if reset is not None:
            cmd += ["-r", reset]
        if reset_tout is not None:
            cmd += ["-R", str(reset_tout)]

        if verbose:
            print Fore.MAGENTA + "Executing '" + " ".join(cmd) + "'" + Fore.RESET
            print "Test::Output::Start"

        proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
        obs = ProcessObserver(proc)
        update_once_flag = {}  # Stores flags checking if some auto-parameter was already set
        line = ''
        output = []
        start_time = time()
        while (time() - start_time) < (2 * duration):
            c = get_char_from_queue(obs)
            if c:
                if verbose:
                    sys.stdout.write(c)
                c = filter_queue_char(c)
                output.append(c)
                # Give the mbed under test a way to communicate the end of the test
                if c in ['\n', '\r']:
                    # Checking for auto-detection information from the test about MUT reset moment
                    if 'reset_target' not in update_once_flag and "HOST: Reset target..." in line:
                        # We will update this marker only once to prevent multiple time resets
                        update_once_flag['reset_target'] = True
                        start_time = time()

                    # Checking for auto-detection information from the test about timeout
                    auto_timeout_val = get_auto_property_value('timeout', line)
                    if 'timeout' not in update_once_flag and auto_timeout_val is not None:
                        # We will update this marker only once to prevent multiple time resets
                        update_once_flag['timeout'] = True
                        duration = int(auto_timeout_val)

                    # Detect mbed assert:
                    if 'mbed assertation failed: ' in line:
                        output.append('{{mbed_assert}}')
                        break

                    # Check for test end
                    if '{end}' in line:
                        break
                    line = ''
                else:
                    line += c
        end_time = time()
        testcase_duration = end_time - start_time  # Test case duration from reset to {end}

        c = get_char_from_queue(obs)
        if c:
            if verbose:
                sys.stdout.write(c)
            c = filter_queue_char(c)
            output.append(c)

        if verbose:
            print "Test::Output::Finish"

        # Stop test process
        obs.stop()

        result = get_test_result(output)
        return (result, "".join(output), testcase_duration, duration)
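
    # Illustrative note: the binary under test prints tags such as "{success}"
    # or "{end}" on its serial output; get_test_result() maps the first matching
    # tag through TEST_RESULT_MAPPING, e.g. output containing "{success}" and
    # then "{end}" yields TEST_RESULT_OK.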
    def is_peripherals_available(self, target_mcu_name, peripherals=None):
        """Checks if specified target should run specific peripheral test case defined in MUTs file
        """
        if peripherals is not None:
            peripherals = set(peripherals)

        for id, mut in self.muts.iteritems():
            # Target MCU name check
            if mut["mcu"] != target_mcu_name:
                continue
            # Peripherals check
            if peripherals is not None:
                if 'peripherals' not in mut:
                    continue
                if not peripherals.issubset(set(mut['peripherals'])):
                    continue
            return True
        return False

    def shape_test_request(self, mcu, image_path, test_id, duration=10):
        """Function prepares JSON structure describing test specification
        """
        test_spec = {
            "mcu": mcu,
            "image": image_path,
            "duration": duration,
            "test_id": test_id,
        }
        return json.dumps(test_spec)
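
    # Illustrative example (hypothetical values; JSON key order may vary):
    #   self.shape_test_request("LPC1768", "./build/test.bin", "MBED_A1", 20)
    #   returns '{"mcu": "LPC1768", "image": "./build/test.bin",
    #             "duration": 20, "test_id": "MBED_A1"}'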
def get_unique_value_from_summary(test_summary, index):
    """Gets list of unique target names
    """
    result = []
    for test in test_summary:
        target_name = test[index]
        if target_name not in result:
            result.append(target_name)
    return sorted(result)


def get_unique_value_from_summary_ext(test_summary, index_key, index_val):
    """Gets list of unique target names and returns a dictionary
    """
    result = {}
    for test in test_summary:
        key = test[index_key]
        val = test[index_val]
        if key not in result:
            result[key] = val
    return result
def show_json_file_format_error(json_spec_filename, line, column):
    """Prints JSON broken content
    """
    with open(json_spec_filename) as data_file:
        line_no = 1
        for json_line in data_file:
            if line_no + 5 >= line:  # Print last few lines before error
                print 'Line %d:\t' % line_no + json_line,  # Prints line
            if line_no == line:
                print ' ' * len('Line %d:' % line_no) + '\t', '-' * (column - 1) + '^'
                break
            line_no += 1
def json_format_error_defect_pos(json_error_msg):
    """Gets first error line and column in JSON file format.
    Parsed from exception thrown by json.loads() string
    """
    result = None
    line, column = 0, 0
    # Line value search
    line_search = re.search('line [0-9]+', json_error_msg)
    if line_search is not None:
        ls = line_search.group().split(' ')
        if len(ls) == 2:
            line = int(ls[1])
            # Column position search
            column_search = re.search('column [0-9]+', json_error_msg)
            if column_search is not None:
                cs = column_search.group().split(' ')
                if len(cs) == 2:
                    column = int(cs[1])
                    result = [line, column]
    return result
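
# Illustrative example: for a json.loads() error message such as
# "Expecting ',' delimiter: line 7 column 12 (char 142)" the function
# returns [7, 12].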

def get_json_data_from_file(json_spec_filename, verbose=False):
    """ Loads from file JSON formatted string to data structure
    """
    result = None
    try:
        with open(json_spec_filename) as data_file:
            try:
                result = json.load(data_file)
            except ValueError as json_error_msg:
                result = None
                print 'JSON file %s parsing failed. Reason: %s' % (json_spec_filename, json_error_msg)
                # We can print where error occurred inside JSON file if we can parse exception msg
                json_format_defect_pos = json_format_error_defect_pos(str(json_error_msg))
                if json_format_defect_pos is not None:
                    line = json_format_defect_pos[0]
                    column = json_format_defect_pos[1]
                    print
                    show_json_file_format_error(json_spec_filename, line, column)
    except IOError as fileopen_error_msg:
        print 'JSON file %s not opened. Reason: %s' % (json_spec_filename, fileopen_error_msg)
        print
    if verbose and result:
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(result)
    return result
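
# Typical use (sketch; 'muts_all.json' stands for whatever spec file you pass in):
#   muts = get_json_data_from_file('muts_all.json', verbose=True)
#   if muts is None:
#       pass  # parsing/IO details were already printed above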

def print_muts_configuration_from_json(json_data, join_delim=", ", platform_filter=None):
    """ Prints MUTs configuration passed to test script for verboseness
    """
    muts_info_cols = []
    # We need to check all unique properties for each defined MUT
    for k in json_data:
        mut_info = json_data[k]
        for mut_property in mut_info:
            if mut_property not in muts_info_cols:
                muts_info_cols.append(mut_property)

    # Prepare pretty table object to display all MUTs
    pt_cols = ["index"] + muts_info_cols
    pt = PrettyTable(pt_cols)
    for col in pt_cols:
        pt.align[col] = "l"

    # Add rows to pretty print object
    for k in json_data:
        row = [k]
        mut_info = json_data[k]

        add_row = True
        if platform_filter and 'mcu' in mut_info:
            add_row = re.search(platform_filter, mut_info['mcu']) is not None
        if add_row:
            for col in muts_info_cols:
                cell_val = mut_info[col] if col in mut_info else None
                if type(cell_val) == ListType:
                    cell_val = join_delim.join(cell_val)
                row.append(cell_val)
            pt.add_row(row)
    return pt.get_string()
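
# Sketch with a minimal hand-written MUTs dictionary (same shape as muts_all.json;
# values are illustrative):
#   muts = {"1": {"mcu": "K64F", "port": "COM5", "disk": "E:", "peripherals": []}}
#   print print_muts_configuration_from_json(muts, platform_filter='K64F')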

def print_test_configuration_from_json(json_data, join_delim=", "):
    """ Prints test specification configuration passed to test script for verboseness
    """
    toolchains_info_cols = []
    # We need to check all toolchains for each device
    for k in json_data:
        # k should be 'targets'
        targets = json_data[k]
        for target in targets:
            toolchains = targets[target]
            for toolchain in toolchains:
                if toolchain not in toolchains_info_cols:
                    toolchains_info_cols.append(toolchain)

    # Prepare pretty table object to display test specification
    pt_cols = ["mcu"] + sorted(toolchains_info_cols)
    pt = PrettyTable(pt_cols)
    for col in pt_cols:
        pt.align[col] = "l"

    # { target : [conflicted toolchains] }
    toolchain_conflicts = {}
    toolchain_path_conflicts = []
    for k in json_data:
        # k should be 'targets'
        targets = json_data[k]
        for target in targets:
            target_supported_toolchains = get_target_supported_toolchains(target)
            if not target_supported_toolchains:
                target_supported_toolchains = []
            target_name = target if target in TARGET_MAP else "%s*" % target
            row = [target_name]
            toolchains = targets[target]

            for toolchain in sorted(toolchains_info_cols):
                # Check for conflicts: target vs toolchain
                conflict = False
                conflict_path = False
                if toolchain in toolchains:
                    if toolchain not in target_supported_toolchains:
                        conflict = True
                        if target not in toolchain_conflicts:
                            toolchain_conflicts[target] = []
                        toolchain_conflicts[target].append(toolchain)
                # Add marker inside table about target usage / conflict
                cell_val = 'Yes' if toolchain in toolchains else '-'
                if conflict:
                    cell_val += '*'
                # Check for conflicts: toolchain vs toolchain path
                if toolchain in TOOLCHAIN_PATHS:
                    toolchain_path = TOOLCHAIN_PATHS[toolchain]
                    if not os.path.isdir(toolchain_path):
                        conflict_path = True
                        if toolchain not in toolchain_path_conflicts:
                            toolchain_path_conflicts.append(toolchain)
                if conflict_path:
                    cell_val += '#'
                row.append(cell_val)
            pt.add_row(row)

    # generate result string
    result = pt.get_string()    # Test specification table
    if toolchain_conflicts or toolchain_path_conflicts:
        result += "\n"
        result += "Toolchain conflicts:\n"
        for target in toolchain_conflicts:
            if target not in TARGET_MAP:
                result += "\t* Target %s unknown\n" % target
            conflict_target_list = join_delim.join(toolchain_conflicts[target])
            suffix = 's' if len(toolchain_conflicts[target]) > 1 else ''
            result += "\t* Target %s does not support %s toolchain%s\n" % (target, conflict_target_list, suffix)

        for toolchain in toolchain_path_conflicts:
            # Let's check toolchain configuration
            if toolchain in TOOLCHAIN_PATHS:
                toolchain_path = TOOLCHAIN_PATHS[toolchain]
                if not os.path.isdir(toolchain_path):
                    result += "\t# Toolchain %s path not found: %s\n" % (toolchain, toolchain_path)
    return result
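
# Sketch with a minimal test specification (same 'targets' shape the function
# expects; target and toolchain names are examples):
#   test_spec = {"targets": {"K64F": ["ARM", "GCC_ARM"]}}
#   print print_test_configuration_from_json(test_spec)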

def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=',', platform_filter=None):
    """ Generates table summary with all test cases and additional test cases
        information using pretty print functionality. Allows test suite user to
        see test cases
    """
    # get all unique test ID prefixes
    unique_test_id = []
    for test in TESTS:
        split = test['id'].split('_')[:-1]
        test_id_prefix = '_'.join(split)
        if test_id_prefix not in unique_test_id:
            unique_test_id.append(test_id_prefix)
    unique_test_id.sort()
    counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
    counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)

    test_properties = ['id',
                       'automated',
                       'description',
                       'peripherals',
                       'host_test',
                       'duration'] if cols is None else cols

    # All tests status table print
    pt = PrettyTable(test_properties)
    for col in test_properties:
        pt.align[col] = "l"
    pt.align['duration'] = "r"

    counter_all = 0
    counter_automated = 0
    pt.padding_width = 1 # One space between column edges and contents (default)

    for test_id in sorted(TEST_MAP.keys()):
        if platform_filter is not None:
            # Filter out platforms using regex
            if re.search(platform_filter, test_id) is None:
                continue
        row = []
        test = TEST_MAP[test_id]
        split = test_id.split('_')[:-1]
        test_id_prefix = '_'.join(split)

        for col in test_properties:
            col_value = test[col]
            if type(test[col]) == ListType:
                col_value = join_delim.join(test[col])
            elif test[col] == None:
                col_value = "-"
            row.append(col_value)
        if test['automated'] == True:
            counter_dict_test_id_types[test_id_prefix] += 1
            counter_automated += 1
        pt.add_row(row)
        # Update counters
        counter_all += 1
        counter_dict_test_id_types_all[test_id_prefix] += 1
    result = pt.get_string()
    result += "\n\n"

    if result_summary and not platform_filter:
        # Automation result summary
        test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
        pt = PrettyTable(test_id_cols)
        pt.align['automated'] = "r"
        pt.align['all'] = "r"
        pt.align['percent [%]'] = "r"

        percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
        str_progress = progress_bar(percent_progress, 75)
        pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
        result += "Automation coverage:\n"
        result += pt.get_string()
        result += "\n\n"

        # Test automation coverage table print
        test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
        pt = PrettyTable(test_id_cols)
        pt.align['id'] = "l"
        pt.align['automated'] = "r"
        pt.align['all'] = "r"
        pt.align['percent [%]'] = "r"
        for unique_id in unique_test_id:
            # print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
            percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
            str_progress = progress_bar(percent_progress, 75)
            row = [unique_id,
                   counter_dict_test_id_types[unique_id],
                   counter_dict_test_id_types_all[unique_id],
                   percent_progress,
                   "[" + str_progress + "]"]
            pt.add_row(row)
        result += "Test automation coverage:\n"
        result += pt.get_string()
        result += "\n\n"
    return result

def progress_bar(percent_progress, saturation=0):
    """ This function creates progress bar with optional simple saturation mark
    """
    step = int(percent_progress / 2)    # Scale percentage down by 2 (scale: 1 - 50)
    str_progress = '#' * step + '.' * int(50 - step)
    c = '!' if str_progress[38] == '.' else '|'
    if saturation > 0:
        saturation = saturation / 2
        str_progress = str_progress[:saturation] + c + str_progress[saturation:]
    return str_progress
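
# For example (Python 2 integer division assumed for the saturation index):
#   progress_bar(50, 75) == '#' * 25 + '.' * 12 + '!' + '.' * 13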

def singletest_in_cli_mode(single_test):
    """ Runs SingleTestRunner object in CLI (Command line interface) mode
        @return returns success code (0 == success) for building and running tests
    """
    start = time()
    # Execute tests depending on options and filter applied
    test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = single_test.execute()
    elapsed_time = time() - start

    # Human readable summary
    if not single_test.opts_suppress_summary:
        # prints well-formed summary with results (SQL table like)
        print single_test.generate_test_summary(test_summary, shuffle_seed)
    if single_test.opts_test_x_toolchain_summary:
        # prints well-formed summary with results (SQL table like)
        # table shows test x toolchain test result matrix
        print single_test.generate_test_summary_by_target(test_summary, shuffle_seed)

    print "Completed in %.2f sec" % elapsed_time
    print

    # Write summary of the builds
    print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
    status = print_report_exporter.report(build_report)

    # Store extra reports in files
    if single_test.opts_report_html_file_name:
        # Export results in form of HTML report to separate file
        report_exporter = ReportExporter(ResultExporterType.HTML)
        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
    if single_test.opts_report_junit_file_name:
        # Export results in form of JUnit XML report to separate file
        report_exporter = ReportExporter(ResultExporterType.JUNIT)
        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
    if single_test.opts_report_text_file_name:
        # Export results in form of a text file
        report_exporter = ReportExporter(ResultExporterType.TEXT)
        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_text_file_name, test_suite_properties=test_suite_properties_ext)
    if single_test.opts_report_build_file_name:
        # Export build results as JUnit XML report to separate file
        report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
        report_exporter.report_to_file(build_report, single_test.opts_report_build_file_name, test_suite_properties=build_properties)

    # Returns True if no build failures of the test projects or their dependencies
    return status

class TestLogger():
    """ Super-class for logging and printing ongoing events for test suite pass
    """
    def __init__(self, store_log=True):
        """ We can control if logger actually stores log in memory
            or just handles all log entries immediately
        """
        self.log = []
        self.log_to_file = False
        self.log_file_name = None
        self.store_log = store_log

        self.LogType = construct_enum(INFO='Info',
                                      WARN='Warning',
                                      NOTIF='Notification',
                                      ERROR='Error',
                                      EXCEPT='Exception')

        self.LogToFileAttr = construct_enum(CREATE=1,    # Create or overwrite existing log file
                                            APPEND=2)    # Append to existing log file

    def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
        """ Log one line of text
        """
        log_timestamp = time()
        log_entry = {'log_type': LogType,
                     'log_timestamp': log_timestamp,
                     'log_line': log_line,
                     '_future': None
                     }
        # Store log in memory
        if self.store_log:
            self.log.append(log_entry)
        return log_entry

class CLITestLogger(TestLogger):
    """ Logger used with CLI (Command line interface) test suite. Logs on screen and to file if needed
    """
    def __init__(self, store_log=True, file_name=None):
        TestLogger.__init__(self)
        self.log_file_name = file_name
        #self.TIMESTAMP_FORMAT = '%y-%m-%d %H:%M:%S' # Full date and time
        self.TIMESTAMP_FORMAT = '%H:%M:%S' # Time only

    def log_print(self, log_entry, timestamp=True):
        """ Prints on screen formatted log entry
        """
        ts = log_entry['log_timestamp']
        timestamp_str = datetime.datetime.fromtimestamp(ts).strftime("[%s] " % self.TIMESTAMP_FORMAT) if timestamp else ''
        log_line_str = "%(log_type)s: %(log_line)s" % log_entry
        return timestamp_str + log_line_str

    def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
        """ Logs line, if log file output was specified log line will be appended
            at the end of log file
        """
        log_entry = TestLogger.log_line(self, LogType, log_line)
        log_line_str = self.log_print(log_entry, timestamp)
        if self.log_file_name is not None:
            try:
                with open(self.log_file_name, 'a') as f:
                    f.write(log_line_str + line_delim)
            except IOError:
                pass
        return log_line_str
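
# Minimal sketch of the CLI logger in use ('test.log' is an arbitrary name):
#   logger = CLITestLogger(file_name='test.log')
#   print logger.log_line(logger.LogType.INFO, 'Test suite started')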

def factory_db_logger(db_url):
    """ Factory database driver depending on database type supplied in database connection string db_url
    """
    if db_url is not None:
        from tools.test_mysql import MySQLDBAccess
        connection_info = BaseDBAccess().parse_db_connection_string(db_url)
        if connection_info is not None:
            (db_type, username, password, host, db_name) = BaseDBAccess().parse_db_connection_string(db_url)
            if db_type == 'mysql':
                return MySQLDBAccess()
    return None

def detect_database_verbose(db_url):
    """ Uses verbose mode (prints) database detection sequence to check if database connection string is valid
    """
    result = BaseDBAccess().parse_db_connection_string(db_url)
    if result is not None:
        # Parsing passed
        (db_type, username, password, host, db_name) = result
        #print "DB type '%s', user name '%s', password '%s', host '%s', db name '%s'" % result
        # Let's try to connect
        db_ = factory_db_logger(db_url)
        if db_ is not None:
            print "Connecting to database '%s'..." % db_url,
            db_.connect(host, username, password, db_name)
            if db_.is_connected():
                print "ok"
                print "Detecting database..."
                print db_.detect_database(verbose=True)
                print "Disconnecting...",
                db_.disconnect()
                print "done"
        else:
            print "Database type '%s' unknown" % db_type
    else:
        print "Parse error: '%s' - DB Url error" % db_url

def get_module_avail(module_name):
    """ This function returns True if module_name is already an imported module
    """
    return module_name in sys.modules.keys()

def get_autodetected_MUTS_list(platform_name_filter=None):
    oldError = None
    if os.name == 'nt':
        # Disable Windows error box temporarily
        oldError = ctypes.windll.kernel32.SetErrorMode(1) # note that SEM_FAILCRITICALERRORS = 1
    mbeds = mbed_lstools.create()
    detect_muts_list = mbeds.list_mbeds()
    if os.name == 'nt':
        ctypes.windll.kernel32.SetErrorMode(oldError)
    return get_autodetected_MUTS(detect_muts_list, platform_name_filter=platform_name_filter)
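
# Sketch (requires the optional mbed_lstools module; the filter value is an example):
#   muts = get_autodetected_MUTS_list(platform_name_filter=['K64F'])
#   print print_muts_configuration_from_json(muts)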

def get_autodetected_MUTS(mbeds_list, platform_name_filter=None):
    """ Function detects all connected to host mbed-enabled devices and generates artificial MUTS file.
        If function fails to auto-detect devices it will return empty dictionary.

        if get_module_avail('mbed_lstools'):
            mbeds = mbed_lstools.create()
            mbeds_list = mbeds.list_mbeds()

        @param mbeds_list list of mbeds captured from mbed_lstools
        @param platform_name You can filter 'platform_name' with list of filtered targets from 'platform_name_filter'
    """
    result = {}   # Should be in muts_all.json format
    # Align mbeds_list from mbed_lstools to MUT file format (JSON dictionary with muts)
    # mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:', 'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}]
    index = 1
    for mut in mbeds_list:
        # Filter the MUTS if a filter is specified
        if platform_name_filter and not mut['platform_name'] in platform_name_filter:
            continue
        # For mcu_unique we are assigning the 'platform_name_unique' value from mbedls output (if it exists),
        # if not we are creating our own unique value (last few chars from platform's target_id).
        m = {'mcu': mut['platform_name'],
             'mcu_unique': mut['platform_name_unique'] if 'platform_name_unique' in mut else "%s[%s]" % (mut['platform_name'], mut['target_id'][-4:]),
             'port': mut['serial_port'],
             'disk': mut['mount_point'],
             'peripherals': []     # No peripheral detection
             }
        if index not in result:
            result[index] = {}
        result[index] = m
        index += 1
    return result
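
# The returned dictionary mirrors the muts_all.json layout, e.g. (values illustrative):
#   {1: {'mcu': 'KL25Z', 'mcu_unique': 'KL25Z[0240]', 'port': 'COM89',
#        'disk': 'F:', 'peripherals': []}}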

def get_autodetected_TEST_SPEC(mbeds_list,
                               use_default_toolchain=True,
                               use_supported_toolchains=False,
                               toolchain_filter=None,
                               platform_name_filter=None):
    """ Function detects all connected to host mbed-enabled devices and generates artificial test_spec file.
        If function fails to auto-detect devices it will return empty 'targets' test_spec description.

        use_default_toolchain - if True add default toolchain to test_spec
        use_supported_toolchains - if True add all supported toolchains to test_spec
        toolchain_filter - if [...list of toolchains...] add from all toolchains only those in filter to test_spec
    """
    result = {'targets': {}}

    for mut in mbeds_list:
        mcu = mut['mcu']
        if platform_name_filter is None or (platform_name_filter and mut['mcu'] in platform_name_filter):
            if mcu in TARGET_MAP:
                default_toolchain = TARGET_MAP[mcu].default_toolchain
                supported_toolchains = TARGET_MAP[mcu].supported_toolchains

                # Decide which toolchains should be added to test specification toolchain pool for each target
                toolchains = []
                if use_default_toolchain:
                    toolchains.append(default_toolchain)
                if use_supported_toolchains:
                    toolchains += supported_toolchains
                if toolchain_filter is not None:
                    all_toolchains = supported_toolchains + [default_toolchain]
                    for toolchain in toolchain_filter:
                        if toolchain in all_toolchains:
                            toolchains.append(toolchain)

                result['targets'][mcu] = list(set(toolchains))
    return result
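
# Sketch: feed it the values of an auto-detected MUTs dictionary; toolchain set
# contents depend on the flags above and their order is not guaranteed:
#   test_spec = get_autodetected_TEST_SPEC(muts.values(), use_supported_toolchains=True)
#   # -> e.g. {'targets': {'KL25Z': ['ARM', 'GCC_ARM', ...]}}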

def get_default_test_options_parser():
    """ Get common test script options used by CLI, web services etc.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--tests',
                        dest='test_spec_filename',
                        metavar="FILE",
                        type=argparse_filestring_type,
                        help='Points to file with test specification')

    parser.add_argument('-M', '--MUTS',
                        dest='muts_spec_filename',
                        metavar="FILE",
                        type=argparse_filestring_type,
                        help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')

    parser.add_argument("-j", "--jobs",
                        dest='jobs',
                        metavar="NUMBER",
                        type=int,
                        help="Define number of compilation jobs. Default value is 1")

    if get_module_avail('mbed_lstools'):
        # Additional features available when mbed_lstools is installed on host and imported
        # mbed_lstools allows users to detect mbed-enabled devices connected to the host
        parser.add_argument('--auto',
                            dest='auto_detect',
                            action="store_true",
                            help='Use mbed-ls module to detect all connected mbed devices')

        toolchain_list = list(TOOLCHAINS) + ["DEFAULT", "ALL"]
        parser.add_argument('--tc',
                            dest='toolchains_filter',
                            type=argparse_many(argparse_uppercase_type(toolchain_list, "toolchains")),
                            help="Toolchain filter for --auto argument. Use toolchain names separated by comma, 'default' or 'all' to select toolchains")

        test_scopes = ', '.join(["'%s'" % n for n in get_available_oper_test_scopes()])
        parser.add_argument('--oper',
                            dest='operability_checks',
                            type=argparse_lowercase_type(get_available_oper_test_scopes(), "scopes"),
                            help='Perform interoperability tests between host and connected mbed devices. Available test scopes are: %s' % test_scopes)

    parser.add_argument('--clean',
                        dest='clean',
                        action="store_true",
                        help='Clean the build directory')

    parser.add_argument('-P', '--only-peripherals',
                        dest='test_only_peripheral',
                        default=False,
                        action="store_true",
                        help='Test only peripheral declared for MUT and skip common tests')

    parser.add_argument("--profile", dest="profile", action="append",
                        type=argparse_filestring_type,
                        default=[])

    parser.add_argument('-C', '--only-commons',
                        dest='test_only_common',
                        default=False,
                        action="store_true",
                        help='Test only board internals. Skip peripherals tests and perform common tests')

    parser.add_argument('-n', '--test-by-names',
                        dest='test_by_names',
                        type=argparse_many(str),
                        help='Runs only tests enumerated in this switch. Use comma to separate test case names')

    parser.add_argument('-p', '--peripheral-by-names',
                        dest='peripheral_by_names',
                        type=argparse_many(str),
                        help='Forces discovery of particular peripherals. Use comma to separate peripheral names')

    copy_methods = host_tests_plugins.get_plugin_caps('CopyMethod')
    copy_methods_str = "Plugin support: " + ', '.join(copy_methods)

    parser.add_argument('-c', '--copy-method',
                        dest='copy_method',
                        type=argparse_uppercase_type(copy_methods, "flash method"),
                        help="Select binary copy (flash) method. Default is Python's shutil.copy() method. %s" % copy_methods_str)

    reset_methods = host_tests_plugins.get_plugin_caps('ResetMethod')
    reset_methods_str = "Plugin support: " + ', '.join(reset_methods)

    parser.add_argument('-r', '--reset-type',
                        dest='mut_reset_type',
                        default=None,
                        type=argparse_uppercase_type(reset_methods, "reset method"),
                        help='Extra reset method used to reset MUT by host test script. %s' % reset_methods_str)

    parser.add_argument('-g', '--goanna-for-tests',
                        dest='goanna_for_tests',
                        action="store_true",
                        help='Run Goanna static analysis tool for tests. (Project will be rebuilt)')

    parser.add_argument('-G', '--goanna-for-sdk',
                        dest='goanna_for_mbed_sdk',
                        action="store_true",
                        help='Run Goanna static analysis tool for mbed SDK (Project will be rebuilt)')

    parser.add_argument('-s', '--suppress-summary',
                        dest='suppress_summary',
                        default=False,
                        action="store_true",
                        help='Suppresses display of well-formatted table with test results')

    parser.add_argument('-t', '--test-summary',
                        dest='test_x_toolchain_summary',
                        default=False,
                        action="store_true",
                        help='Displays well-formatted table with test x toolchain test result per target')

    parser.add_argument('-A', '--test-automation-report',
                        dest='test_automation_report',
                        default=False,
                        action="store_true",
                        help='Prints information about all tests and exits')

    parser.add_argument('-R', '--test-case-report',
                        dest='test_case_report',
                        default=False,
                        action="store_true",
                        help='Prints information about all test cases and exits')

    parser.add_argument("-S", "--supported-toolchains",
                        action="store_true",
                        dest="supported_toolchains",
                        default=False,
                        help="Displays supported matrix of MCUs and toolchains")

    parser.add_argument("-O", "--only-build",
                        action="store_true",
                        dest="only_build_tests",
                        default=False,
                        help="Only build tests, skips actual test procedures (flashing etc.)")

    parser.add_argument('--parallel',
                        dest='parallel_test_exec',
                        default=False,
                        action="store_true",
                        help='Experimental: execute test runners for MUTs connected to your host in parallel (speeds up test result collection)')

    parser.add_argument('--config',
                        dest='verbose_test_configuration_only',
                        default=False,
                        action="store_true",
                        help='Displays full test specification and MUTs configuration and exits')

    parser.add_argument('--loops',
                        dest='test_loops_list',
                        type=argparse_many(str),
                        help='Set no. of loops per test. Format: TEST_1=1,TEST_2=2,TEST_3=3')

    parser.add_argument('--global-loops',
                        dest='test_global_loops_value',
                        type=int,
                        help='Set global number of test loops per test. Default value is 1')

    parser.add_argument('--consolidate-waterfall',
                        dest='consolidate_waterfall_test',
                        default=False,
                        action="store_true",
                        help='Used with --waterfall argument. Adds only one test to report reflecting outcome of waterfall test.')

    parser.add_argument('-W', '--waterfall',
                        dest='waterfall_test',
                        default=False,
                        action="store_true",
                        help='Used with --loops or --global-loops arguments. Tests until OK result occurs and assumes test passed')

    parser.add_argument('-N', '--firmware-name',
                        dest='firmware_global_name',
                        help='Set global name for all produced projects. Note, proper file extension will be added by build scripts')

    parser.add_argument('-u', '--shuffle',
                        dest='shuffle_test_order',
                        default=False,
                        action="store_true",
                        help='Shuffles test execution order')

    parser.add_argument('--shuffle-seed',
                        dest='shuffle_test_seed',
                        default=None,
                        help='Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)')

    parser.add_argument('-f', '--filter',
                        dest='general_filter_regex',
                        type=argparse_many(str),
                        default=None,
                        help='For some commands you can use filter to filter out results')

    parser.add_argument('--inc-timeout',
                        dest='extend_test_timeout',
                        metavar="NUMBER",
                        type=int,
                        help='You can increase global timeout for each test by specifying additional test timeout in seconds')

    parser.add_argument('--db',
                        dest='db_url',
                        help='This specifies what database test suite uses to store its state. To pass DB connection info use database connection string. Example: \'mysql://username:password@127.0.0.1/db_name\'')

    parser.add_argument('-l', '--log',
                        dest='log_file_name',
                        help='Log events to external file (note not all console entries may be visible in log file)')

    parser.add_argument('--report-html',
                        dest='report_html_file_name',
                        help='You can log test suite results in form of HTML report')

    parser.add_argument('--report-junit',
                        dest='report_junit_file_name',
                        help='You can log test suite results in form of JUnit compliant XML report')

    parser.add_argument("--report-build",
                        dest="report_build_file_name",
                        help="Output the build results to a JUnit XML file")

    parser.add_argument("--report-text",
                        dest="report_text_file_name",
                        help="Output the build results to a text file")

    parser.add_argument('--verbose-skipped',
                        dest='verbose_skipped_tests',
                        default=False,
                        action="store_true",
                        help='Prints some extra information about skipped tests')

    parser.add_argument('-V', '--verbose-test-result',
                        dest='verbose_test_result_only',
                        default=False,
                        action="store_true",
                        help='Prints test serial output')

    parser.add_argument('-v', '--verbose',
                        dest='verbose',
                        default=False,
                        action="store_true",
                        help='Verbose mode (prints some extra information)')

    parser.add_argument('--version',
                        dest='version',
                        default=False,
                        action="store_true",
                        help='Prints script version and exits')

    parser.add_argument('--stats-depth',
                        dest='stats_depth',
                        default=2,
                        type=int,
                        help="Depth level for static memory report")

    return parser
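
# Typical wiring in a CLI entry point such as singletest.py (sketch):
#   parser = get_default_test_options_parser()
#   opts = parser.parse_args()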

def test_path_to_name(path, base):
    """ Change all slashes in a path into hyphens
        This creates a unique cross-platform test name based on the path
        This can eventually be overridden by a to-be-determined meta-data mechanism """
    name_parts = []
    head, tail = os.path.split(relpath(path, base))

    while (tail and tail != "."):
        name_parts.insert(0, tail)
        head, tail = os.path.split(head)

    return "-".join(name_parts).lower()
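
# For example (path separators are handled per-OS by os.path.split):
#   test_path_to_name('TESTS/network/http', '.')  # -> 'tests-network-http'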

def get_test_config(config_name, target_name):
    """ Finds the path to a test configuration file
        config_name: path to a custom configuration file OR mbed OS interface "ethernet, wifi_odin, etc"
        target_name: name of target to determine if mbed OS interface given is valid

        returns path to config, will return None if no valid config is found
    """
    # If they passed in a full path
    if exists(config_name):
        # This is a module config
        return config_name
    # Otherwise find the path to configuration file based on mbed OS interface
    return TestConfig.get_config_path(config_name, target_name)
2017-08-07 18:13:03 +00:00
def find_tests ( base_dir , target_name , toolchain_name , app_config = None ) :
2016-07-25 18:15:34 +00:00
""" Finds all tests in a directory recursively
base_dir : path to the directory to scan for tests ( ex . ' path/to/project ' )
target_name : name of the target to use for scanning ( ex . ' K64F ' )
toolchain_name : name of the toolchain to use for scanning ( ex . ' GCC_ARM ' )
options : Compile options to pass to the toolchain ( ex . [ ' debug-info ' ] )
2016-08-31 15:20:59 +00:00
app_config - location of a chosen mbed_app . json file
2016-07-25 18:15:34 +00:00
"""
2016-06-13 23:32:22 +00:00
tests = { }
2016-06-28 15:34:28 +00:00
2016-07-25 18:15:34 +00:00
# Prepare the toolchain
2017-03-07 00:37:46 +00:00
toolchain = prepare_toolchain ( [ base_dir ] , None , target_name , toolchain_name ,
2016-08-31 15:20:59 +00:00
silent = True , app_config = app_config )
2016-07-25 18:15:34 +00:00
# Scan the directory for paths to probe for 'TESTS' folders
2016-09-02 16:32:11 +00:00
base_resources = scan_resources ( [ base_dir ] , toolchain )
2016-07-25 18:15:34 +00:00
dirs = base_resources . inc_dirs
2016-06-13 23:32:22 +00:00
for directory in dirs :
2016-07-25 18:15:34 +00:00
subdirs = os . listdir ( directory )
# If the directory contains a subdirectory called 'TESTS', scan it for test cases
if ' TESTS ' in subdirs :
walk_base_dir = join ( directory , ' TESTS ' )
test_resources = toolchain . scan_resources ( walk_base_dir , base_path = base_dir )
# Loop through all subdirectories
for d in test_resources . inc_dirs :
# If the test case folder is not called 'host_tests' and it is
# located two folders down from the main 'TESTS' folder (ex. TESTS/testgroup/testcase)
# then add it to the tests
path_depth = get_path_depth ( relpath ( d , walk_base_dir ) )
if path_depth == 2 :
test_group_directory_path , test_case_directory = os . path . split ( d )
test_group_directory = os . path . basename ( test_group_directory_path )
2017-05-25 11:09:18 +00:00
2016-07-25 18:15:34 +00:00
# Check to make sure discoverd folder is not in a host test directory
if test_case_directory != ' host_tests ' and test_group_directory != ' host_tests ' :
2016-09-06 15:23:32 +00:00
test_name = test_path_to_name ( d , base_dir )
2016-07-25 18:15:34 +00:00
tests [ test_name ] = d
2016-06-28 15:34:28 +00:00
2016-06-13 23:32:22 +00:00
return tests
2016-06-28 15:34:28 +00:00
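
# Sketch, matching the parameter examples in the docstring above:
#   tests = find_tests('path/to/project', 'K64F', 'GCC_ARM')
#   print_tests(tests, format="json")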

def print_tests(tests, format="list", sort=True):
    """ Given a dictionary of tests (as returned from "find_tests"), print them
        in the specified format """
    if format == "list":
        for test_name in sorted(tests.keys()):
            test_path = tests[test_name]
            print "Test Case:"
            print "    Name: %s" % test_name
            print "    Path: %s" % test_path
    elif format == "json":
        print json.dumps(tests, indent=2)
    else:
        print "Unknown format '%s'" % format
        sys.exit(1)

def norm_relative_path(path, start):
    """ This function will create a normalized, relative path. It mimics the
        python os.path.relpath function, but also normalizes a Windows-style path
        that uses backslashes to a Unix style path that uses forward slashes. """
    path = os.path.normpath(path)
    path = os.path.relpath(path, start)
    path = path.replace("\\", "/")
    return path
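
# For example (illustrative; on POSIX the backslash form would pass through unchanged):
#   norm_relative_path('C:\\work\\proj\\BUILD\\tests', 'C:\\work\\proj')
#   # -> 'BUILD/tests' on Windows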
def build_test_worker(*args, **kwargs):
    """ This is a worker function for the parallel building of tests. The `args`
        and `kwargs` are passed directly to `build_project`. It returns a dictionary
        with the following structure:

        {
            'result': `True` if no exceptions were thrown, `False` otherwise
            'reason': Instance of exception that was thrown on failure
            'bin_file': Path to the created binary if `build_project` was
                        successful. Not present otherwise
            'kwargs': The keyword arguments that were passed to `build_project`.
                      This includes arguments that were modified (ex. report)
        }
    """
    bin_file = None
    ret = {
        'result': False,
        'args': args,
        'kwargs': kwargs
    }

    # Use parent TOOLCHAIN_PATHS variable
    for key, value in kwargs['toolchain_paths'].iteritems():
        TOOLCHAIN_PATHS[key] = value

    del kwargs['toolchain_paths']

    try:
        bin_file = build_project(*args, **kwargs)
        ret['result'] = True
        ret['bin_file'] = bin_file
        ret['kwargs'] = kwargs
    except NotSupportedException, e:
        ret['reason'] = e
    except ToolException, e:
        ret['reason'] = e
    except KeyboardInterrupt, e:
        ret['reason'] = e
    except:
        # Print unhandled exceptions here
        import traceback
        traceback.print_exc(file=sys.stdout)

    return ret

def build_tests(tests, base_source_paths, build_path, target, toolchain_name,
                clean=False, notify=None, verbose=False, jobs=1, macros=None,
                silent=False, report=None, properties=None,
                continue_on_build_fail=False, app_config=None,
                build_profile=None, stats_depth=None):
    """ Given the data structure from 'find_tests' and the typical build parameters,
        build all the tests

        Returns a tuple of the build result (True or False) followed by the test
        build data structure """
    execution_directory = "."
    base_path = norm_relative_path(build_path, execution_directory)

    target_name = target if isinstance(target, str) else target.name
    cfg, _, _ = get_config(base_source_paths, target_name, toolchain_name)

    baud_rate = 9600
    if 'platform.stdio-baud-rate' in cfg:
        baud_rate = cfg['platform.stdio-baud-rate'].value

    test_build = {
        "platform": target_name,
        "toolchain": toolchain_name,
        "base_path": base_path,
        "baud_rate": baud_rate,
        "binary_type": "bootable",
        "tests": {}
    }

    result = True

    jobs_count = int(jobs if jobs else cpu_count())
    p = Pool(processes=jobs_count)
    results = []
    for test_name, test_path in tests.iteritems():
        test_build_path = os.path.join(build_path, test_path)
        src_path = base_source_paths + [test_path]
        bin_file = None
        test_case_folder_name = os.path.basename(test_path)

        args = (src_path, test_build_path, target, toolchain_name)
        kwargs = {
            'jobs': 1,
            'clean': clean,
            'macros': macros,
            'name': test_case_folder_name,
            'project_id': test_name,
            'report': report,
            'properties': properties,
            'verbose': verbose,
            'app_config': app_config,
            'build_profile': build_profile,
            'silent': True,
            'toolchain_paths': TOOLCHAIN_PATHS,
            'stats_depth': stats_depth
        }

        results.append(p.apply_async(build_test_worker, args, kwargs))

    p.close()
    result = True
    itr = 0
    while len(results):
        itr += 1
        if itr > 360000:
            p.terminate()
            p.join()
            raise ToolException("Compile did not finish in 60 minutes")
        else:
            sleep(0.01)
            pending = 0
            for r in results:
                if r.ready() is True:
                    try:
                        worker_result = r.get()
                        results.remove(r)

                        # Take report from the kwargs and merge it into existing report
                        if report:
                            report_entry = worker_result['kwargs']['report'][target_name][toolchain_name]
                            for test_key in report_entry.keys():
                                report[target_name][toolchain_name][test_key] = report_entry[test_key]

                        # Set the overall result to a failure if a build failure occurred
                        if ('reason' in worker_result and
                            worker_result['reason'] and
                            not isinstance(worker_result['reason'], NotSupportedException)):
                            result = False
                            break

                        # Adding binary path to test build result
                        if ('result' in worker_result and
                            worker_result['result'] and
                            'bin_file' in worker_result):
                            bin_file = norm_relative_path(worker_result['bin_file'], execution_directory)

                            test_build['tests'][worker_result['kwargs']['project_id']] = {
                                "binaries": [
                                    {
                                        "path": bin_file
                                    }
                                ]
                            }

                            test_key = worker_result['kwargs']['project_id'].upper()
                            if report:
                                print report[target_name][toolchain_name][test_key][0][0]['output'].rstrip()
                            print 'Image: %s\n' % bin_file
                    except:
                        if p._taskqueue.queue:
                            p._taskqueue.queue.clear()
                            sleep(0.5)
                        p.terminate()
                        p.join()
                        raise
                else:
                    pending += 1
                    if pending >= jobs_count:
                        break

            # Break as soon as possible if there is a failure and we are not
            # continuing on build failures
            if not result and not continue_on_build_fail:
                if p._taskqueue.queue:
                    p._taskqueue.queue.clear()
                    sleep(0.5)
                p.terminate()
                break

    p.join()

    test_builds = {}
    test_builds["%s-%s" % (target_name, toolchain_name)] = test_build

    return result, test_builds

def test_spec_from_test_builds(test_builds):
    return {
        "builds": test_builds
    }
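
# End-to-end sketch tying the build helpers together ('BUILD/tests', the output
# file name, and the platform/toolchain names are placeholders):
#   tests = find_tests('.', 'K64F', 'GCC_ARM')
#   ok, test_builds = build_tests(tests, ['.'], 'BUILD/tests', 'K64F', 'GCC_ARM')
#   if ok:
#       with open('test_spec.json', 'w') as f:
#           json.dump(test_spec_from_test_builds(test_builds), f, indent=2)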