#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>

-------------------------------------------------------------------------------

Usage: singletest.py [options]

This script allows you to run mbed defined test cases for particular MCU(s)
and corresponding toolchain(s).

Options:
  -h, --help            show this help message and exit
  -i FILE, --tests=FILE
                        Points to file with test specification
  -M FILE, --MUTS=FILE  Points to file with MUTs specification (overwrites
                        settings.py and private_settings.py)
  -g, --goanna-for-tests
                        Run Goanna static analysis tool for tests
  -G, --goanna-for-sdk  Run Goanna static analysis tool for mbed SDK
  -s, --suppress-summary
                        Suppresses display of well-formatted table with test
                        results
  -v, --verbose         Verbose mode (prints some extra information)

Example: singletest.py -i test_spec.json -M muts_all.json

-------------------------------------------------------------------------------

File format example: test_spec.json

{
    "targets": {
        "KL46Z": ["ARM", "GCC_ARM"],
        "LPC1768": ["ARM", "GCC_ARM", "GCC_CR", "GCC_CS", "IAR"],
        "LPC11U24": ["uARM"],
        "NRF51822": ["ARM"]
    }
}

File format example: muts_all.json

{
    "1" : {"mcu": "LPC1768",
           "port": "COM4", "disk": "J:\\",
           "peripherals": ["TMP102", "digital_loop", "port_loop", "analog_loop", "SD"]
    },

    "2" : {"mcu": "KL25Z",
           "port": "COM7", "disk": "G:\\",
           "peripherals": ["digital_loop", "port_loop", "analog_loop"]
    }
}
"""
# Check if 'prettytable' module is installed
try:
    from prettytable import PrettyTable
except ImportError, e:
    print "Error: Can't import 'prettytable' module: %s" % e
    exit(-1)

# Check if 'serial' module is installed
try:
    from serial import Serial
except ImportError, e:
    print "Error: Can't import 'serial' module: %s" % e
    exit(-1)

import sys
import json
import optparse
import pprint
import re
import os
from types import ListType
import random

from os.path import join, abspath, dirname, exists, basename
from shutil import copy
from subprocess import call, Popen, PIPE
from time import sleep, time
from threading import Thread
from Queue import Queue, Empty

# Be sure that the tools directory is in the search path
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)

# Imports related to mbed build api
from workspace_tools.build_api import build_project, build_mbed_libs, build_lib
from workspace_tools.build_api import mcu_toolchain_matrix
from workspace_tools.build_api import get_unique_supported_toolchains
from workspace_tools.build_api import get_target_supported_toolchains
from workspace_tools.paths import BUILD_DIR
from workspace_tools.paths import HOST_TESTS
from workspace_tools.targets import TARGET_MAP
from workspace_tools.tests import TEST_MAP
from workspace_tools.tests import TESTS
from workspace_tools.libraries import LIBRARIES, LIBRARY_MAP
from workspace_tools.settings import MUTs


class ProcessObserver(Thread):
    def __init__(self, proc):
        Thread.__init__(self)
        self.proc = proc
        self.queue = Queue()
        self.daemon = True
        self.active = True
        self.start()

    def run(self):
        while self.active:
            c = self.proc.stdout.read(1)
            self.queue.put(c)

    def stop(self):
        self.active = False
        try:
            self.proc.terminate()
        except Exception, _:
            pass


class SingleTestRunner(object):
    """ Object wrapper for single test run which may involve multiple MUTs. """

    RE_DETECT_TESTCASE_RESULT = None

    # Return codes for test script
    TEST_RESULT_OK = "OK"
    TEST_RESULT_FAIL = "FAIL"
    TEST_RESULT_ERROR = "ERROR"
    TEST_RESULT_UNDEF = "UNDEF"
    TEST_RESULT_IOERR_COPY = "IOERR_COPY"
    TEST_RESULT_IOERR_DISK = "IOERR_DISK"
    TEST_RESULT_IOERR_SERIAL = "IOERR_SERIAL"
    TEST_RESULT_TIMEOUT = "TIMEOUT"
    TEST_RESULT_NO_IMAGE = "NO_IMAGE"

    GLOBAL_LOOPS_COUNT = 1  # How many times each test should be repeated
    TEST_LOOPS_LIST = []    # We redefine no. of loops per test_id
    TEST_LOOPS_DICT = {}    # TEST_LOOPS_LIST in dict format: {test_id: test_loop_count}

    # mbed test suite -> SingleTestRunner
    TEST_RESULT_MAPPING = {"success" : TEST_RESULT_OK,
                           "failure" : TEST_RESULT_FAIL,
                           "error" : TEST_RESULT_ERROR,
                           "ioerr_copy" : TEST_RESULT_IOERR_COPY,
                           "ioerr_disk" : TEST_RESULT_IOERR_DISK,
                           "ioerr_serial" : TEST_RESULT_IOERR_SERIAL,
                           "timeout" : TEST_RESULT_TIMEOUT,
                           "no_image" : TEST_RESULT_NO_IMAGE,
                           "end" : TEST_RESULT_UNDEF}

    def __init__(self, _global_loops_count=1, _test_loops_list=""):
        pattern = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
        self.RE_DETECT_TESTCASE_RESULT = re.compile(pattern)
        try:
            _global_loops_count = int(_global_loops_count)
        except:
            _global_loops_count = 1
        if _global_loops_count < 1:
            _global_loops_count = 1
        self.GLOBAL_LOOPS_COUNT = _global_loops_count
        self.TEST_LOOPS_LIST = _test_loops_list if _test_loops_list else []
        self.TEST_LOOPS_DICT = self.test_loop_list_to_dict(_test_loops_list)

    def test_loop_list_to_dict(self, test_loops_str):
        """ Transforms a string like test_id=X,test_id=Y,test_id=Z into a
            dictionary {test_id: test_id_loops_count} """
        result = {}
        if test_loops_str:
            test_loops = test_loops_str.split(',')
            for test_loop in test_loops:
                test_loop_count = test_loop.split('=')
                if len(test_loop_count) == 2:
                    _test_id, _test_loops = test_loop_count
                    try:
                        _test_loops = int(_test_loops)
                    except:
                        continue
                    result[_test_id] = _test_loops
        return result

    def get_test_loop_count(self, test_id):
        """ Returns the number of loops for a test (deduced from test_id).
            If the test is not in the list of redefined loop counts the
            default (global) value is used. """
        result = self.GLOBAL_LOOPS_COUNT
        if test_id in self.TEST_LOOPS_DICT:
            result = self.TEST_LOOPS_DICT[test_id]
        return result
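
    # A sketch (with made-up test IDs) of how the loop options are interpreted:
    #   runner = SingleTestRunner(_global_loops_count=2,
    #                             _test_loops_list="MBED_A1=5,MBED_A2=3")
    #   runner.TEST_LOOPS_DICT                  -> {'MBED_A1': 5, 'MBED_A2': 3}
    #   runner.get_test_loop_count('MBED_A1')   -> 5
    #   runner.get_test_loop_count('MBED_XYZ')  -> 2 (falls back to GLOBAL_LOOPS_COUNT)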

    def file_copy_method_selector(self, image_path, disk, copy_method):
        """ Copy file depending on method you want to use. Handles exceptions
            and return codes from shell copy commands. """
        result = True
        result_msg = ""
        if copy_method == "cp" or copy_method == "copy" or copy_method == "xcopy":
            cmd = [copy_method,
                   image_path.encode('ascii', 'ignore'),
                   disk.encode('ascii', 'ignore') + basename(image_path).encode('ascii', 'ignore')]
            try:
                ret = call(cmd, shell=True)
                if ret:
                    result_msg = "Return code: %d. Command: " % ret + " ".join(cmd)
                    result = False
            except Exception, e:
                result_msg = e
                result = False
        else:
            copy_method = "shutil.copy()"
            # Default python method
            try:
                copy(image_path, disk)
            except Exception, e:
                result_msg = e
                result = False
        return result, result_msg, copy_method
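
    # For illustration only (hypothetical paths): selecting a shell copy method
    # such as 'copy' builds and runs a command roughly like
    #   copy build\test\LPC1768\ARM\MBED_A1\basic.bin E:\basic.bin
    # whereas the default branch simply calls shutil.copy(image_path, disk).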

    def delete_file(self, file_path):
        """ Remove file from the system """
        result = True
        result_msg = ""
        try:
            os.remove(file_path)
        except Exception, e:
            result_msg = e
            result = False
        return result, result_msg
    def handle(self, test_spec, target_name, toolchain_name, test_loops=1):
        """ Function determines MUT's mbed disk/port and copies binary to
            target. Test is being invoked afterwards. """
        data = json.loads(test_spec)
        # Get test information, image and test timeout
        test_id = data['test_id']
        test = TEST_MAP[test_id]
        test_description = TEST_MAP[test_id].get_description()
        image = data["image"]
        duration = data.get("duration", 10)

        # Find a suitable MUT:
        mut = None
        for id, m in MUTs.iteritems():
            if m['mcu'] == data['mcu']:
                mut = m
                break

        if mut is None:
            print "Error: No mbed available: MUT[%s]" % data['mcu']
            return None

        disk = mut['disk']
        port = mut['port']
        target_by_mcu = TARGET_MAP[mut['mcu']]

        # Program
        # When the build and test system were separate, this was relative to a
        # base network folder base path: join(NETWORK_BASE_PATH, )
        image_path = image
        if not exists(image_path):
            print "Error: Image file does not exist: %s" % image_path
            elapsed_time = 0
            test_result = self.TEST_RESULT_NO_IMAGE
            return (test_result, target_name, toolchain_name,
                    test_id, test_description, round(elapsed_time, 2),
                    duration, self.shape_test_loop_ok_result_count([]))

        # Program MUT with proper image file
        if not disk.endswith('/') and not disk.endswith('\\'):
            disk += '/'

        # Tests can be looped so test results must be stored for the same test
        test_all_result = []
        for test_index in range(test_loops):
            # Choose one method of copy files to mbed virtual drive
            _copy_res, _err_msg, _copy_method = self.file_copy_method_selector(image_path, disk, opts.copy_method)

            # Host test execution
            start_host_exec_time = time()

            single_test_result = self.TEST_RESULT_UNDEF  # single test run result
            if not _copy_res:  # Image copy error
                single_test_result = self.TEST_RESULT_IOERR_COPY
                print "Error: Copy method '%s'. %s" % (_copy_method, _err_msg)
            else:
                # Copy Extra Files
                if not target_by_mcu.is_disk_virtual and test.extra_files:
                    for f in test.extra_files:
                        copy(f, disk)

                sleep(target_by_mcu.program_cycle_s())

                # Host test execution
                start_host_exec_time = time()

                host_test_verbose = opts.verbose_test_result_only or opts.verbose
                single_test_result = self.run_host_test(test.host_test, disk, port, duration, host_test_verbose)

            # Store test result
            test_all_result.append(single_test_result)

            elapsed_time = time() - start_host_exec_time
            print print_test_result(single_test_result, target_name, toolchain_name,
                                    test_id, test_description, elapsed_time, duration)

        return (self.shape_global_test_loop_result(test_all_result), target_name, toolchain_name,
                test_id, test_description, round(elapsed_time, 2),
                duration, self.shape_test_loop_ok_result_count(test_all_result))

    def shape_test_loop_ok_result_count(self, test_all_result):
        """ Reformats list of results to simple string """
        test_loop_count = len(test_all_result)
        test_loop_ok_result = test_all_result.count(self.TEST_RESULT_OK)
        return "%d/%d" % (test_loop_ok_result, test_loop_count)

    def shape_global_test_loop_result(self, test_all_result):
        """ Reformats list of results to simple string """
        result = self.TEST_RESULT_FAIL
        if all(test_all_result[0] == res for res in test_all_result):
            result = test_all_result[0]
        return result
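
    # A sketch of how looped results are shaped (values are illustrative):
    #   test_all_result = ["OK", "OK", "FAIL"]
    #   shape_test_loop_ok_result_count(test_all_result)  -> "2/3"
    #   shape_global_test_loop_result(test_all_result)    -> "FAIL" (runs disagree)
    #   shape_global_test_loop_result(["OK", "OK"])       -> "OK"   (all runs agree)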

    def run_host_test(self, name, disk, port, duration, verbose=False, extra_serial=""):
        """ Function creates new process with host test configured with particular test case.
            Function also polls the serial port activity from the process to catch all data
            printed by the test runner and host test during test execution. """
        # print "{%s} port:%s disk:%s" % (name, port, disk),
        cmd = ["python", "%s.py" % name, '-p', port, '-d', disk, '-t', str(duration), "-e", extra_serial]
        proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
        obs = ProcessObserver(proc)
        start = time()
        line = ''
        output = []
        while (time() - start) < duration:
            try:
                c = obs.queue.get(block=True, timeout=1)
            except Empty, _:
                c = None

            if c:
                output.append(c)
                # Give the mbed under test a way to communicate the end of the test
                if c in ['\n', '\r']:
                    if '{end}' in line:
                        break
                    line = ''
                else:
                    line += c

        # Stop test process
        obs.stop()

        # Handle verbose mode
        if verbose:
            print "Test::Output::Start"
            print "".join(output)
            print "Test::Output::Finish"

        # Parse test 'output' data
        result = self.TEST_RESULT_TIMEOUT
        for line in "".join(output).splitlines():
            search_result = self.RE_DETECT_TESTCASE_RESULT.search(line)
            if search_result and len(search_result.groups()):
                result = self.TEST_RESULT_MAPPING[search_result.groups(0)[0]]
                break
        return result
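
    # The host test process is expected to print result tokens wrapped in braces,
    # matching the TEST_RESULT_MAPPING keys, e.g. (illustrative output):
    #   {success}
    #   {end}
    # run_host_test() scans each line with RE_DETECT_TESTCASE_RESULT and maps the
    # first token found to a TEST_RESULT_* code; if no token arrives within
    # 'duration' seconds the result defaults to TEST_RESULT_TIMEOUT.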


def is_peripherals_available(target_mcu_name, peripherals=None):
    """ Checks if specified target should run specific peripheral test case. """
    if peripherals is not None:
        peripherals = set(peripherals)

    for id, mut in MUTs.iteritems():
        # Target MCU name check
        if mut["mcu"] != target_mcu_name:
            continue
        # Peripherals check
        if peripherals is not None:
            if 'peripherals' not in mut:
                continue
            if not peripherals.issubset(set(mut['peripherals'])):
                continue
        return True
    return False
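
# A sketch (with a made-up MUT entry) of how the peripheral check behaves:
#   MUTs = {"1": {"mcu": "LPC1768", "port": "COM4", "disk": "J:\\",
#                 "peripherals": ["SD", "TMP102"]}}
#   is_peripherals_available("LPC1768", ["SD"])            -> True
#   is_peripherals_available("LPC1768", ["digital_loop"])  -> False (not connected)
#   is_peripherals_available("KL25Z")                      -> False (no such MUT)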


def print_test_result(test_result, target_name, toolchain_name,
                      test_id, test_description, elapsed_time, duration):
    """ Use specific convention to print test result and related data. """
    tokens = []
    tokens.append("TargetTest")
    tokens.append(target_name)
    tokens.append(toolchain_name)
    tokens.append(test_id)
    tokens.append(test_description)
    separator = "::"
    time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
    result = separator.join(tokens) + " [" + test_result + "]" + time_info
    return result
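
# Example output line (values are illustrative):
#   TargetTest::LPC1768::ARM::MBED_A1::Basic [OK] in 1.25 of 10 sec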


def shape_test_request(mcu, image_path, test_id, duration=10):
    """ Function prepares JSON structure describing test specification. """
    test_spec = {
        "mcu": mcu,
        "image": image_path,
        "duration": duration,
        "test_id": test_id,
    }
    return json.dumps(test_spec)
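
# For example (hypothetical image path):
#   shape_test_request("LPC1768", "build/test/LPC1768/ARM/MBED_A1/basic.bin", "MBED_A1", 20)
# returns a JSON string equivalent to:
#   {"mcu": "LPC1768", "image": "build/test/LPC1768/ARM/MBED_A1/basic.bin",
#    "duration": 20, "test_id": "MBED_A1"}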


def get_json_data_from_file(json_spec_filename, verbose=False):
    """ Loads JSON formatted data from a file into a data structure """
    result = None
    try:
        with open(json_spec_filename) as data_file:
            try:
                result = json.load(data_file)
            except ValueError as json_error_msg:
                result = None
                print "Error in '%s' file: %s" % (json_spec_filename, json_error_msg)
    except IOError as fileopen_error_msg:
        print "Error: %s" % (fileopen_error_msg)
    if verbose and result:
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(result)
    return result


def print_muts_configuration_from_json(json_data, join_delim=", "):
    """ Prints MUTs configuration passed to test script for verboseness. """
    muts_info_cols = []
    # We need to check all unique properties for each defined MUT
    for k in json_data:
        mut_info = json_data[k]
        for property in mut_info:
            if property not in muts_info_cols:
                muts_info_cols.append(property)

    # Prepare pretty table object to display all MUTs
    pt_cols = ["index"] + muts_info_cols
    pt = PrettyTable(pt_cols)
    for col in pt_cols:
        pt.align[col] = "l"

    # Add rows to pretty print object
    for k in json_data:
        row = [k]
        mut_info = json_data[k]
        for col in muts_info_cols:
            cell_val = mut_info[col] if col in mut_info else None
            if type(cell_val) == ListType:
                cell_val = join_delim.join(cell_val)
            row.append(cell_val)
        pt.add_row(row)
    return pt.get_string()


def print_test_configuration_from_json(json_data, join_delim=", "):
    """ Prints test specification configuration passed to test script for verboseness. """
    toolchains_info_cols = []
    # We need to check all toolchains for each device
    for k in json_data:
        # k should be 'targets'
        targets = json_data[k]
        for target in targets:
            toolchains = targets[target]
            for toolchain in toolchains:
                if toolchain not in toolchains_info_cols:
                    toolchains_info_cols.append(toolchain)

    # Prepare pretty table object to display test specification
    pt_cols = ["mcu"] + sorted(toolchains_info_cols)
    pt = PrettyTable(pt_cols)
    for col in pt_cols:
        pt.align[col] = "l"

    # { target : [conflicted toolchains] }
    toolchain_conflicts = {}
    for k in json_data:
        # k should be 'targets'
        targets = json_data[k]
        for target in targets:
            target_supported_toolchains = get_target_supported_toolchains(target)
            if not target_supported_toolchains:
                target_supported_toolchains = []
            target_name = target if target in TARGET_MAP else "%s*" % target
            row = [target_name]
            toolchains = targets[target]
            for toolchain in toolchains_info_cols:
                # Check for conflicts
                conflict = False
                if toolchain in toolchains:
                    if toolchain not in target_supported_toolchains:
                        conflict = True
                        if target not in toolchain_conflicts:
                            toolchain_conflicts[target] = []
                        toolchain_conflicts[target].append(toolchain)
                # Add marker inside table about target usage / conflict
                cell_val = 'Yes' if toolchain in toolchains else '-'
                if conflict:
                    cell_val += '*'
                row.append(cell_val)
            pt.add_row(row)

    # Generate result string
    result = pt.get_string()    # Test specification table
    if toolchain_conflicts:     # Print conflicts if they exist
        result += "\n"
        result += "Toolchain conflicts:\n"
        for target in toolchain_conflicts:
            if target not in TARGET_MAP:
                result += "\t* Target %s unknown\n" % (target)
            conflict_target_list = ", ".join(toolchain_conflicts[target])
            suffix = 's' if len(toolchain_conflicts[target]) > 1 else ''
            result += "\t* Target %s does not support %s toolchain%s\n" % (target, conflict_target_list, suffix)
    return result


def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=','):
    """ Generates table summary with all test cases and additional test case
        information using pretty print functionality. Allows test suite user to
        see test cases. """
    # Get all unique test ID prefixes
    unique_test_id = []
    for test in TESTS:
        split = test['id'].split('_')[:-1]
        test_id_prefix = '_'.join(split)
        if test_id_prefix not in unique_test_id:
            unique_test_id.append(test_id_prefix)
    unique_test_id.sort()
    counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
    counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)

    test_properties = ['id', 'automated', 'description', 'peripherals', 'host_test', 'duration'] if cols is None else cols

    # All tests status table print
    pt = PrettyTable(test_properties)
    for col in test_properties:
        pt.align[col] = "l"
    pt.align['duration'] = "r"

    counter_all = 0
    counter_automated = 0
    pt.padding_width = 1  # One space between column edges and contents (default)

    for test_id in TEST_MAP:
        row = []
        test = TEST_MAP[test_id]
        split = test_id.split('_')[:-1]
        test_id_prefix = '_'.join(split)

        for col in test_properties:
            col_value = test[col]
            if type(test[col]) == ListType:
                col_value = join_delim.join(test[col])
            elif test[col] == None:
                col_value = "-"
            row.append(col_value)
        if test['automated'] == True:
            counter_dict_test_id_types[test_id_prefix] += 1
            counter_automated += 1
        pt.add_row(row)
        # Update counters
        counter_all += 1
        counter_dict_test_id_types_all[test_id_prefix] += 1
    result = pt.get_string()
    result += "\n\n"

    if result_summary:
        # Automation result summary
        test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
        pt = PrettyTable(test_id_cols)
        pt.align['automated'] = "r"
        pt.align['all'] = "r"
        pt.align['percent [%]'] = "r"

        percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
        str_progress = progress_bar(percent_progress, 75)
        pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
        result += "Automation coverage:\n"
        result += pt.get_string()
        result += "\n\n"

        # Test automation coverage table print
        test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
        pt = PrettyTable(test_id_cols)
        pt.align['id'] = "l"
        pt.align['automated'] = "r"
        pt.align['all'] = "r"
        pt.align['percent [%]'] = "r"
        for unique_id in unique_test_id:
            # print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
            percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
            str_progress = progress_bar(percent_progress, 75)
            row = [unique_id,
                   counter_dict_test_id_types[unique_id],
                   counter_dict_test_id_types_all[unique_id],
                   percent_progress,
                   "[" + str_progress + "]"]
            pt.add_row(row)
        result += "Test automation coverage:\n"
        result += pt.get_string()
        result += "\n\n"
    return result


def progress_bar(percent_progress, saturation=0):
    """ This function creates a progress bar with an optional simple saturation mark """
    step = int(percent_progress / 2)    # Scale percentage to bar length (1 - 50)
    str_progress = '#' * step + '.' * int(50 - step)
    c = '!' if str_progress[38] == '.' else '|'
    if saturation > 0:
        saturation = saturation / 2
        str_progress = str_progress[:saturation] + c + str_progress[saturation:]
    return str_progress
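
# A sketch of the output (illustrative values): progress_bar(40.0, 75) returns
# 20 '#' characters followed by 30 '.' characters, with a '!' saturation marker
# spliced in at index 37 (75 / 2) because the bar has not reached that point.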


def get_unique_value_from_summary(test_summary, index):
    """ Gets list of unique target names """
    result = []
    for test in test_summary:
        target_name = test[index]
        if target_name not in result:
            result.append(target_name)
    return sorted(result)


def get_unique_value_from_summary_ext(test_summary, index_key, index_val):
    """ Gets list of unique target names and returns them as a dictionary """
    result = {}
    for test in test_summary:
        key = test[index_key]
        val = test[index_val]
        if key not in result:
            result[key] = val
    return result


def generate_test_summary_by_target(test_summary):
    """ Prints well-formed summary with results (SQL table like)
        table shows test x toolchain test result matrix """
    RESULT_INDEX = 0
    TARGET_INDEX = 1
    TOOLCHAIN_INDEX = 2
    TEST_INDEX = 3
    DESC_INDEX = 4

    unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX)
    unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX)
    unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
    unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX)

    result = "Test summary:\n"
    result_dict = {}  # test : { toolchain : result }
    for target in unique_targets:
        for test in test_summary:
            if test[TEST_INDEX] not in result_dict:
                result_dict[test[TEST_INDEX]] = {}
            result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]

        pt_cols = ["Target", "Test ID", "Test Description"] + unique_toolchains
        pt = PrettyTable(pt_cols)
        for col in pt_cols:
            pt.align[col] = "l"
        pt.padding_width = 1  # One space between column edges and contents (default)

        for test in unique_tests:
            test_results = result_dict[test]
            row = [target, test, unique_test_desc[test]]
            for toolchain in unique_toolchains:
                row.append(test_results[toolchain])
            pt.add_row(row)
        result += pt.get_string()
        result += "\n\n"
    return result


def generate_test_summary(test_summary):
    """ Prints well-formed summary with results (SQL table like)
        table shows target x test results matrix across """
    result = "Test summary:\n"
    # Pretty table package is used to print results
    pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
                      "Elapsed Time (sec)", "Timeout (sec)", "Loops"])
    pt.align["Result"] = "l"            # Left align
    pt.align["Target"] = "l"            # Left align
    pt.align["Toolchain"] = "l"         # Left align
    pt.align["Test ID"] = "l"           # Left align
    pt.align["Test Description"] = "l"  # Left align
    pt.padding_width = 1                # One space between column edges and contents (default)

    result_dict = {single_test.TEST_RESULT_OK : 0,
                   single_test.TEST_RESULT_FAIL : 0,
                   single_test.TEST_RESULT_ERROR : 0,
                   single_test.TEST_RESULT_UNDEF : 0,
                   single_test.TEST_RESULT_IOERR_COPY : 0,
                   single_test.TEST_RESULT_IOERR_DISK : 0,
                   single_test.TEST_RESULT_IOERR_SERIAL : 0,
                   single_test.TEST_RESULT_NO_IMAGE : 0,
                   single_test.TEST_RESULT_TIMEOUT : 0}

    for test in test_summary:
        if test[0] in result_dict:
            result_dict[test[0]] += 1
        pt.add_row(test)
    result += pt.get_string()
    result += "\n"

    # Print result count
    result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.iteritems()])
    result += "\n"
    return result


if __name__ == '__main__':
    # Command line options
    parser = optparse.OptionParser()
    parser.add_option('-i', '--tests',
                      dest='test_spec_filename',
                      metavar="FILE",
                      help='Points to file with test specification')

    parser.add_option('-M', '--MUTS',
                      dest='muts_spec_filename',
                      metavar="FILE",
                      help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')

    parser.add_option('-g', '--goanna-for-tests',
                      dest='goanna_for_tests',
                      metavar=False,
                      action="store_true",
                      help='Run Goanna static analysis tool for tests')

    parser.add_option('-G', '--goanna-for-sdk',
                      dest='goanna_for_mbed_sdk',
                      metavar=False,
                      action="store_true",
                      help='Run Goanna static analysis tool for mbed SDK')

    parser.add_option('-s', '--suppress-summary',
                      dest='suppress_summary',
                      default=False,
                      action="store_true",
                      help='Suppresses display of well-formatted table with test results')

    parser.add_option('-t', '--test-summary',
                      dest='test_x_toolchain_summary',
                      default=False,
                      action="store_true",
                      help='Displays well-formatted table with test x toolchain test result per target')

    parser.add_option('-r', '--test-automation-report',
                      dest='test_automation_report',
                      default=False,
                      action="store_true",
                      help='Prints information about all tests and exits')

    parser.add_option('-R', '--test-case-report',
                      dest='test_case_report',
                      default=False,
                      action="store_true",
                      help='Prints information about all test cases and exits')

    parser.add_option('-P', '--only-peripherals',
                      dest='test_only_peripheral',
                      default=False,
                      action="store_true",
                      help='Test only peripherals declared for MUT and skip common tests')

    parser.add_option('-C', '--only-commons',
                      dest='test_only_common',
                      default=False,
                      action="store_true",
                      help='Test only board internals. Skip peripheral tests and perform common tests.')

    parser.add_option('-c', '--copy-method',
                      dest='copy_method',
                      help="Select which copy method is used to put the binary on the mbed drive. You can choose from 'cp', 'copy', 'xcopy'. Default is the python shutil.copy method.")

    parser.add_option('-n', '--test-by-names',
                      dest='test_by_names',
                      help='Runs only the tests enumerated in this switch')

    parser.add_option("-S", "--supported-toolchains",
                      action="store_true",
                      dest="supported_toolchains",
                      default=False,
                      help="Displays supported matrix of MCUs and toolchains")

    parser.add_option("-O", "--only-build",
                      action="store_true",
                      dest="only_build_tests",
                      default=False,
                      help="Only build tests, skips actual test procedures (flashing etc.)")

    parser.add_option('', '--config',
                      dest='verbose_test_configuration_only',
                      default=False,
                      action="store_true",
                      help='Displays full test specification and MUTs configuration and exits')

    parser.add_option('', '--loops',
                      dest='test_loops_list',
                      help='Set no. of loops per test. Format: TEST_1=1,TEST_2=2,TEST_3=3')

    parser.add_option('', '--global-loops',
                      dest='test_global_loops_value',
                      help='Set global number of test loops per test. Default value is set to 1')

    parser.add_option('', '--firmware-name',
                      dest='firmware_global_name',
                      help='Set global name for all produced projects. E.g. you can call all test binaries firmware.bin')

    parser.add_option('-u', '--shuffle-tests',
                      dest='shuffle_test_order',
                      default=False,
                      action="store_true",
                      help='Shuffles test execution order')

    parser.add_option('', '--verbose-skipped',
                      dest='verbose_skipped_tests',
                      default=False,
                      action="store_true",
                      help='Prints some extra information about skipped tests')

    parser.add_option('-V', '--verbose-test-result',
                      dest='verbose_test_result_only',
                      default=False,
                      action="store_true",
                      help='Prints test serial output')

    parser.add_option('-v', '--verbose',
                      dest='verbose',
                      default=False,
                      action="store_true",
                      help='Verbose mode (prints some extra information)')

    parser.description = """This script allows you to run mbed defined test cases for particular MCU(s) and corresponding toolchain(s)."""
    parser.epilog = """Example: singletest.py -i test_spec.json -M muts_all.json"""

    (opts, args) = parser.parse_args()

    # Print summary / information about automation test status
    if opts.test_automation_report:
        print get_avail_tests_summary_table()
        exit(0)

    # Print summary / information about automation test status
    if opts.test_case_report:
        test_case_report_cols = ['id', 'automated', 'description', 'peripherals', 'host_test', 'duration', 'source_dir']
        print get_avail_tests_summary_table(cols=test_case_report_cols, result_summary=False, join_delim='\n')
        exit(0)

    # Only prints matrix of supported toolchains
    if opts.supported_toolchains:
        mcu_toolchain_matrix()
        exit(0)

    # Open file with test specification
    # test_spec_filename tells script which targets and their toolchain(s)
    # should be covered by the test scenario
    test_spec = get_json_data_from_file(opts.test_spec_filename) if opts.test_spec_filename else None
    if test_spec is None:
        parser.print_help()
        exit(-1)

    # Get extra MUTs if applicable
    if opts.muts_spec_filename:
        MUTs = get_json_data_from_file(opts.muts_spec_filename)

    if MUTs is None:
        parser.print_help()
        exit(-1)

    # Only prints read MUTs configuration
    if MUTs and opts.verbose_test_configuration_only:
        print "MUTs configuration in %s:" % opts.muts_spec_filename
        print print_muts_configuration_from_json(MUTs)
        print
        print "Test specification in %s:" % opts.test_spec_filename
        print print_test_configuration_from_json(test_spec)
        exit(0)

    # Verbose test specification and MUTs configuration
    if MUTs and opts.verbose:
        print print_muts_configuration_from_json(MUTs)
    if test_spec and opts.verbose:
        print print_test_configuration_from_json(test_spec)

    # Magic happens here... ;)
    start = time()
    single_test = SingleTestRunner(_global_loops_count=opts.test_global_loops_value, _test_loops_list=opts.test_loops_list)

    clean = test_spec.get('clean', False)
    test_ids = test_spec.get('test_ids', [])
    groups = test_spec.get('test_groups', [])

    # Here we store test results
    test_summary = []

    for target, toolchains in test_spec['targets'].iteritems():
        for toolchain in toolchains:
            # print '=== %s::%s ===' % (target, toolchain)
            # Let's build our test
            if target not in TARGET_MAP:
                print 'Skipped tests for %s target. Target platform not found' % (target)
                continue

            T = TARGET_MAP[target]
            build_mbed_libs_options = ["analyze"] if opts.goanna_for_mbed_sdk else None
            build_mbed_libs_result = build_mbed_libs(T, toolchain, options=build_mbed_libs_options)
            if not build_mbed_libs_result:
                print 'Skipped tests for %s target. Toolchain %s is not yet supported for this target' % (T.name, toolchain)
                continue

            build_dir = join(BUILD_DIR, "test", target, toolchain)

            # Enumerate through all tests
            test_map_keys = TEST_MAP.keys()
            if opts.shuffle_test_order:
                random.shuffle(test_map_keys)

            for test_id in test_map_keys:
                test = TEST_MAP[test_id]
                if opts.test_by_names and test_id not in opts.test_by_names.split(','):
                    continue

                if test_ids and test_id not in test_ids:
                    continue

                if opts.test_only_peripheral and not test.peripherals:
                    if opts.verbose_skipped_tests:
                        print "TargetTest::%s::NotPeripheralTestSkipped()" % (target)
                    continue

                if opts.test_only_common and test.peripherals:
                    if opts.verbose_skipped_tests:
                        print "TargetTest::%s::PeripheralTestSkipped()" % (target)
                    continue

                if test.automated and test.is_supported(target, toolchain):
                    if not is_peripherals_available(target, test.peripherals):
                        if opts.verbose_skipped_tests:
                            test_peripherals = test.peripherals if test.peripherals else []
                            print "TargetTest::%s::TestSkipped(%s)" % (target, ",".join(test_peripherals))
                        continue

                    # This is basic structure storing test results
                    test_result = {
                        'target': target,
                        'toolchain': toolchain,
                        'test_id': test_id,
                    }

                    build_project_options = ["analyze"] if opts.goanna_for_tests else None

                    # Detect which libs should be added to the test
                    # Some libs have to be compiled, like RTOS or ETH
                    libraries = []
                    for lib in LIBRARIES:
                        if lib['build_dir'] in test.dependencies:
                            libraries.append(lib['id'])
                    # Build libs for test
                    for lib_id in libraries:
                        build_lib(lib_id, T, toolchain, options=build_project_options,
                                  verbose=opts.verbose, clean=clean)

                    # TODO: move the 2 loops below to a separate function
                    INC_DIRS = []
                    for lib_id in libraries:
                        if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
                            INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])

                    MACROS = []
                    for lib_id in libraries:
                        if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
                            MACROS.extend(LIBRARY_MAP[lib_id]['macros'])

                    project_name = opts.firmware_global_name if opts.firmware_global_name else None
                    path = build_project(test.source_dir, join(build_dir, test_id),
                                         T, toolchain, test.dependencies,
                                         options=build_project_options,
                                         clean=clean,
                                         verbose=opts.verbose,
                                         name=project_name,
                                         macros=MACROS,
                                         inc_dirs=INC_DIRS)

                    test_result_cache = join(dirname(path), "test_result.json")

                    if opts.only_build_tests:
                        # We are skipping testing phase, and suppress summary
                        opts.suppress_summary = True
                        continue

                    # For an automated test the duration acts as a timeout after
                    # which the test gets interrupted
                    test_spec = shape_test_request(target, path, test_id, test.duration)
                    test_loops = single_test.get_test_loop_count(test_id)
                    single_test_result = single_test.handle(test_spec, target, toolchain, test_loops=test_loops)
                    if single_test_result is not None:
                        test_summary.append(single_test_result)
                    # print test_spec, target, toolchain

    elapsed_time = time() - start

    # Human readable summary
    if not opts.suppress_summary:
        # Prints well-formed summary with results (SQL table like)
        print generate_test_summary(test_summary)

    if opts.test_x_toolchain_summary:
        # Prints well-formed summary with results (SQL table like)
        # table shows test x toolchain test result matrix
        print generate_test_summary_by_target(test_summary)

    print "Completed in %d sec" % (time() - start)