#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Author: Przemyslaw Wirkus <Przemyslaw.Wirkus@arm.com>

-------------------------------------------------------------------------------

Usage: singletest.py [options]

This script allows you to run mbed defined test cases for particular MCU(s)
and corresponding toolchain(s).

Options:
  -h, --help            show this help message and exit
  -i FILE, --tests=FILE
                        Points to file with test specification
  -M FILE, --MUTS=FILE  Points to file with MUTs specification (overwrites
                        settings.py and private_settings.py)
  -g, --goanna-for-tests
                        Run Goanna static analysis tool for tests
  -G, --goanna-for-sdk  Run Goanna static analysis tool for mbed SDK
  -s, --suppress-summary
                        Suppresses display of well-formatted table with test
                        results
  -v, --verbose         Verbose mode (prints some extra information)

Example: singletest.py -i test_spec.json -M muts_all.json

-------------------------------------------------------------------------------

File format example: test_spec.json

{
    "targets": {
        "KL46Z": ["ARM", "GCC_ARM"],
        "LPC1768": ["ARM", "GCC_ARM", "GCC_CR", "GCC_CS", "IAR"],
        "LPC11U24": ["uARM"],
        "NRF51822": ["ARM"]
    }
}

File format example: muts_all.json

{
    "1": {"mcu": "LPC1768",
          "port": "COM4", "disk": "J:\\",
          "peripherals": ["TMP102", "digital_loop", "port_loop", "analog_loop", "SD"]
    },
    "2": {"mcu": "KL25Z",
          "port": "COM7", "disk": "G:\\",
          "peripherals": ["digital_loop", "port_loop", "analog_loop"]
    }
}
"""
import sys
import json
import optparse
import pprint
import re
from types import ListType
from prettytable import PrettyTable

from os.path import join, abspath, dirname, exists, basename
from shutil import copy
from subprocess import call, Popen, PIPE
from time import sleep, time
from threading import Thread
from Queue import Queue, Empty

# Be sure that the tools directory is in the search path
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)

# Imports related to mbed build API
from workspace_tools.build_api import build_project, build_mbed_libs, build_lib
from workspace_tools.build_api import mcu_toolchain_matrix
from workspace_tools.build_api import get_unique_supported_toolchains
from workspace_tools.build_api import get_target_supported_toolchains
from workspace_tools.paths import BUILD_DIR
from workspace_tools.paths import HOST_TESTS
from workspace_tools.targets import TARGET_MAP
from workspace_tools.tests import TEST_MAP
from workspace_tools.tests import TESTS
from workspace_tools.libraries import LIBRARIES, LIBRARY_MAP
from workspace_tools.settings import MUTs

class ProcessObserver(Thread):
    def __init__(self, proc):
        Thread.__init__(self)
        self.proc = proc
        self.queue = Queue()
        self.daemon = True
        self.active = True
        self.start()

    def run(self):
        while self.active:
            c = self.proc.stdout.read(1)
            self.queue.put(c)

    def stop(self):
        self.active = False
        try:
            self.proc.terminate()
        except Exception, _:
            pass

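# Illustrative sketch (not part of the test flow): ProcessObserver pumps a
# child process's stdout into a queue so a caller can poll with a timeout
# instead of blocking on read(). Assuming a child process that writes to
# stdout, usage would look like:
#
#   proc = Popen(["python", "some_host_test.py"], stdout=PIPE)
#   obs = ProcessObserver(proc)
#   try:
#       c = obs.queue.get(block=True, timeout=1)  # one character, or Empty
#   except Empty:
#       c = None
#   obs.stop()
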
class SingleTestRunner(object):
    """ Object wrapper for single test run which may involve multiple MUTs. """

    re_detect_testcase_result = None

    TEST_RESULT_OK = "OK"
    TEST_RESULT_FAIL = "FAIL"
    TEST_RESULT_ERROR = "ERROR"
    TEST_RESULT_UNDEF = "UNDEF"

    # mbed test suite -> SingleTestRunner
    TEST_RESULT_MAPPING = {"success": TEST_RESULT_OK,
                           "failure": TEST_RESULT_FAIL,
                           "error": TEST_RESULT_ERROR,
                           "end": TEST_RESULT_UNDEF}

    def __init__(self):
        pattern = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
        self.re_detect_testcase_result = re.compile(pattern)

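    # For illustration: with the mapping above the compiled pattern is
    # equivalent to r"\{(success|failure|error|end)\}" (alternative order
    # follows dict key order), so a serial line containing "{success}"
    # yields the token "success", which maps to TEST_RESULT_OK.
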
    def file_copy_method_selector(self, image_path, disk, copy_method):
        """ Copy file depending on the method you want to use """
        # TODO: Add exception handling for copy procedures (disk can be full). See below:
        """
        Traceback (most recent call last):
          File "mbed\workspace_tools\singletest.py", line 738, in <module>
            single_test_result = single_test.handle(test_spec, target, toolchain)
          File "mbed\workspace_tools\singletest.py", line 252, in handle
            self.file_copy_method_selector(image_path, disk, opts.copy_method)
          File "mbed\workspace_tools\singletest.py", line 203, in file_copy_method_selector
            copy(image_path, disk)
          File "C:\Python27\lib\shutil.py", line 119, in copy
            copyfile(src, dst)
          File "C:\Python27\lib\shutil.py", line 84, in copyfile
            copyfileobj(fsrc, fdst)
          File "C:\Python27\lib\shutil.py", line 52, in copyfileobj
            fdst.write(buf)
        IOError: [Errno 28] No space left on device
        """
        if copy_method in ("cp", "copy", "xcopy"):
            cmd = [copy_method,
                   image_path.encode('ascii', 'ignore'),
                   disk.encode('ascii', 'ignore') + basename(image_path).encode('ascii', 'ignore')]
            call(cmd, shell=True)
        else:
            # Default python method
            copy(image_path, disk)

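    # A minimal sketch of the exception handling the TODO above asks for (not
    # wired in; returning False to signal the caller is an assumption, not
    # current behaviour):
    #
    #   try:
    #       copy(image_path, disk)
    #   except IOError, e:
    #       # Errno 28 is "No space left on device" (see traceback above)
    #       print "Error: copy failed: %s" % e
    #       return False
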
    def handle(self, test_spec, target_name, toolchain_name):
        """ Function determines MUT's mbed disk/port and copies binary to
            target. Test is being invoked afterwards. """
        data = json.loads(test_spec)
        # Get test information, image and test timeout
        test_id = data['test_id']
        test = TEST_MAP[test_id]
        test_description = TEST_MAP[test_id].get_description()
        image = data["image"]
        duration = data.get("duration", 10)

        # Find a suitable MUT:
        mut = None
        for id, m in MUTs.iteritems():
            if m['mcu'] == data['mcu']:
                mut = m
                break

        if mut is None:
            print "Error: No mbed available: mut[%s]" % data['mcu']
            return

        disk = mut['disk']
        port = mut['port']
        target_by_mcu = TARGET_MAP[mut['mcu']]

        # Program
        # When the build and test system were separate, this was relative to a
        # base network folder base path: join(NETWORK_BASE_PATH, )
        image_path = image
        if not exists(image_path):
            print "Error: Image file does not exist: %s" % image_path
            elapsed_time = 0
            test_result = "{error}"
            return (test_result, target_name, toolchain_name,
                    test_id, test_description, round(elapsed_time, 2), duration)

        # Program MUT with proper image file
        if not disk.endswith('/') and not disk.endswith('\\'):
            disk += '/'

        # Choose one method of copying files to the mbed virtual drive
        self.file_copy_method_selector(image_path, disk, opts.copy_method)

        # Copy extra files
        if not target_by_mcu.is_disk_virtual and test.extra_files:
            for f in test.extra_files:
                copy(f, disk)
        sleep(target_by_mcu.program_cycle_s())

        # Host test execution
        start_host_exec_time = time()
        test_result = self.run_host_test(test.host_test, disk, port, duration, opts.verbose)
        elapsed_time = time() - start_host_exec_time
        print print_test_result(test_result, target_name, toolchain_name,
                                test_id, test_description, elapsed_time, duration)

        return (test_result, target_name, toolchain_name,
                test_id, test_description, round(elapsed_time, 2), duration)

    def run_host_test(self, name, disk, port, duration, verbose=False, extra_serial=""):
        # print "{%s} port:%s disk:%s" % (name, port, disk),
        cmd = ["python", "%s.py" % name, '-p', port, '-d', disk, '-t', str(duration), "-e", extra_serial]
        proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
        obs = ProcessObserver(proc)
        start = time()
        line = ''
        output = []
        while (time() - start) < duration:
            try:
                c = obs.queue.get(block=True, timeout=1)
            except Empty, _:
                c = None
            if c:
                output.append(c)
                # Give the mbed under test a way to communicate the end of the test
                if c in ['\n', '\r']:
                    if '{end}' in line:
                        break
                    line = ''
                else:
                    line += c

        # Stop test process
        obs.stop()

        # Handle verbose mode
        if verbose:
            print "Test::Output::Start"
            print "".join(output)
            print "Test::Output::Finish"

        # Parse test 'output' data
        result = self.TEST_RESULT_UNDEF
        for line in "".join(output).splitlines():
            search_result = self.re_detect_testcase_result.search(line)
            if search_result and len(search_result.groups()):
                result = self.TEST_RESULT_MAPPING[search_result.groups(0)[0]]
                break
        return result

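# For illustration (hypothetical transcript): if the host test process echoes
# "{success}\r\n{end}\r\n" from the MUT's serial output, run_host_test() stops
# reading at the "{end}" token and the result parser maps "{success}" to
# TEST_RESULT_OK via TEST_RESULT_MAPPING.
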
def is_peripherals_available(target_mcu_name, peripherals=None):
    """ Checks if specified target should run specific peripheral test case. """
    if peripherals is not None:
        peripherals = set(peripherals)
    for id, mut in MUTs.iteritems():
        # Target MCU name check
        if mut["mcu"] != target_mcu_name:
            continue
        # Peripherals check
        if peripherals is not None:
            if 'peripherals' not in mut:
                continue
            if not peripherals.issubset(set(mut['peripherals'])):
                continue
        return True
    return False

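# For illustration (hypothetical MUTs entry): with
#   MUTs = {"1": {"mcu": "LPC1768", "port": "COM4", "disk": "J:\\",
#                 "peripherals": ["TMP102", "SD"]}}
# is_peripherals_available("LPC1768", ["SD"]) returns True, while
# is_peripherals_available("LPC1768", ["CAN"]) returns False.
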
def print_test_result(test_result, target_name, toolchain_name,
                      test_id, test_description, elapsed_time, duration):
    """ Use specific convention to print test result and related data. """
    tokens = []
    tokens.append("TargetTest")
    tokens.append(target_name)
    tokens.append(toolchain_name)
    tokens.append(test_id)
    tokens.append(test_description)
    separator = "::"
    time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
    result = separator.join(tokens) + " [" + test_result + "]" + time_info
    return result

def shape_test_request(mcu, image_path, test_id, duration=10):
    """ Function prepares JSON structure describing test specification. """
    test_spec = {
        "mcu": mcu,
        "image": image_path,
        "duration": duration,
        "test_id": test_id,
    }
    return json.dumps(test_spec)

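# For illustration: shape_test_request("LPC1768", "/build/basic.bin", "MBED_A1")
# returns a JSON string such as (key order may differ):
#   '{"mcu": "LPC1768", "image": "/build/basic.bin", "duration": 10, "test_id": "MBED_A1"}'
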
def get_json_data_from_file(json_spec_filename, verbose=False):
    """ Loads a JSON formatted string from file into a data structure """
    result = None
    try:
        with open(json_spec_filename) as data_file:
            try:
                result = json.load(data_file)
            except ValueError as json_error_msg:
                result = None
                print "Error in '%s' file: %s" % (json_spec_filename, json_error_msg)
    except IOError as fileopen_error_msg:
        print "Error: %s" % (fileopen_error_msg)
    if verbose and result:
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(result)
    return result

def print_muts_configuration_from_json(json_data, join_delim=", "):
    """ Prints MUTs configuration passed to test script for verboseness. """
    muts_info_cols = []
    # We need to check all unique properties for each defined MUT
    for k in json_data:
        mut_info = json_data[k]
        for property in mut_info:
            if property not in muts_info_cols:
                muts_info_cols.append(property)

    # Prepare pretty table object to display all MUTs
    pt_cols = ["index"] + muts_info_cols
    pt = PrettyTable(pt_cols)
    for col in pt_cols:
        pt.align[col] = "l"

    # Add rows to pretty print object
    for k in json_data:
        row = [k]
        mut_info = json_data[k]
        for col in muts_info_cols:
            cell_val = mut_info[col] if col in mut_info else None
            if type(cell_val) == ListType:
                cell_val = join_delim.join(cell_val)
            row.append(cell_val)
        pt.add_row(row)
    return pt.get_string()

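# For illustration (hypothetical MUT): given
#   {"1": {"mcu": "LPC1768", "port": "COM4", "disk": "J:\\"}}
# the returned table has an "index" column plus one column per MUT property
# ("mcu", "port", "disk"), with list-valued cells joined by join_delim.
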
def print_test_configuration_from_json(json_data, join_delim=", "):
    """ Prints test specification configuration passed to test script for verboseness. """
    toolchains_info_cols = []
    # We need to check all toolchains for each device
    for k in json_data:
        # k should be 'targets'
        targets = json_data[k]
        for target in targets:
            toolchains = targets[target]
            for toolchain in toolchains:
                if toolchain not in toolchains_info_cols:
                    toolchains_info_cols.append(toolchain)

    # Prepare pretty table object to display test specification
    pt_cols = ["mcu"] + sorted(toolchains_info_cols)
    pt = PrettyTable(pt_cols)
    for col in pt_cols:
        pt.align[col] = "l"

    # { target : [conflicted toolchains] }
    toolchain_conflicts = {}
    for k in json_data:
        # k should be 'targets'
        targets = json_data[k]
        for target in targets:
            target_supported_toolchains = get_target_supported_toolchains(target)
            if not target_supported_toolchains:
                target_supported_toolchains = []
            target_name = target if target in TARGET_MAP else "%s*" % target
            row = [target_name]
            toolchains = targets[target]
            # Iterate in sorted order so cells line up with the sorted columns
            for toolchain in sorted(toolchains_info_cols):
                # Check for conflicts
                conflict = False
                if toolchain in toolchains:
                    if toolchain not in target_supported_toolchains:
                        conflict = True
                        if target not in toolchain_conflicts:
                            toolchain_conflicts[target] = []
                        toolchain_conflicts[target].append(toolchain)
                # Add marker inside table about target usage / conflict
                cell_val = 'Yes' if toolchain in toolchains else '-'
                if conflict:
                    cell_val += '*'
                row.append(cell_val)
            pt.add_row(row)

    # Generate result string
    result = pt.get_string()    # Test specification table
    if toolchain_conflicts:     # Print conflicts if they exist
        result += "\n"
        result += "Toolchain conflicts:\n"
        for target in toolchain_conflicts:
            if target not in TARGET_MAP:
                result += "\t* Target %s unknown\n" % (target)
            conflict_target_list = ", ".join(toolchain_conflicts[target])
            suffix = 's' if len(toolchain_conflicts[target]) > 1 else ''
            result += "\t* Target %s does not support %s toolchain%s\n" % (target, conflict_target_list, suffix)
    return result

def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=','):
    # Get all unique test ID prefixes
    unique_test_id = []
    for test in TESTS:
        split = test['id'].split('_')[:-1]
        test_id_prefix = '_'.join(split)
        if test_id_prefix not in unique_test_id:
            unique_test_id.append(test_id_prefix)
    unique_test_id.sort()
    counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
    counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)

    test_properties = ['id', 'automated', 'description', 'peripherals', 'host_test', 'duration'] if cols is None else cols

    # All tests status table print
    pt = PrettyTable(test_properties)
    for col in test_properties:
        pt.align[col] = "l"
    pt.align['duration'] = "r"

    counter_all = 0
    counter_automated = 0
    pt.padding_width = 1    # One space between column edges and contents (default)

    for test_id in TEST_MAP:
        row = []
        test = TEST_MAP[test_id]
        split = test_id.split('_')[:-1]
        test_id_prefix = '_'.join(split)

        for col in test_properties:
            col_value = test[col]
            if type(test[col]) == ListType:
                col_value = join_delim.join(test[col])
            elif test[col] == None:
                col_value = "-"
            row.append(col_value)
        if test['automated'] == True:
            counter_dict_test_id_types[test_id_prefix] += 1
            counter_automated += 1
        pt.add_row(row)
        # Update counters
        counter_all += 1
        counter_dict_test_id_types_all[test_id_prefix] += 1
    print pt
    print

    if result_summary:
        # Automation result summary
        test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
        pt = PrettyTable(test_id_cols)
        pt.align['automated'] = "r"
        pt.align['all'] = "r"
        pt.align['percent [%]'] = "r"

        percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
        str_progress = progress_bar(percent_progress, 75)
        pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
        print "Automation coverage:"
        print pt
        print

        # Test automation coverage table print
        test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
        pt = PrettyTable(test_id_cols)
        pt.align['id'] = "l"
        pt.align['automated'] = "r"
        pt.align['all'] = "r"
        pt.align['percent [%]'] = "r"
        for unique_id in unique_test_id:
            # print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
            percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
            str_progress = progress_bar(percent_progress, 75)
            row = [unique_id,
                   counter_dict_test_id_types[unique_id],
                   counter_dict_test_id_types_all[unique_id],
                   percent_progress,
                   "[" + str_progress + "]"]
            pt.add_row(row)
        print "Test automation coverage:"
        print pt
        print

def progress_bar(percent_progress, saturation=0):
    """ This function creates a progress bar with an optional saturation mark """
    step = int(percent_progress / 2)    # Scale percentage to a 0-50 character bar
    str_progress = '#' * step + '.' * int(50 - step)
    c = '!' if str_progress[38] == '.' else '|'
    if saturation > 0:
        saturation = saturation / 2
        str_progress = str_progress[:saturation] + c + str_progress[saturation:]
    return str_progress

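# For illustration: progress_bar(40, 75) gives 20 '#' marks followed by dots,
# with the '!' saturation mark spliced in at character 37 (75 / 2):
#   ####################.................!.............
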
def get_unique_value_from_summary(test_summary, index):
    """ Gets list of unique target names """
    result = []
    for test in test_summary:
        target_name = test[index]
        if target_name not in result:
            result.append(target_name)
    return sorted(result)


def get_unique_value_from_summary_ext(test_summary, index_key, index_val):
    """ Gets list of unique target names and returns a dictionary """
    result = {}
    for test in test_summary:
        key = test[index_key]
        val = test[index_val]
        if key not in result:
            result[key] = val
    return result

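# For illustration: test_summary rows are the tuples returned by
# SingleTestRunner.handle(), e.g. (hypothetical values)
#   ("OK", "LPC1768", "ARM", "MBED_A1", "Basic", 2.31, 10)
# so get_unique_value_from_summary(test_summary, 1) returns the sorted list
# of unique target names.
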
def generate_test_summary_by_target(test_summary):
    """ Prints well-formed summary with results (SQL table like)
        table shows test x toolchain test result matrix """
    RESULT_INDEX = 0
    TARGET_INDEX = 1
    TOOLCHAIN_INDEX = 2
    TEST_INDEX = 3
    DESC_INDEX = 4

    unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX)
    unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX)
    unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
    unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX)

    result = ""
    result_dict = {}    # test : { toolchain : result }
    for target in unique_targets:
        result = "Test summary:\n"
        for test in test_summary:
            if test[TEST_INDEX] not in result_dict:
                result_dict[test[TEST_INDEX]] = {}
            result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]

        pt_cols = ["Target", "Test ID", "Test Description"] + unique_toolchains
        pt = PrettyTable(pt_cols)
        for col in pt_cols:
            pt.align[col] = "l"
        pt.padding_width = 1    # One space between column edges and contents (default)

        for test in unique_tests:
            test_results = result_dict[test]
            row = [target, test, unique_test_desc[test]]
            for toolchain in unique_toolchains:
                row.append(test_results[toolchain])
            pt.add_row(row)
        result += pt.get_string()
        result += "\n"
    return result

def generate_test_summary(test_summary):
    """ Prints well-formed summary with results (SQL table like)
        table shows target x test result matrix """
    result = "Test summary:\n"
    # Pretty table package is used to print results
    pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
                      "Elapsed Time (sec)", "Timeout (sec)"])
    pt.align["Result"] = "l"            # Left align
    pt.align["Target"] = "l"            # Left align
    pt.align["Toolchain"] = "l"         # Left align
    pt.align["Test ID"] = "l"           # Left align
    pt.align["Test Description"] = "l"  # Left align
    pt.padding_width = 1                # One space between column edges and contents (default)

    result_dict = {single_test.TEST_RESULT_OK: 0,
                   single_test.TEST_RESULT_FAIL: 0,
                   single_test.TEST_RESULT_ERROR: 0,
                   single_test.TEST_RESULT_UNDEF: 0}

    for test in test_summary:
        if test[0] in result_dict:
            result_dict[test[0]] += 1
        pt.add_row(test)
    result += pt.get_string()
    result += "\n"

    # Print result count
    result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.iteritems()])
    result += "\n"
    return result

if __name__ == '__main__':
    # Command line options
    parser = optparse.OptionParser()
    parser.add_option('-i', '--tests',
                      dest='test_spec_filename',
                      metavar="FILE",
                      help='Points to file with test specification')

    parser.add_option('-M', '--MUTS',
                      dest='muts_spec_filename',
                      metavar="FILE",
                      help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')

    parser.add_option('-g', '--goanna-for-tests',
                      dest='goanna_for_tests',
                      metavar=False,
                      action="store_true",
                      help='Run Goanna static analysis tool for tests')

    parser.add_option('-G', '--goanna-for-sdk',
                      dest='goanna_for_mbed_sdk',
                      metavar=False,
                      action="store_true",
                      help='Run Goanna static analysis tool for mbed SDK')

    parser.add_option('-s', '--suppress-summary',
                      dest='suppress_summary',
                      default=False,
                      action="store_true",
                      help='Suppresses display of well-formatted table with test results')

    parser.add_option('-t', '--test-summary',
                      dest='test_x_toolchain_summary',
                      default=False,
                      action="store_true",
                      help='Displays well-formatted table with test x toolchain test result per target')

    parser.add_option('-r', '--test-automation-report',
                      dest='test_automation_report',
                      default=False,
                      action="store_true",
                      help='Prints information about all tests and exits')

    parser.add_option('-R', '--test-case-report',
                      dest='test_case_report',
                      default=False,
                      action="store_true",
                      help='Prints information about all test cases and exits')

    parser.add_option('-P', '--only-peripherals',
                      dest='test_only_peripheral',
                      default=False,
                      action="store_true",
                      help='Test only peripherals declared for MUT and skip common tests')

    parser.add_option('-C', '--only-commons',
                      dest='test_only_common',
                      default=False,
                      action="store_true",
                      help='Test only board internals. Skip peripheral tests and perform common tests.')

    parser.add_option('-c', '--copy-method',
                      dest='copy_method',
                      help="Choose which copy method should be used to put the binary on the mbed disk: 'cp', 'copy' or 'xcopy'. Default is the Python shutil.copy method.")

    parser.add_option('-n', '--test-by-names',
                      dest='test_by_names',
                      help='Runs only the tests enumerated in this switch')

    parser.add_option("-S", "--supported-toolchains",
                      action="store_true",
                      dest="supported_toolchains",
                      default=False,
                      help="Displays supported matrix of MCUs and toolchains")

    parser.add_option("-O", "--only-build",
                      action="store_true",
                      dest="only_build_tests",
                      default=False,
                      help="Only build tests, skips actual test procedures (flashing etc.)")

    parser.add_option('', '--config',
                      dest='verbose_test_configuration_only',
                      default=False,
                      action="store_true",
                      help='Displays full test specification and MUTs configuration and exits')

    parser.add_option('-v', '--verbose',
                      dest='verbose',
                      default=False,
                      action="store_true",
                      help='Verbose mode (prints some extra information)')

    parser.description = """This script allows you to run mbed defined test cases for particular MCU(s) and corresponding toolchain(s)."""
    parser.epilog = """Example: singletest.py -i test_spec.json -M muts_all.json"""

    (opts, args) = parser.parse_args()

    # Print summary / information about automation test status
    if opts.test_automation_report:
        get_avail_tests_summary_table()
        exit(0)

    # Print summary / information about automation test case status
    if opts.test_case_report:
        test_case_report_cols = ['id', 'automated', 'description', 'peripherals', 'host_test', 'duration', 'source_dir']
        get_avail_tests_summary_table(cols=test_case_report_cols, result_summary=False, join_delim='\n')
        exit(0)

    # Only prints matrix of supported toolchains
    if opts.supported_toolchains:
        mcu_toolchain_matrix()
        exit(0)

    # Open file with test specification
    # test_spec_filename tells script which targets and their toolchain(s)
    # should be covered by the test scenario
    test_spec = get_json_data_from_file(opts.test_spec_filename) if opts.test_spec_filename else None
    if test_spec is None:
        parser.print_help()
        exit(-1)

    # Get extra MUTs if applicable
    if opts.muts_spec_filename:
        MUTs = get_json_data_from_file(opts.muts_spec_filename)

    if MUTs is None:
        parser.print_help()
        exit(-1)

    # Only print parsed MUTs configuration
    if MUTs and opts.verbose_test_configuration_only:
        print "MUTs configuration in %s:" % opts.muts_spec_filename
        print print_muts_configuration_from_json(MUTs)
        print
        print "Test specification in %s:" % opts.test_spec_filename
        print print_test_configuration_from_json(test_spec)
        exit(0)

    # Verbose test specification and MUTs configuration
    if MUTs and opts.verbose:
        print print_muts_configuration_from_json(MUTs)
    if test_spec and opts.verbose:
        print print_test_configuration_from_json(test_spec)

    # Magic happens here... ;)
    start = time()
    single_test = SingleTestRunner()

    clean = test_spec.get('clean', False)
    test_ids = test_spec.get('test_ids', [])
    groups = test_spec.get('test_groups', [])

    # Here we store test results
    test_summary = []

    for target, toolchains in test_spec['targets'].iteritems():
        for toolchain in toolchains:
            # print '=== %s::%s ===' % (target, toolchain)
            # Let's build our test
            T = TARGET_MAP[target]
            build_mbed_libs_options = ["analyze"] if opts.goanna_for_mbed_sdk else None
            build_mbed_libs_result = build_mbed_libs(T, toolchain, options=build_mbed_libs_options)
            if not build_mbed_libs_result:
                print 'Skipped tests for %s target. Toolchain %s is not yet supported for this target' % (T.name, toolchain)
                continue

            build_dir = join(BUILD_DIR, "test", target, toolchain)

            for test_id, test in TEST_MAP.iteritems():
                if opts.test_by_names and test_id not in opts.test_by_names.split(','):
                    continue

                if test_ids and test_id not in test_ids:
                    continue

                if opts.test_only_peripheral and not test.peripherals:
                    if opts.verbose:
                        print "TargetTest::%s::NotPeripheralTestSkipped()" % (target)
                    continue

                if opts.test_only_common and test.peripherals:
                    if opts.verbose:
                        print "TargetTest::%s::PeripheralTestSkipped()" % (target)
                    continue

                if test.automated and test.is_supported(target, toolchain):
                    if not is_peripherals_available(target, test.peripherals):
                        if opts.verbose:
                            test_peripherals = test.peripherals if test.peripherals else []
                            print "TargetTest::%s::TestSkipped(%s)" % (target, ",".join(test_peripherals))
                        continue

                    test_result = {
                        'target': target,
                        'toolchain': toolchain,
                        'test_id': test_id,
                    }

                    build_project_options = ["analyze"] if opts.goanna_for_tests else None

                    # Detect which libs should be added to the test
                    # Some libs have to be compiled, like RTOS or ETH
                    libraries = []
                    for lib in LIBRARIES:
                        if lib['build_dir'] in test.dependencies:
                            libraries.append(lib['id'])
                    # Build libs for test
                    for lib_id in libraries:
                        build_lib(lib_id, T, toolchain, options=build_project_options,
                                  verbose=opts.verbose, clean=clean)

                    # TODO: move the two loops below to a separate function
                    INC_DIRS = []
                    for lib_id in libraries:
                        if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
                            INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])

                    MACROS = []
                    for lib_id in libraries:
                        if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
                            MACROS.extend(LIBRARY_MAP[lib_id]['macros'])

                    path = build_project(test.source_dir, join(build_dir, test_id),
                                         T, toolchain, test.dependencies,
                                         options=build_project_options,
                                         clean=clean,
                                         verbose=opts.verbose,
                                         macros=MACROS,
                                         inc_dirs=INC_DIRS)

                    test_result_cache = join(dirname(path), "test_result.json")

                    if opts.only_build_tests:
                        # We are skipping the testing phase; suppress the summary
                        opts.suppress_summary = True
                        continue

                    # For an automated test the duration acts as a timeout after
                    # which the test gets interrupted
                    test_spec = shape_test_request(target, path, test_id, test.duration)
                    single_test_result = single_test.handle(test_spec, target, toolchain)
                    test_summary.append(single_test_result)
                    # print test_spec, target, toolchain

    elapsed_time = time() - start

    # Human readable summary
    if not opts.suppress_summary:
        # Prints well-formed summary with results (SQL table like)
        print generate_test_summary(test_summary)
    if opts.test_x_toolchain_summary:
        # Prints well-formed summary with results (SQL table like)
        # table shows test x toolchain test result matrix
        print generate_test_summary_by_target(test_summary)

    print "Completed in %d sec" % (time() - start)