"""
|
|
|
|
mbed SDK
|
|
|
|
Copyright (c) 2011-2013 ARM Limited
|
|
|
|
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
you may not use this file except in compliance with the License.
|
|
|
|
You may obtain a copy of the License at
|
|
|
|
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
See the License for the specific language governing permissions and
|
|
|
|
limitations under the License.
|
|
|
|
|
|
|
|
Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
|
|
|
|
|
|
|
|
Usage:
|
|
|
|
1. Update your private_settings.py with all MUTs you can possibly connect.
|
|
|
|
Make sure mcu / port / serial names are concretely inputed.
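   For example, a single MUT entry in private_settings.py might look like
   this (the port, disk and peripherals values below are illustrative
   placeholders; use the ones matching your setup):

   MUTs = {
       "1": {"mcu": "LPC11U24",
             "port": "COM4",
             "disk": "E:\\",
             "peripherals": []}
   }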
2. Update the test_spec dictionary in the __main__ section.

Example 1:
In the example below only the LPC11U24 target is tested and the test is
built using only the uARM toolchain. Note that the other targets are just
commented out. Uncomment them or add your own targets at will.

test_spec = {
    "targets": {
        # "KL25Z": ["ARM", "GCC_ARM"],
        # "LPC1768": ["ARM", "GCC_ARM", "GCC_CR", "GCC_CS", "IAR"],
        "LPC11U24": ["uARM"]
        # "NRF51822": ["ARM"]
        # "NUCLEO_F103RB": ["ARM"]
    }
}
"""

import sys
import json
import optparse
import pprint
import re

from prettytable import PrettyTable
from serial import Serial

from os.path import join, abspath, dirname, exists
from shutil import copy
from subprocess import call
from time import sleep, time

# Be sure that the tools directory is in the search path
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)

# Imports related to mbed build api
from workspace_tools.build_api import build_project, build_mbed_libs
from workspace_tools.paths import BUILD_DIR
from workspace_tools.targets import TARGET_MAP
from workspace_tools.tests import TEST_MAP
from workspace_tools.utils import delete_dir_files
from workspace_tools.settings import MUTs

class SingleTestRunner(object):
    """ Object wrapper for single test run which may involve multiple MUTs."""

    re_detect_testcase_result = None
    TEST_RESULT_UNDEF = "UNDEF"

    # mbed test suite -> SingleTestRunner
    TEST_RESULT_MAPPING = {"success" : "OK",
                           "failure" : "FAIL",
                           "error"   : "ERROR",
                           "end"     : TEST_RESULT_UNDEF}

    def __init__(self):
        pattern = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
        self.re_detect_testcase_result = re.compile(pattern)
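    # Illustrative note (the exact prints come from the test binaries, not
    # from this script): a test binary typically emits markers such as
    # "{success}" or "{failure}" and finally "{end}" on its serial port.
    # The regex built in __init__ detects the first recognised token and
    # TEST_RESULT_MAPPING turns it into "OK", "FAIL", "ERROR" or UNDEF.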
    def run_host_test(self, target_name, port,
                      duration, verbose=False):
        """
        Function resets the target and grabs the test log by polling the
        serial port with a timeout.
        Function assumes the target is already flashed with the proper 'test' binary.
        """
        output = ""
        # Prepare serial for receiving data from target
        baud = 9600
        serial = Serial(port, timeout=1)
        serial.setBaudrate(baud)
        flush_serial(serial)
        # Reset the target and poll its serial output
        reset(target_name, serial, verbose=verbose)
        start_serial_timeout = time()
        try:
            while (time() - start_serial_timeout) < duration:
                test_output = serial.read(512)
                output += test_output
                flush_serial(serial)
                if '{end}' in output:
                    break
        except KeyboardInterrupt, _:
            print "CTRL+C break"
        flush_serial(serial)
        serial.close()

        # Handle verbose mode
        if verbose:
            print "Test::Output::Start"
            print output
            print "Test::Output::Finish"

        # Parse test 'output' data
        result = self.TEST_RESULT_UNDEF
        for line in output.splitlines():
            search_result = self.re_detect_testcase_result.search(line)
            if search_result and len(search_result.groups()):
                result = self.TEST_RESULT_MAPPING[search_result.groups(0)[0]]
                break
        return result
    def handle(self, test_spec, target_name, toolchain_name):
        """
        Function determines the MUT's mbed disk/port and copies the binary to
        the target. The test is invoked afterwards.
        """
        data = json.loads(test_spec)
        # Get test information, image and test timeout
        test_id = data['test_id']
        test = TEST_MAP[test_id]
        test_description = TEST_MAP[test_id].get_description()
        image = data["image"]
        duration = data.get("duration", 10)

        # Find a suitable MUT:
        mut = None
        for id, m in MUTs.iteritems():
            if m['mcu'] == data['mcu']:
                mut = m
                break

        if mut is None:
            print "Error: No mbed available: mut[%s]" % data['mcu']
            return

        disk = mut['disk']
        port = mut['port']
        target_by_mcu = TARGET_MAP[mut['mcu']]

        # Program
        # When the build and test system were separate, this was relative to a
        # network folder base path: join(NETWORK_BASE_PATH, )
        image_path = image
        if not exists(image_path):
            print "Error: Image file does not exist: %s" % image_path
            elapsed_time = 0
            test_result = "{error}"
            return (test_result, target_name, toolchain_name,
                    test_id, test_description, round(elapsed_time, 2), duration)

        if not target_by_mcu.is_disk_virtual:
            delete_dir_files(disk)

        # Program MUT with proper image file
        copy(image_path, disk)

        # Copy extra files
        if not target_by_mcu.is_disk_virtual and test.extra_files:
            for f in test.extra_files:
                copy(f, disk)

        sleep(target_by_mcu.program_cycle_s())

        # Host test execution
        start_host_exec_time = time()
        test_result = self.run_host_test(target_name, port, duration)
        elapsed_time = time() - start_host_exec_time
        print print_test_result(test_result, target_name, toolchain_name,
                                test_id, test_description, elapsed_time, duration)
        return (test_result, target_name, toolchain_name,
                test_id, test_description, round(elapsed_time, 2), duration)

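# Note (illustrative, values made up): handle() returns a row-shaped tuple such
# as ("OK", "LPC11U24", "uARM", "MBED_A1", "Basic", 2.43, 20), which __main__
# appends to test_summary and later feeds to PrettyTable, one element per
# summary column.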

def flush_serial(serial):
    """ Flushing serial in/out. """
    serial.flushInput()
    serial.flushOutput()

def reset(mcu_name, serial, verbose=False, sleep_before_reset=0, sleep_after_reset=0):
    """
    Function resets the target using various methods (e.g. serial break)
    depending on target type.
    """
    if sleep_before_reset > 0:
        sleep(sleep_before_reset)
    if verbose:
        verbose_msg = "Reset::cmd(sendBreak)"
    # Reset type decision
    if mcu_name.startswith('NRF51822'): # Nordic
        call(["nrfjprog", "-r"])
        verbose_msg = "Reset::cmd(nrfjprog)"
    elif mcu_name.startswith('NUCLEO'): # ST NUCLEO
        call(["ST-LINK_CLI.exe", "-Rst"])
        verbose_msg = "Reset::cmd(ST-LINK_CLI.exe)"
    else:
        serial.sendBreak()

    if sleep_after_reset > 0:
        sleep(sleep_after_reset)
    if verbose:
        print verbose_msg

def is_peripherals_available(target_mcu_name, peripherals=None):
    """ Checks if the specified target can run a specific peripheral test case."""
    if peripherals is not None:
        peripherals = set(peripherals)
    for id, mut in MUTs.iteritems():
        # Target MCU name check
        if mut["mcu"] != target_mcu_name:
            continue
        # Peripherals check
        if peripherals is not None:
            if 'peripherals' not in mut:
                continue
            if not peripherals.issubset(set(mut['peripherals'])):
                continue
        return True
    return False

def print_test_result(test_result, target_name, toolchain_name,
                      test_id, test_description, elapsed_time, duration):
    """ Use specific convention to print test result and related data."""
    tokens = []
    tokens.append("TargetTest")
    tokens.append(target_name)
    tokens.append(toolchain_name)
    tokens.append(test_id)
    tokens.append(test_description)
    separator = "::"
    time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
    result = separator.join(tokens) + " [" + test_result + "]" + time_info
    return result

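# For reference, an illustrative (made-up) line produced by print_test_result:
#     TargetTest::LPC11U24::uARM::MBED_A1::Basic [OK] in 2.43 of 20 sec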

def shape_test_request(mcu, image_path, test_id, duration=10):
    """ Function prepares JSON structure describing test specification."""
    test_spec = {
        "mcu": mcu,
        "image": image_path,
        "duration": duration,
        "test_id": test_id,
    }
    return json.dumps(test_spec)

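# Illustrative call (path and values made up): shape_test_request("LPC11U24",
# "./build/test/LPC11U24/uARM/MBED_A1/basic.bin", "MBED_A1", 20) returns a JSON
# string with the "mcu", "image", "duration" and "test_id" keys filled in,
# which handle() later parses with json.loads().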

def get_json_data_from_file(json_spec_filename, verbose=False):
    result = None
    try:
        with open(json_spec_filename) as data_file:
            try:
                result = json.load(data_file)
            except ValueError as json_error_msg:
                result = None
                print "Error: %s" % (json_error_msg)
    except IOError as fileopen_error_msg:
        print "Error: %s" % (fileopen_error_msg)
    if verbose and result:
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(result)
    return result


if __name__ == '__main__':
    # Command line options
    parser = optparse.OptionParser()
    parser.add_option('-i', '--tests',
                      dest='test_spec_filename',
                      metavar="FILE",
                      help='Points to file with test specification')

    parser.add_option('-M', '--MUTS',
                      dest='muts_spec_filename',
                      metavar="FILE",
                      help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')

    parser.add_option('-s', '--suppress-summary',
                      dest='suppress_summary',
                      default=False,
                      action="store_true",
                      help='Suppresses display of well-formatted table with test results')

    parser.add_option('-v', '--verbose',
                      dest='verbose',
                      default=False,
                      action="store_true",
                      help='Verbose mode (prints some extra information)')

    parser.description = """This script allows you to run mbed defined test cases for particular MCU(s) and corresponding toolchain(s)."""
    parser.epilog = """Example: singletest.py -i test_spec.json [-M muts_all.json]"""
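    # Illustrative file contents (keys follow what this script reads; the
    # concrete target, port and disk values are placeholders):
    #
    # test_spec.json:
    #     { "targets": { "LPC11U24": ["uARM"] } }
    #
    # muts_all.json (optional, -M):
    #     { "1": { "mcu": "LPC11U24", "port": "COM4", "disk": "E:\\",
    #              "peripherals": [] } }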

    (opts, args) = parser.parse_args()

    # Open file with test specification
    # test_spec_filename tells the script which targets and their toolchain(s)
    # should be covered by the test scenario
    test_spec = get_json_data_from_file(opts.test_spec_filename, opts.verbose) if opts.test_spec_filename else None
    if test_spec is None:
        parser.print_help()
        exit(-1)

    # Get extra MUTs if applicable
    if opts.muts_spec_filename:
        MUTs = get_json_data_from_file(opts.muts_spec_filename, opts.verbose)
        if MUTs is None:
            parser.print_help()
            exit(-1)

    # Magic happens here... ;)
    start = time()
    single_test = SingleTestRunner()

    clean = test_spec.get('clean', False)
    test_ids = test_spec.get('test_ids', [])
    groups = test_spec.get('test_groups', [])

    # Here we store test results
    test_summary = []

    for target, toolchains in test_spec['targets'].iteritems():
        for toolchain in toolchains:
            # print '=== %s::%s ===' % (target, toolchain)
            # Let's build our test
            T = TARGET_MAP[target]
            build_mbed_libs(T, toolchain)
            build_dir = join(BUILD_DIR, "test", target, toolchain)

            for test_id, test in TEST_MAP.iteritems():
                if test_ids and test_id not in test_ids:
                    continue

                if test.automated and test.is_supported(target, toolchain):
                    if not is_peripherals_available(target, test.peripherals):
                        if opts.verbose:
                            print "TargetTest::%s::TestSkipped(%s)" % (target, ",".join(test.peripherals))
                        continue

                    test_result = {
                        'target': target,
                        'toolchain': toolchain,
                        'test_id': test_id,
                    }

                    path = build_project(test.source_dir, join(build_dir, test_id),
                                         T, toolchain, test.dependencies,
                                         clean=clean, verbose=opts.verbose)

                    test_result_cache = join(dirname(path), "test_result.json")

                    # For an automated test the duration acts as a timeout after
                    # which the test gets interrupted
                    test_spec = shape_test_request(target, path, test_id, test.duration)
                    single_test_result = single_test.handle(test_spec, target, toolchain)
                    test_summary.append(single_test_result)
                    # print test_spec, target, toolchain

    elapsed_time = time() - start

    # Human readable summary
    if not opts.suppress_summary:
        print
        print "Test summary:"
        # Pretty table package is used to print results
        pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
                          "Elapsed Time (sec)", "Timeout (sec)"])
        pt.align["Result"] = "l" # Left align
        pt.align["Target"] = "l" # Left align
        pt.align["Toolchain"] = "l" # Left align
        pt.align["Test ID"] = "l" # Left align
        pt.align["Test Description"] = "l" # Left align
        pt.padding_width = 1 # One space between column edges and contents (default)

        for test in test_summary:
            pt.add_row(test)
        print pt
    print "Completed in %d sec" % (time() - start)