"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>

-------------------------------------------------------------------------------

Usage: singletest.py [options]

This script allows you to run mbed defined test cases for particular MCU(s)
and corresponding toolchain(s).

Options:
  -h, --help            show this help message and exit
  -i FILE, --tests=FILE
                        Points to file with test specification
  -M FILE, --MUTS=FILE  Points to file with MUTs specification (overwrites
                        settings.py and private_settings.py)
  -g, --goanna-for-tests
                        Run Goanna static analysis tool for tests
  -G, --goanna-for-sdk  Run Goanna static analysis tool for mbed SDK
  -s, --suppress-summary
                        Suppresses display of well-formatted table with test
                        results
  -v, --verbose         Verbose mode (prints some extra information)

Example: singletest.py -i test_spec.json -M muts_all.json

-------------------------------------------------------------------------------

File format example: test_spec.json

{
    "targets": {
        "KL46Z": ["ARM", "GCC_ARM"],
        "LPC1768": ["ARM", "GCC_ARM", "GCC_CR", "GCC_CS", "IAR"],
        "LPC11U24": ["uARM"],
        "NRF51822": ["ARM"]
    }
}

File format example: muts_all.json

{
    "1" : {"mcu": "LPC1768",
           "port": "COM4", "disk": "J:\\",
           "peripherals": ["TMP102", "digital_loop", "port_loop", "analog_loop", "SD"]
          },

    "2" : {"mcu": "KL25Z",
           "port": "COM7", "disk": "G:\\",
           "peripherals": ["digital_loop", "port_loop", "analog_loop"]
          }
}
"""

import sys
import json
import optparse
import pprint
import re

from prettytable import PrettyTable
from serial import Serial

from os.path import join, abspath, dirname, exists
from shutil import copy
from subprocess import call
from time import sleep, time

from subprocess import Popen, PIPE
from threading import Thread
from Queue import Queue, Empty

# Be sure that the tools directory is in the search path
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)

# Imports related to mbed build API
from workspace_tools.build_api import build_project, build_mbed_libs
from workspace_tools.paths import BUILD_DIR
from workspace_tools.paths import HOST_TESTS
from workspace_tools.targets import TARGET_MAP
from workspace_tools.tests import TEST_MAP
from workspace_tools.tests import TESTS
from workspace_tools.utils import delete_dir_files
from workspace_tools.settings import MUTs
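
# Note: MUTs (imported above from workspace_tools.settings, or replaced at
# runtime from a muts_all.json file via the -M option) is expected to be a
# dict keyed by an index string, one entry per connected board. A minimal
# sketch, mirroring the muts_all.json example in the module docstring
# (values are illustrative only):
#
#   MUTs = {
#       "1": {"mcu": "LPC1768", "port": "COM4", "disk": "J:\\",
#             "peripherals": ["TMP102", "digital_loop", "port_loop", "analog_loop", "SD"]},
#       "2": {"mcu": "KL25Z", "port": "COM7", "disk": "G:\\",
#             "peripherals": ["digital_loop", "port_loop", "analog_loop"]},
#   }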


class ProcessObserver(Thread):
    def __init__(self, proc):
        Thread.__init__(self)
        self.proc = proc
        self.queue = Queue()
        self.daemon = True
        self.active = True
        self.start()

    def run(self):
        while self.active:
            c = self.proc.stdout.read(1)
            self.queue.put(c)

    def stop(self):
        self.active = False
        try:
            self.proc.terminate()
        except Exception:
            pass
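
# ProcessObserver above drains a child process' stdout one character at a time
# into a thread-safe queue. A minimal usage sketch (the command line below is
# hypothetical), matching how run_host_test() consumes it:
#
#   proc = Popen(["python", "some_host_test.py"], stdout=PIPE)
#   obs = ProcessObserver(proc)
#   try:
#       c = obs.queue.get(block=True, timeout=1)  # one stdout character, or Empty
#   except Empty:
#       c = None
#   obs.stop()  # stops the reader loop and terminates the child process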


class SingleTestRunner(object):
    """ Object wrapper for single test run which may involve multiple MUTs."""

    re_detect_testcase_result = None
    TEST_RESULT_OK = "OK"
    TEST_RESULT_FAIL = "FAIL"
    TEST_RESULT_ERROR = "ERROR"
    TEST_RESULT_UNDEF = "UNDEF"

    # mbed test suite -> SingleTestRunner
    TEST_RESULT_MAPPING = {"success" : TEST_RESULT_OK,
                           "failure" : TEST_RESULT_FAIL,
                           "error"   : TEST_RESULT_ERROR,
                           "end"     : TEST_RESULT_UNDEF}

    def __init__(self):
        pattern = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
        self.re_detect_testcase_result = re.compile(pattern)
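
    # The compiled regular expression matches result tokens which mbed test
    # binaries print back over the serial / host test channel. An illustrative
    # end of a test transcript looks like:
    #
    #   {success}
    #   {end}
    #
    # run_simple_test() and run_host_test() below scan captured output for such
    # tokens and translate them to TEST_RESULT_* values via TEST_RESULT_MAPPING.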

    def run_simple_test(self, target_name, port,
                        duration, verbose=False):
        """
        Function resets the target and polls the serial port (with a timeout)
        for the test log.
        Function assumes target is already flashed with proper 'test' binary.
        """
        output = ""
        # Prepare serial for receiving data from target
        baud = 9600
        serial = Serial(port, timeout=1)
        serial.setBaudrate(baud)
        flush_serial(serial)
        # Reset target and poll for test output
        reset(target_name, serial, verbose=verbose)
        start_serial_timeout = time()
        try:
            while (time() - start_serial_timeout) < duration:
                test_output = serial.read(512)
                output += test_output
                flush_serial(serial)
                if '{end}' in output:
                    break
        except KeyboardInterrupt:
            print "CTRL+C break"
        flush_serial(serial)
        serial.close()

        # Handle verbose mode
        if verbose:
            print "Test::Output::Start"
            print output
            print "Test::Output::Finish"

        # Parse test 'output' data
        result = self.TEST_RESULT_UNDEF
        for line in output.splitlines():
            search_result = self.re_detect_testcase_result.search(line)
            if search_result and len(search_result.groups()):
                result = self.TEST_RESULT_MAPPING[search_result.groups(0)[0]]
                break
        return result

    def handle(self, test_spec, target_name, toolchain_name):
        """
        Function determines MUT's mbed disk/port and copies binary to
        target. Test is invoked afterwards.
        """
        data = json.loads(test_spec)
        # Get test information, image and test timeout
        test_id = data['test_id']
        test = TEST_MAP[test_id]
        test_description = TEST_MAP[test_id].get_description()
        image = data["image"]
        duration = data.get("duration", 10)

        # Find a suitable MUT:
        mut = None
        for id, m in MUTs.iteritems():
            if m['mcu'] == data['mcu']:
                mut = m
                break

        if mut is None:
            print "Error: No mbed available: mut[%s]" % data['mcu']
            return

        disk = mut['disk']
        port = mut['port']
        target_by_mcu = TARGET_MAP[mut['mcu']]

        # Program
        # When the build and test system were separate, this was relative to a
        # base network folder base path: join(NETWORK_BASE_PATH, )
        image_path = image
        if not exists(image_path):
            print "Error: Image file does not exist: %s" % image_path
            elapsed_time = 0
            test_result = "{error}"
            return (test_result, target_name, toolchain_name,
                    test_id, test_description, round(elapsed_time, 2), duration)

        if not target_by_mcu.is_disk_virtual:
            delete_dir_files(disk)

        # Program MUT with proper image file
        copy(image_path, disk)

        # Copy extra files
        if not target_by_mcu.is_disk_virtual and test.extra_files:
            for f in test.extra_files:
                copy(f, disk)

        sleep(target_by_mcu.program_cycle_s())

        # Host test execution
        start_host_exec_time = time()
        # test_result = self.run_simple_test(target_name, port, duration, verbose=opts.verbose)
        test_result = self.run_host_test(test.host_test, disk, port, duration, opts.verbose)
        elapsed_time = time() - start_host_exec_time
        print print_test_result(test_result, target_name, toolchain_name,
                                test_id, test_description, elapsed_time, duration)
        return (test_result, target_name, toolchain_name,
                test_id, test_description, round(elapsed_time, 2), duration)

    def run_host_test(self, name, disk, port, duration, verbose=False, extra_serial=""):
        """ Runs a host test supervisor script (from HOST_TESTS) against the MUT
            and parses its output for a result token."""
        # print "{%s} port:%s disk:%s" % (name, port, disk),
        cmd = ["python", "%s.py" % name, '-p', port, '-d', disk, '-t', str(duration), "-e", extra_serial]
        proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
        obs = ProcessObserver(proc)
        start = time()
        line = ''
        output = []
        while (time() - start) < duration:
            try:
                c = obs.queue.get(block=True, timeout=1)
            except Empty:
                c = None

            if c:
                output.append(c)
                # Give the mbed under test a way to communicate the end of the test
                if c in ['\n', '\r']:
                    if '{end}' in line:
                        break
                    line = ''
                else:
                    line += c

        # Stop test process
        obs.stop()

        # Handle verbose mode
        if verbose:
            print "Test::Output::Start"
            print "".join(output)
            print "Test::Output::Finish"

        # Parse test 'output' data
        result = self.TEST_RESULT_UNDEF
        for line in "".join(output).splitlines():
            search_result = self.re_detect_testcase_result.search(line)
            if search_result and len(search_result.groups()):
                result = self.TEST_RESULT_MAPPING[search_result.groups(0)[0]]
                break
        return result
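
# For reference, SingleTestRunner.run_host_test() simply spawns a host test
# supervisor script from the HOST_TESTS directory. The resulting command line
# has the following shape (script name and values are illustrative only; the
# script name comes from the test's host_test attribute):
#
#   python host_test.py -p COM4 -d J:\ -t 10 -e ""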


def flush_serial(serial):
    """ Flushes serial input and output buffers."""
    serial.flushInput()
    serial.flushOutput()


def reset(mcu_name, serial, verbose=False, sleep_before_reset=0, sleep_after_reset=0):
    """
    Function resets the target using various methods (e.g. serial break)
    depending on target type.
    """
    if sleep_before_reset > 0:
        sleep(sleep_before_reset)
    if verbose:
        verbose_msg = "Reset::cmd(sendBreak)"

    serial.sendBreak()

    if sleep_after_reset > 0:
        sleep(sleep_after_reset)
    if verbose:
        print verbose_msg


def is_peripherals_available(target_mcu_name, peripherals=None):
    """ Checks if a MUT with the specified target MCU (and, optionally, the
        required peripherals) is available."""
    if peripherals is not None:
        peripherals = set(peripherals)
    for id, mut in MUTs.iteritems():
        # Target MCU name check
        if mut["mcu"] != target_mcu_name:
            continue
        # Peripherals check
        if peripherals is not None:
            if 'peripherals' not in mut:
                continue
            if not peripherals.issubset(set(mut['peripherals'])):
                continue
        return True
    return False
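
# Example (illustrative), assuming the muts_all.json configuration from the
# module docstring has been loaded into MUTs:
#
#   is_peripherals_available("LPC1768", ["TMP102", "SD"])  # -> True
#   is_peripherals_available("KL25Z", ["SD"])              # -> False (no "SD" on that MUT)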


def print_test_result(test_result, target_name, toolchain_name,
                      test_id, test_description, elapsed_time, duration):
    """ Formats a single test result line using the TargetTest::... convention."""
    tokens = []
    tokens.append("TargetTest")
    tokens.append(target_name)
    tokens.append(toolchain_name)
    tokens.append(test_id)
    tokens.append(test_description)
    separator = "::"
    time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
    result = separator.join(tokens) + " [" + test_result + "]" + time_info
    return result
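
# A line produced by print_test_result() has the following shape
# (all values are illustrative):
#
#   TargetTest::LPC1768::ARM::MBED_A1::Basic [OK] in 2.43 of 20 sec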


def shape_test_request(mcu, image_path, test_id, duration=10):
    """ Function prepares JSON structure describing test specification."""
    test_spec = {
        "mcu": mcu,
        "image": image_path,
        "duration": duration,
        "test_id": test_id,
    }
    return json.dumps(test_spec)
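
# Example request produced by shape_test_request() (image path is illustrative;
# key order in the JSON string may vary):
#
#   shape_test_request("LPC1768", "build/test/LPC1768/ARM/MBED_A1/basic.bin", "MBED_A1", 20)
#   # -> '{"test_id": "MBED_A1", "mcu": "LPC1768", "duration": 20,
#   #      "image": "build/test/LPC1768/ARM/MBED_A1/basic.bin"}'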


def get_json_data_from_file(json_spec_filename, verbose=False):
    """ Loads a JSON formatted file into a data structure """
    result = None
    try:
        with open(json_spec_filename) as data_file:
            try:
                result = json.load(data_file)
            except ValueError as json_error_msg:
                result = None
                print "Error: %s" % (json_error_msg)
    except IOError as fileopen_error_msg:
        print "Error: %s" % (fileopen_error_msg)
    if verbose and result:
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(result)
    return result
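
# Example (illustrative): loading the muts_all.json file from the module
# docstring returns a plain dict keyed by MUT index:
#
#   muts = get_json_data_from_file("muts_all.json")
#   # muts["1"]["mcu"]  -> "LPC1768"
#   # muts["1"]["port"] -> "COM4"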


def get_result_summary_table():
    """ Prints a table of all defined tests and test automation coverage summaries."""
    # Get all unique test ID prefixes
    unique_test_id = []
    for test in TESTS:
        split = test['id'].split('_')[:-1]
        test_id_prefix = '_'.join(split)
        if test_id_prefix not in unique_test_id:
            unique_test_id.append(test_id_prefix)
    unique_test_id.sort()
    counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
    counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)

    test_properties = ['id', 'automated', 'description', 'peripherals', 'host_test', 'duration']

    # All tests status table print
    pt = PrettyTable(test_properties)
    for col in test_properties:
        pt.align[col] = "l"
    pt.align['duration'] = "r"

    counter_all = 0
    counter_automated = 0

    pt.padding_width = 1 # One space between column edges and contents (default)
    for test in TESTS:
        row = []
        split = test['id'].split('_')[:-1]
        test_id_prefix = '_'.join(split)

        for col in test_properties:
            row.append(test[col] if col in test else "")
        if 'automated' in test and test['automated'] == True:
            counter_dict_test_id_types[test_id_prefix] += 1
            counter_automated += 1
        pt.add_row(row)
        # Update counters
        counter_all += 1
        counter_dict_test_id_types_all[test_id_prefix] += 1
    print pt
    print

    # Automation result summary
    test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
    pt = PrettyTable(test_id_cols)
    pt.align['automated'] = "r"
    pt.align['all'] = "r"
    pt.align['percent [%]'] = "r"

    percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
    str_progress = progress_bar(percent_progress, 75)
    pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
    print "Automation coverage:"
    print pt
    print

    # Test automation coverage table print
    test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
    pt = PrettyTable(test_id_cols)
    pt.align['id'] = "l"
    pt.align['automated'] = "r"
    pt.align['all'] = "r"
    pt.align['percent [%]'] = "r"
    for unique_id in unique_test_id:
        # print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
        percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
        str_progress = progress_bar(percent_progress, 75)
        row = [unique_id,
               counter_dict_test_id_types[unique_id],
               counter_dict_test_id_types_all[unique_id],
               percent_progress,
               "[" + str_progress + "]"]
        pt.add_row(row)
    print "Test automation coverage:"
    print pt
    print


def progress_bar(percent_progress, saturation=0):
    """ Creates a text progress bar with an optional saturation mark."""
    step = int(percent_progress / 2)    # Scale percentage to a 50 character bar
    str_progress = '#' * step + '.' * int(50 - step)
    c = '!' if str_progress[38] == '.' else '|'
    if (saturation > 0):
        saturation = saturation / 2
        str_progress = str_progress[:saturation] + c + str_progress[saturation:]
    return str_progress
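
# Worked example (illustrative): progress_bar(40.0, 75)
#   step = int(40.0 / 2) = 20, so the bar starts as 20 '#' followed by 30 '.'
#   index 38 of that bar is '.', so the saturation mark is '!'
#   saturation 75 maps to index 75 / 2 = 37, where the mark is inserted
#   result: a 51-character string of 20 '#', 17 '.', '!', 13 '.'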


if __name__ == '__main__':
    # Command line options
    parser = optparse.OptionParser()
    parser.add_option('-i', '--tests',
                      dest='test_spec_filename',
                      metavar="FILE",
                      help='Points to file with test specification')

    parser.add_option('-M', '--MUTS',
                      dest='muts_spec_filename',
                      metavar="FILE",
                      help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')

    parser.add_option('-g', '--goanna-for-tests',
                      dest='goanna_for_tests',
                      metavar=False,
                      action="store_true",
                      help='Run Goanna static analysis tool for tests')

    parser.add_option('-G', '--goanna-for-sdk',
                      dest='goanna_for_mbed_sdk',
                      metavar=False,
                      action="store_true",
                      help='Run Goanna static analysis tool for mbed SDK')

    parser.add_option('-s', '--suppress-summary',
                      dest='suppress_summary',
                      default=False,
                      action="store_true",
                      help='Suppresses display of well-formatted table with test results')

    parser.add_option('-r', '--test-automation-report',
                      dest='test_automation_report',
                      default=False,
                      action="store_true",
                      help='Prints information about all tests and exits')

    parser.add_option('-P', '--only-peripheral',
                      dest='test_only_peripheral',
                      default=False,
                      action="store_true",
                      help='Test only peripherals declared for MUT and skip common tests')

    parser.add_option('-v', '--verbose',
                      dest='verbose',
                      default=False,
                      action="store_true",
                      help='Verbose mode (prints some extra information)')

    parser.description = """This script allows you to run mbed defined test cases for particular MCU(s) and corresponding toolchain(s)."""
    parser.epilog = """Example: singletest.py -i test_spec.json -M muts_all.json"""

    (opts, args) = parser.parse_args()

    # Print summary / information about automation test status
    if opts.test_automation_report:
        get_result_summary_table()
        exit(0)

    # Open file with test specification
    # test_spec_filename tells the script which targets and their toolchain(s)
    # should be covered by the test scenario
    test_spec = get_json_data_from_file(opts.test_spec_filename, opts.verbose) if opts.test_spec_filename else None
    if test_spec is None:
        parser.print_help()
        exit(-1)

    # Get extra MUTs if applicable
    if opts.muts_spec_filename:
        MUTs = get_json_data_from_file(opts.muts_spec_filename, opts.verbose)
        if MUTs is None:
            parser.print_help()
            exit(-1)

    # Magic happens here... ;)
    start = time()
    single_test = SingleTestRunner()

    clean = test_spec.get('clean', False)
    test_ids = test_spec.get('test_ids', [])
    groups = test_spec.get('test_groups', [])

    # Here we store test results
    test_summary = []

    for target, toolchains in test_spec['targets'].iteritems():
        for toolchain in toolchains:
            # print '=== %s::%s ===' % (target, toolchain)
            # Let's build our test
            T = TARGET_MAP[target]
            build_mbed_libs_options = ["analyze"] if opts.goanna_for_mbed_sdk else None
            build_mbed_libs(T, toolchain, options=build_mbed_libs_options)
            build_dir = join(BUILD_DIR, "test", target, toolchain)

            for test_id, test in TEST_MAP.iteritems():
                if test_ids and test_id not in test_ids:
                    continue

                if opts.test_only_peripheral and not test.peripherals:
                    if opts.verbose:
                        print "TargetTest::%s::NotPeripheralTestSkipped()" % (target)
                    continue

                if test.automated and test.is_supported(target, toolchain):
                    if not is_peripherals_available(target, test.peripherals):
                        if opts.verbose:
                            print "TargetTest::%s::TestSkipped(%s)" % (target, ",".join(test.peripherals))
                        continue

                    test_result = {
                        'target': target,
                        'toolchain': toolchain,
                        'test_id': test_id,
                    }

                    build_project_options = ["analyze"] if opts.goanna_for_tests else None
                    path = build_project(test.source_dir, join(build_dir, test_id),
                                         T, toolchain, test.dependencies, options=build_project_options,
                                         clean=clean, verbose=opts.verbose)

                    test_result_cache = join(dirname(path), "test_result.json")

                    # For an automated test the duration acts as a timeout after
                    # which the test gets interrupted
                    test_spec = shape_test_request(target, path, test_id, test.duration)
                    single_test_result = single_test.handle(test_spec, target, toolchain)
                    test_summary.append(single_test_result)
                    # print test_spec, target, toolchain

    elapsed_time = time() - start

    # Human readable summary
    if not opts.suppress_summary:
        result_dict = {single_test.TEST_RESULT_OK    : 0,
                       single_test.TEST_RESULT_FAIL  : 0,
                       single_test.TEST_RESULT_ERROR : 0,
                       single_test.TEST_RESULT_UNDEF : 0}

        print
        print "Test summary:"
        # Pretty table package is used to print results
        pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
                          "Elapsed Time (sec)", "Timeout (sec)"])
        pt.align["Result"] = "l" # Left align
        pt.align["Target"] = "l" # Left align
        pt.align["Toolchain"] = "l" # Left align
        pt.align["Test ID"] = "l" # Left align
        pt.align["Test Description"] = "l" # Left align
        pt.padding_width = 1 # One space between column edges and contents (default)

        for test in test_summary:
            if test[0] in result_dict:
                result_dict[test[0]] += 1
            pt.add_row(test)
        print pt

        # Print result count
        print "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.iteritems()])
        # print result_dict
    print "Completed in %d sec" % (time() - start)
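
# The final "Result:" line aggregates the non-zero result counters, e.g.
# (counts are illustrative): Result: 14 OK / 2 FAIL / 1 UNDEF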