Preliminary Buildbot configuration settings. Also added the 'singletest.py' runner to perform tests on a single buildbot slave (not thread-safe)

pull/190/head
Przemek Wirkus 2014-02-24 10:49:22 +00:00
parent b4aaab2958
commit 37e1b85540
2 changed files with 681 additions and 0 deletions

@@ -0,0 +1,377 @@
# -*- python -*-
# ex: set syntax=python:
# This is a sample buildmaster config file. It must be installed as
# 'master.cfg' in your buildmaster's base directory.
# This is the dictionary that the buildmaster pays attention to. We also use
# a shorter alias to save typing.
c = BuildmasterConfig = {}
####### BUILDSLAVES
# The 'slaves' list defines the set of recognized buildslaves. Each element is
# a BuildSlave object, specifying a unique slave name and password. The same
# slave name and password must be configured on the slave.
from buildbot.buildslave import BuildSlave
c['slaves'] = [BuildSlave("example-slave", "pass"),
BuildSlave("example-slave-2", "pass"),
BuildSlave("example-slave-KL25Z", "pass"),
BuildSlave("example-slave-LPC1768", "pass"),
BuildSlave("example-slave-LPC11U24", "pass"),
]
# 'slavePortnum' defines the TCP port to listen on for connections from slaves.
# This must match the value configured into the buildslaves (with their
# --master option)
c['slavePortnum'] = 9989
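# For reference, each buildslave is created on its own host with the
# 'buildslave' tool from buildbot-slave 0.8.x (paths here are illustrative):
#   buildslave create-slave /home/buildbot/slave <master-host>:9989 example-slave pass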
####### OFFICIAL_MBED_LIBRARY_BUILD
OFFICIAL_MBED_LIBRARY_BUILD = (
('LPC1768', ('ARM', 'GCC_ARM', 'GCC_CR', 'GCC_CS', 'IAR')),
('KL05Z', ('ARM', 'uARM', 'GCC_ARM')),
('KL25Z', ('ARM', 'GCC_ARM')),
('LPC11U24', ('ARM', 'uARM')),
('KL46Z', ('ARM', 'GCC_ARM')),
('LPC4088', ('ARM', 'GCC_ARM', 'GCC_CR')),
('LPC1347', ('ARM',)),
('LPC1549', ('uARM',)),
('LPC2368', ('ARM',)),
('LPC812', ('uARM',)),
('LPC11U35_401', ('ARM', 'uARM')),
('LPC1114', ('uARM',)),
('NUCLEO_F103RB', ('ARM', 'uARM')),
('NUCLEO_L152RE', ('ARM', 'uARM')),
('NUCLEO_F401RE', ('ARM', 'uARM')),
('NUCLEO_F030R8', ('ARM', 'uARM')),
('UBLOX_C027', ('ARM', 'GCC_ARM', 'GCC_CR', 'GCC_CS', 'IAR')),
# ('NRF51822', ('ARM',)),
)
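# Each entry above pairs a target name with the tuple of toolchains it is
# built with; the scheduler and builder loops below iterate over this table.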
# Which hardware platforms are supported for target testing
OFFICIAL_MBED_TESTBED_SUPPORTED_HARDWARE = (
# 'KL25Z',
# 'LPC1768',
# 'LPC11U24',
)
####### CHANGESOURCES
# the 'change_source' setting tells the buildmaster how it should find out
# about source code changes. The sample pyflakes poller is kept below, disabled.
from buildbot.changes.gitpoller import GitPoller
c['change_source'] = []
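# A poller for the mbed repository itself could look like this sketch
# (disabled; the 5-minute interval is an assumption), similar to the
# pyflakes sample kept below:
# c['change_source'].append(GitPoller(
#     'https://github.com/mbedmicro/mbed.git',
#     workdir='gitpoller-workdir', branch='master',
#     pollinterval=300))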
"""
c['change_source'].append(GitPoller(
'git://github.com/buildbot/pyflakes.git',
workdir='gitpoller-workdir', branch='master',
pollinterval=300))
"""
####### SCHEDULERS
# Configure the Schedulers, which decide how to react to incoming changes.
# Here a single ForceScheduler covers the release builder and one builder per target.
from buildbot.schedulers.basic import SingleBranchScheduler
from buildbot.schedulers.forcesched import ForceScheduler
from buildbot.changes import filter
c['schedulers'] = []
# Collect builder names: one "All_TC_<target>" builder per target plus the release builder
release_builder_name = "BuildRelease"
builder_names = [release_builder_name]
for target_name, toolchains in OFFICIAL_MBED_LIBRARY_BUILD:
builder_name = "All_TC_%s" % target_name
builder_names.append(builder_name)
c['schedulers'].append(ForceScheduler(name="force", builderNames=builder_names))
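# SingleBranchScheduler and filter are imported above but not wired up yet;
# a change-driven scheduler for the release builder might look like this
# sketch (assumes a change source such as a GitPoller is enabled):
# c['schedulers'].append(SingleBranchScheduler(
#     name="all",
#     change_filter=filter.ChangeFilter(branch='master'),
#     treeStableTimer=None,
#     builderNames=[release_builder_name]))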
####### BUILDERS
# The 'builders' list defines the Builders, which tell Buildbot how to perform a build:
# what steps, and which slaves can execute them. Note that any particular build will
# only take place on one slave.
from buildbot.process.factory import BuildFactory
from buildbot.steps.source.git import Git
from buildbot.steps.shell import ShellCommand
from buildbot.process.buildstep import LogLineObserver
import buildbot.status.results
import re
class TestCommand(ShellCommand):
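    """ShellCommand wrapper for singletest.py: a UnitTestsObserver attached
    to stdio tallies OK/FAIL/ERROR/UNDEF results and Python tracebacks."""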
failedTestsCount = 0 # FAIL
passedTestsCount = 0 # OK
errorsTestsCount = 0 # ERROR
undefsTestsCount = 0 # UNDEF
testsResults = []
    def __init__(self, stage=None, module=None, moduleset=None, **kwargs):
ShellCommand.__init__(self, **kwargs)
        self.failedTestsCount = 0
        self.passedTestsCount = 0
        self.errorsTestsCount = 0
        self.undefsTestsCount = 0
        self.tracebackPyCount = 0
        self.testsResults = []
        testFailuresObserver = UnitTestsObserver()
        self.addLogObserver('stdio', testFailuresObserver)
def createSummary(self, log):
        if self.failedTestsCount > 0 or self.passedTestsCount > 0 or self.errorsTestsCount > 0 or self.undefsTestsCount > 0:
            self.addHTMLLog('tests summary', self.createTestsSummary())
def getText(self, cmd, results):
text = ShellCommand.getText(self, cmd, results)
text.append("OK: " + str(self.passedTestsCount))
text.append("FAIL: " + str(self.failedTestsCount))
text.append("ERROR: " + str(self.errorsTestsCount))
text.append("UNDEF: " + str(self.undefsTestsCount))
text.append("Traceback: " + str(self.tracebackPyCount))
return text
def evaluateCommand(self, cmd):
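        # Failing test cases only downgrade the build to WARNINGS; harness
        # problems (ERROR/UNDEF results or Python tracebacks) fail it outright.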
if self.failedTestsCount > 0:
return buildbot.status.results.WARNINGS
elif self.errorsTestsCount > 0 or self.undefsTestsCount > 0 or self.tracebackPyCount > 0:
return buildbot.status.results.FAILURE
return buildbot.status.results.SUCCESS
    def createTestsSummary(self):
        # Build an HTML report; each row is colored according to test status.
        html = "<h4>Report</h4><table>"
        for result in self.testsResults:
            if result:
                red = '#F78181'
                green = '#D0FA58'
                orange = '#FAAC58'
                status = result[4]  # fifth regex group: OK/FAIL/ERROR/UNDEF
                if status == 'FAIL':
                    bgcolor = red
                elif status in ('ERROR', 'UNDEF'):
                    bgcolor = orange
                else:
                    bgcolor = green
                html += "<tr bgcolor='" + bgcolor + "'>"
                for r in result:
                    html += "<td>" + str(r) + "</td>"
                html += "</tr>"
        html += "</table>"
return html
class UnitTestsObserver(LogLineObserver):
reGroupTestResult = []
reGroupPyResult = []
def __init__(self):
LogLineObserver.__init__(self)
if len(self.reGroupTestResult) == 0:
            # self.reGroupTestResult.append(re.compile(r"^\{u?'test_id': u?'(\w+)', u?'toolchain': u?'(\w+)', u?'target': u?'(\w+)', u?'result': u?'([\{\}\w]+)'\}[\r\n]*$"))
            self.reGroupTestResult.append(re.compile(r"^(\w+Test)::(\w+)::(\w+)::(\w+)::.* \[(\w+)\] in (\d+) of (\d+) sec[\r\n]*$"))
            self.reGroupPyResult.append(re.compile(r"^Traceback \(most recent call last\):[\r\n]*$"))
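            # The first pattern matches result lines printed by singletest.py's
            # print_test_result(), e.g. (illustrative values):
            #   TargetTest::LPC11U24::uARM::MBED_A1::Basic [OK] in 2 of 10 sec
            # The second pattern catches Python tracebacks in the test output.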
def outLineReceived(self, line):
matched = False
for r in self.reGroupTestResult:
result = r.match(line)
if result:
self.step.testsResults.append(result.groups())
if result.group(5) == 'OK':
self.step.passedTestsCount += 1
elif result.group(5) == 'FAIL':
self.step.failedTestsCount += 1
elif result.group(5) == 'UNDEF':
self.step.undefsTestsCount += 1
elif result.group(5) == 'ERROR':
self.step.errorsTestsCount += 1
matched = True
if not matched:
for r in self.reGroupPyResult:
result = r.match(line)
if result:
self.step.tracebackPyCount += 1
matched = True
class BuildCommand(ShellCommand):
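    """ShellCommand wrapper for build.py: a BuildObserver attached to stdio
    counts [Warning] and [Error] lines in the compiler output."""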
warningsCount = 0 # [Warning]
errorsCount = 0 # [Error]
testsResults = []
    def __init__(self, stage=None, module=None, moduleset=None, **kwargs):
ShellCommand.__init__(self, **kwargs)
self.warningsCount = 0
self.errorsCount = 0
self.testsResults = []
        buildProcessObserver = BuildObserver()
        self.addLogObserver('stdio', buildProcessObserver)
def createSummary(self, log):
        if self.warningsCount > 0 or self.errorsCount > 0:
            self.addHTMLLog('build summary', self.createTestsSummary())
def getText(self, cmd, results):
text = ShellCommand.getText(self, cmd, results)
if self.warningsCount > 0 or self.errorsCount > 0:
text.append("warnings: " + str(self.warningsCount))
text.append("errors: " + str(self.errorsCount))
return text
def evaluateCommand(self, cmd):
if self.warningsCount > 0:
return buildbot.status.results.WARNINGS
elif self.errorsCount > 0:
return buildbot.status.results.FAILURE
else:
return buildbot.status.results.SUCCESS
    def createTestsSummary(self):
        # Build an HTML report listing each captured [Warning]/[Error] line.
        html = "<h4>Report</h4><table>"
        for result in self.testsResults:
            html += "<tr>"
            for r in result:
                html += "<td>" + str(r) + "</td>"
            html += "</tr>"
        html += "</table>"
        return html
class BuildObserver(LogLineObserver):
regroupresult = []
def __init__(self):
LogLineObserver.__init__(self)
if len(self.regroupresult) == 0:
            self.regroupresult.append(re.compile(r"^\[([Ww]arning)\] (.*)"))
            self.regroupresult.append(re.compile(r"^\[([Ee]rror)\] (.*)"))
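            # Matches diagnostic lines prefixed by the mbed build scripts,
            # e.g. (illustrative): "[Warning] main.cpp@42: unused variable"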
def outLineReceived(self, line):
matched = False
for r in self.regroupresult:
result = r.match(line)
if result:
self.step.testsResults.append(result.groups())
                if result.group(1).lower() == 'warning':
                    self.step.warningsCount += 1
                elif result.group(1).lower() == 'error':
                    self.step.errorsCount += 1
matched = True
#if not matched:
# [Future-Dev] Other check...
####### BUILDERS - mbed project
git_clone = Git(repourl='https://github.com/mbedmicro/mbed.git', mode='incremental')
# create the build factory for mbed and add the steps to it
from buildbot.config import BuilderConfig
c['builders'] = []
copy_private_settings = ShellCommand(name = "copy private_settings.py",
command = "cp ../private_settings.py workspace_tools/private_settings.py",
haltOnFailure = True,
description = "Copy private_settings.py")
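# private_settings.py is expected one directory above the checkout; judging by
# how singletest.py consumes it, it provides per-slave settings such as the
# MUTs dictionary mapping ids to {'mcu': ..., 'port': ..., 'disk': ...}
# (a sketch; exact contents depend on the slave).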
mbed_build_release = BuildFactory()
mbed_build_release.addStep(git_clone)
mbed_build_release.addStep(copy_private_settings)
for target_name, toolchains in OFFICIAL_MBED_LIBRARY_BUILD:
builder_name = "All_TC_%s" % target_name
mbed_build = BuildFactory()
mbed_build.addStep(git_clone)
mbed_build.addStep(copy_private_settings)
# Adding all chains for target
for toolchain in toolchains:
build_py = BuildCommand(name = "Build %s using %s" % (target_name, toolchain),
command = "python workspace_tools/build.py -m %s -t %s" % (target_name, toolchain),
haltOnFailure = True,
warnOnWarnings = True,
description = "Building %s using %s" % (target_name, toolchain),
descriptionDone = "Built %s using %s" % (target_name, toolchain))
mbed_build.addStep(build_py)
mbed_build_release.addStep(build_py) # For build release we need all toolchains
if target_name in OFFICIAL_MBED_TESTBED_SUPPORTED_HARDWARE:
copy_example_test_spec_json = ShellCommand(name = "Copy example_test_spec.json",
command = "cp ../example_test_spec.json workspace_tools/data/example_test_spec.json",
haltOnFailure = True,
description = "Copy example_test_spec.json")
autotest_py = ShellCommand(name = "Running autotest.py for %s" % (target_name),
command = "python workspace_tools/autotest.py workspace_tools/data/example_test_spec.json",
haltOnFailure = True,
description = "Running autotest.py")
mbed_build.addStep(copy_example_test_spec_json)
mbed_build.addStep(autotest_py)
# Add builder with steps for each toolchain
c['builders'].append(BuilderConfig(name=builder_name,
slavenames=["example-slave-%s" % (target_name)],
factory=mbed_build))
else:
# Add builder with steps for each toolchain
c['builders'].append(BuilderConfig(name=builder_name,
slavenames=["example-slave"],
factory=mbed_build))
# copy_example_test_spec_json = ShellCommand(name = "Copy example_test_spec.json",
# command = "cp ../example_test_spec.json workspace_tools/data/example_test_spec.json",
# haltOnFailure = True,
# description = "Copy example_test_spec.json")
singletest_py = TestCommand(name = "Running Target Tests",
command = "python workspace_tools/singletest.py",
haltOnFailure = True,
warnOnWarnings = True,
description = "Running Target Tests",
descriptionDone = "Target Testing Finished")
mbed_build_release.addStep(singletest_py)
# Release build collects all building toolchains
c['builders'].append(BuilderConfig(name=release_builder_name,
slavenames=["example-slave"],
factory=mbed_build_release))
####### STATUS TARGETS
# 'status' is a list of Status Targets. The results of each build will be
# pushed to these targets. buildbot/status/*.py has a variety to choose from,
# including web pages, email senders, and IRC bots.
c['status'] = []
from buildbot.status import html
from buildbot.status.web import authz, auth
authz_cfg=authz.Authz(
# change any of these to True to enable; see the manual for more
# options
auth=auth.BasicAuth([("pyflakes","pyflakes")]),
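    # ("pyflakes", "pyflakes") is the upstream sample-config default;
    # replace it before exposing the web status.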
gracefulShutdown = False,
forceBuild = 'auth', # use this to test your slave once it is set up
forceAllBuilds = True,
pingBuilder = True,
stopBuild = True,
stopAllBuilds = True,
cancelPendingBuild = True,
)
c['status'].append(html.WebStatus(http_port=8010, authz=authz_cfg, order_console_by_time=True))
####### PROJECT IDENTITY
# the 'title' string will appear at the top of this buildbot
# installation's html.WebStatus home page (linked to the
# 'titleURL') and is embedded in the title of the waterfall HTML page.
c['title'] = "Green Tea"
c['titleURL'] = ""
# the 'buildbotURL' string should point to the location where the buildbot's
# internal web server (usually the html.WebStatus page) is visible. This
# typically uses the port number set in the Waterfall 'status' entry, but
# with an externally-visible host name which the buildbot cannot figure out
# without some help.
c['buildbotURL'] = "http://localhost:8010/"
####### DB URL
c['db'] = {
# This specifies what database buildbot uses to store its state. You can leave
# this at its default for all but the largest installations.
'db_url' : "sqlite:///state.sqlite",
}

@@ -0,0 +1,304 @@
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
Usage:
1. Update your private_settings.py with all MUTs you can possibly connect.
   Make sure the MCU, port, and serial names are entered correctly.
2. Update test_spec dictionary in __main__ section.
Example 1:
In the example below only the LPC11U24 target will be tested, and the
test will be built using only the uARM toolchain. Note that the other
targets are commented out.
Uncomment them or add your own targets as needed.
test_spec = {
"targets": {
# "KL25Z": ["ARM", "GCC_ARM"],
# "LPC1768": ["ARM", "GCC_ARM", "GCC_CR", "GCC_CS", "IAR"],
"LPC11U24": ["uARM"]
# "NRF51822": ["ARM"]
# "NUCLEO_F103RB": ["ARM"]
}
}
"""
import sys
import json
from prettytable import PrettyTable
from serial import Serial
from os.path import join, abspath, dirname, exists
from shutil import copy
from subprocess import call
from time import sleep, time
# Be sure that the tools directory is in the search path
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
from workspace_tools.build_api import build_project, build_mbed_libs
# from workspace_tools.tests import TEST_MAP, GROUPS
from workspace_tools.paths import BUILD_DIR
from workspace_tools.targets import TARGET_MAP
from workspace_tools.utils import delete_dir_files
from workspace_tools.settings import *
from workspace_tools.tests import *
class SingleTestRunner():
""" Object wrapper for single test run which may involve multiple MUTs."""
def __init__(self):
pass
def reset(self, mcu_name, serial, verbose=False):
"""
        Resets the target using a method appropriate to its type: vendor
        tools for Nordic and ST boards, a serial break otherwise.
"""
verbose_msg = "Reset::cmd(sendBreak)"
if mcu_name.startswith('NRF51822'): # Nordic
call(["nrfjprog", "-r"])
verbose_msg = "Reset::cmd(nrfjprog)"
elif mcu_name.startswith('NUCLEO'): # ST NUCLEO
call(["ST-LINK_CLI.exe", "-Rst"])
verbose_msg = "Reset::cmd(ST-LINK_CLI.exe)"
else:
serial.sendBreak()
if verbose:
print verbose_msg
    def flush_serial(self, serial):
        """ Flush the serial input and output buffers. """
serial.flushInput()
serial.flushOutput()
def run_host_test(self, name, target_name, disk, port,
duration, extra_serial, verbose=True):
"""
        Resets the target, then polls its serial port for the test log until
        the '{end}' token appears or 'duration' seconds elapse.
        Assumes the target is already flashed with the proper 'test' binary.
"""
output = ""
# Prepare serial for receiving data from target
        baud = 9600
        serial = Serial(port, baudrate=baud, timeout=1)
self.flush_serial(serial)
        # Reset the target and start polling its output
self.reset(target_name, serial, verbose=verbose)
start = time()
try:
while (time() - start) < duration:
test_output = serial.read(512)
output += test_output
self.flush_serial(serial)
if '{end}' in output:
break
except KeyboardInterrupt, _:
print "CTRL+C break"
self.flush_serial(serial)
serial.close()
# Handle verbose mode
if verbose:
print "Test::Output::Start"
print output
print "Test::Output::Finish"
# Parse test 'output' data
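        # Test binaries signal their status by printing {success}, {failure}
        # or {error}, and terminate their output with {end}.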
result = "UNDEF"
for line in output.splitlines():
if '{success}' in line: result = "OK"
if '{failure}' in line: result = "FAIL"
if '{error}' in line: result = "ERROR"
if '{end}' in line: break
return result
    def print_test_result(self, test_result, target_name, toolchain_name,
                          test_id, test_description, elapsed_time, duration):
        """ Formats the test result line using the fixed '::' separated
        convention; these lines are parsed by the buildmaster's log observer. """
tokens = []
tokens.append("TargetTest")
tokens.append(target_name)
tokens.append(toolchain_name)
tokens.append(test_id)
tokens.append(test_description)
separator = "::"
time_info = " in %d of %d sec" % (elapsed_time, duration)
result = separator.join(tokens) + " [" + test_result +"]" + time_info
return result
def handle(self, test_spec, target_name, toolchain_name):
"""
        Determines the MUT's mbed disk/port, copies the test binary to the
        target and then invokes the host test.
"""
data = json.loads(test_spec)
# Get test information, image and test timeout
test_id = data['test_id']
test = TEST_MAP[test_id]
test_description = TEST_MAP[test_id].get_description()
image = data["image"]
duration = data.get("duration", 10)
# Find a suitable MUT:
mut = None
        for _, m in MUTs.iteritems():
if m['mcu'] == data['mcu']:
mut = m
break
if mut is None:
print "Error: No mbed available: %s" % data['mcu']
return
disk = mut['disk']
port = mut['port']
extra_serial = mut.get('extra_serial', "")
target = TARGET_MAP[mut['mcu']]
# Program
# When the build and test system were separate, this was relative to a
# base network folder base path: join(NETWORK_BASE_PATH, )
image_path = image
        if not exists(image_path):
            print "Error: Image file does not exist: %s" % image_path
            elapsed_time = 0
            test_result = "{error}"
            return (test_result, target_name, toolchain_name,
                    test_id, test_description, round(elapsed_time, 2), duration)
if not target.is_disk_virtual:
delete_dir_files(disk)
# Program MUT with proper image file
copy(image_path, disk)
# Copy Extra Files
if not target.is_disk_virtual and test.extra_files:
for f in test.extra_files:
copy(f, disk)
sleep(target.program_cycle_s())
# Host test execution
start = time()
test_result = self.run_host_test(test.host_test, target_name, disk, port, duration, extra_serial)
elapsed_time = time() - start
print self.print_test_result(test_result, target_name, toolchain_name,
test_id, test_description, elapsed_time, duration)
return (test_result, target_name, toolchain_name,
test_id, test_description, round(elapsed_time, 2), duration)
def shape_test_request(mcu, image_path, test_id, duration=10):
    """ Prepares a JSON structure describing the test specification. """
test_spec = {
"mcu": mcu,
"image": image_path,
"duration": duration,
"test_id": test_id,
}
return json.dumps(test_spec)
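# Example (illustrative values; key order in the JSON dump is arbitrary):
#   shape_test_request("LPC11U24", "build/test/LPC11U24/uARM/basic.bin", "MBED_A1")
#   -> '{"mcu": "LPC11U24", "image": "build/test/...", "duration": 10, "test_id": "MBED_A1"}'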
if __name__ == '__main__':
start = time()
single_test = SingleTestRunner()
# Below list tells script which targets and their toolchain(s)
# should be covered by the test scenario
test_spec = {
"targets": {
# "KL25Z": ["ARM", "GCC_ARM"],
# "LPC1768": ["ARM", "GCC_ARM", "GCC_CR", "GCC_CS", "IAR"],
"LPC11U24": ["uARM"]
# "NRF51822": ["ARM"]
# "NUCLEO_F103RB": ["ARM"]
}
}
clean = test_spec.get('clean', False)
test_ids = test_spec.get('test_ids', [])
groups = test_spec.get('test_groups', [])
# Here we store test results
test_summary = []
for target, toolchains in test_spec['targets'].iteritems():
for toolchain in toolchains:
# print '=== %s::%s ===' % (target, toolchain)
# Let's build our test
T = TARGET_MAP[target]
build_mbed_libs(T, toolchain)
build_dir = join(BUILD_DIR, "test", target, toolchain)
for test_id, test in TEST_MAP.iteritems():
if test_ids and test_id not in test_ids:
continue
if test.automated and test.is_supported(target, toolchain):
test_result = {
'target': target,
'toolchain': toolchain,
'test_id': test_id,
}
path = build_project(test.source_dir, join(build_dir, test_id), T, toolchain, test.dependencies, clean=clean, verbose=False)
if target.startswith('NRF51822'): # Nordic:
#Convert bin to Hex and Program nrf chip via jlink
print "NORDIC board"
# call(["nrfjprog.exe", "-e", "--program", path.replace(".bin", ".hex"), "--verify"])
test_result_cache = join(dirname(path), "test_result.json")
# For an automated test the duration act as a timeout after
# which the test gets interrupted
test_spec = shape_test_request(target, path, test_id, test.duration)
single_test_result = single_test.handle(test_spec, target, toolchain)
test_summary.append(single_test_result)
# print test_spec, target, toolchain
elapsed_time = time() - start
print
print "Test summary:"
# Pretty table package is used to print results
pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
"Elapsed Time (sec)", "Timeout (sec)"])
pt.align["Result"] = "l" # Left align
pt.align["Target"] = "l" # Left align
pt.align["Toolchain"] = "l" # Left align
pt.align["Test ID"] = "l" # Left align
pt.align["Test Description"] = "l" # Left align
pt.padding_width = 1 # One space between column edges and contents (default)
for test in test_summary:
pt.add_row(test)
print pt
print "Completed in %d sec" % (time() - start)