Merge pull request #3702 from bridadan/remove_old_directories

Remove old tool directories
pull/3734/head
Sam Grove 2017-02-09 09:39:25 -06:00 committed by GitHub
commit 942cf8cbf0
2 changed files with 0 additions and 704 deletions


@@ -1,406 +0,0 @@
# -*- python -*-
# ex: set syntax=python:
# This is a sample buildmaster config file. It must be installed as
# 'master.cfg' in your buildmaster's base directory.
# This is the dictionary that the buildmaster pays attention to. We also use
# a shorter alias to save typing.
c = BuildmasterConfig = {}
####### BUILDSLAVES
# The 'slaves' list defines the set of recognized buildslaves. Each element is
# a BuildSlave object, specifying a unique slave name and password. The same
# slave name and password must be configured on the slave.
from buildbot.buildslave import BuildSlave
c['slaves'] = [BuildSlave("example-slave", "pass"),
               BuildSlave("example-slave-2", "pass"),
               BuildSlave("example-slave-KL25Z", "pass"),
               BuildSlave("example-slave-LPC1768", "pass"),
               BuildSlave("example-slave-LPC11U24", "pass"),
               ]
# 'slavePortnum' defines the TCP port to listen on for connections from slaves.
# This must match the value configured into the buildslaves (with their
# --master option)
c['slavePortnum'] = 9989
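# For example, a slave created with (illustrative values only):
#   buildslave create-slave BASEDIR localhost:9989 example-slave pass
# would connect as the first entry in c['slaves'] above.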
####### OFFICIAL_MBED_LIBRARY_BUILD
OFFICIAL_MBED_LIBRARY_BUILD = (
    ('LPC1768', ('ARM', 'GCC_ARM', 'GCC_CR', 'IAR')),
    ('KL05Z', ('ARM', 'uARM', 'GCC_ARM')),
    ('KL25Z', ('ARM', 'GCC_ARM')),
    ('LPC11U24', ('ARM', 'uARM')),
    ('KL46Z', ('ARM', 'GCC_ARM')),
    ('LPC4088', ('ARM', 'GCC_ARM', 'GCC_CR')),
    ('LPC1347', ('ARM',)),
    ('LPC1549', ('uARM',)),
    ('LPC2368', ('ARM',)),
    ('LPC812', ('uARM',)),
    ('LPC11U35_401', ('ARM', 'uARM')),
    ('LPC1114', ('uARM',)),
    ('NUCLEO_F103RB', ('ARM', 'uARM')),
    ('NUCLEO_L152RE', ('ARM', 'uARM')),
    ('NUCLEO_F401RE', ('ARM', 'uARM')),
    ('NUCLEO_F030R8', ('ARM', 'uARM')),
    ('UBLOX_C027', ('ARM', 'GCC_ARM', 'GCC_CR', 'IAR')),
    # ('NRF51822', ('ARM',)),
)
# Which hardware platforms are supported for target testing
OFFICIAL_MBED_TESTBED_SUPPORTED_HARDWARE = (
    # 'KL25Z',
    # 'LPC1768',
    # 'LPC11U24',
)
####### CHANGESOURCES
# the 'change_source' setting tells the buildmaster how it should find out
# about source code changes. The disabled sample below points to the buildbot
# clone of pyflakes.
from buildbot.changes.gitpoller import GitPoller
c['change_source'] = []
"""
c['change_source'].append(GitPoller(
    'git://github.com/buildbot/pyflakes.git',
    workdir='gitpoller-workdir', branch='master',
    pollinterval=300))
"""
####### SCHEDULERS
# Configure the Schedulers, which decide how to react to incoming changes. In
# this sample, builds are only started on demand via the ForceScheduler below.
from buildbot.schedulers.basic import SingleBranchScheduler
from buildbot.schedulers.forcesched import ForceScheduler
from buildbot.changes import filter
c['schedulers'] = []
# Create builders to generate one target using all assigned toolchains
release_builder_name = "BuildRelease"
builder_names = [release_builder_name]
for target_name, toolchains in OFFICIAL_MBED_LIBRARY_BUILD:
    builder_name = "All_TC_%s" % target_name
    builder_names.append(builder_name)
c['schedulers'].append(ForceScheduler(name="force", builderNames=builder_names))
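# SingleBranchScheduler and filter are imported above but unused here; a
# minimal sketch (following the stock buildbot sample config) of how a change
# source could trigger the release builder automatically:
# c['schedulers'].append(SingleBranchScheduler(
#     name="all",
#     change_filter=filter.ChangeFilter(branch='master'),
#     treeStableTimer=None,
#     builderNames=[release_builder_name]))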
####### BUILDERS
# The 'builders' list defines the Builders, which tell Buildbot how to perform a build:
# what steps, and which slaves can execute them. Note that any particular build will
# only take place on one slave.
from buildbot.process.factory import BuildFactory
from buildbot.steps.source.git import Git
from buildbot.steps.shell import ShellCommand
from buildbot.process.buildstep import LogLineObserver
import buildbot.status.results
import re
import pprint
class TestCommand(ShellCommand):
    failedTestsCount = 0    # FAIL
    passedTestsCount = 0    # OK
    errorsTestsCount = 0    # ERROR
    undefsTestsCount = 0    # UNDEF
    testsResults = []

    def __init__(self, stage=None, module=None, moduleset=None, **kwargs):
        ShellCommand.__init__(self, **kwargs)
        self.failedTestsCount = 0
        self.passedTestsCount = 0
        self.errorsTestsCount = 0
        self.undefsTestsCount = 0
        self.tracebackPyCount = 0
        self.testsResults = []
        testFailuresObserver = UnitTestsObserver()
        self.addLogObserver('stdio', testFailuresObserver)

    def createSummary(self, log):
        if self.failedTestsCount >= 0 or self.passedTestsCount >= 0 or self.errorsTestsCount >= 0 or self.undefsTestsCount >= 0:
            self.addHTMLLog('tests summary', self.createTestsSummary())

    def getText(self, cmd, results):
        text = ShellCommand.getText(self, cmd, results)
        text.append("OK: " + str(self.passedTestsCount))
        text.append("FAIL: " + str(self.failedTestsCount))
        text.append("ERROR: " + str(self.errorsTestsCount))
        text.append("UNDEF: " + str(self.undefsTestsCount))
        text.append("Traceback: " + str(self.tracebackPyCount))
        return text

    def evaluateCommand(self, cmd):
        if self.failedTestsCount > 0:
            return buildbot.status.results.WARNINGS
        elif self.errorsTestsCount > 0 or self.undefsTestsCount > 0 or self.tracebackPyCount > 0:
            return buildbot.status.results.FAILURE
        return buildbot.status.results.SUCCESS

    def find_unique_tc_result_value(self, index):
        """Get the unique values found at 'index' in each row of self.testsResults."""
        result = []
        for tc_result_list in self.testsResults:
            if tc_result_list[index] not in result:
                result.append(tc_result_list[index])
        return result

    def html_view_test_result(self, targets, tests, toolchain):
        """Generate a simple HTML result table for one toolchain."""
        COLOR_OK = "LimeGreen"
        COLOR_FAIL = "LightCoral"
        COLOR_UNDEF = "LightSlateGray"
        COLOR_NEUTRAL = "Silver"
        STATUS_COLORS = {"OK": COLOR_OK,
                         "FAIL": COLOR_FAIL,
                         "UNDEF": COLOR_UNDEF}
        result = "<table>"
        result += "<tr valign='center'><td align='center'><b>" + toolchain + "</b></td>"
        for test in tests:
            result += "<td align='center'>" + test + "<br></td>"
        result += "</tr>"
        for target in targets:
            result += "<tr><td width='110px'><br>" + target + "<br></td>"
            for test in tests:
                for tc_result_list in self.testsResults:
                    if tc_result_list[1] == target and tc_result_list[2] == toolchain and tc_result_list[3] == test:
                        status = tc_result_list[4]
                        bgcolor = STATUS_COLORS[status]
                        result += "<td align='center' bgcolor='" + bgcolor + "'>" + status + "</td>"
                        break
                else:
                    result += "<td bgcolor='" + COLOR_NEUTRAL + "'></td>"
            result += "</tr>"
        result += "</table>"
        return result

    def createTestsSummary(self):
        targets = self.find_unique_tc_result_value(1)
        toolchains = self.find_unique_tc_result_value(2)
        tests = self.find_unique_tc_result_value(3)
        html_result = ""
        for toolchain in toolchains:
            html_result += self.html_view_test_result(targets, tests, toolchain)
            html_result += "<br>"
        return html_result
class UnitTestsObserver(LogLineObserver):
    reGroupTestResult = []
    reGroupPyResult = []

    def __init__(self):
        LogLineObserver.__init__(self)
        if len(self.reGroupTestResult) == 0:
            self.reGroupTestResult.append(re.compile("^(\w+Test)::(\w+)::(\w+)::(\w+)::.* \[(\w+)\] in (\d+\.\d+) of (\d+) sec[\r\n]*$"))
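            # A log line this regex is meant to match looks like (hypothetical
            # example, not taken from a real run):
            #   HostTest::LPC1768::ARM::MBED_A1::Basic test [OK] in 2.13 of 20 sec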
    def outLineReceived(self, line):
        matched = False
        for r in self.reGroupTestResult:
            result = r.match(line)
            if result:
                self.step.testsResults.append(result.groups())
                if result.group(5) == 'OK':
                    self.step.passedTestsCount += 1
                elif result.group(5) == 'FAIL':
                    self.step.failedTestsCount += 1
                elif result.group(5) == 'UNDEF':
                    self.step.undefsTestsCount += 1
                elif result.group(5) == 'ERROR':
                    self.step.errorsTestsCount += 1
                matched = True
class BuildCommand(ShellCommand):
    warningsCount = 0   # [Warning]
    errorsCount = 0     # [Error]
    testsResults = []

    def __init__(self, stage=None, module=None, moduleset=None, **kwargs):
        ShellCommand.__init__(self, **kwargs)
        self.warningsCount = 0
        self.errorsCount = 0
        self.testsResults = []
        buildProcessObserver = BuildObserver()
        self.addLogObserver('stdio', buildProcessObserver)

    def createSummary(self, log):
        if self.warningsCount >= 0 or self.errorsCount >= 0:
            self.addHTMLLog('tests summary', self.createTestsSummary())

    def getText(self, cmd, results):
        text = ShellCommand.getText(self, cmd, results)
        if self.warningsCount > 0 or self.errorsCount > 0:
            text.append("warnings: " + str(self.warningsCount))
            text.append("errors: " + str(self.errorsCount))
        return text

    def evaluateCommand(self, cmd):
        if self.warningsCount > 0:
            return buildbot.status.results.WARNINGS
        elif self.errorsCount > 0:
            return buildbot.status.results.FAILURE
        else:
            return buildbot.status.results.SUCCESS

    def createTestsSummary(self):
        # Create a string with your html report and return it
        html = "<h4>Report</h4><table>"
        # for result in self.testsResults:
        html += "</table>"
        return html
class BuildObserver(LogLineObserver):
    regroupresult = []

    def __init__(self):
        LogLineObserver.__init__(self)
        if len(self.regroupresult) == 0:
            self.regroupresult.append(re.compile("^\[([Ww]arning)\] (.*)"))
            self.regroupresult.append(re.compile("^\[([Ee]rror)\] (.*)"))
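            # Log lines these regexes are meant to match look like (hypothetical
            # examples, not taken from a real run):
            #   [Warning] main.cpp@45: unused variable 'x'
            #   [Error] main.cpp@102: 'foo' was not declared in this scope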
    def outLineReceived(self, line):
        matched = False
        for r in self.regroupresult:
            result = r.match(line)
            if result:
                self.step.testsResults.append(result.groups())
                if result.group(1) == 'Warning':
                    self.step.warningsCount += 1
                elif result.group(1) == 'Error':
                    self.step.errorsCount += 1
                matched = True
        # if not matched:
        #     [Future-Dev] Other check...
####### BUILDERS - mbed project
git_clone = Git(repourl='https://github.com/mbedmicro/mbed.git', mode='incremental')
# create the build factory for mbed and add the steps to it
from buildbot.config import BuilderConfig
c['builders'] = []
copy_mbed_settings = ShellCommand(name="copy mbed_settings.py",
                                  command="cp ../mbed_settings.py mbed_settings.py",
                                  haltOnFailure=True,
                                  description="Copy mbed_settings.py")
mbed_build_release = BuildFactory()
mbed_build_release.addStep(git_clone)
mbed_build_release.addStep(copy_mbed_settings)
for target_name, toolchains in OFFICIAL_MBED_LIBRARY_BUILD:
    builder_name = "All_TC_%s" % target_name
    mbed_build = BuildFactory()
    mbed_build.addStep(git_clone)
    mbed_build.addStep(copy_mbed_settings)
    # Add all toolchains assigned to this target
    for toolchain in toolchains:
        build_py = BuildCommand(name="Build %s using %s" % (target_name, toolchain),
                                command="python tools/build.py -m %s -t %s" % (target_name, toolchain),
                                haltOnFailure=True,
                                warnOnWarnings=True,
                                description="Building %s using %s" % (target_name, toolchain),
                                descriptionDone="Built %s using %s" % (target_name, toolchain))
        mbed_build.addStep(build_py)
        mbed_build_release.addStep(build_py)  # The release build needs all toolchains
    if target_name in OFFICIAL_MBED_TESTBED_SUPPORTED_HARDWARE:
        copy_example_test_spec_json = ShellCommand(name="Copy example_test_spec.json",
                                                   command="cp ../example_test_spec.json tools/data/example_test_spec.json",
                                                   haltOnFailure=True,
                                                   description="Copy example_test_spec.json")
        autotest_py = ShellCommand(name="Running autotest.py for %s" % (target_name),
                                   command="python tools/autotest.py tools/data/example_test_spec.json",
                                   haltOnFailure=True,
                                   description="Running autotest.py")
        mbed_build.addStep(copy_example_test_spec_json)
        mbed_build.addStep(autotest_py)
        # Add builder with steps for each toolchain
        c['builders'].append(BuilderConfig(name=builder_name,
                                           slavenames=["example-slave-%s" % (target_name)],
                                           factory=mbed_build))
    else:
        # Add builder with steps for each toolchain
        c['builders'].append(BuilderConfig(name=builder_name,
                                           slavenames=["example-slave"],
                                           factory=mbed_build))
# copy_example_test_spec_json = ShellCommand(name="Copy example_test_spec.json",
#                                            command="cp ../example_test_spec.json tools/data/example_test_spec.json",
#                                            haltOnFailure=True,
#                                            description="Copy example_test_spec.json")
singletest_py = TestCommand(name="Running Target Tests",
                            command="python tools/singletest.py -i tools/test_spec.json -M tools/muts_all.json",
                            haltOnFailure=True,
                            warnOnWarnings=True,
                            description="Running Target Tests",
                            descriptionDone="Target Testing Finished")
mbed_build_release.addStep(singletest_py)
# The release build collects the build steps for every target and toolchain
c['builders'].append(BuilderConfig(name=release_builder_name,
                                   slavenames=["example-slave"],
                                   factory=mbed_build_release))
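# With the OFFICIAL_MBED_LIBRARY_BUILD matrix above, this yields one
# "All_TC_<target>" builder per target (e.g. All_TC_LPC1768) plus the
# aggregate BuildRelease builder.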
####### STATUS TARGETS
# 'status' is a list of Status Targets. The results of each build will be
# pushed to these targets. buildbot/status/*.py has a variety to choose from,
# including web pages, email senders, and IRC bots.
c['status'] = []
from buildbot.status import html
from buildbot.status.web import authz, auth
authz_cfg = authz.Authz(
    # change any of these to True to enable; see the manual for more
    # options
    auth=auth.BasicAuth([("pyflakes", "pyflakes")]),
    gracefulShutdown=False,
    forceBuild='auth',  # use this to test your slave once it is set up
    forceAllBuilds=True,
    pingBuilder=True,
    stopBuild=True,
    stopAllBuilds=True,
    cancelPendingBuild=True,
)
c['status'].append(html.WebStatus(http_port=8010, authz=authz_cfg, order_console_by_time=True))
####### PROJECT IDENTITY
# the 'title' string will appear at the top of this buildbot
# installation's html.WebStatus home page (linked to the
# 'titleURL') and is embedded in the title of the waterfall HTML page.
c['title'] = "Green Tea"
c['titleURL'] = ""
# the 'buildbotURL' string should point to the location where the buildbot's
# internal web server (usually the html.WebStatus page) is visible. This
# typically uses the port number set in the Waterfall 'status' entry, but
# with an externally-visible host name which the buildbot cannot figure out
# without some help.
c['buildbotURL'] = "http://localhost:8010/"
####### DB URL
c['db'] = {
    # This specifies what database buildbot uses to store its state. You can
    # leave this at its default for all but the largest installations.
    'db_url': "sqlite:///state.sqlite",
    # 'db_url': "mysql://buildbot:123456@localhost/buildbot_mbed?max_idle=300",
}


@@ -1,298 +0,0 @@
#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from os import remove, rename
from os.path import join, dirname, exists, abspath
ROOT = abspath(join(dirname(__file__), "..", "..", ".."))
sys.path.insert(0, ROOT)
import argparse
import os
from argparse import ArgumentTypeError
from shutil import rmtree
from collections import namedtuple
from copy import copy
from tools.paths import EXPORT_DIR
from tools.tests import TESTS
from tools.build_api import get_mbed_official_release, RELEASE_VERSIONS
from tools.test_api import find_tests
from tools.project import export
from Queue import Queue
from threading import Thread, Lock
from tools.project_api import print_results, get_exporter_toolchain
from tools.tests import test_name_known, test_known
from tools.export import EXPORTERS
from tools.utils import argparse_force_lowercase_type, \
    argparse_many, columnate, args_error, \
    argparse_filestring_type
from tools.options import extract_profile
print_lock = Lock()
def do_queue(Class, function, iterable):
    q = Queue()
    threads = [Class(q, function) for each in range(20)]
    for thing in iterable:
        q.put(thing)
    for each in threads:
        each.setDaemon(True)
        each.start()
    q.join()
class Reader(Thread):
    def __init__(self, queue, func):
        Thread.__init__(self)
        self.queue = queue
        self.func = func

    # run (not start) so the loop executes in the worker thread
    def run(self):
        sys.stdout.flush()
        while not self.queue.empty():
            test = self.queue.get()
            self.func(test)
            self.queue.task_done()
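# A minimal usage sketch for do_queue/Reader (illustrative only): drain a few
# items through the 20-thread pool, printing each one.
# do_queue(Reader, lambda item: sys.stdout.write("%s\n" % item), range(5))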
class ExportBuildTest(object):
    """Object to encapsulate logic for progen build testing"""
    def __init__(self, tests, parser, options):
        """
        Initialize an instance of class ExportBuildTest
        Args:
            tests: array of TestCase instances
        """
        self.total = len(tests)
        self.parser = parser
        self.options = options
        self.counter = 0
        self.successes = []
        self.failures = []
        self.skips = []
        self.tests = [ExportBuildTest.test_case(test) for test in tests]
        self.build_queue = Queue()

    @staticmethod
    def test_case(case):
        TestCase = namedtuple('TestCase', case.keys())
        return TestCase(**case)
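    # e.g. (hypothetical): ExportBuildTest.test_case({'mcu': 'K64F', 'ide': 'iar',
    # 'name': None, 'id': None, 'src': None, 'log': None}) yields a TestCase
    # whose fields are attributes (result.mcu == 'K64F').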
    def handle_log(self, log):
        try:
            with open(log, 'r') as in_log:
                print in_log.read()
                sys.stdout.flush()
            log_name = join(EXPORT_DIR, dirname(log) + "_log.txt")
            if exists(log_name):
                # delete it if so
                remove(log_name)
            rename(log, log_name)
        except IOError:
            pass
    def batch_tests(self, clean=False):
        """Perform all exports of self.tests
        perform_exports will fill self.build_queue.
        This function will empty self.build_queue and call the test's
        IDE's build function."""
        do_queue(Reader, self.perform_exports, self.tests)
        self.counter = 0
        self.total = self.build_queue.qsize()
        while not self.build_queue.empty():
            build = self.build_queue.get()
            self.counter += 1
            exporter = build[0]
            test_case = build[1]
            self.display_counter("Building test case %s::%s\t%s"
                                 % (test_case.mcu,
                                    test_case.ide,
                                    test_case.name))
            cwd = os.getcwd()
            os.chdir(exporter.export_dir)
            res = EXPORTERS[exporter.NAME.lower()].build(exporter.project_name, cleanup=False)
            os.chdir(cwd)
            if res:
                self.failures.append("%s::%s\t%s" % (test_case.mcu,
                                                     test_case.ide,
                                                     test_case.name))
            else:
                self.successes.append("%s::%s\t%s" % (test_case.mcu,
                                                      test_case.ide,
                                                      test_case.name))
            self.handle_log(exporter.generated_files[-1])
            if clean:
                rmtree(exporter.export_dir)
    def display_counter(self, message):
        with print_lock:
            sys.stdout.write("{}/{} {}".format(self.counter, self.total,
                                               message) + "\n")
            sys.stdout.flush()
    def perform_exports(self, test_case):
        """
        Generate the project file for test_case and fill self.build_queue
        Args:
            test_case: object of type TestCase
        """
        sys.stdout.flush()
        self.counter += 1
        name_str = '%s_%s_%s' % (test_case.mcu, test_case.ide, test_case.name)
        self.display_counter("Exporting test case %s::%s\t%s" % (test_case.mcu,
                                                                 test_case.ide,
                                                                 test_case.name))
        exporter, toolchain = get_exporter_toolchain(test_case.ide)
        if test_case.mcu not in exporter.TARGETS:
            self.skips.append("%s::%s\t%s" % (test_case.mcu, test_case.ide,
                                              test_case.name))
            return
        profile = extract_profile(self.parser, self.options, toolchain)
        exporter = export(test_case.mcu, test_case.ide,
                          project_id=test_case.id, zip_proj=None,
                          src=test_case.src,
                          export_path=join(EXPORT_DIR, name_str),
                          silent=True, build_profile=profile)
        exporter.generated_files.append(join(EXPORT_DIR, name_str, test_case.log))
        self.build_queue.put((exporter, test_case))
def check_valid_mbed_os(test):
    """Check if the specified name is in all_os_tests
    args:
        test: string name to index all_os_tests
    returns: tuple of test_name and source location of test,
        as given by find_tests"""
    all_os_tests = find_tests(ROOT, "K64F", "ARM")
    if test in all_os_tests.keys():
        return (test, all_os_tests[test])
    else:
        supported = columnate([t for t in all_os_tests.keys()])
        raise ArgumentTypeError("Program with name '{0}' not found. "
                                "Supported tests are: \n{1}".format(test, supported))
def check_version(version):
    """Check if the specified version is valid
    args:
        version: integer version of mbed
    returns:
        version if it is valid"""
    if version not in RELEASE_VERSIONS:
        raise ArgumentTypeError("Choose from versions: %s" % ", ".join(RELEASE_VERSIONS))
    return version
def main():
    """Entry point"""
    ide_list = ["iar", "uvision"]
    default_v2 = [test_name_known("MBED_BLINKY")]
    default_v5 = [check_valid_mbed_os('tests-mbedmicro-rtos-mbed-basic')]
    parser = argparse.ArgumentParser(description=
                                     "Test progen builders. Leave any flag off"
                                     " to run with all possible options.")
    parser.add_argument("-i",
                        dest="ides",
                        default=ide_list,
                        type=argparse_many(argparse_force_lowercase_type(
                            ide_list, "toolchain")),
                        help="The target IDE: %s" % str(ide_list))
    parser.add_argument("-p",
                        type=argparse_many(test_known),
                        dest="programs",
                        help="The index of the desired test program: [0-%d]"
                             % (len(TESTS) - 1))
    parser.add_argument("-n",
                        type=argparse_many(test_name_known),
                        dest="programs",
                        help="The name of the desired test program")
    parser.add_argument("-m", "--mcu",
                        help="Generate projects for the given MCUs",
                        metavar="MCU",
                        type=argparse_many(str.upper))
    parser.add_argument("-os-tests",
                        type=argparse_many(check_valid_mbed_os),
                        dest="os_tests",
                        help="Mbed-os tests")
    parser.add_argument("-c", "--clean",
                        dest="clean",
                        action="store_true",
                        help="clean up the exported project files",
                        default=False)
    parser.add_argument("--release",
                        dest="release",
                        type=check_version,
                        help="Which version of mbed to test",
                        default=RELEASE_VERSIONS[-1])
    parser.add_argument("--profile",
                        dest="profile",
                        action="append",
                        type=argparse_filestring_type,
                        default=[])
    options = parser.parse_args()
    # targets in the chosen release
    targetnames = [target[0] for target in
                   get_mbed_official_release(options.release)]
    # all targets in the release are used by default
    test_targets = options.mcu or targetnames
    if not all([t in targetnames for t in test_targets]):
        args_error(parser, "Only specify targets in release %s:\n%s"
                   % (options.release, columnate(sorted(targetnames))))
    v2_tests, v5_tests = [], []
    if options.release == '5':
        v5_tests = options.os_tests or default_v5
    elif options.release == '2':
        v2_tests = options.programs or default_v2
    tests = []
    default_test = {key: None for key in ['ide', 'mcu', 'name', 'id', 'src', 'log']}
    for mcu in test_targets:
        for ide in options.ides:
            log = "build_log.txt" if ide == 'iar' \
                else join('build', 'build_log.txt')
            # add each test case to the tests array
            default_test.update({'mcu': mcu, 'ide': ide, 'log': log})
            for test in v2_tests:
                default_test.update({'name': TESTS[test]["id"], 'id': test})
                tests.append(copy(default_test))
            for test in v5_tests:
                default_test.update({'name': test[0], 'src': [test[1], ROOT]})
                tests.append(copy(default_test))
    test = ExportBuildTest(tests, parser, options)
    test.batch_tests(clean=options.clean)
    print_results(test.successes, test.failures, test.skips)
    sys.exit(len(test.failures))
if __name__ == "__main__":
    main()
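# Illustrative invocation (script name and installed IDE toolchains assumed):
#   python build_test.py -m K64F -i uvision --release 5 -c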