Merge pull request #5848 from theotherjimmy/compile-py3

Python 2+3: mbed compile, mbed test --compile, python unit tests
pull/6042/head
Cruz Monrreal 2018-02-07 15:48:18 -06:00 committed by GitHub
commit c679dee286
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
55 changed files with 543 additions and 445 deletions

View File

@ -10,7 +10,7 @@ env:
--data @- << DATA\n{
"state": "$0",
"description": "$1",
"context": "travis-ci/$NAME",
"context": "travis-ci/$NAME/$(python --version)",
"target_url": "https://travis-ci.org/$TRAVIS_REPO_SLUG/jobs/$TRAVIS_JOB_ID"
}\nDATA'
@ -73,6 +73,10 @@ matrix:
- env:
- NAME=tools
python:
- '2.7'
- '3.5'
- '3.6'
install:
# Install dependencies
- sudo apt-get install gcc-arm-embedded
@ -175,7 +179,7 @@ matrix:
- mkdir BUILD
script:
# Run local mbed 2 testing
- python2 -u tools/build_travis.py --vendor "${NAME#mbed2-}"
- python -u tools/build_travis.py --vendor "${NAME#mbed2-}"
- <<: *mbed-2
env: NAME=mbed2-STM
- <<: *mbed-2
@ -190,3 +194,8 @@ matrix:
env: NAME=mbed2-NUVOTON
- <<: *mbed-2
env: NAME=mbed2-RENESAS
# Change the Python version here only, because running the other jobs under three Python versions does not add any more coverage
python:
- '2.7'
- '3.5'
- '3.6'

View File

@ -1,10 +1,16 @@
from urllib2 import urlopen, URLError
try:
from urllib2 import urlopen, URLError
except ImportError:
from urllib.request import urlopen, URLError
from bs4 import BeautifulSoup
from os.path import join, dirname, basename
from os import makedirs
from errno import EEXIST
from threading import Thread
from Queue import Queue
try:
from Queue import Queue
except ImportError:
from queue import Queue
from re import compile, sub
from sys import stderr, stdout
from itertools import takewhile

View File

@ -1,3 +1,4 @@
from __future__ import print_function, division, absolute_import
import argparse
from os.path import basename
from tools.arm_pack_manager import Cache
@ -5,7 +6,7 @@ from os.path import basename, join, dirname, exists
from os import makedirs
from itertools import takewhile
from fuzzywuzzy import process
from tools.arm_pack_manager import Cache
from .arm_pack_manager import Cache
parser = argparse.ArgumentParser(description='A Handy little utility for keeping your cache of pack files up to date.')
subparsers = parser.add_subparsers(title="Commands")
@ -69,7 +70,7 @@ def fuzzy_find(matches, urls) :
for key, value in process.extract(match, urls, limit=None) :
choices.setdefault(key, 0)
choices[key] += value
choices = sorted([(v, k) for k, v in choices.iteritems()], reverse=True)
choices = sorted([(v, k) for k, v in choices.items()], reverse=True)
if not choices : return []
elif len(choices) == 1 : return [choices[0][1]]
elif choices[0][0] > choices[1][0] : choices = choices[:1]
@ -133,12 +134,12 @@ def command_find_part (cache, matches, long=False, intersection=True,
aliases = sum([fuzzy_find([m], cache.aliases.keys()) for m in matches], [])
if print_parts:
for part in choices :
print part
print(part)
if long :
pp.pprint(cache.index[part])
if print_aliases:
for alias in aliases :
print alias
print(alias)
if long :
pp.pprint(cache.index[cache.aliases[alias]])
@ -155,7 +156,7 @@ def command_dump_parts (cache, out, parts, intersection=False) :
else :
for part in parts :
index.update(dict(cache.find_device(part)))
for n, p in index.iteritems() :
for n, p in index.items() :
try :
if not exists(join(out, dirname(p['algorithm']['file']))) :
makedirs(join(out, dirname(p['algorithm']['file'])))

View File

@ -17,6 +17,8 @@ limitations under the License.
LIBRARIES BUILD
"""
from __future__ import print_function, division, absolute_import
import sys
from time import time
from os.path import join, abspath, dirname
@ -130,7 +132,7 @@ if __name__ == '__main__':
# Only prints matrix of supported toolchains
if options.supported_toolchains:
print mcu_toolchain_matrix(platform_filter=options.general_filter_regex)
print(mcu_toolchain_matrix(platform_filter=options.general_filter_regex))
exit(0)
@ -184,7 +186,7 @@ if __name__ == '__main__':
tt_id = "%s::%s" % (toolchain, target)
if toolchain not in TARGET_MAP[target].supported_toolchains:
# Log this later
print "%s skipped: toolchain not supported" % tt_id
print("%s skipped: toolchain not supported" % tt_id)
skipped.append(tt_id)
else:
try:
@ -224,26 +226,24 @@ if __name__ == '__main__':
successes.append(tt_id)
else:
skipped.append(tt_id)
except Exception, e:
except Exception as e:
if options.verbose:
import traceback
traceback.print_exc(file=sys.stdout)
sys.exit(1)
failures.append(tt_id)
print e
print(e)
# Write summary of the builds
print
print "Completed in: (%.2f)s" % (time() - start)
print
print("\nCompleted in: (%.2f)s\n" % (time() - start))
for report, report_name in [(successes, "Build successes:"),
(skipped, "Build skipped:"),
(failures, "Build failures:"),
]:
if report:
print print_build_results(report, report_name),
print(print_build_results(report, report_name))
if failures:
sys.exit(1)

View File

@ -14,12 +14,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function, division, absolute_import
import re
import tempfile
import datetime
import uuid
from types import ListType
from shutil import rmtree
from os.path import join, exists, dirname, basename, abspath, normpath, splitext
from os.path import relpath
@ -27,20 +27,22 @@ from os import linesep, remove, makedirs
from time import time
from intelhex import IntelHex
from json import load, dump
from tools.arm_pack_manager import Cache
from tools.utils import mkdir, run_cmd, run_cmd_ext, NotSupportedException,\
ToolException, InvalidReleaseTargetException, intelhex_offset
from tools.paths import MBED_CMSIS_PATH, MBED_TARGETS_PATH, MBED_LIBRARIES,\
MBED_HEADER, MBED_DRIVERS, MBED_PLATFORM, MBED_HAL, MBED_CONFIG_FILE,\
MBED_LIBRARIES_DRIVERS, MBED_LIBRARIES_PLATFORM, MBED_LIBRARIES_HAL,\
BUILD_DIR
from tools.targets import TARGET_NAMES, TARGET_MAP
from tools.libraries import Library
from tools.toolchains import TOOLCHAIN_CLASSES
from jinja2 import FileSystemLoader
from jinja2.environment import Environment
from tools.config import Config
from .arm_pack_manager import Cache
from .utils import (mkdir, run_cmd, run_cmd_ext, NotSupportedException,
ToolException, InvalidReleaseTargetException,
intelhex_offset)
from .paths import (MBED_CMSIS_PATH, MBED_TARGETS_PATH, MBED_LIBRARIES,
MBED_HEADER, MBED_DRIVERS, MBED_PLATFORM, MBED_HAL,
MBED_CONFIG_FILE, MBED_LIBRARIES_DRIVERS,
MBED_LIBRARIES_PLATFORM, MBED_LIBRARIES_HAL,
BUILD_DIR)
from .targets import TARGET_NAMES, TARGET_MAP
from .libraries import Library
from .toolchains import TOOLCHAIN_CLASSES
from .config import Config
RELEASE_VERSIONS = ['2', '5']
@ -124,7 +126,7 @@ def get_config(src_paths, target, toolchain_name):
toolchain_name - the string that identifies the build tools
"""
# Convert src_paths to a list if needed
if type(src_paths) != ListType:
if not isinstance(src_paths, list):
src_paths = [src_paths]
# Pass all params to the unified prepare_resources()
@ -404,7 +406,7 @@ def scan_resources(src_paths, toolchain, dependencies_paths=None,
# Add additional include directories if passed
if inc_dirs:
if type(inc_dirs) == ListType:
if isinstance(inc_dirs, list):
resources.inc_dirs.extend(inc_dirs)
else:
resources.inc_dirs.append(inc_dirs)
@ -462,7 +464,7 @@ def build_project(src_paths, build_path, target, toolchain_name,
"""
# Convert src_path to a list if needed
if type(src_paths) != ListType:
if not isinstance(src_paths, list):
src_paths = [src_paths]
# Extend src_paths wiht libraries_paths
if libraries_paths is not None:
@ -533,9 +535,9 @@ def build_project(src_paths, build_path, target, toolchain_name,
memap_bars = memap_instance.generate_output('bars',
real_stats_depth, None,
getattr(toolchain.target, 'device_name', None))
print memap_bars
print(memap_bars)
else:
print memap_table
print(memap_table)
# Write output to file in JSON format
map_out = join(build_path, name + "_map.json")
@ -619,7 +621,7 @@ def build_library(src_paths, build_path, target, toolchain_name,
"""
# Convert src_path to a list if needed
if type(src_paths) != ListType:
if not isinstance(src_paths, list):
src_paths = [src_paths]
# Build path
@ -788,7 +790,7 @@ def build_lib(lib_id, target, toolchain_name, verbose=False,
inc_dirs = lib.inc_dirs
inc_dirs_ext = lib.inc_dirs_ext
if type(src_paths) != ListType:
if not isinstance(src_paths, list):
src_paths = [src_paths]
# The first path will give the name to the library

View File

@ -154,7 +154,7 @@ if __name__ == '__main__':
test_builds = {}
total_build_success = True
for target_name, target_toolchains in build_config.iteritems():
for target_name, target_toolchains in build_config.items():
target = TARGET_MAP[target_name]
for target_toolchain in target_toolchains:

View File

@ -18,6 +18,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function, division, absolute_import
import os
import sys
@ -382,11 +383,12 @@ def run_builds(dry_run, vendor):
toolchain_list = build["toolchains"]
if type(toolchain_list) != type([]): toolchain_list = [toolchain_list]
for toolchain in toolchain_list:
cmdline = "python tools/build.py -m %s -t %s -c --silent "% (build["target"], toolchain)
cmdline = ("%s tools/build.py -m %s -t %s -c --silent "%
(sys.executable, build["target"], toolchain))
libs = build.get("libs", [])
if libs:
cmdline = cmdline + " ".join(["--" + l for l in libs])
print "Executing: " + cmdline
print("Executing: %s" % cmdline)
if not dry_run:
if os.system(cmdline) != 0:
sys.exit(1)
@ -408,19 +410,15 @@ def run_test_linking(dry_run, vendor):
for test_lib in tests:
test_names = tests[test_lib]
test_lib_switch = "--" + test_lib if test_lib else ""
cmdline = "python tools/make.py -m %s -t %s -c --silent %s -n %s " % (link["target"], toolchain, test_lib_switch, ",".join(test_names))
print "Executing: " + cmdline
cmdline = ("%s tools/make.py -m %s -t %s -c --silent %s "
"-n %s" % (sys.executable, link["target"],
toolchain, test_lib_switch,
",".join(test_names)))
print("Executing: %s" % cmdline)
if not dry_run:
if os.system(cmdline) != 0:
sys.exit(1)
def run_test_testsuite(dry_run, vendor):
cmdline = "python tools/singletest.py --version"
print "Executing: " + cmdline
if not dry_run:
if os.system(cmdline) != 0:
sys.exit(1)
if __name__ == "__main__":
parser = ArgumentParser()
@ -434,4 +432,3 @@ if __name__ == "__main__":
run_builds("-s" in sys.argv, options.vendor)
run_test_linking("-s" in sys.argv, options.vendor)
run_test_testsuite("-s" in sys.argv, options.vendor)

View File

@ -14,6 +14,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function, division, absolute_import
from copy import deepcopy
from six import moves
@ -28,12 +29,16 @@ from intelhex import IntelHex
from jinja2 import FileSystemLoader, StrictUndefined
from jinja2.environment import Environment
from jsonschema import Draft4Validator, RefResolver
# Implementation of mbed configuration mechanism
from tools.utils import json_file_to_dict, intelhex_offset
from tools.arm_pack_manager import Cache
from tools.targets import CUMULATIVE_ATTRIBUTES, TARGET_MAP, \
generate_py_target, get_resolution_order
from ..utils import json_file_to_dict, intelhex_offset
from ..arm_pack_manager import Cache
from ..targets import (CUMULATIVE_ATTRIBUTES, TARGET_MAP, generate_py_target,
get_resolution_order, Target)
try:
unicode
except NameError:
unicode = str
PATH_OVERRIDES = set(["target.bootloader_img"])
BOOTLOADER_OVERRIDES = set(["target.bootloader_img", "target.restrict_size",
"target.mbed_app_start", "target.mbed_app_size"])
@ -432,15 +437,14 @@ class Config(object):
self.lib_config_data = {}
# Make sure that each config is processed only once
self.processed_configs = {}
if isinstance(tgt, basestring):
if isinstance(tgt, Target):
self.target = tgt
else:
if tgt in TARGET_MAP:
self.target = TARGET_MAP[tgt]
else:
self.target = generate_py_target(
self.app_config_data.get("custom_targets", {}), tgt)
else:
self.target = tgt
self.target = deepcopy(self.target)
self.target_labels = self.target.labels
for override in BOOTLOADER_OVERRIDES:
@ -465,7 +469,7 @@ class Config(object):
continue
full_path = os.path.normpath(os.path.abspath(config_file))
# Check that we didn't already process this file
if self.processed_configs.has_key(full_path):
if full_path in self.processed_configs:
continue
self.processed_configs[full_path] = True
# Read the library configuration and add a "__full_config_path"
@ -496,7 +500,7 @@ class Config(object):
# If there's already a configuration for a module with the same
# name, exit with error
if self.lib_config_data.has_key(cfg["name"]):
if cfg["name"] in self.lib_config_data:
raise ConfigException(
"Library name '%s' is not unique (defined in '%s' and '%s')"
% (cfg["name"], full_path,
@ -662,7 +666,7 @@ class Config(object):
# Check for invalid cumulative overrides in libraries
if (unit_kind == 'library' and
any(attr.startswith('target.extra_labels') for attr
in overrides.iterkeys())):
in overrides.keys())):
raise ConfigException(
"Target override 'target.extra_labels' in " +
ConfigParameter.get_display_name(unit_name, unit_kind,
@ -670,7 +674,7 @@ class Config(object):
" is only allowed at the application level")
# Parse out cumulative overrides
for attr, cumulatives in self.cumulative_overrides.iteritems():
for attr, cumulatives in self.cumulative_overrides.items():
if 'target.'+attr in overrides:
key = 'target.' + attr
if not isinstance(overrides[key], list):
@ -729,7 +733,7 @@ class Config(object):
unit_kind,
label)))))
for cumulatives in self.cumulative_overrides.itervalues():
for cumulatives in self.cumulative_overrides.values():
cumulatives.update_target(self.target)
return params

View File

@ -15,6 +15,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import sys
import os
import re
@ -67,8 +68,8 @@ def main():
# Only prints matrix of supported toolchains
if options.supported_toolchains:
print mcu_toolchain_matrix(
platform_filter=options.general_filter_regex)
print(mcu_toolchain_matrix(
platform_filter=options.general_filter_regex))
exit(0)
# If auto_detect attribute is present, we assume other auto-detection
@ -81,23 +82,23 @@ def main():
for mut in muts.values():
if re.match(mcu_filter, mut['mcu']):
interface_version = get_interface_version(mut['disk'])
print ""
print "[mbed] Detected %s, port %s, mounted %s, interface version %s:" % \
(mut['mcu'], mut['port'], mut['disk'], interface_version)
print "[mbed] Supported toolchains for %s" % mut['mcu']
print mcu_toolchain_matrix(platform_filter=mut['mcu'])
print("")
print("[mbed] Detected %s, port %s, mounted %s, interface "
"version %s:" %
(mut['mcu'], mut['port'], mut['disk'], interface_version))
print("[mbed] Supported toolchains for %s" % mut['mcu'])
print(mcu_toolchain_matrix(platform_filter=mut['mcu']))
count += 1
if count == 0:
print "[mbed] No mbed targets were detected on your system."
print("[mbed] No mbed targets were detected on your system.")
except KeyboardInterrupt:
print "\n[CTRL+c] exit"
print("\n[CTRL+c] exit")
except Exception as exc:
import traceback
traceback.print_exc(file=sys.stdout)
print "[ERROR] %s" % str(exc)
print("[ERROR] %s" % str(exc))
sys.exit(1)
def get_interface_version(mount_point):

View File

@ -15,6 +15,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import sys
from os.path import join, abspath, dirname, exists
from os.path import basename, relpath, normpath, splitext
@ -22,50 +24,43 @@ from os import makedirs, walk
import copy
from shutil import rmtree, copyfile
import zipfile
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
from tools.build_api import prepare_toolchain
from tools.build_api import scan_resources
from tools.toolchains import Resources
from tools.export import lpcxpresso, ds5_5, iar, makefile
from tools.export import embitz, coide, kds, simplicity, atmelstudio, mcuxpresso
from tools.export import sw4stm32, e2studio, zip, cmsis, uvision, cdt, vscode
from tools.export import gnuarmeclipse
from tools.export import qtcreator
from tools.export import cmake
from tools.export import nb
from tools.targets import TARGET_NAMES
from ..build_api import prepare_toolchain, scan_resources
from ..toolchains import Resources
from ..targets import TARGET_NAMES
from . import (lpcxpresso, ds5_5, iar, makefile, embitz, coide, kds, simplicity,
atmelstudio, mcuxpresso, sw4stm32, e2studio, zip, cmsis, uvision,
cdt, vscode, gnuarmeclipse, qtcreator, cmake, nb)
EXPORTERS = {
'uvision5': uvision.Uvision,
'uvision': uvision.Uvision,
'lpcxpresso': lpcxpresso.LPCXpresso,
'gcc_arm': makefile.GccArm,
'make_gcc_arm': makefile.GccArm,
'make_armc5': makefile.Armc5,
'make_armc6': makefile.Armc6,
'make_iar': makefile.IAR,
'ds5_5': ds5_5.DS5_5,
'iar': iar.IAR,
'embitz' : embitz.EmBitz,
'coide' : coide.CoIDE,
'kds' : kds.KDS,
'simplicityv3' : simplicity.SimplicityV3,
'atmelstudio' : atmelstudio.AtmelStudio,
'sw4stm32' : sw4stm32.Sw4STM32,
'e2studio' : e2studio.E2Studio,
'eclipse_gcc_arm' : cdt.EclipseGcc,
'eclipse_iar' : cdt.EclipseIAR,
'eclipse_armc5' : cdt.EclipseArmc5,
'gnuarmeclipse': gnuarmeclipse.GNUARMEclipse,
'netbeans': nb.GNUARMNetbeans,
'mcuxpresso': mcuxpresso.MCUXpresso,
'qtcreator': qtcreator.QtCreator,
'vscode_gcc_arm' : vscode.VSCodeGcc,
'vscode_iar' : vscode.VSCodeIAR,
'vscode_armc5' : vscode.VSCodeArmc5,
'cmake_gcc_arm': cmake.GccArm
u'uvision5': uvision.Uvision,
u'uvision': uvision.Uvision,
u'lpcxpresso': lpcxpresso.LPCXpresso,
u'gcc_arm': makefile.GccArm,
u'make_gcc_arm': makefile.GccArm,
u'make_armc5': makefile.Armc5,
u'make_armc6': makefile.Armc6,
u'make_iar': makefile.IAR,
u'ds5_5': ds5_5.DS5_5,
u'iar': iar.IAR,
u'embitz' : embitz.EmBitz,
u'coide' : coide.CoIDE,
u'kds' : kds.KDS,
u'simplicityv3' : simplicity.SimplicityV3,
u'atmelstudio' : atmelstudio.AtmelStudio,
u'sw4stm32' : sw4stm32.Sw4STM32,
u'e2studio' : e2studio.E2Studio,
u'eclipse_gcc_arm' : cdt.EclipseGcc,
u'eclipse_iar' : cdt.EclipseIAR,
u'eclipse_armc5' : cdt.EclipseArmc5,
u'gnuarmeclipse': gnuarmeclipse.GNUARMEclipse,
u'mcuxpresso': mcuxpresso.MCUXpresso,
u'netbeans': nb.GNUARMNetbeans,
u'qtcreator': qtcreator.QtCreator,
u'vscode_gcc_arm' : vscode.VSCodeGcc,
u'vscode_iar' : vscode.VSCodeIAR,
u'vscode_armc5' : vscode.VSCodeArmc5,
u'cmake_gcc_arm': cmake.GccArm
}
ERROR_MESSAGE_UNSUPPORTED_TOOLCHAIN = """
@ -226,7 +221,7 @@ def zip_export(file_name, prefix, resources, project_files, inc_repos):
with zipfile.ZipFile(file_name, "w") as zip_file:
for prj_file in project_files:
zip_file.write(prj_file, join(prefix, basename(prj_file)))
for loc, res in resources.iteritems():
for loc, res in resources.items():
to_zip = (
res.headers + res.s_sources + res.c_sources +\
res.cpp_sources + res.libraries + res.hex_files + \
@ -320,7 +315,7 @@ def export_project(src_paths, export_path, target, ide, libraries_paths=None,
# Call unified scan_resources
resource_dict = {loc: scan_resources(path, toolchain, inc_dirs=inc_dirs, collect_ignores=True)
for loc, path in src_paths.iteritems()}
for loc, path in src_paths.items()}
resources = Resources()
toolchain.build_dir = export_path
config_header = toolchain.get_config_header()
@ -329,12 +324,12 @@ def export_project(src_paths, export_path, target, ide, libraries_paths=None,
if zip_proj:
subtract_basepath(resources, ".")
for loc, res in resource_dict.iteritems():
for loc, res in resource_dict.items():
temp = copy.deepcopy(res)
subtract_basepath(temp, ".", loc)
resources.add(temp)
else:
for _, res in resource_dict.iteritems():
for _, res in resource_dict.items():
resources.add(res)
# Change linker script if specified
@ -347,7 +342,7 @@ def export_project(src_paths, export_path, target, ide, libraries_paths=None,
files.append(config_header)
if zip_proj:
for resource in resource_dict.values():
for label, res in resource.features.iteritems():
for label, res in resource.features.items():
if label not in toolchain.target.features:
resource.add(res)
if isinstance(zip_proj, basestring):

View File

@ -74,13 +74,13 @@ class CoIDE(Exporter):
def generate(self):
self.resources.win_to_unix()
source_files = []
for r_type, n in CoIDE.FILE_TYPES.iteritems():
for r_type, n in CoIDE.FILE_TYPES.items():
for file in getattr(self.resources, r_type):
source_files.append({
'name': basename(file), 'type': n, 'path': file
})
header_files = []
for r_type, n in CoIDE.FILE_TYPES2.iteritems():
for r_type, n in CoIDE.FILE_TYPES2.items():
for file in getattr(self.resources, r_type):
header_files.append({
'name': basename(file), 'type': n, 'path': file

View File

@ -50,7 +50,7 @@ class DS5_5(Exporter):
def generate(self):
source_files = []
for r_type, n in DS5_5.FILE_TYPES.iteritems():
for r_type, n in DS5_5.FILE_TYPES.items():
for file in getattr(self.resources, r_type):
source_files.append({
'name': basename(file), 'type': n, 'path': file

View File

@ -52,7 +52,7 @@ class EmBitz(Exporter):
def generate(self):
self.resources.win_to_unix()
source_files = []
for r_type, n in self.FILE_TYPES.iteritems():
for r_type, n in self.FILE_TYPES.items():
for file in getattr(self.resources, r_type):
source_files.append({
'name': file, 'type': n

View File

@ -104,7 +104,7 @@ class Exporter(object):
"""
config_header = self.toolchain.get_config_header()
flags = {key + "_flags": copy.deepcopy(value) for key, value
in self.toolchain.flags.iteritems()}
in self.toolchain.flags.items()}
asm_defines = self.toolchain.get_compile_options(
self.toolchain.get_symbols(for_asm=True),
filter(None, self.resources.inc_dirs),

View File

@ -92,7 +92,7 @@ class GNUARMEclipse(Exporter):
config_header = self.toolchain.get_config_header()
flags = {key + "_flags": copy.deepcopy(value) for key, value
in self.toolchain.flags.iteritems()}
in self.toolchain.flags.items()}
if config_header:
config_header = relpath(config_header,
self.resources.file_basepath[config_header])
@ -117,7 +117,7 @@ class GNUARMEclipse(Exporter):
config_header = self.toolchain.get_config_header()
flags = {key + "_flags": copy.deepcopy(value) for key, value
in toolchain.flags.iteritems()}
in toolchain.flags.items()}
if config_header:
config_header = relpath(config_header,
self.resources.file_basepath[config_header])

View File

@ -6,7 +6,7 @@ mbedclean:
$(RM) $(SECONDARY_FLASH)$(SECONDARY_SIZE) {{name}}.* linker-script-*.ld
-@echo ' '
{% for config, data in options.iteritems() %}
{% for config, data in options.items() %}
linker-script-{{config}}.ld: ../{{ld_script}}
{{data.cpp_cmd}} {{data.ld.other}} $< -o $@
{{name}}.elf: linker-script-{{config}}.ld

View File

@ -140,7 +140,7 @@ class Makefile(Exporter):
def format_flags(self):
"""Format toolchain flags for Makefile"""
flags = {}
for k, v in self.flags.iteritems():
for k, v in self.flags.items():
if k in ['asm_flags', 'c_flags', 'cxx_flags']:
flags[k] = map(lambda x: x.replace('"', '\\"'), v)
else:

View File

@ -6,7 +6,7 @@ mbedclean:
$(RM) $(EXECUTABLES) {{name}}.* linker-script-*.ld
-@echo ' '
{% for config, data in options.iteritems() %}
{% for config, data in options.items() %}
linker-script-{{config}}.ld: ../{{ld_script}}
{{data.cpp_cmd}} {{data.ld.other}} $< -o $@
{{name}}.elf: linker-script-{{config}}.ld

View File

@ -3,11 +3,11 @@
PREPROC_CMD ?= {{cpp_cmd}}
ldclean:
{% for config, opts in options.iteritems() %}
{% for config, opts in options.items() %}
$(RM) {{opts['ld']['script']}}
{% endfor %}
{% for config, opts in options.iteritems() %}
{% for config, opts in options.items() %}
{{opts['ld']['script']}}: ../{{ld_script}}
$(PREPROC_CMD) {{opts.ld.other}} $< -o $@

View File

@ -215,7 +215,7 @@ class Uvision(Exporter):
'name': self.project_name,
# project_files => dict of generators - file group to generator of
# UVFile tuples defined above
'project_files': sorted(list(self.format_src(srcs).iteritems()),
'project_files': sorted(list(self.format_src(srcs).items()),
key=lambda (group, _): group.lower()),
'include_paths': '; '.join(self.resources.inc_dirs).encode('utf-8'),
'device': DeviceUvision(self.target),

View File

@ -22,7 +22,10 @@ import struct
import binascii
import argparse
import logging
import StringIO
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import jinja2
from collections import namedtuple
from itertools import count
@ -124,7 +127,7 @@ class PackFlashAlgo(object):
if fmt == "hex":
blob = binascii.b2a_hex(self.algo_data)
line_list = []
for i in xrange(0, len(blob), group_size):
for i in range(0, len(blob), group_size):
line_list.append('"' + blob[i:i + group_size] + '"')
return ("\n" + padding).join(line_list)
elif fmt == "c":
@ -311,7 +314,7 @@ class ElfFileSimple(ELFFile):
def __init__(self, data):
"""Construct a ElfFileSimple from bytes or a bytearray"""
super(ElfFileSimple, self).__init__(StringIO.StringIO(data))
super(ElfFileSimple, self).__init__(StringIO(data))
self.symbols = self._read_symbol_table()
def _read_symbol_table(self):

View File

@ -30,14 +30,14 @@ def hook_tool(function):
return function(t_self, *args, **kwargs)
_RUNNING_HOOKS[tool] = True
# If this tool isn't hooked, return original function
if not _HOOKS.has_key(tool):
if tool not in _HOOKS:
res = function(t_self, *args, **kwargs)
_RUNNING_HOOKS[tool] = False
return res
tooldesc = _HOOKS[tool]
setattr(t_self, tool_flag, False)
# If there is a replace hook, execute the replacement instead
if tooldesc.has_key("replace"):
if "replace" in tooldesc:
res = tooldesc["replace"](t_self, *args, **kwargs)
# If the replacement has set the "done" flag, exit now
# Otherwise continue as usual
@ -45,12 +45,12 @@ def hook_tool(function):
_RUNNING_HOOKS[tool] = False
return res
# Execute pre-function before main function if specified
if tooldesc.has_key("pre"):
if "pre" in tooldesc:
tooldesc["pre"](t_self, *args, **kwargs)
# Execute the main function now
res = function(t_self, *args, **kwargs)
# Execute post-function after main function if specified
if tooldesc.has_key("post"):
if "post" in tooldesc:
post_res = tooldesc["post"](t_self, *args, **kwargs)
_RUNNING_HOOKS[tool] = False
return post_res or res
@ -173,7 +173,7 @@ class Hook(object):
hook_type - one of the _HOOK_TYPES
cmdline - the initial command line
"""
if self._cmdline_hooks.has_key(hook_type):
if hook_type in self._cmdline_hooks:
cmdline = self._cmdline_hooks[hook_type](
self.toolchain.__class__.__name__, cmdline)
return cmdline

View File

@ -15,25 +15,25 @@ See the License for the specific language governing permissions and
limitations under the License.
"""
from host_registry import HostRegistry
from .host_registry import HostRegistry
# Host test supervisors
from echo import EchoTest
from rtc_auto import RTCTest
from stdio_auto import StdioTest
from hello_auto import HelloTest
from detect_auto import DetectPlatformTest
from default_auto import DefaultAuto
from dev_null_auto import DevNullTest
from wait_us_auto import WaitusTest
from tcpecho_server_auto import TCPEchoServerTest
from udpecho_server_auto import UDPEchoServerTest
from tcpecho_client_auto import TCPEchoClientTest
from udpecho_client_auto import UDPEchoClientTest
from wfi_auto import WFITest
from serial_nc_rx_auto import SerialNCRXTest
from serial_nc_tx_auto import SerialNCTXTest
from serial_complete_auto import SerialCompleteTest
from .echo import EchoTest
from .rtc_auto import RTCTest
from .stdio_auto import StdioTest
from .hello_auto import HelloTest
from .detect_auto import DetectPlatformTest
from .default_auto import DefaultAuto
from .dev_null_auto import DevNullTest
from .wait_us_auto import WaitusTest
from .tcpecho_server_auto import TCPEchoServerTest
from .udpecho_server_auto import UDPEchoServerTest
from .tcpecho_client_auto import TCPEchoClientTest
from .udpecho_client_auto import UDPEchoClientTest
from .wfi_auto import WFITest
from .serial_nc_rx_auto import SerialNCRXTest
from .serial_nc_tx_auto import SerialNCTXTest
from .serial_complete_auto import SerialCompleteTest
# Populate registry with supervising objects
HOSTREGISTRY = HostRegistry()

View File

@ -14,6 +14,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from sys import stdout
@ -30,7 +31,7 @@ class DefaultAuto():
return selftest.RESULT_IO_SERIAL
stdout.write(c)
stdout.flush()
except KeyboardInterrupt, _:
except KeyboardInterrupt:
selftest.notify("\r\n[CTRL+C] exit")
result = selftest.RESULT_ERROR
return result

View File

@ -14,26 +14,27 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import host_test_registry
from . import host_test_registry
# This plugins provide 'flashing' methods to host test scripts
import module_copy_mbed
import module_copy_shell
import module_copy_silabs
from . import module_copy_mbed
from . import module_copy_shell
from . import module_copy_silabs
try:
import module_copy_smart
from . import module_copy_smart
except:
pass
#import module_copy_firefox
import module_copy_mps2
from . import module_copy_mps2
# Plugins used to reset certain platform
import module_reset_mbed
import module_reset_silabs
import module_reset_mps2
from . import module_reset_mbed
from . import module_reset_silabs
from . import module_reset_mps2
# Plugin registry instance
@ -77,4 +78,4 @@ def get_plugin_caps(type):
def print_plugin_info():
""" Prints plugins' information in user friendly way
"""
print HOST_TEST_PLUGIN_REGISTRY
print(HOST_TEST_PLUGIN_REGISTRY)

View File

@ -14,6 +14,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from os import access, F_OK
from sys import stdout
@ -58,16 +59,13 @@ class HostTestPluginBase:
def print_plugin_error(self, text):
""" Function prints error in console and exits always with False
"""
print "Plugin error: %s::%s: %s"% (self.name, self.type, text)
print("Plugin error: %s::%s: %s" % (self.name, self.type, text))
return False
def print_plugin_info(self, text, NL=True):
""" Function prints notification in console and exits always with True
"""
if NL:
print "Plugin info: %s::%s: %s"% (self.name, self.type, text)
else:
print "Plugin info: %s::%s: %s"% (self.name, self.type, text),
print("Plugin info: %s::%s: %s"% (self.name, self.type, text))
return True
def print_plugin_char(self, char):

View File

@ -14,6 +14,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
class HostTestRegistry:
""" Simple class used to register and store
@ -23,7 +24,7 @@ class HostTestRegistry:
PLUGINS = {} # 'Plugin Name' : Plugin Object
def print_error(self, text):
print "Plugin load failed. Reason: %s"% text
print("Plugin load failed. Reason: %s" % text)
def register_plugin(self, plugin):
""" Registers and stores plugin inside registry for further use.

View File

@ -14,9 +14,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from shutil import copy
from host_test_plugins import HostTestPluginBase
from .host_test_plugins import HostTestPluginBase
from time import sleep
@ -32,7 +33,7 @@ class HostTestPluginCopyMethod_Mbed(HostTestPluginBase):
destination_disk += '/'
try:
copy(image_path, destination_disk)
except Exception, e:
except Exception as e:
self.print_plugin_error("shutil.copy('%s', '%s')"% (image_path, destination_disk))
self.print_plugin_error("Error: %s"% str(e))
result = False

View File

@ -14,12 +14,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import re
import os, shutil
from os.path import join
from host_test_plugins import HostTestPluginBase
from time import sleep
from .host_test_plugins import HostTestPluginBase
class HostTestPluginCopyMethod_MPS2(HostTestPluginBase):

View File

@ -14,12 +14,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
from os.path import join, basename
from host_test_plugins import HostTestPluginBase
from time import sleep
from .host_test_plugins import HostTestPluginBase
class HostTestPluginCopyMethod_Shell(HostTestPluginBase):

View File

@ -14,9 +14,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from host_test_plugins import HostTestPluginBase
from time import sleep
from .host_test_plugins import HostTestPluginBase
class HostTestPluginCopyMethod_Silabs(HostTestPluginBase):

View File

@ -14,8 +14,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from host_test_plugins import HostTestPluginBase
from .host_test_plugins import HostTestPluginBase
class HostTestPluginResetMethod_Mbed(HostTestPluginBase):

View File

@ -14,10 +14,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
from host_test_plugins import HostTestPluginBase
from time import sleep
from .host_test_plugins import HostTestPluginBase
# Note: This plugin is not fully functional, needs improvements

View File

@ -14,8 +14,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from host_test_plugins import HostTestPluginBase
from .host_test_plugins import HostTestPluginBase
class HostTestPluginResetMethod_SiLabs(HostTestPluginBase):

View File

@ -14,17 +14,21 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import sys
import socket
from sys import stdout
from SocketServer import BaseRequestHandler, TCPServer
try:
from SocketServer import BaseRequestHandler, TCPServer
except ImportError:
from socketserver import BaseRequestHandler, TCPServer
class TCPEchoClient_Handler(BaseRequestHandler):
def handle(self):
""" One handle per connection
"""
print "HOST: Connection received...",
print("HOST: Connection received...")
count = 1;
while True:
data = self.request.recv(1024)
@ -32,7 +36,7 @@ class TCPEchoClient_Handler(BaseRequestHandler):
self.request.sendall(data)
if '{{end}}' in str(data):
print
print str(data)
print(str(data))
else:
if not count % 10:
sys.stdout.write('.')
@ -82,6 +86,7 @@ class TCPEchoClientTest():
# Returning none will suppress host test from printing success code
server = TCPServer((SERVER_IP, SERVER_PORT), TCPEchoClient_Handler)
print "HOST: Listening for TCP connections: " + SERVER_IP + ":" + str(SERVER_PORT)
print("HOST: Listening for TCP connections: %s:%s" %
(SERVER_IP, str(SERVER_PORT)))
self.send_server_ip_port(selftest, SERVER_IP, SERVER_PORT)
server.serve_forever()

View File

@ -14,6 +14,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import re
import sys
@ -47,18 +48,18 @@ class TCPEchoServerTest():
try:
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect((self.ECHO_SERVER_ADDRESS, self.ECHO_PORT))
except Exception, e:
except Exception as e:
self.s = None
selftest.notify("HOST: Socket error: %s"% e)
return selftest.RESULT_ERROR
print 'HOST: Sending %d echo strings...'% self.ECHO_LOOPs,
print('HOST: Sending %d echo strings...'% self.ECHO_LOOPs,)
for i in range(0, self.ECHO_LOOPs):
TEST_STRING = str(uuid.uuid4())
try:
self.s.sendall(TEST_STRING)
data = self.s.recv(128)
except Exception, e:
except Exception as e:
self.s = None
selftest.notify("HOST: Socket error: %s"% e)
return selftest.RESULT_ERROR
@ -69,10 +70,10 @@ class TCPEchoServerTest():
stdout.flush()
result = True
else:
print "Expected: "
print "'%s'"% TEST_STRING
print "received: "
print "'%s'"% received_str
print("Expected: ")
print("'%s'"% TEST_STRING)
print("received: ")
print("'%s'"% received_str)
result = False
break

View File

@ -14,11 +14,15 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import sys
import socket
from sys import stdout
from SocketServer import BaseRequestHandler, UDPServer
try:
from SocketServer import BaseRequestHandler, UDPServer
except ImportError:
from socketserver import BaseRequestHandler, UDPServer
class UDPEchoClient_Handler(BaseRequestHandler):
def handle(self):
@ -27,8 +31,7 @@ class UDPEchoClient_Handler(BaseRequestHandler):
data, socket = self.request
socket.sendto(data, self.client_address)
if '{{end}}' in data:
print
print data
print("\n%s" % data)
else:
sys.stdout.write('.')
stdout.flush()
@ -72,6 +75,6 @@ class UDPEchoClientTest():
# Returning none will suppress host test from printing success code
server = UDPServer((SERVER_IP, SERVER_PORT), UDPEchoClient_Handler)
print "HOST: Listening for UDP connections..."
print("HOST: Listening for UDP connections...")
self.send_server_ip_port(selftest, SERVER_IP, SERVER_PORT)
server.serve_forever()

View File

@ -14,6 +14,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import re
import sys
@ -45,7 +46,7 @@ class UDPEchoServerTest():
# We assume this test fails so can't send 'error' message to server
try:
self.s = socket(AF_INET, SOCK_DGRAM)
except Exception, e:
except Exception as e:
self.s = None
selftest.notify("HOST: Socket error: %s"% e)
return selftest.RESULT_ERROR

View File

@ -18,6 +18,7 @@ limitations under the License.
TEST BUILD & RUN
"""
from __future__ import print_function
import sys
import json
from time import sleep
@ -186,20 +187,20 @@ if __name__ == '__main__':
# Only prints matrix of supported toolchains
if options.supported_toolchains:
if options.supported_toolchains == "matrix":
print mcu_toolchain_matrix(platform_filter=options.general_filter_regex)
print(mcu_toolchain_matrix(platform_filter=options.general_filter_regex))
elif options.supported_toolchains == "toolchains":
toolchain_list = mcu_toolchain_list()
# Only print the lines that matter
for line in toolchain_list.split("\n"):
if not "mbed" in line:
print line
print(line)
elif options.supported_toolchains == "targets":
print mcu_target_list()
print(mcu_target_list())
exit(0)
# Print available tests in order and exit
if options.list_tests is True:
print '\n'.join(map(str, sorted(TEST_MAP.values())))
print('\n'.join(map(str, sorted(TEST_MAP.values()))))
sys.exit()
# force program to "0" if a source dir is specified
@ -245,7 +246,7 @@ if __name__ == '__main__':
search_path = TOOLCHAIN_PATHS[toolchain] or "No path set"
args_error(parser, "Could not find executable for %s.\n"
"Currently set search path: %s"
%(toolchain,search_path))
%(toolchain, search_path))
# Test
build_data_blob = {} if options.build_data else None
@ -259,7 +260,7 @@ if __name__ == '__main__':
if options.extra is not None: test.extra_files = options.extra
if not test.is_supported(mcu, toolchain):
print 'The selected test is not supported on target %s with toolchain %s' % (mcu, toolchain)
print('The selected test is not supported on target %s with toolchain %s' % (mcu, toolchain))
sys.exit()
# Linking with extra libraries
@ -294,7 +295,7 @@ if __name__ == '__main__':
options,
toolchain),
stats_depth=options.stats_depth)
print 'Image: %s'% bin_file
print('Image: %s'% bin_file)
if options.disk:
# Simple copy to the mbed disk
@ -328,16 +329,16 @@ if __name__ == '__main__':
sys.stdout.write(c)
sys.stdout.flush()
except KeyboardInterrupt, e:
print "\n[CTRL+c] exit"
except KeyboardInterrupt as e:
print("\n[CTRL+c] exit")
except NotSupportedException as e:
print "\nCould not compile for %s: %s" % (mcu, str(e))
except Exception,e:
print("\nCould not compile for %s: %s" % (mcu, str(e)))
except Exception as e:
if options.verbose:
import traceback
traceback.print_exc(file=sys.stdout)
else:
print "[ERROR] %s" % str(e)
print("[ERROR] %s" % str(e))
sys.exit(1)
if options.build_data:

View File

@ -1,6 +1,7 @@
#!/usr/bin/env python
"""Memory Map File Analyser for ARM mbed"""
from __future__ import print_function, division, absolute_import
from abc import abstractmethod, ABCMeta
from sys import stdout, exit, argv
@ -15,8 +16,8 @@ from copy import deepcopy
from prettytable import PrettyTable
from tools.arm_pack_manager import Cache
from utils import argparse_filestring_type, \
argparse_lowercase_hyphen_type, argparse_uppercase_type
from .utils import (argparse_filestring_type, argparse_lowercase_hyphen_type,
argparse_uppercase_type)
class _Parser(object):
@ -132,7 +133,7 @@ class _GccParser(_Parser):
return join('[lib]', test_re_obj_name.group(2),
test_re_obj_name.group(3))
else:
print "Unknown object name found in GCC map file: %s" % line
print("Unknown object name found in GCC map file: %s" % line)
return '[misc]'
def parse_section(self, line):
@ -217,7 +218,7 @@ class _ArmccParser(_Parser):
if is_obj:
return join('[lib]', basename(is_obj.group(1)), is_obj.group(3))
else:
print "Malformed input found when parsing ARMCC map: %s" % line
print("Malformed input found when parsing ARMCC map: %s" % line)
return '[misc]'
def parse_section(self, line):
@ -246,8 +247,8 @@ class _ArmccParser(_Parser):
elif test_re.group(3) == 'Code':
section = '.text'
else:
print "Malformed input found when parsing armcc map: %s, %r" %\
(line, test_re.groups())
print("Malformed input found when parsing armcc map: %s, %r"
% (line, test_re.groups()))
return ["", 0, ""]
@ -352,7 +353,7 @@ class _IarParser(_Parser):
elif test_re.group(2) == 'inited':
section = '.data'
else:
print "Malformed input found when parsing IAR map: %s" % line
print("Malformed input found when parsing IAR map: %s" % line)
return ["", 0, ""]
# lookup object in dictionary and return module name
@ -407,7 +408,7 @@ class _IarParser(_Parser):
if (not arg.startswith("-")) and arg.endswith(".o"):
self.cmd_modules[basename(arg)] = arg
common_prefix = dirname(commonprefix(self.cmd_modules.values()))
common_prefix = dirname(commonprefix(list(self.cmd_modules.values())))
self.cmd_modules = {s: relpath(f, common_prefix)
for s, f in self.cmd_modules.items()}
@ -524,11 +525,11 @@ class MemapParser(object):
self.compute_report()
try:
if file_output:
file_desc = open(file_output, 'wb')
file_desc = open(file_output, 'w')
else:
file_desc = stdout
except IOError as error:
print "I/O error({0}): {1}".format(error.errno, error.strerror)
print("I/O error({0}): {1}".format(error.errno, error.strerror))
return False
to_call = {'json': self.generate_json,
@ -558,24 +559,24 @@ class MemapParser(object):
Positional arguments:
file_desc - the file to write out the final report to
"""
csv_writer = csv.writer(file_desc, delimiter=',',
quoting=csv.QUOTE_MINIMAL)
writer = csv.writer(file_desc, delimiter=',',
quoting=csv.QUOTE_MINIMAL)
csv_module_section = []
csv_sizes = []
module_section = []
sizes = []
for i in sorted(self.short_modules):
for k in self.print_sections:
csv_module_section += [i+k]
csv_sizes += [self.short_modules[i][k]]
module_section.append((i + k))
sizes += [self.short_modules[i][k]]
csv_module_section += ['static_ram']
csv_sizes += [self.mem_summary['static_ram']]
module_section.append('static_ram')
sizes.append(self.mem_summary['static_ram'])
csv_module_section += ['total_flash']
csv_sizes += [self.mem_summary['total_flash']]
module_section.append('total_flash')
sizes.append(self.mem_summary['total_flash'])
csv_writer.writerow(csv_module_section)
csv_writer.writerow(csv_sizes)
writer.writerow(module_section)
writer.writerow(sizes)
return None
def generate_table(self, file_desc):
@ -736,7 +737,7 @@ class MemapParser(object):
return True
except IOError as error:
print "I/O error({0}): {1}".format(error.errno, error.strerror)
print("I/O error({0}): {1}".format(error.errno, error.strerror))
return False
def main():
@ -803,7 +804,7 @@ def main():
returned_string = memap.generate_output(args.export, depth)
if args.export == 'table' and returned_string:
print returned_string
print(returned_string)
exit(0)

View File

@ -43,13 +43,13 @@ class MyJSONEncoder(json.JSONEncoder):
break
output = []
if primitives_only and len(o) < 3:
for key, value in o.iteritems():
for key, value in o.items():
output.append(json.dumps(key) + ": " + self.encode(value))
return "{" + ", ".join(output) + "}"
else:
self.current_indent += self.indent
self.current_indent_str = " " * self.current_indent
for key, value in o.iteritems():
for key, value in o.items():
output.append(self.current_indent_str + json.dumps(key) + ": " + self.encode(value))
self.current_indent -= self.indent
self.current_indent_str = " " * self.current_indent
@ -141,7 +141,7 @@ def add_to_targets(targets, device_file, verbose=False, remove=False) :
print("[WARNING] device {} did not have an associated device.h".format(device))
else :
possible_matches = set([key for key in targets.keys() if stem_match(device, key)])
for key, value in targets.iteritems() :
for key, value in targets.items() :
for alt in value['extra_labels'] if 'extra_labels' in value else [] :
if stem_match(device, alt) : possible_matches.add(key)
for alt in value['extra_labels_add'] if 'extra_labels_add' in value else [] :

View File

@ -14,16 +14,19 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function, division, absolute_import
from json import load
from os.path import join, dirname
from os import listdir
from argparse import ArgumentParser, ArgumentTypeError
from tools.toolchains import TOOLCHAINS
from tools.targets import TARGET_NAMES, Target, update_target_data
from tools.utils import argparse_force_uppercase_type, \
argparse_lowercase_hyphen_type, argparse_many, \
argparse_filestring_type, args_error, argparse_profile_filestring_type,\
argparse_deprecate
from .toolchains import TOOLCHAINS
from .targets import TARGET_NAMES, Target, update_target_data
from .utils import (argparse_force_uppercase_type, argparse_deprecate,
argparse_lowercase_hyphen_type, argparse_many,
argparse_filestring_type, args_error,
argparse_profile_filestring_type)
FLAGS_DEPRECATION_MESSAGE = "Please use the --profile argument instead.\n"\
"Documentation may be found in "\

View File

@ -14,7 +14,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from os import getenv
from os.path import join, abspath, dirname, exists
import logging
@ -81,7 +81,8 @@ for _n in _ENV_PATHS:
if exists(getenv('MBED_'+_n)):
globals()[_n] = getenv('MBED_'+_n)
else:
print "WARNING: MBED_%s set as environment variable but doesn't exist" % _n
print("WARNING: MBED_%s set as environment variable but doesn't"
" exist" % _n)
##############################################################################

View File

@ -101,7 +101,7 @@ def compare(t1, t2, target):
benchmarks_data[name][t] = map(int, (code, data, bss, flash))
print "%s vs %s for %s" % (t1, t2, target)
for name, data in benchmarks_data.iteritems():
for name, data in benchmarks_data.items():
try:
# Check Size
code_a, data_a, bss_a, flash_a = data[t1]

View File

@ -14,6 +14,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
import binascii
@ -65,7 +66,7 @@ def cached(func):
"""
def wrapper(*args, **kwargs):
"""The wrapped function itself"""
if not CACHES.has_key((func.__name__, args)):
if (func.__name__, args) not in CACHES:
CACHES[(func.__name__, args)] = func(*args, **kwargs)
return CACHES[(func.__name__, args)]
return wrapper
@ -141,9 +142,10 @@ class Target(namedtuple("Target", "name json_data resolution_order resolution_or
Target.__targets_json_location_default)
for extra_target in Target.__extra_target_json_files:
for k, v in json_file_to_dict(extra_target).iteritems():
for k, v in json_file_to_dict(extra_target).items():
if k in targets:
print 'WARNING: Custom target "%s" cannot replace existing target.' % k
print('WARNING: Custom target "%s" cannot replace existing '
'target.' % k)
else:
targets[k] = v
@ -212,16 +214,16 @@ class Target(namedtuple("Target", "name json_data resolution_order resolution_or
# inheritance level, left to right order to figure out all the
# other classes that change the definition by adding or removing
# elements
for idx in xrange(self.resolution_order[def_idx][1] - 1, -1, -1):
for idx in range(self.resolution_order[def_idx][1] - 1, -1, -1):
same_level_targets = [tar[0] for tar in self.resolution_order
if tar[1] == idx]
for tar in same_level_targets:
data = tdata[tar]
# Do we have anything to add ?
if data.has_key(attrname + "_add"):
if (attrname + "_add") in data:
starting_value.extend(data[attrname + "_add"])
# Do we have anything to remove ?
if data.has_key(attrname + "_remove"):
if (attrname + "_remove") in data:
# Macros can be defined either without a value (MACRO)
# or with a value (MACRO=10). When removing, we specify
# only the name of the macro, without the value. So we
@ -258,19 +260,14 @@ class Target(namedtuple("Target", "name json_data resolution_order resolution_or
starting_value = None
for tgt in self.resolution_order:
data = tdata[tgt[0]]
if data.has_key(attrname):
starting_value = data[attrname]
break
try:
return data[attrname]
except KeyError:
pass
else: # Attribute not found
raise AttributeError(
"Attribute '%s' not found in target '%s'"
% (attrname, self.name))
# 'progen' needs the full path to the template (the path in JSON is
# relative to tools/export)
if attrname == "progen":
return self.__add_paths_to_progen(starting_value)
else:
return starting_value
def __getattr__(self, attrname):
""" Return the value of an attribute. This function only computes the
@ -338,7 +335,7 @@ class Target(namedtuple("Target", "name json_data resolution_order resolution_or
# "class_name" must refer to a class in this file, so check if the
# class exists
mdata = self.get_module_data()
if not mdata.has_key(class_name) or \
if class_name not in mdata or \
not inspect.isclass(mdata[class_name]):
raise HookError(
("Class '%s' required by '%s' in target '%s'"
@ -427,7 +424,7 @@ class MTSCode(object):
loader = os.path.join(TOOLS_BOOTLOADERS, target_name, "bootloader.bin")
target = binf + ".tmp"
if not os.path.exists(loader):
print "Can't find bootloader binary: " + loader
print("Can't find bootloader binary: " + loader)
return
outbin = open(target, 'w+b')
part = open(loader, 'rb')

View File

@ -18,6 +18,7 @@ limitations under the License.
TEST BUILD & RUN
"""
from __future__ import print_function, division, absolute_import
import sys
import os
import json
@ -36,8 +37,8 @@ from tools.build_api import merge_build_data
from tools.targets import TARGET_MAP
from tools.utils import mkdir, ToolException, NotSupportedException, args_error
from tools.test_exporters import ReportExporter, ResultExporterType
from utils import argparse_filestring_type, argparse_lowercase_type, argparse_many
from utils import argparse_dir_not_parent
from tools.utils import argparse_filestring_type, argparse_lowercase_type, argparse_many
from tools.utils import argparse_dir_not_parent
from tools.toolchains import mbedToolchain, TOOLCHAIN_PATHS, TOOLCHAIN_CLASSES
from tools.settings import CLI_COLOR_MAP
@ -164,7 +165,8 @@ if __name__ == '__main__':
if fnmatch.fnmatch(testname, name):
tests[testname] = test
else:
print "[Warning] Test with name '%s' was not found in the available tests" % (name)
print("[Warning] Test with name '%s' was not found in the "
"available tests" % (name))
else:
tests = all_tests
@ -211,18 +213,18 @@ if __name__ == '__main__':
build_profile=profile)
library_build_success = True
except ToolException, e:
except ToolException as e:
# ToolException output is handled by the build log
pass
except NotSupportedException, e:
except NotSupportedException as e:
# NotSupportedException is handled by the build log
pass
except Exception, e:
except Exception as e:
# Some other exception occurred, print the error message
print e
print(e)
if not library_build_success:
print "Failed to build library"
print("Failed to build library")
else:
# Build all the tests
@ -252,9 +254,9 @@ if __name__ == '__main__':
try:
with open(options.test_spec, 'w') as f:
f.write(json.dumps(test_spec_data, indent=2))
except IOError, e:
print "[ERROR] Error writing test spec to file"
print e
except IOError as e:
print("[ERROR] Error writing test spec to file")
print(e)
# If a path to a JUnit build report spec is provided, write it to a file
if options.build_report_junit:
@ -264,7 +266,7 @@ if __name__ == '__main__':
# Print memory map summary on screen
if build_report:
print
print print_build_memory_usage(build_report)
print(print_build_memory_usage(build_report))
print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
status = print_report_exporter.report(build_report)
@ -276,13 +278,13 @@ if __name__ == '__main__':
else:
sys.exit(1)
except KeyboardInterrupt, e:
print "\n[CTRL+c] exit"
except ConfigException, e:
except KeyboardInterrupt as e:
print("\n[CTRL+c] exit")
except ConfigException as e:
# Catching ConfigException here to prevent a traceback
print "[ERROR] %s" % str(e)
except Exception,e:
print("[ERROR] %s" % str(e))
except Exception as e:
import traceback
traceback.print_exc(file=sys.stdout)
print "[ERROR] %s" % str(e)
print("[ERROR] %s" % str(e))
sys.exit(1)

View File

@ -59,7 +59,7 @@ class BuildApiTests(unittest.TestCase):
@patch('os.mkdir')
@patch('tools.toolchains.exists', return_value=True)
@patch('tools.toolchains.mbedToolchain.dump_build_profile')
@patch('tools.utils.run_cmd', return_value=("", "", 0))
@patch('tools.utils.run_cmd', return_value=(b'', b'', 0))
def test_always_complete_build(self, *_):
with MagicMock() as notify:
toolchain = prepare_toolchain(self.src_paths, self.build_path, self.target,

View File

@ -40,7 +40,7 @@ def compare_config(cfg, expected):
except KeyError:
return "Unexpected key '%s' in configuration data" % k
for k in expected:
if k not in ["expected_macros", "expected_features"] + cfg.keys():
if k not in ["expected_macros", "expected_features"] + list(cfg.keys()):
return "Expected key '%s' was not found in configuration data" % k
return ""
@ -85,7 +85,7 @@ def test_config(name):
if expected_features is not None:
assert sorted(expected_features) == sorted(features)
except ConfigException as e:
err_msg = e.message
err_msg = str(e)
if "exception_msg" not in expected:
assert not(err_msg), "Unexpected Error: %s" % e
else:

View File

@ -18,8 +18,9 @@ sys.path.insert(0, ROOT)
from tools.build_api import get_mbed_official_release
from tools.targets import TARGET_MAP
from tools.export import EXPORTERS
from tools.toolchains import TOOLCHAINS
SUPPORTED_TOOLCHAINS = ["ARM", "IAR", "GCC_ARM", "ARMC6"]
SUPPORTED_TOOLCHAINS = list(TOOLCHAINS - set([u'uARM']))
SUPPORTED_IDES = [exp for exp in EXPORTERS.keys() if exp != "cmsis" and exp != "zip"]
@ -36,7 +37,7 @@ def print_list(lst):
def print_category(results, index, message):
summary = [example for key, summ in results.iteritems()
summary = [example for key, summ in results.items()
for example in summ[index]]
if all(len(s) == 0 for s in summary):
return
@ -221,7 +222,7 @@ def get_num_failures(results, export=False):
"""
num_failures = 0
for key, val in results.iteritems():
for key, val in results.items():
num_failures = num_failures + len(val[3])
if export:
num_failures += len(val[4])

View File

@ -16,6 +16,7 @@ limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
"""
from __future__ import print_function
import os
import re
@ -29,13 +30,15 @@ import datetime
import threading
import ctypes
import functools
from types import ListType
from colorama import Fore, Back, Style
from prettytable import PrettyTable
from copy import copy
from time import sleep, time
from Queue import Queue, Empty
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
from os.path import join, exists, basename, relpath
from threading import Thread, Lock
from multiprocessing import Pool, cpu_count
@ -50,7 +53,7 @@ from tools.utils import ToolException
from tools.utils import NotSupportedException
from tools.utils import construct_enum
from tools.memap import MemapParser
from tools.targets import TARGET_MAP
from tools.targets import TARGET_MAP, Target
import tools.test_configs as TestConfig
from tools.test_db import BaseDBAccess
from tools.build_api import build_project, build_mbed_libs, build_lib
@ -100,7 +103,7 @@ class ProcessObserver(Thread):
self.active = False
try:
self.proc.terminate()
except Exception, _:
except Exception:
pass
@ -120,12 +123,14 @@ class SingleTestExecutor(threading.Thread):
# Human readable summary
if not self.single_test.opts_suppress_summary:
# prints well-formed summary with results (SQL table like)
print self.single_test.generate_test_summary(test_summary, shuffle_seed)
print(self.single_test.generate_test_summary(test_summary,
shuffle_seed))
if self.single_test.opts_test_x_toolchain_summary:
# prints well-formed summary with results (SQL table like)
# table shows text x toolchain test result matrix
print self.single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
print "Completed in %.2f sec"% (elapsed_time)
print(self.single_test.generate_test_summary_by_target(
test_summary, shuffle_seed))
print("Completed in %.2f sec"% (elapsed_time))
class SingleTestRunner(object):
@ -360,31 +365,40 @@ class SingleTestRunner(object):
# print '=== %s::%s ===' % (target, toolchain)
# Let's build our test
if target not in TARGET_MAP:
print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Target platform not found'% (target))
print(self.logger.log_line(
self.logger.LogType.NOTIF,
'Skipped tests for %s target. Target platform not found' %
(target)))
continue
clean_mbed_libs_options = True if self.opts_goanna_for_mbed_sdk or clean or self.opts_clean else None
clean_mbed_libs_options = (self.opts_goanna_for_mbed_sdk or
self.opts_clean or clean)
profile = extract_profile(self.opts_parser, self.opts, toolchain)
stats_depth = self.opts.stats_depth or 2
try:
build_mbed_libs_result = build_mbed_libs(T,
toolchain,
clean=clean_mbed_libs_options,
verbose=self.opts_verbose,
jobs=self.opts_jobs,
report=build_report,
properties=build_properties,
build_profile=profile)
build_mbed_libs_result = build_mbed_libs(
T, toolchain,
clean=clean_mbed_libs_options,
verbose=self.opts_verbose,
jobs=self.opts_jobs,
report=build_report,
properties=build_properties,
build_profile=profile)
if not build_mbed_libs_result:
print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Toolchain %s is not yet supported for this target'% (T.name, toolchain))
print(self.logger.log_line(
self.logger.LogType.NOTIF,
'Skipped tests for %s target. Toolchain %s is not '
'supported for this target'% (T.name, toolchain)))
continue
except ToolException:
print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain))
print(self.logger.log_line(
self.logger.LogType.ERROR,
'There were errors while building MBED libs for %s using %s'
% (target, toolchain)))
continue
build_dir = join(BUILD_DIR, "test", target, toolchain)
@ -402,16 +416,22 @@ class SingleTestRunner(object):
if self.db_logger:
self.db_logger.reconnect();
if self.db_logger.is_connected():
self.db_logger.update_build_id_info(self.db_logger_build_id, _shuffle_seed=self.shuffle_random_func())
self.db_logger.update_build_id_info(
self.db_logger_build_id,
_shuffle_seed=self.shuffle_random_func())
self.db_logger.disconnect();
if self.db_logger:
self.db_logger.reconnect();
if self.db_logger.is_connected():
# Update MUTs and Test Specification in database
self.db_logger.update_build_id_info(self.db_logger_build_id, _muts=self.muts, _test_spec=self.test_spec)
self.db_logger.update_build_id_info(
self.db_logger_build_id,
_muts=self.muts, _test_spec=self.test_spec)
# Update Extra information in database (some options passed to test suite)
self.db_logger.update_build_id_info(self.db_logger_build_id, _extra=json.dumps(self.dump_options()))
self.db_logger.update_build_id_info(
self.db_logger_build_id,
_extra=json.dumps(self.dump_options()))
self.db_logger.disconnect();
valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids, self.opts_include_non_automated)
@ -449,7 +469,9 @@ class SingleTestRunner(object):
build_profile=profile)
except ToolException:
print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building library %s'% (lib_id))
print(self.logger.log_line(
self.logger.LogType.ERROR,
'There were errors while building library %s' % lib_id))
continue
@ -491,23 +513,29 @@ class SingleTestRunner(object):
project_description=test.get_description(),
build_profile=profile, stats_depth=stats_depth)
except Exception, e:
except Exception as e:
project_name_str = project_name if project_name is not None else test_id
test_result = self.TEST_RESULT_FAIL
if isinstance(e, ToolException):
print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building project %s'% (project_name_str))
print(self.logger.log_line(
self.logger.LogType.ERROR,
'There were errors while building project %s' %
project_name_str))
test_result = self.TEST_RESULT_BUILD_FAILED
elif isinstance(e, NotSupportedException):
print self.logger.log_line(self.logger.LogType.INFO, 'The project %s is not supported'% (project_name_str))
print(self.logger.log_line(
self.logger.LogType.INFO,
'Project %s is not supported' % project_name_str))
test_result = self.TEST_RESULT_NOT_SUPPORTED
# Append test results to global test summary
self.test_summary.append(
(test_result, target, toolchain, test_id, test.get_description(), 0, 0, '-')
(test_result, target, toolchain, test_id,
test.get_description(), 0, 0, '-')
)
# Add detailed test result to test summary structure
@ -603,7 +631,7 @@ class SingleTestRunner(object):
# in separate threads do not collide.
# Inside execute_thread_slice() function function handle() will be called to
# get information about available MUTs (per target).
for target, toolchains in self.test_spec['targets'].iteritems():
for target, toolchains in self.test_spec['targets'].items():
self.test_suite_properties_ext[target] = {}
t = threading.Thread(target=self.execute_thread_slice, args = (q, target, toolchains, clean, test_ids, self.build_report, self.build_properties))
t.daemon = True
@ -614,7 +642,7 @@ class SingleTestRunner(object):
q.get() # t.join() would block some threads because we should not wait in any order for thread end
else:
# Serialized (not parallel) test execution
for target, toolchains in self.test_spec['targets'].iteritems():
for target, toolchains in self.test_spec['targets'].items():
if target not in self.test_suite_properties_ext:
self.test_suite_properties_ext[target] = {}
@ -642,23 +670,33 @@ class SingleTestRunner(object):
if self.opts_test_only_peripheral and not test.peripherals:
if self.opts_verbose_skipped_tests:
print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target))
print(self.logger.log_line(
self.logger.LogType.INFO,
'Common test skipped for target %s' % target))
continue
if self.opts_peripheral_by_names and test.peripherals and not len([i for i in test.peripherals if i in self.opts_peripheral_by_names]):
if (self.opts_peripheral_by_names and test.peripherals and
not any((i in self.opts_peripheral_by_names)
for i in test.peripherals)):
# We will skip tests not forced with -p option
if self.opts_verbose_skipped_tests:
print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target))
print(self.logger.log_line(
self.logger.LogType.INFO,
'Common test skipped for target %s' % target))
continue
if self.opts_test_only_common and test.peripherals:
if self.opts_verbose_skipped_tests:
print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral test skipped for target %s'% (target))
print(self.logger.log_line(
self.logger.LogType.INFO,
'Peripheral test skipped for target %s' % target))
continue
if not include_non_automated and not test.automated:
if self.opts_verbose_skipped_tests:
print self.logger.log_line(self.logger.LogType.INFO, 'Non automated test skipped for target %s'% (target))
print(self.logger.log_line(
self.logger.LogType.INFO,
'Non automated test skipped for target %s' % target))
continue
if test.is_supported(target, toolchain):
@ -673,9 +711,15 @@ class SingleTestRunner(object):
elif not self.is_peripherals_available(target, test.peripherals):
if self.opts_verbose_skipped_tests:
if test.peripherals:
print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral %s test skipped for target %s'% (",".join(test.peripherals), target))
print(self.logger.log_line(
self.logger.LogType.INFO,
'Peripheral %s test skipped for target %s' %
(",".join(test.peripherals), target)))
else:
print self.logger.log_line(self.logger.LogType.INFO, 'Test %s skipped for target %s'% (test_id, target))
print(self.logger.log_line(
self.logger.LogType.INFO,
'Test %s skipped for target %s' %
(test_id, target)))
continue
# The test has made it through all the filters, so add it to the valid tests list
@ -773,7 +817,7 @@ class SingleTestRunner(object):
result += "\n"
# Print result count
result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.iteritems()])
result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.items()])
shuffle_seed_text = "Shuffle Seed: %.*f\n"% (self.SHUFFLE_SEED_ROUND,
shuffle_seed if shuffle_seed else self.shuffle_random_seed)
result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
@ -812,7 +856,7 @@ class SingleTestRunner(object):
resutl_msg = ""
try:
os.remove(file_path)
except Exception, e:
except Exception as e:
resutl_msg = e
result = False
return result, resutl_msg
@ -828,7 +872,7 @@ class SingleTestRunner(object):
duration = data.get("duration", 10)
if mut is None:
print "Error: No Mbed available: MUT[%s]" % data['mcu']
print("Error: No Mbed available: MUT[%s]" % data['mcu'])
return None
mcu = mut['mcu']
@ -864,7 +908,7 @@ class SingleTestRunner(object):
break
if not found:
print "Error: mbed not found with MBEDLS: %s" % data['mcu']
print("Error: mbed not found with MBEDLS: %s" % data['mcu'])
return None
else:
mut = muts_list[1]
@ -895,7 +939,7 @@ class SingleTestRunner(object):
single_test_result = self.TEST_RESULT_NO_IMAGE
elapsed_time = 0
single_test_output = self.logger.log_line(self.logger.LogType.ERROR, 'Image file does not exist: %s'% image_path)
print single_test_output
print(single_test_output)
else:
# Host test execution
start_host_exec_time = time()
@ -930,8 +974,9 @@ class SingleTestRunner(object):
'copy_method' : _copy_method,
}
print self.print_test_result(single_test_result, target_name_unique, toolchain_name,
test_id, test_description, elapsed_time, single_timeout)
print(self.print_test_result(
single_test_result, target_name_unique, toolchain_name, test_id,
test_description, elapsed_time, single_timeout))
# Update database entries for ongoing test
if self.db_logger and self.db_logger.is_connected():
@ -972,7 +1017,7 @@ class SingleTestRunner(object):
# Find a suitable MUT:
mut = None
for id, m in self.muts.iteritems():
for id, m in self.muts.items():
if m['mcu'] == data['mcu']:
mut = m
handle_result = self.handle_mut(mut, data, target_name, toolchain_name, test_loops=test_loops)
@ -1027,7 +1072,7 @@ class SingleTestRunner(object):
"""
try:
c = obs.queue.get(block=True, timeout=0.5)
except Empty, _:
except Empty:
c = None
return c
@ -1060,7 +1105,6 @@ class SingleTestRunner(object):
result = property.groups()[0]
return result
# print "{%s} port:%s disk:%s" % (name, port, disk),
cmd = ["python",
'%s.py'% name,
'-d', disk,
@ -1083,8 +1127,8 @@ class SingleTestRunner(object):
cmd += ["-R", str(reset_tout)]
if verbose:
print Fore.MAGENTA + "Executing '" + " ".join(cmd) + "'" + Fore.RESET
print "Test::Output::Start"
print(Fore.MAGENTA + "Executing '" + " ".join(cmd) + "'" + Fore.RESET)
print("Test::Output::Start")
proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
obs = ProcessObserver(proc)
@ -1138,7 +1182,7 @@ class SingleTestRunner(object):
output.append(c)
if verbose:
print "Test::Output::Finish"
print("Test::Output::Finish")
# Stop test process
obs.stop()
@ -1150,7 +1194,7 @@ class SingleTestRunner(object):
"""
if peripherals is not None:
peripherals = set(peripherals)
for id, mut in self.muts.iteritems():
for id, mut in self.muts.items():
# Target MCU name check
if mut["mcu"] != target_mcu_name:
continue
@ -1205,9 +1249,10 @@ def show_json_file_format_error(json_spec_filename, line, column):
line_no = 1
for json_line in data_file:
if line_no + 5 >= line: # Print last few lines before error
print 'Line %d:\t'%line_no + json_line, # Prints line
print('Line %d:\t'%line_no + json_line)
if line_no == line:
print ' ' * len('Line %d:'%line_no) + '\t', '-' * (column-1) + '^'
print('%s\t%s^' % (' ' * len('Line %d:' % line_no),
'-' * (column - 1)))
break
line_no += 1
@ -1244,18 +1289,19 @@ def get_json_data_from_file(json_spec_filename, verbose=False):
result = json.load(data_file)
except ValueError as json_error_msg:
result = None
print 'JSON file %s parsing failed. Reason: %s' % (json_spec_filename, json_error_msg)
print('JSON file %s parsing failed. Reason: %s' %
(json_spec_filename, json_error_msg))
# We can print where error occurred inside JSON file if we can parse exception msg
json_format_defect_pos = json_format_error_defect_pos(str(json_error_msg))
if json_format_defect_pos is not None:
line = json_format_defect_pos[0]
column = json_format_defect_pos[1]
print
print()
show_json_file_format_error(json_spec_filename, line, column)
except IOError as fileopen_error_msg:
print 'JSON file %s not opened. Reason: %s'% (json_spec_filename, fileopen_error_msg)
print
print('JSON file %s not opened. Reason: %s\n'%
(json_spec_filename, fileopen_error_msg))
if verbose and result:
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(result)
@ -1290,7 +1336,7 @@ def print_muts_configuration_from_json(json_data, join_delim=", ", platform_filt
if add_row:
for col in muts_info_cols:
cell_val = mut_info[col] if col in mut_info else None
if type(cell_val) == ListType:
if isinstance(cell_val, list):
cell_val = join_delim.join(cell_val)
row.append(cell_val)
pt.add_row(row)
@ -1423,7 +1469,7 @@ def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=','
for col in test_properties:
col_value = test[col]
if type(test[col]) == ListType:
if isinstance(test[col], list):
col_value = join_delim.join(test[col])
elif test[col] == None:
col_value = "-"
@ -1502,13 +1548,14 @@ def singletest_in_cli_mode(single_test):
# Human readable summary
if not single_test.opts_suppress_summary:
# prints well-formed summary with results (SQL table like)
print single_test.generate_test_summary(test_summary, shuffle_seed)
print(single_test.generate_test_summary(test_summary, shuffle_seed))
if single_test.opts_test_x_toolchain_summary:
# prints well-formed summary with results (SQL table like)
# table shows text x toolchain test result matrix
print single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
print(single_test.generate_test_summary_by_target(test_summary,
shuffle_seed))
print "Completed in %.2f sec"% (elapsed_time)
print("Completed in %.2f sec" % elapsed_time)
print
# Write summary of the builds
@ -1628,19 +1675,19 @@ def detect_database_verbose(db_url):
# Let's try to connect
db_ = factory_db_logger(db_url)
if db_ is not None:
print "Connecting to database '%s'..."% db_url,
print("Connecting to database '%s'..." % db_url)
db_.connect(host, username, password, db_name)
if db_.is_connected():
print "ok"
print "Detecting database..."
print db_.detect_database(verbose=True)
print "Disconnecting...",
print("ok")
print("Detecting database...")
print(db_.detect_database(verbose=True))
print("Disconnecting...")
db_.disconnect()
print "done"
print("done")
else:
print "Database type '%s' unknown"% db_type
print("Database type '%s' unknown" % db_type)
else:
print "Parse error: '%s' - DB Url error"% (db_url)
print("Parse error: '%s' - DB Url error" % db_url)
def get_module_avail(module_name):
@ -2089,13 +2136,14 @@ def print_tests(tests, format="list", sort=True):
if format == "list":
for test_name in sorted(tests.keys()):
test_path = tests[test_name][0]
print "Test Case:"
print " Name: %s" % test_name
print " Path: %s" % test_path
print("Test Case:")
print(" Name: %s" % test_name)
print(" Path: %s" % test_path)
elif format == "json":
print json.dumps({test_name: test_path[0] for test_name, test_paths in tests}, indent=2)
print(json.dumps({test_name: test_path[0] for test_name, test_paths
in tests}, indent=2))
else:
print "Unknown format '%s'" % format
print("Unknown format '%s'" % format)
sys.exit(1)
def norm_relative_path(path, start):
@ -2130,7 +2178,7 @@ def build_test_worker(*args, **kwargs):
}
# Use parent TOOLCHAIN_PATHS variable
for key, value in kwargs['toolchain_paths'].iteritems():
for key, value in kwargs['toolchain_paths'].items():
TOOLCHAIN_PATHS[key] = value
del kwargs['toolchain_paths']
@ -2141,11 +2189,11 @@ def build_test_worker(*args, **kwargs):
ret['bin_file'] = bin_file
ret['kwargs'] = kwargs
except NotSupportedException, e:
except NotSupportedException as e:
ret['reason'] = e
except ToolException, e:
except ToolException as e:
ret['reason'] = e
except KeyboardInterrupt, e:
except KeyboardInterrupt as e:
ret['reason'] = e
except:
# Print unhandled exceptions here
@ -2169,7 +2217,7 @@ def build_tests(tests, base_source_paths, build_path, target, toolchain_name,
execution_directory = "."
base_path = norm_relative_path(build_path, execution_directory)
target_name = target if isinstance(target, str) else target.name
target_name = target.name if isinstance(target, Target) else target
cfg, _, _ = get_config(base_source_paths, target_name, toolchain_name)
baud_rate = 9600
@ -2190,8 +2238,8 @@ def build_tests(tests, base_source_paths, build_path, target, toolchain_name,
jobs_count = int(jobs if jobs else cpu_count())
p = Pool(processes=jobs_count)
results = []
for test_name, test_paths in tests.iteritems():
if type(test_paths) != ListType:
for test_name, test_paths in tests.items():
if not isinstance(test_paths, list):
test_paths = [test_paths]
test_build_path = os.path.join(build_path, test_paths[0])
@ -2265,8 +2313,8 @@ def build_tests(tests, base_source_paths, build_path, target, toolchain_name,
test_key = worker_result['kwargs']['project_id'].upper()
if report:
print report[target_name][toolchain_name][test_key][0][0]['output'].rstrip()
print 'Image: %s\n' % bin_file
print(report[target_name][toolchain_name][test_key][0][0]['output'].rstrip())
print('Image: %s\n' % bin_file)
except:
if p._taskqueue.queue:

View File

@ -304,11 +304,13 @@ class ReportExporter():
def exporter_print_helper(self, array, print_log=False):
for item in array:
print " * %s::%s::%s" % (item["target_name"], item["toolchain_name"], item["id"])
print(" * %s::%s::%s" % (item["target_name"],
item["toolchain_name"],
item["id"]))
if print_log:
log_lines = item["output"].split("\n")
for log_line in log_lines:
print " %s" % log_line
print(" %s" % log_line)
def exporter_print(self, test_result_ext, print_log_for_failures=False):
""" Export test results in print format.
@ -343,15 +345,15 @@ class ReportExporter():
raise Exception("'test_run' did not have a 'result' value")
if successes:
print "\n\nBuild successes:"
print("\n\nBuild successes:")
self.exporter_print_helper(successes)
if skips:
print "\n\nBuild skips:"
print("\n\nBuild skips:")
self.exporter_print_helper(skips)
if failures:
print "\n\nBuild failures:"
print("\n\nBuild failures:")
self.exporter_print_helper(failures, print_log=print_log_for_failures)
return False
else:
@ -410,5 +412,5 @@ class ReportExporter():
result += "\n"
# Print result count
result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.iteritems()])
result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.items()])
return result

View File

@ -15,14 +15,9 @@ See the License for the specific language governing permissions and
limitations under the License.
"""
from tools.paths import *
from tools.data.support import *
from tools.data.support import DEFAULT_SUPPORT, CORTEX_ARM_SUPPORT
from argparse import ArgumentTypeError
from utils import columnate
try:
import tools.private_settings as ps
except:
ps = object()
from tools.utils import columnate
TEST_CMSIS_LIB = join(TEST_DIR, "cmsis", "lib")
TEST_MBED_LIB = join(TEST_DIR, "mbed", "env")
@ -916,9 +911,7 @@ def test_known(string):
raise ArgumentTypeError("{0} does not index a test. The accepted range is 0 to {1}\nThe test mapping is:\n{2}".format(i, len(TEST_MAP) - 1, columnate([str(i) + ":" + t['id'] for i,t in zip(range(len(TESTS)), TESTS)])))
def test_name_known(string):
if string not in TEST_MAP.keys() and \
(getattr(ps, "test_alias", None) is None or \
ps.test_alias.get(string, "") not in TEST_MAP.keys()):
if string not in TEST_MAP.keys():
raise ArgumentTypeError("Program with name '{0}' not found. Supported tests are: \n{1}".format(string, columnate([t['id'] for t in TESTS])))
return TEST_MAP[string].n

View File

@ -14,29 +14,31 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function, division, absolute_import
import re
import sys
from os import stat, walk, getcwd, sep, remove
from copy import copy
from time import time, sleep
from types import ListType
from shutil import copyfile
from os.path import join, splitext, exists, relpath, dirname, basename, split, abspath, isfile, isdir, normcase
from os.path import (join, splitext, exists, relpath, dirname, basename, split,
abspath, isfile, isdir, normcase)
from itertools import chain
from inspect import getmro
from copy import deepcopy
from abc import ABCMeta, abstractmethod
from distutils.spawn import find_executable
from multiprocessing import Pool, cpu_count
from tools.utils import run_cmd, mkdir, rel_path, ToolException, NotSupportedException, split_path, compile_worker
from tools.settings import MBED_ORG_USER
import tools.hooks as hooks
from tools.memap import MemapParser
from hashlib import md5
import fnmatch
from ..utils import (run_cmd, mkdir, rel_path, ToolException,
NotSupportedException, split_path, compile_worker)
from ..settings import MBED_ORG_USER
from .. import hooks
from ..memap import MemapParser
#Disables multiprocessing if set to higher number than the host machine CPUs
CPU_COUNT_MIN = 1
@ -80,7 +82,7 @@ class LazyDict(dict):
def __str__(self):
return "Lazy{%s}" % (
", ".join("%r: %r" % (k, v) for k, v in
chain(self.eager.iteritems(), ((k, "not evaluated")
chain(self.eager.items(), ((k, "not evaluated")
for k in self.lazy))))
def update(self, other):
@ -90,10 +92,10 @@ class LazyDict(dict):
else:
self.eager.update(other)
def iteritems(self):
def items(self):
"""Warning: This forces the evaluation all of the items in this LazyDict
that are iterated over."""
for k, v in self.eager.iteritems():
for k, v in self.eager.items():
yield k, v
for k in self.lazy.keys():
yield k, self[k]
@ -103,11 +105,11 @@ class LazyDict(dict):
Does no computation now. Instead the comuptation is performed when a
consumer attempts to access a value in this LazyDict"""
new_lazy = {}
for k, f in self.lazy.iteritems():
for k, f in self.lazy.items():
def closure(f=f):
return fn(f())
new_lazy[k] = closure
for k, v in self.eager.iteritems():
for k, v in self.eager.items():
def closure(v=v):
return fn(v)
new_lazy[k] = closure
@ -221,13 +223,13 @@ class Resources:
"""
count = 0
dupe_dict, dupe_headers = self._collect_duplicates(dict(), dict())
for objname, filenames in dupe_dict.iteritems():
for objname, filenames in dupe_dict.items():
if len(filenames) > 1:
count+=1
toolchain.tool_error(
"Object file %s.o is not unique! It could be made from: %s"\
% (objname, " ".join(filenames)))
for headername, locations in dupe_headers.iteritems():
for headername, locations in dupe_headers.items():
if len(locations) > 1:
count+=1
toolchain.tool_error(
@ -472,7 +474,7 @@ class mbedToolchain:
if msg:
if not silent:
print msg
print(msg)
self.output += msg + "\n"
def print_notify_verbose(self, event, silent=False):
@ -489,7 +491,7 @@ class mbedToolchain:
event['toolchain_name'] = event['toolchain_name'].upper() if event['toolchain_name'] else "Unknown"
msg = '[%(severity)s] %(target_name)s::%(toolchain_name)s::%(file)s@%(line)s: %(message)s' % event
if not silent:
print msg
print(msg)
self.output += msg + "\n"
elif event['type'] == 'progress':
@ -590,7 +592,7 @@ class mbedToolchain:
if not d or not exists(d):
return True
if not self.stat_cache.has_key(d):
if d not in self.stat_cache:
self.stat_cache[d] = stat(d).st_mtime
if self.stat_cache[d] >= target_mod_time:
@ -793,14 +795,15 @@ class mbedToolchain:
def copy_files(self, files_paths, trg_path, resources=None, rel_path=None):
# Handle a single file
if type(files_paths) != ListType: files_paths = [files_paths]
if not isinstance(files_paths, list):
files_paths = [files_paths]
for source in files_paths:
if source is None:
files_paths.remove(source)
for source in files_paths:
if resources is not None and resources.file_basepath.has_key(source):
if resources is not None and source in resources.file_basepath:
relative_path = relpath(source, resources.file_basepath[source])
elif rel_path is not None:
relative_path = relpath(source, rel_path)
@ -830,7 +833,7 @@ class mbedToolchain:
def get_inc_file(self, includes):
include_file = join(self.build_dir, ".includes_%s.txt" % self.inc_md5)
if not exists(include_file):
with open(include_file, "wb") as f:
with open(include_file, "w") as f:
cmd_list = []
for c in includes:
if c:
@ -846,7 +849,7 @@ class mbedToolchain:
# ARM, GCC, IAR cross compatible
def get_link_file(self, cmd):
link_file = join(self.build_dir, ".link_files.txt")
with open(link_file, "wb") as f:
with open(link_file, "w") as f:
cmd_list = []
for c in cmd:
if c:
@ -862,7 +865,7 @@ class mbedToolchain:
# ARM, GCC, IAR cross compatible
def get_arch_file(self, objects):
archive_file = join(self.build_dir, ".archive_files.txt")
with open(archive_file, "wb") as f:
with open(archive_file, "w") as f:
o_list = []
for o in objects:
o_list.append('"%s"' % o)
@ -891,7 +894,7 @@ class mbedToolchain:
# Sort include paths for consistency
inc_paths = sorted(set(inc_paths))
# Unique id of all include paths
self.inc_md5 = md5(' '.join(inc_paths)).hexdigest()
self.inc_md5 = md5(' '.join(inc_paths).encode('utf-8')).hexdigest()
objects = []
queue = []
@ -967,7 +970,7 @@ class mbedToolchain:
sleep(0.01)
pending = 0
for r in results:
if r._ready is True:
if r.ready():
try:
result = r.get()
results.remove(r)
@ -982,7 +985,7 @@ class mbedToolchain:
res['command']
])
objects.append(result['object'])
except ToolException, err:
except ToolException as err:
if p._taskqueue.queue:
p._taskqueue.queue.clear()
sleep(0.5)
@ -1010,7 +1013,7 @@ class mbedToolchain:
dep_path = base + '.d'
try:
deps = self.parse_dependencies(dep_path) if (exists(dep_path)) else []
except IOError, IndexError:
except (IOError, IndexError):
deps = []
config_file = ([self.config.app_config_location]
if self.config.app_config_location else [])
@ -1054,7 +1057,7 @@ class mbedToolchain:
buff[0] = re.sub('^(.*?)\: ', '', buff[0])
for line in buff:
filename = line.replace('\\\n', '').strip()
if file:
if filename:
filename = filename.replace('\\ ', '\a')
dependencies.extend(((self.CHROOT if self.CHROOT else '') +
f.replace('\a', ' '))
@ -1177,7 +1180,7 @@ class mbedToolchain:
# ANY CHANGE OF PARAMETERS OR RETURN VALUES WILL BREAK COMPATIBILITY
def debug(self, message):
if self.VERBOSE:
if type(message) is ListType:
if isinstance(message, list):
message = ' '.join(message)
message = "[DEBUG] " + message
self.notify({'type': 'debug', 'message': message})
@ -1274,7 +1277,7 @@ class mbedToolchain:
self.config_file = join(self.build_dir, self.MBED_CONFIG_FILE_NAME)
# If the file exists, read its current content in prev_data
if exists(self.config_file):
with open(self.config_file, "rt") as f:
with open(self.config_file, "r") as f:
prev_data = f.read()
else:
prev_data = None
@ -1288,12 +1291,12 @@ class mbedToolchain:
self.config_file = None # this means "config file not present"
changed = True
elif crt_data != prev_data: # different content of config file
with open(self.config_file, "wt") as f:
with open(self.config_file, "w") as f:
f.write(crt_data)
changed = True
else: # a previous mbed_config.h does not exist
if crt_data is not None: # there's configuration data available
with open(self.config_file, "wt") as f:
with open(self.config_file, "w") as f:
f.write(crt_data)
changed = True
else:
@ -1318,7 +1321,7 @@ class mbedToolchain:
@staticmethod
def _overwrite_when_not_equal(filename, content):
if not exists(filename) or content != open(filename).read():
with open(filename, "wb") as out:
with open(filename, "w") as out:
out.write(content)
@staticmethod
@ -1605,11 +1608,11 @@ from tools.toolchains.gcc import GCC_ARM
from tools.toolchains.iar import IAR
TOOLCHAIN_CLASSES = {
'ARM': ARM_STD,
'uARM': ARM_MICRO,
'ARMC6': ARMC6,
'GCC_ARM': GCC_ARM,
'IAR': IAR
u'ARM': ARM_STD,
u'uARM': ARM_MICRO,
u'ARMC6': ARMC6,
u'GCC_ARM': GCC_ARM,
u'IAR': IAR
}
TOOLCHAINS = set(TOOLCHAIN_CLASSES.keys())

View File

@ -181,10 +181,10 @@ def format_project_run_data(project_run_data, limit):
ts_data = prep_ts_data()
ts_data['projectRuns'] = []
for hostOs_name, hostOs in project_run_data['projectRuns'].iteritems():
for platform_name, platform in hostOs.iteritems():
for toolchain_name, toolchain in platform.iteritems():
for project_name, project in toolchain.iteritems():
for hostOs_name, hostOs in project_run_data['projectRuns'].items():
for platform_name, platform in hostOs.items():
for toolchain_name, toolchain in platform.items():
for project_name, project in toolchain.items():
if current_limit_count >= limit:
finish_ts_data(ts_data, project_run_data)
all_ts_data.append(ts_data)

View File

@ -14,6 +14,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function, division, absolute_import
import sys
import inspect
import os
@ -30,6 +31,11 @@ from collections import OrderedDict
import logging
from intelhex import IntelHex
try:
unicode
except NameError:
unicode = str
def remove_if_in(lst, thing):
if thing in lst:
lst.remove(thing)
@ -66,14 +72,14 @@ def cmd(command, check=True, verbose=False, shell=False, cwd=None):
"""A wrapper to run a command as a blocking job"""
text = command if shell else ' '.join(command)
if verbose:
print text
print(text)
return_code = call(command, shell=shell, cwd=cwd)
if check and return_code != 0:
raise Exception('ERROR %d: "%s"' % (return_code, text))
def run_cmd(command, work_dir=None, chroot=None, redirect=False):
"""Run a command in the forground
"""Run a command in the foreground
Positional arguments:
command - the command to run
@ -100,7 +106,7 @@ def run_cmd(command, work_dir=None, chroot=None, redirect=False):
stderr=STDOUT if redirect else PIPE, cwd=work_dir)
_stdout, _stderr = process.communicate()
except OSError:
print "[OS ERROR] Command: "+(' '.join(command))
print("[OS ERROR] Command: "+(' '.join(command)))
raise
return _stdout, _stderr, process.returncode
@ -318,13 +324,13 @@ def check_required_modules(required_modules, verbose=True):
except ImportError as exc:
not_installed_modules.append(module_name)
if verbose:
print "Error: %s" % exc
print("Error: %s" % exc)
if verbose:
if not_installed_modules:
print ("Warning: Module(s) %s not installed. Please install " + \
"required module(s) before using this script.")\
% (', '.join(not_installed_modules))
print("Warning: Module(s) %s not installed. Please install "
"required module(s) before using this script."
% (', '.join(not_installed_modules)))
if not_installed_modules:
return False
@ -342,11 +348,11 @@ def dict_to_ascii(dictionary):
"""
if isinstance(dictionary, dict):
return OrderedDict([(dict_to_ascii(key), dict_to_ascii(value))
for key, value in dictionary.iteritems()])
for key, value in dictionary.items()])
elif isinstance(dictionary, list):
return [dict_to_ascii(element) for element in dictionary]
elif isinstance(dictionary, unicode):
return dictionary.encode('ascii')
return dictionary.encode('ascii').decode()
else:
return dictionary
@ -375,6 +381,8 @@ def argparse_type(casedness, prefer_hyphen=False):
the string, or the hyphens/underscores do not match the expected
style of the argument.
"""
if not isinstance(string, unicode):
string = string.decode()
if prefer_hyphen:
newstring = casedness(string).replace("_", "-")
else:
@ -393,10 +401,10 @@ def argparse_type(casedness, prefer_hyphen=False):
return middle
# short cuts for the argparse_type versions
argparse_uppercase_type = argparse_type(str.upper, False)
argparse_lowercase_type = argparse_type(str.lower, False)
argparse_uppercase_hyphen_type = argparse_type(str.upper, True)
argparse_lowercase_hyphen_type = argparse_type(str.lower, True)
argparse_uppercase_type = argparse_type(unicode.upper, False)
argparse_lowercase_type = argparse_type(unicode.lower, False)
argparse_uppercase_hyphen_type = argparse_type(unicode.upper, True)
argparse_lowercase_hyphen_type = argparse_type(unicode.lower, True)
def argparse_force_type(case):
""" validate that an argument passed in (as string) is a member of the list
@ -406,6 +414,8 @@ def argparse_force_type(case):
""" The parser type generator"""
def parse_type(string):
""" The parser type"""
if not isinstance(string, unicode):
string = string.decode()
for option in lst:
if case(string) == case(option):
return option
@ -416,8 +426,8 @@ def argparse_force_type(case):
return middle
# these two types convert the case of their arguments _before_ validation
argparse_force_uppercase_type = argparse_force_type(str.upper)
argparse_force_lowercase_type = argparse_force_type(str.lower)
argparse_force_uppercase_type = argparse_force_type(unicode.upper)
argparse_force_lowercase_type = argparse_force_type(unicode.lower)
def argparse_many(func):
""" An argument parser combinator that takes in an argument parser and