Merge pull request #2047 from PrzemekWirkus/devel_mmap_proj

[Tools] Add summary for test building
pull/2117/merge
Martin Kojtal 2016-07-07 09:35:19 +01:00 committed by GitHub
commit 9f33ba87b0
5 changed files with 186 additions and 109 deletions

View File

@ -176,7 +176,7 @@ def build_project(src_path, build_path, target, toolchain_name,
if report != None:
start = time()
# If project_id is specified, use that over the default name
id_name = project_id.upper() if project_id else name.upper()
description = project_description if project_description else name
@ -232,6 +232,7 @@ def build_project(src_path, build_path, target, toolchain_name,
cur_result["elapsed_time"] = end - start
cur_result["output"] = toolchain.get_output()
cur_result["result"] = "OK"
cur_result["memory_usage"] = toolchain.map_outputs
add_result_to_report(report, cur_result)
@ -294,7 +295,7 @@ def build_library(src_paths, build_path, target, toolchain_name,
if report != None:
start = time()
# If project_id is specified, use that over the default name
id_name = project_id.upper() if project_id else name.upper()
description = name
@ -377,7 +378,7 @@ def build_library(src_paths, build_path, target, toolchain_name,
toolchain.copy_files(resources.libraries, build_path, resources=resources)
if resources.linker_script:
toolchain.copy_files(resources.linker_script, build_path, resources=resources)
if resource.hex_files:
toolchain.copy_files(resources.hex_files, build_path, resources=resources)
@ -399,12 +400,12 @@ def build_library(src_paths, build_path, target, toolchain_name,
except Exception, e:
if report != None:
end = time()
if isinstance(e, ToolException):
cur_result["result"] = "FAIL"
elif isinstance(e, NotSupportedException):
cur_result["result"] = "NOT_SUPPORTED"
cur_result["elapsed_time"] = end - start
toolchain_output = toolchain.get_output()
@ -428,7 +429,7 @@ def build_lib(lib_id, target, toolchain_name, options=None, verbose=False, clean
if not lib.is_supported(target, toolchain_name):
print 'Library "%s" is not yet supported on target %s with toolchain %s' % (lib_id, target.name, toolchain)
return False
# We need to combine macros from parameter list with macros from library definition
MACROS = lib.macros if lib.macros else []
if macros:
@ -441,7 +442,7 @@ def build_lib(lib_id, target, toolchain_name, options=None, verbose=False, clean
dependencies_paths = lib.dependencies
inc_dirs = lib.inc_dirs
inc_dirs_ext = lib.inc_dirs_ext
""" src_path: the path of the source directory
build_path: the path of the build directory
target: ['LPC1768', 'LPC11U24', 'LPC2368']
@ -522,7 +523,7 @@ def build_lib(lib_id, target, toolchain_name, options=None, verbose=False, clean
# Copy Headers
for resource in resources:
toolchain.copy_files(resource.headers, build_path, resources=resource)
dependencies_include_dir.extend(toolchain.scan_resources(build_path).inc_dirs)
# Compile Sources
@ -716,7 +717,7 @@ def mcu_toolchain_matrix(verbose_html=False, platform_filter=None):
perm_counter += 1
else:
text = "-"
row.append(text)
pt.add_row(row)
@ -942,6 +943,49 @@ def print_build_results(result_list, build_name):
result += "\n"
return result
def print_build_memory_usage_results(report):
    """ Generate a result table with memory usage values for build results.

    Aggregates (puts together) memory usage summaries obtained from
    MemapParser.get_memory_summary() during the build procedure.

    @param report Report structure generated during the build procedure,
                  keyed as report[target][toolchain][name] -> list of dicts
    @return Multi-line string containing a pretty-printed memory usage table
    """
    from prettytable import PrettyTable
    columns_text = ['name', 'target', 'toolchain']
    columns_int = ['static_ram', 'stack', 'heap', 'total_ram', 'total_flash']
    table = PrettyTable(columns_text + columns_int)

    for col in columns_text:
        table.align[col] = 'l'

    for col in columns_int:
        table.align[col] = 'r'

    for target in report:
        for toolchain in report[target]:
            for name in report[target][toolchain]:
                for dlist in report[target][toolchain][name]:
                    for dlistelem in dlist:
                        # Get 'memory_usage' record and build table with statistics
                        record = dlist[dlistelem]
                        if 'memory_usage' in record and record['memory_usage']:
                            # The summary is in the last record of the
                            # 'memory_usage' section; hoist it once instead of
                            # repeating the deep lookup for every column.
                            summary = record['memory_usage'][-1]['summary']
                            row = [
                                record['description'],
                                record['target_name'],
                                record['toolchain_name'],
                                summary['static_ram'],
                                summary['stack'],
                                summary['heap'],
                                summary['total_ram'],
                                summary['total_flash'],
                            ]
                            table.add_row(row)

    result = "Memory map breakdown for built projects (values in Bytes):\n"
    result += table.get_string(sortby='name')
    return result
def write_build_report(build_report, template_filename, filename):
build_report_failing = []
build_report_passing = []
@ -963,14 +1007,14 @@ def write_build_report(build_report, template_filename, filename):
def scan_for_source_paths(path, exclude_paths=None):
ignorepatterns = []
paths = []
def is_ignored(file_path):
    # True when file_path matches any collected ignore pattern
    return any(fnmatch.fnmatch(file_path, pattern)
               for pattern in ignorepatterns)
""" os.walk(top[, topdown=True[, onerror=None[, followlinks=False]]])
When topdown is True, the caller can modify the dirnames list in-place
(perhaps using del or slice assignment), and walk() will only recurse into

View File

@ -26,9 +26,9 @@ class MemapParser(object):
self.misc_flash_sections = ('.interrupts', '.flash_config')
self.other_sections = ('.interrupts_ram', '.init', '.ARM.extab', \
'.ARM.exidx', '.ARM.attributes', '.eh_frame', \
'.init_array', '.fini_array', '.jcr', '.stab', \
self.other_sections = ('.interrupts_ram', '.init', '.ARM.extab',
'.ARM.exidx', '.ARM.attributes', '.eh_frame',
'.init_array', '.fini_array', '.jcr', '.stab',
'.stabstr', '.ARM.exidx', '.ARM')
# sections to print info (generic for all toolchains)
@ -43,6 +43,9 @@ class MemapParser(object):
# list of all object files and mappting to module names
self.object_to_module = dict()
# Memory usage summary structure
self.mem_summary = dict()
def module_add(self, module_name, size, section):
"""
Adds a module / section to the list
@ -67,7 +70,7 @@ class MemapParser(object):
return i # should name of the section (assuming it's a known one)
if line.startswith('.'):
return 'unknown' # all others are clasified are unknown
return 'unknown' # all others are classified are unknown
else:
return False # everything else, means no change in section
@ -363,11 +366,12 @@ class MemapParser(object):
# Create table
columns = ['Module']
for i in list(self.print_sections):
columns.append(i)
columns.extend(self.print_sections)
table = PrettyTable(columns)
table.align["Module"] = "l"
for col in self.print_sections:
table.align[col] = 'r'
for i in list(self.print_sections):
table.align[i] = 'r'
@ -388,8 +392,12 @@ class MemapParser(object):
for k in self.print_sections:
row.append(self.modules[i][k])
json_obj.append({"module":i, "size":{\
k:self.modules[i][k] for k in self.print_sections}})
json_obj.append({
"module":i,
"size":{
k:self.modules[i][k] for k in self.print_sections
}
})
table.add_row(row)
@ -399,16 +407,19 @@ class MemapParser(object):
table.add_row(subtotal_row)
if export_format == 'json':
json_obj.append({\
'summary':{\
'total_static_ram':(subtotal['.data']+subtotal['.bss']),\
'allocated_heap':(subtotal['.heap']),\
'allocated_stack':(subtotal['.stack']),\
'total_ram':(subtotal['.data']+subtotal['.bss']+subtotal['.heap']+subtotal['.stack']),\
'total_flash':(subtotal['.text']+subtotal['.data']+misc_flash_mem),}})
summary = {
'summary':{
'static_ram':(subtotal['.data']+subtotal['.bss']),
'heap':(subtotal['.heap']),
'stack':(subtotal['.stack']),
'total_ram':(subtotal['.data']+subtotal['.bss']+subtotal['.heap']+subtotal['.stack']),
'total_flash':(subtotal['.text']+subtotal['.data']+misc_flash_mem),
}
}
file_desc.write(json.dumps(json_obj, indent=4))
if export_format == 'json':
json_to_file = json_obj + [summary]
file_desc.write(json.dumps(json_to_file, indent=4))
file_desc.write('\n')
elif export_format == 'csv-ci': # CSV format for the CI system
@ -467,33 +478,38 @@ class MemapParser(object):
if file_desc is not sys.stdout:
file_desc.close()
self.mem_summary = json_obj + [summary]
return True
def get_memory_summary(self):
    """! Accessor for the memory usage summary structure.

    Only populated after self.generate_output('json') has been called;
    before that it holds the initial empty value.
    @return Memory summary object built during JSON output generation
    """
    return self.mem_summary
def parse(self, mapfile, toolchain):
"""
Parse and decode map file depending on the toolchain
"""
result = True
try:
file_input = open(mapfile, 'rt')
with open(mapfile, 'rt') as file_input:
if toolchain == "ARM" or toolchain == "ARM_STD" or toolchain == "ARM_MICRO":
self.search_objects(os.path.abspath(mapfile), "ARM")
self.parse_map_file_armcc(file_input)
elif toolchain == "GCC_ARM":
self.parse_map_file_gcc(file_input)
elif toolchain == "IAR":
self.search_objects(os.path.abspath(mapfile), toolchain)
self.parse_map_file_iar(file_input)
else:
result = False
except IOError as error:
print "I/O error({0}): {1}".format(error.errno, error.strerror)
return False
if toolchain == "ARM" or toolchain == "ARM_STD" or toolchain == "ARM_MICRO":
self.search_objects(os.path.abspath(mapfile), "ARM")
self.parse_map_file_armcc(file_input)
elif toolchain == "GCC_ARM":
self.parse_map_file_gcc(file_input)
elif toolchain == "IAR":
self.search_objects(os.path.abspath(mapfile), toolchain)
self.parse_map_file_iar(file_input)
else:
return False
file_input.close()
return True
result = False
return result
def main():

View File

@ -29,6 +29,7 @@ sys.path.insert(0, ROOT)
from tools.test_api import test_path_to_name, find_tests, print_tests, build_tests, test_spec_from_test_builds
from tools.options import get_default_options_parser
from tools.build_api import build_project, build_library
from tools.build_api import print_build_memory_usage_results
from tools.targets import TARGET_MAP
from tools.utils import mkdir, ToolException, NotSupportedException
from tools.test_exporters import ReportExporter, ResultExporterType
@ -37,12 +38,12 @@ if __name__ == '__main__':
try:
# Parse Options
parser = get_default_options_parser()
parser.add_option("-D", "",
action="append",
dest="macros",
help="Add a macro definition")
parser.add_option("-j", "--jobs",
type="int",
dest="jobs",
@ -60,25 +61,25 @@ if __name__ == '__main__':
parser.add_option("-p", "--paths", dest="paths",
default=None, help="Limit the tests to those within the specified comma separated list of paths")
format_choices = ["list", "json"]
format_default_choice = "list"
format_help = "Change the format in which tests are listed. Choices include: %s. Default: %s" % (", ".join(format_choices), format_default_choice)
parser.add_option("-f", "--format", type="choice", dest="format",
choices=format_choices, default=format_default_choice, help=format_help)
parser.add_option("--continue-on-build-fail", action="store_true", dest="continue_on_build_fail",
default=None, help="Continue trying to build all tests if a build failure occurs")
parser.add_option("-n", "--names", dest="names",
default=None, help="Limit the tests to a comma separated list of names")
parser.add_option("--test-spec", dest="test_spec",
default=None, help="Destination path for a test spec file that can be used by the Greentea automated test tool")
parser.add_option("--build-report-junit", dest="build_report_junit",
default=None, help="Destination path for a build report in the JUnit xml format")
parser.add_option("-v", "--verbose",
action="store_true",
dest="verbose",
@ -87,24 +88,24 @@ if __name__ == '__main__':
(options, args) = parser.parse_args()
# Filter tests by path if specified
# Filter tests by path if specified
if options.paths:
all_paths = options.paths.split(",")
else:
all_paths = ["."]
all_tests = {}
tests = {}
# Find all tests in the relevant paths
for path in all_paths:
all_tests.update(find_tests(path))
# Filter tests by name if specified
if options.names:
all_names = options.names.split(",")
all_names = [x.lower() for x in all_names]
for name in all_names:
if any(fnmatch.fnmatch(testname, name) for testname in all_tests):
for testname, test in all_tests.items():
@ -124,16 +125,16 @@ if __name__ == '__main__':
if not options.build_dir:
print "[ERROR] You must specify a build path"
sys.exit(1)
base_source_paths = options.source_dir
# Default base source path is the current directory
if not base_source_paths:
base_source_paths = ['.']
target = options.mcu
build_report = {}
build_properties = {}
@ -150,7 +151,7 @@ if __name__ == '__main__':
macros=options.macros,
verbose=options.verbose,
archive=False)
library_build_success = True
except ToolException, e:
# ToolException output is handled by the build log
@ -161,7 +162,7 @@ if __name__ == '__main__':
except Exception, e:
# Some other exception occurred, print the error message
print e
if not library_build_success:
print "Failed to build library"
else:
@ -175,32 +176,37 @@ if __name__ == '__main__':
verbose=options.verbose,
jobs=options.jobs,
continue_on_build_fail=options.continue_on_build_fail)
# If a path to a test spec is provided, write it to a file
if options.test_spec:
test_spec_data = test_spec_from_test_builds(test_build)
# Create the target dir for the test spec if necessary
# mkdir will not create the dir if it already exists
test_spec_dir = os.path.dirname(options.test_spec)
if test_spec_dir:
mkdir(test_spec_dir)
try:
with open(options.test_spec, 'w') as f:
f.write(json.dumps(test_spec_data, indent=2))
except IOError, e:
print "[ERROR] Error writing test spec to file"
print e
# If a path to a JUnit build report spec is provided, write it to a file
if options.build_report_junit:
report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
report_exporter.report_to_file(build_report, options.build_report_junit, test_suite_properties=build_properties)
# Print memory map summary on screen
if build_report:
print
print print_build_memory_usage_results(build_report)
print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
status = print_report_exporter.report(build_report)
if status:
sys.exit(0)
else:

View File

@ -46,6 +46,7 @@ from tools.paths import HOST_TESTS
from tools.utils import ToolException
from tools.utils import NotSupportedException
from tools.utils import construct_enum
from tools.memap import MemapParser
from tools.targets import TARGET_MAP
from tools.test_db import BaseDBAccess
from tools.build_api import build_project, build_mbed_libs, build_lib
@ -1970,12 +1971,12 @@ def test_path_to_name(path):
while (tail and tail != "."):
name_parts.insert(0, tail)
head, tail = os.path.split(head)
return "-".join(name_parts).lower()
def find_tests(base_dir):
"""Given any directory, walk through the subdirectories and find all tests"""
def find_test_in_directory(directory, tests_path):
"""Given a 'TESTS' directory, return a dictionary of test names and test paths.
The format of the dictionary is {"test-name": "./path/to/test"}"""
@ -1989,20 +1990,20 @@ def find_tests(base_dir):
"name": test_path_to_name(directory),
"path": directory
}
return test
tests_path = 'TESTS'
tests = {}
dirs = scan_for_source_paths(base_dir)
for directory in dirs:
test = find_test_in_directory(directory, tests_path)
if test:
tests[test['name']] = test['path']
return tests
def print_tests(tests, format="list", sort=True):
"""Given a dictionary of tests (as returned from "find_tests"), print them
in the specified format"""
@ -2033,12 +2034,11 @@ def build_tests(tests, base_source_paths, build_path, target, toolchain_name,
continue_on_build_fail=False):
"""Given the data structure from 'find_tests' and the typical build parameters,
build all the tests
Returns a tuple of the build result (True or False) followed by the test
build data structure"""
execution_directory = "."
execution_directory = "."
base_path = norm_relative_path(build_path, execution_directory)
target_name = target if isinstance(target, str) else target.name
@ -2051,9 +2051,10 @@ def build_tests(tests, base_source_paths, build_path, target, toolchain_name,
"binary_type": "bootable",
"tests": {}
}
result = True
map_outputs_total = list()
for test_name, test_path in tests.iteritems():
test_build_path = os.path.join(build_path, test_path)
src_path = base_source_paths + [test_path]
@ -2072,21 +2073,21 @@ def build_tests(tests, base_source_paths, build_path, target, toolchain_name,
except Exception, e:
if not isinstance(e, NotSupportedException):
result = False
if continue_on_build_fail:
continue
else:
break
# If a clean build was carried out last time, disable it for the next build.
# Otherwise the previously built test will be deleted.
if clean:
clean = False
# Normalize the path
if bin_file:
bin_file = norm_relative_path(bin_file, execution_directory)
test_build['tests'][test_name] = {
"binaries": [
{
@ -2094,15 +2095,15 @@ def build_tests(tests, base_source_paths, build_path, target, toolchain_name,
}
]
}
print 'Image: %s'% bin_file
test_builds = {}
test_builds["%s-%s" % (target_name, toolchain_name)] = test_build
return result, test_builds
def test_spec_from_test_builds(test_builds):
return {

View File

@ -233,28 +233,28 @@ class mbedToolchain:
def __init__(self, target, options=None, notify=None, macros=None, silent=False, extra_verbose=False):
self.target = target
self.name = self.__class__.__name__
# compile/assemble/link/binary hooks
self.hook = hooks.Hook(target, self)
# Toolchain flags
self.flags = deepcopy(self.DEFAULT_FLAGS)
# User-defined macros
self.macros = macros or []
# Macros generated from toolchain and target rules/features
self.symbols = None
# Labels generated from toolchain and target rules/features (used for selective build)
self.labels = None
# This will hold the configuration data (as returned by Config.get_config_data())
self.config_data = None
# Non-incremental compile
self.build_all = False
# Build output dir
self.build_dir = None
self.timestamp = time()
@ -265,7 +265,7 @@ class mbedToolchain:
# Number of concurrent build jobs. 0 means auto (based on host system cores)
self.jobs = 0
self.CHROOT = None
self.CHROOT = None
# Ignore patterns from .mbedignore files
self.ignore_patterns = []
@ -280,12 +280,13 @@ class mbedToolchain:
self.notify_fun = self.print_notify_verbose
else:
self.notify_fun = self.print_notify
# Silent builds (no output)
self.silent = silent
# Print output buffer
self.output = ""
self.output = str()
self.map_outputs = list() # Place to store memmap scan results in JSON like data structures
# Build options passed by -o flag
self.options = options if options is not None else []
@ -295,7 +296,7 @@ class mbedToolchain:
if self.options:
self.info("Build Options: %s" % (', '.join(self.options)))
# uVisor specific rules
if 'UVISOR' in self.target.features and 'UVISOR_SUPPORTED' in self.target.extra_labels:
self.target.core = re.sub(r"F$", '', self.target.core)
@ -310,10 +311,10 @@ class mbedToolchain:
if not self.VERBOSE and event['type'] == 'tool_error':
msg = event['message']
elif event['type'] in ['info', 'debug']:
msg = event['message']
elif event['type'] == 'cc':
event['severity'] = event['severity'].title()
event['file'] = basename(event['file'])
@ -615,7 +616,7 @@ class mbedToolchain:
def relative_object_path(self, build_path, base_dir, source):
source_dir, name, _ = split_path(source)
obj_dir = join(build_path, relpath(source_dir, base_dir))
mkdir(obj_dir)
return join(obj_dir, name + '.o')
@ -627,7 +628,7 @@ class mbedToolchain:
cmd_list = []
for c in includes:
if c:
cmd_list.append(('-I%s' % c).replace("\\", "/"))
cmd_list.append(('-I%s' % c).replace("\\", "/"))
string = " ".join(cmd_list)
f.write(string)
return include_file
@ -822,12 +823,12 @@ class mbedToolchain:
if self.target.OUTPUT_NAMING == "8.3":
name = name[0:8]
ext = ext[0:3]
# Create destination directory
head, tail = split(name)
new_path = join(tmp_path, head)
mkdir(new_path)
filename = name+'.'+ext
elf = join(tmp_path, name + '.elf')
bin = join(tmp_path, filename)
@ -844,7 +845,7 @@ class mbedToolchain:
self.binary(r, elf, bin)
self.mem_stats(map)
self.map_outputs = self.mem_stats(map)
self.var("compile_succeded", True)
self.var("binary", filename)
@ -900,7 +901,11 @@ class mbedToolchain:
self.notify({'type': 'var', 'key': key, 'val': value})
def mem_stats(self, map):
# Creates parser object
"""! Creates parser object
@param map Path to linker map file to parse and decode
@return Memory summary structure with memory usage statistics
None if map file can't be opened and processed
"""
toolchain = self.__class__.__name__
# Create memap object
@ -909,7 +914,7 @@ class mbedToolchain:
# Parse and decode a map file
if memap.parse(abspath(map), toolchain) is False:
self.info("Unknown toolchain for memory statistics %s" % toolchain)
return
return None
# Write output to stdout in text (pretty table) format
memap.generate_output('table')
@ -917,11 +922,16 @@ class mbedToolchain:
# Write output to file in JSON format
map_out = splitext(map)[0] + "_map.json"
memap.generate_output('json', map_out)
# Write output to file in CSV format for the CI
map_csv = splitext(map)[0] + "_map.csv"
memap.generate_output('csv-ci', map_csv)
# Here we return memory statistics structure (constructed after
# call to generate_output) which contains raw data in bytes
# about sections + summary
return memap.get_memory_summary()
# Set the configuration data
def set_config_data(self, config_data):
self.config_data = config_data