memap enhancements with configurable depth level

Removed heap/stack from report
Add --stats-depth option to mbed compile
Fix minor issues identified in review
Fix bug when parsing armcc libs
Fix code style
Fix minor issues on existing tests
Fix memap for automated TESTS
Fix stats-depth parameter for tests
pull/4392/head
Marcelo Salazar 2017-05-25 12:09:18 +01:00
parent acdd7dd424
commit 7053ef9412
6 changed files with 589 additions and 331 deletions
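In short, MemapParser.generate_output() now takes the report depth as its second positional argument, and the heap/stack rows are dropped from the table and CSV output. A minimal usage sketch of the changed API (the import path, map file path and toolchain name below are illustrative assumptions, not taken from this diff):

from memap import MemapParser  # tools/memap.py

memap = MemapParser()
if memap.parse("BUILD/K64F/GCC_ARM/app.map", "GCC_ARM"):
    print memap.generate_output('table', 2)            # pretty table, two directory levels deep
    memap.generate_output('json', 2, 'app_map.json')   # JSON report written to a file
    memap.generate_output('csv-ci', 2, 'app_map.csv')  # CSV report for the CI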


@ -446,7 +446,7 @@ def build_project(src_paths, build_path, target, toolchain_name,
macros=None, inc_dirs=None, jobs=1, silent=False,
report=None, properties=None, project_id=None,
project_description=None, extra_verbose=False, config=None,
app_config=None, build_profile=None):
app_config=None, build_profile=None, stats_depth=None):
""" Build a project. A project may be a test or a user program.
Positional arguments:
@ -475,6 +475,7 @@ def build_project(src_paths, build_path, target, toolchain_name,
config - a Config object to use instead of creating one
app_config - location of a chosen mbed_app.json file
build_profile - a dict of flags that will be passed to the compiler
stats_depth - depth level for memap to display files/dirs
"""
# Convert src_path to a list if needed
@ -553,18 +554,18 @@ def build_project(src_paths, build_path, target, toolchain_name,
memap_table = ''
if memap_instance:
# Write output to stdout in text (pretty table) format
memap_table = memap_instance.generate_output('table')
memap_table = memap_instance.generate_output('table', stats_depth)
if not silent:
print memap_table
# Write output to file in JSON format
map_out = join(build_path, name + "_map.json")
memap_instance.generate_output('json', map_out)
memap_instance.generate_output('json', stats_depth, map_out)
# Write output to file in CSV format for the CI
map_csv = join(build_path, name + "_map.csv")
memap_instance.generate_output('csv-ci', map_csv)
memap_instance.generate_output('csv-ci', stats_depth, map_csv)
resources.detect_duplicates(toolchain)
@ -1163,7 +1164,7 @@ def mcu_toolchain_list(release_version='5'):
def mcu_target_list(release_version='5'):
""" Shows target list
""" Shows target list
"""


@ -58,51 +58,66 @@ if __name__ == '__main__':
# Parse Options
parser = get_default_options_parser(add_app_config=True)
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument("-p",
type=argparse_many(test_known),
dest="program",
help="The index of the desired test program: [0-%d]" % (len(TESTS)-1))
group.add_argument(
"-p",
type=argparse_many(test_known),
dest="program",
help="The index of the desired test program: [0-%d]" % (len(TESTS)-1))
group.add_argument("-n",
type=argparse_many(test_name_known),
dest="program",
help="The name of the desired test program")
group.add_argument(
"-n",
type=argparse_many(test_name_known),
dest="program",
help="The name of the desired test program")
parser.add_argument("-j", "--jobs",
type=int,
dest="jobs",
default=0,
help="Number of concurrent jobs. Default: 0/auto (based on host machine's number of CPUs)")
parser.add_argument(
"-j", "--jobs",
type=int,
dest="jobs",
default=0,
help="Number of concurrent jobs. Default: 0/auto (based on host machine's number of CPUs)")
parser.add_argument("-v", "--verbose",
action="store_true",
dest="verbose",
default=False,
help="Verbose diagnostic output")
parser.add_argument(
"-v", "--verbose",
action="store_true",
dest="verbose",
default=False,
help="Verbose diagnostic output")
parser.add_argument("--silent",
action="store_true",
dest="silent",
default=False,
help="Silent diagnostic output (no copy, compile notification)")
parser.add_argument(
"--silent",
action="store_true",
dest="silent",
default=False,
help="Silent diagnostic output (no copy, compile notification)")
parser.add_argument("-D",
action="append",
dest="macros",
help="Add a macro definition")
parser.add_argument(
"-D",
action="append",
dest="macros",
help="Add a macro definition")
group.add_argument("-S", "--supported-toolchains",
dest="supported_toolchains",
default=False,
const="matrix",
choices=["matrix", "toolchains", "targets"],
nargs="?",
help="Displays supported matrix of MCUs and toolchains")
group.add_argument(
"-S", "--supported-toolchains",
dest="supported_toolchains",
default=False,
const="matrix",
choices=["matrix", "toolchains", "targets"],
nargs="?",
help="Displays supported matrix of MCUs and toolchains")
parser.add_argument('-f', '--filter',
dest='general_filter_regex',
default=None,
help='For some commands you can use filter to filter out results')
parser.add_argument(
'-f', '--filter',
dest='general_filter_regex',
default=None,
help='For some commands you can use filter to filter out results')
parser.add_argument(
"--stats-depth",
type=int,
dest="stats_depth",
default=2,
help="Depth level for static memory report")
# Local run
parser.add_argument("--automated", action="store_true", dest="automated",
@ -277,7 +292,8 @@ if __name__ == '__main__':
inc_dirs=[dirname(MBED_LIBRARIES)],
build_profile=extract_profile(parser,
options,
toolchain))
toolchain),
stats_depth=options.stats_depth)
print 'Image: %s'% bin_file
if options.disk:
@ -322,7 +338,7 @@ if __name__ == '__main__':
traceback.print_exc(file=sys.stdout)
else:
print "[ERROR] %s" % str(e)
sys.exit(1)
if options.build_data:
merge_build_data(options.build_data, build_data_blob, "application")


@ -8,13 +8,12 @@ import re
import csv
import json
import argparse
from copy import deepcopy
from prettytable import PrettyTable
from utils import argparse_filestring_type, \
argparse_lowercase_hyphen_type, argparse_uppercase_type
DEBUG = False
RE_ARMCC = re.compile(
r'^\s+0x(\w{8})\s+0x(\w{8})\s+(\w+)\s+(\w+)\s+(\d+)\s+[*]?.+\s+(.+)$')
RE_IAR = re.compile(
@ -38,22 +37,18 @@ class MemapParser(object):
# sections to print info (generic for all toolchains)
sections = ('.text', '.data', '.bss', '.heap', '.stack')
def __init__(self, detailed_misc=False):
def __init__(self):
""" General initialization
"""
#
self.detailed_misc = detailed_misc
# list of all modules and their sections
self.modules = dict()
self.modules = dict() # full list - doesn't change with depth
self.short_modules = dict() # short version with specific depth
# sections must be defined in this order to take irrelevant out
self.all_sections = self.sections + self.other_sections + \
self.misc_flash_sections + ('unknown', 'OUTPUT')
# list of all object files and mapping to module names
self.object_to_module = dict()
# Memory report (sections + summary)
self.mem_report = []
@ -62,23 +57,68 @@ class MemapParser(object):
self.subtotal = dict()
def module_add(self, module_name, size, section):
self.misc_flash_mem = 0
def remove_unused_modules(self):
""" Removes modules/objects that were compiled but are not used
"""
# Using keys to be able to remove entry
for i in self.modules.keys():
size = 0
for k in self.print_sections:
size += self.modules[i][k]
if size == 0:
del self.modules[i]
def module_init(self, object_name):
""" Initialize a module. Just adds the name of the module
Positional arguments:
object_name - name of the entry to add
"""
if object_name not in self.modules:
temp_dic = dict()
for section_idx in self.all_sections:
temp_dic[section_idx] = 0
self.modules[object_name] = temp_dic
def module_add(self, object_name, size, section):
""" Adds a module / section to the list
Positional arguments:
module_name - name of the module to add
object_name - name of the entry to add
size - the size of the module being added
section - the section the module contributes to
"""
if module_name in self.modules:
self.modules[module_name][section] += size
else:
temp_dic = dict()
for section_idx in self.all_sections:
temp_dic[section_idx] = 0
temp_dic[section] = size
self.modules[module_name] = temp_dic
# Check if object is a sub-string of key
for module_path in self.modules:
# this is required to differentiate: main.o vs xxxmain.o
module_split = os.path.basename(module_path)
obj_split = os.path.basename(object_name)
if module_split == obj_split:
self.modules[module_path][section] += size
return
new_module = dict()
for section_idx in self.all_sections:
new_module[section_idx] = 0
new_module[section] = size
self.modules[object_name] = new_module
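# Illustrative behaviour (not part of the diff): if list_dir_obj registered
# "source/main.o", then module_add("main.o", 10, ".text") matches it by basename
# and adds 10 to modules["source/main.o"][".text"]; comparing basenames (rather
# than substrings) keeps "main.o" from being folded into "xxxmain.o". An object
# with no basename match gets a fresh entry of its own.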
def module_replace(self, old_object, new_object):
""" Replaces an object name with a new one
"""
# Check if object is a sub-string of key
if old_object in self.modules:
self.modules[new_object] = self.modules[old_object]
del self.modules[old_object]
def check_new_section_gcc(self, line):
""" Check whether a new section in a map file has been detected (only
@ -98,44 +138,42 @@ class MemapParser(object):
else:
return False # everything else, means no change in section
def path_object_to_module_name(self, txt):
""" Parse a path to object file to extract it's module and object data
def parse_object_name_gcc(self, line):
""" Parse a path to object file
Positional arguments:
txt - the path to parse the object and module name from
"""
txt = txt.replace('\\', '/')
rex_mbed_os_name = r'^.+mbed-os\/(.+)\/(.+\.o)$'
test_rex_mbed_os_name = re.match(rex_mbed_os_name, txt)
line = line.replace('\\', '/')
RE_OBJECT_FILE = r'^.+\/(.+\.o)$'
test_re_mbed_os_name = re.match(RE_OBJECT_FILE, line)
if test_rex_mbed_os_name:
if test_re_mbed_os_name:
object_name = test_rex_mbed_os_name.group(2)
data = test_rex_mbed_os_name.group(1).split('/')
ndata = len(data)
object_name = test_re_mbed_os_name.group(1)
# corner case: certain objects are provided by the GCC toolchain
if 'arm-none-eabi' in line:
object_name = '[lib]/misc/' + object_name
return object_name
else:
RE_LIBRARY_OBJECT_FILE = r'^.+\/(lib.+\.a)\((.+\.o)\)$'
test_re_obj_name = re.match(RE_LIBRARY_OBJECT_FILE, line)
if test_re_obj_name:
object_name = test_re_obj_name.group(1) + '/' + \
test_re_obj_name.group(2)
return '[lib]/' + object_name
if ndata == 1:
module_name = data[0]
else:
module_name = data[0] + '/' + data[1]
if self.detailed_misc:
return [module_name + '/' + object_name, object_name]
else:
return [module_name, object_name]
elif self.detailed_misc:
rex_obj_name = r'^.+\/(.+\.o\)*)$'
test_rex_obj_name = re.match(rex_obj_name, txt)
if test_rex_obj_name:
object_name = test_rex_obj_name.group(1)
return ['Misc/' + object_name, ""]
return ['Misc', ""]
else:
return ['Misc', ""]
print "Malformed input found when parsing GCC map: %s" % line
return '[misc]'
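# Illustrative inputs/outputs for parse_object_name_gcc (paths are hypothetical):
#   ".../BUILD/K64F/GCC_ARM/main.o"     -> "main.o" (matched to its full path
#                                          later by module_add)
#   ".../gcc-arm-none-eabi/.../crt0.o"  -> "[lib]/misc/crt0.o"
#   ".../lib/libc.a(lib_a-printf.o)"    -> "[lib]/libc.a/lib_a-printf.o"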
def parse_section_gcc(self, line):
""" Parse data from a section of gcc map file
@ -147,37 +185,42 @@ class MemapParser(object):
Positional arguments:
line - the line to parse a section from
"""
rex_address_len_name = re.compile(
RE_STD_SECTION_GCC = re.compile(
r'^\s+.*0x(\w{8,16})\s+0x(\w+)\s(.+)$')
test_address_len_name = re.match(rex_address_len_name, line)
test_address_len_name = re.match(RE_STD_SECTION_GCC, line)
if test_address_len_name:
if int(test_address_len_name.group(2), 16) == 0: # size == 0
return ["", 0] # no valid entry
else:
m_name, _ = self.path_object_to_module_name(
o_name = self.parse_object_name_gcc(\
test_address_len_name.group(3))
m_size = int(test_address_len_name.group(2), 16)
return [m_name, m_size]
o_size = int(test_address_len_name.group(2), 16)
return [o_name, o_size]
else: # special corner case for *fill* sections
# example
# *fill* 0x0000abe4 0x4
rex_address_len = r'^\s+\*fill\*\s+0x(\w{8,16})\s+0x(\w+).*$'
test_address_len = re.match(rex_address_len, line)
RE_FILL_SECTION_GCC = r'^\s+\*fill\*\s+0x(\w{8,16})\s+0x(\w+).*$'
test_address_len = re.match(RE_FILL_SECTION_GCC, line)
if test_address_len:
if int(test_address_len.group(2), 16) == 0: # size == 0
return ["", 0] # no valid entry
else:
m_name = 'Fill'
m_size = int(test_address_len.group(2), 16)
return [m_name, m_size]
o_name = '[fill]'
o_size = int(test_address_len.group(2), 16)
return [o_name, o_size]
else:
return ["", 0] # no valid entry
def parse_map_file_gcc(self, file_desc):
""" Main logic to decode gcc map files
@ -205,18 +248,40 @@ class MemapParser(object):
elif change_section != False:
current_section = change_section
[module_name, module_size] = self.parse_section_gcc(line)
[object_name, object_size] = self.parse_section_gcc(line)
if module_size == 0 or module_name == "":
if object_size == 0 or object_name == "":
pass
else:
self.module_add(module_name, module_size, current_section)
self.module_add(object_name, object_size,\
current_section)
def parse_object_name_armcc(self, line):
""" Parse object file
Positional arguments:
line - the line containing the object or library
"""
# simple object (not library)
if line[-2] == '.' and line[-1] == 'o':
return line
else:
RE_OBJECT_ARMCC = r'(.+\.l)\((.+\.o)\)'
test_re_obj_name = re.match(RE_OBJECT_ARMCC, line)
if test_re_obj_name:
object_name = test_re_obj_name.group(1) + '/' + \
test_re_obj_name.group(2)
return '[lib]/' + object_name
else:
print "Malformed input found when parsing ARMCC map: %s" % line
return '[misc]'
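# Illustrative inputs/outputs for parse_object_name_armcc (names are hypothetical):
#   "main.o"         -> "main.o"
#   "c_w.l(fputc.o)" -> "[lib]/c_w.l/fputc.o"
#   anything else    -> "[misc]", with the malformed-input warning printed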
if DEBUG:
print "Line: %s" % line,
print "Module: %s\tSection: %s\tSize: %s" % \
(module_name, current_section, module_size)
raw_input("----------")
def parse_section_armcc(self, line):
""" Parse data from an armcc map file
@ -230,34 +295,47 @@ class MemapParser(object):
line - the line to parse the section data from
"""
test_rex_armcc = re.match(RE_ARMCC, line)
test_re_armcc = re.match(RE_ARMCC, line)
if test_rex_armcc:
if test_re_armcc:
size = int(test_rex_armcc.group(2), 16)
size = int(test_re_armcc.group(2), 16)
if test_rex_armcc.group(4) == 'RO':
if test_re_armcc.group(4) == 'RO':
section = '.text'
else:
if test_rex_armcc.group(3) == 'Data':
if test_re_armcc.group(3) == 'Data':
section = '.data'
elif test_rex_armcc.group(3) == 'Zero':
elif test_re_armcc.group(3) == 'Zero':
section = '.bss'
else:
print "BUG armcc map parser"
raw_input()
print "Malformed input found when parsing armcc map: %s" %\
line
# lookup object in dictionary and return module name
object_name = test_rex_armcc.group(6)
if object_name in self.object_to_module:
module_name = self.object_to_module[object_name]
else:
module_name = 'Misc'
# check name of object or library
object_name = self.parse_object_name_armcc(\
test_re_armcc.group(6))
return [module_name, size, section]
return [object_name, size, section]
else:
return ["", 0, ""] # no valid entry
return ["", 0, ""]
def parse_object_name_iar(self, line):
""" Parse object file
Positional arguments:
line - the line containing the object or library
"""
# simple object (not library)
if line[-2] == '.' and line[-1] == 'o':
object_name = line
return object_name
else:
return '[misc]'
def parse_section_iar(self, line):
""" Parse data from an IAR map file
@ -277,38 +355,34 @@ class MemapParser(object):
line - the line to parse section data from
"""
test_rex_iar = re.match(RE_IAR, line)
test_re_iar = re.match(RE_IAR, line)
if test_rex_iar:
if test_re_iar:
size = int(test_rex_iar.group(4), 16)
size = int(test_re_iar.group(4), 16)
if test_rex_iar.group(2) == 'const' or \
test_rex_iar.group(2) == 'ro code':
if test_re_iar.group(2) == 'const' or \
test_re_iar.group(2) == 'ro code':
section = '.text'
elif test_rex_iar.group(2) == 'zero' or \
test_rex_iar.group(2) == 'uninit':
if test_rex_iar.group(1)[0:4] == 'HEAP':
elif test_re_iar.group(2) == 'zero' or \
test_re_iar.group(2) == 'uninit':
if test_re_iar.group(1)[0:4] == 'HEAP':
section = '.heap'
elif test_rex_iar.group(1)[0:6] == 'CSTACK':
elif test_re_iar.group(1)[0:6] == 'CSTACK':
section = '.stack'
else:
section = '.bss' # default section
elif test_rex_iar.group(2) == 'inited':
elif test_re_iar.group(2) == 'inited':
section = '.data'
else:
print "BUG IAR map parser"
raw_input()
print "Malformed input found when parsing IAR map: %s" % line
# lookup object in dictionary and return module name
object_name = test_rex_iar.group(5)
if object_name in self.object_to_module:
module_name = self.object_to_module[object_name]
else:
module_name = 'Misc'
temp = test_re_iar.group(5)
object_name = self.parse_object_name_iar(temp)
return [module_name, size, section]
return [object_name, size, section]
else:
return ["", 0, ""] # no valid entry
@ -330,12 +404,51 @@ class MemapParser(object):
# Start decoding the map file
for line in infile:
[name, size, section] = self.parse_section_armcc(line)
[object_name, object_size, section] = \
self.parse_section_armcc(line)
if size == 0 or name == "" or section == "":
if object_size == 0 or object_name == "" or section == "":
pass
else:
self.module_add(name, size, section)
self.module_add(object_name, object_size, section)
def check_new_library_iar(self, line):
"""
Searches for libraries and returns name. Example:
m7M_tls.a: [43]
"""
RE_LIBRARY_IAR = re.compile(r'^(.+\.a)\:.+$')
test_address_line = re.match(RE_LIBRARY_IAR, line)
if test_address_line:
return test_address_line.group(1)
else:
return ""
def check_new_object_lib_iar(self, line):
"""
Searches for objects within a library section and returns name. Example:
rt7M_tl.a: [44]
ABImemclr4.o 6
ABImemcpy_unaligned.o 118
ABImemset48.o 50
I64DivMod.o 238
I64DivZer.o 2
"""
RE_OBJECT_LIBRARY_IAR = re.compile(r'^\s+(.+\.o)\s.*')
test_address_line = re.match(RE_OBJECT_LIBRARY_IAR, line)
if test_address_line:
return test_address_line.group(1)
else:
return ""
def parse_map_file_iar(self, file_desc):
""" Main logic to decode IAR map files
@ -344,8 +457,8 @@ class MemapParser(object):
file_desc - a file like object to parse as an IAR map file
"""
# first round, search for objects
with file_desc as infile:
# Search area to parse
for line in infile:
if line.startswith(' Section '):
@ -361,45 +474,142 @@ class MemapParser(object):
else:
self.module_add(name, size, section)
def search_objects(self, path):
""" Searches for object files and creates mapping: object --> module
if line.startswith('*** MODULE SUMMARY'): # finish section
break
# Start decoding the map file
current_library = ""
for line in infile:
library = self.check_new_library_iar(line)
if library != "":
current_library = library
object_name = self.check_new_object_lib_iar(line)
if object_name != "" and current_library != "":
temp = '[lib]' + '/'+ current_library + '/'+ object_name
self.module_replace(object_name, temp)
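# Illustrative effect of this second pass, using the docstring example above:
# an entry first recorded as "ABImemclr4.o" is renamed to
# "[lib]/rt7M_tl.a/ABImemclr4.o" once it appears under the rt7M_tl.a section.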
export_formats = ["json", "csv-ci", "table"]
def list_dir_obj(self, path):
""" Searches all objects in BUILD directory and creates list
Positional arguments:
path - the path to an object file
path - the path to a map file
"""
path = path.replace('\\', '/')
# check location of map file
rex = r'^(.+)' + r'\/(.+\.map)$'
test_rex = re.match(rex, path)
RE_PATH_MAP_FILE = r'^(.+)\/.+\.map$'
test_re = re.match(RE_PATH_MAP_FILE, path)
if test_rex:
search_path = test_rex.group(1) + '/mbed-os/'
if test_re:
search_path = test_re.group(1)
else:
print "Warning: this doesn't look like an mbed project"
return
# create empty dictionary
self.modules = dict()
# search for object files
for root, _, obj_files in os.walk(search_path):
for obj_file in obj_files:
if obj_file.endswith(".o"):
module_name, object_name = self.path_object_to_module_name(
os.path.join(root, obj_file))
if object_name in self.object_to_module:
if DEBUG:
print "WARNING: multiple usages of object file: %s"\
% object_name
print " Current: %s" % \
self.object_to_module[object_name]
print " New: %s" % module_name
print " "
else:
self.object_to_module.update({object_name:module_name})
txt = os.path.join(root, obj_file)
txt = txt.replace('\\', '/')
# add relative path + object to list
self.module_init(txt[len(search_path)+1:])
# The code below is a special case for TESTS.
# mbed-os lives in a separate location and we need to explicitly search
# its object files, skipping the TESTS folder (already scanned above)
# check location of mbed-os
RE_PATH_MAP_FILE = r'^(.+)\/mbed-os\/.*TESTS\/.+\.map$'
test_re = re.match(RE_PATH_MAP_FILE, path)
if test_re == None:
return
search_path = test_re.group(1)
# search for object files
for root, _, obj_files in os.walk(search_path):
for obj_file in obj_files:
if 'TESTS' not in root and obj_file.endswith(".o"):
txt = os.path.join(root, obj_file)
txt = txt.replace('\\', '/')
# add relative path + object to list
self.module_init(txt[len(search_path)+1:])
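# Illustrative result (paths are hypothetical): for BUILD/K64F/GCC_ARM/app.map the
# search path is BUILD/K64F/GCC_ARM, so entries are registered relative to it,
# e.g. "main.o" and "mbed-os/drivers/Ticker.o"; for a TESTS map file whose path
# goes through mbed-os/.../TESTS/, the second walk above also registers the
# objects from the directory above mbed-os, skipping anything inside TESTS.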
def reduce_depth(self, depth):
"""
Reduces the list of modules to the given directory depth and stores
the result in short_modules. Examples:
(1) depth = 1:
main.o
mbed-os
(2) depth = 2:
main.o
mbed-os/test.o
mbed-os/drivers
"""
# depth 0 or None shows all entries
if depth == 0 or depth == None:
self.short_modules = deepcopy(self.modules)
return
self.short_modules = dict()
# create reduced list
for line in self.modules:
data = line.split('/')
ndir = len(data)
temp = ''
count = 0
# iterate until the max depth level
max_level = min(depth, ndir)
# rebuild the path based on depth level
while count < max_level:
if count > 0: # ignore '/' from first entry
temp = temp + '/'
temp = temp + data[count]
count += 1
if temp not in self.short_modules:
temp_dic = dict()
for section_idx in self.all_sections:
temp_dic[section_idx] = 0
self.short_modules[temp] = temp_dic
for section_idx in self.all_sections:
self.short_modules[temp][section_idx] += \
self.modules[line][section_idx]
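# Worked example of the aggregation (sizes invented):
#   modules = {"mbed-os/drivers/a.o": {".text": 10, ...},
#              "mbed-os/hal/b.o":     {".text": 5,  ...},
#              "main.o":              {".text": 7,  ...}}
# reduce_depth(1) yields short_modules = {"mbed-os": {".text": 15, ...},
#                                         "main.o":  {".text": 7,  ...}}
# while reduce_depth(0) or reduce_depth(None) copies the full list unchanged.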
export_formats = ["json", "csv-ci", "table"]
def generate_output(self, export_format, file_output=None):
def generate_output(self, export_format, depth, file_output=None):
""" Generates summary of memory map data
Positional arguments:
@ -407,10 +617,14 @@ class MemapParser(object):
Keyword arguments:
file_desc - descriptor (either stdout or file)
depth - directory depth on report
Returns: generated string for the 'table' format, otherwise None
"""
self.reduce_depth(depth)
self.compute_report()
try:
if file_output:
file_desc = open(file_output, 'wb')
@ -452,29 +666,14 @@ class MemapParser(object):
csv_module_section = []
csv_sizes = []
for i in sorted(self.modules):
for i in sorted(self.short_modules):
for k in self.print_sections:
csv_module_section += [i+k]
csv_sizes += [self.modules[i][k]]
csv_sizes += [self.short_modules[i][k]]
csv_module_section += ['static_ram']
csv_sizes += [self.mem_summary['static_ram']]
csv_module_section += ['heap']
if self.mem_summary['heap'] == 0:
csv_sizes += ['unknown']
else:
csv_sizes += [self.mem_summary['heap']]
csv_module_section += ['stack']
if self.mem_summary['stack'] == 0:
csv_sizes += ['unknown']
else:
csv_sizes += [self.mem_summary['stack']]
csv_module_section += ['total_ram']
csv_sizes += [self.mem_summary['total_ram']]
csv_module_section += ['total_flash']
csv_sizes += [self.mem_summary['total_flash']]
@ -486,9 +685,6 @@ class MemapParser(object):
def generate_table(self, file_desc):
"""Generate a table from a memoy map
Positional arguments:
file_desc - the file to write out the final report to
Returns: string of the generated table
"""
# Create table
@ -503,11 +699,11 @@ class MemapParser(object):
for i in list(self.print_sections):
table.align[i] = 'r'
for i in sorted(self.modules):
for i in sorted(self.short_modules):
row = [i]
for k in self.print_sections:
row.append(self.modules[i][k])
row.append(self.short_modules[i][k])
table.add_row(row)
@ -520,23 +716,9 @@ class MemapParser(object):
output = table.get_string()
output += '\n'
if self.mem_summary['heap'] == 0:
output += "Allocated Heap: unknown\n"
else:
output += "Allocated Heap: %s bytes\n" % \
str(self.mem_summary['heap'])
if self.mem_summary['stack'] == 0:
output += "Allocated Stack: unknown\n"
else:
output += "Allocated Stack: %s bytes\n" % \
str(self.mem_summary['stack'])
output += "Total Static RAM memory (data + bss): %s bytes\n" % \
str(self.mem_summary['static_ram'])
output += "Total RAM memory (data + bss + heap + stack): %s bytes\n" % \
str(self.mem_summary['total_ram'])
output += "Total Flash memory (text + data + misc): %s bytes\n" % \
output += "Total Flash memory (text + data): %s bytes\n" % \
str(self.mem_summary['total_flash'])
return output
@ -544,36 +726,27 @@ class MemapParser(object):
toolchains = ["ARM", "ARM_STD", "ARM_MICRO", "GCC_ARM", "GCC_CR", "IAR"]
def compute_report(self):
""" Generates summary of memory usage for main areas
"""
for k in self.sections:
self.subtotal[k] = 0
for i in sorted(self.modules):
for i in sorted(self.short_modules):
for k in self.sections:
self.subtotal[k] += self.modules[i][k]
# Calculate misc flash sections
self.misc_flash_mem = 0
for i in self.modules:
for k in self.misc_flash_sections:
if self.modules[i][k]:
self.misc_flash_mem += self.modules[i][k]
self.subtotal[k] += self.short_modules[i][k]
self.mem_summary = {
'static_ram': (self.subtotal['.data'] + self.subtotal['.bss']),
'heap': (self.subtotal['.heap']),
'stack': (self.subtotal['.stack']),
'total_ram': (self.subtotal['.data'] + self.subtotal['.bss'] +
self.subtotal['.heap']+self.subtotal['.stack']),
'total_flash': (self.subtotal['.text'] + self.subtotal['.data'] +
self.misc_flash_mem),
'total_flash': (self.subtotal['.text'] + self.subtotal['.data']),
}
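# Worked example for the summary above (invented subtotals): with .text=100,
# .data=20, .bss=30 and .heap=.stack=0 this gives static_ram = 20 + 30 = 50,
# total_ram = 20 + 30 + 0 + 0 = 50 and total_flash = 100 + 20 = 120; misc flash
# sections no longer inflate total_flash, and heap/stack are no longer printed
# in the table or CSV output.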
self.mem_report = []
for i in sorted(self.modules):
for i in sorted(self.short_modules):
self.mem_report.append({
"module":i,
"size":{
k:self.modules[i][k] for k in self.print_sections
k:self.short_modules[i][k] for k in self.print_sections
}
})
@ -592,20 +765,22 @@ class MemapParser(object):
result = True
try:
with open(mapfile, 'r') as file_input:
# Common to all toolchains: first search for objects in BUILD
self.list_dir_obj(os.path.abspath(mapfile))
if toolchain == "ARM" or toolchain == "ARM_STD" or\
toolchain == "ARM_MICRO":
self.search_objects(os.path.abspath(mapfile))
toolchain == "ARM_MICRO":
self.parse_map_file_armcc(file_input)
elif toolchain == "GCC_ARM" or toolchain == "GCC_CR":
self.parse_map_file_gcc(file_input)
elif toolchain == "IAR":
self.search_objects(os.path.abspath(mapfile))
self.parse_map_file_iar(file_input)
else:
result = False
self.compute_report()
self.remove_unused_modules()
except IOError as error:
print "I/O error({0}): {1}".format(error.errno, error.strerror)
result = False
@ -614,7 +789,7 @@ class MemapParser(object):
def main():
"""Entry Point"""
version = '0.3.12'
version = '0.4.0'
# Parser handling
parser = argparse.ArgumentParser(
@ -631,6 +806,10 @@ def main():
required=True,
type=argparse_uppercase_type(MemapParser.toolchains, "toolchain"))
parser.add_argument(
'-d', '--depth', dest='depth', type=int,
help='specify directory depth level to display report', required=False)
parser.add_argument(
'-o', '--output', help='output file name', required=False)
@ -642,31 +821,34 @@ def main():
", ".join(MemapParser.export_formats))
parser.add_argument('-v', '--version', action='version', version=version)
parser.add_argument('-d', '--detailed', action='store_true', help='Displays the elements in "Misc" in a detailed fashion', required=False)
# Parse/run command
if len(sys.argv) <= 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
# Create memap object
memap = MemapParser(detailed_misc=args.detailed)
memap = MemapParser()
# Parse and decode a map file
if args.file and args.toolchain:
if memap.parse(args.file, args.toolchain) is False:
sys.exit(0)
if args.depth is None:
depth = 2 # default depth level
else:
depth = args.depth
returned_string = None
# Write output in file
if args.output != None:
returned_string = memap.generate_output(args.export, args.output)
returned_string = memap.generate_output(args.export, \
depth, args.output)
else: # Write output in screen
returned_string = memap.generate_output(args.export)
returned_string = memap.generate_output(args.export, depth)
if args.export == 'table' and returned_string:
print returned_string


@ -44,12 +44,12 @@ if __name__ == '__main__':
try:
# Parse Options
parser = get_default_options_parser(add_app_config=True)
parser.add_argument("-D",
action="append",
dest="macros",
help="Add a macro definition")
parser.add_argument("-j", "--jobs",
type=int,
dest="jobs",
@ -76,30 +76,36 @@ if __name__ == '__main__':
parser.add_argument("-f", "--format", dest="format",
type=argparse_lowercase_type(format_choices, "format"),
default=format_default_choice, help=format_help)
parser.add_argument("--continue-on-build-fail", action="store_true", dest="continue_on_build_fail",
default=None, help="Continue trying to build all tests if a build failure occurs")
#TODO validate the names instead of just passing through str
parser.add_argument("-n", "--names", dest="names", type=argparse_many(str),
default=None, help="Limit the tests to a comma separated list of names")
parser.add_argument("--test-spec", dest="test_spec",
default=None, help="Destination path for a test spec file that can be used by the Greentea automated test tool")
parser.add_argument("--build-report-junit", dest="build_report_junit",
default=None, help="Destination path for a build report in the JUnit xml format")
parser.add_argument("--build-data",
dest="build_data",
default=None,
help="Dump build_data to this file")
parser.add_argument("-v", "--verbose",
action="store_true",
dest="verbose",
default=False,
help="Verbose diagnostic output")
parser.add_argument("--stats-depth",
type=int,
dest="stats_depth",
default=2,
help="Depth level for static memory report")
options = parser.parse_args()
# Filter tests by path if specified
@ -129,7 +135,7 @@ if __name__ == '__main__':
# Find all tests in the relevant paths
for path in all_paths:
all_tests.update(find_tests(path, mcu, toolchain,
all_tests.update(find_tests(path, mcu, toolchain,
app_config=options.app_config))
# Filter tests by name if specified
@ -172,7 +178,7 @@ if __name__ == '__main__':
# Default base source path is the current directory
if not base_source_paths:
base_source_paths = ['.']
build_report = {}
build_properties = {}
@ -214,8 +220,9 @@ if __name__ == '__main__':
notify=notify,
jobs=options.jobs,
continue_on_build_fail=options.continue_on_build_fail,
app_config=options.app_config,
build_profile=profile)
app_config=options.app_config,
build_profile=profile,
stats_depth=options.stats_depth)
# If a path to a test spec is provided, write it to a file
if options.test_spec:


@ -16,6 +16,7 @@ limitations under the License.
"""
import sys
import os
import json
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
sys.path.insert(0, ROOT)
@ -40,57 +41,105 @@ class MemapParserTests(unittest.TestCase):
:return:
"""
self.memap_parser = MemapParser()
self.memap_parser.modules = {
"Misc": {
"unknown": 0,
".ARM": 8,
".ARM.extab": 0,
".init": 12,
"OUTPUT": 0,
".stack": 0,
".eh_frame": 0,
".fini_array": 4,
"mbed-os/targets/TARGET/TARGET_MCUS/api/pinmap.o": {
".text": 1,
".data": 2,
".bss": 3,
".heap": 0,
".stabstr": 0,
".interrupts_ram": 0,
".init_array": 0,
".stab": 0,
".ARM.attributes": 7347,
".bss": 8517,
".flash_config": 16,
".interrupts": 1024,
".data": 2325,
".ARM.exidx": 0,
".text": 59906,
".jcr": 0
},
"Fill": {
"unknown": 12,
".ARM": 0,
".ARM.extab": 0,
".init": 0,
"OUTPUT": 0,
".stack": 0,
".eh_frame": 0,
".fini_array": 0,
".heap": 65536,
".stabstr": 0,
".interrupts_ram": 1024,
".init_array": 0,
".stab": 0,
".ARM.attributes": 0,
".bss": 2235,
".flash_config": 0,
".interrupts": 0,
".data": 3,
".ARM.exidx": 0,
".text": 136,
".jcr": 0
}
".interrupts_ram":0,
".init":0,
".ARM.extab":0,
".ARM.exidx":0,
".ARM.attributes":0,
".eh_frame":0,
".init_array":0,
".fini_array":0,
".jcr":0,
".stab":0,
".stabstr":0,
".ARM.exidx":0,
".ARM":0,
".interrupts":0,
".flash_config":0,
"unknown":0,
"OUTPUT":0,
},
"[lib]/libc.a/lib_a-printf.o": {
".text": 4,
".data": 5,
".bss": 6,
".heap": 0,
".stack": 0,
".interrupts_ram":0,
".init":0,
".ARM.extab":0,
".ARM.exidx":0,
".ARM.attributes":0,
".eh_frame":0,
".init_array":0,
".fini_array":0,
".jcr":0,
".stab":0,
".stabstr":0,
".ARM.exidx":0,
".ARM":0,
".interrupts":0,
".flash_config":0,
"unknown":0,
"OUTPUT":0,
},
"main.o": {
".text": 7,
".data": 8,
".bss": 0,
".heap": 0,
".stack": 0,
".interrupts_ram":0,
".init":0,
".ARM.extab":0,
".ARM.exidx":0,
".ARM.attributes":0,
".eh_frame":0,
".init_array":0,
".fini_array":0,
".jcr":0,
".stab":0,
".stabstr":0,
".ARM.exidx":0,
".ARM":0,
".interrupts":0,
".flash_config":0,
"unknown":0,
"OUTPUT":0,
},
"test.o": {
".text": 0,
".data": 0,
".bss": 0,
".heap": 0,
".stack": 0,
".interrupts_ram":0,
".init":0,
".ARM.extab":0,
".ARM.exidx":0,
".ARM.attributes":0,
".eh_frame":0,
".init_array":0,
".fini_array":0,
".jcr":0,
".stab":0,
".stabstr":0,
".ARM.exidx":0,
".ARM":0,
".interrupts":0,
".flash_config":0,
"unknown":0,
"OUTPUT":0,
},
}
self.memap_parser.compute_report()
def tearDown(self):
"""
@ -99,70 +148,71 @@ class MemapParserTests(unittest.TestCase):
:return:
"""
pass
def generate_test_helper(self, output_type, file_output=None):
def generate_test_helper(self, output_type, depth, file_output=None):
"""
Helper that ensures that the member variables "modules", "mem_report",
and "mem_summary" are unchanged after calling "generate_output"
Helper that ensures that the member variable "modules" is
unchanged after calling "generate_output"
:param output_type: type string that is passed to "generate_output"
:param file_output: path to output file that is passed to "generate_output"
:param file_output: path to output file that is passed to "generate_output"
:return:
"""
old_modules = deepcopy(self.memap_parser.modules)
old_report = deepcopy(self.memap_parser.mem_report)
old_summary = deepcopy(self.memap_parser.mem_summary)
self.memap_parser.generate_output(output_type, file_output)
self.memap_parser.generate_output(output_type, depth, file_output)
self.assertEqual(self.memap_parser.modules, old_modules,
"generate_output modified the 'modules' property")
self.assertEqual(self.memap_parser.mem_report, old_report,
"generate_output modified the 'mem_report' property")
self.assertEqual(self.memap_parser.mem_summary, old_summary,
"generate_output modified the 'mem_summary' property")
"generate_output modified the 'modules' property")
def test_report_computed(self):
"""
Test ensures the report and summary are computed
:return:
"""
self.assertTrue(self.memap_parser.mem_report)
self.memap_parser.generate_output('table', 2)
# Report is created after generating output
self.assertTrue(self.memap_parser.mem_summary)
self.assertEqual(self.memap_parser.mem_report[-1]['summary'],
self.memap_parser.mem_summary,
"mem_report did not contain a correct copy of mem_summary")
self.assertTrue(self.memap_parser.mem_report)
def test_generate_output_table(self):
"""
Test ensures that an output of type "table" can be generated correctly
:return:
"""
self.generate_test_helper('table')
depth = 2
self.generate_test_helper('table', depth)
def test_generate_output_json(self):
"""
Test ensures that an output of type "json" can be generated correctly
:return:
"""
file_name = '.json_test_output.json'
self.generate_test_helper('json', file_output=file_name)
depth = 2
self.generate_test_helper('json', depth, file_output=file_name)
self.assertTrue(os.path.exists(file_name), "Failed to create json file")
os.remove(file_name)
def test_generate_output_csv_ci(self):
"""
Test ensures that an output of type "csv-ci" can be generated correctly
:return:
"""
file_name = '.csv_ci_test_output.csv'
self.generate_test_helper('csv-ci', file_output=file_name)
depth = 2
self.generate_test_helper('csv-ci', depth, file_output=file_name)
self.assertTrue(os.path.exists(file_name), "Failed to create csv-ci file")
os.remove(file_name)
if __name__ == '__main__':
unittest.main()
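For orientation, reducing the fixture above to depth 1 groups its four entries like this (only the .text sizes from setUp shown; test.o is kept here because remove_unused_modules only runs inside parse()):

#   "mbed-os" -> .text 1   (the pinmap.o entry collapsed to its top directory)
#   "[lib]"   -> .text 4   (the libc.a printf object)
#   "main.o"  -> .text 7
#   "test.o"  -> .text 0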


@ -496,7 +496,8 @@ class SingleTestRunner(object):
properties=build_properties,
project_id=test_id,
project_description=test.get_description(),
build_profile=profile)
build_profile=profile,
stats_depth=stats_depth)
except Exception, e:
project_name_str = project_name if project_name is not None else test_id
@ -2038,7 +2039,7 @@ def find_tests(base_dir, target_name, toolchain_name, app_config=None):
if path_depth == 2:
test_group_directory_path, test_case_directory = os.path.split(d)
test_group_directory = os.path.basename(test_group_directory_path)
# Check to make sure discovered folder is not in a host test directory
if test_case_directory != 'host_tests' and test_group_directory != 'host_tests':
test_name = test_path_to_name(d, base_dir)
@ -2122,7 +2123,7 @@ def build_tests(tests, base_source_paths, build_path, target, toolchain_name,
clean=False, notify=None, verbose=False, jobs=1, macros=None,
silent=False, report=None, properties=None,
continue_on_build_fail=False, app_config=None,
build_profile=None):
build_profile=None, stats_depth=None):
"""Given the data structure from 'find_tests' and the typical build parameters,
build all the tests
@ -2158,7 +2159,7 @@ def build_tests(tests, base_source_paths, build_path, target, toolchain_name,
src_path = base_source_paths + [test_path]
bin_file = None
test_case_folder_name = os.path.basename(test_path)
args = (src_path, test_build_path, target, toolchain_name)
kwargs = {
'jobs': 1,
@ -2172,9 +2173,10 @@ def build_tests(tests, base_source_paths, build_path, target, toolchain_name,
'app_config': app_config,
'build_profile': build_profile,
'silent': True,
'toolchain_paths': TOOLCHAIN_PATHS
'toolchain_paths': TOOLCHAIN_PATHS,
'stats_depth': stats_depth
}
results.append(p.apply_async(build_test_worker, args, kwargs))
p.close()
@ -2199,7 +2201,7 @@ def build_tests(tests, base_source_paths, build_path, target, toolchain_name,
report_entry = worker_result['kwargs']['report'][target_name][toolchain_name]
for test_key in report_entry.keys():
report[target_name][toolchain_name][test_key] = report_entry[test_key]
# Set the overall result to a failure if a build failure occurred
if ('reason' in worker_result and
not worker_result['reason'] and