#!/usr/bin/env python

"""Memory Map File Analyser for ARM mbed"""

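# Example command-line invocation (illustrative only; the map file path is a
# placeholder, and the flags are the argparse options defined in main() below):
#
#   python memap.py BUILD/project.map -t GCC_ARM -d 2 -e table -o report.txt
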
import sys
import os
import re
import csv
import json
import argparse
from copy import deepcopy
from prettytable import PrettyTable

from utils import argparse_filestring_type, \
    argparse_lowercase_hyphen_type, argparse_uppercase_type


RE_ARMCC = re.compile(
    r'^\s+0x(\w{8})\s+0x(\w{8})\s+(\w+)\s+(\w+)\s+(\d+)\s+[*]?.+\s+(.+)$')
RE_IAR = re.compile(
    r'^\s+(.+)\s+(zero|const|ro code|inited|uninit)\s'
    r'+0x(\w{8})\s+0x(\w+)\s+(.+)\s.+$')

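# For reference, these patterns are aimed at linker map entries like the ones
# shown in the armcc and IAR parser docstrings further down, e.g.:
#
#   RE_ARMCC:  "    0x00000410   0x00000008   Code   RO   49364  * !!!main   c_w.l(__main.o)"
#   RE_IAR:    "    .intvec   ro code   0x00000000   0x198   startup_MK64F12.o [15]"
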
class MemapParser(object):
    """An object that represents parsed results, parses the memory map files,
    and writes out different file types of memory results
    """

    print_sections = ('.text', '.data', '.bss')

    misc_flash_sections = ('.interrupts', '.flash_config')

    other_sections = ('.interrupts_ram', '.init', '.ARM.extab',
                      '.ARM.exidx', '.ARM.attributes', '.eh_frame',
                      '.init_array', '.fini_array', '.jcr', '.stab',
                      '.stabstr', '.ARM.exidx', '.ARM')

    # sections to print info (generic for all toolchains)
    sections = ('.text', '.data', '.bss', '.heap', '.stack')

    def __init__(self):
        """ General initialization
        """

        # list of all modules and their sections
        self.modules = dict()        # full list - doesn't change with depth
        self.short_modules = dict()  # short version with specific depth

        # sections must be defined in this order to take irrelevant out
        self.all_sections = self.sections + self.other_sections + \
                            self.misc_flash_sections + ('unknown', 'OUTPUT')

        # Memory report (sections + summary)
        self.mem_report = []

        # Just the memory summary section
        self.mem_summary = dict()

        self.subtotal = dict()

        self.misc_flash_mem = 0

    def remove_unused_modules(self):
        """ Removes modules/objects that were compiled but are not used
        """

        # Use keys() so entries can be removed while iterating
        for i in self.modules.keys():
            size = 0
            for k in self.print_sections:
                size += self.modules[i][k]
            if size == 0:
                del self.modules[i]

    def module_init(self, object_name):
        """ Initialize a module. Just adds the name of the module

        Positional arguments:
        object_name - name of the entry to add
        """

        if object_name not in self.modules:
            temp_dic = dict()
            for section_idx in self.all_sections:
                temp_dic[section_idx] = 0
            self.modules[object_name] = temp_dic

    def module_add(self, object_name, size, section):
        """ Adds a module / section to the list

        Positional arguments:
        object_name - name of the entry to add
        size - the size of the module being added
        section - the section the module contributes to
        """

        # Check whether the object is already in the list (compare file names)
        for module_path in self.modules:

            # this is required to differentiate: main.o vs xxxmain.o
            module_split = os.path.basename(module_path)
            obj_split = os.path.basename(object_name)

            if module_split == obj_split:
                self.modules[module_path][section] += size
                return

        new_module = dict()
        for section_idx in self.all_sections:
            new_module[section_idx] = 0
        new_module[section] = size
        self.modules[object_name] = new_module

    def module_replace(self, old_object, new_object):
        """ Replaces an object name with a new one
        """

        # rename the entry if the old object name is present
        if old_object in self.modules:
            self.modules[new_object] = self.modules[old_object]
            del self.modules[old_object]

    def check_new_section_gcc(self, line):
        """ Check whether a new section in a map file has been detected (only
        applies to gcc)

        Positional arguments:
        line - the line to check for a new section
        """

        for i in self.all_sections:
            if line.startswith(i):
                # return the name of the section (assuming it's a known one)
                return i

        if line.startswith('.'):
            return 'unknown'  # all others are classified as unknown
        else:
            return False      # everything else means no change in section

    def parse_object_name_gcc(self, line):
        """ Parse a path to an object file

        Positional arguments:
        line - the path to parse the object and module name from
        """

        line = line.replace('\\', '/')
        RE_OBJECT_FILE = r'^.+\/(.+\.o)$'
        test_re_mbed_os_name = re.match(RE_OBJECT_FILE, line)

        if test_re_mbed_os_name:

            object_name = test_re_mbed_os_name.group(1)

            # corner case: certain objects are provided by the GCC toolchain
            if 'arm-none-eabi' in line:
                object_name = '[lib]/misc/' + object_name

            return object_name

        else:

            RE_LIBRARY_OBJECT_FILE = r'^.+\/(lib.+\.a)\((.+\.o)\)$'
            test_re_obj_name = re.match(RE_LIBRARY_OBJECT_FILE, line)

            if test_re_obj_name:
                object_name = test_re_obj_name.group(1) + '/' + \
                              test_re_obj_name.group(2)

                return '[lib]/' + object_name

            else:
                print "Malformed input found when parsing GCC map: %s" % line
                return '[misc]'

    def parse_section_gcc(self, line):
        """ Parse data from a section of gcc map file

        examples:
            0x00004308 0x7c ./BUILD/K64F/GCC_ARM/mbed-os/hal/targets/hal/TARGET_Freescale/TARGET_KPSDK_MCUS/spi_api.o
            .text 0x00000608 0x198 ./BUILD/K64F/GCC_ARM/mbed-os/core/mbed-rtos/rtx/TARGET_CORTEX_M/TARGET_RTOS_M4_M7/TOOLCHAIN_GCC/HAL_CM4.o

        Positional arguments:
        line - the line to parse a section from
        """

        RE_STD_SECTION_GCC = re.compile(
            r'^\s+.*0x(\w{8,16})\s+0x(\w+)\s(.+)$')

        test_address_len_name = re.match(RE_STD_SECTION_GCC, line)

        if test_address_len_name:

            if int(test_address_len_name.group(2), 16) == 0:  # size == 0
                return ["", 0]  # no valid entry
            else:
                o_name = self.parse_object_name_gcc(
                    test_address_len_name.group(3))
                o_size = int(test_address_len_name.group(2), 16)

                return [o_name, o_size]

        else:  # special corner case for *fill* sections
            # example
            # *fill* 0x0000abe4 0x4

            RE_FILL_SECTION_GCC = r'^\s+\*fill\*\s+0x(\w{8,16})\s+0x(\w+).*$'

            test_address_len = re.match(RE_FILL_SECTION_GCC, line)

            if test_address_len:
                if int(test_address_len.group(2), 16) == 0:  # size == 0
                    return ["", 0]  # no valid entry
                else:
                    o_name = '[fill]'
                    o_size = int(test_address_len.group(2), 16)
                    return [o_name, o_size]
            else:
                return ["", 0]  # no valid entry

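    # For the second docstring example above, this would yield roughly
    # ['HAL_CM4.o', 0x198]: parse_object_name_gcc() keeps only the final
    # path component, and the size comes from the second hex field.
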
    def parse_map_file_gcc(self, file_desc):
        """ Main logic to decode gcc map files

        Positional arguments:
        file_desc - a stream object to parse as a gcc map file
        """

        current_section = 'unknown'

        with file_desc as infile:

            # Search area to parse
            for line in infile:
                if line.startswith('Linker script and memory map'):
                    current_section = "unknown"
                    break

            # Start decoding the map file
            for line in infile:

                change_section = self.check_new_section_gcc(line)

                if change_section == "OUTPUT":  # finish parsing file: exit
                    break
                elif change_section != False:
                    current_section = change_section

                [object_name, object_size] = self.parse_section_gcc(line)

                if object_size == 0 or object_name == "":
                    pass
                else:
                    self.module_add(object_name, object_size,
                                    current_section)

    def parse_object_name_armcc(self, line):
        """ Parse object file

        Positional arguments:
        line - the line containing the object or library
        """

        # simple object (not library)
        if line[-2] == '.' and line[-1] == 'o':
            return line

        else:

            RE_OBJECT_ARMCC = r'(.+\.l)\((.+\.o)\)'
            test_re_obj_name = re.match(RE_OBJECT_ARMCC, line)

            if test_re_obj_name:
                object_name = test_re_obj_name.group(1) + '/' + \
                              test_re_obj_name.group(2)

                return '[lib]/' + object_name
            else:
                print "Malformed input found when parsing ARMCC map: %s" % line
                return '[misc]'

    def parse_section_armcc(self, line):
        """ Parse data from an armcc map file

        Examples of armcc map file:
            Base_Addr Size Type Attr Idx E Section Name Object
            0x00000000 0x00000400 Data RO 11222 RESET startup_MK64F12.o
            0x00000410 0x00000008 Code RO 49364 * !!!main c_w.l(__main.o)

        Positional arguments:
        line - the line to parse the section data from
        """

        test_re_armcc = re.match(RE_ARMCC, line)

        if test_re_armcc:

            size = int(test_re_armcc.group(2), 16)

            if test_re_armcc.group(4) == 'RO':
                section = '.text'
            else:
                if test_re_armcc.group(3) == 'Data':
                    section = '.data'
                elif test_re_armcc.group(3) == 'Zero':
                    section = '.bss'
                else:
                    print "Malformed input found when parsing armcc map: %s" %\
                        line

            # check name of object or library
            object_name = self.parse_object_name_armcc(
                test_re_armcc.group(6))

            return [object_name, size, section]

        else:
            return ["", 0, ""]

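    # For the "!!!main" docstring example above, this would yield roughly
    # ['[lib]/c_w.l/__main.o', 8, '.text'].
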
    def parse_object_name_iar(self, line):
        """ Parse object file

        Positional arguments:
        line - the line containing the object or library
        """

        # simple object (not library)
        if line[-2] == '.' and line[-1] == 'o':
            object_name = line
            return object_name

        else:
            return '[misc]'

    def parse_section_iar(self, line):
        """ Parse data from an IAR map file

        Examples of IAR map file:
         Section Kind Address Size Object
         .intvec ro code 0x00000000 0x198 startup_MK64F12.o [15]
         .rodata const 0x00000198 0x0 zero_init3.o [133]
         .iar.init_table const 0x00008384 0x2c - Linker created -
         Initializer bytes const 0x00000198 0xb2 <for P3 s0>
         .data inited 0x20000000 0xd4 driverAtmelRFInterface.o [70]
         .bss zero 0x20000598 0x318 RTX_Conf_CM.o [4]
         .iar.dynexit uninit 0x20001448 0x204 <Block tail>
         HEAP uninit 0x20001650 0x10000 <Block tail>

        Positional arguments:
        line - the line to parse section data from
        """

        test_re_iar = re.match(RE_IAR, line)

        if test_re_iar:

            size = int(test_re_iar.group(4), 16)

            if test_re_iar.group(2) == 'const' or \
               test_re_iar.group(2) == 'ro code':
                section = '.text'
            elif test_re_iar.group(2) == 'zero' or \
                 test_re_iar.group(2) == 'uninit':
                if test_re_iar.group(1)[0:4] == 'HEAP':
                    section = '.heap'
                elif test_re_iar.group(1)[0:6] == 'CSTACK':
                    section = '.stack'
                else:
                    section = '.bss'  # default section

            elif test_re_iar.group(2) == 'inited':
                section = '.data'
            else:
                print "Malformed input found when parsing IAR map: %s" % line

            # lookup object in dictionary and return module name
            temp = test_re_iar.group(5)
            object_name = self.parse_object_name_iar(temp)

            return [object_name, size, section]

        else:
            return ["", 0, ""]  # no valid entry

    def parse_map_file_armcc(self, file_desc):
        """ Main logic to decode armc5 map files

        Positional arguments:
        file_desc - a file like object to parse as an armc5 map file
        """

        with file_desc as infile:

            # Search area to parse
            for line in infile:
                if line.startswith(' Base Addr Size'):
                    break

            # Start decoding the map file
            for line in infile:

                [object_name, object_size, section] = \
                    self.parse_section_armcc(line)

                if object_size == 0 or object_name == "" or section == "":
                    pass
                else:
                    self.module_add(object_name, object_size, section)

    def check_new_library_iar(self, line):
        """
        Searches for libraries and returns name. Example:
        m7M_tls.a: [43]

        """

        RE_LIBRARY_IAR = re.compile(r'^(.+\.a)\:.+$')

        test_address_line = re.match(RE_LIBRARY_IAR, line)

        if test_address_line:
            return test_address_line.group(1)
        else:
            return ""

    def check_new_object_lib_iar(self, line):
        """
        Searches for objects within a library section and returns name. Example:
        rt7M_tl.a: [44]
            ABImemclr4.o 6
            ABImemcpy_unaligned.o 118
            ABImemset48.o 50
            I64DivMod.o 238
            I64DivZer.o 2

        """

        RE_OBJECT_LIBRARY_IAR = re.compile(r'^\s+(.+\.o)\s.*')

        test_address_line = re.match(RE_OBJECT_LIBRARY_IAR, line)

        if test_address_line:
            return test_address_line.group(1)
        else:
            return ""

    def parse_map_file_iar(self, file_desc):
        """ Main logic to decode IAR map files

        Positional arguments:
        file_desc - a file like object to parse as an IAR map file
        """

        # first round, search for objects
        with file_desc as infile:
            # Search area to parse
            for line in infile:
                if line.startswith(' Section '):
                    break

            # Start decoding the map file
            for line in infile:

                [name, size, section] = self.parse_section_iar(line)

                if size == 0 or name == "" or section == "":
                    pass
                else:
                    self.module_add(name, size, section)

                if line.startswith('*** MODULE SUMMARY'):  # finish section
                    break

            # second round: parse the module summary to find out which
            # library each object belongs to
            current_library = ""
            for line in infile:

                library = self.check_new_library_iar(line)

                if library != "":
                    current_library = library

                object_name = self.check_new_object_lib_iar(line)

                if object_name != "" and current_library != "":
                    temp = '[lib]' + '/' + current_library + '/' + object_name
                    self.module_replace(object_name, temp)

    def list_dir_obj(self, path):
        """ Searches all objects in BUILD directory and creates list

        Positional arguments:
        path - the path to a map file
        """

        path = path.replace('\\', '/')

        # check location of map file
        RE_PATH_MAP_FILE = r'^(.+)\/.+\.map$'
        test_re = re.match(RE_PATH_MAP_FILE, path)

        if test_re:
            search_path = test_re.group(1)
        else:
            print "Warning: this doesn't look like an mbed project"
            return

        # create empty dictionary
        self.modules = dict()

        # search for object files
        for root, _, obj_files in os.walk(search_path):
            for obj_file in obj_files:
                if obj_file.endswith(".o"):

                    txt = os.path.join(root, obj_file)
                    txt = txt.replace('\\', '/')

                    # add relative path + object to list
                    self.module_init(txt[len(search_path)+1:])

        # The code below is a special case for TESTS.
        # mbed-os lives in a separate location and we need to explicitly search
        # its object files, skipping the TESTS folder (already scanned above)

        # check location of mbed-os
        RE_PATH_MAP_FILE = r'^(.+)\/mbed-os\/.*TESTS\/.+\.map$'
        test_re = re.match(RE_PATH_MAP_FILE, path)

        if test_re == None:
            return

        search_path = test_re.group(1)

        # search for object files
        for root, _, obj_files in os.walk(search_path):
            for obj_file in obj_files:
                if 'TESTS' not in root and obj_file.endswith(".o"):

                    txt = os.path.join(root, obj_file)
                    txt = txt.replace('\\', '/')

                    # add relative path + object to list
                    self.module_init(txt[len(search_path)+1:])

    def reduce_depth(self, depth):
        """
        Reduces the full module list to the specified directory depth.
        Examples:

        (1) depth = 1:
        main.o
        mbed-os

        (2) depth = 2:
        main.o
        mbed-os/test.o
        mbed-os/drivers

        """

        # depth 0 or None shows all entries
        if depth == 0 or depth == None:
            self.short_modules = deepcopy(self.modules)
            return

        self.short_modules = dict()

        # create reduced list
        for line in self.modules:

            data = line.split('/')
            ndir = len(data)

            temp = ''
            count = 0

            # iterate until the max depth level
            max_level = min(depth, ndir)

            # rebuild the path based on depth level
            while count < max_level:
                if count > 0:  # ignore '/' from first entry
                    temp = temp + '/'

                temp = temp + data[count]
                count += 1

            if temp not in self.short_modules:
                temp_dic = dict()
                for section_idx in self.all_sections:
                    temp_dic[section_idx] = 0
                self.short_modules[temp] = temp_dic

            for section_idx in self.all_sections:
                self.short_modules[temp][section_idx] += \
                    self.modules[line][section_idx]

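    # For example, with depth=1 the entries 'mbed-os/drivers/xxx.o' and
    # 'mbed-os/hal/yyy.o' (hypothetical names) are both accumulated under
    # the single entry 'mbed-os'.
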
    export_formats = ["json", "csv-ci", "table"]

    def generate_output(self, export_format, depth, file_output=None):
        """ Generates summary of memory map data

        Positional arguments:
        export_format - the format to dump
        depth - directory depth on report

        Keyword arguments:
        file_output - output file name (stdout is used when None)

        Returns: generated string for the 'table' format, otherwise None
        """

        self.reduce_depth(depth)
        self.compute_report()

        try:
            if file_output:
                file_desc = open(file_output, 'wb')
            else:
                file_desc = sys.stdout
        except IOError as error:
            print "I/O error({0}): {1}".format(error.errno, error.strerror)
            return False

        to_call = {'json': self.generate_json,
                   'csv-ci': self.generate_csv,
                   'table': self.generate_table}[export_format]
        output = to_call(file_desc)

        if file_desc is not sys.stdout:
            file_desc.close()

        return output

    def generate_json(self, file_desc):
        """Generate a json file from a memory map

        Positional arguments:
        file_desc - the file to write out the final report to
        """
        file_desc.write(json.dumps(self.mem_report, indent=4))
        file_desc.write('\n')

        return None

    def generate_csv(self, file_desc):
        """Generate a CSV file from a memory map

        Positional arguments:
        file_desc - the file to write out the final report to
        """
        csv_writer = csv.writer(file_desc, delimiter=',',
                                quoting=csv.QUOTE_MINIMAL)

        csv_module_section = []
        csv_sizes = []
        for i in sorted(self.short_modules):
            for k in self.print_sections:
                csv_module_section += [i+k]
                csv_sizes += [self.short_modules[i][k]]

        csv_module_section += ['static_ram']
        csv_sizes += [self.mem_summary['static_ram']]

        csv_module_section += ['total_flash']
        csv_sizes += [self.mem_summary['total_flash']]

        csv_writer.writerow(csv_module_section)
        csv_writer.writerow(csv_sizes)

        return None

    def generate_table(self, file_desc):
        """Generate a table from a memory map

        Returns: string of the generated table
        """
        # Create table
        columns = ['Module']
        columns.extend(self.print_sections)

        table = PrettyTable(columns)
        table.align["Module"] = "l"
        for col in self.print_sections:
            table.align[col] = 'r'

        for i in sorted(self.short_modules):
            row = [i]

            for k in self.print_sections:
                row.append(self.short_modules[i][k])

            table.add_row(row)

        subtotal_row = ['Subtotals']
        for k in self.print_sections:
            subtotal_row.append(self.subtotal[k])

        table.add_row(subtotal_row)

        output = table.get_string()
        output += '\n'

        output += "Total Static RAM memory (data + bss): %s bytes\n" % \
                  str(self.mem_summary['static_ram'])
        output += "Total Flash memory (text + data): %s bytes\n" % \
                  str(self.mem_summary['total_flash'])

        return output

    toolchains = ["ARM", "ARM_STD", "ARM_MICRO", "GCC_ARM", "GCC_CR", "IAR"]

    def compute_report(self):
        """ Generates summary of memory usage for main areas
        """

        for k in self.sections:
            self.subtotal[k] = 0

        for i in sorted(self.short_modules):
            for k in self.sections:
                self.subtotal[k] += self.short_modules[i][k]

        self.mem_summary = {
            'static_ram': (self.subtotal['.data'] + self.subtotal['.bss']),
            'total_flash': (self.subtotal['.text'] + self.subtotal['.data']),
        }

        self.mem_report = []
        for i in sorted(self.short_modules):
            self.mem_report.append({
                "module": i,
                "size": {
                    k: self.short_modules[i][k] for k in self.print_sections
                }
            })

        self.mem_report.append({
            'summary': self.mem_summary
        })

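    # Note: '.data' is counted in both summary figures above - it is assumed
    # to occupy flash (initialisation image) as well as RAM at run time.
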
    def parse(self, mapfile, toolchain):
        """ Parse and decode map file depending on the toolchain

        Positional arguments:
        mapfile - the file name of the memory map file
        toolchain - the toolchain used to create the file
        """

        result = True
        try:
            with open(mapfile, 'r') as file_input:

                # Common to all toolchains: first search for objects in BUILD
                self.list_dir_obj(os.path.abspath(mapfile))

                if toolchain == "ARM" or toolchain == "ARM_STD" or\
                   toolchain == "ARM_MICRO":
                    self.parse_map_file_armcc(file_input)
                elif toolchain == "GCC_ARM" or toolchain == "GCC_CR":
                    self.parse_map_file_gcc(file_input)
                elif toolchain == "IAR":
                    self.parse_map_file_iar(file_input)
                else:
                    result = False

                self.remove_unused_modules()

        except IOError as error:
            print "I/O error({0}): {1}".format(error.errno, error.strerror)
            result = False
        return result

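# A minimal sketch of driving MemapParser from another script (the map file
# name and toolchain below are illustrative assumptions):
#
#   memap = MemapParser()
#   if memap.parse("BUILD/project.map", "GCC_ARM"):
#       print memap.generate_output("table", 2)
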
def main():
    """Entry Point"""

    version = '0.4.0'

    # Parser handling
    parser = argparse.ArgumentParser(
        description="Memory Map File Analyser for ARM mbed\nversion %s" %
        version)

    parser.add_argument(
        'file', type=argparse_filestring_type, help='memory map file')

    parser.add_argument(
        '-t', '--toolchain', dest='toolchain',
        help='select a toolchain used to build the memory map file (%s)' %
        ", ".join(MemapParser.toolchains),
        required=True,
        type=argparse_uppercase_type(MemapParser.toolchains, "toolchain"))

    parser.add_argument(
        '-d', '--depth', dest='depth', type=int,
        help='specify directory depth level to display report', required=False)

    parser.add_argument(
        '-o', '--output', help='output file name', required=False)

    parser.add_argument(
        '-e', '--export', dest='export', required=False, default='table',
        type=argparse_lowercase_hyphen_type(MemapParser.export_formats,
                                            'export format'),
        help="export format (examples: %s: default)" %
        ", ".join(MemapParser.export_formats))

    parser.add_argument('-v', '--version', action='version', version=version)

    # Parse/run command
    if len(sys.argv) <= 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()

    # Create memap object
    memap = MemapParser()

    # Parse and decode a map file
    if args.file and args.toolchain:
        if memap.parse(args.file, args.toolchain) is False:
            sys.exit(0)

    if args.depth is None:
        depth = 2  # default depth level
    else:
        depth = args.depth

    returned_string = None
    # Write output to file
    if args.output != None:
        returned_string = memap.generate_output(args.export,
                                                depth, args.output)
    else:  # Write output to screen
        returned_string = memap.generate_output(args.export, depth)

    if args.export == 'table' and returned_string:
        print returned_string

    sys.exit(0)


if __name__ == "__main__":
    main()