mirror of https://github.com/ARMmbed/mbed-os.git
Reorder post-build and managed bootloader mode merging
Previously, post-build was run before the merge from managed bootloader mode. This renders many post-build scripts less than useful, as most of them compute a digest of the ROM image generated in a build. This reorders the post-build scripts to come after the managed bootloader mode so that post-build script digests are useful again. (pull/9738/head)
parent
aeeb43fb3c
commit
4cac89c22b
|
|
@ -27,7 +27,6 @@ from os.path import join, exists, dirname, basename, abspath, normpath, splitext
|
|||
from os.path import relpath
|
||||
from os import linesep, remove, makedirs
|
||||
from time import time
|
||||
from intelhex import IntelHex
|
||||
from json import load, dump
|
||||
from jinja2 import FileSystemLoader
|
||||
from jinja2.environment import Environment
|
||||
|
|
@ -35,7 +34,7 @@ from jinja2.environment import Environment
|
|||
from .arm_pack_manager import Cache
|
||||
from .utils import (mkdir, run_cmd, run_cmd_ext, NotSupportedException,
|
||||
ToolException, InvalidReleaseTargetException,
|
||||
intelhex_offset, integer, generate_update_filename, copy_when_different)
|
||||
copy_when_different)
|
||||
from .paths import (MBED_CMSIS_PATH, MBED_TARGETS_PATH, MBED_LIBRARIES,
|
||||
MBED_HEADER, MBED_DRIVERS, MBED_PLATFORM, MBED_HAL,
|
||||
MBED_CONFIG_FILE, MBED_LIBRARIES_DRIVERS,
|
||||
|
|
@ -393,124 +392,6 @@ def prepare_toolchain(src_paths, build_dir, target, toolchain_name,
|
|||
|
||||
return toolchain
|
||||
|
||||
def _printihex(ihex):
|
||||
import pprint
|
||||
pprint.PrettyPrinter().pprint(ihex.todict())
|
||||
|
||||
def _real_region_size(region):
    """Return the number of bytes actually occupied by *region*.

    When the region's file can be parsed, measure the span of its
    contents; otherwise fall back to the declared region size.
    """
    try:
        contents = intelhex_offset(region.filename, offset=region.start)
        return (contents.maxaddr() - contents.minaddr()) + 1
    except AttributeError:
        # No parsable contents available - use the configured size.
        return region.size
|
||||
|
||||
|
||||
def _fill_header(region_list, current_region):
    """Fill an application header region

    This is done in three steps:
     * Fill the whole region with zeros
     * Fill const, timestamp and size entries with their data
     * Fill the digests using this header as the header region

    Positional arguments:
    region_list - every region in the build; size and digest members may
                  refer to other regions by name
    current_region - the header region; its ``filename`` attribute holds
                     the list of header member descriptions

    Returns an IntelHex object containing the rendered header.
    """
    region_dict = {r.name: r for r in region_list}
    header = IntelHex()
    # Zero-fill the whole header first so unwritten gaps are deterministic.
    header.puts(current_region.start, b'\x00' * current_region.size)
    start = current_region.start
    for member in current_region.filename:
        # "member_type"/"digest_obj" avoid shadowing the builtins
        # ``type`` and ``hash`` that the original names collided with.
        _, member_type, subtype, data = member
        if member_type == "const":
            fmt = {
                "8le": ">B", "16le": "<H", "32le": "<L", "64le": "<Q",
                "8be": "<B", "16be": ">H", "32be": ">L", "64be": ">Q"
            }[subtype]
            header.puts(start, struct.pack(fmt, integer(data, 0)))
        elif member_type == "timestamp":
            fmt = {"32le": "<L", "64le": "<Q",
                   "32be": ">L", "64be": ">Q"}[subtype]
            header.puts(start, struct.pack(fmt, int(time())))
        elif member_type == "size":
            fmt = {"32le": "<L", "64le": "<Q",
                   "32be": ">L", "64be": ">Q"}[subtype]
            # Sum the actual (not declared) sizes of the referenced regions.
            size = sum(_real_region_size(region_dict[r]) for r in data)
            header.puts(start, struct.pack(fmt, size))
        elif member_type == "digest":
            if data == "header":
                # Digest everything written to the header before this member.
                ih = header[:start]
            else:
                ih = intelhex_offset(region_dict[data].filename,
                                     offset=region_dict[data].start)
            if subtype.startswith("CRCITT32"):
                fmt = {"CRCITT32be": ">L", "CRCITT32le": "<L"}[subtype]
                crc_val = zlib.crc32(ih.tobinarray()) & 0xffffffff
                header.puts(start, struct.pack(fmt, crc_val))
            elif subtype.startswith("SHA"):
                # NOTE(review): an unrecognised SHA subtype would leave
                # digest_obj unbound (NameError) -- confirm upstream config
                # validation rejects such values.
                if subtype == "SHA256":
                    digest_obj = hashlib.sha256()
                elif subtype == "SHA512":
                    digest_obj = hashlib.sha512()
                digest_obj.update(ih.tobinarray())
                header.puts(start, digest_obj.digest())
        # Removed unused local ``member_size``: the member size is only
        # needed here, to advance to the next member's offset.
        start += Config.header_member_size(member)
    return header
|
||||
|
||||
|
||||
def merge_region_list(region_list, destination, notify, config, padding=b'\xFF'):
    """Combine every region in ``region_list`` into a single image.

    Positional Arguments:
    region_list - list of regions, which should contain filenames
    destination - file name to write all regions to
    notify - notifier object used to report progress
    config - configuration; consulted for ROM size restrictions
    padding - bytes to fill gaps with
    """
    image = IntelHex()
    _, out_format = splitext(destination)
    notify.info("Merging Regions")
    # A bootloader may be split over several regions that all reference the
    # same file on disk; remember each file so it is only merged once.
    seen_files = []

    for region in region_list:
        if region.active and not region.filename:
            raise ToolException("Active region has no contents: No file found.")
        if isinstance(region.filename, list):
            # A list "filename" is really a header description: render it
            # to a hex file and treat that file as the region contents.
            base, _ = splitext(destination)
            header_path = base + "_header.hex"
            _fill_header(region_list, region).tofile(header_path, format='hex')
            region = region._replace(filename=header_path)
        if not region.filename:
            continue
        if region.filename in seen_files:
            notify.info(" Skipping %s as it is merged previously" % (region.name))
            continue
        notify.info(" Filling region %s with %s" % (region.name, region.filename))
        part = intelhex_offset(region.filename, offset=region.start)
        part.start_addr = None
        # part.maxaddr() may normally run past the end of ROM; when the
        # target restricts the size via config, enforce the limit.
        if config.target.restrict_size is not None:
            occupied = (part.maxaddr() - part.minaddr()) + 1
            if occupied > region.size:
                raise ToolException("Contents of region %s does not fit"
                                    % region.name)
        seen_files.append(region.filename)
        image.merge(part)

    # Hex output tolerates address gaps; every other format needs the gaps
    # padded explicitly, segment by segment.
    if out_format != ".hex":
        # begin patching from the end of the first segment
        _, fill_from = image.segments()[0]
        for seg_begin, seg_end in image.segments()[1:]:
            image.puts(fill_from, padding * (seg_begin - fill_from))
            fill_from = seg_end + 1

    if not exists(dirname(destination)):
        makedirs(dirname(destination))
    notify.info("Space used after regions merged: 0x%x" %
                (image.maxaddr() - image.minaddr() + 1))
    image.tofile(destination, format=out_format.strip("."))
|
||||
|
||||
|
||||
UPDATE_WHITELIST = (
|
||||
"application",
|
||||
|
|
@ -605,27 +486,7 @@ def build_project(src_paths, build_path, target, toolchain_name,
|
|||
objects = toolchain.compile_sources(resources, sorted(resources.get_file_paths(FileType.INC_DIR)))
|
||||
resources.add_files_to_type(FileType.OBJECT, objects)
|
||||
|
||||
# Link Program
|
||||
if toolchain.config.has_regions:
|
||||
binary, _ = toolchain.link_program(resources, build_path, name + "_application")
|
||||
region_list = list(toolchain.config.regions)
|
||||
region_list = [r._replace(filename=binary) if r.active else r
|
||||
for r in region_list]
|
||||
res = "%s.%s" % (join(build_path, name),
|
||||
getattr(toolchain.target, "OUTPUT_EXT", "bin"))
|
||||
merge_region_list(region_list, res, notify, toolchain.config)
|
||||
update_regions = [
|
||||
r for r in region_list if r.name in UPDATE_WHITELIST
|
||||
]
|
||||
if update_regions:
|
||||
update_res = join(build_path, generate_update_filename(name, toolchain.target))
|
||||
merge_region_list(update_regions, update_res, notify, toolchain.config)
|
||||
res = (res, update_res)
|
||||
else:
|
||||
res = (res, None)
|
||||
else:
|
||||
res, _ = toolchain.link_program(resources, build_path, name)
|
||||
res = (res, None)
|
||||
res = toolchain.link_program(resources, build_path, name)
|
||||
|
||||
into_dir, extra_artifacts = toolchain.config.deliver_into()
|
||||
if into_dir:
|
||||
|
|
|
|||
|
|
@ -0,0 +1,174 @@
|
|||
# mbed SDK
|
||||
# Copyright (c) 2011-2013 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Utilities for working with region lists.
|
||||
"""
|
||||
|
||||
import hashlib
|
||||
import struct
|
||||
import zlib
|
||||
from time import time
|
||||
from os.path import splitext, exists, dirname
|
||||
from os import makedirs
|
||||
from .config import Config
|
||||
from .utils import (
|
||||
ToolException,
|
||||
intelhex_offset,
|
||||
integer
|
||||
)
|
||||
from intelhex import IntelHex
|
||||
|
||||
# Region names whose contents belong in the update (OTA candidate) image.
# NOTE: the trailing comma is required -- without it this is a parenthesised
# *string*, and ``r.name in UPDATE_WHITELIST`` degrades to substring
# matching (e.g. "app" would match "application").
UPDATE_WHITELIST = (
    "application",
)
|
||||
|
||||
|
||||
def _printihex(ihex):
|
||||
import pprint
|
||||
pprint.PrettyPrinter().pprint(ihex.todict())
|
||||
|
||||
|
||||
def _real_region_size(region):
    """Return the byte count a region's contents actually occupy.

    Falls back to the declared region size when the region's file
    cannot be parsed into an address range.
    """
    try:
        parsed = intelhex_offset(region.filename, offset=region.start)
        return (parsed.maxaddr() - parsed.minaddr()) + 1
    except AttributeError:
        # Contents not measurable - trust the configured size.
        return region.size
|
||||
|
||||
|
||||
def _fill_header(region_list, current_region):
    """Fill an application header region

    This is done in three steps:
     * Fill the whole region with zeros
     * Fill const, timestamp and size entries with their data
     * Fill the digests using this header as the header region

    Positional arguments:
    region_list - every region in the build; size and digest members may
                  refer to other regions by name
    current_region - the header region; its ``filename`` attribute holds
                     the list of header member descriptions

    Returns an IntelHex object containing the rendered header.
    """
    region_dict = {r.name: r for r in region_list}
    header = IntelHex()
    # Zero-fill the whole header first so unwritten gaps are deterministic.
    header.puts(current_region.start, b'\x00' * current_region.size)
    start = current_region.start
    for member in current_region.filename:
        # "member_type"/"digest_obj" avoid shadowing the builtins
        # ``type`` and ``hash`` that the original names collided with.
        _, member_type, subtype, data = member
        if member_type == "const":
            fmt = {
                "8le": ">B", "16le": "<H", "32le": "<L", "64le": "<Q",
                "8be": "<B", "16be": ">H", "32be": ">L", "64be": ">Q"
            }[subtype]
            header.puts(start, struct.pack(fmt, integer(data, 0)))
        elif member_type == "timestamp":
            fmt = {"32le": "<L", "64le": "<Q",
                   "32be": ">L", "64be": ">Q"}[subtype]
            header.puts(start, struct.pack(fmt, int(time())))
        elif member_type == "size":
            fmt = {"32le": "<L", "64le": "<Q",
                   "32be": ">L", "64be": ">Q"}[subtype]
            # Sum the actual (not declared) sizes of the referenced regions.
            size = sum(_real_region_size(region_dict[r]) for r in data)
            header.puts(start, struct.pack(fmt, size))
        elif member_type == "digest":
            if data == "header":
                # Digest everything written to the header before this member.
                ih = header[:start]
            else:
                ih = intelhex_offset(
                    region_dict[data].filename,
                    offset=region_dict[data].start
                )
            if subtype.startswith("CRCITT32"):
                fmt = {"CRCITT32be": ">L", "CRCITT32le": "<L"}[subtype]
                crc_val = zlib.crc32(ih.tobinarray()) & 0xffffffff
                header.puts(start, struct.pack(fmt, crc_val))
            elif subtype.startswith("SHA"):
                # NOTE(review): an unrecognised SHA subtype would leave
                # digest_obj unbound (NameError) -- confirm upstream config
                # validation rejects such values.
                if subtype == "SHA256":
                    digest_obj = hashlib.sha256()
                elif subtype == "SHA512":
                    digest_obj = hashlib.sha512()
                digest_obj.update(ih.tobinarray())
                header.puts(start, digest_obj.digest())
        start += Config.header_member_size(member)
    return header
|
||||
|
||||
|
||||
def merge_region_list(
    region_list,
    destination,
    notify,
    config,
    padding=b'\xFF'
):
    """Write every region in ``region_list`` as one combined image.

    Positional Arguments:
    region_list - list of regions, which should contain filenames
    destination - file name to write all regions to
    notify - notifier object used to report progress
    config - configuration; consulted for ROM size restrictions
    padding - bytes to fill gaps with
    """
    output = IntelHex()
    _, suffix = splitext(destination)
    notify.info("Merging Regions")
    # A bootloader may be split over several regions that all reference the
    # same file on disk; record each file once so it is only merged once.
    merged_files = []

    for region in region_list:
        if region.active and not region.filename:
            raise ToolException(
                "Active region has no contents: No file found."
            )
        if isinstance(region.filename, list):
            # A list "filename" is really a header description: render it
            # to a hex file and treat that file as the region contents.
            root, _ = splitext(destination)
            header_file = root + "_header.hex"
            _fill_header(region_list, region).tofile(
                header_file, format='hex'
            )
            region = region._replace(filename=header_file)
        if region.filename and (region.filename not in merged_files):
            notify.info(" Filling region %s with %s" % (
                region.name, region.filename
            ))
            chunk = intelhex_offset(region.filename, offset=region.start)
            chunk.start_addr = None
            # chunk.maxaddr() may legitimately exceed the end of ROM unless
            # the target restricts the size via config.
            if config.target.restrict_size is not None:
                used = (chunk.maxaddr() - chunk.minaddr()) + 1
                if used > region.size:
                    raise ToolException(
                        "Contents of region %s does not fit" % region.name
                    )
            merged_files.append(region.filename)
            output.merge(chunk)
        elif region.filename in merged_files:
            notify.info(
                " Skipping %s as it is merged previously" % (region.name)
            )

    # Hex output tolerates address gaps; every other format needs the gaps
    # padded explicitly, segment by segment.
    if suffix != ".hex":
        # begin patching from the end of the first segment
        _, fill_from = output.segments()[0]
        for seg_begin, seg_end in output.segments()[1:]:
            output.puts(fill_from, padding * (seg_begin - fill_from))
            fill_from = seg_end + 1

    if not exists(dirname(destination)):
        makedirs(dirname(destination))
    notify.info("Space used after regions merged: 0x%x" %
                (output.maxaddr() - output.minaddr() + 1))
    output.tofile(destination, format=suffix.strip("."))
|
||||
|
|
@ -33,13 +33,22 @@ from distutils.spawn import find_executable
|
|||
from multiprocessing import Pool, cpu_count
|
||||
from hashlib import md5
|
||||
|
||||
from ..utils import (run_cmd, mkdir, rel_path, ToolException,
|
||||
NotSupportedException, split_path, compile_worker)
|
||||
from ..utils import (
|
||||
run_cmd,
|
||||
mkdir,
|
||||
rel_path,
|
||||
ToolException,
|
||||
NotSupportedException,
|
||||
split_path,
|
||||
compile_worker,
|
||||
generate_update_filename,
|
||||
)
|
||||
from ..settings import MBED_ORG_USER, PRINT_COMPILER_OUTPUT_AS_LINK
|
||||
from ..notifier.term import TerminalNotifier
|
||||
from ..resources import FileType
|
||||
from ..memap import MemapParser
|
||||
from ..config import (ConfigException, RAM_ALL_MEMORIES, ROM_ALL_MEMORIES)
|
||||
from ..regions import (UPDATE_WHITELIST, merge_region_list)
|
||||
from ..settings import COMPARE_FIXED
|
||||
|
||||
|
||||
|
|
@ -599,11 +608,33 @@ class mbedToolchain:
|
|||
|
||||
return needed_update
|
||||
|
||||
def _do_region_merge(self, name, binary, ext):
    """Merge the linked application into the managed-bootloader ROM layout.

    Positional arguments:
    name - the project name (used to derive output file names)
    binary - path of the freshly linked application image
    ext - output extension for the merged image

    Returns a (merged_path, update_path) pair; update_path is None when no
    region is eligible for the update image.
    """
    regions = [
        region._replace(filename=binary) if region.active else region
        for region in self.config.regions
    ]
    merged_path = "{}.{}".format(join(self.build_dir, name), ext)
    merge_region_list(regions, merged_path, self.notify, self.config)
    # Regions named in UPDATE_WHITELIST also go into a separate
    # update (OTA candidate) image.
    updatable = [r for r in regions if r.name in UPDATE_WHITELIST]
    if not updatable:
        return merged_path, None
    update_path = join(
        self.build_dir,
        generate_update_filename(name, self.target)
    )
    merge_region_list(
        updatable,
        update_path,
        self.notify,
        self.config
    )
    return merged_path, update_path
|
||||
|
||||
def link_program(self, r, tmp_path, name):
|
||||
needed_update = False
|
||||
ext = 'bin'
|
||||
if hasattr(self.target, 'OUTPUT_EXT'):
|
||||
ext = self.target.OUTPUT_EXT
|
||||
ext = getattr(self.target, "OUTPUT_EXT", "bin")
|
||||
|
||||
if hasattr(self.target, 'OUTPUT_NAMING'):
|
||||
self.notify.var("binary_naming", self.target.OUTPUT_NAMING)
|
||||
|
|
@ -616,12 +647,13 @@ class mbedToolchain:
|
|||
new_path = join(tmp_path, head)
|
||||
mkdir(new_path)
|
||||
|
||||
filename = name+'.'+ext
|
||||
# Absolute path of the final linked file
|
||||
full_path = join(tmp_path, filename)
|
||||
elf = join(tmp_path, name + '.elf')
|
||||
bin = None if ext == 'elf' else full_path
|
||||
mapfile = join(tmp_path, name + '.map')
|
||||
if self.config.has_regions:
|
||||
elf = join(tmp_path, name + '_application.elf')
|
||||
mapfile = join(tmp_path, name + '_application.map')
|
||||
else:
|
||||
elf = join(tmp_path, name + '.elf')
|
||||
mapfile = join(tmp_path, name + '.map')
|
||||
|
||||
objects = sorted(set(r.get_file_paths(FileType.OBJECT)))
|
||||
config_file = ([self.config.app_config_location]
|
||||
|
|
@ -647,21 +679,34 @@ class mbedToolchain:
|
|||
self.progress("link", name)
|
||||
self.link(elf, objects, libraries, lib_dirs, linker_script)
|
||||
|
||||
if bin and self.need_update(bin, [elf]):
|
||||
needed_update = True
|
||||
self.progress("elf2bin", name)
|
||||
self.binary(r, elf, bin)
|
||||
if ext != 'elf':
|
||||
if self.config.has_regions:
|
||||
filename = "{}_application.{}".format(name, ext)
|
||||
else:
|
||||
filename = "{}.{}".format(name, ext)
|
||||
full_path = join(tmp_path, filename)
|
||||
if full_path and self.need_update(full_path, [elf]):
|
||||
needed_update = True
|
||||
self.progress("elf2bin", name)
|
||||
self.binary(r, elf, full_path)
|
||||
if self.config.has_regions:
|
||||
full_path, updatable = self._do_region_merge(name, full_path, ext)
|
||||
else:
|
||||
updatable = None
|
||||
else:
|
||||
full_path = None
|
||||
updatable = None
|
||||
|
||||
if self._post_build_hook:
|
||||
self.progress("post-build", name)
|
||||
self._post_build_hook(self, r, elf, bin)
|
||||
self._post_build_hook(self, r, elf, full_path)
|
||||
# Initialize memap and process map file. This doesn't generate output.
|
||||
self.mem_stats(mapfile)
|
||||
|
||||
self.notify.var("compile_succeded", True)
|
||||
self.notify.var("binary", filename)
|
||||
|
||||
return full_path, needed_update
|
||||
return full_path, updatable
|
||||
|
||||
# THIS METHOD IS BEING OVERRIDDEN BY THE MBED ONLINE BUILD SYSTEM
|
||||
# ANY CHANGE OF PARAMETERS OR RETURN VALUES WILL BREAK COMPATIBILITY
|
||||
|
|
|
|||
Loading…
Reference in New Issue