Python2+3: iteritems -> items

pull/5848/head
Jimmy Brisson 2018-01-12 15:46:44 -06:00
parent 45bdd98a85
commit cca4425af6
22 changed files with 47 additions and 47 deletions
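
Why this works on both interpreters: on Python 2, dict.items() builds a full list of (key, value) pairs while dict.iteritems() returns a lazy iterator; Python 3 removes iteritems() entirely and makes items() return a lightweight view. Calling items() is therefore the one spelling that runs everywhere, at the cost of a temporary list on Python 2. A minimal sketch of the difference (hypothetical dict, not from this diff):

d = {"a": 1, "b": 2}
# Python 2: d.items() -> list, d.iteritems() -> iterator
# Python 3: d.items() -> view object, d.iteritems() -> AttributeError
for key, value in d.items():  # runs unchanged on both versions
    print(key, value)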

@@ -69,7 +69,7 @@ def fuzzy_find(matches, urls) :
for key, value in process.extract(match, urls, limit=None) :
choices.setdefault(key, 0)
choices[key] += value
-choices = sorted([(v, k) for k, v in choices.iteritems()], reverse=True)
+choices = sorted([(v, k) for k, v in choices.items()], reverse=True)
if not choices : return []
elif len(choices) == 1 : return [choices[0][1]]
elif choices[0][0] > choices[1][0] : choices = choices[:1]
@@ -155,7 +155,7 @@ def command_dump_parts (cache, out, parts, intersection=False) :
else :
for part in parts :
index.update(dict(cache.find_device(part)))
-for n, p in index.iteritems() :
+for n, p in index.items() :
try :
if not exists(join(out, dirname(p['algorithm']['file']))) :
makedirs(join(out, dirname(p['algorithm']['file'])))

@@ -154,7 +154,7 @@ if __name__ == '__main__':
test_builds = {}
total_build_success = True
-for target_name, target_toolchains in build_config.iteritems():
+for target_name, target_toolchains in build_config.items():
target = TARGET_MAP[target_name]
for target_toolchain in target_toolchains:

@@ -673,7 +673,7 @@ class Config(object):
" is only allowed at the application level")
# Parse out cumulative overrides
-for attr, cumulatives in self.cumulative_overrides.iteritems():
+for attr, cumulatives in self.cumulative_overrides.items():
if 'target.'+attr in overrides:
key = 'target.' + attr
if not isinstance(overrides[key], list):

@@ -226,7 +226,7 @@ def zip_export(file_name, prefix, resources, project_files, inc_repos):
with zipfile.ZipFile(file_name, "w") as zip_file:
for prj_file in project_files:
zip_file.write(prj_file, join(prefix, basename(prj_file)))
-for loc, res in resources.iteritems():
+for loc, res in resources.items():
to_zip = (
res.headers + res.s_sources + res.c_sources +\
res.cpp_sources + res.libraries + res.hex_files + \
@@ -320,7 +320,7 @@ def export_project(src_paths, export_path, target, ide, libraries_paths=None,
# Call unified scan_resources
resource_dict = {loc: scan_resources(path, toolchain, inc_dirs=inc_dirs, collect_ignores=True)
-for loc, path in src_paths.iteritems()}
+for loc, path in src_paths.items()}
resources = Resources()
toolchain.build_dir = export_path
config_header = toolchain.get_config_header()
@@ -329,12 +329,12 @@ def export_project(src_paths, export_path, target, ide, libraries_paths=None,
if zip_proj:
subtract_basepath(resources, ".")
-for loc, res in resource_dict.iteritems():
+for loc, res in resource_dict.items():
temp = copy.deepcopy(res)
subtract_basepath(temp, ".", loc)
resources.add(temp)
else:
-for _, res in resource_dict.iteritems():
+for _, res in resource_dict.items():
resources.add(res)
# Change linker script if specified
@@ -347,7 +347,7 @@ def export_project(src_paths, export_path, target, ide, libraries_paths=None,
files.append(config_header)
if zip_proj:
for resource in resource_dict.values():
-for label, res in resource.features.iteritems():
+for label, res in resource.features.items():
if label not in toolchain.target.features:
resource.add(res)
if isinstance(zip_proj, basestring):
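
Note: the unchanged isinstance(zip_proj, basestring) context line above is itself Python 2-only, since basestring was removed in Python 3. A common shim (an assumption for illustration, not part of this commit):

try:
    string_types = basestring  # Python 2: covers str and unicode
except NameError:
    string_types = str         # Python 3: basestring no longer exists
# isinstance(zip_proj, string_types) then works on either interpreter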

@@ -74,13 +74,13 @@ class CoIDE(Exporter):
def generate(self):
self.resources.win_to_unix()
source_files = []
-for r_type, n in CoIDE.FILE_TYPES.iteritems():
+for r_type, n in CoIDE.FILE_TYPES.items():
for file in getattr(self.resources, r_type):
source_files.append({
'name': basename(file), 'type': n, 'path': file
})
header_files = []
-for r_type, n in CoIDE.FILE_TYPES2.iteritems():
+for r_type, n in CoIDE.FILE_TYPES2.items():
for file in getattr(self.resources, r_type):
header_files.append({
'name': basename(file), 'type': n, 'path': file

@@ -50,7 +50,7 @@ class DS5_5(Exporter):
def generate(self):
source_files = []
-for r_type, n in DS5_5.FILE_TYPES.iteritems():
+for r_type, n in DS5_5.FILE_TYPES.items():
for file in getattr(self.resources, r_type):
source_files.append({
'name': basename(file), 'type': n, 'path': file

@@ -52,7 +52,7 @@ class EmBitz(Exporter):
def generate(self):
self.resources.win_to_unix()
source_files = []
-for r_type, n in self.FILE_TYPES.iteritems():
+for r_type, n in self.FILE_TYPES.items():
for file in getattr(self.resources, r_type):
source_files.append({
'name': file, 'type': n

@@ -104,7 +104,7 @@ class Exporter(object):
"""
config_header = self.toolchain.get_config_header()
flags = {key + "_flags": copy.deepcopy(value) for key, value
-in self.toolchain.flags.iteritems()}
+in self.toolchain.flags.items()}
asm_defines = self.toolchain.get_compile_options(
self.toolchain.get_symbols(for_asm=True),
filter(None, self.resources.inc_dirs),

@@ -92,7 +92,7 @@ class GNUARMEclipse(Exporter):
config_header = self.toolchain.get_config_header()
flags = {key + "_flags": copy.deepcopy(value) for key, value
-in self.toolchain.flags.iteritems()}
+in self.toolchain.flags.items()}
if config_header:
config_header = relpath(config_header,
self.resources.file_basepath[config_header])
@@ -117,7 +117,7 @@ class GNUARMEclipse(Exporter):
config_header = self.toolchain.get_config_header()
flags = {key + "_flags": copy.deepcopy(value) for key, value
-in toolchain.flags.iteritems()}
+in toolchain.flags.items()}
if config_header:
config_header = relpath(config_header,
self.resources.file_basepath[config_header])

@@ -6,7 +6,7 @@ mbedclean:
$(RM) $(SECONDARY_FLASH)$(SECONDARY_SIZE) {{name}}.* linker-script-*.ld
-@echo ' '
-{% for config, data in options.iteritems() %}
+{% for config, data in options.items() %}
linker-script-{{config}}.ld: ../{{ld_script}}
{{data.cpp_cmd}} {{data.ld.other}} $< -o $@
{{name}}.elf: linker-script-{{config}}.ld
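
The rename also applies inside the Jinja2 Makefile templates (this hunk and the similar ones below), because Jinja2 resolves options.items() against the underlying Python dict at render time. A small illustration with a hypothetical template:

from jinja2 import Template

tmpl = Template("{% for config, data in options.items() %}{{ config }} {% endfor %}")
print(tmpl.render(options={"debug": {}, "release": {}}))  # "debug release "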

@@ -140,7 +140,7 @@ class Makefile(Exporter):
def format_flags(self):
"""Format toolchain flags for Makefile"""
flags = {}
-for k, v in self.flags.iteritems():
+for k, v in self.flags.items():
if k in ['asm_flags', 'c_flags', 'cxx_flags']:
flags[k] = map(lambda x: x.replace('"', '\\"'), v)
else:

@@ -6,7 +6,7 @@ mbedclean:
$(RM) $(EXECUTABLES) {{name}}.* linker-script-*.ld
-@echo ' '
-{% for config, data in options.iteritems() %}
+{% for config, data in options.items() %}
linker-script-{{config}}.ld: ../{{ld_script}}
{{data.cpp_cmd}} {{data.ld.other}} $< -o $@
{{name}}.elf: linker-script-{{config}}.ld

@@ -3,11 +3,11 @@
PREPROC_CMD ?= {{cpp_cmd}}
ldclean:
-{% for config, opts in options.iteritems() %}
+{% for config, opts in options.items() %}
$(RM) {{opts['ld']['script']}}
{% endfor %}
-{% for config, opts in options.iteritems() %}
+{% for config, opts in options.items() %}
{{opts['ld']['script']}}: ../{{ld_script}}
$(PREPROC_CMD) {{opts.ld.other}} $< -o $@

@@ -215,7 +215,7 @@ class Uvision(Exporter):
'name': self.project_name,
# project_files => dict of generators - file group to generator of
# UVFile tuples defined above
-'project_files': sorted(list(self.format_src(srcs).iteritems()),
+'project_files': sorted(list(self.format_src(srcs).items()),
key=lambda (group, _): group.lower()),
'linker_script':self.toolchain.correct_scatter_shebang(
self.resources.linker_script),
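
One caveat in this hunk: the unchanged key=lambda (group, _): group.lower() line relies on tuple parameter unpacking, which Python 3 removed (PEP 3113), so this file still needs a follow-up for full 2+3 support. A compatible sort key unpacks inside the body, sketched here with illustrative data:

src_groups = {"Src": ["main.c"], "Headers": ["main.h"]}
project_files = sorted(list(src_groups.items()),
                       key=lambda pair: pair[0].lower())  # pair == (group, files)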

@@ -43,13 +43,13 @@ class MyJSONEncoder(json.JSONEncoder):
break
output = []
if primitives_only and len(o) < 3:
-for key, value in o.iteritems():
+for key, value in o.items():
output.append(json.dumps(key) + ": " + self.encode(value))
return "{" + ", ".join(output) + "}"
else:
self.current_indent += self.indent
self.current_indent_str = " " * self.current_indent
-for key, value in o.iteritems():
+for key, value in o.items():
output.append(self.current_indent_str + json.dumps(key) + ": " + self.encode(value))
self.current_indent -= self.indent
self.current_indent_str = " " * self.current_indent
@@ -141,7 +141,7 @@ def add_to_targets(targets, device_file, verbose=False, remove=False) :
print("[WARNING] device {} did not have an associated device.h".format(device))
else :
possible_matches = set([key for key in targets.keys() if stem_match(device, key)])
-for key, value in targets.iteritems() :
+for key, value in targets.items() :
for alt in value['extra_labels'] if 'extra_labels' in value else [] :
if stem_match(device, alt) : possible_matches.add(key)
for alt in value['extra_labels_add'] if 'extra_labels_add' in value else [] :

@@ -101,7 +101,7 @@ def compare(t1, t2, target):
benchmarks_data[name][t] = map(int, (code, data, bss, flash))
print "%s vs %s for %s" % (t1, t2, target)
-for name, data in benchmarks_data.iteritems():
+for name, data in benchmarks_data.items():
try:
# Check Size
code_a, data_a, bss_a, flash_a = data[t1]
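
The surrounding context here is also still Python 2-only: the print statement, and a map() result that is stored for later use (on Python 3, map() returns a lazy, single-use iterator). A hedged sketch of the 2+3-safe equivalents, with illustrative values:

from __future__ import print_function  # print becomes a function on Python 2 too

code, data, bss, flash = "100", "20", "30", "150"
sizes = list(map(int, (code, data, bss, flash)))  # list() so it can be reused later
print("%s vs %s for %s" % ("ARM", "GCC_ARM", "K64F"))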

@@ -142,7 +142,7 @@ class Target(namedtuple("Target", "name json_data resolution_order resolution_or
Target.__targets_json_location_default)
for extra_target in Target.__extra_target_json_files:
-for k, v in json_file_to_dict(extra_target).iteritems():
+for k, v in json_file_to_dict(extra_target).items():
if k in targets:
print('WARNING: Custom target "%s" cannot replace existing '
'target.' % k)

@@ -36,7 +36,7 @@ def print_list(lst):
def print_category(results, index, message):
-summary = [example for key, summ in results.iteritems()
+summary = [example for key, summ in results.items()
for example in summ[index]]
if all(len(s) == 0 for s in summary):
return
@@ -221,7 +221,7 @@ def get_num_failures(results, export=False):
"""
num_failures = 0
-for key, val in results.iteritems():
+for key, val in results.items():
num_failures = num_failures + len(val[3])
if export:
num_failures += len(val[4])

@@ -630,7 +630,7 @@ class SingleTestRunner(object):
# in separate threads do not collide.
# Inside execute_thread_slice() function function handle() will be called to
# get information about available MUTs (per target).
-for target, toolchains in self.test_spec['targets'].iteritems():
+for target, toolchains in self.test_spec['targets'].items():
self.test_suite_properties_ext[target] = {}
t = threading.Thread(target=self.execute_thread_slice, args = (q, target, toolchains, clean, test_ids, self.build_report, self.build_properties))
t.daemon = True
@@ -641,7 +641,7 @@ class SingleTestRunner(object):
q.get() # t.join() would block some threads because we should not wait in any order for thread end
else:
# Serialized (not parallel) test execution
-for target, toolchains in self.test_spec['targets'].iteritems():
+for target, toolchains in self.test_spec['targets'].items():
if target not in self.test_suite_properties_ext:
self.test_suite_properties_ext[target] = {}
@@ -816,7 +816,7 @@ class SingleTestRunner(object):
result += "\n"
# Print result count
result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.iteritems()])
result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.items()])
shuffle_seed_text = "Shuffle Seed: %.*f\n"% (self.SHUFFLE_SEED_ROUND,
shuffle_seed if shuffle_seed else self.shuffle_random_seed)
result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
@@ -1016,7 +1016,7 @@ class SingleTestRunner(object):
# Find a suitable MUT:
mut = None
-for id, m in self.muts.iteritems():
+for id, m in self.muts.items():
if m['mcu'] == data['mcu']:
mut = m
handle_result = self.handle_mut(mut, data, target_name, toolchain_name, test_loops=test_loops)
@@ -1193,7 +1193,7 @@ class SingleTestRunner(object):
"""
if peripherals is not None:
peripherals = set(peripherals)
-for id, mut in self.muts.iteritems():
+for id, mut in self.muts.items():
# Target MCU name check
if mut["mcu"] != target_mcu_name:
continue
@@ -2177,7 +2177,7 @@ def build_test_worker(*args, **kwargs):
}
# Use parent TOOLCHAIN_PATHS variable
-for key, value in kwargs['toolchain_paths'].iteritems():
+for key, value in kwargs['toolchain_paths'].items():
TOOLCHAIN_PATHS[key] = value
del kwargs['toolchain_paths']
@@ -2237,8 +2237,8 @@ def build_tests(tests, base_source_paths, build_path, target, toolchain_name,
jobs_count = int(jobs if jobs else cpu_count())
p = Pool(processes=jobs_count)
results = []
-for test_name, test_paths in tests.iteritems():
-if type(test_paths) != ListType:
+for test_name, test_paths in tests.items():
+if not isinstance(test_paths, list):
test_paths = [test_paths]
test_build_path = os.path.join(build_path, test_paths[0])
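
types.ListType was merely an alias for list on Python 2 and is gone in Python 3, so this hunk also swaps the type(...) != ListType check for isinstance, which additionally accepts list subclasses:

test_paths = "TESTS/integration"      # illustrative value
if not isinstance(test_paths, list):  # True for any non-list, on Py2 and Py3
    test_paths = [test_paths]
print(test_paths)                     # ['TESTS/integration']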

@@ -412,5 +412,5 @@ class ReportExporter():
result += "\n"
# Print result count
result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.iteritems()])
result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.items()])
return result

@@ -80,7 +80,7 @@ class LazyDict(dict):
def __str__(self):
return "Lazy{%s}" % (
", ".join("%r: %r" % (k, v) for k, v in
-chain(self.eager.iteritems(), ((k, "not evaluated")
+chain(self.eager.items(), ((k, "not evaluated")
for k in self.lazy))))
def update(self, other):
@@ -90,10 +90,10 @@ class LazyDict(dict):
else:
self.eager.update(other)
-def iteritems(self):
+def items(self):
"""Warning: This forces the evaluation all of the items in this LazyDict
that are iterated over."""
-for k, v in self.eager.iteritems():
+for k, v in self.eager.items():
yield k, v
for k in self.lazy.keys():
yield k, self[k]
@@ -103,11 +103,11 @@
Does no computation now. Instead the comuptation is performed when a
consumer attempts to access a value in this LazyDict"""
new_lazy = {}
-for k, f in self.lazy.iteritems():
+for k, f in self.lazy.items():
def closure(f=f):
return fn(f())
new_lazy[k] = closure
-for k, v in self.eager.iteritems():
+for k, v in self.eager.items():
def closure(v=v):
return fn(v)
new_lazy[k] = closure
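
Renaming LazyDict.iteritems to items keeps the Python 3 spelling, but the method remains a generator: unlike a plain dict's items() on Python 2, it yields pairs lazily and only evaluates deferred entries as they are reached. A stripped-down sketch of that behavior (a toy class, not the mbed implementation):

class TinyLazyDict(object):
    """Eager values plus zero-argument callables evaluated on first access."""
    def __init__(self):
        self.eager, self.lazy = {}, {}
    def __getitem__(self, key):
        if key in self.lazy:  # evaluate once, then cache
            self.eager[key] = self.lazy.pop(key)()
        return self.eager[key]
    def items(self):
        for k, v in self.eager.items():
            yield k, v
        for k in list(self.lazy.keys()):  # copy: __getitem__ mutates self.lazy
            yield k, self[k]

d = TinyLazyDict()
d.eager["a"] = 1
d.lazy["b"] = lambda: 2
print(dict(d.items()))  # {'a': 1, 'b': 2}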

@@ -181,10 +181,10 @@ def format_project_run_data(project_run_data, limit):
ts_data = prep_ts_data()
ts_data['projectRuns'] = []
-for hostOs_name, hostOs in project_run_data['projectRuns'].iteritems():
-for platform_name, platform in hostOs.iteritems():
-for toolchain_name, toolchain in platform.iteritems():
-for project_name, project in toolchain.iteritems():
+for hostOs_name, hostOs in project_run_data['projectRuns'].items():
+for platform_name, platform in hostOs.items():
+for toolchain_name, toolchain in platform.items():
+for project_name, project in toolchain.items():
if current_limit_count >= limit:
finish_ts_data(ts_data, project_run_data)
all_ts_data.append(ts_data)