python scripts : table print with github policy

pull/7720/head
jeromecoutant 2018-08-07 14:30:23 +02:00
parent 1e676f6eda
commit a42c73df1b
3 changed files with 14 additions and 14 deletions
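
Every hunk below makes the same two changes: the prettytable import gains the HEADER constant, and each PrettyTable constructor gets junction_char="|" and hrules=HEADER. With those options PrettyTable draws a single horizontal rule directly under the header and uses "|" instead of "+" at rule junctions, so the printed table is valid GitHub-flavored-markdown and can be pasted straight into an issue or PR comment. A minimal standalone sketch of the effect (columns and row values are invented for illustration, not taken from the patch):

    from prettytable import PrettyTable, HEADER

    # Hypothetical columns/row, chosen only to show the formatting difference.
    columns = ["Target", "GCC_ARM", "ARM", "IAR"]

    # Default PrettyTable: full ASCII frame with "+" junctions.
    plain = PrettyTable(columns)
    plain.add_row(["NUCLEO_F401RE", "Supported", "Supported", "Supported"])
    print(plain)
    # prints, roughly:
    # +---------------+-----------+-----------+-----------+
    # |     Target    |  GCC_ARM  |    ARM    |    IAR    |
    # +---------------+-----------+-----------+-----------+
    # | NUCLEO_F401RE | Supported | Supported | Supported |
    # +---------------+-----------+-----------+-----------+

    # With the options added by this commit: one rule under the header
    # and "|" junctions, i.e. a GitHub-markdown table.
    markdown = PrettyTable(columns, junction_char="|", hrules=HEADER)
    markdown.add_row(["NUCLEO_F401RE", "Supported", "Supported", "Supported"])
    print(markdown)
    # prints, roughly:
    # |     Target    |  GCC_ARM  |    ARM    |    IAR    |
    # |---------------|-----------|-----------|-----------|
    # | NUCLEO_F401RE | Supported | Supported | Supported |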

View File

@@ -1162,7 +1162,7 @@ def mcu_toolchain_matrix(verbose_html=False, platform_filter=None,
     release_version - get the matrix for this major version number
     """
     # Only use it in this function so building works without extra modules
-    from prettytable import PrettyTable
+    from prettytable import PrettyTable, HEADER
     release_version = _lowercase_release_version(release_version)
     version_release_targets = {}
     version_release_target_names = {}
@@ -1184,7 +1184,7 @@ def mcu_toolchain_matrix(verbose_html=False, platform_filter=None,
     # All tests status table print
     columns = prepend_columns + unique_supported_toolchains
-    table_printer = PrettyTable(columns)
+    table_printer = PrettyTable(columns, junction_char="|", hrules=HEADER)
     # Align table
     for col in columns:
         table_printer.align[col] = "c"
@@ -1272,10 +1272,10 @@ def print_build_memory_usage(report):
     Positional arguments:
     report - Report generated during build procedure.
     """
-    from prettytable import PrettyTable
+    from prettytable import PrettyTable, HEADER
     columns_text = ['name', 'target', 'toolchain']
     columns_int = ['static_ram', 'total_flash']
-    table = PrettyTable(columns_text + columns_int)
+    table = PrettyTable(columns_text + columns_int, junction_char="|", hrules=HEADER)
     for col in columns_text:
         table.align[col] = 'l'

View File

@@ -14,7 +14,7 @@ import json
 from argparse import ArgumentParser
 from copy import deepcopy
 from collections import defaultdict
-from prettytable import PrettyTable
+from prettytable import PrettyTable, HEADER
 from jinja2 import FileSystemLoader, StrictUndefined
 from jinja2.environment import Environment
@@ -669,7 +669,7 @@ class MemapParser(object):
         columns = ['Module']
         columns.extend(self.print_sections)
-        table = PrettyTable(columns)
+        table = PrettyTable(columns, junction_char="|", hrules=HEADER)
         table.align["Module"] = "l"
         for col in self.print_sections:
             table.align[col] = 'r'

View File

@@ -32,7 +32,7 @@ import threading
 import ctypes
 import functools
 from colorama import Fore, Back, Style
-from prettytable import PrettyTable
+from prettytable import PrettyTable, HEADER
 from copy import copy, deepcopy
 from time import sleep, time
@@ -765,7 +765,7 @@ class SingleTestRunner(object):
             result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]
         pt_cols = ["Target", "Test ID", "Test Description"] + unique_target_toolchains
-        pt = PrettyTable(pt_cols)
+        pt = PrettyTable(pt_cols, junction_char="|", hrules=HEADER)
         for col in pt_cols:
             pt.align[col] = "l"
         pt.padding_width = 1 # One space between column edges and contents (default)
@@ -793,7 +793,7 @@ class SingleTestRunner(object):
         result = "Test summary:\n"
         # Pretty table package is used to print results
         pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
-                          "Elapsed Time (sec)", "Timeout (sec)", "Loops"])
+                          "Elapsed Time (sec)", "Timeout (sec)", "Loops"], junction_char="|", hrules=HEADER)
         pt.align["Result"] = "l" # Left align
         pt.align["Target"] = "l" # Left align
         pt.align["Toolchain"] = "l" # Left align
@@ -1327,7 +1327,7 @@ def print_muts_configuration_from_json(json_data, join_delim=", ", platform_filt
     # Prepare pretty table object to display all MUTs
     pt_cols = ["index"] + muts_info_cols
-    pt = PrettyTable(pt_cols)
+    pt = PrettyTable(pt_cols, junction_char="|", hrules=HEADER)
     for col in pt_cols:
         pt.align[col] = "l"
@@ -1365,7 +1365,7 @@ def print_test_configuration_from_json(json_data, join_delim=", "):
     # Prepare pretty table object to display test specification
     pt_cols = ["mcu"] + sorted(toolchains_info_cols)
-    pt = PrettyTable(pt_cols)
+    pt = PrettyTable(pt_cols, junction_char="|", hrules=HEADER)
     for col in pt_cols:
         pt.align[col] = "l"
@@ -1454,7 +1454,7 @@ def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=','
                        'duration'] if cols is None else cols
     # All tests status table print
-    pt = PrettyTable(test_properties)
+    pt = PrettyTable(test_properties, junction_char="|", hrules=HEADER)
     for col in test_properties:
         pt.align[col] = "l"
     pt.align['duration'] = "r"
@@ -1494,7 +1494,7 @@ def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=','
     if result_summary and not platform_filter:
         # Automation result summary
         test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
-        pt = PrettyTable(test_id_cols)
+        pt = PrettyTable(test_id_cols, junction_char="|", hrules=HEADER)
         pt.align['automated'] = "r"
         pt.align['all'] = "r"
         pt.align['percent [%]'] = "r"
@@ -1508,7 +1508,7 @@ def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=','
        # Test automation coverage table print
        test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
-       pt = PrettyTable(test_id_cols)
+       pt = PrettyTable(test_id_cols, junction_char="|", hrules=HEADER)
        pt.align['id'] = "l"
        pt.align['automated'] = "r"
        pt.align['all'] = "r"
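
As a usage note, once every constructor passes the same two options, the reports these tools print render as native tables when pasted into a GitHub comment. A hypothetical sketch mirroring the columns built in print_build_memory_usage above (the row values are made up):

    from prettytable import PrettyTable, HEADER

    # Same column set that print_build_memory_usage builds; the data row is invented.
    columns_text = ['name', 'target', 'toolchain']
    columns_int = ['static_ram', 'total_flash']
    table = PrettyTable(columns_text + columns_int,
                        junction_char="|", hrules=HEADER)
    for col in columns_text:
        table.align[col] = 'l'
    table.add_row(["mbed-os-example-blinky", "K64F", "GCC_ARM", 10240, 65536])
    print(table)  # markdown-ready: paste the output directly into a GitHub comment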