#!/usr/bin/env python
#
# esp-idf alternative to "size" to print ELF file sizes, also analyzes
# the linker map file to dump higher resolution details.
#
# Includes information which is not shown in "xtensa-esp32-elf-size",
# or easy to parse from "xtensa-esp32-elf-objdump" or raw map files.
#
# SPDX-FileCopyrightText: 2017-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
#
from __future__ import division, print_function, unicode_literals

import argparse
import collections
import json
import math
import os.path
import re
import sys
from typing import Any, Callable, Collection, Dict, Iterable, List, Optional, TextIO, Tuple, Union

import yaml
from future.utils import iteritems

Section = Dict[str, Union[str, int]]
SectionDict = Dict[str, Section]


try:
    basestring
except NameError:
    basestring = str


GLOBAL_JSON_INDENT = 4
GLOBAL_JSON_SEPARATORS = (',', ': ')


class MemRegions(object):
    """
    Memory regions determined by the chip target.
    """

    # DIRAM is not added here. DIRAM is indicated by the `secondary_addr` of each MemRegDef
    (DRAM_ID, IRAM_ID, CACHE_D_ID, CACHE_I_ID, RTC_FAST_D_ID, RTC_FAST_I_ID, RTC_SLOW_D_ID) = range(7)

    # The order of variables in the tuple is the same as in the soc_memory_layout.c files
    MemRegDef = collections.namedtuple('MemRegDef', ['primary_addr', 'length', 'type', 'secondary_addr'])
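    # Illustrative only: a region definition built from the YAML data could look like
    #   MemRegDef(primary_addr=0x3FC80000, length=0x50000, type=DRAM_ID, secondary_addr=0x40380000)
    # where a non-zero secondary_addr marks the region as D/IRAM (reachable through both buses).
    # The addresses above are made-up example values, not taken from any specific target.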

    class Region(object):
        # Helper class to store region information
        def __init__(self, start: int, length: int, region: 'MemRegions.MemRegDef', section: Optional[str]=None) -> None:
            self.start = start
            self.len = length
            self.region = region
            self.section = section

    @staticmethod
    def get_mem_regions(target: str) -> List:
        """
        Get the memory regions for a specific target
        """
        # The target specific memory structure is deduced from soc_memory_types defined in
        # $IDF_PATH/components/soc/**/soc_memory_layout.c files.

        MemRegDef = MemRegions.MemRegDef

        def change_to_proper_format(length: Union[str, bytes]) -> Any:
            '''
            If `length` is a string expression such as `'0x8000 + 6 * 0x10000'`, evaluate it and return the
            numeric result; if `length` is already a number, return it unchanged.
            '''
            try:
                return eval(length)
            except TypeError:
                return length

        def get_mem_reg_def(chip_info: Dict, memory_reg: str) -> Tuple:
            chip_info[memory_reg]['secondary_address'] = chip_info[memory_reg].get('secondary_address') or 0
            return MemRegDef(chip_info[memory_reg]['primary_address'], change_to_proper_format(chip_info[memory_reg]['length']),
                             getattr(MemRegions, memory_reg.strip('_12') + '_ID'), chip_info[memory_reg]['secondary_address'])
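        # A sketch of the expected YAML layout (key names and address values are illustrative
        # assumptions deduced from the code above, not copied from a real *_data_info.yaml file):
        #   DRAM:
        #     primary_address: 0x3FC80000
        #     length: 0x50000
        #   IRAM:
        #     primary_address: 0x40370000
        #     length: '0x8000 + 6 * 0x10000'
        #     secondary_address: 0x3FC80000
        # A key may carry a '_1'/'_2' suffix, which is stripped before resolving the *_ID constant.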
        try:
            with open(os.path.join(os.path.dirname(__file__), 'idf_size_yaml', target + '_data_info.yaml'), 'r') as stream:
                chip_info = yaml.safe_load(stream)
        except FileNotFoundError:
            raise RuntimeError('Target not detected.')
        return sorted([get_mem_reg_def(chip_info, item) for item in chip_info])

    def __init__(self, target: str) -> None:
        self.chip_mem_regions = self.get_mem_regions(target)
        if not self.chip_mem_regions:
            raise RuntimeError('Target {} is not implemented in idf_size'.format(target))

    def _get_first_region(self, start: int, length: int) -> Tuple[Union['MemRegions.MemRegDef', None], int]:
        for region in self.chip_mem_regions:  # type: ignore
            if region.primary_addr <= start < region.primary_addr + region.length:
                return (region, length)
            if region.secondary_addr and region.secondary_addr <= start < region.secondary_addr + region.length:
                return (region, length)
        print('WARNING: Given section not found in any memory region.')
        print('Check whether the LD file is compatible with the definitions in get_mem_regions in idf_size.py')
        return (None, length)

    def _get_regions(self, start: int, length: int, name: Optional[str]=None) -> List:
        ret = []
        while length > 0:
            (region, cur_len) = self._get_first_region(start, length)
            if region is None:
                # skip the part of the section that does not fall into any known memory region
                length -= cur_len
                start += cur_len
                continue
            ret.append(MemRegions.Region(start, cur_len, region, name))
            length -= cur_len
            start += cur_len

        return ret

    def fit_segments_into_regions(self, segments: Dict) -> List:
        region_list = []

        for segment in segments.values():
            sorted_segments = self._get_regions(segment['origin'], segment['length'])
            region_list.extend(sorted_segments)

        return region_list

    def fit_sections_into_regions(self, sections: Dict) -> List:
        region_list = []

        for section in sections.values():
            sorted_sections = self._get_regions(section['address'], section['size'], section['name'])
            region_list.extend(sorted_sections)

        return region_list


class LinkingSections(object):

    _section_type_dict = {key: re.compile(value) for key, value in {
        'text': r'.*\.text',
        'data': r'.*\.data',
        'bss': r'.*\.bss',
        'rodata': r'.*\.rodata',
        'noinit': r'.*noinit',
        'vectors': r'.*\.vectors',
        'flash': r'.*flash.*',
    }.items()}
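    # For illustration: '.dram0.bss' matches only the 'bss' pattern, while '.flash.rodata'
    # matches both 'rodata' and 'flash'. The section names above are examples; the actual
    # names come from the linker MAP file.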

    @staticmethod
    def in_section(section: str, section_name_or_list: Union[str, Iterable]) -> bool:
        """
        Check whether `section` matches any of the section types in `section_name_or_list`
        """
        if isinstance(section_name_or_list, basestring):
            section_name_or_list = [section_name_or_list]

        for section_name in section_name_or_list:
            if LinkingSections._section_type_dict[section_name].match(section):
                return True
        return False

    @staticmethod
    def filter_sections(sections: Dict) -> Dict:
        return {key: v for key, v in sections.items()
                if LinkingSections.in_section(key, LinkingSections._section_type_dict.keys())}

    @staticmethod
    def get_display_name_order(section_name_list: List[str]) -> Tuple[List[str], List[str]]:
        '''
        Return two lists in the suggested display order.
        The first list is the reordered section_name_list; the second list holds the suggested display names,
        corresponding one-to-one to the entries of the first list.
        '''

        def get_memory_name(split_name: List) -> Tuple[str, str]:
            memory_name = '.{}'.format(split_name[1])
            display_name = section  # `section` is the loop variable of the enclosing for-loop below
            for seg_name in ['iram','dram','flash']:
                if seg_name in split_name[1]:
                    memory_name = '.{}'.format(seg_name)
                    seg_name = seg_name.upper() if seg_name != 'flash' else seg_name.capitalize()
                    display_name = ''.join([seg_name,
                                            split_name[1].replace('iram', '') if seg_name == 'IRAM' else '',
                                            ' .{}'.format(split_name[2]) if len(split_name) > 2 else ''])
            return memory_name, display_name

        ordered_name_list = sorted(section_name_list)
        display_name_list = ordered_name_list.copy()

        memory_name = ''
        ordered_name_list = sort_dict(ordered_name_list)
        for i, section in enumerate(ordered_name_list):
            if memory_name and section.startswith(memory_name):
                # If the section has the same memory type as the previous one, use a shorter name
                display_name_list[i] = section.replace(memory_name, '& ')
                continue

            memory_name = ''

            split_name = section.split('.')
            if len(split_name) > 1:
                # If the section has a memory type, update the type and try to display the type properly
                assert split_name[0] == '', 'Unexpected section name "{}"'.format(section)
                memory_name, display_name_list[i] = get_memory_name(split_name)
                continue

            # Otherwise use its original name
            display_name_list[i] = section

        return ordered_name_list, display_name_list


def scan_to_header(file: Iterable, header_line: str) -> None:
    """ Scan forward in a file until you reach 'header_line', then return """
    for line in file:
        if line.strip() == header_line:
            return
    raise RuntimeError("Didn't find line '%s' in file" % header_line)


def format_json(json_object: Dict) -> str:
    return json.dumps(json_object,
                      allow_nan=True,
                      indent=GLOBAL_JSON_INDENT,
                      separators=GLOBAL_JSON_SEPARATORS) + os.linesep


def load_map_data(map_file: TextIO) -> Tuple[str, Dict, Dict]:
    segments = load_segments(map_file)
    detected_chip = detect_target_chip(map_file)
    sections = load_sections(map_file)

    # Exclude the dummy and .text_end sections, which usually mark the region shared between the I and D buses
    for key in list(sections.keys()):
        if key.endswith(('dummy', '.text_end')):
            sections.pop(key)
    return detected_chip, segments, sections


def load_segments(map_file: TextIO) -> Dict:
    """ Parse the 'Memory Configuration' part of the MAP file, which lists the total size of each segment """
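    # A sketch of the input and the resulting entry (values are illustrative only):
    #   iram0_0_seg      0x0000000040080000 0x0000000000020000
    # would be stored as
    #   {'iram0_0_seg': {'name': 'iram0_0_seg', 'origin': 0x40080000, 'length': 0x20000}}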
    result = {}  # type: Dict[Any, Dict]
    scan_to_header(map_file, 'Memory Configuration')
    RE_MEMORY_SECTION = re.compile(r'(?P<name>[^ ]+) +0x(?P<origin>[\da-f]+) +0x(?P<length>[\da-f]+)')

    for line in map_file:
        match_section = RE_MEMORY_SECTION.match(line)
        if match_section is None:
            if len(result) == 0:
                continue  # whitespace or a header, before the content we want
            else:
                return result  # we're at the end of the Memory Configuration
        segment = {
            'name': match_section.group('name'),
            'origin': int(match_section.group('origin'), 16),
            'length': int(match_section.group('length'), 16),
        }
        if segment['name'] != '*default*':
            result[segment['name']] = segment
    raise RuntimeError('End of file while scanning memory configuration?')


def detect_target_chip(map_file: Iterable) -> str:
    ''' Detect target chip based on the target archive name in the linker script part of the MAP file '''
    scan_to_header(map_file, 'Linker script and memory map')

    RE_TARGET = re.compile(r'project_elf_src_(.*)\.c.obj')
    # For backward compatibility with make-based builds
    RE_TARGET_MAKE = re.compile(r'^LOAD .*?/xtensa-([^-]+)-elf/')

    for line in map_file:
        match_target = RE_TARGET.search(line)
        if match_target:
            return match_target.group(1)

        match_target = RE_TARGET_MAKE.search(line)
        if match_target:
            return match_target.group(1)

        line = line.strip()
        # There could be empty line(s) between the "Linker script and memory map" header and "LOAD lines". Therefore,
        # the line is stripped and its length is checked as well. The "LOAD lines" are between START GROUP and
        # END GROUP for older MAP files.
        if not line.startswith(('LOAD', 'START GROUP', 'END GROUP')) and len(line) > 0:
            # This break is a failsafe to not process anything load_sections() might want to analyze.
            break

    raise RuntimeError('Target not detected')


def load_sections(map_file: TextIO) -> Dict:
    """ Load section size information from the MAP file.

    Returns a dict of 'sections', where each key is a section name and the value
    is a dict with details about this section, including a "sources" key which holds a list of source file line
    information for each symbol linked into the section.

    There are two kinds of lines:
        - symbol_only: [optional space]<sym_name>
        - full line: [optional space][optional sym_name] <address> <size> [optional file_info]
          If <sym_name> doesn't exist, use the symbol name from the symbol_only line above.
          If the line starts a section, <file_info> should be empty. Otherwise the line describes a source
          line, so <file_info> must exist or <sym_name> must be *fill*. This rule is used to tell sections
          from source lines.
    """
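    # A sketch of the returned structure (names and sizes are illustrative only):
    #   {'.iram0.text': {'name': '.iram0.text', 'address': 0x40080400, 'size': 0xa4,
    #                    'sources': [{'sym_name': '.text.call_start_cpu0', 'address': 0x40080400,
    #                                 'size': 0xa4, 'fill': 0, 'archive': 'libesp32.a',
    #                                 'object_file': 'cpu_start.c.obj',
    #                                 'file': 'libesp32.a:cpu_start.c.obj'}]}}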

    # Check for lines which only contain the sym name (and rest is on following lines)
    RE_SYMBOL_ONLY_LINE = re.compile(r'^\s*(?P<sym_name>\S*)$')

    # Fast check to see if a line is a potential source line before running the slower full regex against it
    RE_PRE_FILTER = re.compile(r'.*0x[\da-f]+\s*0x[\da-f]+.*')

    # source file line, e.g.
    # 0x0000000040080400       0xa4 /home/gus/esp/32/idf/examples/get-started/hello_world/build/esp32/libesp32.a(cpu_start.o)
    # cmake build system links some object files directly, not part of any archive, so make that part optional
    #  .xtensa.info   0x0000000000000000       0x38 CMakeFiles/hello_world.elf.dir/project_elf_src.c.obj
    #  *fill*         0x00000000400e2967        0x1
    RE_FULL_LINE = re.compile(r'\s*(?P<sym_name>\S*) +0x(?P<address>[\da-f]+) +0x(?P<size>[\da-f]+)\s*(?P<file>.*)$')

    # Extract archive and object_file from the file_info field
    # The object file extension (.obj or .o) is optional, including the dot. This is necessary for some third-party
    # libraries. Since the dot is optional and the search is greedy, the parsing of the object name must stop at ')'.
    # Hence the [^ )] part of the regex.
    RE_FILE = re.compile(r'((?P<archive>[^ ]+\.a)?\(?(?P<object_file>[^ )]+(\.(o|obj))?)\)?)')
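    # For example, RE_FILE applied to 'esp-idf/components/esp32/libesp32.a(cpu_start.c.obj)'
    # yields archive='esp-idf/components/esp32/libesp32.a' and object_file='cpu_start.c.obj';
    # for a bare object such as 'project_elf_src.c.obj' the archive group stays None.
    # (The paths above are illustrative.)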

    def dump_src_line(src: Dict) -> str:
        return '%s(%s) addr: 0x%08x, size: 0x%x+%d' % (src['sym_name'], src['file'], src['address'], src['size'], src['fill'])

    sections = {}  # type: Dict[Any, Dict]
    section = {}  # type: Dict[str, Any]
    sym_backup = ''
    for line in map_file:
        if line.strip() == 'Cross Reference Table':
            # Stop processing lines because we are at the next section in the map file
            break

        match_line = RE_SYMBOL_ONLY_LINE.match(line)
        if match_line:
            # In some cases the section name appears on the previous line; store it here as a backup
            sym_backup = match_line.group('sym_name')
            continue

        if not RE_PRE_FILTER.match(line):
            # Line does not match our quick check, so skip to next line
            continue

        match_line = RE_FULL_LINE.match(line)
        if not match_line:
            assert not sym_backup, 'Symbol only line must be followed by a line with address and size'
            continue

        name = match_line.group('sym_name') if match_line.group('sym_name') else sym_backup
        sym_backup = ''

        is_section = not match_line.group('file') and name != '*fill*'
        if is_section:
            # section

            section = {
                'name': name,
                'address': int(match_line.group('address'), 16),
                'size': int(match_line.group('size'), 16),
                'sources': [],
            }
            sections[name] = section

        else:
            # symbol
            if not section:
                continue

            # Some source lines in the rodata section don't actually take any space but still report a size.
            # Make the size of those lines zero.
            srcs = section['sources']  # type: List[Dict]
            if srcs:
                last_src = srcs[-1]
                if last_src['size'] > 0 and last_src['address'] == int(match_line.group('address'), 16):
                    if '.comment' != section['name'] and '.debug_str' != section['name'] and\
                            'rodata' not in last_src['sym_name']:

                        raise RuntimeError('Due to overlap with following lines, size of the line set to 0:\n    %s' % dump_src_line(last_src))

                    last_src['size'] = 0

            # Count the padding size into the last valid (size > 0) source in the section
            if name == '*fill*':
                for src in reversed(srcs):
                    if src['size'] > 0:
                        src['fill'] += int(match_line.group('size'), 16)
                        break
                continue

            # Extract archive and file information
            match_arch_and_file = RE_FILE.match(match_line.group('file'))
            assert match_arch_and_file, 'Archive and file information not found for "{}"'.format(match_line.group('file'))

            archive = match_arch_and_file.group('archive')
            if archive is None:
                # optional named group "archive" was not matched, so assign a value to it
                archive = '(exe)'

            file = match_arch_and_file.group('object_file')

            assert name
            source = {
                'size': int(match_line.group('size'), 16),
                'address': int(match_line.group('address'), 16),
                'archive': os.path.basename(archive),
                'object_file': os.path.basename(file),
                'sym_name': name,
                'fill': 0,  # padding size after the source
            }
            source['file'] = '%s:%s' % (source['archive'], source['object_file'])

            section['sources'].append(source)  # type: ignore

    # Validate the map file
    for section in sections.values():
        src_curr = {}  # type: Dict[str, Any]
        for src in section['sources']:
            if src['size'] == 0:
                continue

            expected_addr = src_curr['address'] + src_curr['size'] + src_curr['fill'] if src_curr else section['sources'][0]['address']
            if src['address'] != expected_addr:
                print('Warning: source line overlap:')
                print('    ' + dump_src_line(src_curr))
                print('    ' + dump_src_line(src))

            src_curr = src

    return sections


def check_target(target: str, map_file: TextIO) -> None:
    if target is None:
        raise RuntimeError('The target chip cannot be detected for {}. '
                           'Please report the issue.'.format(map_file.name))


def main() -> None:
    parser = argparse.ArgumentParser(description='idf_size - a tool to print size information from an IDF MAP file')

    parser.add_argument(
        '--json',
        help='Output results as JSON',
        action='store_true')

    parser.add_argument(
        'map_file', help='MAP file produced by linker',
        type=argparse.FileType('r'))

    parser.add_argument(
        '--archives', help='Print per-archive sizes', action='store_true')

    parser.add_argument(
        '--archive_details', help='Print detailed symbols per archive')

    parser.add_argument(
        '--files', help='Print per-file sizes', action='store_true')

    parser.add_argument(
        '--target', help='Set target chip', default=None)

    parser.add_argument(
        '--diff', help='Show the differences in comparison with another MAP file',
        metavar='ANOTHER_MAP_FILE',
        default=None,
        dest='another_map_file')

    parser.add_argument(
        '-o',
        '--output-file',
        type=argparse.FileType('w'),
        default=sys.stdout,
        help='Print output to the specified file instead of stdout')

    args = parser.parse_args()

    detected_target, segments, sections = load_map_data(args.map_file)
    args.map_file.close()
    check_target(detected_target, args.map_file)

    if args.another_map_file:
        with open(args.another_map_file, 'r') as f:
            detected_target_diff, segments_diff, sections_diff = load_map_data(f)
            check_target(detected_target_diff, f)
            if detected_target_diff != detected_target:
                print('WARNING: The targets of the reference and other MAP files are {} and {}, respectively.'
                      ''.format(detected_target, detected_target_diff))
    else:
        segments_diff, sections_diff, detected_target_diff = {}, {}, ''

    if args.target is not None:
        if args.target != detected_target or (detected_target_diff and args.target != detected_target_diff):
            print('WARNING: The detected chip target was overwritten to {} by the command line argument!'.format(args.target))

        detected_target = args.target
        detected_target_diff = args.target

    output = ''

    if not args.json or not (args.archives or args.files or args.archive_details):
        output += get_summary(args.map_file.name, segments, sections, detected_target,
                              args.json,
                              args.another_map_file, segments_diff, sections_diff, detected_target_diff, not (args.archives or args.files))

    if args.archives:
        output += get_detailed_sizes(sections, 'archive', 'Archive File', args.json, sections_diff)
    if args.files:
        output += get_detailed_sizes(sections, 'file', 'Object File', args.json, sections_diff)

    if args.archive_details:
        output += get_archive_symbols(sections, args.archive_details, args.json, sections_diff)

    args.output_file.write(output)
    args.output_file.close()


class StructureForSummary(object):
    used_dram_data, used_dram_bss, used_dram_rodata, used_dram_other, used_dram, dram_total, dram_remain = (0, ) * 7

    used_dram_ratio = 0.
    used_iram_vectors, used_iram_text, used_iram_other, used_iram, iram_total, iram_remain = (0, ) * 6
    used_iram_ratio = 0.
    used_diram_data, used_diram_bss, used_diram_text, used_diram_vectors, used_diram_rodata, used_diram_other, diram_total, used_diram, diram_remain = (0, ) * 9
    used_diram_ratio = 0.
    used_flash_text, used_flash_rodata, used_flash_other, used_flash, total_size = (0, ) * 5

    def __sub__(self, rhs: 'StructureForSummary') -> 'StructureForSummary':
        assert isinstance(rhs, StructureForSummary)
        ret = self
        for key in StructureForSummary.get_required_items():
            setattr(ret, key, getattr(self, key) - getattr(rhs, key))

        return ret

    def get_dram_overflowed(self) -> bool:
        return self.used_dram_ratio > 1.0

    def get_iram_overflowed(self) -> bool:
        return self.used_iram_ratio > 1.0

    def get_diram_overflowed(self) -> bool:
        return self.used_diram_ratio > 1.0

    @classmethod
    def get_required_items(cls: Any) -> List:
        whole_list = list(filter(lambda x: not (x.startswith('__') or x.endswith('__') or callable(getattr(cls, x))), dir(cls)))
        return whole_list

    @staticmethod
    def get(segments: List, sections: List) -> 'StructureForSummary':

        def get_size(sections: Iterable) -> int:
            return sum([x.len for x in sections])

        def in_diram(x: MemRegions.Region) -> bool:
            return x.region.type in (MemRegions.DRAM_ID, MemRegions.IRAM_ID) and x.region.secondary_addr > 0

        def in_dram(x: MemRegions.Region) -> bool:
            return x.region.type == MemRegions.DRAM_ID and x.region.secondary_addr == 0  # type: ignore

        def in_iram(x: MemRegions.Region) -> bool:
            return x.region.type == MemRegions.IRAM_ID and x.region.secondary_addr == 0  # type: ignore

        r = StructureForSummary()

        diram_filter = filter(in_diram, segments)
        r.diram_total = int(get_size(diram_filter) / 2)

        dram_filter = filter(in_dram, segments)
        r.dram_total = get_size(dram_filter)
        iram_filter = filter(in_iram, segments)
        r.iram_total = get_size(iram_filter)

        def filter_in_section(sections: Iterable[MemRegions.Region], section_to_check: str) -> List[MemRegions.Region]:
            return list(filter(lambda x: LinkingSections.in_section(x.section, section_to_check), sections))  # type: ignore

        dram_sections = list(filter(in_dram, sections))
        iram_sections = list(filter(in_iram, sections))
        diram_sections = list(filter(in_diram, sections))
        flash_sections = filter_in_section(sections, 'flash')

        dram_data_list = filter_in_section(dram_sections, 'data')
        dram_bss_list = filter_in_section(dram_sections, 'bss')
        dram_rodata_list = filter_in_section(dram_sections, 'rodata')
        dram_other_list = [x for x in dram_sections if x not in dram_data_list + dram_bss_list + dram_rodata_list]

        iram_vectors_list = filter_in_section(iram_sections, 'vectors')
        iram_text_list = filter_in_section(iram_sections, 'text')
        iram_other_list = [x for x in iram_sections if x not in iram_vectors_list + iram_text_list]

        diram_vectors_list = filter_in_section(diram_sections, 'vectors')
        diram_data_list = filter_in_section(diram_sections, 'data')
        diram_bss_list = filter_in_section(diram_sections, 'bss')
        diram_text_list = filter_in_section(diram_sections, 'text')
        diram_rodata_list = filter_in_section(diram_sections, 'rodata')
        diram_other_list = [x for x in diram_sections if x not in diram_data_list + diram_bss_list + diram_text_list + diram_vectors_list + diram_rodata_list]

        flash_text_list = filter_in_section(flash_sections, 'text')
        flash_rodata_list = filter_in_section(flash_sections, 'rodata')
        flash_other_list = [x for x in flash_sections if x not in flash_text_list + flash_rodata_list]

        r.used_dram_data = get_size(dram_data_list)
        r.used_dram_bss = get_size(dram_bss_list)
        r.used_dram_rodata = get_size(dram_rodata_list)
        r.used_dram_other = get_size(dram_other_list)
        r.used_dram = r.used_dram_data + r.used_dram_bss + r.used_dram_other + r.used_dram_rodata
        try:
            r.used_dram_ratio = r.used_dram / r.dram_total
        except ZeroDivisionError:
            r.used_dram_ratio = float('nan') if r.used_dram != 0 else 0
        r.dram_remain = r.dram_total - r.used_dram

        r.used_iram_vectors = get_size(iram_vectors_list)
        r.used_iram_text = get_size(iram_text_list)
        r.used_iram_other = get_size(iram_other_list)
        r.used_iram = r.used_iram_vectors + r.used_iram_text + r.used_iram_other
        try:
            r.used_iram_ratio = r.used_iram / r.iram_total
        except ZeroDivisionError:
            r.used_iram_ratio = float('nan') if r.used_iram != 0 else 0
        r.iram_remain = r.iram_total - r.used_iram

        r.used_diram_data = get_size(diram_data_list)
        r.used_diram_bss = get_size(diram_bss_list)
        r.used_diram_text = get_size(diram_text_list)
        r.used_diram_vectors = get_size(diram_vectors_list)
        r.used_diram_rodata = get_size(diram_rodata_list)
        r.used_diram_other = get_size(diram_other_list)
        r.used_diram = r.used_diram_data + r.used_diram_bss + r.used_diram_text + r.used_diram_vectors + r.used_diram_other + r.used_diram_rodata
        try:
            r.used_diram_ratio = r.used_diram / r.diram_total
        except ZeroDivisionError:
            r.used_diram_ratio = float('nan') if r.used_diram != 0 else 0
        r.diram_remain = r.diram_total - r.used_diram

        r.used_flash_text = get_size(flash_text_list)
        r.used_flash_rodata = get_size(flash_rodata_list)

        r.used_flash_other = get_size(flash_other_list)
        r.used_flash = r.used_flash_text + r.used_flash_rodata + r.used_flash_other

        # The used DRAM BSS is counted into the "Used static DRAM" but not into the "Total image size"
        r.total_size = r.used_dram - r.used_dram_bss + r.used_iram + r.used_diram - r.used_diram_bss + r.used_flash
        return r

    def get_json_dic(self) -> collections.OrderedDict:
        ret = collections.OrderedDict([
            ('dram_data', self.used_dram_data),
            ('dram_bss', self.used_dram_bss),
            ('dram_rodata', self.used_dram_rodata),
            ('dram_other', self.used_dram_other),
            ('used_dram', self.used_dram),
            ('dram_total', self.dram_total),
            ('used_dram_ratio', 0 if math.isnan(self.used_dram_ratio) else self.used_dram_ratio),  # report 0 instead of NaN
            ('dram_remain', self.dram_remain),

            ('iram_vectors', self.used_iram_vectors),
            ('iram_text', self.used_iram_text),
            ('iram_other', self.used_iram_other),
            ('used_iram', self.used_iram),
            ('iram_total', self.iram_total),
            ('used_iram_ratio', self.used_iram_ratio),
            ('iram_remain', self.iram_remain),

            ('diram_data', self.used_diram_data),
            ('diram_bss', self.used_diram_bss),
            ('diram_text', self.used_diram_text),
            ('diram_vectors', self.used_diram_vectors),
            ('diram_rodata', self.used_diram_rodata),
            ('diram_other', self.used_diram_other),
            ('diram_total', self.diram_total),
            ('used_diram', self.used_diram),
            ('used_diram_ratio', self.used_diram_ratio),
            ('diram_remain', self.diram_remain),

            ('flash_code', self.used_flash_text),
            ('flash_rodata', self.used_flash_rodata),
            ('flash_other', self.used_flash_other),
            ('used_flash_non_ram', self.used_flash),    # text/data in D/I RAM not included

            ('total_size', self.total_size)             # bss not included
        ])
        assert len(ret) == len(StructureForSummary.get_required_items())
        return ret


def get_structure_for_target(segments: Dict, sections: Dict, target: str) -> StructureForSummary:
    """
    Return StructureForSummary for specific target
    """
    mem_regions = MemRegions(target)
    segment_layout = mem_regions.fit_segments_into_regions(segments)
    section_layout = mem_regions.fit_sections_into_regions(LinkingSections.filter_sections(sections))
    current = StructureForSummary.get(segment_layout, section_layout)
    return current


def get_summary(path: str, segments: Dict, sections: Dict, target: str,
                as_json: bool=False,
                path_diff: str='', segments_diff: Optional[Dict]=None, sections_diff: Optional[Dict]=None,
                target_diff: str='', print_suggestions: bool=True) -> str:
    segments_diff = segments_diff or {}
    sections_diff = sections_diff or {}

    current = get_structure_for_target(segments, sections, target)

    if path_diff:
        diff_en = True
        mem_regions_diff = MemRegions(target_diff)
        segment_layout_diff = mem_regions_diff.fit_segments_into_regions(segments_diff)
        section_layout_diff = mem_regions_diff.fit_sections_into_regions(LinkingSections.filter_sections(sections_diff))
        reference = StructureForSummary.get(segment_layout_diff, section_layout_diff)
    else:
        diff_en = False
        reference = StructureForSummary()

    if as_json:
        current_json_dic = current.get_json_dic()
        if diff_en:
            reference_json_dic = reference.get_json_dic()
            diff_json_dic = collections.OrderedDict([
                (k, v - reference_json_dic[k]) for k, v in iteritems(current_json_dic)])
            output = format_json(collections.OrderedDict([('current', current_json_dic),
                                                          ('reference', reference_json_dic),
                                                          ('diff', diff_json_dic),
                                                          ]))
        else:
            output = format_json(current_json_dic)
    else:
        class LineDef(object):
            title = ''
            name = ''

            def __init__(self, title: str, name: str) -> None:
                self.title = title
                self.name = name

            def format_line(self) -> Tuple[str, str, str, str]:
                return (self.title + ': {%s:>7} bytes' % self.name,
                        '{%s:>7}' % self.name,
                        '{%s:+}' % self.name,
                        '')

        class HeadLineDef(LineDef):
            remain = ''
            ratio = ''
            total = ''
            warning_message = ''

            def __init__(self, title: str, name: str, remain: str, ratio: str, total: str, warning_message: str) -> None:
                super(HeadLineDef, self).__init__(title, name)
                self.remain = remain
                self.ratio = ratio
                self.total = total
                self.warning_message = warning_message

            def format_line(self) -> Tuple[str, str, str, str]:
                return ('%s: {%s:>7} bytes ({%s:>7} remain, {%s:.1%%} used)%s' % (self.title, self.name, self.remain, self.ratio, self.warning_message),
                        '{%s:>7}' % self.name,
                        '{%s:+}' % self.name,
                        '({%s:>+7} remain, {%s:>+7} total)' % (self.remain, self.total))

        class TotalLineDef(LineDef):

            def format_line(self) -> Tuple[str, str, str, str]:
                return (self.title + ': {%s:>7} bytes (.bin may be padded larger)' % self.name,
                        '{%s:>7}' % self.name,
                        '{%s:+}' % self.name,
                        '')

        warning_message = ' Overflow detected!' + (' You can run idf.py size-files for more information.' if print_suggestions else '')

        format_list = [
            HeadLineDef('Used static DRAM', 'used_dram', remain='dram_remain', ratio='used_dram_ratio', total='dram_total',
                        warning_message=warning_message if current.get_dram_overflowed() else ''),
            LineDef('      .data size', 'used_dram_data'),
            LineDef('      .bss  size', 'used_dram_bss'),
            LineDef('   .rodata  size', 'used_dram_rodata'),
            LineDef(' DRAM other size', 'used_dram_other'),

            HeadLineDef('Used static IRAM', 'used_iram', remain='iram_remain', ratio='used_iram_ratio', total='iram_total',
                        warning_message=warning_message if current.get_iram_overflowed() else ''),
            LineDef('      .text size', 'used_iram_text'),
            LineDef('   .vectors size', 'used_iram_vectors'),

            HeadLineDef('Used stat D/IRAM', 'used_diram', remain='diram_remain', ratio='used_diram_ratio', total='diram_total',
                        warning_message=warning_message if current.get_diram_overflowed() else ''),
            LineDef('      .data size', 'used_diram_data'),
            LineDef('      .bss  size', 'used_diram_bss'),
            LineDef('      .text size', 'used_diram_text'),
            LineDef('   .vectors size', 'used_diram_vectors'),
            LineDef('    .rodata size', 'used_diram_rodata'),
            LineDef('      other     ', 'used_diram_other'),

            LineDef('Used Flash size ', 'used_flash'),
            LineDef('      .text     ', 'used_flash_text'),
            LineDef('      .rodata   ', 'used_flash_rodata'),

            TotalLineDef('Total image size', 'total_size')
        ]

        def convert_to_fmt_dict(summary: StructureForSummary, suffix: str='') -> Dict:
            required_items = StructureForSummary.get_required_items()
            return dict([(key + suffix, getattr(summary, key)) for key in required_items])

        f_dic1 = convert_to_fmt_dict(current)
        if diff_en:
            f_dic2 = convert_to_fmt_dict(reference)
            f_dic_diff = convert_to_fmt_dict(current - reference)

        lf = '{:60}{:>15}{:>15} {}'  # Width for a, b, c, d columns

        def print_in_columns(a: str, b: Optional[str]='', c: Optional[str]='', d: Optional[str]='') -> str:
            return lf.format(a, b, c, d).rstrip() + os.linesep

        output = ''
        if diff_en:
            output += print_in_columns('<CURRENT> MAP file: ' + path)
            output += print_in_columns('<REFERENCE> MAP file: ' + path_diff)
            output += print_in_columns('Difference is counted as <CURRENT> - <REFERENCE>, ',
                                       'i.e. a positive number means that <CURRENT> is larger.')
            output += print_in_columns('Total sizes of <CURRENT>:', '<REFERENCE>', 'Difference', '')

            for line in format_list:
                if getattr(current, line.name) > 0 or getattr(reference, line.name) > 0 or line.name == 'total_size':
                    main_string_format, reference_format, sign_format, main_diff_format = line.format_line()
                    output += print_in_columns(
                        main_string_format.format(**f_dic1),
                        reference_format.format(**f_dic2),
                        sign_format.format(**f_dic_diff) if not sign_format.format(**f_dic_diff).startswith('+0') else '',
                        main_diff_format.format(**f_dic_diff))
        else:
            output += print_in_columns('Total sizes:')

            for line in format_list:
                if getattr(current, line.name) > 0 or line.name == 'total_size':
                    main_string_format, reference_format, sign_format, main_diff_format = line.format_line()
                    output += print_in_columns(main_string_format.format(**f_dic1))

    return output


def sort_dict(non_sort_list: List) -> List:
    '''
    Sort the list, keeping the order: data, bss, other, iram, diram, ram_st_total, flash_text, flash_rodata, flash_total
    '''
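    # For illustration (section names are made up):
    #   sort_dict(['.flash.text', '.dram0.data', '.iram0.text', '.debug_str'])
    # returns ['.dram0.data', '.debug_str', '.iram0.text', '.flash.text'] - known types are grouped
    # in the order above and unknown names such as '.debug_str' are inserted at the 'other' position.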
    start_of_other = 0
    props_sort = []  # type: List
    props_elem = ['.data', '.bss', 'other', 'iram', 'diram', 'ram_st_total', 'flash.text', 'flash.rodata', 'flash', 'flash_total']
    for i in props_elem:
        for j in non_sort_list:
            if i == 'other':
                # remember where the 'other' items will start
                start_of_other = len(props_sort)
            elif i in j and j not in props_sort:
                props_sort.append(j)
    for j in non_sort_list:
        if j not in props_sort:
            # insert all remaining items at the 'other' position
            props_sort.insert(start_of_other, j)
    return props_sort


class StructureForDetailedSizes(object):

    @staticmethod
    def sizes_by_key(sections: SectionDict, key: str, include_padding: Optional[bool]=False) -> Dict[str, Dict[str, int]]:
        """ Takes a dict of sections (from load_sections) and returns
        a dict keyed by 'key' with aggregate output size information.

        Key can be either "archive" (for per-archive data) or "file" (for per-file data) in the result.
        """
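        # A sketch of the returned shape (names and sizes are illustrative only):
        #   {'libesp32.a': {'.iram0.text': 0xa4, '.dram0.data': 0x10},
        #    'libmain.a': {'.flash.text': 0x1200}}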
        result = {}  # type: Dict[str, Dict[str, int]]
        for _, section in iteritems(sections):
            for s in section['sources']:
                if s[key] not in result:
                    result[s[key]] = {}
                archive = result[s[key]]
                if section['name'] not in archive:
                    archive[section['name']] = 0
                archive[section['name']] += s['size']
                if include_padding:
                    archive[section['name']] += s['fill']
        return result

    @staticmethod
    def get(sections: SectionDict, by_key: str) -> collections.OrderedDict:
        """
        Aggregate sizes for all sections first and only then filter out the undesired ones, so that
        entries which contain none of the desired sections still show up in the output.
        """
        sizes = StructureForDetailedSizes.sizes_by_key(sections, by_key)
        for key_name in sizes:
            sizes[key_name] = LinkingSections.filter_sections(sizes[key_name])

        s = []
        for key, section_dict in sizes.items():
            ram_st_total = sum([x[1] for x in section_dict.items() if not LinkingSections.in_section(x[0], 'flash')])
            flash_total = sum([x[1] for x in section_dict.items() if not LinkingSections.in_section(x[0], 'bss')])  # type: int

            section_dict['ram_st_total'] = ram_st_total
            section_dict['flash_total'] = flash_total

            sorted_dict = sorted(section_dict.items(), key=lambda elem: elem[0])

            s.append((key, collections.OrderedDict(sorted_dict)))

        s = sorted(s, key=lambda elem: elem[0])
        # do a secondary sort in order to have consistent order (for diff-ing the output)
        s = sorted(s, key=lambda elem: elem[1]['flash_total'], reverse=True)

        return collections.OrderedDict(s)


def get_detailed_sizes(sections: Dict, key: str, header: str, as_json: bool=False, sections_diff: Optional[Dict]=None) -> str:

    key_name_set = set()
    current = StructureForDetailedSizes.get(sections, key)
    for section_dict in current.values():
        key_name_set.update(section_dict.keys())

    if sections_diff:
        reference = StructureForDetailedSizes.get(sections_diff, key)
        for section_dict in reference.values():
            key_name_set.update(section_dict.keys())
        diff_en = True
    else:
        diff_en = False

    key_name_list = list(key_name_set)
    ordered_key_list, display_name_list = LinkingSections.get_display_name_order(key_name_list)
    if as_json:
        if diff_en:
            diff_json_dic = collections.OrderedDict()
            for name in sorted(list(frozenset(current.keys()) | frozenset(reference.keys()))):
                cur_name_dic = current.get(name, {})
                ref_name_dic = reference.get(name, {})
                all_keys = sorted(list(frozenset(cur_name_dic.keys()) | frozenset(ref_name_dic.keys())))
                diff_json_dic[name] = collections.OrderedDict([(k,
                                                                cur_name_dic.get(k, 0) -
                                                                ref_name_dic.get(k, 0)) for k in all_keys])
            output = format_json(collections.OrderedDict([('current', current),
                                                          ('reference', reference),
                                                          ('diff', diff_json_dic),
                                                          ]))
        else:
            output = format_json(current)
    else:
        def _get_header_format(disp_list: List=display_name_list) -> str:
            len_list = [len(x) for x in disp_list]
            len_list.insert(0, 24)
            return ' '.join(['{:>%d}' % x for x in len_list]) + os.linesep

        def _get_output(data: Dict[str, Dict[str, int]], selection: Collection, key_list: List=ordered_key_list, disp_list: List=display_name_list) -> str:
            header_format = _get_header_format(disp_list)
            output = header_format.format(header, *disp_list)

            for key, data_info in iteritems(data):
                if key not in selection:
                    continue

                try:
                    _, key = key.split(':', 1)
                    # print subheadings for key of format archive:file
                except ValueError:
                    # key remains the same
                    pass

                def get_section_size(section_dict: Dict) -> Callable[[str], int]:
                    return lambda x: section_dict.get(x, 0)

                section_size_list = map(get_section_size(section_dict=data_info), key_list)
                output += header_format.format(key[:24], *(section_size_list))
            return output

        def _get_header_format_diff(disp_list: List=display_name_list, columns: bool=False) -> str:
            if columns:
                len_list = (24, ) + (7, ) * 3 * len(disp_list)
                return '|'.join(['{:>%d}' % x for x in len_list]) + os.linesep

            len_list = (24, ) + (23, ) * len(disp_list)
            return ' '.join(['{:>%d}' % x for x in len_list]) + os.linesep

        def _get_output_diff(curr: Dict, ref: Dict, key_list: List=ordered_key_list, disp_list: List=display_name_list) -> str:
            # First header without Current/Ref/Diff columns
            header_format = _get_header_format_diff(columns=False)
            output = header_format.format(header, *disp_list)

            f_print = ('-' * 23, '') * len(key_list)
            f_print = f_print[0:len(key_list)]
            header_line = header_format.format('', *f_print)

            header_format = _get_header_format_diff(columns=True)
            f_print = ('<C>', '<R>', '<C>-<R>') * len(key_list)

            output += header_format.format('', *f_print)
            output += header_line

            for key, data_info in iteritems(curr):
                try:
                    v2 = ref[key]
                except KeyError:
                    continue

                try:
                    _, key = key.split(':', 1)
                    # print subheadings for key of format archive:file
                except ValueError:
                    # key remains the same
                    pass

                def _get_items(name: str, section_dict: Dict=data_info, section_dict_ref: Dict=v2) -> Tuple[str, str, str]:
                    a = section_dict.get(name, 0)
                    b = section_dict_ref.get(name, 0)
                    diff = a - b
                    # the sign is added here and not in header_format in order to be able to print empty strings
                    return (a or '', b or '', '' if diff == 0 else '{:+}'.format(diff))

                x = []  # type: List[str]
                for section in key_list:
                    x.extend(_get_items(section))

                output += header_format.format(key[:24], *(x))

            return output

        output = 'Per-{} contributions to ELF file:{}'.format(key, os.linesep)

        if diff_en:
            output += _get_output_diff(current, reference)

            in_current = frozenset(current.keys())
            in_reference = frozenset(reference.keys())
            only_in_current = in_current - in_reference
            only_in_reference = in_reference - in_current

            if len(only_in_current) > 0:
                output += 'The following entries are present in <CURRENT> only:{}'.format(os.linesep)
                output += _get_output(current, only_in_current)

            if len(only_in_reference) > 0:
                output += 'The following entries are present in <REFERENCE> only:{}'.format(os.linesep)
                output += _get_output(reference, only_in_reference)
        else:
            output += _get_output(current, current)

    return output


class StructureForArchiveSymbols(object):
    @staticmethod
    def get(archive: str, sections: Dict) -> Dict:
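        # Returns an OrderedDict mapping each section of interest to an OrderedDict of symbol name -> size,
        # e.g. (illustrative names and sizes only):
        #   {'.flash.text': OrderedDict([('app_main', 0x230), ('esp_timer_init', 0x1a0)])}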
        interested_sections = LinkingSections.filter_sections(sections)

        result = dict([(t, {}) for t in interested_sections])  # type: Dict[str, Dict[str, int]]
        for _, section in iteritems(sections):
            section_name = section['name']
            if section_name not in interested_sections:
                continue
            for s in section['sources']:
                if archive != s['archive']:
                    continue
                s['sym_name'] = re.sub('(.text.|.literal.|.data.|.bss.|.rodata.)', '', s['sym_name'])
                result[section_name][s['sym_name']] = result[section_name].get(s['sym_name'], 0) + s['size']

        # build a new ordered dict of each section, where each entry is an ordereddict of symbols to sizes
        section_symbols = collections.OrderedDict()
        for t in sorted(list(interested_sections)):
            s = sorted(result[t].items(), key=lambda k_v: str(k_v[0]))
            # do a secondary sort in order to have consistent order (for diff-ing the output)
            s = sorted(s, key=lambda k_v: int(k_v[1]), reverse=True)
            section_symbols[t] = collections.OrderedDict(s)

        return section_symbols


def get_archive_symbols(sections: Dict, archive: str, as_json: bool=False, sections_diff: Optional[Dict]=None) -> str:
    diff_en = bool(sections_diff)
    current = StructureForArchiveSymbols.get(archive, sections)
    reference = StructureForArchiveSymbols.get(archive, sections_diff) if sections_diff else {}

    if as_json:
        if diff_en:
            diff_json_dic = collections.OrderedDict()
            for name in sorted(list(frozenset(current.keys()) | frozenset(reference.keys()))):
                cur_name_dic = current.get(name, {})
                ref_name_dic = reference.get(name, {})
                all_keys = sorted(list(frozenset(cur_name_dic.keys()) | frozenset(ref_name_dic.keys())))
                diff_json_dic[name] = collections.OrderedDict([(key,
                                                                cur_name_dic.get(key, 0) -
                                                                ref_name_dic.get(key, 0)) for key in all_keys])
            output = format_json(collections.OrderedDict([('current', current),
                                                          ('reference', reference),
                                                          ('diff', diff_json_dic),
                                                          ]))
        else:
            output = format_json(current)
    else:
        def _get_item_pairs(name: str, section: collections.OrderedDict) -> collections.OrderedDict:
            return collections.OrderedDict([(key.replace(name + '.', ''), val) for key, val in iteritems(section)])

        def _get_max_len(symbols_dict: Dict) -> Tuple[int, int]:
            # seed the running maxima with 0 because max() does not accept empty sequences
            names_max_len = 0
            numbers_max_len = 0
            for t, s in iteritems(symbols_dict):
                numbers_max_len = max([numbers_max_len, *[len(str(x)) for _, x in iteritems(s)]])
                names_max_len = max([names_max_len, *[len(x) for x in _get_item_pairs(t, s)]])

            return names_max_len, numbers_max_len

        def _get_output(section_symbols: Dict) -> str:
            output = ''
            names_max_len, numbers_max_len = _get_max_len(section_symbols)
            for t, s in iteritems(section_symbols):
                output += '{}Symbols from section: {}{}'.format(os.linesep, t, os.linesep)
                item_pairs = _get_item_pairs(t, s)
                for key, val in iteritems(item_pairs):
                    output += ' '.join([('\t{:<%d} : {:>%d}\n' % (names_max_len, numbers_max_len)).format(key, val)])
                section_total = sum([val for _, val in iteritems(item_pairs)])
                output += 'Section total: {}{}'.format(section_total, os.linesep)
            return output

        output = '{}Symbols within the archive: {} (Not all symbols may be reported){}'.format(os.linesep, archive, os.linesep)
        if diff_en:

            def _generate_line_tuple(curr: collections.OrderedDict, ref: collections.OrderedDict, name: str) -> Tuple[str, int, int, str]:
                cur_val = curr.get(name, 0)
                ref_val = ref.get(name, 0)
                diff_val = cur_val - ref_val
                # string slicing is used just to make sure it will fit into the first column of line_format
                return ((' ' * 4 + name)[:40], cur_val, ref_val, '' if diff_val == 0 else '{:+}'.format(diff_val))

            line_format = '{:40} {:>12} {:>12} {:>25}'
            all_section_names = sorted(list(frozenset(current.keys()) | frozenset(reference.keys())))
            for section_name in all_section_names:
                current_item_pairs = _get_item_pairs(section_name, current.get(section_name, {}))
                reference_item_pairs = _get_item_pairs(section_name, reference.get(section_name, {}))
                output += os.linesep + line_format.format(section_name[:40],
                                                          '<CURRENT>',
                                                          '<REFERENCE>',
                                                          '<CURRENT> - <REFERENCE>') + os.linesep
                current_section_total = sum([val for _, val in iteritems(current_item_pairs)])
                reference_section_total = sum([val for _, val in iteritems(reference_item_pairs)])
                diff_section_total = current_section_total - reference_section_total
                all_item_names = sorted(list(frozenset(current_item_pairs.keys()) |
                                             frozenset(reference_item_pairs.keys())))
                output += os.linesep.join([line_format.format(*_generate_line_tuple(current_item_pairs,
                                                                                    reference_item_pairs,
                                                                                    n)
                                                              ).rstrip() for n in all_item_names])
                output += os.linesep if current_section_total > 0 or reference_section_total > 0 else ''
                output += line_format.format('Section total:',
                                             current_section_total,
                                             reference_section_total,
                                             '' if diff_section_total == 0 else '{:+}'.format(diff_section_total)
                                             ).rstrip() + os.linesep
        else:
            output += _get_output(current)
    return output


if __name__ == '__main__':
    main()