1#!/usr/bin/env python3
2#
3# Copyright (c) 2010-2024 Antmicro
4#
5# This file is licensed under the MIT License.
6# Full license text is available in 'licenses/MIT.txt'.
7#
8
9import argparse
10import json
11
12from pathlib import Path
13from tqdm import tqdm
14
15from cache import Cache
16from presets import PRESETS
17
18
19class RenodeLogInterface:
20    def __init__(self, file: Path):
21        self.fname = file
22        self.count_insn_read = 0
23        self.count_mem_read = 0
24        self.count_mem_write = 0
25        self.count_io_read = 0
26        self.count_io_write = 0
27        self.invalidate_on_io = False
28
29    def configure_caches(
30        self,
31        l1i: Cache | None = None,
32        l1d: Cache | None = None,
33        invalidation_opcodes: list | None = None,
34        invalidate_on_io: bool = False
35    ) -> None:
36
37        self.l1i = l1i
38        self.l1d = l1d
39        self.invalidation_opcodes = invalidation_opcodes if invalidation_opcodes else {}
40        self.invalidate_on_io = invalidate_on_io
41
42        for cache in [self.l1i, self.l1d]:
43            if cache is not None:
44                cache.print_cache_info()
45
46    def simulate(self) -> None:
47        """ Simulate the cache structure
48
49        Due to _large_ trace files, parse the file line-by-line, and operate on caches this way.
50
51        Renode ExecutionTracer outputs the following data:
52
53        * `PC`: `OPCODE`
54        * Memory{Write, Read} with address `ADDR`
55        * MemoryIO{Write, Read} with address `ADDR`
56        """
57
58        lines = sum(1 for i in open(self.fname, 'rb'))
59        with open(self.fname, 'r') as f:
60            for line in tqdm(f, total=lines):
61                # Handle instruction fetch
62                # 0xPC: 0xOPCODE
63                if ':' in line and self.l1i is not None:
64                    self.count_insn_read += 1
65                    pc, opcode = (int(value.strip(), 16) for value in line.split(":"))
66                    if opcode in self.invalidation_opcodes:
67                        cache_type = self.invalidation_opcodes[opcode]
68                        getattr(self, f'l1{cache_type}').flush()
69                    self.l1i.read(pc)
70                # Handle I/O access
71                # Memory{Read, Write} with address 0xADDRESS
72                elif line.startswith('Memory') and self.l1d is not None:
73                    parts = line.split()
74                    address = int(parts[-1], 16)
75                    match parts[0].lower().removeprefix('memory'):
76                        case 'iowrite':
77                            if self.invalidate_on_io:
78                                self.l1d.flush()
79                            self.count_io_write += 1
80                        case 'ioread':
81                            if self.invalidate_on_io:
82                                self.l1d.flush()
83                            self.count_io_read += 1
84                        case 'write':
85                            self.count_mem_write += 1
86                            self.l1d.write(address)
87                        case 'read':
88                            self.count_mem_read += 1
89                            self.l1d.read(address)
90                        case _:
91                            raise ValueError('Unsupported memory operation!')
92
93    def print_analysis_results(self) -> None:
94        if self.l1i:
95            print(f'Instructions read: {self.count_insn_read}')
96        if self.l1d:
97            print(f'Total memory operations: {self.count_mem_read + self.count_mem_write} (read: {self.count_mem_read}, write {self.count_mem_write})')
98            print(f'Total I/O operations: {self.count_io_read + self.count_io_write} (read: {self.count_io_read}, write {self.count_io_write})')
99
100        print()
101        for c in [self.l1i, self.l1d]:
102            if c is not None:
103                print(f'{c.name} results:')
104                c.print_hmr()
105                print()
106
107    def save_results(self, filename: Path) -> None:
108        data = {c.name: {'hit': c.hits, 'miss': c.misses, 'invalidations': c.invalidations}
109                for c in [self.l1i, self.l1d] if c is not None}
110
111        with open(filename, 'w') as f:
112            json.dump(data, f)
113
114
def parse_arguments():
    """Build the CLI (trace file, optional output, preset/config subcommands)
    and return the parsed arguments."""
    parser = argparse.ArgumentParser(description='Cache Simulator')
    parser.add_argument('trace_file', type=str, help='The file containing the trace to process')
    parser.add_argument('--output', type=str, required=False, help='Filename where results will be saved (optional)')

    subparsers = parser.add_subparsers(help='Help for subcommands', dest='subcommand')

    presets = subparsers.add_parser('presets', help='Run cache simulation using premade configuration presets')
    presets.add_argument('preset', type=str, choices=list(PRESETS.keys()), help='Available presets')

    config = subparsers.add_parser('config', help='Configure cache manually')
    config.add_argument('--memory_width', type=int, required=True, help='System memory width')
    config.add_argument('--invalidate_on_io', action='store_true', default=False, help='Invalidate L1 data cache on IO operations')

    # One argument group per cache, each exposing the same set of options.
    groups = (
        ('Instruction cache configuration', ('l1i',)),
        ('Data cache configuration', ('l1d',)),
    )

    for title, caches in groups:
        section = config.add_argument_group(title)
        for cache_type in caches:
            section.add_argument(f'--{cache_type}_cache_width', type=int, help=f'Cache width for {cache_type}')
            section.add_argument(f'--{cache_type}_block_width', type=int, help=f'Block width for {cache_type}')
            section.add_argument(f'--{cache_type}_lines_per_set', type=int, help=f'Lines per set for {cache_type}. Set associativity: 2^n, n = (1, 2, 3, ...). -1 for fully associative, 1 for direct mapping.')
            section.add_argument(f'--{cache_type}_replacement_policy', type=str, default=None, help=f'Replacement policy for {cache_type}')

    return parser.parse_args()
143
144
def configure_cache(args):
    """Translate parsed CLI arguments into cache configuration.

    :param args: result of ``parse_arguments()``.
    :return: tuple ``(l1i, l1d, opcodes, invalidate_on_io)``; entries are
        None when not configured by the chosen subcommand.
    :raises SystemExit: when the ``config`` subcommand yields no valid cache.
    """
    l1i, l1d, opcodes, invalidate_on_io = None, None, None, None
    if args.subcommand == 'presets':
        preset = PRESETS[args.preset]
        l1i, l1d, opcodes, invalidate_on_io = preset.get('l1i'), preset.get('l1d'), preset.get('flush_opcodes'), preset.get('invalidate_on_io')
    elif args.subcommand == 'config':
        l1i = create_cache('l1i', args) if all_args_present(args, 'l1i') else None
        l1d = create_cache('l1d', args) if all_args_present(args, 'l1d') else None
        invalidate_on_io = args.invalidate_on_io
        if not any([l1i, l1d]):
            print('[!!!] Missing or invalid cache configuration. Aborting!')
            # raise SystemExit directly: the site-module exit() helper is not
            # guaranteed to exist (e.g. under `python -S`).
            raise SystemExit(1)
    return l1i, l1d, opcodes, invalidate_on_io
158
159
def all_args_present(args, prefix):
    """Return True when all mandatory geometry options for *prefix* were given."""
    required = ('cache_width', 'block_width', 'lines_per_set')
    return not any(getattr(args, f'{prefix}_{suffix}') is None for suffix in required)
162
163
def create_cache(name, args):
    """Build a Cache instance from the ``--{name}_*`` command-line options."""
    def option(suffix):
        # All cache options are namespaced by the cache name, e.g. l1i_cache_width.
        return getattr(args, f'{name}_{suffix}')

    return Cache(
        name=name,
        cache_width=option('cache_width'),
        block_width=option('block_width'),
        memory_width=args.memory_width,
        lines_per_set=option('lines_per_set'),
        replacement_policy=option('replacement_policy')
    )
173
174
if __name__ == '__main__':
    args = parse_arguments()
    icache, dcache, flush_opcodes, io_invalidate = configure_cache(args)

    log = RenodeLogInterface(args.trace_file)
    log.configure_caches(icache, dcache, flush_opcodes, io_invalidate)
    log.simulate()
    log.print_analysis_results()

    # Persist results only when --output was supplied.
    if args.output is not None:
        log.save_results(args.output)
186