1#!/usr/bin/env python3 2# 3# Script to compile and runs benches. 4# 5# Example: 6# ./scripts/bench.py runners/bench_runner -b 7# 8# Copyright (c) 2022, The littlefs authors. 9# SPDX-License-Identifier: BSD-3-Clause 10# 11 12import collections as co 13import csv 14import errno 15import glob 16import itertools as it 17import math as m 18import os 19import pty 20import re 21import shlex 22import shutil 23import signal 24import subprocess as sp 25import threading as th 26import time 27import toml 28 29 30RUNNER_PATH = './runners/bench_runner' 31HEADER_PATH = 'runners/bench_runner.h' 32 33GDB_PATH = ['gdb'] 34VALGRIND_PATH = ['valgrind'] 35PERF_SCRIPT = ['./scripts/perf.py'] 36 37 38def openio(path, mode='r', buffering=-1): 39 # allow '-' for stdin/stdout 40 if path == '-': 41 if mode == 'r': 42 return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering) 43 else: 44 return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering) 45 else: 46 return open(path, mode, buffering) 47 48class BenchCase: 49 # create a BenchCase object from a config 50 def __init__(self, config, args={}): 51 self.name = config.pop('name') 52 self.path = config.pop('path') 53 self.suite = config.pop('suite') 54 self.lineno = config.pop('lineno', None) 55 self.if_ = config.pop('if', None) 56 if isinstance(self.if_, bool): 57 self.if_ = 'true' if self.if_ else 'false' 58 self.code = config.pop('code') 59 self.code_lineno = config.pop('code_lineno', None) 60 self.in_ = config.pop('in', 61 config.pop('suite_in', None)) 62 63 # figure out defines and build possible permutations 64 self.defines = set() 65 self.permutations = [] 66 67 # defines can be a dict or a list or dicts 68 suite_defines = config.pop('suite_defines', {}) 69 if not isinstance(suite_defines, list): 70 suite_defines = [suite_defines] 71 defines = config.pop('defines', {}) 72 if not isinstance(defines, list): 73 defines = [defines] 74 75 def csplit(v): 76 # split commas but only outside of parens 77 parens = 0 78 i_ = 0 79 for i in 
range(len(v)): 80 if v[i] == ',' and parens == 0: 81 yield v[i_:i] 82 i_ = i+1 83 elif v[i] in '([{': 84 parens += 1 85 elif v[i] in '}])': 86 parens -= 1 87 if v[i_:].strip(): 88 yield v[i_:] 89 90 def parse_define(v): 91 # a define entry can be a list 92 if isinstance(v, list): 93 for v_ in v: 94 yield from parse_define(v_) 95 # or a string 96 elif isinstance(v, str): 97 # which can be comma-separated values, with optional 98 # range statements. This matches the runtime define parser in 99 # the runner itself. 100 for v_ in csplit(v): 101 m = re.search(r'\brange\b\s*\(' 102 '(?P<start>[^,\s]*)' 103 '\s*(?:,\s*(?P<stop>[^,\s]*)' 104 '\s*(?:,\s*(?P<step>[^,\s]*)\s*)?)?\)', 105 v_) 106 if m: 107 start = (int(m.group('start'), 0) 108 if m.group('start') else 0) 109 stop = (int(m.group('stop'), 0) 110 if m.group('stop') else None) 111 step = (int(m.group('step'), 0) 112 if m.group('step') else 1) 113 if m.lastindex <= 1: 114 start, stop = 0, start 115 for x in range(start, stop, step): 116 yield from parse_define('%s(%d)%s' % ( 117 v_[:m.start()], x, v_[m.end():])) 118 else: 119 yield v_ 120 # or a literal value 121 elif isinstance(v, bool): 122 yield 'true' if v else 'false' 123 else: 124 yield v 125 126 # build possible permutations 127 for suite_defines_ in suite_defines: 128 self.defines |= suite_defines_.keys() 129 for defines_ in defines: 130 self.defines |= defines_.keys() 131 self.permutations.extend(dict(perm) for perm in it.product(*( 132 [(k, v) for v in parse_define(vs)] 133 for k, vs in sorted((suite_defines_ | defines_).items())))) 134 135 for k in config.keys(): 136 print('%swarning:%s in %s, found unused key %r' % ( 137 '\x1b[01;33m' if args['color'] else '', 138 '\x1b[m' if args['color'] else '', 139 self.name, 140 k), 141 file=sys.stderr) 142 143 144class BenchSuite: 145 # create a BenchSuite object from a toml file 146 def __init__(self, path, args={}): 147 self.path = path 148 self.name = os.path.basename(path) 149 if self.name.endswith('.toml'): 
150 self.name = self.name[:-len('.toml')] 151 152 # load toml file and parse bench cases 153 with open(self.path) as f: 154 # load benches 155 config = toml.load(f) 156 157 # find line numbers 158 f.seek(0) 159 case_linenos = [] 160 code_linenos = [] 161 for i, line in enumerate(f): 162 match = re.match( 163 '(?P<case>\[\s*cases\s*\.\s*(?P<name>\w+)\s*\])' 164 '|' '(?P<code>code\s*=)', 165 line) 166 if match and match.group('case'): 167 case_linenos.append((i+1, match.group('name'))) 168 elif match and match.group('code'): 169 code_linenos.append(i+2) 170 171 # sort in case toml parsing did not retain order 172 case_linenos.sort() 173 174 cases = config.pop('cases') 175 for (lineno, name), (nlineno, _) in it.zip_longest( 176 case_linenos, case_linenos[1:], 177 fillvalue=(float('inf'), None)): 178 code_lineno = min( 179 (l for l in code_linenos if l >= lineno and l < nlineno), 180 default=None) 181 cases[name]['lineno'] = lineno 182 cases[name]['code_lineno'] = code_lineno 183 184 self.if_ = config.pop('if', None) 185 if isinstance(self.if_, bool): 186 self.if_ = 'true' if self.if_ else 'false' 187 188 self.code = config.pop('code', None) 189 self.code_lineno = min( 190 (l for l in code_linenos 191 if not case_linenos or l < case_linenos[0][0]), 192 default=None) 193 194 # a couple of these we just forward to all cases 195 defines = config.pop('defines', {}) 196 in_ = config.pop('in', None) 197 198 self.cases = [] 199 for name, case in sorted(cases.items(), 200 key=lambda c: c[1].get('lineno')): 201 self.cases.append(BenchCase(config={ 202 'name': name, 203 'path': path + (':%d' % case['lineno'] 204 if 'lineno' in case else ''), 205 'suite': self.name, 206 'suite_defines': defines, 207 'suite_in': in_, 208 **case}, 209 args=args)) 210 211 # combine per-case defines 212 self.defines = set.union(*( 213 set(case.defines) for case in self.cases)) 214 215 for k in config.keys(): 216 print('%swarning:%s in %s, found unused key %r' % ( 217 '\x1b[01;33m' if args['color'] 
else '', 218 '\x1b[m' if args['color'] else '', 219 self.name, 220 k), 221 file=sys.stderr) 222 223 224 225def compile(bench_paths, **args): 226 # find .toml files 227 paths = [] 228 for path in bench_paths: 229 if os.path.isdir(path): 230 path = path + '/*.toml' 231 232 for path in glob.glob(path): 233 paths.append(path) 234 235 if not paths: 236 print('no bench suites found in %r?' % bench_paths) 237 sys.exit(-1) 238 239 # load the suites 240 suites = [BenchSuite(path, args) for path in paths] 241 suites.sort(key=lambda s: s.name) 242 243 # check for name conflicts, these will cause ambiguity problems later 244 # when running benches 245 seen = {} 246 for suite in suites: 247 if suite.name in seen: 248 print('%swarning:%s conflicting suite %r, %s and %s' % ( 249 '\x1b[01;33m' if args['color'] else '', 250 '\x1b[m' if args['color'] else '', 251 suite.name, 252 suite.path, 253 seen[suite.name].path), 254 file=sys.stderr) 255 seen[suite.name] = suite 256 257 for case in suite.cases: 258 # only allow conflicts if a case and its suite share a name 259 if case.name in seen and not ( 260 isinstance(seen[case.name], BenchSuite) 261 and seen[case.name].cases == [case]): 262 print('%swarning:%s conflicting case %r, %s and %s' % ( 263 '\x1b[01;33m' if args['color'] else '', 264 '\x1b[m' if args['color'] else '', 265 case.name, 266 case.path, 267 seen[case.name].path), 268 file=sys.stderr) 269 seen[case.name] = case 270 271 # we can only compile one bench suite at a time 272 if not args.get('source'): 273 if len(suites) > 1: 274 print('more than one bench suite for compilation? 
(%r)' % bench_paths) 275 sys.exit(-1) 276 277 suite = suites[0] 278 279 # write generated bench source 280 if 'output' in args: 281 with openio(args['output'], 'w') as f: 282 _write = f.write 283 def write(s): 284 f.lineno += s.count('\n') 285 _write(s) 286 def writeln(s=''): 287 f.lineno += s.count('\n') + 1 288 _write(s) 289 _write('\n') 290 f.lineno = 1 291 f.write = write 292 f.writeln = writeln 293 294 f.writeln("// Generated by %s:" % sys.argv[0]) 295 f.writeln("//") 296 f.writeln("// %s" % ' '.join(sys.argv)) 297 f.writeln("//") 298 f.writeln() 299 300 # include bench_runner.h in every generated file 301 f.writeln("#include \"%s\"" % args['include']) 302 f.writeln() 303 304 # write out generated functions, this can end up in different 305 # files depending on the "in" attribute 306 # 307 # note it's up to the specific generated file to declare 308 # the bench defines 309 def write_case_functions(f, suite, case): 310 # create case define functions 311 if case.defines: 312 # deduplicate defines by value to try to reduce the 313 # number of functions we generate 314 define_cbs = {} 315 for i, defines in enumerate(case.permutations): 316 for k, v in sorted(defines.items()): 317 if v not in define_cbs: 318 name = ('__bench__%s__%s__%d' 319 % (case.name, k, i)) 320 define_cbs[v] = name 321 f.writeln('intmax_t %s(' 322 '__attribute__((unused)) ' 323 'void *data) {' % name) 324 f.writeln(4*' '+'return %s;' % v) 325 f.writeln('}') 326 f.writeln() 327 f.writeln('const bench_define_t ' 328 '__bench__%s__defines[][' 329 'BENCH_IMPLICIT_DEFINE_COUNT+%d] = {' 330 % (case.name, len(suite.defines))) 331 for defines in case.permutations: 332 f.writeln(4*' '+'{') 333 for k, v in sorted(defines.items()): 334 f.writeln(8*' '+'[%-24s] = {%s, NULL},' % ( 335 k+'_i', define_cbs[v])) 336 f.writeln(4*' '+'},') 337 f.writeln('};') 338 f.writeln() 339 340 # create case filter function 341 if suite.if_ is not None or case.if_ is not None: 342 f.writeln('bool __bench__%s__filter(void) 
{' 343 % (case.name)) 344 f.writeln(4*' '+'return %s;' 345 % ' && '.join('(%s)' % if_ 346 for if_ in [suite.if_, case.if_] 347 if if_ is not None)) 348 f.writeln('}') 349 f.writeln() 350 351 # create case run function 352 f.writeln('void __bench__%s__run(' 353 '__attribute__((unused)) struct lfs_config *cfg) {' 354 % (case.name)) 355 f.writeln(4*' '+'// bench case %s' % case.name) 356 if case.code_lineno is not None: 357 f.writeln(4*' '+'#line %d "%s"' 358 % (case.code_lineno, suite.path)) 359 f.write(case.code) 360 if case.code_lineno is not None: 361 f.writeln(4*' '+'#line %d "%s"' 362 % (f.lineno+1, args['output'])) 363 f.writeln('}') 364 f.writeln() 365 366 if not args.get('source'): 367 if suite.code is not None: 368 if suite.code_lineno is not None: 369 f.writeln('#line %d "%s"' 370 % (suite.code_lineno, suite.path)) 371 f.write(suite.code) 372 if suite.code_lineno is not None: 373 f.writeln('#line %d "%s"' 374 % (f.lineno+1, args['output'])) 375 f.writeln() 376 377 if suite.defines: 378 for i, define in enumerate(sorted(suite.defines)): 379 f.writeln('#ifndef %s' % define) 380 f.writeln('#define %-24s ' 381 'BENCH_IMPLICIT_DEFINE_COUNT+%d' % (define+'_i', i)) 382 f.writeln('#define %-24s ' 383 'BENCH_DEFINE(%s)' % (define, define+'_i')) 384 f.writeln('#endif') 385 f.writeln() 386 387 # create case functions 388 for case in suite.cases: 389 if case.in_ is None: 390 write_case_functions(f, suite, case) 391 else: 392 if case.defines: 393 f.writeln('extern const bench_define_t ' 394 '__bench__%s__defines[][' 395 'BENCH_IMPLICIT_DEFINE_COUNT+%d];' 396 % (case.name, len(suite.defines))) 397 if suite.if_ is not None or case.if_ is not None: 398 f.writeln('extern bool __bench__%s__filter(' 399 'void);' 400 % (case.name)) 401 f.writeln('extern void __bench__%s__run(' 402 'struct lfs_config *cfg);' 403 % (case.name)) 404 f.writeln() 405 406 # create suite struct 407 # 408 # note we place this in the custom bench_suites section with 409 # minimum alignment, otherwise 
GCC ups the alignment to 410 # 32-bytes for some reason 411 f.writeln('__attribute__((section("_bench_suites"), ' 412 'aligned(1)))') 413 f.writeln('const struct bench_suite __bench__%s__suite = {' 414 % suite.name) 415 f.writeln(4*' '+'.name = "%s",' % suite.name) 416 f.writeln(4*' '+'.path = "%s",' % suite.path) 417 f.writeln(4*' '+'.flags = 0,') 418 if suite.defines: 419 # create suite define names 420 f.writeln(4*' '+'.define_names = (const char *const[' 421 'BENCH_IMPLICIT_DEFINE_COUNT+%d]){' % ( 422 len(suite.defines))) 423 for k in sorted(suite.defines): 424 f.writeln(8*' '+'[%-24s] = "%s",' % (k+'_i', k)) 425 f.writeln(4*' '+'},') 426 f.writeln(4*' '+'.define_count = ' 427 'BENCH_IMPLICIT_DEFINE_COUNT+%d,' % len(suite.defines)) 428 f.writeln(4*' '+'.cases = (const struct bench_case[]){') 429 for case in suite.cases: 430 # create case structs 431 f.writeln(8*' '+'{') 432 f.writeln(12*' '+'.name = "%s",' % case.name) 433 f.writeln(12*' '+'.path = "%s",' % case.path) 434 f.writeln(12*' '+'.flags = 0,') 435 f.writeln(12*' '+'.permutations = %d,' 436 % len(case.permutations)) 437 if case.defines: 438 f.writeln(12*' '+'.defines ' 439 '= (const bench_define_t*)__bench__%s__defines,' 440 % (case.name)) 441 if suite.if_ is not None or case.if_ is not None: 442 f.writeln(12*' '+'.filter = __bench__%s__filter,' 443 % (case.name)) 444 f.writeln(12*' '+'.run = __bench__%s__run,' 445 % (case.name)) 446 f.writeln(8*' '+'},') 447 f.writeln(4*' '+'},') 448 f.writeln(4*' '+'.case_count = %d,' % len(suite.cases)) 449 f.writeln('};') 450 f.writeln() 451 452 else: 453 # copy source 454 f.writeln('#line 1 "%s"' % args['source']) 455 with open(args['source']) as sf: 456 shutil.copyfileobj(sf, f) 457 f.writeln() 458 459 # write any internal benches 460 for suite in suites: 461 for case in suite.cases: 462 if (case.in_ is not None 463 and os.path.normpath(case.in_) 464 == os.path.normpath(args['source'])): 465 # write defines, but note we need to undef any 466 # new defines since 
we're in someone else's file 467 if suite.defines: 468 for i, define in enumerate( 469 sorted(suite.defines)): 470 f.writeln('#ifndef %s' % define) 471 f.writeln('#define %-24s ' 472 'BENCH_IMPLICIT_DEFINE_COUNT+%d' % ( 473 define+'_i', i)) 474 f.writeln('#define %-24s ' 475 'BENCH_DEFINE(%s)' % ( 476 define, define+'_i')) 477 f.writeln('#define ' 478 '__BENCH__%s__NEEDS_UNDEF' % ( 479 define)) 480 f.writeln('#endif') 481 f.writeln() 482 483 write_case_functions(f, suite, case) 484 485 if suite.defines: 486 for define in sorted(suite.defines): 487 f.writeln('#ifdef __BENCH__%s__NEEDS_UNDEF' 488 % define) 489 f.writeln('#undef __BENCH__%s__NEEDS_UNDEF' 490 % define) 491 f.writeln('#undef %s' % define) 492 f.writeln('#undef %s' % (define+'_i')) 493 f.writeln('#endif') 494 f.writeln() 495 496def find_runner(runner, **args): 497 cmd = runner.copy() 498 499 # run under some external command? 500 if args.get('exec'): 501 cmd[:0] = args['exec'] 502 503 # run under valgrind? 504 if args.get('valgrind'): 505 cmd[:0] = args['valgrind_path'] + [ 506 '--leak-check=full', 507 '--track-origins=yes', 508 '--error-exitcode=4', 509 '-q'] 510 511 # run under perf? 
512 if args.get('perf'): 513 cmd[:0] = args['perf_script'] + list(filter(None, [ 514 '-R', 515 '--perf-freq=%s' % args['perf_freq'] 516 if args.get('perf_freq') else None, 517 '--perf-period=%s' % args['perf_period'] 518 if args.get('perf_period') else None, 519 '--perf-events=%s' % args['perf_events'] 520 if args.get('perf_events') else None, 521 '--perf-path=%s' % args['perf_path'] 522 if args.get('perf_path') else None, 523 '-o%s' % args['perf']])) 524 525 # other context 526 if args.get('geometry'): 527 cmd.append('-G%s' % args['geometry']) 528 if args.get('disk'): 529 cmd.append('-d%s' % args['disk']) 530 if args.get('trace'): 531 cmd.append('-t%s' % args['trace']) 532 if args.get('trace_backtrace'): 533 cmd.append('--trace-backtrace') 534 if args.get('trace_period'): 535 cmd.append('--trace-period=%s' % args['trace_period']) 536 if args.get('trace_freq'): 537 cmd.append('--trace-freq=%s' % args['trace_freq']) 538 if args.get('read_sleep'): 539 cmd.append('--read-sleep=%s' % args['read_sleep']) 540 if args.get('prog_sleep'): 541 cmd.append('--prog-sleep=%s' % args['prog_sleep']) 542 if args.get('erase_sleep'): 543 cmd.append('--erase-sleep=%s' % args['erase_sleep']) 544 545 # defines? 
546 if args.get('define'): 547 for define in args.get('define'): 548 cmd.append('-D%s' % define) 549 550 return cmd 551 552def list_(runner, bench_ids=[], **args): 553 cmd = find_runner(runner, **args) + bench_ids 554 if args.get('summary'): cmd.append('--summary') 555 if args.get('list_suites'): cmd.append('--list-suites') 556 if args.get('list_cases'): cmd.append('--list-cases') 557 if args.get('list_suite_paths'): cmd.append('--list-suite-paths') 558 if args.get('list_case_paths'): cmd.append('--list-case-paths') 559 if args.get('list_defines'): cmd.append('--list-defines') 560 if args.get('list_permutation_defines'): 561 cmd.append('--list-permutation-defines') 562 if args.get('list_implicit_defines'): 563 cmd.append('--list-implicit-defines') 564 if args.get('list_geometries'): cmd.append('--list-geometries') 565 566 if args.get('verbose'): 567 print(' '.join(shlex.quote(c) for c in cmd)) 568 return sp.call(cmd) 569 570 571def find_perms(runner_, ids=[], **args): 572 case_suites = {} 573 expected_case_perms = co.defaultdict(lambda: 0) 574 expected_perms = 0 575 total_perms = 0 576 577 # query cases from the runner 578 cmd = runner_ + ['--list-cases'] + ids 579 if args.get('verbose'): 580 print(' '.join(shlex.quote(c) for c in cmd)) 581 proc = sp.Popen(cmd, 582 stdout=sp.PIPE, 583 stderr=sp.PIPE if not args.get('verbose') else None, 584 universal_newlines=True, 585 errors='replace', 586 close_fds=False) 587 pattern = re.compile( 588 '^(?P<case>[^\s]+)' 589 '\s+(?P<flags>[^\s]+)' 590 '\s+(?P<filtered>\d+)/(?P<perms>\d+)') 591 # skip the first line 592 for line in it.islice(proc.stdout, 1, None): 593 m = pattern.match(line) 594 if m: 595 filtered = int(m.group('filtered')) 596 perms = int(m.group('perms')) 597 expected_case_perms[m.group('case')] += filtered 598 expected_perms += filtered 599 total_perms += perms 600 proc.wait() 601 if proc.returncode != 0: 602 if not args.get('verbose'): 603 for line in proc.stderr: 604 sys.stdout.write(line) 605 sys.exit(-1) 
606 607 # get which suite each case belongs to via paths 608 cmd = runner_ + ['--list-case-paths'] + ids 609 if args.get('verbose'): 610 print(' '.join(shlex.quote(c) for c in cmd)) 611 proc = sp.Popen(cmd, 612 stdout=sp.PIPE, 613 stderr=sp.PIPE if not args.get('verbose') else None, 614 universal_newlines=True, 615 errors='replace', 616 close_fds=False) 617 pattern = re.compile( 618 '^(?P<case>[^\s]+)' 619 '\s+(?P<path>[^:]+):(?P<lineno>\d+)') 620 # skip the first line 621 for line in it.islice(proc.stdout, 1, None): 622 m = pattern.match(line) 623 if m: 624 path = m.group('path') 625 # strip path/suffix here 626 suite = os.path.basename(path) 627 if suite.endswith('.toml'): 628 suite = suite[:-len('.toml')] 629 case_suites[m.group('case')] = suite 630 proc.wait() 631 if proc.returncode != 0: 632 if not args.get('verbose'): 633 for line in proc.stderr: 634 sys.stdout.write(line) 635 sys.exit(-1) 636 637 # figure out expected suite perms 638 expected_suite_perms = co.defaultdict(lambda: 0) 639 for case, suite in case_suites.items(): 640 expected_suite_perms[suite] += expected_case_perms[case] 641 642 return ( 643 case_suites, 644 expected_suite_perms, 645 expected_case_perms, 646 expected_perms, 647 total_perms) 648 649def find_path(runner_, id, **args): 650 path = None 651 # query from runner 652 cmd = runner_ + ['--list-case-paths', id] 653 if args.get('verbose'): 654 print(' '.join(shlex.quote(c) for c in cmd)) 655 proc = sp.Popen(cmd, 656 stdout=sp.PIPE, 657 stderr=sp.PIPE if not args.get('verbose') else None, 658 universal_newlines=True, 659 errors='replace', 660 close_fds=False) 661 pattern = re.compile( 662 '^(?P<case>[^\s]+)' 663 '\s+(?P<path>[^:]+):(?P<lineno>\d+)') 664 # skip the first line 665 for line in it.islice(proc.stdout, 1, None): 666 m = pattern.match(line) 667 if m and path is None: 668 path_ = m.group('path') 669 lineno = int(m.group('lineno')) 670 path = (path_, lineno) 671 proc.wait() 672 if proc.returncode != 0: 673 if not 
args.get('verbose'): 674 for line in proc.stderr: 675 sys.stdout.write(line) 676 sys.exit(-1) 677 678 return path 679 680def find_defines(runner_, id, **args): 681 # query permutation defines from runner 682 cmd = runner_ + ['--list-permutation-defines', id] 683 if args.get('verbose'): 684 print(' '.join(shlex.quote(c) for c in cmd)) 685 proc = sp.Popen(cmd, 686 stdout=sp.PIPE, 687 stderr=sp.PIPE if not args.get('verbose') else None, 688 universal_newlines=True, 689 errors='replace', 690 close_fds=False) 691 defines = co.OrderedDict() 692 pattern = re.compile('^(?P<define>\w+)=(?P<value>.+)') 693 for line in proc.stdout: 694 m = pattern.match(line) 695 if m: 696 define = m.group('define') 697 value = m.group('value') 698 defines[define] = value 699 proc.wait() 700 if proc.returncode != 0: 701 if not args.get('verbose'): 702 for line in proc.stderr: 703 sys.stdout.write(line) 704 sys.exit(-1) 705 706 return defines 707 708 709# Thread-safe CSV writer 710class BenchOutput: 711 def __init__(self, path, head=None, tail=None): 712 self.f = openio(path, 'w+', 1) 713 self.lock = th.Lock() 714 self.head = head or [] 715 self.tail = tail or [] 716 self.writer = csv.DictWriter(self.f, self.head + self.tail) 717 self.rows = [] 718 719 def close(self): 720 self.f.close() 721 722 def __enter__(self): 723 return self 724 725 def __exit__(self, *_): 726 self.f.close() 727 728 def writerow(self, row): 729 with self.lock: 730 self.rows.append(row) 731 if all(k in self.head or k in self.tail for k in row.keys()): 732 # can simply append 733 self.writer.writerow(row) 734 else: 735 # need to rewrite the file 736 self.head.extend(row.keys() - (self.head + self.tail)) 737 self.f.seek(0) 738 self.f.truncate() 739 self.writer = csv.DictWriter(self.f, self.head + self.tail) 740 self.writer.writeheader() 741 for row in self.rows: 742 self.writer.writerow(row) 743 744# A bench failure 745class BenchFailure(Exception): 746 def __init__(self, id, returncode, stdout, assert_=None): 747 self.id 
= id 748 self.returncode = returncode 749 self.stdout = stdout 750 self.assert_ = assert_ 751 752def run_stage(name, runner_, ids, stdout_, trace_, output_, **args): 753 # get expected suite/case/perm counts 754 (case_suites, 755 expected_suite_perms, 756 expected_case_perms, 757 expected_perms, 758 total_perms) = find_perms(runner_, ids, **args) 759 760 passed_suite_perms = co.defaultdict(lambda: 0) 761 passed_case_perms = co.defaultdict(lambda: 0) 762 passed_perms = 0 763 readed = 0 764 proged = 0 765 erased = 0 766 failures = [] 767 killed = False 768 769 pattern = re.compile('^(?:' 770 '(?P<op>running|finished|skipped|powerloss)' 771 ' (?P<id>(?P<case>[^:]+)[^\s]*)' 772 '(?: (?P<readed>\d+))?' 773 '(?: (?P<proged>\d+))?' 774 '(?: (?P<erased>\d+))?' 775 '|' '(?P<path>[^:]+):(?P<lineno>\d+):(?P<op_>assert):' 776 ' *(?P<message>.*)' 777 ')$') 778 locals = th.local() 779 children = set() 780 781 def run_runner(runner_, ids=[]): 782 nonlocal passed_suite_perms 783 nonlocal passed_case_perms 784 nonlocal passed_perms 785 nonlocal readed 786 nonlocal proged 787 nonlocal erased 788 nonlocal locals 789 790 # run the benches! 
791 cmd = runner_ + ids 792 if args.get('verbose'): 793 print(' '.join(shlex.quote(c) for c in cmd)) 794 795 mpty, spty = pty.openpty() 796 proc = sp.Popen(cmd, stdout=spty, stderr=spty, close_fds=False) 797 os.close(spty) 798 children.add(proc) 799 mpty = os.fdopen(mpty, 'r', 1) 800 801 last_id = None 802 last_stdout = co.deque(maxlen=args.get('context', 5) + 1) 803 last_assert = None 804 try: 805 while True: 806 # parse a line for state changes 807 try: 808 line = mpty.readline() 809 except OSError as e: 810 if e.errno != errno.EIO: 811 raise 812 break 813 if not line: 814 break 815 last_stdout.append(line) 816 if stdout_: 817 try: 818 stdout_.write(line) 819 stdout_.flush() 820 except BrokenPipeError: 821 pass 822 823 m = pattern.match(line) 824 if m: 825 op = m.group('op') or m.group('op_') 826 if op == 'running': 827 locals.seen_perms += 1 828 last_id = m.group('id') 829 last_stdout.clear() 830 last_assert = None 831 elif op == 'finished': 832 case = m.group('case') 833 suite = case_suites[case] 834 readed_ = int(m.group('readed')) 835 proged_ = int(m.group('proged')) 836 erased_ = int(m.group('erased')) 837 passed_suite_perms[suite] += 1 838 passed_case_perms[case] += 1 839 passed_perms += 1 840 readed += readed_ 841 proged += proged_ 842 erased += erased_ 843 if output_: 844 # get defines and write to csv 845 defines = find_defines( 846 runner_, m.group('id'), **args) 847 output_.writerow({ 848 'suite': suite, 849 'case': case, 850 'bench_readed': readed_, 851 'bench_proged': proged_, 852 'bench_erased': erased_, 853 **defines}) 854 elif op == 'skipped': 855 locals.seen_perms += 1 856 elif op == 'assert': 857 last_assert = ( 858 m.group('path'), 859 int(m.group('lineno')), 860 m.group('message')) 861 # go ahead and kill the process, aborting takes a while 862 if args.get('keep_going'): 863 proc.kill() 864 except KeyboardInterrupt: 865 raise BenchFailure(last_id, 1, list(last_stdout)) 866 finally: 867 children.remove(proc) 868 mpty.close() 869 870 proc.wait() 
871 if proc.returncode != 0: 872 raise BenchFailure( 873 last_id, 874 proc.returncode, 875 list(last_stdout), 876 last_assert) 877 878 def run_job(runner_, ids=[], start=None, step=None): 879 nonlocal failures 880 nonlocal killed 881 nonlocal locals 882 883 start = start or 0 884 step = step or 1 885 while start < total_perms: 886 job_runner = runner_.copy() 887 if args.get('isolate') or args.get('valgrind'): 888 job_runner.append('-s%s,%s,%s' % (start, start+step, step)) 889 else: 890 job_runner.append('-s%s,,%s' % (start, step)) 891 892 try: 893 # run the benches 894 locals.seen_perms = 0 895 run_runner(job_runner, ids) 896 assert locals.seen_perms > 0 897 start += locals.seen_perms*step 898 899 except BenchFailure as failure: 900 # keep track of failures 901 if output_: 902 case, _ = failure.id.split(':', 1) 903 suite = case_suites[case] 904 # get defines and write to csv 905 defines = find_defines(runner_, failure.id, **args) 906 output_.writerow({ 907 'suite': suite, 908 'case': case, 909 **defines}) 910 911 # race condition for multiple failures? 912 if failures and not args.get('keep_going'): 913 break 914 915 failures.append(failure) 916 917 if args.get('keep_going') and not killed: 918 # resume after failed bench 919 assert locals.seen_perms > 0 920 start += locals.seen_perms*step 921 continue 922 else: 923 # stop other benches 924 killed = True 925 for child in children.copy(): 926 child.kill() 927 break 928 929 930 # parallel jobs? 
931 runners = [] 932 if 'jobs' in args: 933 for job in range(args['jobs']): 934 runners.append(th.Thread( 935 target=run_job, args=(runner_, ids, job, args['jobs']), 936 daemon=True)) 937 else: 938 runners.append(th.Thread( 939 target=run_job, args=(runner_, ids, None, None), 940 daemon=True)) 941 942 def print_update(done): 943 if not args.get('verbose') and (args['color'] or done): 944 sys.stdout.write('%s%srunning %s%s:%s %s%s' % ( 945 '\r\x1b[K' if args['color'] else '', 946 '\x1b[?7l' if not done else '', 947 ('\x1b[34m' if not failures else '\x1b[31m') 948 if args['color'] else '', 949 name, 950 '\x1b[m' if args['color'] else '', 951 ', '.join(filter(None, [ 952 '%d/%d suites' % ( 953 sum(passed_suite_perms[k] == v 954 for k, v in expected_suite_perms.items()), 955 len(expected_suite_perms)) 956 if (not args.get('by_suites') 957 and not args.get('by_cases')) else None, 958 '%d/%d cases' % ( 959 sum(passed_case_perms[k] == v 960 for k, v in expected_case_perms.items()), 961 len(expected_case_perms)) 962 if not args.get('by_cases') else None, 963 '%d/%d perms' % (passed_perms, expected_perms), 964 '%s%d/%d failures%s' % ( 965 '\x1b[31m' if args['color'] else '', 966 len(failures), 967 expected_perms, 968 '\x1b[m' if args['color'] else '') 969 if failures else None])), 970 '\x1b[?7h' if not done else '\n')) 971 sys.stdout.flush() 972 973 for r in runners: 974 r.start() 975 976 try: 977 while any(r.is_alive() for r in runners): 978 time.sleep(0.01) 979 print_update(False) 980 except KeyboardInterrupt: 981 # this is handled by the runner threads, we just 982 # need to not abort here 983 killed = True 984 finally: 985 print_update(True) 986 987 for r in runners: 988 r.join() 989 990 return ( 991 expected_perms, 992 passed_perms, 993 readed, 994 proged, 995 erased, 996 failures, 997 killed) 998 999 1000def run(runner, bench_ids=[], **args): 1001 # query runner for benches 1002 runner_ = find_runner(runner, **args) 1003 print('using runner: %s' % ' 
'.join(shlex.quote(c) for c in runner_)) 1004 (_, 1005 expected_suite_perms, 1006 expected_case_perms, 1007 expected_perms, 1008 total_perms) = find_perms(runner_, bench_ids, **args) 1009 print('found %d suites, %d cases, %d/%d permutations' % ( 1010 len(expected_suite_perms), 1011 len(expected_case_perms), 1012 expected_perms, 1013 total_perms)) 1014 print() 1015 1016 # automatic job detection? 1017 if args.get('jobs') == 0: 1018 args['jobs'] = len(os.sched_getaffinity(0)) 1019 1020 # truncate and open logs here so they aren't disconnected between benches 1021 stdout = None 1022 if args.get('stdout'): 1023 stdout = openio(args['stdout'], 'w', 1) 1024 trace = None 1025 if args.get('trace'): 1026 trace = openio(args['trace'], 'w', 1) 1027 output = None 1028 if args.get('output'): 1029 output = BenchOutput(args['output'], 1030 ['suite', 'case'], 1031 ['bench_readed', 'bench_proged', 'bench_erased']) 1032 1033 # measure runtime 1034 start = time.time() 1035 1036 # spawn runners 1037 expected = 0 1038 passed = 0 1039 readed = 0 1040 proged = 0 1041 erased = 0 1042 failures = [] 1043 for by in (bench_ids if bench_ids 1044 else expected_case_perms.keys() if args.get('by_cases') 1045 else expected_suite_perms.keys() if args.get('by_suites') 1046 else [None]): 1047 # spawn jobs for stage 1048 (expected_, 1049 passed_, 1050 readed_, 1051 proged_, 1052 erased_, 1053 failures_, 1054 killed) = run_stage( 1055 by or 'benches', 1056 runner_, 1057 [by] if by is not None else [], 1058 stdout, 1059 trace, 1060 output, 1061 **args) 1062 # collect passes/failures 1063 expected += expected_ 1064 passed += passed_ 1065 readed += readed_ 1066 proged += proged_ 1067 erased += erased_ 1068 failures.extend(failures_) 1069 if (failures and not args.get('keep_going')) or killed: 1070 break 1071 1072 stop = time.time() 1073 1074 if stdout: 1075 try: 1076 stdout.close() 1077 except BrokenPipeError: 1078 pass 1079 if trace: 1080 try: 1081 trace.close() 1082 except BrokenPipeError: 1083 pass 
1084 if output: 1085 output.close() 1086 1087 # show summary 1088 print() 1089 print('%sdone:%s %s' % ( 1090 ('\x1b[34m' if not failures else '\x1b[31m') 1091 if args['color'] else '', 1092 '\x1b[m' if args['color'] else '', 1093 ', '.join(filter(None, [ 1094 '%d readed' % readed, 1095 '%d proged' % proged, 1096 '%d erased' % erased, 1097 'in %.2fs' % (stop-start)])))) 1098 print() 1099 1100 # print each failure 1101 for failure in failures: 1102 assert failure.id is not None, '%s broken? %r' % ( 1103 ' '.join(shlex.quote(c) for c in runner_), 1104 failure) 1105 1106 # get some extra info from runner 1107 path, lineno = find_path(runner_, failure.id, **args) 1108 defines = find_defines(runner_, failure.id, **args) 1109 1110 # show summary of failure 1111 print('%s%s:%d:%sfailure:%s %s%s failed' % ( 1112 '\x1b[01m' if args['color'] else '', 1113 path, lineno, 1114 '\x1b[01;31m' if args['color'] else '', 1115 '\x1b[m' if args['color'] else '', 1116 failure.id, 1117 ' (%s)' % ', '.join('%s=%s' % (k,v) for k,v in defines.items()) 1118 if defines else '')) 1119 1120 if failure.stdout: 1121 stdout = failure.stdout 1122 if failure.assert_ is not None: 1123 stdout = stdout[:-1] 1124 for line in stdout[-args.get('context', 5):]: 1125 sys.stdout.write(line) 1126 1127 if failure.assert_ is not None: 1128 path, lineno, message = failure.assert_ 1129 print('%s%s:%d:%sassert:%s %s' % ( 1130 '\x1b[01m' if args['color'] else '', 1131 path, lineno, 1132 '\x1b[01;31m' if args['color'] else '', 1133 '\x1b[m' if args['color'] else '', 1134 message)) 1135 with open(path) as f: 1136 line = next(it.islice(f, lineno-1, None)).strip('\n') 1137 print(line) 1138 print() 1139 1140 # drop into gdb? 
    # on failure, optionally re-exec the first failing permutation under
    # gdb so the user lands in an interactive debugger
    if failures and (args.get('gdb')
            or args.get('gdb_case')
            or args.get('gdb_main')):
        # only the first failure is debugged
        failure = failures[0]
        cmd = runner_ + [failure.id]

        if args.get('gdb_main'):
            # we don't really need the case breakpoint here, but it
            # can be helpful
            path, lineno = find_path(runner_, failure.id, **args)
            cmd[:0] = args['gdb_path'] + [
                '-ex', 'break main',
                '-ex', 'break %s:%d' % (path, lineno),
                '-ex', 'run',
                '--args']
        elif args.get('gdb_case'):
            # break at the start of the failing bench case
            path, lineno = find_path(runner_, failure.id, **args)
            cmd[:0] = args['gdb_path'] + [
                '-ex', 'break %s:%d' % (path, lineno),
                '-ex', 'run',
                '--args']
        elif failure.assert_ is not None:
            # assert failure; walk up out of the raise machinery to the
            # asserting frame
            cmd[:0] = args['gdb_path'] + [
                '-ex', 'run',
                '-ex', 'frame function raise',
                '-ex', 'up 2',
                '--args']
        else:
            cmd[:0] = args['gdb_path'] + [
                '-ex', 'run',
                '--args']

        # exec gdb interactively; execvp replaces this process, so
        # nothing after this line runs on this path
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        os.execvp(cmd[0], cmd)

    # nonzero exit code if anything failed
    return 1 if failures else 0


def main(**args):
    """Top-level dispatch for the bench script.

    Resolves the color mode, then hands off to compile, list_, or run
    depending on which flags were given. Returns the process exit code.
    """
    # NOTE(review): sys is only imported inside the __main__ guard below;
    # since that guard runs at module scope it becomes a module global
    # before main is called, but this would NameError if the module were
    # imported and main called directly -- confirm intended usage.

    # figure out what color should be
    if args.get('color') == 'auto':
        args['color'] = sys.stdout.isatty()
    elif args.get('color') == 'always':
        args['color'] = True
    else:
        args['color'] = False

    if args.get('compile'):
        # presumably the script's own compile() helper defined earlier,
        # which shadows the compile builtin -- TODO confirm
        return compile(**args)
    elif (args.get('summary')
            or args.get('list_suites')
            or args.get('list_cases')
            or args.get('list_suite_paths')
            or args.get('list_case_paths')
            or args.get('list_defines')
            or args.get('list_permutation_defines')
            or args.get('list_implicit_defines')
            or args.get('list_geometries')):
        # all query-style flags funnel through list_
        return list_(**args)
    else:
        return run(**args)


if __name__ == "__main__":
    import argparse
    import sys
    # argparse has no builtin 'ignore' conflict handler; this monkey-patch
    # makes conflict_handler='ignore' below silently allow conflicting
    # options (e.g. the two -o/--output flags) instead of erroring
    argparse.ArgumentParser._handle_conflict_ignore = lambda *_: None
    # argument groups resolve conflicts separately, so patch them too
    argparse._ArgumentGroup._handle_conflict_ignore = lambda *_: None
    # conflict_handler='ignore' relies on the _handle_conflict_ignore
    # patches installed above
    parser = argparse.ArgumentParser(
        description="Build and run benches.",
        allow_abbrev=False,
        conflict_handler='ignore')
    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument(
        '--color',
        choices=['never', 'always', 'auto'],
        default='auto',
        help="When to use terminal colors. Defaults to 'auto'.")

    # bench flags
    bench_parser = parser.add_argument_group('bench options')
    bench_parser.add_argument(
        'runner',
        nargs='?',
        type=lambda x: x.split(),
        help="Bench runner to use for benching. Defaults to %r." % RUNNER_PATH)
    bench_parser.add_argument(
        'bench_ids',
        nargs='*',
        help="Description of benches to run.")
    bench_parser.add_argument(
        '-Y', '--summary',
        action='store_true',
        help="Show quick summary.")
    bench_parser.add_argument(
        '-l', '--list-suites',
        action='store_true',
        help="List bench suites.")
    bench_parser.add_argument(
        '-L', '--list-cases',
        action='store_true',
        help="List bench cases.")
    bench_parser.add_argument(
        '--list-suite-paths',
        action='store_true',
        help="List the path for each bench suite.")
    bench_parser.add_argument(
        '--list-case-paths',
        action='store_true',
        help="List the path and line number for each bench case.")
    bench_parser.add_argument(
        '--list-defines',
        action='store_true',
        help="List all defines in this bench-runner.")
    bench_parser.add_argument(
        '--list-permutation-defines',
        action='store_true',
        help="List explicit defines in this bench-runner.")
    bench_parser.add_argument(
        '--list-implicit-defines',
        action='store_true',
        help="List implicit defines in this bench-runner.")
    bench_parser.add_argument(
        '--list-geometries',
        action='store_true',
        help="List the available disk geometries.")
    bench_parser.add_argument(
        '-D', '--define',
        action='append',
        help="Override a bench define.")
    bench_parser.add_argument(
        '-G', '--geometry',
        help="Comma-separated list of disk geometries to bench.")
    bench_parser.add_argument(
        '-d', '--disk',
        help="Direct block device operations to this file.")
    bench_parser.add_argument(
        '-t', '--trace',
        help="Direct trace output to this file.")
    bench_parser.add_argument(
        '--trace-backtrace',
        action='store_true',
        help="Include a backtrace with every trace statement.")
    bench_parser.add_argument(
        '--trace-period',
        help="Sample trace output at this period in cycles.")
    bench_parser.add_argument(
        '--trace-freq',
        help="Sample trace output at this frequency in hz.")
    bench_parser.add_argument(
        '-O', '--stdout',
        help="Direct stdout to this file. Note stderr is already merged here.")
    bench_parser.add_argument(
        '-o', '--output',
        help="CSV file to store results.")
    bench_parser.add_argument(
        '--read-sleep',
        help="Artificial read delay in seconds.")
    bench_parser.add_argument(
        '--prog-sleep',
        help="Artificial prog delay in seconds.")
    bench_parser.add_argument(
        '--erase-sleep',
        help="Artificial erase delay in seconds.")
    bench_parser.add_argument(
        '-j', '--jobs',
        nargs='?',
        # int(x, 0) accepts 0x/0o/0b prefixed values as well as decimal
        type=lambda x: int(x, 0),
        const=0,
        help="Number of parallel runners to run. 0 runs one runner per core.")
    bench_parser.add_argument(
        '-k', '--keep-going',
        action='store_true',
        help="Don't stop on first error.")
    bench_parser.add_argument(
        '-i', '--isolate',
        action='store_true',
        help="Run each bench permutation in a separate process.")
    bench_parser.add_argument(
        '-b', '--by-suites',
        action='store_true',
        help="Step through benches by suite.")
    bench_parser.add_argument(
        '-B', '--by-cases',
        action='store_true',
        help="Step through benches by case.")
    bench_parser.add_argument(
        '--context',
        type=lambda x: int(x, 0),
        default=5,
        help="Show this many lines of stdout on bench failure. "
            "Defaults to 5.")
    bench_parser.add_argument(
        '--gdb',
        action='store_true',
        help="Drop into gdb on bench failure.")
    bench_parser.add_argument(
        '--gdb-case',
        action='store_true',
        help="Drop into gdb on bench failure but stop at the beginning "
            "of the failing bench case.")
    bench_parser.add_argument(
        '--gdb-main',
        action='store_true',
        help="Drop into gdb on bench failure but stop at the beginning "
            "of main.")
    bench_parser.add_argument(
        '--gdb-path',
        type=lambda x: x.split(),
        default=GDB_PATH,
        help="Path to the gdb executable, may include flags. "
            "Defaults to %r." % GDB_PATH)
    bench_parser.add_argument(
        '--exec',
        type=lambda e: e.split(),
        help="Run under another executable.")
    bench_parser.add_argument(
        '--valgrind',
        action='store_true',
        help="Run under Valgrind to find memory errors. Implicitly sets "
            "--isolate.")
    bench_parser.add_argument(
        '--valgrind-path',
        type=lambda x: x.split(),
        default=VALGRIND_PATH,
        help="Path to the Valgrind executable, may include flags. "
            "Defaults to %r." % VALGRIND_PATH)
    bench_parser.add_argument(
        '-p', '--perf',
        help="Run under Linux's perf to sample performance counters, writing "
            "samples to this file.")
    bench_parser.add_argument(
        '--perf-freq',
        help="perf sampling frequency. This is passed directly to the perf "
            "script.")
    bench_parser.add_argument(
        '--perf-period',
        help="perf sampling period. This is passed directly to the perf "
            "script.")
    bench_parser.add_argument(
        '--perf-events',
        help="perf events to record. This is passed directly to the perf "
            "script.")
    bench_parser.add_argument(
        '--perf-script',
        type=lambda x: x.split(),
        default=PERF_SCRIPT,
        help="Path to the perf script to use. Defaults to %r." % PERF_SCRIPT)
    bench_parser.add_argument(
        '--perf-path',
        type=lambda x: x.split(),
        help="Path to the perf executable, may include flags. This is passed "
            "directly to the perf script")

    # compilation flags
    comp_parser = parser.add_argument_group('compilation options')
    comp_parser.add_argument(
        'bench_paths',
        nargs='*',
        help="Description of *.toml files to compile. May be a directory "
            "or a list of paths.")
    comp_parser.add_argument(
        '-c', '--compile',
        action='store_true',
        help="Compile a bench suite or source file.")
    comp_parser.add_argument(
        '-s', '--source',
        help="Source file to compile, possibly injecting internal benches.")
    comp_parser.add_argument(
        '--include',
        default=HEADER_PATH,
        help="Inject this header file into every compiled bench file. "
            "Defaults to %r." % HEADER_PATH)
    # deliberately conflicts with the bench -o/--output above; the
    # 'ignore' conflict handler allows both declarations to coexist
    comp_parser.add_argument(
        '-o', '--output',
        help="Output file.")

    # runner/bench_paths overlap, so need to do some munging here
    # (the 'runner' positional doubles as the first bench path when
    # compiling)
    args = parser.parse_intermixed_args()
    args.bench_paths = [' '.join(args.runner or [])] + args.bench_ids
    args.runner = args.runner or [RUNNER_PATH]

    # drop None-valued args so callees' .get() defaults apply, and use
    # main's return value as the process exit code
    sys.exit(main(**{k: v
        for k, v in vars(args).items()
        if v is not None}))