1#!/usr/bin/env python3
2#
3# Parallel VM test case executor
4# Copyright (c) 2014-2019, Jouni Malinen <j@w1.fi>
5#
6# This software may be distributed under the terms of the BSD license.
7# See README for more details.
8
9from __future__ import print_function
10import curses
11import fcntl
12import logging
13import multiprocessing
14import os
15import selectors
16import subprocess
17import sys
18import time
19import errno
20
# Root logger; level and handler (file or stdout) are configured in main().
logger = logging.getLogger()
22
23# Test cases that take significantly longer time to execute than average.
# Names of test cases known to run significantly longer than average; used
# to schedule them first (so one of them does not serialize the end of a
# parallel run) and to implement the --short option.
# Note: quoting normalized to double quotes for consistency; values unchanged.
long_tests = ["ap_roam_open",
              "wpas_mesh_password_mismatch_retry",
              "wpas_mesh_password_mismatch",
              "hostapd_oom_wpa2_psk_connect",
              "ap_hs20_fetch_osu_stop",
              "ap_roam_wpa2_psk",
              "ibss_wpa_none_ccmp",
              "nfc_wps_er_handover_pk_hash_mismatch_sta",
              "go_neg_peers_force_diff_freq",
              "p2p_cli_invite",
              "sta_ap_scan_2b",
              "ap_pmf_sta_unprot_deauth_burst",
              "ap_bss_add_remove_during_ht_scan",
              "wext_scan_hidden",
              "autoscan_exponential",
              "nfc_p2p_client",
              "wnm_bss_keep_alive",
              "ap_inactivity_disconnect",
              "scan_bss_expiration_age",
              "autoscan_periodic",
              "discovery_group_client",
              "concurrent_p2pcli",
              "ap_bss_add_remove",
              "wpas_ap_wps",
              "wext_pmksa_cache",
              "ibss_wpa_none",
              "ap_ht_40mhz_intolerant_ap",
              "ibss_rsn",
              "discovery_pd_retries",
              "ap_wps_setup_locked_timeout",
              "ap_vht160",
              "he160",
              "he160b",
              "dfs_radar",
              "dfs",
              "dfs_ht40_minus",
              "dfs_etsi",
              "dfs_radar_vht80_downgrade",
              "ap_acs_dfs",
              "grpform_cred_ready_timeout",
              "hostapd_oom_wpa2_eap_connect",
              "wpas_ap_dfs",
              "autogo_many",
              "hostapd_oom_wpa2_eap",
              "ibss_open",
              "proxyarp_open_ebtables",
              "proxyarp_open_ebtables_ipv6",
              "radius_failover",
              "obss_scan_40_intolerant",
              "dbus_connect_oom",
              "proxyarp_open",
              "proxyarp_open_ipv6",
              "ap_wps_iteration",
              "ap_wps_iteration_error",
              "ap_wps_pbc_timeout",
              "ap_wps_pbc_ap_timeout",
              "ap_wps_pin_ap_timeout",
              "ap_wps_http_timeout",
              "p2p_go_move_reg_change",
              "p2p_go_move_active",
              "p2p_go_move_scm",
              "p2p_go_move_scm_peer_supports",
              "p2p_go_move_scm_peer_does_not_support",
              "p2p_go_move_scm_multi"]
88
def get_failed(vm):
    """Return the names of all failed test cases across every VM."""
    # Flatten the per-VM 'failed' lists into one combined list.
    return [name for i in range(num_servers) for name in vm[i]['failed']]
94
def vm_read_stdout(vm, test_queue):
    """Consume and parse pending stdout output from a VM subprocess.

    Reads the VM's non-blocking stdout pipe, splits complete lines out of
    the buffered data (partial lines are carried over in vm['pending'])
    and updates the global result counters based on the status lines
    (READY/PASS/FAIL/NOT-FOUND/SKIP/REASON/START) emitted by the test
    runner inside the VM.

    Returns True when the VM completed a step and is ready for the next
    test case; False when no actionable output was available.
    """
    global total_started, total_passed, total_failed, total_skipped
    global rerun_failures
    global first_run_failures
    global all_failed

    ready = False
    try:
        out = vm['proc'].stdout.read()
        if out == None:
            return False
        out = out.decode()
    except IOError as e:
        # EAGAIN from the non-blocking pipe: nothing to read right now.
        if e.errno == errno.EAGAIN:
            return False
        raise
    logger.debug("VM[%d] stdout.read[%s]" % (vm['idx'], out.rstrip()))
    # Prepend any partial line left over from the previous read.
    pending = vm['pending'] + out
    lines = []
    while True:
        pos = pending.find('\n')
        if pos < 0:
            break
        line = pending[0:pos].rstrip()
        pending = pending[(pos + 1):]
        logger.debug("VM[%d] stdout full line[%s]" % (vm['idx'], line))
        if line.startswith("READY"):
            # VM has finished booting and can accept test cases.
            vm['starting'] = False
            vm['started'] = True
            ready = True
        elif line.startswith("PASS"):
            ready = True
            total_passed += 1
        elif line.startswith("FAIL"):
            ready = True
            total_failed += 1
            vals = line.split(' ')
            if len(vals) < 2:
                logger.info("VM[%d] incomplete FAIL line: %s" % (vm['idx'],
                                                                 line))
                name = line
            else:
                name = vals[1]
            logger.debug("VM[%d] test case failed: %s" % (vm['idx'], name))
            vm['failed'].append(name)
            all_failed.append(name)
            if name != vm['current_name']:
                logger.info("VM[%d] test result mismatch: %s (expected %s)" % (vm['idx'], name, vm['current_name']))
            else:
                count = vm['current_count']
                if count == 0:
                    # First-run failure; retries are tracked separately.
                    first_run_failures.append(name)
                if rerun_failures and count < 1:
                    # Queue a single automatic retry of the failed case.
                    logger.debug("Requeue test case %s" % name)
                    test_queue.append((name, vm['current_count'] + 1))
        elif line.startswith("NOT-FOUND"):
            ready = True
            total_failed += 1
            logger.info("VM[%d] test case not found" % vm['idx'])
        elif line.startswith("SKIP"):
            ready = True
            total_skipped += 1
        elif line.startswith("REASON"):
            # Record the skip reason text following the "REASON " prefix.
            vm['skip_reason'].append(line[7:])
        elif line.startswith("START"):
            total_started += 1
            if len(vm['failed']) == 0:
                # While no failure has occurred yet, record the sequence
                # of started test cases so a failure can be reproduced.
                vals = line.split(' ')
                if len(vals) >= 2:
                    vm['fail_seq'].append(vals[1])
        vm['out'] += line + '\n'
        lines.append(line)
    vm['pending'] = pending
    return ready
169
def start_vm(vm, sel):
    """Launch the VM subprocess and register its output pipes.

    Runs vm['cmd'] with piped stdin/stdout/stderr, switches both output
    pipes into non-blocking mode, and registers them with the selector so
    the main loop is woken whenever the VM produces output.
    """
    logger.info("VM[%d] starting up" % (vm['idx'] + 1))
    vm['starting'] = True
    proc = subprocess.Popen(vm['cmd'],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    vm['proc'] = proc
    # Clear the command so this slot is not launched a second time.
    vm['cmd'] = None
    for pipe in (proc.stdout, proc.stderr):
        flags = fcntl.fcntl(pipe.fileno(), fcntl.F_GETFL)
        fcntl.fcntl(pipe.fileno(), fcntl.F_SETFL, flags | os.O_NONBLOCK)
        sel.register(pipe, selectors.EVENT_READ, vm)
183
def num_vm_starting():
    """Return the number of VMs currently in the 'starting' state."""
    return sum(1 for i in range(num_servers) if vm[i]['starting'])
190
def vm_read_stderr(vm):
    """Drain pending stderr output from the VM subprocess.

    Any decoded data is appended to vm['err'] and logged. EAGAIN from the
    non-blocking pipe simply means there is nothing to read right now.
    """
    try:
        data = vm['proc'].stderr.read()
    except IOError as e:
        if e.errno != errno.EAGAIN:
            raise
        return
    if data is None:
        return
    text = data.decode()
    if text:
        vm['err'] += text
        logger.info("VM[%d] stderr.read[%s]" % (vm['idx'], text))
202
def vm_next_step(_vm, scr, test_queue):
    """Feed a VM the next queued test case, or tell it to shut down.

    Pops the next (name, count) entry from test_queue and writes the test
    case name to the VM's stdin; an empty queue is signalled with a bare
    newline, which makes the VM exit. The VM's curses status line is
    updated when it is within the visible screen area.
    """
    max_y, max_x = scr.getmaxyx()
    # Clamp the totals row to the last visible screen line.
    status_line = min(num_servers + 1, max_y - 1)
    row_visible = _vm['idx'] + 1 < status_line
    if row_visible:
        scr.move(_vm['idx'] + 1, 10)
        scr.clrtoeol()
    if not test_queue:
        # No more work: an empty command line tells the VM to terminate.
        _vm['proc'].stdin.write(b'\n')
        _vm['proc'].stdin.flush()
        if row_visible:
            scr.addstr("shutting down")
        logger.info("VM[%d] shutting down" % _vm['idx'])
        return
    name, count = test_queue.pop(0)
    _vm['current_name'] = name
    _vm['current_count'] = count
    _vm['proc'].stdin.write(name.encode() + b'\n')
    _vm['proc'].stdin.flush()
    if row_visible:
        scr.addstr(name)
    logger.debug("VM[%d] start test %s" % (_vm['idx'], name))
226
def check_vm_start(scr, sel, test_queue):
    """Start additional VMs on demand and report the overall run state.

    Walks through all VM slots; a slot with a live process marks the run
    as still active. A slot that has not been started yet (vm['cmd'] is
    still set) is started when there is enough queued work and the number
    of concurrently booting VMs is below the startup limit.

    Returns (running, updated): running is True while at least one VM is
    alive or one was just started; updated is True when a VM was started
    and the screen needs refreshing.
    """
    running = False
    max_y, max_x = scr.getmaxyx()
    status_line = num_servers + 1
    if status_line >= max_y:
        status_line = max_y - 1

    # Cap the number of simultaneously booting VMs at half the CPU count
    # (when more than four CPUs are available) to avoid overloading the
    # host during VM startup. Loop invariant, so compute it only once.
    max_start = multiprocessing.cpu_count()
    if max_start > 4:
        max_start /= 2

    for i in range(num_servers):
        if vm[i]['proc']:
            running = True
            continue

        # Either not yet started or already stopped VM
        num_starting = num_vm_starting()
        if vm[i]['cmd'] and len(test_queue) > num_starting and \
           num_starting < max_start:
            if i + 1 < status_line:
                scr.move(i + 1, 10)
                scr.clrtoeol()
                scr.addstr(i + 1, 10, "starting VM")
            start_vm(vm[i], sel)
            return True, True

    return running, False
253
def vm_terminated(_vm, scr, sel, test_queue):
    """Handle cleanup after a VM process has exited.

    Unregisters the VM's pipes from the selector, clears its process
    handle, scans the VM console log for a kernel panic, and flags an
    unexpected exit when queued work remains that the still-running VMs
    cannot cover.

    Returns True when the on-screen status was updated.
    """
    updated = False
    for stream in [_vm['proc'].stdout, _vm['proc'].stderr]:
        sel.unregister(stream)
    _vm['proc'] = None
    max_y, max_x = scr.getmaxyx()
    status_line = num_servers + 1
    if status_line >= max_y:
        status_line = max_y - 1
    if _vm['idx'] + 1 < status_line:
        scr.move(_vm['idx'] + 1, 10)
        scr.clrtoeol()
    log = '{}/{}.srv.{}/console'.format(dir, timestamp, _vm['idx'] + 1)
    with open(log, 'r') as f:
        if "Kernel panic" in f.read():
            if _vm['idx'] + 1 < status_line:
                scr.addstr("kernel panic")
            logger.info("VM[%d] kernel panic" % _vm['idx'])
            updated = True
    if test_queue:
        # Count the VMs that are still running. Bug fix: the loop used to
        # test _vm['proc'] (always None here, it was just cleared above)
        # instead of vm[i]['proc'], so num_vm was always 0.
        num_vm = 0
        for i in range(num_servers):
            if vm[i]['proc']:
                num_vm += 1
        if len(test_queue) > num_vm:
            if _vm['idx'] + 1 < status_line:
                scr.addstr("unexpected exit")
            # Bug fix: log the terminated VM's index, not the loop variable.
            logger.info("VM[%d] unexpected exit" % _vm['idx'])
            updated = True
    return updated
284
def update_screen(scr, total_tests):
    """Refresh the curses progress display with the current totals.

    Shows the completion percentage, the aggregate counters and a tail of
    the most recently failed test case names (as many as fit on screen
    below the per-VM status lines).
    """
    global all_failed
    max_y, max_x = scr.getmaxyx()
    status_line = num_servers + 1
    if status_line >= max_y:
        status_line = max_y - 1
    completed = total_passed + total_failed + total_skipped
    scr.move(status_line, 10)
    scr.clrtoeol()
    scr.addstr("{} %".format(int(100.0 * completed / total_tests)))
    scr.addstr(status_line, 20,
               "TOTAL={} STARTED={} PASS={} FAIL={} SKIP={}".format(total_tests, total_started, total_passed, total_failed, total_skipped))
    avail_lines = max_y - num_servers - 3
    if all_failed and avail_lines > 0 and num_servers + 2 < max_y - 1:
        scr.move(num_servers + 2, 0)
        scr.addstr("Last failed test cases:")
        show = min(avail_lines, len(all_failed))
        first_idx = len(all_failed) - show
        for row, idx in enumerate(range(first_idx, len(all_failed)), 1):
            if num_servers + 2 + row >= max_y:
                break
            scr.move(num_servers + 2 + row, 0)
            scr.addstr(all_failed[idx])
            scr.clrtoeol()
    scr.refresh()
312
def show_progress(scr):
    """Main event loop: drive all VMs until the test queue is drained.

    Starts the first VM, then multiplexes over the VMs' stdout/stderr
    pipes with a selector: each VM gets a new test case whenever it
    reports completing the previous one, further VMs are started on
    demand, and the curses status display is kept up to date. Returns
    once no VM process is running any more.
    """
    global num_servers
    global vm
    global dir
    global timestamp
    global tests
    global first_run_failures
    global total_started, total_passed, total_failed, total_skipped
    global rerun_failures

    sel = selectors.DefaultSelector()
    total_tests = len(tests)
    logger.info("Total tests: %d" % total_tests)
    # Queue entries are (test case name, run count); count > 0 is a retry.
    test_queue = [(t, 0) for t in tests]
    start_vm(vm[0], sel)

    scr.leaveok(1)
    max_y, max_x = scr.getmaxyx()
    status_line = num_servers + 1
    if status_line >= max_y:
        status_line = max_y - 1
    scr.addstr(0, 0, "Parallel test execution status", curses.A_BOLD)
    for i in range(0, num_servers):
        if i + 1 < status_line:
            scr.addstr(i + 1, 0, "VM %d:" % (i + 1), curses.A_BOLD)
            status = "starting VM" if vm[i]['proc'] else "not yet started"
            scr.addstr(i + 1, 10, status)
    scr.addstr(status_line, 0, "Total:", curses.A_BOLD)
    scr.addstr(status_line, 20, "TOTAL={} STARTED=0 PASS=0 FAIL=0 SKIP=0".format(total_tests))
    scr.refresh()

    while True:
        updated = False
        # Wait (up to 1 s) for output from any VM.
        events = sel.select(timeout=1)
        for key, mask in events:
            _vm = key.data
            if not _vm['proc']:
                continue
            vm_read_stderr(_vm)
            if vm_read_stdout(_vm, test_queue):
                # VM completed a step; hand it the next test case.
                vm_next_step(_vm, scr, test_queue)
                updated = True
            vm_read_stderr(_vm)
            if _vm['proc'].poll() is not None:
                if vm_terminated(_vm, scr, sel, test_queue):
                    updated = True

        running, run_update = check_vm_start(scr, sel, test_queue)
        if updated or run_update:
            update_screen(scr, total_tests)
        if not running:
            break
    sel.close()

    # Any VM still marked as active at this point did not terminate
    # cleanly before the loop ended; note that on screen and in the log.
    for i in range(num_servers):
        if not vm[i]['proc']:
            continue
        vm[i]['proc'] = None
        scr.move(i + 1, 10)
        scr.clrtoeol()
        scr.addstr("still running")
        logger.info("VM[%d] still running" % i)

    scr.refresh()
    time.sleep(0.3)
378
def known_output(tests, line):
    """Return True when a stdout line from a VM is expected output.

    Expected lines are empty lines, test case names, and the fixed set of
    status/prefix strings produced by the in-VM test runner; anything
    else is later reported to the user as unexpected output.
    """
    known_prefixes = ("START ", "PASS ", "FAIL ", "SKIP ", "REASON ",
                      "ALL-PASSED",
                      "READY",
                      "  ", "Exception: ",
                      "Traceback (most recent call last):",
                      "./run-all.sh: running",
                      "./run-all.sh: passing",
                      "Test run completed", "Logfiles are at",
                      "Starting test run",
                      "passed all", "skipped ", "failed tests:")
    return (not line) or (line in tests) or line.startswith(known_prefixes)
395
def main():
    """Command line entry point.

    Parses arguments, determines the test case list, runs the test cases
    on the requested number of parallel VMs, and writes a summary of the
    results (failures, skip reasons, unexpected output) to stdout and to
    log files. Exits with status 2 on persistent failures, 1 when all
    failures passed on retry, and 0 on full success.
    """
    import argparse
    import os
    global num_servers
    global vm
    global all_failed
    global dir
    global timestamp
    global tests
    global first_run_failures
    global total_started, total_passed, total_failed, total_skipped
    global rerun_failures

    total_started = 0
    total_passed = 0
    total_failed = 0
    total_skipped = 0

    debug_level = logging.INFO
    rerun_failures = True
    start_time = time.time()
    # The integer timestamp names this run's log directories and files.
    timestamp = int(start_time)

    scriptsdir = os.path.dirname(os.path.realpath(sys.argv[0]))

    p = argparse.ArgumentParser(description='run multiple testing VMs in parallel')
    p.add_argument('num_servers', metavar='number of VMs', type=int, choices=range(1, 100),
                   help="number of VMs to start")
    p.add_argument('-f', dest='testmodules', metavar='<test module>',
                   help='execute only tests from these test modules',
                   type=str, nargs='+')
    p.add_argument('-1', dest='no_retry', action='store_const', const=True, default=False,
                   help="don't retry failed tests automatically")
    p.add_argument('--debug', dest='debug', action='store_const', const=True, default=False,
                   help="enable debug logging")
    p.add_argument('--codecov', dest='codecov', action='store_const', const=True, default=False,
                   help="enable code coverage collection")
    p.add_argument('--shuffle-tests', dest='shuffle', action='store_const', const=True, default=False,
                   help="shuffle test cases to randomize order")
    p.add_argument('--short', dest='short', action='store_const', const=True,
                   default=False,
                   help="only run short-duration test cases")
    p.add_argument('--long', dest='long', action='store_const', const=True,
                   default=False,
                   help="include long-duration test cases")
    p.add_argument('--valgrind', dest='valgrind', action='store_const',
                   const=True, default=False,
                   help="run tests under valgrind")
    p.add_argument('--telnet', dest='telnet', metavar='<baseport>', type=int,
                   help="enable telnet server inside VMs, specify the base port here")
    p.add_argument('--nocurses', dest='nocurses', action='store_const',
                   const=True, default=False, help="Don't use curses for output")
    p.add_argument('params', nargs='*')
    args = p.parse_args()

    # Make sure the shared log directory exists.
    dir = os.environ.get('HWSIM_TEST_LOG_DIR', '/tmp/hwsim-test-logs')
    try:
        os.makedirs(dir)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    num_servers = args.num_servers
    rerun_failures = not args.no_retry
    if args.debug:
        debug_level = logging.DEBUG
    extra_args = []
    if args.valgrind:
        extra_args += ['--valgrind']
    if args.long:
        extra_args += ['--long']
    if args.codecov:
        # Build separate instrumented binaries into this run's log directory.
        print("Code coverage - build separate binaries")
        logdir = os.path.join(dir, str(timestamp))
        os.makedirs(logdir)
        subprocess.check_call([os.path.join(scriptsdir, 'build-codecov.sh'),
                               logdir])
        codecov_args = ['--codecov_dir', logdir]
        codecov = True
    else:
        codecov_args = []
        codecov = False

    # Determine the test case list: explicit names from the command line,
    # or the full list reported by run-tests.py -L.
    first_run_failures = []
    if args.params:
        tests = args.params
    else:
        tests = []
        cmd = [os.path.join(os.path.dirname(scriptsdir), 'run-tests.py'), '-L']
        if args.testmodules:
            cmd += ["-f"]
            cmd += args.testmodules
        lst = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        for l in lst.stdout.readlines():
            name = l.decode().split(' ')[0]
            tests.append(name)
    if len(tests) == 0:
        sys.exit("No test cases selected")

    if args.shuffle:
        from random import shuffle
        shuffle(tests)
    elif num_servers > 2 and len(tests) > 100:
        # Move test cases with long duration to the beginning as an
        # optimization to avoid last part of the test execution running a long
        # duration test case on a single VM while all other VMs have already
        # completed their work.
        for l in long_tests:
            if l in tests:
                tests.remove(l)
                tests.insert(0, l)
    if args.short:
        tests = [t for t in tests if t not in long_tests]

    # Log to a file when the curses UI is active, otherwise to stdout.
    logger.setLevel(debug_level)
    if not args.nocurses:
        log_handler = logging.FileHandler('parallel-vm.log')
    else:
        log_handler = logging.StreamHandler(sys.stdout)
    log_handler.setLevel(debug_level)
    fmt = "%(asctime)s %(levelname)s %(message)s"
    log_formatter = logging.Formatter(fmt)
    log_handler.setFormatter(log_formatter)
    logger.addHandler(log_handler)

    # Initialize per-VM state; the VM processes themselves are started on
    # demand from the show_progress() event loop.
    all_failed = []
    vm = {}
    for i in range(0, num_servers):
        cmd = [os.path.join(scriptsdir, 'vm-run.sh'),
               '--timestamp', str(timestamp),
               '--ext', 'srv.%d' % (i + 1),
               '-i'] + codecov_args + extra_args
        if args.telnet:
            cmd += ['--telnet', str(args.telnet + i)]
        vm[i] = {}
        vm[i]['idx'] = i
        vm[i]['starting'] = False
        vm[i]['started'] = False
        vm[i]['cmd'] = cmd
        vm[i]['proc'] = None
        vm[i]['out'] = ""
        vm[i]['pending'] = ""
        vm[i]['err'] = ""
        vm[i]['failed'] = []
        vm[i]['fail_seq'] = []
        vm[i]['skip_reason'] = []
    print('')

    if not args.nocurses:
        curses.wrapper(show_progress)
    else:
        # Minimal stand-in for a curses screen so show_progress() can be
        # reused unchanged when curses output is disabled.
        class FakeScreen:
            def leaveok(self, n):
                pass
            def refresh(self):
                pass
            def addstr(self, *args, **kw):
                pass
            def move(self, x, y):
                pass
            def clrtoeol(self):
                pass
            def getmaxyx(self):
                return (25, 80)

        show_progress(FakeScreen())

    # Write the combined per-VM output and point at failed test case logs.
    with open('{}/{}-parallel.log'.format(dir, timestamp), 'w') as f:
        for i in range(0, num_servers):
            f.write('VM {}\n{}\n{}\n'.format(i + 1, vm[i]['out'], vm[i]['err']))
        first = True
        for i in range(0, num_servers):
            for line in vm[i]['out'].splitlines():
                if line.startswith("FAIL "):
                    if first:
                        first = False
                        print("Logs for failed test cases:")
                        f.write("Logs for failed test cases:\n")
                    fname = "%s/%d.srv.%d/%s.log" % (dir, timestamp, i + 1,
                                                     line.split(' ')[1])
                    print(fname)
                    f.write("%s\n" % fname)

    failed = get_failed(vm)

    # Print a reproduction command line for each VM that saw failures.
    if first_run_failures:
        print("To re-run same failure sequence(s):")
        for i in range(0, num_servers):
            if len(vm[i]['failed']) == 0:
                continue
            print("./vm-run.sh", end=' ')
            if args.long:
                print("--long", end=' ')
            # Show at most the last 30 test cases leading up to the failure.
            skip = len(vm[i]['fail_seq'])
            skip -= min(skip, 30)
            for t in vm[i]['fail_seq']:
                if skip > 0:
                    skip -= 1
                    continue
                print(t, end=' ')
            logger.info("Failure sequence: " + " ".join(vm[i]['fail_seq']))
            print('')
        print("Failed test cases:")
        for f in first_run_failures:
            print(f, end=' ')
            logger.info("Failed: " + f)
        print('')
    # double_failed: test cases that failed on the retry as well (all
    # failures minus one occurrence of each first-run failure).
    double_failed = []
    for name in failed:
        double_failed.append(name)
    for test in first_run_failures:
        double_failed.remove(test)
    if not rerun_failures:
        pass
    elif failed and not double_failed:
        print("All failed cases passed on retry")
        logger.info("All failed cases passed on retry")
    elif double_failed:
        print("Failed even on retry:")
        for f in double_failed:
            print(f, end=' ')
            logger.info("Failed on retry: " + f)
        print('')
    res = "TOTAL={} PASS={} FAIL={} SKIP={}".format(total_started,
                                                    total_passed,
                                                    total_failed,
                                                    total_skipped)
    print(res)
    logger.info(res)
    print("Logs: " + dir + '/' + str(timestamp))
    logger.info("Logs: " + dir + '/' + str(timestamp))

    # Collect skip reasons from VMs that actually started and scan their
    # console logs for kernel panics.
    skip_reason = []
    for i in range(num_servers):
        if not vm[i]['started']:
            continue
        skip_reason += vm[i]['skip_reason']
        if len(vm[i]['pending']) > 0:
            logger.info("Unprocessed stdout from VM[%d]: '%s'" %
                        (i, vm[i]['pending']))
        log = '{}/{}.srv.{}/console'.format(dir, timestamp, i + 1)
        with open(log, 'r') as f:
            if "Kernel panic" in f.read():
                print("Kernel panic in " + log)
                logger.info("Kernel panic in " + log)
    # Map well-known skip reason strings to the build/configuration item
    # that would enable the skipped test cases.
    missing = {}
    missing['OCV not supported'] = 'OCV'
    missing['sigma_dut not available'] = 'sigma_dut'
    missing['Skip test case with long duration due to --long not specified'] = 'long'
    missing['TEST_ALLOC_FAIL not supported' ] = 'TEST_FAIL'
    missing['TEST_ALLOC_FAIL not supported in the build'] = 'TEST_FAIL'
    missing['TEST_FAIL not supported' ] = 'TEST_FAIL'
    missing['veth not supported (kernel CONFIG_VETH)'] = 'KERNEL:CONFIG_VETH'
    missing['WPA-EAP-SUITE-B-192 not supported'] = 'CONFIG_SUITEB192'
    missing['WPA-EAP-SUITE-B not supported'] = 'CONFIG_SUITEB'
    missing['wmediumd not available'] = 'wmediumd'
    missing['DPP not supported'] = 'CONFIG_DPP'
    missing['DPP version 2 not supported'] = 'CONFIG_DPP2'
    missing['EAP method PWD not supported in the build'] = 'CONFIG_EAP_PWD'
    missing['EAP method TEAP not supported in the build'] = 'CONFIG_EAP_TEAP'
    missing['FILS not supported'] = 'CONFIG_FILS'
    missing['FILS-SK-PFS not supported'] = 'CONFIG_FILS_SK_PFS'
    missing['OWE not supported'] = 'CONFIG_OWE'
    missing['SAE not supported'] = 'CONFIG_SAE'
    missing['Not using OpenSSL'] = 'CONFIG_TLS=openssl'
    missing['wpa_supplicant TLS library is not OpenSSL: internal'] = 'CONFIG_TLS=openssl'
    missing_items = []
    other_reasons = []
    for reason in sorted(set(skip_reason)):
        if reason in missing:
            missing_items.append(missing[reason])
        elif reason.startswith('OCSP-multi not supported with this TLS library'):
            missing_items.append('OCSP-MULTI')
        else:
            other_reasons.append(reason)
    if missing_items:
        print("Missing items (SKIP):", missing_items)
    if other_reasons:
        print("Other skip reasons:", other_reasons)

    # Report any VM output that does not match the expected patterns.
    for i in range(num_servers):
        unknown = ""
        for line in vm[i]['out'].splitlines():
            if not known_output(tests, line):
                unknown += line + "\n"
        if unknown:
            print("\nVM %d - unexpected stdout output:\n%s" % (i, unknown))
        if vm[i]['err']:
            print("\nVM %d - unexpected stderr output:\n%s\n" % (i, vm[i]['err']))

    if codecov:
        # Merge the per-VM coverage data and generate the combined report.
        print("Code coverage - preparing report")
        for i in range(num_servers):
            subprocess.check_call([os.path.join(scriptsdir,
                                                'process-codecov.sh'),
                                   logdir + ".srv.%d" % (i + 1),
                                   str(i)])
        subprocess.check_call([os.path.join(scriptsdir, 'combine-codecov.sh'),
                               logdir])
        print("file://%s/index.html" % logdir)
        logger.info("Code coverage report: file://%s/index.html" % logdir)

    end_time = time.time()
    try:
        # Best effort only; 'git describe' may fail outside a git tree.
        cmd = subprocess.Popen(['git', 'describe'], stdout=subprocess.PIPE)
        ver = cmd.stdout.read().decode().strip()
    except:
        ver = "unknown"
        pass
    logger.info("Tests run: {}  Tests failed: {}  Total time: {}  Version: {}".format(total_started, total_failed, end_time - start_time, ver))

    # Exit status: 2 = persistent failures, 1 = failures that passed on
    # retry, 0 = no failures at all.
    if double_failed or (failed and not rerun_failures):
        logger.info("Test run complete - failures found")
        sys.exit(2)
    if failed:
        logger.info("Test run complete - failures found on first run; passed on retry")
        sys.exit(1)
    logger.info("Test run complete - no failures")
    sys.exit(0)
715
# Script entry point.
if __name__ == "__main__":
    main()
718