# pylint: disable=C0301,C0103,C0111
from __future__ import print_function
from collections import defaultdict, namedtuple
from sys import platform
import os
import sys
import argparse
import subprocess
import yaml
import multiprocessing

this_path = os.path.abspath(os.path.dirname(__file__))
registered_handlers = []
TestResult = namedtuple('TestResult', ('ok', 'log_file'))
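# NOTE: TestResult mirrors the (ok, log_file) shape of suite results; it is
# presumably consumed by the suite handlers registered at runtime via
# register_handler() below.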


class IncludeLoader(yaml.SafeLoader):

    def __init__(self, stream):
        self._root = os.path.split(stream.name)[0]
        super(IncludeLoader, self).__init__(stream)

    def include(self, node):
        config = self.construct_mapping(node)
        filename = os.path.join(self._root, config['path'])

        def _append_prefix(lst, prefix):
            for idx, val in enumerate(lst):
                if isinstance(val, str):
                    lst[idx] = os.path.join(prefix, lst[idx])
                elif isinstance(val, dict):
                    # dict.values() is a view in Python 3 and cannot be indexed directly
                    _append_prefix(list(val.values())[0], prefix)
                else:
                    raise Exception('Unsupported list element: ' + str(val))

        with open(filename, 'r') as f:
            data = yaml.load(f, IncludeLoader)
            if data is not None and 'prefix' in config:
                _append_prefix(data, config['prefix'])
            return data


IncludeLoader.add_constructor('!include', IncludeLoader.include)
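
# Illustrative usage of the custom `!include` tag (hypothetical file names):
# a tests file can pull in another YAML list, optionally prefixing every
# entry with a directory:
#
#   - !include { path: more_tests.yaml, prefix: subdir }
#
# If more_tests.yaml contains `- foo.robot`, the entry expands to
# `subdir/foo.robot`.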


def prepare_parser():
    parser = argparse.ArgumentParser(
        epilog="""The -n/--repeat and -N/--retry options are not mutually exclusive.
        For example, "-n2 -N5" would repeat the test(s) twice with a "tolerance"
        of up to 4 failures each time. This means each test case would run from 2 to 10 times."""
    )

    parser.add_argument("tests",
                        help="List of test files.",
                        nargs='*')

    parser.add_argument("-f", "--fixture",
                        dest="fixture",
                        help="Fixture to test.",
                        metavar="FIXTURE")

    parser.add_argument("-n", "--repeat",
                        dest="iteration_count",
                        nargs="?",
                        type=int,
                        const=0,
                        default=1,
                        help="Repeat tests a number of times (no-flag: 1, no-value: infinite).")

    parser.add_argument("-N", "--retry",
                        dest="retry_count",
                        type=int,
                        default=1,
                        help="Run tests up to a number of times (like -n, but stops on success; must be >0).")

    parser.add_argument("-d", "--debug",
                        dest="debug_mode",
                        action="store_true",
                        default=False,
                        help="Debug mode.")

    parser.add_argument("-o", "--output",
                        dest="output_file",
                        action="store",
                        default=None,
                        help="Output file, default STDOUT.")

    parser.add_argument("-b", "--buildbot",
                        dest="buildbot",
                        action="store_true",
                        default=False,
                        help="Buildbot mode. Before running tests prepare the environment, i.e., create the tap0 interface.")

    parser.add_argument("-t", "--tests",
                        dest="tests_file",
                        action="store",
                        default=None,
                        help="Path to a file with a list of assemblies with tests to run. This is ignored if any test file is passed as a positional argument.")

    parser.add_argument("-T", "--type",
                        dest="test_type",
                        action="store",
                        default="all",
                        help="Type of test to execute (all by default).")

    parser.add_argument("-r", "--results-dir",
                        dest="results_directory",
                        action="store",
                        default=os.path.join(this_path, 'tests'),
                        type=os.path.abspath,
                        help="Location where test results should be stored.")

    parser.add_argument("--run-gdb",
                        dest="run_gdb",
                        action="store_true",
                        help="Run tests under GDB control.")

    parser.add_argument("--include",
                        default=None,
                        action="append",
                        help="Run only tests marked with a tag.")

    parser.add_argument("--exclude",
                        default=['skipped'],
                        action="append",
                        help="Do not run tests marked with a tag.")

    parser.add_argument("--stop-on-error",
                        dest="stop_on_error",
                        action="store_true",
                        default=False,
                        help="Terminate immediately on the first test failure.")

    parser.add_argument("-j", "--jobs",
                        dest="jobs",
                        action="store",
                        default=1,
                        type=int,
                        help="Maximum number of parallel tests.")

    parser.add_argument("--keep-temporary-files",
                        dest="keep_temps",
                        action="store_true",
                        default=False,
                        help="Don't clean temporary files on exit.")

    parser.add_argument("--save-logs",
                        choices=("onfail", "always"),
                        default="onfail",
                        help="When to save Renode logs. Defaults to 'onfail'. This also affects --keep-renode-output, if enabled.")

    parser.add_argument("--perf-output-path",
                        dest="perf_output_path",
                        default=None,
                        help="Generate perf.data from tests in the specified directory.")

    parser.add_argument("--runner",
                        dest="runner",
                        action="store",
                        default=None,
                        help=".NET runner.")

    parser.add_argument("--net",
                        dest="discarded",
                        action="store_const",
                        const="dotnet",
                        help="Flag is deprecated and has no effect.")

    if platform != "win32":
        parser.add_argument("-p", "--port",
                            dest="port",
                            action="store",
                            default=None,
                            help="Debug port.")

        parser.add_argument("-s", "--suspend",
                            dest="suspend",
                            action="store_true",
                            default=False,
                            help="Suspend the test waiting for a debugger.")

    return parser


def call_or_die(to_call, error_message):
    ret_code = subprocess.call(to_call)
    if ret_code != 0:
        print(error_message)
        sys.exit(ret_code)


def setup_tap():
    call_or_die(['sudo', 'tunctl', '-d', 'tap0'], 'Error while removing old tap0 interface')
    call_or_die(['sudo', 'tunctl', '-t', 'tap0', '-u', str(os.getuid())], 'Error while creating tap0 interface')
    call_or_die(['sudo', '-n', 'ip', 'link', 'set', 'tap0', 'up'], 'Error while setting interface state')
    call_or_die(['sudo', '-n', 'ip', 'addr', 'add', '192.0.2.1/24', 'dev', 'tap0'], 'Error while setting the IP address')


def parse_tests_file(path):

    def _process(data, result):
        if data is None:
            return
        for entry in data:
            if entry is None:
                continue
            if isinstance(entry, list):
                _process(entry, result)
            else:
                result.append(entry)

    result = []
    with open(path) as f:
        data = yaml.load(f, Loader=IncludeLoader)
        _process(data, result)
    return result
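
# Illustrative tests-file layout (hypothetical paths): nested lists are
# flattened by _process, while mapping entries (named groups) are passed
# through unchanged for split_tests_into_groups to handle:
#
#   - tests/standalone.robot
#   - my_group:
#       - tests/first.robot
#       - tests/second.robot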


def handle_options(options):
    if options.buildbot:
        print("Preparing Environment")
        setup_tap()
    if options.debug_mode:
        print("Running in debug mode.")
    elif platform != "win32" and (options.port is not None or options.suspend):
        print('Port/suspend options can be used in debug mode only.')
        sys.exit(1)
    if 'FIXTURE' in os.environ:
        options.fixture = os.environ['FIXTURE']
    if options.fixture:
        print("Testing fixture: " + options.fixture)

    if options.tests:
        tests_collection = options.tests
    elif options.tests_file is not None:
        tests_collection = parse_tests_file(options.tests_file)
    else:
        tests_collection = []
    options.tests = split_tests_into_groups(tests_collection, options.test_type)

    options.configuration = 'Debug' if options.debug_mode else 'Release'

    if options.remote_server_full_directory is not None:
        if not os.path.isabs(options.remote_server_full_directory):
            options.remote_server_full_directory = os.path.join(this_path, options.remote_server_full_directory)
    else:
        options.remote_server_full_directory = os.path.join(options.remote_server_directory_prefix, options.configuration)

    try:
        # Try to infer the runner based on the build type
        with open(os.path.join(options.remote_server_full_directory, "build_type"), "r") as f:
            options.runner = f.read().strip()
        if platform == "win32" and options.runner != "dotnet":
            options.runner = "none"  # .NET Framework applications run natively on Windows
    except OSError:
        # Fall back to the explicitly provided runner, or the platform's default if nothing was passed
        if options.runner is None:
            options.runner = "mono" if platform.startswith("linux") or platform == "darwin" else "none"

    # Apply the dotnet telemetry opt-out in this script instead of the shell wrappers, as it's
    # portable between OSes; os.environ also propagates the value to child processes
    if options.runner == 'dotnet':
        os.environ["DOTNET_CLI_TELEMETRY_OPTOUT"] = "1"


def register_handler(handler_type, extension, creator, before_parsing=None, after_parsing=None):
    registered_handlers.append({'type': handler_type, 'extension': extension, 'creator': creator, 'before_parsing': before_parsing, 'after_parsing': after_parsing})
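
# A sketch of the interface each registered handler is expected to provide,
# inferred from the call sites in this module (not from a formal spec):
# 'creator' is a callable that turns a test-file path into a suite object
# exposing check(), prepare(), run(), cleanup(), should_retry_suite() and
# tests_failed_due_to_renode_crash(), and whose type additionally offers
# find_failed_tests() and find_rerun_tests().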


def split_tests_into_groups(tests, test_type):

    def _handle_entry(test_type, path, result):
        if not os.path.exists(path):
            print("Path {} does not exist. Quitting ...".format(path))
            return False
        for handler in registered_handlers:
            if (test_type == 'all' or handler['type'] == test_type) and path.endswith(handler['extension']):
                result.append(handler['creator'](path))
        return True

    parallel_group_counter = 0
    test_groups = {}

    for entry in tests:
        if isinstance(entry, dict):
            group_name = list(entry.keys())[0]
            if group_name not in test_groups:
                test_groups[group_name] = []
            for inner_entry in entry[group_name]:
                if not _handle_entry(test_type, inner_entry, test_groups[group_name]):
                    return None
        elif isinstance(entry, str):
            group_name = '__NONE_' + str(parallel_group_counter) + '__'
            parallel_group_counter += 1
            if group_name not in test_groups:
                test_groups[group_name] = []
            if not _handle_entry(test_type, entry, test_groups[group_name]):
                return None
        else:
            print("Unexpected test entry: " + str(entry))
            return None

    return test_groups
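
# The returned mapping keeps suites that share a group together, e.g.
# (hypothetical names):
#
#   {'my_group': [<suite a>, <suite b>], '__NONE_0__': [<suite c>]}
#
# Each value is processed by a single run_test_group() call; distinct groups
# may run in parallel when --jobs is greater than 1.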


def configure_output(options):
    options.output = sys.stdout
    if options.output_file is not None:
        try:
            # open for appending (not reading) - this function is re-invoked in
            # worker processes, which must not truncate each other's output
            options.output = open(options.output_file, 'a')
        except Exception:
            print("Failed to open output file. Falling back to STDOUT.")


def run_test_group(args):

    group, options, test_id = args

    iteration_counter = 0
    group_failed = False
    log_files = set()

    # this function will be called in a separate
    # context (due to the pool.map_async) and
    # needs the stdout to be reconfigured
    configure_output(options)

    while options.iteration_count == 0 or iteration_counter < options.iteration_count:
        iteration_counter += 1

        if options.iteration_count > 1:
            print("Running tests iteration {} of {}...".format(iteration_counter, options.iteration_count))
        elif options.iteration_count == 0:
            print("Running tests iteration {}...".format(iteration_counter))

        for suite in group:
            retry_suites_counter = 0
            should_retry_suite = True
            suite_failed = False

            try:
                while should_retry_suite and retry_suites_counter < options.retry_count:
                    retry_suites_counter += 1

                    if retry_suites_counter > 1:
                        print("Retrying suite, attempt {} of {}...".format(retry_suites_counter, options.retry_count))

                    # we need to collect log files here instead of appending to a global list
                    # in each suite runner because this function will be called in a multiprocessing
                    # context when using the --jobs argument, as mentioned above
                    ok, suite_log_files = suite.run(options,
                                                    run_id=test_id if options.jobs != 1 else 0,
                                                    iteration_index=iteration_counter,
                                                    suite_retry_index=retry_suites_counter - 1)
                    log_files.update((type(suite), log_file) for log_file in suite_log_files)
                    if ok:
                        suite_failed = False
                        should_retry_suite = False
                    else:
                        suite_failed = True
                        should_retry_suite = suite.should_retry_suite(options, iteration_counter, retry_suites_counter - 1)
                        if options.retry_count > 1 and not should_retry_suite:
                            print("No Robot<->Renode connection issues were detected to warrant a suite retry - giving up.")
            except Exception:
                print(f"Exception occurred when running {suite.path}:")
                import traceback
                # print_exc is equivalent here and, unlike the single-argument
                # form of print_exception, works on Python versions before 3.10
                traceback.print_exc()
                raise

            if suite_failed:
                group_failed = True
        if options.stop_on_error and group_failed:
            break

    options.output.flush()
    return (group_failed, log_files)
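
# Interplay of -n/--repeat and -N/--retry (see the parser epilog): with
# "-n2 -N5" the outer while loop above performs 2 iterations and each suite
# gets up to 5 attempts per iteration, i.e. 2 to 10 runs per suite.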

def print_failed_tests(options):
    for handler in registered_handlers:
        handler_obj = handler['creator']
        failed = handler_obj.find_failed_tests(options.results_directory)

        if failed is not None:
            def _print_helper(what):
                for i, fail in enumerate(failed[what], 1):
                    print("\t{0}. {1}".format(i, fail))

            print("Failed {} critical tests:".format(handler['type']))
            _print_helper('mandatory')
            if 'non_critical' in failed and failed['non_critical']:
                print("Failed {} non-critical tests:".format(handler['type']))
                _print_helper('non_critical')
            print("------")

def print_rerun_trace(options):
    for handler in registered_handlers:
        handler_obj = handler["creator"]
        reruns = handler_obj.find_rerun_tests(options.results_directory)
        if not reruns:
            continue

        did_retry = False
        if options.retry_count != 1:
            for trace in reruns.values():
                test_case_retry_occurred = any(x["nth"] > 1 for x in trace)
                suite_retry_occurred = any(x["label"].endswith("retry1") for x in trace)
                if test_case_retry_occurred or suite_retry_occurred:
                    did_retry = True
                    break
        if options.iteration_count == 1 and not did_retry:
            return
        elif options.iteration_count == 1 and did_retry:
            print("Some tests were retried:")
        elif options.iteration_count != 1 and not did_retry:
            print(f"Ran {options.iteration_count} iterations:")
        elif options.iteration_count != 1 and did_retry:
            print(f"Ran {options.iteration_count} iterations, some tests were retried:")

        trace_index = 0
        for test, trace in reruns.items():
            n_runs = sum(x["nth"] for x in trace)
            has_failed = not all(x["nth"] == 1 and x["status"] == "PASS" for x in trace)
            if n_runs == 1 or not has_failed:
                # Don't mention tests that were run only once or that never
                # failed. It CAN happen that n_runs > 1 and has_failed == False,
                # when another test in the same suite triggers a suite retry.
                continue
            trace_index += 1
            print(f"\t{trace_index}. {test} was started {n_runs} times:")
            for trace_entry in trace:
                # relies on the insertion order of the trace entry keys:
                # label, status, nth, tags, crash
                label, status, nth, tags, crash = trace_entry.values()
                print("\t     {}:  {} {:<9} {}{}{}".format(
                    label, nth,
                    "attempt," if nth == 1 else "attempts,",
                    status,
                    f" [{', '.join(tags)}]" if tags else "",
                    " (crash detected)" if crash else "",
                ))
        print("------")

# Analyzes logs before they are cleaned up to determine
# whether any test failures were caused by a Renode crash.
#
# Returns:
# - TRUE only when a test failed on its final run AND
#   that last failure was caused by a crash
# - FALSE if the test passed, or when one of the runs crashed
#   but the final retry failed for other reasons, such as a wrong result
#
# When running multiple test suites, returns TRUE if ANY of them failed due to a crash.
def failed_due_to_crash(options) -> bool:
    for group in options.tests:
        for suite in options.tests[group]:
            if suite.tests_failed_due_to_renode_crash():
                return True

    return False
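
# Exit codes of run(): 0 - all tests passed; 1 - no tests were selected or
# some tests failed; 2 - at least one failure was caused by a Renode crash.
# A build failure in suite.prepare() exits with the builder's return code.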
def run():
    parser = prepare_parser()
    for handler in registered_handlers:
        if 'before_parsing' in handler and handler['before_parsing'] is not None:
            handler['before_parsing'](parser)

    options = parser.parse_args()
    handle_options(options)

    if not options.tests:
        sys.exit(1)

    for handler in registered_handlers:
        if 'after_parsing' in handler and handler['after_parsing'] is not None:
            handler['after_parsing'](options)

    configure_output(options)

    print("Preparing suites")

    test_id = 0
    args = []
    for group in options.tests.values():
        args.append((group, options, test_id))
        test_id += 1

    for group in options.tests:
        for suite in options.tests[group]:
            suite.check(options, number_of_runs=test_id if options.jobs != 1 else 1)

    for group in options.tests:
        for suite in options.tests[group]:
            res = suite.prepare(options)
            if res is not None and res != 0:
                print("Build failure, not running tests.")
                sys.exit(res)

    print("Starting suites")

    # file objects cannot be pickled, so the 'output' field must not be
    # passed to the workers via 'pool.map_async'; each worker reopens it
    # (see run_test_group) and the value is restored below
    options.output = None

    if options.jobs == 1:
        tests_failed, logs = zip(*map(run_test_group, args))
    else:
        multiprocessing.set_start_method("spawn")
        pool = multiprocessing.Pool(processes=options.jobs)
        # this get is a hack - see: https://stackoverflow.com/a/1408476/980025
        # we use `async` + `get` in order to allow "Ctrl+C" to be handled correctly;
        # otherwise it would not be possible to abort tests in progress
        tests_failed, logs = zip(*pool.map_async(run_test_group, args).get(999999))
        pool.close()
        print("Waiting for all processes to exit")
        pool.join()

    tests_failed = any(tests_failed)
    logs = set().union(*logs)
    logs_per_type = defaultdict(list)
    for suite_type, log in logs:
        logs_per_type[suite_type].append(log)

    configure_output(options)

    print("Cleaning up suites")

    # check if a Renode crash caused a failed test, based on the logs of the
    # tested suites, before the log files are cleaned up
    test_failed_due_to_crash: bool = tests_failed and failed_due_to_crash(options)

    for group in options.tests:
        for suite in options.tests[group]:
            type(suite).log_files = logs_per_type[type(suite)]
            suite.cleanup(options)

    options.output.flush()
    if options.output is not sys.stdout:
        options.output.close()

    if tests_failed:
        print("Some tests failed :( See the list of failed tests below and logs for details!")
        print_failed_tests(options)
        print_rerun_trace(options)
        if test_failed_due_to_crash:
            print('Renode crashed during testing and caused a failure', file=sys.stderr)
            sys.exit(2)
        sys.exit(1)
    print("Tests finished successfully :)")
    print_rerun_trace(options)