#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
# Copyright (c) 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Zephyr Test Runner (twister)

Also check the "User and Developer Guides" at https://docs.zephyrproject.org/

This script scans for the set of unit test applications in the git
repository and attempts to execute them. By default, it tries to
build each test case on one platform per architecture, using a precedence
list defined in an architecture configuration file, and if possible
run the tests in any available emulators or simulators on the system.

Test cases are detected by the presence of a 'testcase.yaml' or 'sample.yaml'
file in the application's project directory. This file may contain one or more
blocks, each identifying a test scenario. The title of the block is a name for
the test case, which only needs to be unique among the test cases specified in
that testcase metadata. The full canonical name for each test case is <path to
test case>/<block>.

Each test block in the testcase metadata can define the following key/value
pairs:

  tags: <list of tags> (required)
    A set of string tags for the testcase. Usually pertains to
    functional domains but can be anything. Command line invocations
    of this script can filter the set of tests to run based on tag.

  skip: <True|False> (default False)
    skip testcase unconditionally. This can be used for broken tests.

  slow: <True|False> (default False)
    Don't build or run this test case unless --enable-slow was passed
    in on the command line. Intended for time-consuming test cases
    that are only run under certain circumstances, like daily
    builds.

  extra_args: <list of extra arguments>
    Extra cache entries to pass to CMake when building or running the
    test case.

  extra_configs: <list of extra configurations>
    Extra configuration options to be merged with a master prj.conf
    when building or running the test case.

  build_only: <True|False> (default False)
    If true, don't try to run the test even if the selected platform
    supports it.

  build_on_all: <True|False> (default False)
    If true, attempt to build test on all available platforms.

  depends_on: <list of features>
    A board or platform can announce what features it supports; this option
    will enable the test only on those platforms that provide this feature.

  min_ram: <integer>
    minimum amount of RAM needed for this test to build and run. This is
    compared with information provided by the board metadata.

  min_flash: <integer>
    minimum amount of ROM needed for this test to build and run. This is
    compared with information provided by the board metadata.

  timeout: <number of seconds>
    Length of time to run test in emulator before automatically killing it.
    Defaults to 60 seconds.

  arch_allow: <list of arches, such as x86, arm, arc>
    Set of architectures that this test case should only be run for.

  arch_exclude: <list of arches, such as x86, arm, arc>
    Set of architectures that this test case should not run on.

  platform_allow: <list of platforms>
    Set of platforms that this test case should only be run for.

  platform_exclude: <list of platforms>
    Set of platforms that this test case should not run on.

  extra_sections: <list of extra binary sections>
    When computing sizes, twister will report errors if it finds
    extra, unexpected sections in the Zephyr binary unless they are named
    here. They will not be included in the size calculation.

  filter: <expression>
    Filter whether the testcase should be run by evaluating an expression
    against an environment containing the following values:

    { ARCH : <architecture>,
      PLATFORM : <platform>,
      <all CONFIG_* key/value pairs in the test's generated defconfig>,
      <all DT_* key/value pairs in the test's generated device tree file>,
      <all CMake key/value pairs in the test's generated CMakeCache.txt file>,
      *<env>: any environment variable available
    }

    The grammar for the expression language is as follows:

    expression ::= expression "and" expression
                 | expression "or" expression
                 | "not" expression
                 | "(" expression ")"
                 | symbol "==" constant
                 | symbol "!=" constant
                 | symbol "<" number
                 | symbol ">" number
                 | symbol ">=" number
                 | symbol "<=" number
                 | symbol "in" list
                 | symbol ":" string
                 | symbol

    list ::= "[" list_contents "]"

    list_contents ::= constant
                    | list_contents "," constant

    constant ::= number
               | string


    For the case where expression ::= symbol, it evaluates to true
    if the symbol is defined to a non-empty string.

    Operator precedence, starting from lowest to highest:

        or (left associative)
        and (left associative)
        not (right associative)
        all comparison operators (non-associative)
    arch_allow, arch_exclude, platform_allow, platform_exclude
    are all syntactic sugar for these expressions. For instance,

        arch_exclude = x86 arc

    is the same as:

        filter = not ARCH in ["x86", "arc"]

    The ':' operator compiles the string argument as a regular expression,
    and then returns a true value only if the symbol's value in the environment
    matches. For example, if CONFIG_SOC="stm32f107xc" then

        filter = CONFIG_SOC : "stm.*"

    would match it.

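    Putting it together, a hypothetical testcase.yaml using several of these
    keys might look like the following (the scenario name, tag and values
    below are purely illustrative):

        tests:
          sample.hypothetical.basic:
            tags: kernel
            min_ram: 16
            platform_exclude: qemu_x86
            extra_configs:
              - CONFIG_ASSERT=y
            filter: not CONFIG_COVERAGE
            timeout: 120
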
The set of test cases that actually run depends on directives in the testcase
files and options passed in on the command line. If there is any confusion,
running with -v or examining the discard report (twister_discard.csv)
can help show why particular test cases were skipped.

Metrics (such as pass/fail state and binary size) for the last code
release are stored in scripts/release/twister_last_release.csv.
To update this, pass the --all --release options.

To load arguments from a file, write '+' before the file name, e.g.,
+file_name. File content must be one or more valid arguments separated by
line breaks instead of whitespace.
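
For example, an arguments file named "quick.args" (a hypothetical name)
containing:

    -v
    --testcase-root
    tests/kernel

could be passed as: ./scripts/twister +quick.args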

Most users will simply run the script with no arguments.

"""

import os
import argparse
import sys
import logging
import time
import itertools
import shutil
from collections import OrderedDict
import multiprocessing
from itertools import islice
import csv
from colorama import Fore
from pathlib import Path
from multiprocessing.managers import BaseManager
import queue

ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
    # This file has been zephyr/scripts/twister for years,
    # and that is not going to change anytime soon. Let the user
    # run this script as ./scripts/twister without making them
    # set ZEPHYR_BASE.
    ZEPHYR_BASE = str(Path(__file__).resolve().parents[1])

    # Propagate this decision to child processes.
    os.environ['ZEPHYR_BASE'] = ZEPHYR_BASE

    print(f'ZEPHYR_BASE unset, using "{ZEPHYR_BASE}"')

try:
    from anytree import RenderTree, Node, find
except ImportError:
    print("Install the anytree module to use the --test-tree option")

try:
    from tabulate import tabulate
except ImportError:
    print("Install tabulate python module with pip to use --device-testing option.")

sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))

from twisterlib import HardwareMap, TestSuite, SizeCalculator, CoverageTool, ExecutionCounter

logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)

def size_report(sc):
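    """Log a per-section size table (VMA/LMA/size/type) followed by the
    ROM and RAM totals computed by the given SizeCalculator instance."""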
    logger.info(sc.filename)
    logger.info("SECTION NAME             VMA        LMA     SIZE  HEX SZ TYPE")
    for i in range(len(sc.sections)):
        v = sc.sections[i]

        logger.info("%-17s 0x%08x 0x%08x %8d 0x%05x %-7s" %
                    (v["name"], v["virt_addr"], v["load_addr"], v["size"], v["size"],
                     v["type"]))

    logger.info("Totals: %d bytes (ROM), %d bytes (RAM)" %
                (sc.rom_size, sc.ram_size))
    logger.info("")


def export_tests(filename, tests):
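    """Write the given test identifiers to a CSV file. Identifiers are
    expected to be dotted names (e.g. kernel.fifo.poll); the first component
    becomes the section and the second the subsection."""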
    with open(filename, "wt") as csvfile:
        fieldnames = ['section', 'subsection', 'title', 'reference']
        cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
        for test in tests:
            data = test.split(".")
            if len(data) > 1:
                subsec = " ".join(data[1].split("_")).title()
                rowdict = {
                    "section": data[0].capitalize(),
                    "subsection": subsec,
                    "title": test,
                    "reference": test
                }
                cw.writerow(rowdict)
            else:
                logger.error("{} can't be exported".format(test))


def parse_arguments():
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.fromfile_prefix_chars = "+"

    case_select = parser.add_argument_group("Test case selection",
                                            """
Artificially long but functional example:
    $ ./scripts/twister -v     \\
      --testcase-root tests/ztest/base    \\
      --testcase-root tests/kernel   \\
      --test      tests/ztest/base/testing.ztest.verbose_0  \\
      --test      tests/kernel/fifo/fifo_api/kernel.fifo.poll

   "kernel.fifo.poll" is one of the test section names in
                                 __/fifo_api/testcase.yaml
    """)

    parser.add_argument("--force-toolchain", action="store_true",
                        help="Do not filter based on toolchain, use the set "
                             "toolchain unconditionally")
    parser.add_argument(
        "-p", "--platform", action="append",
        help="Platform filter for testing. This option may be used multiple "
             "times. Testcases will only be built/run on the platforms "
             "specified. If this option is not used, then platforms marked "
             "as default in the platform metadata file will be chosen "
             "to build and test. ")

    parser.add_argument("-P", "--exclude-platform", action="append", default=[],
        help="""Exclude platforms and do not build or run any tests
        on those platforms. This option can be called multiple times.
        """
        )

    parser.add_argument(
        "-a", "--arch", action="append",
        help="Arch filter for testing. Takes precedence over --platform. "
             "If unspecified, test all arches. Multiple invocations "
             "are treated as a logical 'or' relationship")
    parser.add_argument(
        "-t", "--tag", action="append",
        help="Specify tags to restrict which tests to run by tag value. "
             "Default is to not do any tag filtering. Multiple invocations "
             "are treated as a logical 'or' relationship")
    parser.add_argument("-e", "--exclude-tag", action="append",
                        help="Specify tags of tests that should not run. "
                             "Default is to run all tests with all tags.")
    case_select.add_argument(
        "-f",
        "--only-failed",
        action="store_true",
        help="Run only those tests that failed the previous twister run.")

    parser.add_argument(
        "--retry-failed", type=int, default=0,
        help="Retry failing tests again, up to the number of times specified.")

    parser.add_argument(
        "--retry-interval", type=int, default=60,
        help="Retry failing tests after the specified period of time.")

    test_xor_subtest = case_select.add_mutually_exclusive_group()

    test_xor_subtest.add_argument(
        "-s", "--test", action="append",
        help="Run only the specified test cases. These are named by "
             "<path/relative/to/Zephyr/base/section.name.in.testcase.yaml>")

    test_xor_subtest.add_argument(
        "--sub-test", action="append",
        help="""Recursively find sub-test functions and run the entire
        test section where they were found, including all sibling test
        functions. Sub-tests are named by:
        section.name.in.testcase.yaml.function_name_without_test_prefix
        Example: kernel.fifo.poll.fifo_loop
        """)

    parser.add_argument(
        "-l", "--all", action="store_true",
        help="Build/test on all platforms. Any --platform arguments "
             "are ignored.")

    parser.add_argument(
        "-o", "--report-dir",
        help="""Output reports containing results of the test run into the
        specified directory.
        The output will be both in CSV and JUNIT format
        (twister.csv and twister.xml).
        """)
    parser.add_argument(
        "--json-report", action="store_true",
        help="""Generate a JSON file with all test results. [Experimental]
        """)
    parser.add_argument(
        "--platform-reports", action="store_true",
        help="""Create individual reports for each platform.
        """)
    parser.add_argument(
        "--report-name",
        help="""Create a report with a custom name.
        """)

    parser.add_argument(
        "--report-suffix",
        help="""Add a suffix to all generated file names, for example to add a
        version or a commit ID.
        """)

    parser.add_argument("--report-excluded",
                        action="store_true",
                        help="""List all tests that are never run based on current scope and
            coverage. If you are looking for accurate results, run this with
            --all, but this will take a while...""")

    parser.add_argument("--compare-report",
                        help="Use this report file for size comparison")

    parser.add_argument(
        "-B", "--subset",
        help="Only run a subset of the tests, 1/4 for running the first 25%%, "
             "3/5 means run the 3rd fifth of the total. "
             "This option is useful when running a large number of tests on "
             "different hosts to speed up execution time.")

    parser.add_argument(
        "-N", "--ninja", action="store_true",
        help="Use the Ninja generator with CMake")

    parser.add_argument(
        "-y", "--dry-run", action="store_true",
        help="""Create the filtered list of test cases, but don't actually
        run them. Useful if you're just interested in the discard report
        generated for every run and saved in the specified output
        directory (twister_discard.csv).
        """)

    parser.add_argument("--list-tags", action="store_true",
                        help="list all tags in selected tests")

    case_select.add_argument("--list-tests", action="store_true",
                             help="""List all sub-test functions recursively found in
        all --testcase-root arguments. Note that different sub-tests can share
        the same section name and come from different directories.
        The output is flattened and reports --sub-test names only,
        not their directories. For instance net.socket.getaddrinfo_ok
        and net.socket.fd_set belong to different directories.
        """)

    case_select.add_argument("--test-tree", action="store_true",
                             help="""Output the testsuite in a tree form""")

    case_select.add_argument("--list-test-duplicates", action="store_true",
                             help="""List tests with duplicate identifiers.
        """)

    parser.add_argument("--export-tests", action="store",
                        metavar="FILENAME",
                        help="Export test case metadata to a file in CSV format. "
                             "Test instances can be exported per target by supplying "
                             "the platform name using the --platform option. (Tests for "
                             "only one platform can be exported at a time.)")

    parser.add_argument("--timestamps",
                        action="store_true",
                        help="Print all messages with time stamps")

    parser.add_argument(
        "-r", "--release", action="store_true",
        help="Update the benchmark database with the results of this test "
             "run. Intended to be run by CI when tagging an official "
             "release. This database is used as a basis for comparison "
             "when looking for deltas in metrics such as footprint")

    parser.add_argument("-W", "--disable-warnings-as-errors", action="store_true",
                        help="Do not treat warning conditions as errors")

    parser.add_argument("--overflow-as-errors", action="store_true",
                        help="Treat RAM/SRAM overflows as errors")

    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        default=0,
        help="Emit debugging information, call multiple times to increase "
             "verbosity")
    parser.add_argument(
        "-i", "--inline-logs", action="store_true",
        help="Upon test failure, print relevant log data to stdout "
             "instead of just a path to it")
    parser.add_argument("--log-file", metavar="FILENAME", action="store",
                        help="log also to file")
    parser.add_argument(
        "-m", "--last-metrics", action="store_true",
        help="Instead of comparing metrics from the last --release, "
             "compare with the results of the previous twister "
             "invocation")
    parser.add_argument(
        "-u",
        "--no-update",
        action="store_true",
        help="do not update the results of the last twister run")

    parser.add_argument(
        "-G",
        "--integration",
        action="store_true",
        help="Run integration tests")

    case_select.add_argument(
        "-F",
        "--load-tests",
        metavar="FILENAME",
        action="store",
        help="Load list of tests and platforms to be run from file.")

    parser.add_argument(
        "--quarantine-list",
        metavar="FILENAME",
        help="Load list of test scenarios under quarantine. The entries in "
             "the file need to correspond to the test scenario names as in "
             "the corresponding tests' .yaml files. These scenarios "
             "will be skipped with quarantine as the reason")

    parser.add_argument(
        "--quarantine-verify",
        action="store_true",
        help="Use the list of test scenarios under quarantine and run them "
             "to verify their current status")

    case_select.add_argument(
        "-E",
        "--save-tests",
        metavar="FILENAME",
        action="store",
        help="Append list of tests and platforms to be run to file.")

    test_or_build = parser.add_mutually_exclusive_group()
    test_or_build.add_argument(
        "-b", "--build-only", action="store_true",
        help="Only build the code, do not execute any of it in QEMU")

    test_or_build.add_argument(
        "--test-only", action="store_true",
        help="""Only run device tests with current artifacts, do not build
             the code""")

    parser.add_argument(
        "--cmake-only", action="store_true",
        help="Only run cmake, do not build or run.")

    parser.add_argument(
        "--filter", choices=['buildable', 'runnable'],
        default='buildable',
        help="""Filter tests to be built and executed. By default everything is
        built and if a test is runnable (emulation or a connected device), it
        is run. This option allows for example to only build tests that can
        actually be run. Runnable is a subset of buildable.""")


    parser.add_argument(
        "-M", "--runtime-artifact-cleanup", action="store_true",
        help="Delete artifacts of passing tests.")

    parser.add_argument(
        "-j", "--jobs", type=int,
        help="Number of jobs for building, defaults to number of CPU threads, "
             "overcommitted by factor 2 when --build-only")

    parser.add_argument(
        "--show-footprint", action="store_true",
        help="Show footprint statistics and deltas since last release."
    )
    parser.add_argument(
        "-H", "--footprint-threshold", type=float, default=5,
        help="When checking test case footprint sizes, warn the user if "
             "the new app size is greater than the specified percentage "
             "from the last release. Default is 5. 0 to warn on any "
             "increase in app size")
    parser.add_argument(
        "-D", "--all-deltas", action="store_true",
        help="Show all footprint deltas, positive or negative. Implies "
             "--footprint-threshold=0")
    parser.add_argument(
        "-O", "--outdir",
        default=os.path.join(os.getcwd(), "twister-out"),
        help="Output directory for logs and binaries. "
             "Default is 'twister-out' in the current directory. "
             "This directory will be cleaned unless '--no-clean' is set. "
             "The '--clobber-output' option controls what cleaning does.")
    parser.add_argument(
        "-c", "--clobber-output", action="store_true",
        help="Cleaning the output directory will simply delete it instead "
             "of the default policy of renaming.")
    parser.add_argument(
        "-n", "--no-clean", action="store_true",
        help="Re-use the outdir before building. Will result in "
             "faster compilation since builds will be incremental.")
    case_select.add_argument(
        "-T", "--testcase-root", action="append", default=[],
        help="Base directory to recursively search for test cases. All "
             "testcase.yaml files under here will be processed. May be "
             "called multiple times. Defaults to the 'samples/' and "
             "'tests/' directories at the base of the Zephyr tree.")

    board_root_list = ["%s/boards" % ZEPHYR_BASE,
                       "%s/scripts/pylib/twister/boards" % ZEPHYR_BASE]

    parser.add_argument(
        "-A", "--board-root", action="append", default=board_root_list,
        help="""Directory to search for board configuration files. All .yaml
files in the directory will be processed. The directory should have the same
structure as in the main Zephyr tree: boards/<arch>/<board_name>/""")

    parser.add_argument(
        "-z", "--size", action="append",
        help="Don't run twister. Instead, produce a report to "
             "stdout detailing RAM/ROM sizes on the specified filenames. "
             "All other command line arguments ignored.")
    parser.add_argument(
        "-S", "--enable-slow", action="store_true",
        help="Execute time-consuming test cases that have been marked "
             "as 'slow' in testcase.yaml. Normally these are only built.")

    parser.add_argument(
        "-K", "--force-platform", action="store_true",
        help="""Force testing on selected platforms,
        even if they are excluded in the test configuration (testcase.yaml)"""
    )

    parser.add_argument(
        "--disable-unrecognized-section-test", action="store_true",
        default=False,
        help="Skip the 'unrecognized section' test.")
    parser.add_argument("-R", "--enable-asserts", action="store_true",
                        default=True,
                        help="deprecated, left for compatibility")
    parser.add_argument("--disable-asserts", action="store_false",
                        dest="enable_asserts",
                        help="deprecated, left for compatibility")
    parser.add_argument("-Q", "--error-on-deprecations", action="store_false",
                        help="Error on deprecation warnings.")
    parser.add_argument("--enable-size-report", action="store_true",
                        help="Enable expensive computation of RAM/ROM segment sizes.")

    parser.add_argument(
        "-x", "--extra-args", action="append", default=[],
        help="""Extra CMake cache entries to define when building test cases.
        May be called multiple times. The key-value entries will be
        prefixed with -D before being passed to CMake.

        E.g
        "twister -x=USE_CCACHE=0"
        will translate to
        "cmake -DUSE_CCACHE=0"

        which will ultimately disable ccache.
        """
    )

    parser.add_argument(
        "--emulation-only", action="store_true",
        help="Only build and run emulation platforms")

    parser.add_argument(
        "--device-testing", action="store_true",
        help="Test on device directly. Specify the serial device to "
             "use with the --device-serial option.")

    parser.add_argument(
        "-X", "--fixture", action="append", default=[],
        help="Specify a fixture that a board might support")

    serial = parser.add_mutually_exclusive_group()
    serial.add_argument("--device-serial",
                        help="""Serial device for accessing the board
                        (e.g., /dev/ttyACM0)
                        """)

    serial.add_argument("--device-serial-pty",
                        help="""Script for controlling pseudoterminal.
                        Twister believes that it interacts with a terminal
                        when it actually interacts with the script.

                        E.g "twister --device-testing
                        --device-serial-pty <script>
                        """)

    parser.add_argument("--generate-hardware-map",
                        help="""Probe serial devices connected to this platform
                        and create a hardware map file to be used with
                        --device-testing
                        """)

    parser.add_argument("--persistent-hardware-map", action='store_true',
                        help="""With --generate-hardware-map, tries to use
                        persistent names for serial devices on platforms
                        that support this feature (currently only Linux).
                        """)

    parser.add_argument("--hardware-map",
                        help="""Load hardware map from a file. This will be used
                        for testing on hardware that is listed in the file.
                        """)

    parser.add_argument("--pre-script",
                        help="""Specify a pre-script. This will be executed
                        before the device handler opens the serial port and
                        invokes the runner.
                        """)

    parser.add_argument(
        "--west-flash", nargs='?', const=[],
        help="""Uses west instead of ninja or make to flash when running with
             --device-testing. Supports comma-separated argument list.

        E.g "twister --device-testing --device-serial /dev/ttyACM0
                         --west-flash="--board-id=foobar,--erase"
        will translate to "west flash -- --board-id=foobar --erase"

        NOTE: device-testing must be enabled to use this option.
        """
    )
    parser.add_argument(
        "--west-runner",
        help="""Uses the specified west runner instead of default when running
             with --west-flash.

        E.g "twister --device-testing --device-serial /dev/ttyACM0
                         --west-flash --west-runner=pyocd"
        will translate to "west flash --runner pyocd"

        NOTE: west-flash must be enabled to use this option.
        """
    )

    valgrind_asan_group = parser.add_mutually_exclusive_group()

    valgrind_asan_group.add_argument(
        "--enable-valgrind", action="store_true",
        help="""Run binary through valgrind and check for several memory access
        errors. Valgrind needs to be installed on the host. This option only
        works with host binaries such as those generated for the native_posix
        configuration and is mutually exclusive with --enable-asan.
        """)

    valgrind_asan_group.add_argument(
        "--enable-asan", action="store_true",
        help="""Enable address sanitizer to check for several memory access
        errors. Libasan needs to be installed on the host. This option only
        works with host binaries such as those generated for the native_posix
        configuration and is mutually exclusive with --enable-valgrind.
        """)

    parser.add_argument(
        "--enable-lsan", action="store_true",
        help="""Enable leak sanitizer to check for heap memory leaks.
        Libasan needs to be installed on the host. This option only
        works with host binaries such as those generated for the native_posix
        configuration and when --enable-asan is given.
        """)

    parser.add_argument(
        "--enable-ubsan", action="store_true",
        help="""Enable undefined behavior sanitizer to check for undefined
        behaviour during program execution. It uses an optional runtime library
        to provide better error diagnostics. This option only works with host
        binaries such as those generated for the native_posix configuration.
        """)

    parser.add_argument("--enable-coverage", action="store_true",
                        help="Enable code coverage using gcov.")

    parser.add_argument("-C", "--coverage", action="store_true",
                        help="Generate coverage reports. Implies "
                             "--enable-coverage.")

    parser.add_argument("--coverage-platform", action="append", default=[],
                        help="Platforms to run coverage reports on. "
                             "This option may be used multiple times. "
                             "Defaults to what was selected with --platform.")

    parser.add_argument("--gcov-tool", default=None,
                        help="Path to the gcov tool to use for code coverage "
                             "reports")

    parser.add_argument("--coverage-tool", choices=['lcov', 'gcovr'], default='lcov',
                        help="Tool to use to generate coverage report.")

    parser.add_argument("--coverage-basedir", default=ZEPHYR_BASE,
                        help="Base source directory for coverage report.")

    return parser.parse_args()


def main():
    start_time = time.time()

    options = parse_arguments()
    previous_results = None
    # Cleanup
    if options.no_clean or options.only_failed or options.test_only:
        if os.path.exists(options.outdir):
            print("Keeping artifacts untouched")
    elif options.last_metrics:
        ls = os.path.join(options.outdir, "twister.csv")
        if os.path.exists(ls):
            with open(ls, "r") as fp:
                previous_results = fp.read()
        else:
            sys.exit(f"Can't compare metrics with non-existent file {ls}")
    elif os.path.exists(options.outdir):
        if options.clobber_output:
            print("Deleting output directory {}".format(options.outdir))
            shutil.rmtree(options.outdir)
        else:
            for i in range(1, 100):
                new_out = options.outdir + ".{}".format(i)
                if not os.path.exists(new_out):
                    print("Renaming output directory to {}".format(new_out))
                    shutil.move(options.outdir, new_out)
                    break

    previous_results_file = None
    os.makedirs(options.outdir, exist_ok=True)
    if options.last_metrics and previous_results:
        previous_results_file = os.path.join(options.outdir, "baseline.csv")
        with open(previous_results_file, "w") as fp:
            fp.write(previous_results)

    # create file handler which logs even debug messages
    if options.log_file:
        fh = logging.FileHandler(options.log_file)
    else:
        fh = logging.FileHandler(os.path.join(options.outdir, "twister.log"))

    fh.setLevel(logging.DEBUG)

    # create console handler with a higher log level
    ch = logging.StreamHandler()

    VERBOSE = options.verbose
    if VERBOSE > 1:
        ch.setLevel(logging.DEBUG)
    else:
        ch.setLevel(logging.INFO)

    # create formatter and add it to the handlers
    if options.timestamps:
        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    else:
        formatter = logging.Formatter('%(levelname)-7s - %(message)s')

    formatter_file = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    fh.setFormatter(formatter_file)

    # add the handlers to logger
    logger.addHandler(ch)
    logger.addHandler(fh)

    hwm = HardwareMap()
    if options.generate_hardware_map:
        hwm.scan(persistent=options.persistent_hardware_map)
        hwm.save(options.generate_hardware_map)
        return

    if not options.device_testing and options.hardware_map:
        hwm.load(options.hardware_map)
        logger.info("Available devices:")
        table = []
        hwm.dump(connected_only=True)
        return

    if options.west_runner and options.west_flash is None:
        logger.error("west-runner requires west-flash to be enabled")
        sys.exit(1)

    if options.west_flash and not options.device_testing:
        logger.error("west-flash requires device-testing to be enabled")
        sys.exit(1)

    if options.coverage:
        options.enable_coverage = True

    if not options.coverage_platform:
        options.coverage_platform = options.platform

    if options.size:
        for fn in options.size:
            size_report(SizeCalculator(fn, []))
        sys.exit(0)

    if options.subset:
        subset, sets = options.subset.split("/")
        if int(subset) > 0 and int(sets) >= int(subset):
            logger.info("Running only a subset: %s/%s" % (subset, sets))
        else:
            logger.error("You have provided a wrong subset value: %s." % options.subset)
            return

    if not options.testcase_root:
        options.testcase_root = [os.path.join(ZEPHYR_BASE, "tests"),
                                 os.path.join(ZEPHYR_BASE, "samples")]

    if options.show_footprint or options.compare_report or options.release:
        options.enable_size_report = True

    suite = TestSuite(options.board_root, options.testcase_root, options.outdir)

    # Check version of zephyr repo
    suite.check_zephyr_version()

    # Set testsuite options from command line.
    suite.build_only = options.build_only
    suite.cmake_only = options.cmake_only
    suite.cleanup = options.runtime_artifact_cleanup
    suite.test_only = options.test_only
    suite.enable_slow = options.enable_slow
    suite.device_testing = options.device_testing
    suite.fixtures = options.fixture
    suite.enable_asan = options.enable_asan
    suite.enable_lsan = options.enable_lsan
    suite.enable_ubsan = options.enable_ubsan
    suite.enable_coverage = options.enable_coverage
    suite.enable_valgrind = options.enable_valgrind
    suite.coverage_platform = options.coverage_platform
    suite.inline_logs = options.inline_logs
    suite.enable_size_report = options.enable_size_report
    suite.extra_args = options.extra_args
    suite.west_flash = options.west_flash
    suite.west_runner = options.west_runner
    suite.verbose = VERBOSE
    suite.warnings_as_errors = not options.disable_warnings_as_errors
    suite.integration = options.integration
    suite.overflow_as_errors = options.overflow_as_errors

    if options.ninja:
        suite.generator_cmd = "ninja"
        suite.generator = "Ninja"
    else:
        suite.generator_cmd = "make"
        suite.generator = "Unix Makefiles"

    # Set number of jobs
    if options.jobs:
        suite.jobs = options.jobs
    elif options.build_only:
        suite.jobs = multiprocessing.cpu_count() * 2
    else:
        suite.jobs = multiprocessing.cpu_count()
    logger.info("JOBS: %d" % suite.jobs)

    run_individual_tests = []

    if options.test:
        run_individual_tests = options.test

    num = suite.add_testcases(testcase_filter=run_individual_tests)
    if num == 0:
        logger.error("No test cases found at the specified location...")
        sys.exit(1)
    suite.add_configurations()

    if options.device_testing:
        if options.hardware_map:
            hwm.load(options.hardware_map)
            suite.duts = hwm.duts
            if not options.platform:
                options.platform = []
                for d in hwm.duts:
                    if d.connected:
                        options.platform.append(d.platform)

        elif options.device_serial or options.device_serial_pty:
            if options.platform and len(options.platform) == 1:
                if options.device_serial:
                    hwm.add_device(options.device_serial,
                                   options.platform[0],
                                   options.pre_script,
                                   False)
                else:
                    hwm.add_device(options.device_serial_pty,
                                   options.platform[0],
                                   options.pre_script,
                                   True)

                suite.duts = hwm.duts
            else:
                logger.error("""When --device-testing is used with
                             --device-serial or --device-serial-pty,
                             only one platform is allowed""")

    if suite.load_errors:
        sys.exit(1)

    if options.list_tags:
        tags = set()
        for _, tc in suite.testcases.items():
            tags = tags.union(tc.tags)

        for t in tags:
            print("- {}".format(t))

        return

    if not options.platform and (options.list_tests or options.test_tree or options.list_test_duplicates \
        or options.sub_test or options.export_tests):
        cnt = 0
        all_tests = suite.get_all_tests()

        if options.export_tests:
            export_tests(options.export_tests, all_tests)
            return

        if options.list_test_duplicates:
            import collections
            dupes = [item for item, count in collections.Counter(all_tests).items() if count > 1]
            if dupes:
                print("Tests with duplicate identifiers:")
                for dupe in dupes:
                    print("- {}".format(dupe))
                    for dc in suite.get_testcase(dupe):
                        print("  - {}".format(dc))
            else:
                print("No duplicates found.")
            return

        if options.sub_test:
            for st in options.sub_test:
                subtests = suite.get_testcase(st)
                for sti in subtests:
                    run_individual_tests.append(sti.name)

            if run_individual_tests:
                logger.info("Running the following tests:")
                for test in run_individual_tests:
                    print(" - {}".format(test))
            else:
                logger.info("Tests not found")
                return

        elif options.list_tests or options.test_tree:
            if options.test_tree:
                testsuite = Node("Testsuite")
                samples = Node("Samples", parent=testsuite)
                tests = Node("Tests", parent=testsuite)

            for test in sorted(all_tests):
                cnt = cnt + 1
                if options.list_tests:
                    print(" - {}".format(test))

                if options.test_tree:
                    if test.startswith("sample."):
                        sec = test.split(".")
                        area = find(samples, lambda node: node.name == sec[1] and node.parent == samples)
                        if not area:
                            area = Node(sec[1], parent=samples)

                        t = Node(test, parent=area)
                    else:
                        sec = test.split(".")
                        area = find(tests, lambda node: node.name == sec[0] and node.parent == tests)
                        if not area:
                            area = Node(sec[0], parent=tests)

                        if area and len(sec) > 2:
                            subarea = find(area, lambda node: node.name == sec[1] and node.parent == area)
                            if not subarea:
                                subarea = Node(sec[1], parent=area)
                            t = Node(test, parent=subarea)

            if options.list_tests:
                print("{} total.".format(cnt))

            if options.test_tree:
                for pre, _, node in RenderTree(testsuite):
                    print("%s%s" % (pre, node.name))
            return

    discards = []

    if options.report_suffix:
        last_run = os.path.join(options.outdir, "twister_{}.csv".format(options.report_suffix))
    else:
        last_run = os.path.join(options.outdir, "twister.csv")

    if options.quarantine_list:
        suite.load_quarantine(options.quarantine_list)

    if options.quarantine_verify:
        if not options.quarantine_list:
            logger.error("No quarantine list given to be verified")
            sys.exit(1)
        suite.quarantine_verify = options.quarantine_verify

    if options.only_failed:
        suite.load_from_file(last_run, filter_status=['skipped', 'passed'])
        suite.selected_platforms = set(p.platform.name for p in suite.instances.values())
    elif options.load_tests:
        suite.load_from_file(options.load_tests, filter_status=['skipped', 'error'])
        suite.selected_platforms = set(p.platform.name for p in suite.instances.values())
    elif options.test_only:
        # Get the list of connected hardware and filter tests to only be run on connected hardware
        # in cases where no platform was specified when running the tests.
        # If the platform does not exist in the hardware map, just skip it.
        connected_list = []
        if not options.platform:
            for connected in hwm.connected_hardware:
                if connected['connected']:
                    connected_list.append(connected['platform'])

        suite.load_from_file(last_run, filter_status=['skipped', 'error'],
                             filter_platform=connected_list)
        suite.selected_platforms = set(p.platform.name for p in suite.instances.values())
    else:
        discards = suite.apply_filters(
            enable_slow=options.enable_slow,
            platform=options.platform,
            exclude_platform=options.exclude_platform,
            arch=options.arch,
            tag=options.tag,
            exclude_tag=options.exclude_tag,
            force_toolchain=options.force_toolchain,
            all=options.all,
            emulation_only=options.emulation_only,
            run_individual_tests=run_individual_tests,
            runnable=(options.device_testing or options.filter == 'runnable'),
            force_platform=options.force_platform
        )

    if (options.export_tests or options.list_tests) and options.platform:
        if len(options.platform) > 1:
            logger.error("When exporting tests, only one platform "
                         "should be specified.")
            return

        for p in options.platform:
            inst = suite.get_platform_instances(p)
            if options.export_tests:
                tests = [x.testcase.cases for x in inst.values()]
                merged = list(itertools.chain(*tests))
                export_tests(options.export_tests, merged)
                return

            count = 0
            for i in inst.values():
                for c in i.testcase.cases:
                    print(f"- {c}")
                    count += 1

            print(f"Tests found: {count}")
        return

    if VERBOSE > 1 and discards:
        # If we are using a command line platform filter, there is no need to
        # list every other platform as excluded, we know that already.
        # Show only the discards that apply to the selected platforms on the
        # command line.

        for i, reason in discards.items():
            if options.platform and i.platform.name not in options.platform:
                continue
            logger.debug(
                "{:<25} {:<50} {}SKIPPED{}: {}".format(
                    i.platform.name,
                    i.testcase.name,
                    Fore.YELLOW,
                    Fore.RESET,
                    reason))

    if options.report_excluded:
        all_tests = suite.get_all_tests()
        to_be_run = set()
        for i, p in suite.instances.items():
            to_be_run.update(p.testcase.cases)

        if all_tests - to_be_run:
            print("Tests that never build or run:")
            for not_run in all_tests - to_be_run:
                print("- {}".format(not_run))

        return

    if options.subset:
        # Test instances are sorted depending on the context. For CI runs
        # the execution order is: "plat1-testA, plat1-testB, ...,
        # plat1-testZ, plat2-testA, ...". For hardware tests
        # (device_testing), where multiple physical platforms can run the tests
        # in parallel, it is more efficient to run in the order:
        # "plat1-testA, plat2-testA, ..., plat1-testB, plat2-testB, ..."
        if options.device_testing:
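            # Instance keys look like "<platform>/<test name>", so sorting by
            # the part after the first "/" groups the same test across
            # platforms (assumption based on how the keys are built elsewhere).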
            suite.instances = OrderedDict(sorted(suite.instances.items(),
                                key=lambda x: x[0][x[0].find("/") + 1:]))
        else:
            suite.instances = OrderedDict(sorted(suite.instances.items()))

        # Do calculation based on what is actually going to be run and evaluated
        # at runtime, ignoring the cases we already know are going to be skipped.
        # This fixes an issue where some sets would get a majority of skips and
        # basically run nothing besides filtering.
        to_run = {k : v for k, v in suite.instances.items() if v.status is None}

        subset, sets = options.subset.split("/")
        subset = int(subset)
        sets = int(sets)
        total = len(to_run)
        per_set = int(total / sets)
        num_extra_sets = total - (per_set * sets)

        # To be fairer about the rounding error from integer division, and so
        # the last subset doesn't get overloaded, we add 1 extra to
        # subsets 1..num_extra_sets.
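        # For example, with total=10 and --subset N/3: per_set=3 and
        # num_extra_sets=1, so subset 1 covers indices 0-3 (4 tests),
        # subset 2 covers 4-6 and subset 3 covers 7-9 (3 tests each).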
        if subset <= num_extra_sets:
            start = (subset - 1) * (per_set + 1)
            end = start + per_set + 1
        else:
            base = num_extra_sets * (per_set + 1)
            start = ((subset - num_extra_sets - 1) * per_set) + base
            end = start + per_set

        sliced_instances = islice(to_run.items(), start, end)
        skipped = {k : v for k, v in suite.instances.items() if v.status == 'skipped'}
        suite.instances = OrderedDict(sliced_instances)
        if subset == 1:
            # add all pre-filtered tests that are skipped to the first set to
            # allow for better distribution among all sets.
            suite.instances.update(skipped)

    if options.save_tests:
        suite.csv_report(options.save_tests)
        return

    logger.info("%d test scenarios (%d configurations) selected, %d configurations discarded due to filters." %
                (len(suite.testcases), len(suite.instances), len(discards)))

    if options.device_testing and not options.build_only:
        print("\nDevice testing on:")
        hwm.dump(filtered=suite.selected_platforms)
        print("")

    if options.dry_run:
        duration = time.time() - start_time
        logger.info("Completed in %d seconds" % (duration))
        return

    retries = options.retry_failed + 1
    completed = 0

    BaseManager.register('LifoQueue', queue.LifoQueue)
    manager = BaseManager()
    manager.start()

    results = ExecutionCounter(total=len(suite.instances))
    pipeline = manager.LifoQueue()
    done_queue = manager.LifoQueue()

    suite.update_counting(results, initial=True)
    suite.start_time = start_time

    while True:
        completed += 1

        if completed > 1:
            logger.info("%d Iteration:" % (completed))
            time.sleep(options.retry_interval)  # waiting for the system to settle down
            results.done = results.total - results.failed
            results.failed = results.error

        results = suite.execute(pipeline, done_queue, results)

        while True:
            try:
                inst = done_queue.get_nowait()
            except queue.Empty:
                break
            else:
                inst.metrics.update(suite.instances[inst.name].metrics)
                inst.metrics["handler_time"] = inst.handler.duration if inst.handler else 0
                inst.metrics["unrecognized"] = []
                suite.instances[inst.name] = inst

        print("")

        retries = retries - 1
        # There are cases where failed == error (only build failures);
        # we do not retry build failures.
        if retries == 0 or results.failed == results.error:
            break

    # figure out which report to use for size comparison
    if options.compare_report:
        report_to_use = options.compare_report
    elif options.last_metrics:
        report_to_use = previous_results_file
    else:
        report_to_use = suite.RELEASE_DATA

    suite.footprint_reports(report_to_use,
                            options.show_footprint,
                            options.all_deltas,
                            options.footprint_threshold,
                            options.last_metrics)

    suite.duration = time.time() - start_time
    suite.update_counting(results)

    suite.summary(results, options.disable_unrecognized_section_test)

    if options.coverage:
        if not options.gcov_tool:
            use_system_gcov = False

            for plat in options.coverage_platform:
                ts_plat = suite.get_platform(plat)
                if ts_plat and (ts_plat.type in {"native", "unit"}):
                    use_system_gcov = True

            if use_system_gcov or "ZEPHYR_SDK_INSTALL_DIR" not in os.environ:
                options.gcov_tool = "gcov"
            else:
                options.gcov_tool = os.path.join(os.environ["ZEPHYR_SDK_INSTALL_DIR"],
                                                 "x86_64-zephyr-elf/bin/x86_64-zephyr-elf-gcov")

        logger.info("Generating coverage files...")
        coverage_tool = CoverageTool.factory(options.coverage_tool)
        coverage_tool.gcov_tool = options.gcov_tool
        coverage_tool.base_dir = os.path.abspath(options.coverage_basedir)
        coverage_tool.add_ignore_file('generated')
        coverage_tool.add_ignore_directory('tests')
        coverage_tool.add_ignore_directory('samples')
        coverage_tool.generate(options.outdir)

    if options.device_testing and not options.build_only:
        print("\nHardware distribution summary:\n")
        table = []
        header = ['Board', 'ID', 'Counter']
        for d in hwm.duts:
            if d.connected and d.platform in suite.selected_platforms:
                row = [d.platform, d.id, d.counter]
                table.append(row)
        print(tabulate(table, headers=header, tablefmt="github"))

    suite.save_reports(options.report_name,
                       options.report_suffix,
                       options.report_dir,
                       options.no_update,
                       options.release,
                       options.only_failed,
                       options.platform_reports,
                       options.json_report
                       )

    # FIXME: remove later
    #logger.info(f"failed: {results.failed}, cases: {results.cases}, skipped configurations: {results.skipped_configs}, skipped_cases: {results.skipped_cases}, skipped(runtime): {results.skipped_runtime}, passed: {results.passed}, total: {results.total}, done: {results.done}")

    logger.info("Run completed")
    if results.failed or (suite.warnings and suite.warnings_as_errors):
        sys.exit(1)


if __name__ == "__main__":
    try:
        main()
    finally:
        if os.isatty(1): # stdout is interactive
            os.system("stty sane")