1#!/usr/bin/env python3
2# Copyright (c) 2020-2024 Intel Corporation
3#
4# SPDX-License-Identifier: Apache-2.0
5
6'''
7This test file contains testsuites for testsuite.py module of twister
8'''
9import sys
10import os
11import mock
12import pytest
13
14from contextlib import nullcontext
15
16ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
17sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
18
19from twisterlib.statuses import TwisterStatus
20from twisterlib.testplan import TestPlan, change_skip_to_error_if_integration
21from twisterlib.testinstance import TestInstance
22from twisterlib.testsuite import TestSuite
23from twisterlib.platform import Platform
24from twisterlib.quarantine import Quarantine
25from twisterlib.error import TwisterRuntimeError
26
27
28def test_testplan_add_testsuites_short(class_testplan):
29    """ Testing add_testcase function of Testsuite class in twister """
30    # Test 1: Check the list of testsuites after calling add testsuites function is as expected
31    class_testplan.SAMPLE_FILENAME = 'test_sample_app.yaml'
32    class_testplan.TESTSUITE_FILENAME = 'test_data.yaml'
33    class_testplan.add_testsuites()
34
35    tests_rel_dir = 'scripts/tests/twister/test_data/testsuites/tests/'
36    expected_testsuites = ['test_b.check_1',
37                          'test_b.check_2',
38                          'test_c.check_1',
39                          'test_c.check_2',
40                          'test_a.check_1',
41                          'test_a.check_2',
42                          'test_d.check_1',
43                          'test_e.check_1',
44                          'sample_test.app',
45                          'test_config.main']
46    testsuite_list = []
47    for key in sorted(class_testplan.testsuites.keys()):
48        testsuite_list.append(os.path.basename(os.path.normpath(key)))
49    assert sorted(testsuite_list) == sorted(expected_testsuites)
50
51    # Test 2 : Assert Testcase name is expected & all testsuites values are testcase class objects
52    suite = class_testplan.testsuites.get(tests_rel_dir + 'test_a/test_a.check_1')
53    assert suite.name == tests_rel_dir + 'test_a/test_a.check_1'
54    assert all(isinstance(n, TestSuite) for n in class_testplan.testsuites.values())
55
56@pytest.mark.parametrize("board_root_dir", [("board_config_file_not_exist"), ("board_config")])
57def test_add_configurations_short(test_data, class_env, board_root_dir):
58    """ Testing add_configurations function of TestPlan class in Twister
59    Test : Asserting on default platforms list
60    """
61    class_env.board_roots = [os.path.abspath(test_data + board_root_dir)]
62    plan = TestPlan(class_env)
63    plan.parse_configuration(config_file=class_env.test_config)
64    if board_root_dir == "board_config":
65        plan.add_configurations()
66        print(sorted(plan.default_platforms))
67        assert sorted(plan.default_platforms) == sorted(['demo_board_1/unit_testing', 'demo_board_3/unit_testing'])
68    elif board_root_dir == "board_config_file_not_exist":
69        plan.add_configurations()
70        assert sorted(plan.default_platforms) != sorted(['demo_board_1'])
71
72
73def test_get_all_testsuites_short(class_testplan, all_testsuites_dict):
74    """ Testing get_all_testsuites function of TestPlan class in Twister """
75    plan = class_testplan
76    plan.testsuites = all_testsuites_dict
77    expected_tests = ['sample_test.app', 'test_a.check_1.1a',
78                      'test_a.check_1.1c',
79                      'test_a.check_1.2a', 'test_a.check_1.2b',
80                      'test_a.check_1.Unit_1c', 'test_a.check_1.unit_1a',
81                      'test_a.check_1.unit_1b', 'test_a.check_2.1a',
82                      'test_a.check_2.1c', 'test_a.check_2.2a',
83                      'test_a.check_2.2b', 'test_a.check_2.Unit_1c',
84                      'test_a.check_2.unit_1a', 'test_a.check_2.unit_1b',
85                      'test_b.check_1', 'test_b.check_2', 'test_c.check_1',
86                      'test_c.check_2', 'test_d.check_1.unit_1a',
87                      'test_d.check_1.unit_1b',
88                      'test_e.check_1.feature5.1a',
89                      'test_e.check_1.feature5.1b',
90                      'test_config.main']
91
92    assert sorted(plan.get_all_tests()) == sorted(expected_tests)
93
94def test_get_platforms_short(class_testplan, platforms_list):
95    """ Testing get_platforms function of TestPlan class in Twister """
96    plan = class_testplan
97    plan.platforms = platforms_list
98    platform = plan.get_platform("demo_board_1")
99    assert isinstance(platform, Platform)
100    assert platform.name == "demo_board_1/unit_testing"
101
102TESTDATA_PART1 = [
103    ("toolchain_allow", ['gcc'], None, None, "Not in testsuite toolchain allow list"),
104    ("platform_allow", ['demo_board_1/unit_testing'], None, None, "Not in testsuite platform allow list"),
105    ("toolchain_exclude", ['zephyr'], None, None, "In test case toolchain exclude"),
106    ("platform_exclude", ['demo_board_2'], None, None, "In test case platform exclude"),
107    ("arch_exclude", ['x86'], None, None, "In test case arch exclude"),
108    ("arch_allow", ['arm'], None, None, "Not in test case arch allow list"),
109    ("skip", True, None, None, "Skip filter"),
110    ("tags", set(['sensor', 'bluetooth']), "ignore_tags", ['bluetooth'], "Excluded tags per platform (exclude_tags)"),
111    ("min_flash", "2024", "flash", "1024", "Not enough FLASH"),
112    ("min_ram", "500", "ram", "256", "Not enough RAM"),
113    ("None", "None", "env", ['BSIM_OUT_PATH', 'demo_env'], "Environment (BSIM_OUT_PATH, demo_env) not satisfied"),
114    ("build_on_all", True, None, None, "Platform is excluded on command line."),
115    ("build_on_all", True, "level", "foobar", "Unknown test level 'foobar'"),
116    (None, None, "supported_toolchains", ['gcc', 'xcc', 'xt-clang'], "Not supported by the toolchain"),
117]
118
119
120@pytest.mark.parametrize("tc_attribute, tc_value, plat_attribute, plat_value, expected_discards",
121                         TESTDATA_PART1)
122def test_apply_filters_part1(class_testplan, all_testsuites_dict, platforms_list,
123                             tc_attribute, tc_value, plat_attribute, plat_value, expected_discards):
124    """ Testing apply_filters function of TestPlan class in Twister
125    Part 1: Response of apply_filters function have
126            appropriate values according to the filters
127    """
128    plan = class_testplan
129    if tc_attribute is None and plat_attribute is None:
130        plan.apply_filters()
131
132    plan.platforms = platforms_list
133    plan.platform_names = [p.name for p in platforms_list]
134    plan.testsuites = all_testsuites_dict
135    for plat in plan.platforms:
136        if plat_attribute == "ignore_tags":
137            plat.ignore_tags = plat_value
138        if plat_attribute == "flash":
139            plat.flash = plat_value
140        if plat_attribute == "ram":
141            plat.ram = plat_value
142        if plat_attribute == "env":
143            plat.env = plat_value
144            plat.env_satisfied = False
145        if plat_attribute == "supported_toolchains":
146            plat.supported_toolchains = plat_value
147    for _, testcase in plan.testsuites.items():
148        if tc_attribute == "toolchain_allow":
149            testcase.toolchain_allow = tc_value
150        if tc_attribute == "platform_allow":
151            testcase.platform_allow = tc_value
152        if tc_attribute == "toolchain_exclude":
153            testcase.toolchain_exclude = tc_value
154        if tc_attribute == "platform_exclude":
155            testcase.platform_exclude = tc_value
156        if tc_attribute == "arch_exclude":
157            testcase.arch_exclude = tc_value
158        if tc_attribute == "arch_allow":
159            testcase.arch_allow = tc_value
160        if tc_attribute == "skip":
161            testcase.skip = tc_value
162        if tc_attribute == "tags":
163            testcase.tags = tc_value
164        if tc_attribute == "min_flash":
165            testcase.min_flash = tc_value
166        if tc_attribute == "min_ram":
167            testcase.min_ram = tc_value
168
169    if plat_attribute == "level":
170        plan.options.level = plat_value
171
172    if tc_attribute == "build_on_all":
173        for _, testcase in plan.testsuites.items():
174            testcase.build_on_all = tc_value
175        plan.apply_filters(exclude_platform=['demo_board_1'])
176    elif plat_attribute == "supported_toolchains":
177        plan.apply_filters(force_toolchain=False,
178                                                 exclude_platform=['demo_board_1'],
179                                                 platform=['demo_board_2/unit_testing'])
180    elif tc_attribute is None and plat_attribute is None:
181        plan.apply_filters()
182    else:
183        plan.apply_filters(exclude_platform=['demo_board_1'],
184                                                 platform=['demo_board_2/unit_testing'])
185
186    filtered_instances = list(filter(lambda item:  item.status == TwisterStatus.FILTER, plan.instances.values()))
187    for d in filtered_instances:
188        assert d.reason == expected_discards
189
190TESTDATA_PART2 = [
191    ("runnable", "True", "Not runnable on device"),
192    ("exclude_tag", ['test_a'], "Command line testsuite exclude filter"),
193    ("run_individual_tests", ['scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1'], "TestSuite name filter"),
194    ("arch", ['arm_test'], "Command line testsuite arch filter"),
195    ("tag", ['test_d'], "Command line testsuite tag filter")
196    ]
197
198
199@pytest.mark.parametrize("extra_filter, extra_filter_value, expected_discards", TESTDATA_PART2)
200def test_apply_filters_part2(class_testplan, all_testsuites_dict,
201                             platforms_list, extra_filter, extra_filter_value, expected_discards):
202    """ Testing apply_filters function of TestPlan class in Twister
203    Part 2 : Response of apply_filters function (discard dictionary) have
204             appropriate values according to the filters
205    """
206
207    class_testplan.platforms = platforms_list
208    class_testplan.platform_names = [p.name for p in platforms_list]
209    class_testplan.testsuites = all_testsuites_dict
210    kwargs = {
211        extra_filter : extra_filter_value,
212        "exclude_platform" : [
213            'demo_board_1'
214            ],
215        "platform" : [
216            'demo_board_2'
217            ]
218        }
219    class_testplan.apply_filters(**kwargs)
220    filtered_instances = list(filter(lambda item:  item.status == TwisterStatus.FILTER, class_testplan.instances.values()))
221    for d in filtered_instances:
222        assert d.reason == expected_discards
223
224
225TESTDATA_PART3 = [
226    (20, 20, -1, 0),
227    (-2, -1, 10, 20),
228    (0, 0, 0, 0)
229    ]
230
231@pytest.mark.parametrize("tc_min_flash, plat_flash, tc_min_ram, plat_ram",
232                         TESTDATA_PART3)
233def test_apply_filters_part3(class_testplan, all_testsuites_dict, platforms_list,
234                             tc_min_flash, plat_flash, tc_min_ram, plat_ram):
235    """ Testing apply_filters function of TestPlan class in Twister
236    Part 3 : Testing edge cases for ram and flash values of platforms & testsuites
237    """
238    class_testplan.platforms = platforms_list
239    class_testplan.platform_names = [p.name for p in platforms_list]
240    class_testplan.testsuites = all_testsuites_dict
241
242    for plat in class_testplan.platforms:
243        plat.flash = plat_flash
244        plat.ram = plat_ram
245    for _, testcase in class_testplan.testsuites.items():
246        testcase.min_ram = tc_min_ram
247        testcase.min_flash = tc_min_flash
248    class_testplan.apply_filters(exclude_platform=['demo_board_1'],
249                                             platform=['demo_board_2'])
250
251    filtered_instances = list(filter(lambda item:  item.status == TwisterStatus.FILTER, class_testplan.instances.values()))
252    assert not filtered_instances
253
254def test_add_instances_short(tmp_path, class_env, all_testsuites_dict, platforms_list):
255    """ Testing add_instances() function of TestPlan class in Twister
256    Test 1: instances dictionary keys have expected values (Platform Name + Testcase Name)
257    Test 2: Values of 'instances' dictionary in Testsuite class are an
258	        instance of 'TestInstance' class
259    Test 3: Values of 'instances' dictionary have expected values.
260    """
261    class_env.outdir = tmp_path
262    plan = TestPlan(class_env)
263    plan.platforms = platforms_list
264    platform = plan.get_platform("demo_board_2")
265    instance_list = []
266    for _, testcase in all_testsuites_dict.items():
267        instance = TestInstance(testcase, platform, 'zephyr', class_env.outdir)
268        instance_list.append(instance)
269    plan.add_instances(instance_list)
270    assert list(plan.instances.keys()) == \
271		   [platform.name + '/zephyr/' + s for s in list(all_testsuites_dict.keys())]
272    assert all(isinstance(n, TestInstance) for n in list(plan.instances.values()))
273    assert list(plan.instances.values()) == instance_list
274
275
276QUARANTINE_BASIC = {
277    'demo_board_1/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1' : 'a1 on board_1 and board_3',
278    'demo_board_3/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1' : 'a1 on board_1 and board_3'
279}
280
281QUARANTINE_WITH_REGEXP = {
282    'demo_board_2/unit_testing/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_2' : 'a2 and c2 on x86',
283    'demo_board_1/unit_testing/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_d/test_d.check_1' : 'all test_d',
284    'demo_board_3/unit_testing/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_d/test_d.check_1' : 'all test_d',
285    'demo_board_2/unit_testing/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_d/test_d.check_1' : 'all test_d',
286    'demo_board_2/unit_testing/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_c/test_c.check_2' : 'a2 and c2 on x86'
287}
288
289QUARANTINE_PLATFORM = {
290    'demo_board_3/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1' : 'all on board_3',
291    'demo_board_3/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_2' : 'all on board_3',
292    'demo_board_3/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_d/test_d.check_1' : 'all on board_3',
293    'demo_board_3/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_b/test_b.check_1' : 'all on board_3',
294    'demo_board_3/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_b/test_b.check_2' : 'all on board_3',
295    'demo_board_3/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_c/test_c.check_1' : 'all on board_3',
296    'demo_board_3/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_c/test_c.check_2' : 'all on board_3',
297    'demo_board_3/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_e/test_e.check_1' : 'all on board_3',
298    'demo_board_3/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_config/test_config.main' : 'all on board_3'
299}
300
301QUARANTINE_MULTIFILES = {
302    **QUARANTINE_BASIC,
303    **QUARANTINE_WITH_REGEXP
304}
305
306@pytest.mark.parametrize(
307    ("quarantine_files, quarantine_verify, expected_val"),
308    [
309        (['basic.yaml'], False, QUARANTINE_BASIC),
310        (['with_regexp.yaml'], False, QUARANTINE_WITH_REGEXP),
311        (['with_regexp.yaml'], True, QUARANTINE_WITH_REGEXP),
312        (['platform.yaml'], False, QUARANTINE_PLATFORM),
313        (['basic.yaml', 'with_regexp.yaml'], False, QUARANTINE_MULTIFILES),
314        (['empty.yaml'], False, {})
315    ],
316    ids=[
317        'basic',
318        'with_regexp',
319        'quarantine_verify',
320        'platform',
321        'multifiles',
322        'empty'
323    ])
324def test_quarantine_short(class_testplan, platforms_list, test_data,
325                    quarantine_files, quarantine_verify, expected_val):
326    """ Testing quarantine feature in Twister
327    """
328    class_testplan.options.all = True
329    class_testplan.platforms = platforms_list
330    class_testplan.platform_names = [p.name for p in platforms_list]
331    class_testplan.TESTSUITE_FILENAME = 'test_data.yaml'
332    class_testplan.add_testsuites()
333
334    quarantine_list = [
335        os.path.join(test_data, 'quarantines', quarantine_file) for quarantine_file in quarantine_files
336    ]
337    class_testplan.quarantine = Quarantine(quarantine_list)
338    class_testplan.options.quarantine_verify = quarantine_verify
339    class_testplan.apply_filters()
340    for testname, instance in class_testplan.instances.items():
341        if quarantine_verify:
342            if testname in expected_val:
343                assert instance.status == TwisterStatus.NONE
344            else:
345                assert instance.status == TwisterStatus.FILTER
346                assert instance.reason == "Not under quarantine"
347        else:
348            if testname in expected_val:
349                assert instance.status == TwisterStatus.FILTER
350                assert instance.reason == "Quarantine: " + expected_val[testname]
351            else:
352                assert instance.status == TwisterStatus.NONE
353
354
355TESTDATA_PART4 = [
356    (os.path.join('test_d', 'test_d.check_1'), ['dummy'],
357     None, 'Snippet not supported'),
358    (os.path.join('test_c', 'test_c.check_1'), ['cdc-acm-console'],
359     0, None),
360    (os.path.join('test_d', 'test_d.check_1'), ['dummy', 'cdc-acm-console'],
361     2, 'Snippet not supported'),
362]
363
364@pytest.mark.parametrize(
365    'testpath, required_snippets, expected_filtered_len, expected_filtered_reason',
366    TESTDATA_PART4,
367    ids=['app', 'global', 'multiple']
368)
369def test_required_snippets_short(
370    class_testplan,
371    all_testsuites_dict,
372    platforms_list,
373    testpath,
374    required_snippets,
375    expected_filtered_len,
376    expected_filtered_reason
377):
378    """ Testing required_snippets function of TestPlan class in Twister """
379    plan = class_testplan
380    testpath = os.path.join('scripts', 'tests', 'twister', 'test_data',
381                            'testsuites', 'tests', testpath)
382    testsuite = class_testplan.testsuites.get(testpath)
383    plan.platforms = platforms_list
384    plan.platform_names = [p.name for p in platforms_list]
385    plan.testsuites = {testpath: testsuite}
386
387    for _, testcase in plan.testsuites.items():
388        testcase.exclude_platform = []
389        testcase.required_snippets = required_snippets
390        testcase.build_on_all = True
391
392    plan.apply_filters()
393
394    filtered_instances = list(
395        filter(lambda item: item.status == TwisterStatus.FILTER, plan.instances.values())
396    )
397    if expected_filtered_len is not None:
398        assert len(filtered_instances) == expected_filtered_len
399    if expected_filtered_reason is not None:
400        for d in filtered_instances:
401            assert d.reason == expected_filtered_reason
402
403
404def test_testplan_get_level():
405    testplan = TestPlan(env=mock.Mock())
406    lvl1 = mock.Mock()
407    lvl1.name = 'a lvl'
408    lvl2 = mock.Mock()
409    lvl2.name = 'a lvl'
410    lvl3 = mock.Mock()
411    lvl3.name = 'other lvl'
412    testplan.levels.append(lvl1)
413    testplan.levels.append(lvl2)
414    testplan.levels.append(lvl3)
415
416    name = 'a lvl'
417
418    res = testplan.get_level(name)
419    assert res == lvl1
420
421    res = testplan.get_level(name)
422    assert res == lvl1
423
424    lvl_missed = mock.Mock()
425    lvl_missed.name = 'missed lvl'
426    res = testplan.get_level('missed_lvl')
427    assert res is None
428
429    testplan.levels.remove(lvl1)
430    testplan.levels.remove(lvl2)
431
432    res = testplan.get_level(name)
433    assert res is None
434
435
436TESTDATA_1 = [
437    ('', {}),
438    (
439"""\
440levels:
441  - name: lvl1
442    adds:
443      - sc1
444      - sc2
445    inherits: []
446  - name: lvl2
447    adds:
448      - sc1-1
449      - sc1-2
450    inherits: [lvl1]
451""",
452    {
453        'lvl1': ['sc1', 'sc2'],
454        'lvl2': ['sc1-1', 'sc1-2', 'sc1', 'sc2']
455    }
456    ),
457]
458
459@pytest.mark.parametrize(
460    'config_yaml, expected_scenarios',
461    TESTDATA_1,
462    ids=['no config', 'valid config']
463)
464def test_testplan_parse_configuration(tmp_path, config_yaml, expected_scenarios):
465    testplan = TestPlan(env=mock.Mock())
466    testplan.scenarios = ['sc1', 'sc1-1', 'sc1-2', 'sc2']
467
468    tmp_config_file = tmp_path / 'config_file.yaml'
469    if config_yaml:
470        tmp_config_file.write_text(config_yaml)
471
472    with pytest.raises(TwisterRuntimeError) if not config_yaml else nullcontext():
473        testplan.parse_configuration(tmp_config_file)
474
475    if not testplan.levels:
476        assert expected_scenarios == {}
477    for level in testplan.levels:
478        assert sorted(level.scenarios) == sorted(expected_scenarios[level.name])
479
480
481TESTDATA_2 = [
482    ([], [], False),
483    (['ts1.tc3'], [], True),
484    (['ts2.tc2'], ['- ts2'], False),
485]
486
487@pytest.mark.parametrize(
488    'sub_tests, expected_outs, expect_error',
489    TESTDATA_2,
490    ids=['no subtests', 'subtests not found', 'valid subtests']
491)
492def test_testplan_find_subtests(
493    capfd,
494    sub_tests,
495    expected_outs,
496    expect_error
497):
498    testplan = TestPlan(env=mock.Mock())
499    testplan.options = mock.Mock(sub_test=sub_tests)
500    testplan.run_individual_testsuite = []
501    testplan.testsuites = {
502        'ts1': mock.Mock(
503            testcases=[
504                mock.Mock(),
505                mock.Mock(),
506            ]
507        ),
508        'ts2': mock.Mock(
509            testcases=[
510                mock.Mock(),
511                mock.Mock(),
512                mock.Mock(),
513            ]
514        )
515    }
516    testplan.testsuites['ts1'].name = 'ts1'
517    testplan.testsuites['ts1'].testcases[0].name = 'ts1.tc1'
518    testplan.testsuites['ts1'].testcases[1].name = 'ts1.tc2'
519    testplan.testsuites['ts2'].name = 'ts2'
520    testplan.testsuites['ts2'].testcases[0].name = 'ts2.tc1'
521    testplan.testsuites['ts2'].testcases[1].name = 'ts2.tc2'
522    testplan.testsuites['ts2'].testcases[2].name = 'ts2.tc3'
523
524    with pytest.raises(TwisterRuntimeError) if expect_error else nullcontext():
525        testplan.find_subtests()
526
527    out, err = capfd.readouterr()
528    sys.stdout.write(out)
529    sys.stdout.write(err)
530
531    assert all([printout in out for printout in expected_outs])
532
533
534TESTDATA_3 = [
535    (0, 0, [], False, [], TwisterRuntimeError, []),
536    (1, 1, [], False, [], TwisterRuntimeError, []),
537    (1, 0, [], True, [], TwisterRuntimeError, ['No quarantine list given to be verified']),
538#    (1, 0, ['qfile.yaml'], False, ['# empty'], None, ['Quarantine file qfile.yaml is empty']),
539    (1, 0, ['qfile.yaml'], False, ['- platforms:\n  - demo_board_3\n  comment: "board_3"'], None, []),
540]
541
542@pytest.mark.parametrize(
543    'added_testsuite_count, load_errors, ql, qv, ql_data, exception, expected_logs',
544    TESTDATA_3,
545    ids=['no tests', 'load errors', 'quarantine verify without quarantine list',
546#         'empty quarantine file',
547         'valid quarantine file']
548)
549def test_testplan_discover(
550    tmp_path,
551    caplog,
552    added_testsuite_count,
553    load_errors,
554    ql,
555    qv,
556    ql_data,
557    exception,
558    expected_logs
559):
560    for qf, data in zip(ql, ql_data):
561        tmp_qf = tmp_path / qf
562        tmp_qf.write_text(data)
563
564    testplan = TestPlan(env=mock.Mock())
565    testplan.options = mock.Mock(
566        test='ts1',
567        quarantine_list=[tmp_path / qf for qf in ql],
568        quarantine_verify=qv,
569    )
570    testplan.testsuites = {
571        'ts1': mock.Mock(id=1),
572        'ts2': mock.Mock(id=2),
573    }
574    testplan.run_individual_testsuite = 'ts0'
575    testplan.load_errors = load_errors
576    testplan.add_testsuites = mock.Mock(return_value=added_testsuite_count)
577    testplan.find_subtests = mock.Mock()
578    testplan.report_duplicates = mock.Mock()
579    testplan.parse_configuration = mock.Mock()
580    testplan.add_configurations = mock.Mock()
581
582    with pytest.raises(exception) if exception else nullcontext():
583        testplan.discover()
584
585    testplan.add_testsuites.assert_called_once_with(testsuite_filter='ts1')
586    assert all([log in caplog.text for log in expected_logs])
587
588
589TESTDATA_4 = [
590    (None, None, None, None, '00',
591     TwisterRuntimeError, [], []),
592    (None, True, None, None, '6/4',
593     TwisterRuntimeError, set(['t-p3', 't-p4', 't-p1', 't-p2']), []),
594    (None, None, 'load_tests.json', None, '0/4',
595     TwisterRuntimeError, set(['lt-p1', 'lt-p3', 'lt-p4', 'lt-p2']), []),
596    ('suffix', None, None, True, '2/4',
597     None, set(['ts-p4', 'ts-p2', 'ts-p1', 'ts-p3']), [2, 4]),
598]
599
600@pytest.mark.parametrize(
601    'report_suffix, only_failed, load_tests, test_only, subset,' \
602    ' exception, expected_selected_platforms, expected_generate_subset_args',
603    TESTDATA_4,
604    ids=['apply_filters only', 'only failed', 'load tests', 'test only']
605)
606def test_testplan_load(
607    tmp_path,
608    report_suffix,
609    only_failed,
610    load_tests,
611    test_only,
612    subset,
613    exception,
614    expected_selected_platforms,
615    expected_generate_subset_args
616):
617    twister_json = """\
618{
619    "testsuites": [
620        {
621            "name": "ts1",
622            "platform": "t-p1",
623            "toolchain": "zephyr",
624            "testcases": []
625        },
626        {
627            "name": "ts1",
628            "platform": "t-p2",
629            "toolchain": "zephyr",
630            "testcases": []
631        },
632        {
633            "name": "ts2",
634            "platform": "t-p3",
635            "toolchain": "zephyr",
636            "testcases": []
637        },
638        {
639            "name": "ts2",
640            "platform": "t-p4",
641            "toolchain": "zephyr",
642            "testcases": []
643        }
644    ]
645}
646"""
647    twister_file = tmp_path / 'twister.json'
648    twister_file.write_text(twister_json)
649
650    twister_suffix_json = """\
651{
652    "testsuites": [
653        {
654            "name": "ts1",
655            "platform": "ts-p1",
656            "toolchain": "zephyr",
657            "testcases": []
658        },
659        {
660            "name": "ts1",
661            "platform": "ts-p2",
662            "toolchain": "zephyr",
663            "testcases": []
664        },
665        {
666            "name": "ts2",
667            "platform": "ts-p3",
668            "toolchain": "zephyr",
669            "testcases": []
670        },
671        {
672            "name": "ts2",
673            "platform": "ts-p4",
674            "toolchain": "zephyr",
675            "testcases": []
676        }
677    ]
678}
679"""
680    twister_suffix_file = tmp_path / 'twister_suffix.json'
681    twister_suffix_file.write_text(twister_suffix_json)
682
683    load_tests_json = """\
684{
685    "testsuites": [
686        {
687            "name": "ts1",
688            "platform": "lt-p1",
689            "toolchain": "zephyr",
690            "testcases": []
691        },
692        {
693            "name": "ts1",
694            "platform": "lt-p2",
695            "toolchain": "zephyr",
696            "testcases": []
697        },
698        {
699            "name": "ts2",
700            "platform": "lt-p3",
701            "toolchain": "zephyr",
702            \"testcases": []
703        },
704        {
705            "name": "ts2",
706            "platform": "lt-p4",
707            "toolchain": "zephyr",
708            "testcases": []
709        }
710    ]
711}
712"""
713    load_tests_file = tmp_path / 'load_tests.json'
714    load_tests_file.write_text(load_tests_json)
715
716    testplan = TestPlan(env=mock.Mock(outdir=tmp_path))
717    testplan.testsuites = {
718        'ts1': mock.Mock(testcases=[], extra_configs=[]),
719        'ts2': mock.Mock(testcases=[], extra_configs=[]),
720    }
721    testplan.testsuites['ts1'].name = 'ts1'
722    testplan.testsuites['ts2'].name = 'ts2'
723    testplan.options = mock.Mock(
724        report_summary=None,
725        outdir=tmp_path,
726        report_suffix=report_suffix,
727        only_failed=only_failed,
728        load_tests=tmp_path / load_tests if load_tests else None,
729        test_only=test_only,
730        exclude_platform=['t-p0', 't-p1',
731                          'ts-p0', 'ts-p1',
732                          'lt-p0', 'lt-p1'],
733        platform=['t-p1', 't-p2', 't-p3', 't-p4',
734                  'ts-p1', 'ts-p2', 'ts-p3', 'ts-p4',
735                  'lt-p1', 'lt-p2', 'lt-p3', 'lt-p4'],
736        subset=subset
737    )
738    testplan.platforms=[mock.Mock(), mock.Mock(), mock.Mock(), mock.Mock(),
739                        mock.Mock(), mock.Mock(), mock.Mock(), mock.Mock(),
740                        mock.Mock(), mock.Mock(), mock.Mock(), mock.Mock()]
741    testplan.platforms[0].name = 't-p1'
742    testplan.platforms[1].name = 't-p2'
743    testplan.platforms[2].name = 't-p3'
744    testplan.platforms[3].name = 't-p4'
745    testplan.platforms[4].name = 'ts-p1'
746    testplan.platforms[5].name = 'ts-p2'
747    testplan.platforms[6].name = 'ts-p3'
748    testplan.platforms[7].name = 'ts-p4'
749    testplan.platforms[8].name = 'lt-p1'
750    testplan.platforms[9].name = 'lt-p2'
751    testplan.platforms[10].name = 'lt-p3'
752    testplan.platforms[11].name = 'lt-p4'
753    testplan.platforms[0].aliases = ['t-p1']
754    testplan.platforms[1].aliases = ['t-p2']
755    testplan.platforms[2].aliases = ['t-p3']
756    testplan.platforms[3].aliases = ['t-p4']
757    testplan.platforms[4].aliases = ['ts-p1']
758    testplan.platforms[5].aliases = ['ts-p2']
759    testplan.platforms[6].aliases = ['ts-p3']
760    testplan.platforms[7].aliases = ['ts-p4']
761    testplan.platforms[8].aliases = ['lt-p1']
762    testplan.platforms[9].aliases = ['lt-p2']
763    testplan.platforms[10].aliases = ['lt-p3']
764    testplan.platforms[11].aliases = ['lt-p4']
765    testplan.platforms[0].normalized_name = 't-p1'
766    testplan.platforms[1].normalized_name = 't-p2'
767    testplan.platforms[2].normalized_name = 't-p3'
768    testplan.platforms[3].normalized_name = 't-p4'
769    testplan.platforms[4].normalized_name = 'ts-p1'
770    testplan.platforms[5].normalized_name = 'ts-p2'
771    testplan.platforms[6].normalized_name = 'ts-p3'
772    testplan.platforms[7].normalized_name = 'ts-p4'
773    testplan.platforms[8].normalized_name = 'lt-p1'
774    testplan.platforms[9].normalized_name = 'lt-p2'
775    testplan.platforms[10].normalized_name = 'lt-p3'
776    testplan.platforms[11].normalized_name = 'lt-p4'
777    testplan.generate_subset = mock.Mock()
778    testplan.apply_filters = mock.Mock()
779
780    with mock.patch('twisterlib.testinstance.TestInstance.create_overlay', mock.Mock()), \
781         mock.patch('twisterlib.testinstance.TestInstance.check_runnable', return_value=True), \
782         pytest.raises(exception) if exception else nullcontext():
783        testplan.load()
784
785    assert testplan.selected_platforms == expected_selected_platforms
786    if expected_generate_subset_args:
787        testplan.generate_subset.assert_called_once_with(*expected_generate_subset_args)
788    else:
789        testplan.generate_subset.assert_not_called()
790
791
792TESTDATA_5 = [
793    (False, False, None, 1, 2,
794     ['plat1/testA', 'plat1/testB', 'plat1/testC',
795      'plat3/testA', 'plat3/testB', 'plat3/testC']),
796    (False, False, None, 1, 5,
797     ['plat1/testA',
798      'plat3/testA', 'plat3/testB', 'plat3/testC']),
799    (False, False, None, 2, 2,
800     ['plat2/testA', 'plat2/testB']),
801    (True, False, None, 1, 2,
802     ['plat1/testA', 'plat2/testA', 'plat1/testB',
803      'plat3/testA', 'plat3/testB', 'plat3/testC']),
804    (True, False, None, 2, 2,
805     ['plat2/testB', 'plat1/testC']),
806    (True, True, 123, 1, 2,
807     ['plat2/testA', 'plat2/testB', 'plat1/testC',
808      'plat3/testB', 'plat3/testA', 'plat3/testC']),
809    (True, True, 123, 2, 2,
810     ['plat1/testB', 'plat1/testA']),
811]
812
813@pytest.mark.parametrize(
814    'device_testing, shuffle, seed, subset, sets, expected_subset',
815    TESTDATA_5,
816    ids=['subset 1', 'subset 1 out of 5', 'subset 2',
817         'device testing, subset 1', 'device testing, subset 2',
818         'device testing, shuffle with seed, subset 1',
819         'device testing, shuffle with seed, subset 2']
820)
821def test_testplan_generate_subset(
822    device_testing,
823    shuffle,
824    seed,
825    subset,
826    sets,
827    expected_subset
828):
829    testplan = TestPlan(env=mock.Mock())
830    testplan.options = mock.Mock(
831        device_testing=device_testing,
832        shuffle_tests=shuffle,
833        shuffle_tests_seed=seed
834    )
835    testplan.instances = {
836        'plat1/testA': mock.Mock(status=TwisterStatus.NONE),
837        'plat1/testB': mock.Mock(status=TwisterStatus.NONE),
838        'plat1/testC': mock.Mock(status=TwisterStatus.NONE),
839        'plat2/testA': mock.Mock(status=TwisterStatus.NONE),
840        'plat2/testB': mock.Mock(status=TwisterStatus.NONE),
841        'plat3/testA': mock.Mock(status=TwisterStatus.SKIP),
842        'plat3/testB': mock.Mock(status=TwisterStatus.SKIP),
843        'plat3/testC': mock.Mock(status=TwisterStatus.ERROR),
844    }
845
846    testplan.generate_subset(subset, sets)
847
848    assert [instance for instance in testplan.instances.keys()] == \
849           expected_subset
850
851
852def test_testplan_handle_modules():
853    testplan = TestPlan(env=mock.Mock())
854
855    modules = [mock.Mock(meta={'name': 'name1'}),
856               mock.Mock(meta={'name': 'name2'})]
857
858    with mock.patch('twisterlib.testplan.parse_modules', return_value=modules):
859        testplan.handle_modules()
860
861    assert testplan.modules == ['name1', 'name2']
862
863
864TESTDATA_6 = [
865    (True, False, False, 0, 'report_test_tree'),
866    (True, True, False, 0, 'report_test_tree'),
867    (True, False, True, 0, 'report_test_tree'),
868    (True, True, True, 0, 'report_test_tree'),
869    (False, True, False, 0, 'report_test_list'),
870    (False, True, True, 0, 'report_test_list'),
871    (False, False, True, 0, 'report_tag_list'),
872    (False, False, False, 1, None),
873]
874
875@pytest.mark.parametrize(
876    'test_tree, list_tests, list_tags, expected_res, expected_method',
877    TESTDATA_6,
878    ids=['test tree', 'test tree + test list', 'test tree + tag list',
879         'test tree + test list + tag list', 'test list',
880         'test list + tag list', 'tag list', 'no report']
881)
882def test_testplan_report(
883    test_tree,
884    list_tests,
885    list_tags,
886    expected_res,
887    expected_method
888):
889    testplan = TestPlan(env=mock.Mock())
890    testplan.report_test_tree = mock.Mock()
891    testplan.report_test_list = mock.Mock()
892    testplan.report_tag_list = mock.Mock()
893
894    testplan.options = mock.Mock(
895        test_tree=test_tree,
896        list_tests=list_tests,
897        list_tags=list_tags,
898    )
899
900    res = testplan.report()
901
902    assert res == expected_res
903
904    methods = ['report_test_tree', 'report_test_list', 'report_tag_list']
905    if expected_method:
906        methods.remove(expected_method)
907        getattr(testplan, expected_method).assert_called_once()
908    for method in methods:
909        getattr(testplan, method).assert_not_called()
910
911
912TESTDATA_7 = [
913    (
914        [
915            mock.Mock(
916                yamlfile='a.yaml',
917                scenarios=['scenario1', 'scenario2']
918            ),
919            mock.Mock(
920                yamlfile='b.yaml',
921                scenarios=['scenario1']
922            )
923        ],
924        TwisterRuntimeError,
925        'Duplicated test scenarios found:\n' \
926        '- scenario1 found in:\n' \
927        '  - a.yaml\n' \
928        '  - b.yaml\n',
929        []
930    ),
931    (
932        [
933            mock.Mock(
934                yamlfile='a.yaml',
935                scenarios=['scenario.a.1', 'scenario.a.2']
936            ),
937            mock.Mock(
938                yamlfile='b.yaml',
939                scenarios=['scenario.b.1']
940            )
941        ],
942        None,
943        None,
944        ['No duplicates found.']
945    ),
946]
947
948@pytest.mark.parametrize(
949    'testsuites, expected_error, error_msg, expected_logs',
950    TESTDATA_7,
951    ids=['a duplicate', 'no duplicates']
952)
953def test_testplan_report_duplicates(
954    capfd,
955    caplog,
956    testsuites,
957    expected_error,
958    error_msg,
959    expected_logs
960):
961    def mock_get(name):
962        return list(filter(lambda x: name in x.scenarios, testsuites))
963
964    testplan = TestPlan(env=mock.Mock())
965    testplan.scenarios = [scenario for testsuite in testsuites \
966                                   for scenario in testsuite.scenarios]
967    testplan.get_testsuite = mock.Mock(side_effect=mock_get)
968
969    with pytest.raises(expected_error) if expected_error is not None else \
970         nullcontext() as err:
971        testplan.report_duplicates()
972
973    if expected_error:
974        assert str(err._excinfo[1]) == error_msg
975
976    assert all([log in caplog.text for log in expected_logs])
977
978
979def test_testplan_report_tag_list(capfd):
980    testplan = TestPlan(env=mock.Mock())
981    testplan.testsuites = {
982        'testsuite0': mock.Mock(tags=set(['tag1', 'tag2'])),
983        'testsuite1': mock.Mock(tags=set(['tag1', 'tag2', 'tag3'])),
984        'testsuite2': mock.Mock(tags=set(['tag1', 'tag3'])),
985        'testsuite3': mock.Mock(tags=set(['tag']))
986    }
987
988    testplan.report_tag_list()
989
990    out,err = capfd.readouterr()
991    sys.stdout.write(out)
992    sys.stderr.write(err)
993
994    assert '- tag' in out
995    assert '- tag1' in out
996    assert '- tag2' in out
997    assert '- tag3' in out
998
999
1000def test_testplan_report_test_tree(capfd):
1001    testplan = TestPlan(env=mock.Mock())
1002    testplan.get_tests_list = mock.Mock(
1003        return_value=['1.dummy.case.1', '1.dummy.case.2',
1004                      '2.dummy.case.1', '2.dummy.case.2',
1005                      '3.dummy.case.1', '3.dummy.case.2',
1006                      '4.dummy.case.1', '4.dummy.case.2',
1007                      '5.dummy.case.1', '5.dummy.case.2',
1008                      'sample.group1.case1', 'sample.group1.case2',
1009                      'sample.group2.case', 'sample.group3.case1',
1010                      'sample.group3.case2', 'sample.group3.case3']
1011    )
1012
1013    testplan.report_test_tree()
1014
1015    out,err = capfd.readouterr()
1016    sys.stdout.write(out)
1017    sys.stderr.write(err)
1018
1019    expected = """
1020Testsuite
1021├── Samples
1022│   ├── group1
1023│   │   ├── sample.group1.case1
1024│   │   └── sample.group1.case2
1025│   ├── group2
1026│   │   └── sample.group2.case
1027│   └── group3
1028│       ├── sample.group3.case1
1029│       ├── sample.group3.case2
1030│       └── sample.group3.case3
1031└── Tests
1032    ├── 1
1033    │   └── dummy
1034    │       ├── 1.dummy.case.1
1035    │       └── 1.dummy.case.2
1036    ├── 2
1037    │   └── dummy
1038    │       ├── 2.dummy.case.1
1039    │       └── 2.dummy.case.2
1040    ├── 3
1041    │   └── dummy
1042    │       ├── 3.dummy.case.1
1043    │       └── 3.dummy.case.2
1044    ├── 4
1045    │   └── dummy
1046    │       ├── 4.dummy.case.1
1047    │       └── 4.dummy.case.2
1048    └── 5
1049        └── dummy
1050            ├── 5.dummy.case.1
1051            └── 5.dummy.case.2
1052"""
1053    expected = expected[1:]
1054
1055    assert expected in out
1056
1057
1058def test_testplan_report_test_list(capfd):
1059    testplan = TestPlan(env=mock.Mock())
1060    testplan.get_tests_list = mock.Mock(
1061        return_value=['4.dummy.case.1', '4.dummy.case.2',
1062                      '3.dummy.case.2', '2.dummy.case.2',
1063                      '1.dummy.case.1', '1.dummy.case.2',
1064                      '3.dummy.case.1', '2.dummy.case.1',
1065                      '5.dummy.case.1', '5.dummy.case.2']
1066    )
1067
1068    testplan.report_test_list()
1069
1070    out,err = capfd.readouterr()
1071    sys.stdout.write(out)
1072    sys.stderr.write(err)
1073
1074    assert ' - 1.dummy.case.1\n' \
1075           ' - 1.dummy.case.2\n' \
1076           ' - 2.dummy.case.1\n' \
1077           ' - 2.dummy.case.2\n' \
1078           ' - 3.dummy.case.1\n' \
1079           ' - 3.dummy.case.2\n' \
1080           ' - 4.dummy.case.1\n' \
1081           ' - 4.dummy.case.2\n' \
1082           ' - 5.dummy.case.1\n' \
1083           ' - 5.dummy.case.2\n' \
1084           '10 total.' in out
1085
1086
1087def test_testplan_info(capfd):
1088    TestPlan.info('dummy text')
1089
1090    out, err = capfd.readouterr()
1091    sys.stdout.write(out)
1092    sys.stderr.write(err)
1093
1094    assert 'dummy text\n' in out
1095
1096
1097TESTDATA_8 = [
1098    (False, ['p1e2/unit_testing', 'p2/unit_testing', 'p3/unit_testing'], ['p2/unit_testing', 'p3/unit_testing']),
1099    (True, ['p1e2/unit_testing', 'p2/unit_testing', 'p3/unit_testing'], ['p3/unit_testing']),
1100]
1101
1102@pytest.mark.parametrize(
1103    'override_default_platforms, expected_platform_names, expected_defaults',
1104    TESTDATA_8,
1105    ids=['no override defaults', 'override defaults']
1106)
1107def test_testplan_add_configurations(
1108    tmp_path,
1109    override_default_platforms,
1110    expected_platform_names,
1111    expected_defaults
1112):
1113    env = mock.Mock(board_roots=[tmp_path / 'boards'], soc_roots=[tmp_path], arch_roots=[tmp_path])
1114
1115    testplan = TestPlan(env=env)
1116
1117    testplan.test_config = {
1118        'platforms': {
1119            'override_default_platforms': override_default_platforms,
1120            'default_platforms': ['p3', 'p1e1']
1121        }
1122    }
1123
1124    def mock_gen_plat(board_roots, soc_roots, arch_roots):
1125        assert [tmp_path] == board_roots
1126        assert [tmp_path] == soc_roots
1127        assert [tmp_path] == arch_roots
1128
1129        platforms = [
1130            mock.Mock(aliases=['p1e1/unit_testing', 'p1e1'], twister=False, default=False),
1131            mock.Mock(aliases=['p1e2/unit_testing', 'p1e2'], twister=True, default=False),
1132            mock.Mock(aliases=['p2/unit_testing', 'p2'], twister=True, default=True),
1133            mock.Mock(aliases=['p3/unit_testing', 'p3'], twister=True, default=True),
1134        ]
1135        for platform in platforms:
1136            type(platform).name = mock.PropertyMock(return_value=platform.aliases[0])
1137            yield platform
1138
1139    with mock.patch('twisterlib.testplan.generate_platforms', mock_gen_plat):
1140        testplan.add_configurations()
1141
1142    if expected_defaults is not None:
1143        print(expected_defaults)
1144        print(testplan.default_platforms)
1145        assert sorted(expected_defaults) == sorted(testplan.default_platforms)
1146    if expected_platform_names is not None:
1147        print(expected_platform_names)
1148        print(testplan.platform_names)
1149        platform_names = [p.name for p in testplan.platforms]
1150        assert sorted(expected_platform_names) == sorted(platform_names)
1151
1152
1153def test_testplan_get_all_tests():
1154    testplan = TestPlan(env=mock.Mock())
1155    tc1 = mock.Mock()
1156    tc1.name = 'tc1'
1157    tc2 = mock.Mock()
1158    tc2.name = 'tc2'
1159    tc3 = mock.Mock()
1160    tc3.name = 'tc3'
1161    tc4 = mock.Mock()
1162    tc4.name = 'tc4'
1163    tc5 = mock.Mock()
1164    tc5.name = 'tc5'
1165    ts1 = mock.Mock(testcases=[tc1, tc2])
1166    ts2 = mock.Mock(testcases=[tc3, tc4, tc5])
1167    testplan.testsuites = {
1168        'ts1': ts1,
1169        'ts2': ts2
1170    }
1171
1172    res = testplan.get_all_tests()
1173
1174    assert sorted(res) == ['tc1', 'tc2', 'tc3', 'tc4', 'tc5']
1175
1176
1177TESTDATA_9 = [
1178    ([], False, True, 11, 1),
1179    ([], False, False, 7, 2),
1180    ([], True, False, 9, 1),
1181    ([], True, True, 9, 1),
1182    ([], True, False, 9, 1),
1183    (['good_test/dummy.common.1', 'good_test/dummy.common.2', 'good_test/dummy.common.3'], False, True, 3, 1),
1184    (['good_test/dummy.common.1', 'good_test/dummy.common.2',
1185      'duplicate_test/dummy.common.1', 'duplicate_test/dummy.common.2'], False, True, 4, 1),
1186    (['dummy.common.1', 'dummy.common.2'], False, False, 2, 1),
1187    (['good_test/dummy.common.1', 'good_test/dummy.common.2', 'good_test/dummy.common.3'], True, True, 0, 1),
1188]
1189
1190@pytest.mark.parametrize(
1191    'testsuite_filter, use_alt_root, detailed_id, expected_suite_count, expected_errors',
1192    TESTDATA_9,
1193    ids=[
1194        'no testsuite filter, detailed id',
1195        'no testsuite filter, short id',
1196        'no testsuite filter, alt root, detailed id',
1197        'no filter, alt root, detailed id',
1198        'no filter, alt root, short id',
1199        'testsuite filter',
1200        'testsuite filter and valid duplicate',
1201        'testsuite filter, short id and duplicate',
1202        'testsuite filter, alt root',
1203    ]
1204)
1205def test_testplan_add_testsuites(tmp_path, testsuite_filter, use_alt_root, detailed_id,
1206                                 expected_errors, expected_suite_count):
1207    # tmp_path
1208    # ├ tests  <- test root
1209    # │ ├ good_test
1210    # │ │ └ testcase.yaml
1211    # │ ├ wrong_test
1212    # │ │ └ testcase.yaml
1213    # │ ├ good_sample
1214    # │ │ └ sample.yaml
1215    # │ ├ duplicate_test
1216    # │ │ └ testcase.yaml
1217    # │ └ others
1218    # │   └ other.txt
1219    # └ other_tests  <- alternate test root
1220    #   └ good_test
1221    #     └ testcase.yaml
1222    tmp_test_root_dir = tmp_path / 'tests'
1223    tmp_test_root_dir.mkdir()
1224
1225    tmp_good_test_dir = tmp_test_root_dir / 'good_test'
1226    tmp_good_test_dir.mkdir()
1227    testcase_yaml_1 = """\
1228tests:
1229  dummy.common.1:
1230    build_on_all: true
1231  dummy.common.2:
1232    build_on_all: true
1233  dummy.common.3:
1234    build_on_all: true
1235  dummy.special:
1236    build_on_all: false
1237"""
1238    testfile_1 = tmp_good_test_dir / 'testcase.yaml'
1239    testfile_1.write_text(testcase_yaml_1)
1240
1241    tmp_bad_test_dir = tmp_test_root_dir / 'wrong_test'
1242    tmp_bad_test_dir.mkdir()
1243    testcase_yaml_2 = """\
1244tests:
1245 wrong:
1246  yaml: {]}
1247"""
1248    testfile_2 = tmp_bad_test_dir / 'testcase.yaml'
1249    testfile_2.write_text(testcase_yaml_2)
1250
1251    tmp_good_sample_dir = tmp_test_root_dir / 'good_sample'
1252    tmp_good_sample_dir.mkdir()
1253    samplecase_yaml_1 = """\
1254tests:
1255  sample.dummy.common.1:
1256    tags:
1257    - samples
1258  sample.dummy.common.2:
1259    tags:
1260    - samples
1261  sample.dummy.special.1:
1262    tags:
1263    - samples
1264"""
1265    samplefile_1 = tmp_good_sample_dir / 'sample.yaml'
1266    samplefile_1.write_text(samplecase_yaml_1)
1267
1268    tmp_duplicate_test_dir = tmp_test_root_dir / 'duplicate_test'
1269    tmp_duplicate_test_dir.mkdir()
1270    # The duplicate needs to have the same number of tests as these configurations
1271    # can be read either with duplicate_test first, or good_test first, so number
1272    # of selected tests needs to be the same in both situations.
1273    testcase_yaml_4 = """\
1274tests:
1275  dummy.common.1:
1276    build_on_all: true
1277  dummy.common.2:
1278    build_on_all: true
1279  dummy.common.3:
1280    build_on_all: true
1281  dummy.special:
1282    build_on_all: false
1283"""
1284    testfile_4 = tmp_duplicate_test_dir / 'testcase.yaml'
1285    testfile_4.write_text(testcase_yaml_4)
1286
1287    tmp_other_dir = tmp_test_root_dir / 'others'
1288    tmp_other_dir.mkdir()
1289    _ = tmp_other_dir / 'other.txt'
1290
1291    tmp_alt_test_root_dir = tmp_path / 'other_tests'
1292    tmp_alt_test_root_dir.mkdir()
1293
1294    tmp_alt_good_test_dir = tmp_alt_test_root_dir / 'good_test'
1295    tmp_alt_good_test_dir.mkdir()
1296    testcase_yaml_3 = """\
1297tests:
1298  dummy.alt.1:
1299    build_on_all: true
1300  dummy.alt.2:
1301    build_on_all: true
1302"""
1303    testfile_3 = tmp_alt_good_test_dir / 'testcase.yaml'
1304    testfile_3.write_text(testcase_yaml_3)
1305
1306    env = mock.Mock(
1307        test_roots=[tmp_test_root_dir],
1308        options=mock.Mock(detailed_test_id=detailed_id),
1309        alt_config_root=[tmp_alt_test_root_dir] if use_alt_root else []
1310    )
1311
1312    testplan = TestPlan(env=env)
1313
1314    res = testplan.add_testsuites(testsuite_filter)
1315
1316    assert res == expected_suite_count
1317    assert testplan.load_errors == expected_errors
1318
1319
1320def test_testplan_str():
1321    testplan = TestPlan(env=mock.Mock())
1322    testplan.name = 'my name'
1323
1324    res = testplan.__str__()
1325
1326    assert res == 'my name'
1327
1328
1329TESTDATA_10 = [
1330    ('a platform', True),
1331    ('other platform', False),
1332]
1333
1334@pytest.mark.parametrize(
1335    'name, expect_found',
1336    TESTDATA_10,
1337    ids=['platform exists', 'no platform']
1338)
1339def test_testplan_get_platform(name, expect_found):
1340    testplan = TestPlan(env=mock.Mock())
1341    p1 = mock.Mock()
1342    p1.name = 'some platform'
1343    p1.aliases = [p1.name]
1344    p2 = mock.Mock()
1345    p2.name = 'a platform'
1346    p2.aliases = [p2.name]
1347    testplan.platforms = [p1, p2]
1348
1349    res = testplan.get_platform(name)
1350
1351    if expect_found:
1352        assert res.name == name
1353    else:
1354        assert res is None
1355
1356
1357TESTDATA_11 = [
1358    (True, 'runnable'),
1359    (False, 'buildable'),
1360]
1361
1362@pytest.mark.parametrize(
1363    'device_testing, expected_tfilter',
1364    TESTDATA_11,
1365    ids=['device testing', 'no device testing']
1366)
1367def test_testplan_load_from_file(caplog, device_testing, expected_tfilter):
1368    def get_platform(name):
1369        p = mock.Mock()
1370        p.name = name
1371        p.normalized_name = name
1372        return p
1373
1374    ts1tc1 = mock.Mock()
1375    ts1tc1.name = 'TS1.tc1'
1376    ts1 = mock.Mock(testcases=[ts1tc1])
1377    ts1.name = 'TestSuite 1'
1378    ts1.toolchain = 'zephyr'
1379    ts2 = mock.Mock(testcases=[])
1380    ts2.name = 'TestSuite 2'
1381    ts2.toolchain = 'zephyr'
1382    ts3tc1 = mock.Mock()
1383    ts3tc1.name = 'TS3.tc1'
1384    ts3tc2 = mock.Mock()
1385    ts3tc2.name = 'TS3.tc2'
1386    ts3 = mock.Mock(testcases=[ts3tc1, ts3tc2])
1387    ts3.name = 'TestSuite 3'
1388    ts3.toolchain = 'zephyr'
1389    ts4tc1 = mock.Mock()
1390    ts4tc1.name = 'TS4.tc1'
1391    ts4 = mock.Mock(testcases=[ts4tc1])
1392    ts4.name = 'TestSuite 4'
1393    ts4.toolchain = 'zephyr'
1394    ts5 = mock.Mock(testcases=[])
1395    ts5.name = 'TestSuite 5'
1396    ts5.toolchain = 'zephyr'
1397
1398    testplan = TestPlan(env=mock.Mock(outdir=os.path.join('out', 'dir')))
1399    testplan.options = mock.Mock(device_testing=device_testing, test_only=True, report_summary=None)
1400    testplan.testsuites = {
1401        'TestSuite 1': ts1,
1402        'TestSuite 2': ts2,
1403        'TestSuite 3': ts3,
1404        'TestSuite 4': ts4,
1405        'TestSuite 5': ts5
1406    }
1407
1408    testplan.get_platform = mock.Mock(side_effect=get_platform)
1409
1410    testplan_data = """\
1411{
1412    "testsuites": [
1413        {
1414            "name": "TestSuite 1",
1415            "platform": "Platform 1",
1416            "run_id": 1,
1417            "execution_time": 60.00,
1418            "used_ram": 4096,
1419            "available_ram": 12278,
1420            "used_rom": 1024,
1421            "available_rom": 1047552,
1422            "status": "passed",
1423            "toolchain": "zephyr",
1424            "reason": "OK",
1425            "testcases": [
1426                {
1427                    "identifier": "TS1.tc1",
1428                    "status": "passed",
1429                    "reason": "passed",
1430                    "execution_time": 60.00,
1431                    "log": ""
1432                }
1433            ]
1434        },
1435        {
1436            "name": "TestSuite 2",
1437            "platform": "Platform 1",
1438            "toolchain": "zephyr"
1439        },
1440        {
1441            "name": "TestSuite 3",
1442            "platform": "Platform 1",
1443            "run_id": 1,
1444            "execution_time": 360.00,
1445            "used_ram": 4096,
1446            "available_ram": 12278,
1447            "used_rom": 1024,
1448            "available_rom": 1047552,
1449            "status": "error",
1450            "toolchain": "zephyr",
1451            "reason": "File Not Found Error",
1452            "testcases": [
1453                {
1454                    "identifier": "TS3.tc1",
1455                    "status": "error",
1456                    "reason": "File Not Found Error.",
1457                    "execution_time": 360.00,
1458                    "log": "[ERROR]: File 'dummy.yaml' not found!\\nClosing..."
1459                },
1460                {
1461                    "identifier": "TS3.tc2"
1462                }
1463            ]
1464        },
1465        {
1466            "name": "TestSuite 4",
1467            "platform": "Platform 1",
1468            "execution_time": 360.00,
1469            "used_ram": 4096,
1470            "available_ram": 12278,
1471            "used_rom": 1024,
1472            "available_rom": 1047552,
1473            "status": "skipped",
1474            "toolchain": "zephyr",
1475            "reason": "Not in requested test list.",
1476            "testcases": [
1477                {
1478                    "identifier": "TS4.tc1",
1479                    "status": "skipped",
1480                    "reason": "Not in requested test list.",
1481                    "execution_time": 360.00,
1482                    "log": "[INFO] Parsing..."
1483                },
1484                {
1485                    "identifier": "TS3.tc2"
1486                }
1487            ]
1488        },
1489        {
1490            "name": "TestSuite 5",
1491            "platform": "Platform 2",
1492            "toolchain": "zephyr"
1493        }
1494    ]
1495}
1496"""
1497
1498    filter_platform = ['Platform 1']
1499
1500    check_runnable_mock = mock.Mock(return_value=True)
1501
1502    with mock.patch('builtins.open', mock.mock_open(read_data=testplan_data)), \
1503         mock.patch('twisterlib.testinstance.TestInstance.check_runnable', check_runnable_mock), \
1504         mock.patch('twisterlib.testinstance.TestInstance.create_overlay', mock.Mock()):
1505        testplan.load_from_file('dummy.yaml', filter_platform)
1506
1507    expected_instances = {
1508        'Platform 1/zephyr/TestSuite 1': {
1509            'metrics': {
1510                'handler_time': 60.0,
1511                'used_ram': 4096,
1512                'used_rom': 1024,
1513                'available_ram': 12278,
1514                'available_rom': 1047552
1515            },
1516            'retries': 0,
1517            'toolchain': 'zephyr',
1518            'testcases': {
1519                'TS1.tc1': {
1520                    'status': TwisterStatus.PASS,
1521                    'reason': 'passed',
1522                    'duration': 60.0,
1523                    'output': ''
1524                }
1525            }
1526        },
1527        'Platform 1/zephyr/TestSuite 2': {
1528            'metrics': {
1529                'handler_time': 0,
1530                'used_ram': 0,
1531                'used_rom': 0,
1532                'available_ram': 0,
1533                'available_rom': 0
1534            },
1535            'retries': 0,
1536            'toolchain': 'zephyr',
1537            'testcases': []
1538        },
1539        'Platform 1/zephyr/TestSuite 3': {
1540            'metrics': {
1541                'handler_time': 360.0,
1542                'used_ram': 4096,
1543                'used_rom': 1024,
1544                'available_ram': 12278,
1545                'available_rom': 1047552
1546            },
1547            'retries': 1,
1548            'toolchain': 'zephyr',
1549            'testcases': {
1550                    'TS3.tc1': {
1551                        'status': TwisterStatus.ERROR,
1552                        'reason': None,
1553                        'duration': 360.0,
1554                        'output': '[ERROR]: File \'dummy.yaml\' not found!\nClosing...'
1555                    },
1556                    'TS3.tc2': {
1557                        'status': TwisterStatus.NONE,
1558                        'reason': None,
1559                        'duration': 0,
1560                        'output': ''
1561                    }
1562            }
1563        },
1564        'Platform 1/zephyr/TestSuite 4': {
1565            'metrics': {
1566                'handler_time': 360.0,
1567                'used_ram': 4096,
1568                'used_rom': 1024,
1569                'available_ram': 12278,
1570                'available_rom': 1047552
1571            },
1572            'retries': 0,
1573            'toolchain': 'zephyr',
1574            'testcases': {
1575                'TS4.tc1': {
1576                    'status': TwisterStatus.SKIP,
1577                    'reason': 'Not in requested test list.',
1578                    'duration': 360.0,
1579                    'output': '[INFO] Parsing...'
1580                }
1581            }
1582        },
1583    }
1584
1585    for n, i in testplan.instances.items():
1586        assert expected_instances[n]['metrics'] == i.metrics
1587        assert expected_instances[n]['retries'] == i.retries
1588        for t in i.testcases:
1589            assert expected_instances[n]['testcases'][str(t)]['status'] == t.status
1590            assert expected_instances[n]['testcases'][str(t)]['reason'] == t.reason
1591            assert expected_instances[n]['testcases'][str(t)]['duration'] == t.duration
1592            assert expected_instances[n]['testcases'][str(t)]['output'] == t.output
1593
1594    check_runnable_mock.assert_called_with(mock.ANY, mock.ANY)
1595
1596    expected_logs = [
1597        'loading TestSuite 1...',
1598        'loading TestSuite 2...',
1599        'loading TestSuite 3...',
1600        'loading TestSuite 4...',
1601    ]
1602    assert all([log in caplog.text for log in expected_logs])
1603
1604
1605def test_testplan_add_instances():
1606    testplan = TestPlan(env=mock.Mock())
1607    instance1 = mock.Mock()
1608    instance1.name = 'instance 1'
1609    instance2 = mock.Mock()
1610    instance2.name = 'instance 2'
1611    instance_list = [instance1, instance2]
1612
1613    testplan.add_instances(instance_list)
1614
1615    assert testplan.instances == {
1616        'instance 1': instance1,
1617        'instance 2': instance2,
1618    }
1619
1620
1621def test_testplan_get_testcase():
1622    testplan = TestPlan(env=mock.Mock())
1623    testplan.testsuites = {
1624        'test1.suite0': mock.Mock(testcases=[mock.Mock(), mock.Mock()]),
1625        'test1.suite1': mock.Mock(testcases=[mock.Mock(), mock.Mock()]),
1626        'test1.suite2': mock.Mock(testcases=[mock.Mock(), mock.Mock()]),
1627        'test1.suite3': mock.Mock(testcases=[])
1628    }
1629
1630    testplan.testsuites['test1.suite0'].testcases[0].name = 'test1.suite0.case0'
1631    testplan.testsuites['test1.suite0'].testcases[1].name = 'test1.suite0.case1'
1632    #
1633    testplan.testsuites['test1.suite1'].testcases[0].name = 'test1.suite1.case0'
1634    testplan.testsuites['test1.suite1'].testcases[1].name = 'test1.suite1.case0'  # in suite duplicate
1635    #
1636    testplan.testsuites['test1.suite2'].testcases[0].name = 'test1.suite2.case0'
1637    testplan.testsuites['test1.suite2'].testcases[1].name = 'test1.suite1.case0'  # out suite duplicate
1638
1639    id = 'test1.suite1.case0'
1640
1641    res = testplan.get_testcase(id)
1642
1643    assert len(res) == 3
1644    assert testplan.testsuites['test1.suite1'] in res
1645    assert testplan.testsuites['test1.suite2'] in res
1646
1647
1648def test_testplan_verify_platforms_existence(caplog):
1649    testplan = TestPlan(env=mock.Mock())
1650    testplan.platform_names = ['a platform', 'other platform']
1651
1652    platform_names = ['other platform', 'some platform']
1653    log_info = 'PLATFORM ERROR'
1654
1655    with pytest.raises(SystemExit) as se:
1656        testplan.verify_platforms_existence(platform_names, log_info)
1657
1658    assert str(se.value) == '2'
1659    assert 'PLATFORM ERROR - unrecognized platform - some platform'
1660
1661
1662TESTDATA_12 = [
1663    (True),
1664    (False)
1665]
1666
1667@pytest.mark.parametrize(
1668    'exists',
1669    TESTDATA_12,
1670    ids=['links dir exists', 'links dir does not exist']
1671)
1672def test_testplan_create_build_dir_links(exists):
1673    outdir = os.path.join('out', 'dir')
1674    instances_linked = []
1675
1676    def mock_link(links_dir_path, instance):
1677        assert links_dir_path == os.path.join(outdir, 'twister_links')
1678        instances_linked.append(instance)
1679
1680    instances = {
1681        'inst0': mock.Mock(status=TwisterStatus.PASS),
1682        'inst1': mock.Mock(status=TwisterStatus.SKIP),
1683        'inst2': mock.Mock(status=TwisterStatus.ERROR),
1684    }
1685    expected_instances = [instances['inst0'], instances['inst2']]
1686
1687    testplan = TestPlan(env=mock.Mock(outdir=outdir))
1688    testplan._create_build_dir_link = mock.Mock(side_effect=mock_link)
1689    testplan.instances = instances
1690
1691    with mock.patch('os.path.exists', return_value=exists), \
1692         mock.patch('os.mkdir', mock.Mock()) as mkdir_mock:
1693        testplan.create_build_dir_links()
1694
1695    if not exists:
1696        mkdir_mock.assert_called_once()
1697
1698    assert expected_instances == instances_linked
1699
1700
1701TESTDATA_13 = [
1702    ('nt'),
1703    ('Linux')
1704]
1705
1706@pytest.mark.parametrize(
1707    'os_name',
1708    TESTDATA_13,
1709)
1710def test_testplan_create_build_dir_link(os_name):
1711    def mock_makedirs(path, exist_ok=False):
1712        assert exist_ok
1713        assert path == instance_build_dir
1714
1715    def mock_symlink(source, target):
1716        assert source == instance_build_dir
1717        assert target == os.path.join('links', 'path', 'test_0')
1718
1719    def mock_call(cmd, shell=False):
1720        assert shell
1721        assert cmd == ['mklink', '/J', os.path.join('links', 'path', 'test_0'),
1722                       instance_build_dir]
1723
1724    def mock_join(*paths):
1725        slash = "\\" if os.name == 'nt' else "/"
1726        return slash.join(paths)
1727
1728    with mock.patch('os.name', os_name), \
1729         mock.patch('os.symlink', side_effect=mock_symlink), \
1730         mock.patch('os.makedirs', side_effect=mock_makedirs), \
1731         mock.patch('subprocess.call', side_effect=mock_call), \
1732         mock.patch('os.path.join', side_effect=mock_join):
1733
1734        testplan = TestPlan(env=mock.Mock())
1735        links_dir_path = os.path.join('links', 'path')
1736        instance_build_dir = os.path.join('some', 'far', 'off', 'build', 'dir')
1737        instance = mock.Mock(build_dir=instance_build_dir)
1738        testplan._create_build_dir_link(links_dir_path, instance)
1739
1740        assert instance.build_dir == os.path.join('links', 'path', 'test_0')
1741        assert testplan.link_dir_counter == 1
1742
1743
1744TESTDATA_14 = [
1745    ('bad platform', 'dummy reason', [],
1746     'dummy status', 'dummy reason'),
1747    ('good platform', 'quarantined', [],
1748     TwisterStatus.ERROR, 'quarantined but is one of the integration platforms'),
1749    ('good platform', 'dummy reason', [{'type': 'command line filter'}],
1750     'dummy status', 'dummy reason'),
1751    ('good platform', 'dummy reason', [{'type': 'Skip filter'}],
1752     'dummy status', 'dummy reason'),
1753    ('good platform', 'dummy reason', [{'type': 'platform key filter'}],
1754     'dummy status', 'dummy reason'),
1755    ('good platform', 'dummy reason', [{'type': 'Toolchain filter'}],
1756     'dummy status', 'dummy reason'),
1757    ('good platform', 'dummy reason', [{'type': 'Module filter'}],
1758     'dummy status', 'dummy reason'),
1759    ('good platform', 'dummy reason', [{'type': 'testsuite filter'}],
1760     TwisterStatus.ERROR, 'dummy reason but is one of the integration platforms'),
1761]
1762
1763@pytest.mark.parametrize(
1764    'platform_name, reason, filters,' \
1765    ' expected_status, expected_reason',
1766    TESTDATA_14,
1767    ids=['wrong platform', 'quarantined', 'command line filtered',
1768         'skip filtered', 'platform key filtered', 'toolchain filtered',
1769         'module filtered', 'skip to error change']
1770)
1771def test_change_skip_to_error_if_integration(
1772    platform_name,
1773    reason,
1774    filters,
1775    expected_status,
1776    expected_reason
1777):
1778    options = mock.Mock()
1779    platform = mock.Mock()
1780    platform.name = platform_name
1781    testsuite = mock.Mock(integration_platforms=['good platform', 'a platform'])
1782    instance = mock.Mock(
1783        testsuite=testsuite,
1784        platform=platform,
1785        filters=filters,
1786        status='dummy status',
1787        reason=reason
1788    )
1789
1790    change_skip_to_error_if_integration(options, instance)
1791
1792    assert instance.status == expected_status
1793    assert instance.reason == expected_reason
1794