1#!/usr/bin/env python3
2# Copyright (c) 2020 Intel Corporation
3#
4# SPDX-License-Identifier: Apache-2.0
5
6'''
7This test file contains testsuites for testsuite.py module of twister
8'''
9import sys
10import os
11import mock
12import pytest
13
14from contextlib import nullcontext
15
16ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
17sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
18
19from twisterlib.testplan import TestPlan, change_skip_to_error_if_integration
20from twisterlib.testinstance import TestInstance
21from twisterlib.testsuite import TestSuite
22from twisterlib.platform import Platform
23from twisterlib.quarantine import Quarantine
24from twisterlib.error import TwisterRuntimeError
25
26
27def test_testplan_add_testsuites_short(class_testplan):
28    """ Testing add_testcase function of Testsuite class in twister """
29    # Test 1: Check the list of testsuites after calling add testsuites function is as expected
30    class_testplan.SAMPLE_FILENAME = 'test_sample_app.yaml'
31    class_testplan.TESTSUITE_FILENAME = 'test_data.yaml'
32    class_testplan.add_testsuites()
33
34    tests_rel_dir = 'scripts/tests/twister/test_data/testsuites/tests/'
35    expected_testsuites = ['test_b.check_1',
36                          'test_b.check_2',
37                          'test_c.check_1',
38                          'test_c.check_2',
39                          'test_a.check_1',
40                          'test_a.check_2',
41                          'test_d.check_1',
42                          'test_e.check_1',
43                          'sample_test.app',
44                          'test_config.main']
45    testsuite_list = []
46    for key in sorted(class_testplan.testsuites.keys()):
47        testsuite_list.append(os.path.basename(os.path.normpath(key)))
48    assert sorted(testsuite_list) == sorted(expected_testsuites)
49
50    # Test 2 : Assert Testcase name is expected & all testsuites values are testcase class objects
51    suite = class_testplan.testsuites.get(tests_rel_dir + 'test_a/test_a.check_1')
52    assert suite.name == tests_rel_dir + 'test_a/test_a.check_1'
53    assert all(isinstance(n, TestSuite) for n in class_testplan.testsuites.values())
54
55@pytest.mark.parametrize("board_root_dir", [("board_config_file_not_exist"), ("board_config")])
56def test_add_configurations_short(test_data, class_env, board_root_dir):
57    """ Testing add_configurations function of TestPlan class in Twister
58    Test : Asserting on default platforms list
59    """
60    class_env.board_roots = [os.path.abspath(test_data + board_root_dir)]
61    plan = TestPlan(class_env)
62    plan.parse_configuration(config_file=class_env.test_config)
63    if board_root_dir == "board_config":
64        plan.add_configurations()
65        assert sorted(plan.default_platforms) == sorted(['demo_board_1', 'demo_board_3'])
66    elif board_root_dir == "board_config_file_not_exist":
67        plan.add_configurations()
68        assert sorted(plan.default_platforms) != sorted(['demo_board_1'])
69
70
71def test_get_all_testsuites_short(class_testplan, all_testsuites_dict):
72    """ Testing get_all_testsuites function of TestPlan class in Twister """
73    plan = class_testplan
74    plan.testsuites = all_testsuites_dict
75    expected_tests = ['sample_test.app', 'test_a.check_1.1a',
76                      'test_a.check_1.1c',
77                      'test_a.check_1.2a', 'test_a.check_1.2b',
78                      'test_a.check_1.Unit_1c', 'test_a.check_1.unit_1a',
79                      'test_a.check_1.unit_1b', 'test_a.check_2.1a',
80                      'test_a.check_2.1c', 'test_a.check_2.2a',
81                      'test_a.check_2.2b', 'test_a.check_2.Unit_1c',
82                      'test_a.check_2.unit_1a', 'test_a.check_2.unit_1b',
83                      'test_b.check_1', 'test_b.check_2', 'test_c.check_1',
84                      'test_c.check_2', 'test_d.check_1.unit_1a',
85                      'test_d.check_1.unit_1b',
86                      'test_e.check_1.1a', 'test_e.check_1.1b',
87                      'test_config.main']
88
89    assert sorted(plan.get_all_tests()) == sorted(expected_tests)
90
91def test_get_platforms_short(class_testplan, platforms_list):
92    """ Testing get_platforms function of TestPlan class in Twister """
93    plan = class_testplan
94    plan.platforms = platforms_list
95    platform = plan.get_platform("demo_board_1")
96    assert isinstance(platform, Platform)
97    assert platform.name == "demo_board_1"
98
99TESTDATA_PART1 = [
100    ("toolchain_allow", ['gcc'], None, None, "Not in testsuite toolchain allow list"),
101    ("platform_allow", ['demo_board_1'], None, None, "Not in testsuite platform allow list"),
102    ("toolchain_exclude", ['zephyr'], None, None, "In test case toolchain exclude"),
103    ("platform_exclude", ['demo_board_2'], None, None, "In test case platform exclude"),
104    ("arch_exclude", ['x86'], None, None, "In test case arch exclude"),
105    ("arch_allow", ['arm'], None, None, "Not in test case arch allow list"),
106    ("skip", True, None, None, "Skip filter"),
107    ("tags", set(['sensor', 'bluetooth']), "ignore_tags", ['bluetooth'], "Excluded tags per platform (exclude_tags)"),
108    ("min_flash", "2024", "flash", "1024", "Not enough FLASH"),
109    ("min_ram", "500", "ram", "256", "Not enough RAM"),
110    ("None", "None", "env", ['BSIM_OUT_PATH', 'demo_env'], "Environment (BSIM_OUT_PATH, demo_env) not satisfied"),
111    ("build_on_all", True, None, None, "Platform is excluded on command line."),
112    ("build_on_all", True, "level", "foobar", "Unknown test level 'foobar'"),
113    (None, None, "supported_toolchains", ['gcc', 'xcc', 'xt-clang'], "Not supported by the toolchain"),
114]
115
116
117@pytest.mark.parametrize("tc_attribute, tc_value, plat_attribute, plat_value, expected_discards",
118                         TESTDATA_PART1)
119def test_apply_filters_part1(class_testplan, all_testsuites_dict, platforms_list,
120                             tc_attribute, tc_value, plat_attribute, plat_value, expected_discards):
121    """ Testing apply_filters function of TestPlan class in Twister
122    Part 1: Response of apply_filters function have
123            appropriate values according to the filters
124    """
125    plan = class_testplan
126    if tc_attribute is None and plat_attribute is None:
127        plan.apply_filters()
128
129    plan.platforms = platforms_list
130    plan.platform_names = [p.name for p in platforms_list]
131    plan.testsuites = all_testsuites_dict
132    for plat in plan.platforms:
133        if plat_attribute == "ignore_tags":
134            plat.ignore_tags = plat_value
135        if plat_attribute == "flash":
136            plat.flash = plat_value
137        if plat_attribute == "ram":
138            plat.ram = plat_value
139        if plat_attribute == "env":
140            plat.env = plat_value
141            plat.env_satisfied = False
142        if plat_attribute == "supported_toolchains":
143            plat.supported_toolchains = plat_value
144    for _, testcase in plan.testsuites.items():
145        if tc_attribute == "toolchain_allow":
146            testcase.toolchain_allow = tc_value
147        if tc_attribute == "platform_allow":
148            testcase.platform_allow = tc_value
149        if tc_attribute == "toolchain_exclude":
150            testcase.toolchain_exclude = tc_value
151        if tc_attribute == "platform_exclude":
152            testcase.platform_exclude = tc_value
153        if tc_attribute == "arch_exclude":
154            testcase.arch_exclude = tc_value
155        if tc_attribute == "arch_allow":
156            testcase.arch_allow = tc_value
157        if tc_attribute == "skip":
158            testcase.skip = tc_value
159        if tc_attribute == "tags":
160            testcase.tags = tc_value
161        if tc_attribute == "min_flash":
162            testcase.min_flash = tc_value
163        if tc_attribute == "min_ram":
164            testcase.min_ram = tc_value
165
166    if plat_attribute == "level":
167        plan.options.level = plat_value
168
169    if tc_attribute == "build_on_all":
170        for _, testcase in plan.testsuites.items():
171            testcase.build_on_all = tc_value
172        plan.apply_filters(exclude_platform=['demo_board_1'])
173    elif plat_attribute == "supported_toolchains":
174        plan.apply_filters(force_toolchain=False,
175                                                 exclude_platform=['demo_board_1'],
176                                                 platform=['demo_board_2'])
177    elif tc_attribute is None and plat_attribute is None:
178        plan.apply_filters()
179    else:
180        plan.apply_filters(exclude_platform=['demo_board_1'],
181                                                 platform=['demo_board_2'])
182
183    filtered_instances = list(filter(lambda item:  item.status == "filtered", plan.instances.values()))
184    for d in filtered_instances:
185        assert d.reason == expected_discards
186
187TESTDATA_PART2 = [
188    ("runnable", "True", "Not runnable on device"),
189    ("exclude_tag", ['test_a'], "Command line testsuite exclude filter"),
190    ("run_individual_tests", ['scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1'], "TestSuite name filter"),
191    ("arch", ['arm_test'], "Command line testsuite arch filter"),
192    ("tag", ['test_d'], "Command line testsuite tag filter")
193    ]
194
195
196@pytest.mark.parametrize("extra_filter, extra_filter_value, expected_discards", TESTDATA_PART2)
197def test_apply_filters_part2(class_testplan, all_testsuites_dict,
198                             platforms_list, extra_filter, extra_filter_value, expected_discards):
199    """ Testing apply_filters function of TestPlan class in Twister
200    Part 2 : Response of apply_filters function (discard dictionary) have
201             appropriate values according to the filters
202    """
203
204    class_testplan.platforms = platforms_list
205    class_testplan.platform_names = [p.name for p in platforms_list]
206    class_testplan.testsuites = all_testsuites_dict
207    kwargs = {
208        extra_filter : extra_filter_value,
209        "exclude_platform" : [
210            'demo_board_1'
211            ],
212        "platform" : [
213            'demo_board_2'
214            ]
215        }
216    class_testplan.apply_filters(**kwargs)
217    filtered_instances = list(filter(lambda item:  item.status == "filtered", class_testplan.instances.values()))
218    for d in filtered_instances:
219        assert d.reason == expected_discards
220
221
222TESTDATA_PART3 = [
223    (20, 20, -1, 0),
224    (-2, -1, 10, 20),
225    (0, 0, 0, 0)
226    ]
227
228@pytest.mark.parametrize("tc_min_flash, plat_flash, tc_min_ram, plat_ram",
229                         TESTDATA_PART3)
230def test_apply_filters_part3(class_testplan, all_testsuites_dict, platforms_list,
231                             tc_min_flash, plat_flash, tc_min_ram, plat_ram):
232    """ Testing apply_filters function of TestPlan class in Twister
233    Part 3 : Testing edge cases for ram and flash values of platforms & testsuites
234    """
235    class_testplan.platforms = platforms_list
236    class_testplan.platform_names = [p.name for p in platforms_list]
237    class_testplan.testsuites = all_testsuites_dict
238
239    for plat in class_testplan.platforms:
240        plat.flash = plat_flash
241        plat.ram = plat_ram
242    for _, testcase in class_testplan.testsuites.items():
243        testcase.min_ram = tc_min_ram
244        testcase.min_flash = tc_min_flash
245    class_testplan.apply_filters(exclude_platform=['demo_board_1'],
246                                             platform=['demo_board_2'])
247
248    filtered_instances = list(filter(lambda item:  item.status == "filtered", class_testplan.instances.values()))
249    assert not filtered_instances
250
251def test_add_instances_short(tmp_path, class_env, all_testsuites_dict, platforms_list):
252    """ Testing add_instances() function of TestPlan class in Twister
253    Test 1: instances dictionary keys have expected values (Platform Name + Testcase Name)
254    Test 2: Values of 'instances' dictionary in Testsuite class are an
255	        instance of 'TestInstance' class
256    Test 3: Values of 'instances' dictionary have expected values.
257    """
258    class_env.outdir = tmp_path
259    plan = TestPlan(class_env)
260    plan.platforms = platforms_list
261    platform = plan.get_platform("demo_board_2")
262    instance_list = []
263    for _, testcase in all_testsuites_dict.items():
264        instance = TestInstance(testcase, platform, class_env.outdir)
265        instance_list.append(instance)
266    plan.add_instances(instance_list)
267    assert list(plan.instances.keys()) == \
268		   [platform.name + '/' + s for s in list(all_testsuites_dict.keys())]
269    assert all(isinstance(n, TestInstance) for n in list(plan.instances.values()))
270    assert list(plan.instances.values()) == instance_list
271
272
273QUARANTINE_BASIC = {
274    'demo_board_1/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1' : 'a1 on board_1 and board_3',
275    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1' : 'a1 on board_1 and board_3'
276}
277
278QUARANTINE_WITH_REGEXP = {
279    'demo_board_2/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_2' : 'a2 and c2 on x86',
280    'demo_board_1/scripts/tests/twister/test_data/testsuites/tests/test_d/test_d.check_1' : 'all test_d',
281    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_d/test_d.check_1' : 'all test_d',
282    'demo_board_2/scripts/tests/twister/test_data/testsuites/tests/test_d/test_d.check_1' : 'all test_d',
283    'demo_board_2/scripts/tests/twister/test_data/testsuites/tests/test_c/test_c.check_2' : 'a2 and c2 on x86'
284}
285
286QUARANTINE_PLATFORM = {
287    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1' : 'all on board_3',
288    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_2' : 'all on board_3',
289    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_d/test_d.check_1' : 'all on board_3',
290    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_b/test_b.check_1' : 'all on board_3',
291    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_b/test_b.check_2' : 'all on board_3',
292    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_c/test_c.check_1' : 'all on board_3',
293    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_c/test_c.check_2' : 'all on board_3',
294    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_e/test_e.check_1' : 'all on board_3',
295    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_config/test_config.main' : 'all on board_3'
296}
297
298QUARANTINE_MULTIFILES = {
299    **QUARANTINE_BASIC,
300    **QUARANTINE_WITH_REGEXP
301}
302
303@pytest.mark.parametrize(
304    ("quarantine_files, quarantine_verify, expected_val"),
305    [
306        (['basic.yaml'], False, QUARANTINE_BASIC),
307        (['with_regexp.yaml'], False, QUARANTINE_WITH_REGEXP),
308        (['with_regexp.yaml'], True, QUARANTINE_WITH_REGEXP),
309        (['platform.yaml'], False, QUARANTINE_PLATFORM),
310        (['basic.yaml', 'with_regexp.yaml'], False, QUARANTINE_MULTIFILES),
311        (['empty.yaml'], False, {})
312    ],
313    ids=[
314        'basic',
315        'with_regexp',
316        'quarantine_verify',
317        'platform',
318        'multifiles',
319        'empty'
320    ])
321def test_quarantine_short(class_testplan, platforms_list, test_data,
322                    quarantine_files, quarantine_verify, expected_val):
323    """ Testing quarantine feature in Twister
324    """
325    class_testplan.options.all = True
326    class_testplan.platforms = platforms_list
327    class_testplan.platform_names = [p.name for p in platforms_list]
328    class_testplan.TESTSUITE_FILENAME = 'test_data.yaml'
329    class_testplan.add_testsuites()
330
331    quarantine_list = [
332        os.path.join(test_data, 'quarantines', quarantine_file) for quarantine_file in quarantine_files
333    ]
334    class_testplan.quarantine = Quarantine(quarantine_list)
335    class_testplan.options.quarantine_verify = quarantine_verify
336    class_testplan.apply_filters()
337
338    for testname, instance in class_testplan.instances.items():
339        if quarantine_verify:
340            if testname in expected_val:
341                assert not instance.status
342            else:
343                assert instance.status == 'filtered'
344                assert instance.reason == "Not under quarantine"
345        else:
346            if testname in expected_val:
347                assert instance.status == 'filtered'
348                assert instance.reason == "Quarantine: " + expected_val[testname]
349            else:
350                assert not instance.status
351
352
353TESTDATA_PART4 = [
354    (os.path.join('test_d', 'test_d.check_1'), ['dummy'],
355     None, 'Snippet not supported'),
356    (os.path.join('test_c', 'test_c.check_1'), ['cdc-acm-console'],
357     0, None),
358    (os.path.join('test_d', 'test_d.check_1'), ['dummy', 'cdc-acm-console'],
359     2, 'Snippet not supported'),
360]
361
362@pytest.mark.parametrize(
363    'testpath, required_snippets, expected_filtered_len, expected_filtered_reason',
364    TESTDATA_PART4,
365    ids=['app', 'global', 'multiple']
366)
367def test_required_snippets_short(
368    class_testplan,
369    all_testsuites_dict,
370    platforms_list,
371    testpath,
372    required_snippets,
373    expected_filtered_len,
374    expected_filtered_reason
375):
376    """ Testing required_snippets function of TestPlan class in Twister """
377    plan = class_testplan
378    testpath = os.path.join('scripts', 'tests', 'twister', 'test_data',
379                            'testsuites', 'tests', testpath)
380    testsuite = class_testplan.testsuites.get(testpath)
381    plan.platforms = platforms_list
382    plan.platform_names = [p.name for p in platforms_list]
383    plan.testsuites = {testpath: testsuite}
384
385    print(plan.testsuites)
386
387    for _, testcase in plan.testsuites.items():
388        testcase.exclude_platform = []
389        testcase.required_snippets = required_snippets
390        testcase.build_on_all = True
391
392    plan.apply_filters()
393
394    filtered_instances = list(
395        filter(lambda item: item.status == "filtered", plan.instances.values())
396    )
397    if expected_filtered_len is not None:
398        assert len(filtered_instances) == expected_filtered_len
399    if expected_filtered_reason is not None:
400        for d in filtered_instances:
401            assert d.reason == expected_filtered_reason
402
403
404def test_testplan_get_level():
405    testplan = TestPlan(env=mock.Mock())
406    lvl1 = mock.Mock()
407    lvl1.name = 'a lvl'
408    lvl2 = mock.Mock()
409    lvl2.name = 'a lvl'
410    lvl3 = mock.Mock()
411    lvl3.name = 'other lvl'
412    testplan.levels.append(lvl1)
413    testplan.levels.append(lvl2)
414    testplan.levels.append(lvl3)
415
416    name = 'a lvl'
417
418    res = testplan.get_level(name)
419    assert res == lvl1
420
421    res = testplan.get_level(name)
422    assert res == lvl1
423
424    lvl_missed = mock.Mock()
425    lvl_missed.name = 'missed lvl'
426    res = testplan.get_level('missed_lvl')
427    assert res is None
428
429    testplan.levels.remove(lvl1)
430    testplan.levels.remove(lvl2)
431
432    res = testplan.get_level(name)
433    assert res is None
434
435
436TESTDATA_1 = [
437    ('', {}),
438    (
439"""\
440levels:
441  - name: lvl1
442    adds:
443      - sc1
444      - sc2
445    inherits: []
446  - name: lvl2
447    adds:
448      - sc1-1
449      - sc1-2
450    inherits: [lvl1]
451""",
452    {
453        'lvl1': ['sc1', 'sc2'],
454        'lvl2': ['sc1-1', 'sc1-2', 'sc1', 'sc2']
455    }
456    ),
457]
458
459@pytest.mark.parametrize(
460    'config_yaml, expected_scenarios',
461    TESTDATA_1,
462    ids=['no config', 'valid config']
463)
464def test_testplan_parse_configuration(tmp_path, config_yaml, expected_scenarios):
465    testplan = TestPlan(env=mock.Mock())
466    testplan.scenarios = ['sc1', 'sc1-1', 'sc1-2', 'sc2']
467
468    tmp_config_file = tmp_path / 'config_file.yaml'
469    if config_yaml:
470        tmp_config_file.write_text(config_yaml)
471
472    with pytest.raises(TwisterRuntimeError) if not config_yaml else nullcontext():
473        testplan.parse_configuration(tmp_config_file)
474
475    if not testplan.levels:
476        assert expected_scenarios == {}
477    for level in testplan.levels:
478        assert sorted(level.scenarios) == sorted(expected_scenarios[level.name])
479
480
481TESTDATA_2 = [
482    ([], [], False),
483    (['ts1.tc3'], [], True),
484    (['ts2.tc2'], ['- ts2'], False),
485]
486
487@pytest.mark.parametrize(
488    'sub_tests, expected_outs, expect_error',
489    TESTDATA_2,
490    ids=['no subtests', 'subtests not found', 'valid subtests']
491)
492def test_testplan_find_subtests(
493    capfd,
494    sub_tests,
495    expected_outs,
496    expect_error
497):
498    testplan = TestPlan(env=mock.Mock())
499    testplan.options = mock.Mock(sub_test=sub_tests)
500    testplan.run_individual_testsuite = []
501    testplan.testsuites = {
502        'ts1': mock.Mock(
503            testcases=[
504                mock.Mock(),
505                mock.Mock(),
506            ]
507        ),
508        'ts2': mock.Mock(
509            testcases=[
510                mock.Mock(),
511                mock.Mock(),
512                mock.Mock(),
513            ]
514        )
515    }
516    testplan.testsuites['ts1'].name = 'ts1'
517    testplan.testsuites['ts1'].testcases[0].name = 'ts1.tc1'
518    testplan.testsuites['ts1'].testcases[1].name = 'ts1.tc2'
519    testplan.testsuites['ts2'].name = 'ts2'
520    testplan.testsuites['ts2'].testcases[0].name = 'ts2.tc1'
521    testplan.testsuites['ts2'].testcases[1].name = 'ts2.tc2'
522    testplan.testsuites['ts2'].testcases[2].name = 'ts2.tc3'
523
524    with pytest.raises(TwisterRuntimeError) if expect_error else nullcontext():
525        testplan.find_subtests()
526
527    out, err = capfd.readouterr()
528    sys.stdout.write(out)
529    sys.stdout.write(err)
530
531    assert all([printout in out for printout in expected_outs])
532
533
534TESTDATA_3 = [
535    (0, 0, [], False, [], TwisterRuntimeError, []),
536    (1, 1, [], False, [], TwisterRuntimeError, []),
537    (1, 0, [], True, [], TwisterRuntimeError, ['No quarantine list given to be verified']),
538#    (1, 0, ['qfile.yaml'], False, ['# empty'], None, ['Quarantine file qfile.yaml is empty']),
539    (1, 0, ['qfile.yaml'], False, ['- platforms:\n  - demo_board_3\n  comment: "board_3"'], None, []),
540]
541
542@pytest.mark.parametrize(
543    'added_testsuite_count, load_errors, ql, qv, ql_data, exception, expected_logs',
544    TESTDATA_3,
545    ids=['no tests', 'load errors', 'quarantine verify without quarantine list',
546#         'empty quarantine file',
547         'valid quarantine file']
548)
549def test_testplan_discover(
550    tmp_path,
551    caplog,
552    added_testsuite_count,
553    load_errors,
554    ql,
555    qv,
556    ql_data,
557    exception,
558    expected_logs
559):
560    for qf, data in zip(ql, ql_data):
561        tmp_qf = tmp_path / qf
562        tmp_qf.write_text(data)
563
564    testplan = TestPlan(env=mock.Mock())
565    testplan.options = mock.Mock(
566        test='ts1',
567        quarantine_list=[tmp_path / qf for qf in ql],
568        quarantine_verify=qv,
569    )
570    testplan.testsuites = {
571        'ts1': mock.Mock(id=1),
572        'ts2': mock.Mock(id=2),
573    }
574    testplan.run_individual_testsuite = 'ts0'
575    testplan.load_errors = load_errors
576    testplan.add_testsuites = mock.Mock(return_value=added_testsuite_count)
577    testplan.find_subtests = mock.Mock()
578    testplan.report_duplicates = mock.Mock()
579    testplan.parse_configuration = mock.Mock()
580    testplan.add_configurations = mock.Mock()
581
582    with pytest.raises(exception) if exception else nullcontext():
583        testplan.discover()
584
585    testplan.add_testsuites.assert_called_once_with(testsuite_filter='ts1')
586    assert all([log in caplog.text for log in expected_logs])
587
588
589TESTDATA_4 = [
590    (None, None, None, None, '00',
591     TwisterRuntimeError, [], []),
592    (None, True, None, None, '6/4',
593     TwisterRuntimeError, set(['t-p3', 't-p4', 't-p1', 't-p2']), []),
594    (None, None, 'load_tests.json', None, '0/4',
595     TwisterRuntimeError, set(['lt-p1', 'lt-p3', 'lt-p4', 'lt-p2']), []),
596    ('suffix', None, None, True, '2/4',
597     None, set(['ts-p4', 'ts-p2', 'ts-p3']), [2, 4]),
598]
599
600@pytest.mark.parametrize(
601    'report_suffix, only_failed, load_tests, test_only, subset,' \
602    ' exception, expected_selected_platforms, expected_generate_subset_args',
603    TESTDATA_4,
604    ids=['apply_filters only', 'only failed', 'load tests', 'test only']
605)
606def test_testplan_load(
607    tmp_path,
608    report_suffix,
609    only_failed,
610    load_tests,
611    test_only,
612    subset,
613    exception,
614    expected_selected_platforms,
615    expected_generate_subset_args
616):
617    twister_json = """\
618{
619    "testsuites": [
620        {
621            "name": "ts1",
622            "platform": "t-p1",
623            "testcases": []
624        },
625        {
626            "name": "ts1",
627            "platform": "t-p2",
628            "testcases": []
629        },
630        {
631            "name": "ts2",
632            "platform": "t-p3",
633            "testcases": []
634        },
635        {
636            "name": "ts2",
637            "platform": "t-p4",
638            "testcases": []
639        }
640    ]
641}
642"""
643    twister_file = tmp_path / 'twister.json'
644    twister_file.write_text(twister_json)
645
646    twister_suffix_json = """\
647{
648    "testsuites": [
649        {
650            "name": "ts1",
651            "platform": "ts-p1",
652            "testcases": []
653        },
654        {
655            "name": "ts1",
656            "platform": "ts-p2",
657            "testcases": []
658        },
659        {
660            "name": "ts2",
661            "platform": "ts-p3",
662            "testcases": []
663        },
664        {
665            "name": "ts2",
666            "platform": "ts-p4",
667            "testcases": []
668        }
669    ]
670}
671"""
672    twister_suffix_file = tmp_path / 'twister_suffix.json'
673    twister_suffix_file.write_text(twister_suffix_json)
674
675    load_tests_json = """\
676{
677    "testsuites": [
678        {
679            "name": "ts1",
680            "platform": "lt-p1",
681            "testcases": []
682        },
683        {
684            "name": "ts1",
685            "platform": "lt-p2",
686            "testcases": []
687        },
688        {
689            "name": "ts2",
690            "platform": "lt-p3",
691            \"testcases": []
692        },
693        {
694            "name": "ts2",
695            "platform": "lt-p4",
696            "testcases": []
697        }
698    ]
699}
700"""
701    load_tests_file = tmp_path / 'load_tests.json'
702    load_tests_file.write_text(load_tests_json)
703
704    testplan = TestPlan(env=mock.Mock(outdir=tmp_path))
705    testplan.testsuites = {
706        'ts1': mock.Mock(testcases=[], extra_configs=[]),
707        'ts2': mock.Mock(testcases=[], extra_configs=[]),
708    }
709    testplan.testsuites['ts1'].name = 'ts1'
710    testplan.testsuites['ts2'].name = 'ts2'
711    testplan.options = mock.Mock(
712        report_summary=None,
713        outdir=tmp_path,
714        report_suffix=report_suffix,
715        only_failed=only_failed,
716        load_tests=tmp_path / load_tests if load_tests else None,
717        test_only=test_only,
718        exclude_platform=['t-p0', 't-p1',
719                          'ts-p0', 'ts-p1',
720                          'lt-p0', 'lt-p1'],
721        platform=['t-p1', 't-p2', 't-p3', 't-p4',
722                  'ts-p1', 'ts-p2', 'ts-p3', 'ts-p4',
723                  'lt-p1', 'lt-p2', 'lt-p3', 'lt-p4'],
724        subset=subset
725    )
726    testplan.platforms=[mock.Mock(), mock.Mock(), mock.Mock(), mock.Mock(),
727                        mock.Mock(), mock.Mock(), mock.Mock(), mock.Mock(),
728                        mock.Mock(), mock.Mock(), mock.Mock(), mock.Mock()]
729    testplan.platforms[0].name = 't-p1'
730    testplan.platforms[1].name = 't-p2'
731    testplan.platforms[2].name = 't-p3'
732    testplan.platforms[3].name = 't-p4'
733    testplan.platforms[4].name = 'ts-p1'
734    testplan.platforms[5].name = 'ts-p2'
735    testplan.platforms[6].name = 'ts-p3'
736    testplan.platforms[7].name = 'ts-p4'
737    testplan.platforms[8].name = 'lt-p1'
738    testplan.platforms[9].name = 'lt-p2'
739    testplan.platforms[10].name = 'lt-p3'
740    testplan.platforms[11].name = 'lt-p4'
741    testplan.platforms[0].normalized_name = 't-p1'
742    testplan.platforms[1].normalized_name = 't-p2'
743    testplan.platforms[2].normalized_name = 't-p3'
744    testplan.platforms[3].normalized_name = 't-p4'
745    testplan.platforms[4].normalized_name = 'ts-p1'
746    testplan.platforms[5].normalized_name = 'ts-p2'
747    testplan.platforms[6].normalized_name = 'ts-p3'
748    testplan.platforms[7].normalized_name = 'ts-p4'
749    testplan.platforms[8].normalized_name = 'lt-p1'
750    testplan.platforms[9].normalized_name = 'lt-p2'
751    testplan.platforms[10].normalized_name = 'lt-p3'
752    testplan.platforms[11].normalized_name = 'lt-p4'
753    testplan.generate_subset = mock.Mock()
754    testplan.apply_filters = mock.Mock()
755
756    with mock.patch('twisterlib.testinstance.TestInstance.create_overlay', mock.Mock()), \
757         pytest.raises(exception) if exception else nullcontext():
758        testplan.load()
759
760    assert testplan.selected_platforms == expected_selected_platforms
761    if expected_generate_subset_args:
762        testplan.generate_subset.assert_called_once_with(*expected_generate_subset_args)
763    else:
764        testplan.generate_subset.assert_not_called()
765
766
767TESTDATA_5 = [
768    (False, False, None, 1, 2,
769     ['plat1/testA', 'plat1/testB', 'plat1/testC',
770      'plat3/testA', 'plat3/testB', 'plat3/testC']),
771    (False, False, None, 1, 5,
772     ['plat1/testA',
773      'plat3/testA', 'plat3/testB', 'plat3/testC']),
774    (False, False, None, 2, 2,
775     ['plat2/testA', 'plat2/testB']),
776    (True, False, None, 1, 2,
777     ['plat1/testA', 'plat2/testA', 'plat1/testB',
778      'plat3/testA', 'plat3/testB', 'plat3/testC']),
779    (True, False, None, 2, 2,
780     ['plat2/testB', 'plat1/testC']),
781    (True, True, 123, 1, 2,
782     ['plat2/testA', 'plat2/testB', 'plat1/testC',
783      'plat3/testB', 'plat3/testA', 'plat3/testC']),
784    (True, True, 123, 2, 2,
785     ['plat1/testB', 'plat1/testA']),
786]
787
788@pytest.mark.parametrize(
789    'device_testing, shuffle, seed, subset, sets, expected_subset',
790    TESTDATA_5,
791    ids=['subset 1', 'subset 1 out of 5', 'subset 2',
792         'device testing, subset 1', 'device testing, subset 2',
793         'device testing, shuffle with seed, subset 1',
794         'device testing, shuffle with seed, subset 2']
795)
796def test_testplan_generate_subset(
797    device_testing,
798    shuffle,
799    seed,
800    subset,
801    sets,
802    expected_subset
803):
804    testplan = TestPlan(env=mock.Mock())
805    testplan.options = mock.Mock(
806        device_testing=device_testing,
807        shuffle_tests=shuffle,
808        shuffle_tests_seed=seed
809    )
810    testplan.instances = {
811        'plat1/testA': mock.Mock(status=None),
812        'plat1/testB': mock.Mock(status=None),
813        'plat1/testC': mock.Mock(status=None),
814        'plat2/testA': mock.Mock(status=None),
815        'plat2/testB': mock.Mock(status=None),
816        'plat3/testA': mock.Mock(status='skipped'),
817        'plat3/testB': mock.Mock(status='skipped'),
818        'plat3/testC': mock.Mock(status='error'),
819    }
820
821    testplan.generate_subset(subset, sets)
822
823    assert [instance for instance in testplan.instances.keys()] == \
824           expected_subset
825
826
827def test_testplan_handle_modules():
828    testplan = TestPlan(env=mock.Mock())
829
830    modules = [mock.Mock(meta={'name': 'name1'}),
831               mock.Mock(meta={'name': 'name2'})]
832
833    with mock.patch('twisterlib.testplan.parse_modules', return_value=modules):
834        testplan.handle_modules()
835
836    assert testplan.modules == ['name1', 'name2']
837
838
839TESTDATA_6 = [
840    (True, False, False, 0, 'report_test_tree'),
841    (True, True, False, 0, 'report_test_tree'),
842    (True, False, True, 0, 'report_test_tree'),
843    (True, True, True, 0, 'report_test_tree'),
844    (False, True, False, 0, 'report_test_list'),
845    (False, True, True, 0, 'report_test_list'),
846    (False, False, True, 0, 'report_tag_list'),
847    (False, False, False, 1, None),
848]
849
850@pytest.mark.parametrize(
851    'test_tree, list_tests, list_tags, expected_res, expected_method',
852    TESTDATA_6,
853    ids=['test tree', 'test tree + test list', 'test tree + tag list',
854         'test tree + test list + tag list', 'test list',
855         'test list + tag list', 'tag list', 'no report']
856)
857def test_testplan_report(
858    test_tree,
859    list_tests,
860    list_tags,
861    expected_res,
862    expected_method
863):
864    testplan = TestPlan(env=mock.Mock())
865    testplan.report_test_tree = mock.Mock()
866    testplan.report_test_list = mock.Mock()
867    testplan.report_tag_list = mock.Mock()
868
869    testplan.options = mock.Mock(
870        test_tree=test_tree,
871        list_tests=list_tests,
872        list_tags=list_tags,
873    )
874
875    res = testplan.report()
876
877    assert res == expected_res
878
879    methods = ['report_test_tree', 'report_test_list', 'report_tag_list']
880    if expected_method:
881        methods.remove(expected_method)
882        getattr(testplan, expected_method).assert_called_once()
883    for method in methods:
884        getattr(testplan, method).assert_not_called()
885
886
887TESTDATA_7 = [
888    (
889        [
890            mock.Mock(
891                yamlfile='a.yaml',
892                scenarios=['scenario1', 'scenario2']
893            ),
894            mock.Mock(
895                yamlfile='b.yaml',
896                scenarios=['scenario1']
897            )
898        ],
899        TwisterRuntimeError,
900        'Duplicated test scenarios found:\n' \
901        '- scenario1 found in:\n' \
902        '  - a.yaml\n' \
903        '  - b.yaml\n',
904        []
905    ),
906    (
907        [
908            mock.Mock(
909                yamlfile='a.yaml',
910                scenarios=['scenario.a.1', 'scenario.a.2']
911            ),
912            mock.Mock(
913                yamlfile='b.yaml',
914                scenarios=['scenario.b.1']
915            )
916        ],
917        None,
918        None,
919        ['No duplicates found.']
920    ),
921]
922
923@pytest.mark.parametrize(
924    'testsuites, expected_error, error_msg, expected_logs',
925    TESTDATA_7,
926    ids=['a duplicate', 'no duplicates']
927)
928def test_testplan_report_duplicates(
929    capfd,
930    caplog,
931    testsuites,
932    expected_error,
933    error_msg,
934    expected_logs
935):
936    def mock_get(name):
937        return list(filter(lambda x: name in x.scenarios, testsuites))
938
939    testplan = TestPlan(env=mock.Mock())
940    testplan.scenarios = [scenario for testsuite in testsuites \
941                                   for scenario in testsuite.scenarios]
942    testplan.get_testsuite = mock.Mock(side_effect=mock_get)
943
944    with pytest.raises(expected_error) if expected_error is not None else \
945         nullcontext() as err:
946        testplan.report_duplicates()
947
948    if expected_error:
949        assert str(err._excinfo[1]) == error_msg
950
951    assert all([log in caplog.text for log in expected_logs])
952
953
954def test_testplan_report_tag_list(capfd):
955    testplan = TestPlan(env=mock.Mock())
956    testplan.testsuites = {
957        'testsuite0': mock.Mock(tags=set(['tag1', 'tag2'])),
958        'testsuite1': mock.Mock(tags=set(['tag1', 'tag2', 'tag3'])),
959        'testsuite2': mock.Mock(tags=set(['tag1', 'tag3'])),
960        'testsuite3': mock.Mock(tags=set(['tag']))
961    }
962
963    testplan.report_tag_list()
964
965    out,err = capfd.readouterr()
966    sys.stdout.write(out)
967    sys.stderr.write(err)
968
969    assert '- tag' in out
970    assert '- tag1' in out
971    assert '- tag2' in out
972    assert '- tag3' in out
973
974
975def test_testplan_report_test_tree(capfd):
976    testplan = TestPlan(env=mock.Mock())
977    testplan.get_tests_list = mock.Mock(
978        return_value=['1.dummy.case.1', '1.dummy.case.2',
979                      '2.dummy.case.1', '2.dummy.case.2',
980                      '3.dummy.case.1', '3.dummy.case.2',
981                      '4.dummy.case.1', '4.dummy.case.2',
982                      '5.dummy.case.1', '5.dummy.case.2',
983                      'sample.group1.case1', 'sample.group1.case2',
984                      'sample.group2.case', 'sample.group3.case1',
985                      'sample.group3.case2', 'sample.group3.case3']
986    )
987
988    testplan.report_test_tree()
989
990    out,err = capfd.readouterr()
991    sys.stdout.write(out)
992    sys.stderr.write(err)
993
994    expected = """
995Testsuite
996├── Samples
997│   ├── group1
998│   │   ├── sample.group1.case1
999│   │   └── sample.group1.case2
1000│   ├── group2
1001│   │   └── sample.group2.case
1002│   └── group3
1003│       ├── sample.group3.case1
1004│       ├── sample.group3.case2
1005│       └── sample.group3.case3
1006└── Tests
1007    ├── 1
1008    │   └── dummy
1009    │       ├── 1.dummy.case.1
1010    │       └── 1.dummy.case.2
1011    ├── 2
1012    │   └── dummy
1013    │       ├── 2.dummy.case.1
1014    │       └── 2.dummy.case.2
1015    ├── 3
1016    │   └── dummy
1017    │       ├── 3.dummy.case.1
1018    │       └── 3.dummy.case.2
1019    ├── 4
1020    │   └── dummy
1021    │       ├── 4.dummy.case.1
1022    │       └── 4.dummy.case.2
1023    └── 5
1024        └── dummy
1025            ├── 5.dummy.case.1
1026            └── 5.dummy.case.2
1027"""
1028    expected = expected[1:]
1029
1030    assert expected in out
1031
1032
1033def test_testplan_report_test_list(capfd):
1034    testplan = TestPlan(env=mock.Mock())
1035    testplan.get_tests_list = mock.Mock(
1036        return_value=['4.dummy.case.1', '4.dummy.case.2',
1037                      '3.dummy.case.2', '2.dummy.case.2',
1038                      '1.dummy.case.1', '1.dummy.case.2',
1039                      '3.dummy.case.1', '2.dummy.case.1',
1040                      '5.dummy.case.1', '5.dummy.case.2']
1041    )
1042
1043    testplan.report_test_list()
1044
1045    out,err = capfd.readouterr()
1046    sys.stdout.write(out)
1047    sys.stderr.write(err)
1048
1049    assert ' - 1.dummy.case.1\n' \
1050           ' - 1.dummy.case.2\n' \
1051           ' - 2.dummy.case.1\n' \
1052           ' - 2.dummy.case.2\n' \
1053           ' - 3.dummy.case.1\n' \
1054           ' - 3.dummy.case.2\n' \
1055           ' - 4.dummy.case.1\n' \
1056           ' - 4.dummy.case.2\n' \
1057           ' - 5.dummy.case.1\n' \
1058           ' - 5.dummy.case.2\n' \
1059           '10 total.' in out
1060
1061
1062def test_testplan_info(capfd):
1063    TestPlan.info('dummy text')
1064
1065    out, err = capfd.readouterr()
1066    sys.stdout.write(out)
1067    sys.stderr.write(err)
1068
1069    assert 'dummy text\n' in out
1070
1071
1072TESTDATA_8 = [
1073    (False, False, ['p1e2', 'p2', 'p3', 'p3@B'], ['p2']),
1074    (False, True, None, None),
1075    (True, False, ['p1e2', 'p2', 'p3', 'p3@B'], ['p3']),
1076]
1077
1078@pytest.mark.parametrize(
1079    'override_default_platforms, create_duplicate, expected_platform_names, expected_defaults',
1080    TESTDATA_8,
1081    ids=['no override defaults', 'create duplicate', 'override defaults']
1082)
1083def test_testplan_add_configurations(
1084    tmp_path,
1085    override_default_platforms,
1086    create_duplicate,
1087    expected_platform_names,
1088    expected_defaults
1089):
1090    # tmp_path
1091    # └ boards  <- board root
1092    #   ├ x86
1093    #   │ ├ p1
1094    #   │ | ├ p1e1.yaml
1095    #   │ | └ p1e2.yaml
1096    #   │ └ p2
1097    #   │   ├ p2.yaml
1098    #   │   └ p2-1.yaml <- duplicate
1099    #   │   └ p2-2.yaml <- load error
1100    #   └ arm
1101    #     └ p3
1102    #       ├ p3.yaml
1103    #       └ p3_B.conf
1104
1105    tmp_board_root_dir = tmp_path / 'boards'
1106    tmp_board_root_dir.mkdir()
1107
1108    tmp_arch1_dir = tmp_board_root_dir / 'x86'
1109    tmp_arch1_dir.mkdir()
1110
1111    tmp_p1_dir = tmp_arch1_dir / 'p1'
1112    tmp_p1_dir.mkdir()
1113
1114    p1e1_bs_yaml = """\
1115boards:
1116
1117  - name: ple1
1118    vendor: zephyr
1119    socs:
1120      - name: unit_testing
1121  - name: ple2
1122    vendor: zephyr
1123    socs:
1124      - name: unit_testing
1125"""
1126    p1e1_yamlfile = tmp_p1_dir / 'board.yml'
1127    p1e1_yamlfile.write_text(p1e1_bs_yaml)
1128
1129    p1e1_yaml = """\
1130identifier: p1e1
1131name: Platform 1 Edition 1
1132type: native
1133arch: x86
1134vendor: vendor1
1135toolchain:
1136  - zephyr
1137twister: False
1138"""
1139    p1e1_yamlfile = tmp_p1_dir / 'p1e1.yaml'
1140    p1e1_yamlfile.write_text(p1e1_yaml)
1141
1142    p1e2_yaml = """\
1143identifier: p1e2
1144name: Platform 1 Edition 2
1145type: native
1146arch: x86
1147vendor: vendor1
1148toolchain:
1149  - zephyr
1150"""
1151    p1e2_yamlfile = tmp_p1_dir / 'p1e2.yaml'
1152    p1e2_yamlfile.write_text(p1e2_yaml)
1153
1154    tmp_p2_dir = tmp_arch1_dir / 'p2'
1155    tmp_p2_dir.mkdir()
1156
1157    p2_bs_yaml = """\
1158boards:
1159
1160  - name: p2
1161    vendor: zephyr
1162    socs:
1163      - name: unit_testing
1164  - name: p2_2
1165    vendor: zephyr
1166    socs:
1167      - name: unit_testing
1168"""
1169    p2_yamlfile = tmp_p2_dir / 'board.yml'
1170    p2_yamlfile.write_text(p2_bs_yaml)
1171
1172    p2_yaml = """\
1173identifier: p2
1174name: Platform 2
1175type: sim
1176arch: x86
1177vendor: vendor2
1178toolchain:
1179  - zephyr
1180testing:
1181  default: True
1182"""
1183    p2_yamlfile = tmp_p2_dir / 'p2.yaml'
1184    p2_yamlfile.write_text(p2_yaml)
1185
1186    if create_duplicate:
1187        p2_yamlfile = tmp_p2_dir / 'p2-1.yaml'
1188        p2_yamlfile.write_text(p2_yaml)
1189
1190    p2_2_yaml = """\
1191testing:
1192  ć#@%!#!#^#@%@:1.0
1193identifier: p2_2
1194name: Platform 2 2
1195type: sim
1196arch: x86
1197vendor: vendor2
1198toolchain:
1199  - zephyr
1200"""
1201    p2_2_yamlfile = tmp_p2_dir / 'p2-2.yaml'
1202    p2_2_yamlfile.write_text(p2_2_yaml)
1203
1204    tmp_arch2_dir = tmp_board_root_dir / 'arm'
1205    tmp_arch2_dir.mkdir()
1206
1207    tmp_p3_dir = tmp_arch2_dir / 'p3'
1208    tmp_p3_dir.mkdir()
1209
1210    p3_bs_yaml = """\
1211boards:
1212
1213  - name: p3
1214    vendor: zephyr
1215    socs:
1216      - name: unit_testing
1217"""
1218    p3_yamlfile = tmp_p3_dir / 'board.yml'
1219    p3_yamlfile.write_text(p3_bs_yaml)
1220
1221    p3_yaml = """\
1222identifier: p3
1223name: Platform 3
1224type: unit
1225arch: arm
1226vendor: vendor3
1227toolchain:
1228  - zephyr
1229"""
1230    p3_yamlfile = tmp_p3_dir / 'p3.yaml'
1231    p3_yamlfile.write_text(p3_yaml)
1232    p3_yamlfile = tmp_p3_dir / 'p3_B.conf'
1233    p3_yamlfile.write_text('')
1234
1235    env = mock.Mock(board_roots=[tmp_board_root_dir])
1236
1237    testplan = TestPlan(env=env)
1238
1239    testplan.test_config = {
1240        'platforms': {
1241            'override_default_platforms': override_default_platforms,
1242            'default_platforms': ['p3', 'p1e1']
1243        }
1244    }
1245
1246    with pytest.raises(Exception) if create_duplicate else nullcontext():
1247        testplan.add_configurations()
1248
1249    if expected_defaults is not None:
1250        assert sorted(expected_defaults) == sorted(testplan.default_platforms)
1251    if expected_platform_names is not None:
1252        assert sorted(expected_platform_names) == sorted(testplan.platform_names)
1253
1254
1255def test_testplan_get_all_tests():
1256    testplan = TestPlan(env=mock.Mock())
1257    tc1 = mock.Mock()
1258    tc1.name = 'tc1'
1259    tc2 = mock.Mock()
1260    tc2.name = 'tc2'
1261    tc3 = mock.Mock()
1262    tc3.name = 'tc3'
1263    tc4 = mock.Mock()
1264    tc4.name = 'tc4'
1265    tc5 = mock.Mock()
1266    tc5.name = 'tc5'
1267    ts1 = mock.Mock(testcases=[tc1, tc2])
1268    ts2 = mock.Mock(testcases=[tc3, tc4, tc5])
1269    testplan.testsuites = {
1270        'ts1': ts1,
1271        'ts2': ts2
1272    }
1273
1274    res = testplan.get_all_tests()
1275
1276    assert sorted(res) == ['tc1', 'tc2', 'tc3', 'tc4', 'tc5']
1277
1278
1279TESTDATA_9 = [
1280    ([], False, 7),
1281    ([], True, 5),
1282    (['good_test/dummy.common.1', 'good_test/dummy.common.2', 'good_test/dummy.common.3'], False, 3),
1283    (['good_test/dummy.common.1', 'good_test/dummy.common.2', 'good_test/dummy.common.3'], True, 0),
1284]
1285
1286@pytest.mark.parametrize(
1287    'testsuite_filter, use_alt_root, expected_suite_count',
1288    TESTDATA_9,
1289    ids=['no testsuite filter', 'no testsuite filter, alt root',
1290         'testsuite filter', 'testsuite filter, alt root']
1291)
1292def test_testplan_add_testsuites(tmp_path, testsuite_filter, use_alt_root, expected_suite_count):
1293    # tmp_path
1294    # ├ tests  <- test root
1295    # │ ├ good_test
1296    # │ │ └ testcase.yaml
1297    # │ ├ wrong_test
1298    # │ │ └ testcase.yaml
1299    # │ ├ good_sample
1300    # │ │ └ sample.yaml
1301    # │ └ others
1302    # │   └ other.txt
1303    # └ other_tests  <- alternate test root
1304    #   └ good_test
1305    #     └ testcase.yaml
1306    tmp_test_root_dir = tmp_path / 'tests'
1307    tmp_test_root_dir.mkdir()
1308
1309    tmp_good_test_dir = tmp_test_root_dir / 'good_test'
1310    tmp_good_test_dir.mkdir()
1311    testcase_yaml_1 = """\
1312tests:
1313  dummy.common.1:
1314    build_on_all: true
1315  dummy.common.2:
1316    build_on_all: true
1317  dummy.common.3:
1318    build_on_all: true
1319  dummy.special:
1320    build_on_all: false
1321"""
1322    testfile_1 = tmp_good_test_dir / 'testcase.yaml'
1323    testfile_1.write_text(testcase_yaml_1)
1324
1325    tmp_bad_test_dir = tmp_test_root_dir / 'wrong_test'
1326    tmp_bad_test_dir.mkdir()
1327    testcase_yaml_2 = """\
1328tests:
1329 wrong:
1330  yaml: {]}
1331"""
1332    testfile_2 = tmp_bad_test_dir / 'testcase.yaml'
1333    testfile_2.write_text(testcase_yaml_2)
1334
1335    tmp_good_sample_dir = tmp_test_root_dir / 'good_sample'
1336    tmp_good_sample_dir.mkdir()
1337    samplecase_yaml_1 = """\
1338tests:
1339  sample.dummy.common.1:
1340    tags:
1341    - samples
1342  sample.dummy.common.2:
1343    tags:
1344    - samples
1345  sample.dummy.special.1:
1346    tags:
1347    - samples
1348"""
1349    samplefile_1 = tmp_good_sample_dir / 'sample.yaml'
1350    samplefile_1.write_text(samplecase_yaml_1)
1351
1352    tmp_other_dir = tmp_test_root_dir / 'others'
1353    tmp_other_dir.mkdir()
1354    _ = tmp_other_dir / 'other.txt'
1355
1356    tmp_alt_test_root_dir = tmp_path / 'other_tests'
1357    tmp_alt_test_root_dir.mkdir()
1358
1359    tmp_alt_good_test_dir = tmp_alt_test_root_dir / 'good_test'
1360    tmp_alt_good_test_dir.mkdir()
1361    testcase_yaml_3 = """\
1362tests:
1363  dummy.alt.1:
1364    build_on_all: true
1365  dummy.alt.2:
1366    build_on_all: true
1367"""
1368    testfile_3 = tmp_alt_good_test_dir / 'testcase.yaml'
1369    testfile_3.write_text(testcase_yaml_3)
1370
1371    env = mock.Mock(
1372        test_roots=[tmp_test_root_dir],
1373        alt_config_root=[tmp_alt_test_root_dir] if use_alt_root else []
1374    )
1375
1376    testplan = TestPlan(env=env)
1377
1378    res = testplan.add_testsuites(testsuite_filter)
1379
1380    assert res == expected_suite_count
1381
1382
1383def test_testplan_str():
1384    testplan = TestPlan(env=mock.Mock())
1385    testplan.name = 'my name'
1386
1387    res = testplan.__str__()
1388
1389    assert res == 'my name'
1390
1391
1392TESTDATA_10 = [
1393    ('a platform', True),
1394    ('other platform', False),
1395]
1396
1397@pytest.mark.parametrize(
1398    'name, expect_found',
1399    TESTDATA_10,
1400    ids=['platform exists', 'no platform']
1401)
1402def test_testplan_get_platform(name, expect_found):
1403    testplan = TestPlan(env=mock.Mock())
1404    p1 = mock.Mock()
1405    p1.name = 'some platform'
1406    p2 = mock.Mock()
1407    p2.name = 'a platform'
1408    testplan.platforms = [p1, p2]
1409
1410    res = testplan.get_platform(name)
1411
1412    if expect_found:
1413        assert res.name == name
1414    else:
1415        assert res is None
1416
1417
1418TESTDATA_11 = [
1419    (True, 'runnable'),
1420    (False, 'buildable'),
1421]
1422
1423@pytest.mark.parametrize(
1424    'device_testing, expected_tfilter',
1425    TESTDATA_11,
1426    ids=['device testing', 'no device testing']
1427)
1428def test_testplan_load_from_file(caplog, device_testing, expected_tfilter):
1429    def get_platform(name):
1430        p = mock.Mock()
1431        p.name = name
1432        p.normalized_name = name
1433        return p
1434
1435    ts1tc1 = mock.Mock()
1436    ts1tc1.name = 'TS1.tc1'
1437    ts1 = mock.Mock(testcases=[ts1tc1])
1438    ts1.name = 'TestSuite 1'
1439    ts2 = mock.Mock(testcases=[])
1440    ts2.name = 'TestSuite 2'
1441    ts3tc1 = mock.Mock()
1442    ts3tc1.name = 'TS3.tc1'
1443    ts3tc2 = mock.Mock()
1444    ts3tc2.name = 'TS3.tc2'
1445    ts3 = mock.Mock(testcases=[ts3tc1, ts3tc2])
1446    ts3.name = 'TestSuite 3'
1447    ts4tc1 = mock.Mock()
1448    ts4tc1.name = 'TS4.tc1'
1449    ts4 = mock.Mock(testcases=[ts4tc1])
1450    ts4.name = 'TestSuite 4'
1451    ts5 = mock.Mock(testcases=[])
1452    ts5.name = 'TestSuite 5'
1453
1454    testplan = TestPlan(env=mock.Mock(outdir=os.path.join('out', 'dir')))
1455    testplan.options = mock.Mock(device_testing=device_testing, test_only=True, report_summary=None)
1456    testplan.testsuites = {
1457        'TestSuite 1': ts1,
1458        'TestSuite 2': ts2,
1459        'TestSuite 3': ts3,
1460        'TestSuite 4': ts4,
1461        'TestSuite 5': ts5
1462    }
1463
1464    testplan.get_platform = mock.Mock(side_effect=get_platform)
1465
1466    testplan_data = """\
1467{
1468    "testsuites": [
1469        {
1470            "name": "TestSuite 1",
1471            "platform": "Platform 1",
1472            "run_id": 1,
1473            "execution_time": 60.00,
1474            "used_ram": 4096,
1475            "available_ram": 12278,
1476            "used_rom": 1024,
1477            "available_rom": 1047552,
1478            "status": "passed",
1479            "reason": "OK",
1480            "testcases": [
1481                {
1482                    "identifier": "TS1.tc1",
1483                    "status": "passed",
1484                    "reason": "passed",
1485                    "execution_time": 60.00,
1486                    "log": ""
1487                }
1488            ]
1489        },
1490        {
1491            "name": "TestSuite 2",
1492            "platform": "Platform 1"
1493        },
1494        {
1495            "name": "TestSuite 3",
1496            "platform": "Platform 1",
1497            "run_id": 1,
1498            "execution_time": 360.00,
1499            "used_ram": 4096,
1500            "available_ram": 12278,
1501            "used_rom": 1024,
1502            "available_rom": 1047552,
1503            "status": "error",
1504            "reason": "File Not Found Error",
1505            "testcases": [
1506                {
1507                    "identifier": "TS3.tc1",
1508                    "status": "error",
1509                    "reason": "File Not Found Error.",
1510                    "execution_time": 360.00,
1511                    "log": "[ERROR]: File 'dummy.yaml' not found!\\nClosing..."
1512                },
1513                {
1514                    "identifier": "TS3.tc2"
1515                }
1516            ]
1517        },
1518        {
1519            "name": "TestSuite 4",
1520            "platform": "Platform 1",
1521            "execution_time": 360.00,
1522            "used_ram": 4096,
1523            "available_ram": 12278,
1524            "used_rom": 1024,
1525            "available_rom": 1047552,
1526            "status": "skipped",
1527            "reason": "Not in requested test list.",
1528            "testcases": [
1529                {
1530                    "identifier": "TS4.tc1",
1531                    "status": "skipped",
1532                    "reason": "Not in requested test list.",
1533                    "execution_time": 360.00,
1534                    "log": "[INFO] Parsing..."
1535                },
1536                {
1537                    "identifier": "TS3.tc2"
1538                }
1539            ]
1540        },
1541        {
1542            "name": "TestSuite 5",
1543            "platform": "Platform 2"
1544        }
1545    ]
1546}
1547"""
1548
1549    filter_platform = ['Platform 1']
1550
1551    check_runnable_mock = mock.Mock(return_value=True)
1552
1553    with mock.patch('builtins.open', mock.mock_open(read_data=testplan_data)), \
1554         mock.patch('twisterlib.testinstance.TestInstance.check_runnable', check_runnable_mock), \
1555         mock.patch('twisterlib.testinstance.TestInstance.create_overlay', mock.Mock()):
1556        testplan.load_from_file('dummy.yaml', filter_platform)
1557
1558    expected_instances = {
1559        'Platform 1/TestSuite 1': {
1560            'metrics': {
1561                'handler_time': 60.0,
1562                'used_ram': 4096,
1563                'used_rom': 1024,
1564                'available_ram': 12278,
1565                'available_rom': 1047552
1566            },
1567            'retries': 0,
1568            'testcases': {
1569                'TS1.tc1': {
1570                    'status': 'passed',
1571                    'reason': None,
1572                    'duration': 60.0,
1573                    'output': ''
1574                }
1575            }
1576        },
1577        'Platform 1/TestSuite 2': {
1578            'metrics': {
1579                'handler_time': 0,
1580                'used_ram': 0,
1581                'used_rom': 0,
1582                'available_ram': 0,
1583                'available_rom': 0
1584            },
1585            'retries': 0,
1586            'testcases': []
1587        },
1588        'Platform 1/TestSuite 3': {
1589            'metrics': {
1590                'handler_time': 360.0,
1591                'used_ram': 4096,
1592                'used_rom': 1024,
1593                'available_ram': 12278,
1594                'available_rom': 1047552
1595            },
1596            'retries': 1,
1597            'testcases': {
1598                    'TS3.tc1': {
1599                        'status': 'error',
1600                        'reason': None,
1601                        'duration': 360.0,
1602                        'output': '[ERROR]: File \'dummy.yaml\' not found!\nClosing...'
1603                    },
1604                    'TS3.tc2': {
1605                        'status': None,
1606                        'reason': None,
1607                        'duration': 0,
1608                        'output': ''
1609                    }
1610            }
1611        },
1612        'Platform 1/TestSuite 4': {
1613            'metrics': {
1614                'handler_time': 360.0,
1615                'used_ram': 4096,
1616                'used_rom': 1024,
1617                'available_ram': 12278,
1618                'available_rom': 1047552
1619            },
1620            'retries': 0,
1621            'testcases': {
1622                'TS4.tc1': {
1623                    'status': 'skipped',
1624                    'reason': 'Not in requested test list.',
1625                    'duration': 360.0,
1626                    'output': '[INFO] Parsing...'
1627                }
1628            }
1629        },
1630    }
1631
1632    for n, i in testplan.instances.items():
1633        assert expected_instances[n]['metrics'] == i.metrics
1634        assert expected_instances[n]['retries'] == i.retries
1635        for t in i.testcases:
1636            assert expected_instances[n]['testcases'][str(t)]['status'] == t.status
1637            assert expected_instances[n]['testcases'][str(t)]['reason'] == t.reason
1638            assert expected_instances[n]['testcases'][str(t)]['duration'] == t.duration
1639            assert expected_instances[n]['testcases'][str(t)]['output'] == t.output
1640
1641    check_runnable_mock.assert_called_with(mock.ANY, expected_tfilter, mock.ANY, mock.ANY)
1642
1643    expected_logs = [
1644        'loading TestSuite 1...',
1645        'loading TestSuite 2...',
1646        'loading TestSuite 3...',
1647        'loading TestSuite 4...',
1648    ]
1649    assert all([log in caplog.text for log in expected_logs])
1650
1651
1652def test_testplan_add_instances():
1653    testplan = TestPlan(env=mock.Mock())
1654    instance1 = mock.Mock()
1655    instance1.name = 'instance 1'
1656    instance2 = mock.Mock()
1657    instance2.name = 'instance 2'
1658    instance_list = [instance1, instance2]
1659
1660    testplan.add_instances(instance_list)
1661
1662    assert testplan.instances == {
1663        'instance 1': instance1,
1664        'instance 2': instance2,
1665    }
1666
1667
1668def test_testplan_get_testsuite():
1669    testplan = TestPlan(env=mock.Mock())
1670    testplan.testsuites = {
1671        'testsuite0': mock.Mock(testcases=[mock.Mock(), mock.Mock()]),
1672        'testsuite1': mock.Mock(testcases=[mock.Mock()]),
1673        'testsuite2': mock.Mock(testcases=[mock.Mock(), mock.Mock()]),
1674        'testsuite3': mock.Mock(testcases=[])
1675    }
1676    testplan.testsuites['testsuite0'].testcases[0].name = 'testcase name 0'
1677    testplan.testsuites['testsuite0'].testcases[1].name = 'testcase name 1'
1678    testplan.testsuites['testsuite1'].testcases[0].name = 'sample id'
1679    testplan.testsuites['testsuite2'].testcases[0].name = 'dummy id'
1680    testplan.testsuites['testsuite2'].testcases[1].name = 'sample id'
1681
1682    id = 'sample id'
1683
1684    res = testplan.get_testsuite(id)
1685
1686    assert len(res) == 2
1687    assert testplan.testsuites['testsuite1'] in res
1688    assert testplan.testsuites['testsuite2'] in res
1689
1690
1691def test_testplan_verify_platforms_existence(caplog):
1692    testplan = TestPlan(env=mock.Mock())
1693    testplan.platform_names = ['a platform', 'other platform']
1694
1695    platform_names = ['other platform', 'some platform']
1696    log_info = 'PLATFORM ERROR'
1697
1698    with pytest.raises(SystemExit) as se:
1699        testplan.verify_platforms_existence(platform_names, log_info)
1700
1701    assert str(se.value) == '2'
1702    assert 'PLATFORM ERROR - unrecognized platform - some platform'
1703
1704
1705TESTDATA_12 = [
1706    (True),
1707    (False)
1708]
1709
1710@pytest.mark.parametrize(
1711    'exists',
1712    TESTDATA_12,
1713    ids=['links dir exists', 'links dir does not exist']
1714)
1715def test_testplan_create_build_dir_links(exists):
1716    outdir = os.path.join('out', 'dir')
1717    instances_linked = []
1718
1719    def mock_link(links_dir_path, instance):
1720        assert links_dir_path == os.path.join(outdir, 'twister_links')
1721        instances_linked.append(instance)
1722
1723    instances = {
1724        'inst0': mock.Mock(status='passed'),
1725        'inst1': mock.Mock(status='skipped'),
1726        'inst2': mock.Mock(status='error'),
1727    }
1728    expected_instances = [instances['inst0'], instances['inst2']]
1729
1730    testplan = TestPlan(env=mock.Mock(outdir=outdir))
1731    testplan._create_build_dir_link = mock.Mock(side_effect=mock_link)
1732    testplan.instances = instances
1733
1734    with mock.patch('os.path.exists', return_value=exists), \
1735         mock.patch('os.mkdir', mock.Mock()) as mkdir_mock:
1736        testplan.create_build_dir_links()
1737
1738    if not exists:
1739        mkdir_mock.assert_called_once()
1740
1741    assert expected_instances == instances_linked
1742
1743
1744TESTDATA_13 = [
1745    ('nt'),
1746    ('Linux')
1747]
1748
1749@pytest.mark.parametrize(
1750    'os_name',
1751    TESTDATA_13,
1752)
1753def test_testplan_create_build_dir_link(os_name):
1754    def mock_makedirs(path, exist_ok=False):
1755        assert exist_ok
1756        assert path == instance_build_dir
1757
1758    def mock_symlink(source, target):
1759        assert source == instance_build_dir
1760        assert target == os.path.join('links', 'path', 'test_0')
1761
1762    def mock_call(cmd, shell=False):
1763        assert shell
1764        assert cmd == ['mklink', '/J', os.path.join('links', 'path', 'test_0'),
1765                       instance_build_dir]
1766
1767    def mock_join(*paths):
1768        slash = "\\" if os.name == 'nt' else "/"
1769        return slash.join(paths)
1770
1771    with mock.patch('os.name', os_name), \
1772         mock.patch('os.symlink', side_effect=mock_symlink), \
1773         mock.patch('os.makedirs', side_effect=mock_makedirs), \
1774         mock.patch('subprocess.call', side_effect=mock_call), \
1775         mock.patch('os.path.join', side_effect=mock_join):
1776
1777        testplan = TestPlan(env=mock.Mock())
1778        links_dir_path = os.path.join('links', 'path')
1779        instance_build_dir = os.path.join('some', 'far', 'off', 'build', 'dir')
1780        instance = mock.Mock(build_dir=instance_build_dir)
1781        testplan._create_build_dir_link(links_dir_path, instance)
1782
1783        assert instance.build_dir == os.path.join('links', 'path', 'test_0')
1784        assert testplan.link_dir_counter == 1
1785
1786
1787TESTDATA_14 = [
1788    ('bad platform', 'dummy reason', [],
1789     'dummy status', 'dummy reason'),
1790    ('good platform', 'quarantined', [],
1791     'error', 'quarantined but is one of the integration platforms'),
1792    ('good platform', 'dummy reason', [{'type': 'command line filter'}],
1793     'dummy status', 'dummy reason'),
1794    ('good platform', 'dummy reason', [{'type': 'Skip filter'}],
1795     'dummy status', 'dummy reason'),
1796    ('good platform', 'dummy reason', [{'type': 'platform key filter'}],
1797     'dummy status', 'dummy reason'),
1798    ('good platform', 'dummy reason', [{'type': 'Toolchain filter'}],
1799     'dummy status', 'dummy reason'),
1800    ('good platform', 'dummy reason', [{'type': 'Module filter'}],
1801     'dummy status', 'dummy reason'),
1802    ('good platform', 'dummy reason', [{'type': 'testsuite filter'}],
1803     'error', 'dummy reason but is one of the integration platforms'),
1804]
1805
1806@pytest.mark.parametrize(
1807    'platform_name, reason, filters,' \
1808    ' expected_status, expected_reason',
1809    TESTDATA_14,
1810    ids=['wrong platform', 'quarantined', 'command line filtered',
1811         'skip filtered', 'platform key filtered', 'toolchain filtered',
1812         'module filtered', 'skip to error change']
1813)
1814def test_change_skip_to_error_if_integration(
1815    platform_name,
1816    reason,
1817    filters,
1818    expected_status,
1819    expected_reason
1820):
1821    options = mock.Mock()
1822    platform = mock.Mock()
1823    platform.name = platform_name
1824    testsuite = mock.Mock(integration_platforms=['good platform', 'a platform'])
1825    instance = mock.Mock(
1826        testsuite=testsuite,
1827        platform=platform,
1828        filters=filters,
1829        status='dummy status',
1830        reason=reason
1831    )
1832
1833    change_skip_to_error_if_integration(options, instance)
1834
1835    assert instance.status == expected_status
1836    assert instance.reason == expected_reason
1837