1#!/usr/bin/env python3
2# Copyright (c) 2020 Intel Corporation
3#
4# SPDX-License-Identifier: Apache-2.0
5
6'''
7This test file contains testsuites for testsuite.py module of twister
8'''
9import sys
10import os
11import mock
12import pytest
13
14from contextlib import nullcontext
15
16ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
17sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
18
19from twisterlib.testplan import TestPlan, change_skip_to_error_if_integration
20from twisterlib.testinstance import TestInstance
21from twisterlib.testsuite import TestSuite
22from twisterlib.platform import Platform
23from twisterlib.quarantine import Quarantine
24from twisterlib.error import TwisterRuntimeError
25
26
27def test_testplan_add_testsuites_short(class_testplan):
28    """ Testing add_testcase function of Testsuite class in twister """
29    # Test 1: Check the list of testsuites after calling add testsuites function is as expected
30    class_testplan.SAMPLE_FILENAME = 'test_sample_app.yaml'
31    class_testplan.TESTSUITE_FILENAME = 'test_data.yaml'
32    class_testplan.add_testsuites()
33
34    tests_rel_dir = 'scripts/tests/twister/test_data/testsuites/tests/'
35    expected_testsuites = ['test_b.check_1',
36                          'test_b.check_2',
37                          'test_c.check_1',
38                          'test_c.check_2',
39                          'test_a.check_1',
40                          'test_a.check_2',
41                          'test_d.check_1',
42                          'test_e.check_1',
43                          'sample_test.app',
44                          'test_config.main']
45    testsuite_list = []
46    for key in sorted(class_testplan.testsuites.keys()):
47        testsuite_list.append(os.path.basename(os.path.normpath(key)))
48    assert sorted(testsuite_list) == sorted(expected_testsuites)
49
50    # Test 2 : Assert Testcase name is expected & all testsuites values are testcase class objects
51    suite = class_testplan.testsuites.get(tests_rel_dir + 'test_a/test_a.check_1')
52    assert suite.name == tests_rel_dir + 'test_a/test_a.check_1'
53    assert all(isinstance(n, TestSuite) for n in class_testplan.testsuites.values())
54
55@pytest.mark.parametrize("board_root_dir", [("board_config_file_not_exist"), ("board_config")])
56def test_add_configurations_short(test_data, class_env, board_root_dir):
57    """ Testing add_configurations function of TestPlan class in Twister
58    Test : Asserting on default platforms list
59    """
60    class_env.board_roots = [os.path.abspath(test_data + board_root_dir)]
61    plan = TestPlan(class_env)
62    plan.parse_configuration(config_file=class_env.test_config)
63    if board_root_dir == "board_config":
64        plan.add_configurations()
65        assert sorted(plan.default_platforms) == sorted(['demo_board_1', 'demo_board_3'])
66    elif board_root_dir == "board_config_file_not_exist":
67        plan.add_configurations()
68        assert sorted(plan.default_platforms) != sorted(['demo_board_1'])
69
70
71def test_get_all_testsuites_short(class_testplan, all_testsuites_dict):
72    """ Testing get_all_testsuites function of TestPlan class in Twister """
73    plan = class_testplan
74    plan.testsuites = all_testsuites_dict
75    expected_tests = ['sample_test.app', 'test_a.check_1.1a',
76                      'test_a.check_1.1c',
77                      'test_a.check_1.2a', 'test_a.check_1.2b',
78                      'test_a.check_1.Unit_1c', 'test_a.check_1.unit_1a',
79                      'test_a.check_1.unit_1b', 'test_a.check_2.1a',
80                      'test_a.check_2.1c', 'test_a.check_2.2a',
81                      'test_a.check_2.2b', 'test_a.check_2.Unit_1c',
82                      'test_a.check_2.unit_1a', 'test_a.check_2.unit_1b',
83                      'test_b.check_1', 'test_b.check_2', 'test_c.check_1',
84                      'test_c.check_2', 'test_d.check_1.unit_1a',
85                      'test_d.check_1.unit_1b',
86                      'test_e.check_1.1a', 'test_e.check_1.1b',
87                      'test_config.main']
88
89    assert sorted(plan.get_all_tests()) == sorted(expected_tests)
90
91def test_get_platforms_short(class_testplan, platforms_list):
92    """ Testing get_platforms function of TestPlan class in Twister """
93    plan = class_testplan
94    plan.platforms = platforms_list
95    platform = plan.get_platform("demo_board_1")
96    assert isinstance(platform, Platform)
97    assert platform.name == "demo_board_1"
98
99TESTDATA_PART1 = [
100    ("toolchain_allow", ['gcc'], None, None, "Not in testsuite toolchain allow list"),
101    ("platform_allow", ['demo_board_1'], None, None, "Not in testsuite platform allow list"),
102    ("toolchain_exclude", ['zephyr'], None, None, "In test case toolchain exclude"),
103    ("platform_exclude", ['demo_board_2'], None, None, "In test case platform exclude"),
104    ("arch_exclude", ['x86_demo'], None, None, "In test case arch exclude"),
105    ("arch_allow", ['arm'], None, None, "Not in test case arch allow list"),
106    ("skip", True, None, None, "Skip filter"),
107    ("tags", set(['sensor', 'bluetooth']), "ignore_tags", ['bluetooth'], "Excluded tags per platform (exclude_tags)"),
108    ("min_flash", "2024", "flash", "1024", "Not enough FLASH"),
109    ("min_ram", "500", "ram", "256", "Not enough RAM"),
110    ("None", "None", "env", ['BSIM_OUT_PATH', 'demo_env'], "Environment (BSIM_OUT_PATH, demo_env) not satisfied"),
111    ("build_on_all", True, None, None, "Platform is excluded on command line."),
112    (None, None, "supported_toolchains", ['gcc'], "Not supported by the toolchain"),
113]
114
115
116@pytest.mark.parametrize("tc_attribute, tc_value, plat_attribute, plat_value, expected_discards",
117                         TESTDATA_PART1)
118def test_apply_filters_part1(class_testplan, all_testsuites_dict, platforms_list,
119                             tc_attribute, tc_value, plat_attribute, plat_value, expected_discards):
120    """ Testing apply_filters function of TestPlan class in Twister
121    Part 1: Response of apply_filters function have
122            appropriate values according to the filters
123    """
124    plan = class_testplan
125    if tc_attribute is None and plat_attribute is None:
126        plan.apply_filters()
127
128    plan.platforms = platforms_list
129    plan.platform_names = [p.name for p in platforms_list]
130    plan.testsuites = all_testsuites_dict
131    for plat in plan.platforms:
132        if plat_attribute == "ignore_tags":
133            plat.ignore_tags = plat_value
134        if plat_attribute == "flash":
135            plat.flash = plat_value
136        if plat_attribute == "ram":
137            plat.ram = plat_value
138        if plat_attribute == "env":
139            plat.env = plat_value
140            plat.env_satisfied = False
141        if plat_attribute == "supported_toolchains":
142            plat.supported_toolchains = plat_value
143    for _, testcase in plan.testsuites.items():
144        if tc_attribute == "toolchain_allow":
145            testcase.toolchain_allow = tc_value
146        if tc_attribute == "platform_allow":
147            testcase.platform_allow = tc_value
148        if tc_attribute == "toolchain_exclude":
149            testcase.toolchain_exclude = tc_value
150        if tc_attribute == "platform_exclude":
151            testcase.platform_exclude = tc_value
152        if tc_attribute == "arch_exclude":
153            testcase.arch_exclude = tc_value
154        if tc_attribute == "arch_allow":
155            testcase.arch_allow = tc_value
156        if tc_attribute == "skip":
157            testcase.skip = tc_value
158        if tc_attribute == "tags":
159            testcase.tags = tc_value
160        if tc_attribute == "min_flash":
161            testcase.min_flash = tc_value
162        if tc_attribute == "min_ram":
163            testcase.min_ram = tc_value
164
165    if tc_attribute == "build_on_all":
166        for _, testcase in plan.testsuites.items():
167            testcase.build_on_all = tc_value
168        plan.apply_filters(exclude_platform=['demo_board_1'])
169    elif plat_attribute == "supported_toolchains":
170        plan.apply_filters(force_toolchain=False,
171                                                 exclude_platform=['demo_board_1'],
172                                                 platform=['demo_board_2'])
173    elif tc_attribute is None and plat_attribute is None:
174        plan.apply_filters()
175    else:
176        plan.apply_filters(exclude_platform=['demo_board_1'],
177                                                 platform=['demo_board_2'])
178
179    filtered_instances = list(filter(lambda item:  item.status == "filtered", plan.instances.values()))
180    for d in filtered_instances:
181        assert d.reason == expected_discards
182
183TESTDATA_PART2 = [
184    ("runnable", "True", "Not runnable on device"),
185    ("exclude_tag", ['test_a'], "Command line testsuite exclude filter"),
186    ("run_individual_tests", ['scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1'], "TestSuite name filter"),
187    ("arch", ['arm_test'], "Command line testsuite arch filter"),
188    ("tag", ['test_d'], "Command line testsuite tag filter")
189    ]
190
191
192@pytest.mark.parametrize("extra_filter, extra_filter_value, expected_discards", TESTDATA_PART2)
193def test_apply_filters_part2(class_testplan, all_testsuites_dict,
194                             platforms_list, extra_filter, extra_filter_value, expected_discards):
195    """ Testing apply_filters function of TestPlan class in Twister
196    Part 2 : Response of apply_filters function (discard dictionary) have
197             appropriate values according to the filters
198    """
199
200    class_testplan.platforms = platforms_list
201    class_testplan.platform_names = [p.name for p in platforms_list]
202    class_testplan.testsuites = all_testsuites_dict
203    kwargs = {
204        extra_filter : extra_filter_value,
205        "exclude_platform" : [
206            'demo_board_1'
207            ],
208        "platform" : [
209            'demo_board_2'
210            ]
211        }
212    class_testplan.apply_filters(**kwargs)
213    filtered_instances = list(filter(lambda item:  item.status == "filtered", class_testplan.instances.values()))
214    for d in filtered_instances:
215        assert d.reason == expected_discards
216
217
218TESTDATA_PART3 = [
219    (20, 20, -1, 0),
220    (-2, -1, 10, 20),
221    (0, 0, 0, 0)
222    ]
223
224@pytest.mark.parametrize("tc_min_flash, plat_flash, tc_min_ram, plat_ram",
225                         TESTDATA_PART3)
226def test_apply_filters_part3(class_testplan, all_testsuites_dict, platforms_list,
227                             tc_min_flash, plat_flash, tc_min_ram, plat_ram):
228    """ Testing apply_filters function of TestPlan class in Twister
229    Part 3 : Testing edge cases for ram and flash values of platforms & testsuites
230    """
231    class_testplan.platforms = platforms_list
232    class_testplan.platform_names = [p.name for p in platforms_list]
233    class_testplan.testsuites = all_testsuites_dict
234
235    for plat in class_testplan.platforms:
236        plat.flash = plat_flash
237        plat.ram = plat_ram
238    for _, testcase in class_testplan.testsuites.items():
239        testcase.min_ram = tc_min_ram
240        testcase.min_flash = tc_min_flash
241    class_testplan.apply_filters(exclude_platform=['demo_board_1'],
242                                             platform=['demo_board_2'])
243
244    filtered_instances = list(filter(lambda item:  item.status == "filtered", class_testplan.instances.values()))
245    assert not filtered_instances
246
247def test_add_instances_short(tmp_path, class_env, all_testsuites_dict, platforms_list):
248    """ Testing add_instances() function of TestPlan class in Twister
249    Test 1: instances dictionary keys have expected values (Platform Name + Testcase Name)
250    Test 2: Values of 'instances' dictionary in Testsuite class are an
251	        instance of 'TestInstance' class
252    Test 3: Values of 'instances' dictionary have expected values.
253    """
254    class_env.outdir = tmp_path
255    plan = TestPlan(class_env)
256    plan.platforms = platforms_list
257    platform = plan.get_platform("demo_board_2")
258    instance_list = []
259    for _, testcase in all_testsuites_dict.items():
260        instance = TestInstance(testcase, platform, class_env.outdir)
261        instance_list.append(instance)
262    plan.add_instances(instance_list)
263    assert list(plan.instances.keys()) == \
264		   [platform.name + '/' + s for s in list(all_testsuites_dict.keys())]
265    assert all(isinstance(n, TestInstance) for n in list(plan.instances.values()))
266    assert list(plan.instances.values()) == instance_list
267
268
269QUARANTINE_BASIC = {
270    'demo_board_1/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1' : 'a1 on board_1 and board_3',
271    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1' : 'a1 on board_1 and board_3'
272}
273
274QUARANTINE_WITH_REGEXP = {
275    'demo_board_2/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_2' : 'a2 and c2 on x86',
276    'demo_board_1/scripts/tests/twister/test_data/testsuites/tests/test_d/test_d.check_1' : 'all test_d',
277    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_d/test_d.check_1' : 'all test_d',
278    'demo_board_2/scripts/tests/twister/test_data/testsuites/tests/test_d/test_d.check_1' : 'all test_d',
279    'demo_board_2/scripts/tests/twister/test_data/testsuites/tests/test_c/test_c.check_2' : 'a2 and c2 on x86'
280}
281
282QUARANTINE_PLATFORM = {
283    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1' : 'all on board_3',
284    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_2' : 'all on board_3',
285    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_d/test_d.check_1' : 'all on board_3',
286    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_b/test_b.check_1' : 'all on board_3',
287    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_b/test_b.check_2' : 'all on board_3',
288    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_c/test_c.check_1' : 'all on board_3',
289    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_c/test_c.check_2' : 'all on board_3',
290    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_e/test_e.check_1' : 'all on board_3',
291    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_config/test_config.main' : 'all on board_3'
292}
293
294QUARANTINE_MULTIFILES = {
295    **QUARANTINE_BASIC,
296    **QUARANTINE_WITH_REGEXP
297}
298
299@pytest.mark.parametrize(
300    ("quarantine_files, quarantine_verify, expected_val"),
301    [
302        (['basic.yaml'], False, QUARANTINE_BASIC),
303        (['with_regexp.yaml'], False, QUARANTINE_WITH_REGEXP),
304        (['with_regexp.yaml'], True, QUARANTINE_WITH_REGEXP),
305        (['platform.yaml'], False, QUARANTINE_PLATFORM),
306        (['basic.yaml', 'with_regexp.yaml'], False, QUARANTINE_MULTIFILES),
307        (['empty.yaml'], False, {})
308    ],
309    ids=[
310        'basic',
311        'with_regexp',
312        'quarantine_verify',
313        'platform',
314        'multifiles',
315        'empty'
316    ])
317def test_quarantine_short(class_testplan, platforms_list, test_data,
318                    quarantine_files, quarantine_verify, expected_val):
319    """ Testing quarantine feature in Twister
320    """
321    class_testplan.options.all = True
322    class_testplan.platforms = platforms_list
323    class_testplan.platform_names = [p.name for p in platforms_list]
324    class_testplan.TESTSUITE_FILENAME = 'test_data.yaml'
325    class_testplan.add_testsuites()
326
327    quarantine_list = [
328        os.path.join(test_data, 'quarantines', quarantine_file) for quarantine_file in quarantine_files
329    ]
330    class_testplan.quarantine = Quarantine(quarantine_list)
331    class_testplan.options.quarantine_verify = quarantine_verify
332    class_testplan.apply_filters()
333
334    for testname, instance in class_testplan.instances.items():
335        if quarantine_verify:
336            if testname in expected_val:
337                assert not instance.status
338            else:
339                assert instance.status == 'filtered'
340                assert instance.reason == "Not under quarantine"
341        else:
342            if testname in expected_val:
343                assert instance.status == 'filtered'
344                assert instance.reason == "Quarantine: " + expected_val[testname]
345            else:
346                assert not instance.status
347
348
349TESTDATA_PART4 = [
350    (os.path.join('test_d', 'test_d.check_1'), ['dummy'],
351     None, 'Snippet not supported'),
352    (os.path.join('test_c', 'test_c.check_1'), ['cdc-acm-console'],
353     0, None),
354    (os.path.join('test_d', 'test_d.check_1'), ['dummy', 'cdc-acm-console'],
355     2, 'Snippet not supported'),
356]
357
358@pytest.mark.parametrize(
359    'testpath, required_snippets, expected_filtered_len, expected_filtered_reason',
360    TESTDATA_PART4,
361    ids=['app', 'global', 'multiple']
362)
363def test_required_snippets_short(
364    class_testplan,
365    all_testsuites_dict,
366    platforms_list,
367    testpath,
368    required_snippets,
369    expected_filtered_len,
370    expected_filtered_reason
371):
372    """ Testing required_snippets function of TestPlan class in Twister """
373    plan = class_testplan
374    testpath = os.path.join('scripts', 'tests', 'twister', 'test_data',
375                            'testsuites', 'tests', testpath)
376    testsuite = class_testplan.testsuites.get(testpath)
377    plan.platforms = platforms_list
378    plan.platform_names = [p.name for p in platforms_list]
379    plan.testsuites = {testpath: testsuite}
380
381    print(plan.testsuites)
382
383    for _, testcase in plan.testsuites.items():
384        testcase.exclude_platform = []
385        testcase.required_snippets = required_snippets
386        testcase.build_on_all = True
387
388    plan.apply_filters()
389
390    filtered_instances = list(
391        filter(lambda item: item.status == "filtered", plan.instances.values())
392    )
393    if expected_filtered_len is not None:
394        assert len(filtered_instances) == expected_filtered_len
395    if expected_filtered_reason is not None:
396        for d in filtered_instances:
397            assert d.reason == expected_filtered_reason
398
399
400def test_testplan_get_level():
401    testplan = TestPlan(env=mock.Mock())
402    lvl1 = mock.Mock()
403    lvl1.name = 'a lvl'
404    lvl2 = mock.Mock()
405    lvl2.name = 'a lvl'
406    lvl3 = mock.Mock()
407    lvl3.name = 'other lvl'
408    testplan.levels.append(lvl1)
409    testplan.levels.append(lvl2)
410    testplan.levels.append(lvl3)
411
412    name = 'a lvl'
413
414    res = testplan.get_level(name)
415    assert res == lvl1
416
417    res = testplan.get_level(name)
418    assert res == lvl1
419
420    testplan.levels.remove(lvl1)
421    testplan.levels.remove(lvl2)
422
423    res = testplan.get_level(name)
424    assert res is None
425
426
427TESTDATA_1 = [
428    ('', {}),
429    (
430"""\
431levels:
432  - name: lvl1
433    adds:
434      - sc1
435      - sc2
436    inherits: []
437  - name: lvl2
438    adds:
439      - sc1-1
440      - sc1-2
441    inherits: [lvl1]
442""",
443    {
444        'lvl1': ['sc1', 'sc2'],
445        'lvl2': ['sc1-1', 'sc1-2', 'sc1', 'sc2']
446    }
447    ),
448]
449
450@pytest.mark.parametrize(
451    'config_yaml, expected_scenarios',
452    TESTDATA_1,
453    ids=['no config', 'valid config']
454)
455def test_testplan_parse_configuration(tmp_path, config_yaml, expected_scenarios):
456    testplan = TestPlan(env=mock.Mock())
457    testplan.scenarios = ['sc1', 'sc1-1', 'sc1-2', 'sc2']
458
459    tmp_config_file = tmp_path / 'config_file.yaml'
460    if config_yaml:
461        tmp_config_file.write_text(config_yaml)
462
463    with pytest.raises(TwisterRuntimeError) if not config_yaml else nullcontext():
464        testplan.parse_configuration(tmp_config_file)
465
466    if not testplan.levels:
467        assert expected_scenarios == {}
468    for level in testplan.levels:
469        assert sorted(level.scenarios) == sorted(expected_scenarios[level.name])
470
471
472TESTDATA_2 = [
473    ([], [], False),
474    (['ts1.tc3'], [], True),
475    (['ts2.tc2'], ['- ts2'], False),
476]
477
478@pytest.mark.parametrize(
479    'sub_tests, expected_outs, expect_error',
480    TESTDATA_2,
481    ids=['no subtests', 'subtests not found', 'valid subtests']
482)
483def test_testplan_find_subtests(
484    capfd,
485    sub_tests,
486    expected_outs,
487    expect_error
488):
489    testplan = TestPlan(env=mock.Mock())
490    testplan.options = mock.Mock(sub_test=sub_tests)
491    testplan.run_individual_testsuite = []
492    testplan.testsuites = {
493        'ts1': mock.Mock(
494            testcases=[
495                mock.Mock(),
496                mock.Mock(),
497            ]
498        ),
499        'ts2': mock.Mock(
500            testcases=[
501                mock.Mock(),
502                mock.Mock(),
503                mock.Mock(),
504            ]
505        )
506    }
507    testplan.testsuites['ts1'].name = 'ts1'
508    testplan.testsuites['ts1'].testcases[0].name = 'ts1.tc1'
509    testplan.testsuites['ts1'].testcases[1].name = 'ts1.tc2'
510    testplan.testsuites['ts2'].name = 'ts2'
511    testplan.testsuites['ts2'].testcases[0].name = 'ts2.tc1'
512    testplan.testsuites['ts2'].testcases[1].name = 'ts2.tc2'
513    testplan.testsuites['ts2'].testcases[2].name = 'ts2.tc3'
514
515    with pytest.raises(TwisterRuntimeError) if expect_error else nullcontext():
516        testplan.find_subtests()
517
518    out, err = capfd.readouterr()
519    sys.stdout.write(out)
520    sys.stdout.write(err)
521
522    assert all([printout in out for printout in expected_outs])
523
524
525TESTDATA_3 = [
526    (0, 0, [], False, [], TwisterRuntimeError, []),
527    (1, 1, [], False, [], TwisterRuntimeError, []),
528    (1, 0, [], True, [], TwisterRuntimeError, ['No quarantine list given to be verified']),
529#    (1, 0, ['qfile.yaml'], False, ['# empty'], None, ['Quarantine file qfile.yaml is empty']),
530    (1, 0, ['qfile.yaml'], False, ['- platforms:\n  - demo_board_3\n  comment: "board_3"'], None, []),
531]
532
533@pytest.mark.parametrize(
534    'added_testsuite_count, load_errors, ql, qv, ql_data, exception, expected_logs',
535    TESTDATA_3,
536    ids=['no tests', 'load errors', 'quarantine verify without quarantine list',
537#         'empty quarantine file',
538         'valid quarantine file']
539)
540def test_testplan_discover(
541    tmp_path,
542    caplog,
543    added_testsuite_count,
544    load_errors,
545    ql,
546    qv,
547    ql_data,
548    exception,
549    expected_logs
550):
551    for qf, data in zip(ql, ql_data):
552        tmp_qf = tmp_path / qf
553        tmp_qf.write_text(data)
554
555    testplan = TestPlan(env=mock.Mock())
556    testplan.options = mock.Mock(
557        test='ts1',
558        quarantine_list=[tmp_path / qf for qf in ql],
559        quarantine_verify=qv,
560    )
561    testplan.testsuites = {
562        'ts1': mock.Mock(id=1),
563        'ts2': mock.Mock(id=2),
564    }
565    testplan.run_individual_testsuite = 'ts0'
566    testplan.load_errors = load_errors
567    testplan.add_testsuites = mock.Mock(return_value=added_testsuite_count)
568    testplan.find_subtests = mock.Mock()
569    testplan.report_duplicates = mock.Mock()
570    testplan.parse_configuration = mock.Mock()
571    testplan.add_configurations = mock.Mock()
572
573    with pytest.raises(exception) if exception else nullcontext():
574        testplan.discover()
575
576    testplan.add_testsuites.assert_called_once_with(testsuite_filter='ts1')
577    assert all([log in caplog.text for log in expected_logs])
578
579
580TESTDATA_4 = [
581    (None, None, None, None, '00',
582     TwisterRuntimeError, [], []),
583    (None, True, None, None, '6/4',
584     TwisterRuntimeError, set(['t-p3', 't-p4', 't-p1', 't-p2']), []),
585    (None, None, 'load_tests.json', None, '0/4',
586     TwisterRuntimeError, set(['lt-p1', 'lt-p3', 'lt-p4', 'lt-p2']), []),
587    ('suffix', None, None, True, '2/4',
588     None, set(['ts-p4', 'ts-p2', 'ts-p3']), [2, 4]),
589]
590
591@pytest.mark.parametrize(
592    'report_suffix, only_failed, load_tests, test_only, subset,' \
593    ' exception, expected_selected_platforms, expected_generate_subset_args',
594    TESTDATA_4,
595    ids=['apply_filters only', 'only failed', 'load tests', 'test only']
596)
597def test_testplan_load(
598    tmp_path,
599    report_suffix,
600    only_failed,
601    load_tests,
602    test_only,
603    subset,
604    exception,
605    expected_selected_platforms,
606    expected_generate_subset_args
607):
608    twister_json = """\
609{
610    "testsuites": [
611        {
612            "name": "ts1",
613            "platform": "t-p1",
614            "testcases": []
615        },
616        {
617            "name": "ts1",
618            "platform": "t-p2",
619            "testcases": []
620        },
621        {
622            "name": "ts2",
623            "platform": "t-p3",
624            "testcases": []
625        },
626        {
627            "name": "ts2",
628            "platform": "t-p4",
629            "testcases": []
630        }
631    ]
632}
633"""
634    twister_file = tmp_path / 'twister.json'
635    twister_file.write_text(twister_json)
636
637    twister_suffix_json = """\
638{
639    "testsuites": [
640        {
641            "name": "ts1",
642            "platform": "ts-p1",
643            "testcases": []
644        },
645        {
646            "name": "ts1",
647            "platform": "ts-p2",
648            "testcases": []
649        },
650        {
651            "name": "ts2",
652            "platform": "ts-p3",
653            "testcases": []
654        },
655        {
656            "name": "ts2",
657            "platform": "ts-p4",
658            "testcases": []
659        }
660    ]
661}
662"""
663    twister_suffix_file = tmp_path / 'twister_suffix.json'
664    twister_suffix_file.write_text(twister_suffix_json)
665
666    load_tests_json = """\
667{
668    "testsuites": [
669        {
670            "name": "ts1",
671            "platform": "lt-p1",
672            "testcases": []
673        },
674        {
675            "name": "ts1",
676            "platform": "lt-p2",
677            "testcases": []
678        },
679        {
680            "name": "ts2",
681            "platform": "lt-p3",
682            \"testcases": []
683        },
684        {
685            "name": "ts2",
686            "platform": "lt-p4",
687            "testcases": []
688        }
689    ]
690}
691"""
692    load_tests_file = tmp_path / 'load_tests.json'
693    load_tests_file.write_text(load_tests_json)
694
695    testplan = TestPlan(env=mock.Mock(outdir=tmp_path))
696    testplan.testsuites = {
697        'ts1': mock.Mock(testcases=[], extra_configs=[]),
698        'ts2': mock.Mock(testcases=[], extra_configs=[]),
699    }
700    testplan.testsuites['ts1'].name = 'ts1'
701    testplan.testsuites['ts2'].name = 'ts2'
702    testplan.options = mock.Mock(
703        outdir=tmp_path,
704        report_suffix=report_suffix,
705        only_failed=only_failed,
706        load_tests=tmp_path / load_tests if load_tests else None,
707        test_only=test_only,
708        exclude_platform=['t-p0', 't-p1',
709                          'ts-p0', 'ts-p1',
710                          'lt-p0', 'lt-p1'],
711        platform=['t-p1', 't-p2', 't-p3', 't-p4',
712                  'ts-p1', 'ts-p2', 'ts-p3', 'ts-p4',
713                  'lt-p1', 'lt-p2', 'lt-p3', 'lt-p4'],
714        subset=subset
715    )
716    testplan.platforms=[mock.Mock(), mock.Mock(), mock.Mock(), mock.Mock(),
717                        mock.Mock(), mock.Mock(), mock.Mock(), mock.Mock(),
718                        mock.Mock(), mock.Mock(), mock.Mock(), mock.Mock()]
719    testplan.platforms[0].name = 't-p1'
720    testplan.platforms[1].name = 't-p2'
721    testplan.platforms[2].name = 't-p3'
722    testplan.platforms[3].name = 't-p4'
723    testplan.platforms[4].name = 'ts-p1'
724    testplan.platforms[5].name = 'ts-p2'
725    testplan.platforms[6].name = 'ts-p3'
726    testplan.platforms[7].name = 'ts-p4'
727    testplan.platforms[8].name = 'lt-p1'
728    testplan.platforms[9].name = 'lt-p2'
729    testplan.platforms[10].name = 'lt-p3'
730    testplan.platforms[11].name = 'lt-p4'
731    testplan.generate_subset = mock.Mock()
732    testplan.apply_filters = mock.Mock()
733
734    with mock.patch('twisterlib.testinstance.TestInstance.create_overlay', mock.Mock()), \
735         pytest.raises(exception) if exception else nullcontext():
736        testplan.load()
737
738    assert testplan.selected_platforms == expected_selected_platforms
739    if expected_generate_subset_args:
740        testplan.generate_subset.assert_called_once_with(*expected_generate_subset_args)
741    else:
742        testplan.generate_subset.assert_not_called()
743
744
745TESTDATA_5 = [
746    (False, False, None, 1, 2,
747     ['plat1/testA', 'plat1/testB', 'plat1/testC',
748      'plat3/testA', 'plat3/testB', 'plat3/testC']),
749    (False, False, None, 1, 5,
750     ['plat1/testA',
751      'plat3/testA', 'plat3/testB', 'plat3/testC']),
752    (False, False, None, 2, 2,
753     ['plat2/testA', 'plat2/testB']),
754    (True, False, None, 1, 2,
755     ['plat1/testA', 'plat2/testA', 'plat1/testB',
756      'plat3/testA', 'plat3/testB', 'plat3/testC']),
757    (True, False, None, 2, 2,
758     ['plat2/testB', 'plat1/testC']),
759    (True, True, 123, 1, 2,
760     ['plat2/testA', 'plat2/testB', 'plat1/testC',
761      'plat3/testB', 'plat3/testA', 'plat3/testC']),
762    (True, True, 123, 2, 2,
763     ['plat1/testB', 'plat1/testA']),
764]
765
766@pytest.mark.parametrize(
767    'device_testing, shuffle, seed, subset, sets, expected_subset',
768    TESTDATA_5,
769    ids=['subset 1', 'subset 1 out of 5', 'subset 2',
770         'device testing, subset 1', 'device testing, subset 2',
771         'device testing, shuffle with seed, subset 1',
772         'device testing, shuffle with seed, subset 2']
773)
774def test_testplan_generate_subset(
775    device_testing,
776    shuffle,
777    seed,
778    subset,
779    sets,
780    expected_subset
781):
782    testplan = TestPlan(env=mock.Mock())
783    testplan.options = mock.Mock(
784        device_testing=device_testing,
785        shuffle_tests=shuffle,
786        shuffle_tests_seed=seed
787    )
788    testplan.instances = {
789        'plat1/testA': mock.Mock(status=None),
790        'plat1/testB': mock.Mock(status=None),
791        'plat1/testC': mock.Mock(status=None),
792        'plat2/testA': mock.Mock(status=None),
793        'plat2/testB': mock.Mock(status=None),
794        'plat3/testA': mock.Mock(status='skipped'),
795        'plat3/testB': mock.Mock(status='skipped'),
796        'plat3/testC': mock.Mock(status='error'),
797    }
798
799    testplan.generate_subset(subset, sets)
800
801    assert [instance for instance in testplan.instances.keys()] == \
802           expected_subset
803
804
805def test_testplan_handle_modules():
806    testplan = TestPlan(env=mock.Mock())
807
808    modules = [mock.Mock(meta={'name': 'name1'}),
809               mock.Mock(meta={'name': 'name2'})]
810
811    with mock.patch('twisterlib.testplan.parse_modules', return_value=modules):
812        testplan.handle_modules()
813
814    assert testplan.modules == ['name1', 'name2']
815
816
817TESTDATA_6 = [
818    (True, False, False, 0, 'report_test_tree'),
819    (True, True, False, 0, 'report_test_tree'),
820    (True, False, True, 0, 'report_test_tree'),
821    (True, True, True, 0, 'report_test_tree'),
822    (False, True, False, 0, 'report_test_list'),
823    (False, True, True, 0, 'report_test_list'),
824    (False, False, True, 0, 'report_tag_list'),
825    (False, False, False, 1, None),
826]
827
828@pytest.mark.parametrize(
829    'test_tree, list_tests, list_tags, expected_res, expected_method',
830    TESTDATA_6,
831    ids=['test tree', 'test tree + test list', 'test tree + tag list',
832         'test tree + test list + tag list', 'test list',
833         'test list + tag list', 'tag list', 'no report']
834)
835def test_testplan_report(
836    test_tree,
837    list_tests,
838    list_tags,
839    expected_res,
840    expected_method
841):
842    testplan = TestPlan(env=mock.Mock())
843    testplan.report_test_tree = mock.Mock()
844    testplan.report_test_list = mock.Mock()
845    testplan.report_tag_list = mock.Mock()
846
847    testplan.options = mock.Mock(
848        test_tree=test_tree,
849        list_tests=list_tests,
850        list_tags=list_tags,
851    )
852
853    res = testplan.report()
854
855    assert res == expected_res
856
857    methods = ['report_test_tree', 'report_test_list', 'report_tag_list']
858    if expected_method:
859        methods.remove(expected_method)
860        getattr(testplan, expected_method).assert_called_once()
861    for method in methods:
862        getattr(testplan, method).assert_not_called()
863
864
865TESTDATA_7 = [
866    (
867        [
868            mock.Mock(
869                yamlfile='a.yaml',
870                scenarios=['scenario1', 'scenario2']
871            ),
872            mock.Mock(
873                yamlfile='b.yaml',
874                scenarios=['scenario1']
875            )
876        ],
877        TwisterRuntimeError,
878        'Duplicated test scenarios found:\n' \
879        '- scenario1 found in:\n' \
880        '  - a.yaml\n' \
881        '  - b.yaml\n',
882        []
883    ),
884    (
885        [
886            mock.Mock(
887                yamlfile='a.yaml',
888                scenarios=['scenario.a.1', 'scenario.a.2']
889            ),
890            mock.Mock(
891                yamlfile='b.yaml',
892                scenarios=['scenario.b.1']
893            )
894        ],
895        None,
896        None,
897        ['No duplicates found.']
898    ),
899]
900
901@pytest.mark.parametrize(
902    'testsuites, expected_error, error_msg, expected_logs',
903    TESTDATA_7,
904    ids=['a duplicate', 'no duplicates']
905)
906def test_testplan_report_duplicates(
907    capfd,
908    caplog,
909    testsuites,
910    expected_error,
911    error_msg,
912    expected_logs
913):
914    def mock_get(name):
915        return list(filter(lambda x: name in x.scenarios, testsuites))
916
917    testplan = TestPlan(env=mock.Mock())
918    testplan.scenarios = [scenario for testsuite in testsuites \
919                                   for scenario in testsuite.scenarios]
920    testplan.get_testsuite = mock.Mock(side_effect=mock_get)
921
922    with pytest.raises(expected_error) if expected_error is not None else \
923         nullcontext() as err:
924        testplan.report_duplicates()
925
926    if expected_error:
927        assert str(err._excinfo[1]) == error_msg
928
929    assert all([log in caplog.text for log in expected_logs])
930
931
932def test_testplan_report_tag_list(capfd):
933    testplan = TestPlan(env=mock.Mock())
934    testplan.testsuites = {
935        'testsuite0': mock.Mock(tags=set(['tag1', 'tag2'])),
936        'testsuite1': mock.Mock(tags=set(['tag1', 'tag2', 'tag3'])),
937        'testsuite2': mock.Mock(tags=set(['tag1', 'tag3'])),
938        'testsuite3': mock.Mock(tags=set(['tag']))
939    }
940
941    testplan.report_tag_list()
942
943    out,err = capfd.readouterr()
944    sys.stdout.write(out)
945    sys.stderr.write(err)
946
947    assert '- tag' in out
948    assert '- tag1' in out
949    assert '- tag2' in out
950    assert '- tag3' in out
951
952
953def test_testplan_report_test_tree(capfd):
954    testplan = TestPlan(env=mock.Mock())
955    testplan.get_all_tests = mock.Mock(
956        return_value=['1.dummy.case.1', '1.dummy.case.2',
957                      '2.dummy.case.1', '2.dummy.case.2',
958                      '3.dummy.case.1', '3.dummy.case.2',
959                      '4.dummy.case.1', '4.dummy.case.2',
960                      '5.dummy.case.1', '5.dummy.case.2',
961                      'sample.group1.case1', 'sample.group1.case2',
962                      'sample.group2.case', 'sample.group3.case1',
963                      'sample.group3.case2', 'sample.group3.case3']
964    )
965
966    testplan.report_test_tree()
967
968    out,err = capfd.readouterr()
969    sys.stdout.write(out)
970    sys.stderr.write(err)
971
972    expected = """
973Testsuite
974├── Samples
975│   ├── group1
976│   │   ├── sample.group1.case1
977│   │   └── sample.group1.case2
978│   ├── group2
979│   │   └── sample.group2.case
980│   └── group3
981│       ├── sample.group3.case1
982│       ├── sample.group3.case2
983│       └── sample.group3.case3
984└── Tests
985    ├── 1
986    │   └── dummy
987    │       ├── 1.dummy.case.1
988    │       └── 1.dummy.case.2
989    ├── 2
990    │   └── dummy
991    │       ├── 2.dummy.case.1
992    │       └── 2.dummy.case.2
993    ├── 3
994    │   └── dummy
995    │       ├── 3.dummy.case.1
996    │       └── 3.dummy.case.2
997    ├── 4
998    │   └── dummy
999    │       ├── 4.dummy.case.1
1000    │       └── 4.dummy.case.2
1001    └── 5
1002        └── dummy
1003            ├── 5.dummy.case.1
1004            └── 5.dummy.case.2
1005"""
1006    expected = expected[1:]
1007
1008    assert expected in out
1009
1010
1011def test_testplan_report_test_list(capfd):
1012    testplan = TestPlan(env=mock.Mock())
1013    testplan.get_all_tests = mock.Mock(
1014        return_value=['4.dummy.case.1', '4.dummy.case.2',
1015                      '3.dummy.case.2', '2.dummy.case.2',
1016                      '1.dummy.case.1', '1.dummy.case.2',
1017                      '3.dummy.case.1', '2.dummy.case.1',
1018                      '5.dummy.case.1', '5.dummy.case.2']
1019    )
1020
1021    testplan.report_test_list()
1022
1023    out,err = capfd.readouterr()
1024    sys.stdout.write(out)
1025    sys.stderr.write(err)
1026
1027    assert ' - 1.dummy.case.1\n' \
1028           ' - 1.dummy.case.2\n' \
1029           ' - 2.dummy.case.1\n' \
1030           ' - 2.dummy.case.2\n' \
1031           ' - 3.dummy.case.1\n' \
1032           ' - 3.dummy.case.2\n' \
1033           ' - 4.dummy.case.1\n' \
1034           ' - 4.dummy.case.2\n' \
1035           ' - 5.dummy.case.1\n' \
1036           ' - 5.dummy.case.2\n' \
1037           '10 total.' in out
1038
1039
1040def test_testplan_config(caplog):
1041    testplan = TestPlan(env=mock.Mock())
1042    testplan.coverage_platform = 'dummy cov'
1043
1044    testplan.config()
1045
1046    assert 'coverage platform: dummy cov' in caplog.text
1047
1048
1049def test_testplan_info(capfd):
1050    TestPlan.info('dummy text')
1051
1052    out, err = capfd.readouterr()
1053    sys.stdout.write(out)
1054    sys.stderr.write(err)
1055
1056    assert 'dummy text\n' in out
1057
1058
1059TESTDATA_8 = [
1060    (False, False, ['p1e2', 'p2', 'p3', 'p3@B'], ['p2']),
1061    (False, True, None, None),
1062    (True, False, ['p1e2', 'p2', 'p3', 'p3@B'], ['p3']),
1063]
1064
1065@pytest.mark.parametrize(
1066    'override_default_platforms, create_duplicate, expected_platform_names, expected_defaults',
1067    TESTDATA_8,
1068    ids=['no override defaults', 'create duplicate', 'override defaults']
1069)
1070def test_testplan_add_configurations(
1071    tmp_path,
1072    override_default_platforms,
1073    create_duplicate,
1074    expected_platform_names,
1075    expected_defaults
1076):
1077    # tmp_path
1078    # └ boards  <- board root
1079    #   ├ arch1
1080    #   │ ├ p1
1081    #   │ | ├ p1e1.yaml
1082    #   │ | └ p1e2.yaml
1083    #   │ └ p2
1084    #   │   ├ p2.yaml
1085    #   │   └ p2-1.yaml <- duplicate
1086    #   │   └ p2-2.yaml <- load error
1087    #   └ arch2
1088    #     └ p3
1089    #       ├ p3.yaml
1090    #       └ p3_B.conf
1091
1092    tmp_board_root_dir = tmp_path / 'boards'
1093    tmp_board_root_dir.mkdir()
1094
1095    tmp_arch1_dir = tmp_board_root_dir / 'arch1'
1096    tmp_arch1_dir.mkdir()
1097
1098    tmp_p1_dir = tmp_arch1_dir / 'p1'
1099    tmp_p1_dir.mkdir()
1100
1101    p1e1_yaml = """\
1102identifier: p1e1
1103name: Platform 1 Edition 1
1104type: native
1105arch: arch1
1106vendor: vendor1
1107toolchain:
1108  - zephyr
1109twister: False
1110"""
1111    p1e1_yamlfile = tmp_p1_dir / 'p1e1.yaml'
1112    p1e1_yamlfile.write_text(p1e1_yaml)
1113
1114    p1e2_yaml = """\
1115identifier: p1e2
1116name: Platform 1 Edition 2
1117type: native
1118arch: arch1
1119vendor: vendor1
1120toolchain:
1121  - zephyr
1122"""
1123    p1e2_yamlfile = tmp_p1_dir / 'p1e2.yaml'
1124    p1e2_yamlfile.write_text(p1e2_yaml)
1125
1126    tmp_p2_dir = tmp_arch1_dir / 'p2'
1127    tmp_p2_dir.mkdir()
1128
1129    p2_yaml = """\
1130identifier: p2
1131name: Platform 2
1132type: sim
1133arch: arch1
1134vendor: vendor2
1135toolchain:
1136  - zephyr
1137testing:
1138  default: True
1139"""
1140    p2_yamlfile = tmp_p2_dir / 'p2.yaml'
1141    p2_yamlfile.write_text(p2_yaml)
1142
1143    if create_duplicate:
1144        p2_yamlfile = tmp_p2_dir / 'p2-1.yaml'
1145        p2_yamlfile.write_text(p2_yaml)
1146
1147    p2_2_yaml = """\
1148testing:
1149  ć#@%!#!#^#@%@:1.0
1150identifier: p2_2
1151name: Platform 2 2
1152type: sim
1153arch: arch1
1154vendor: vendor2
1155toolchain:
1156  - zephyr
1157"""
1158    p2_2_yamlfile = tmp_p2_dir / 'p2-2.yaml'
1159    p2_2_yamlfile.write_text(p2_2_yaml)
1160
1161    tmp_arch2_dir = tmp_board_root_dir / 'arch2'
1162    tmp_arch2_dir.mkdir()
1163
1164    tmp_p3_dir = tmp_arch2_dir / 'p3'
1165    tmp_p3_dir.mkdir()
1166
1167    p3_yaml = """\
1168identifier: p3
1169name: Platform 3
1170type: unit
1171arch: arch2
1172vendor: vendor3
1173toolchain:
1174  - zephyr
1175"""
1176    p3_yamlfile = tmp_p3_dir / 'p3.yaml'
1177    p3_yamlfile.write_text(p3_yaml)
1178    p3_yamlfile = tmp_p3_dir / 'p3_B.conf'
1179    p3_yamlfile.write_text('')
1180
1181    env = mock.Mock(board_roots=[tmp_board_root_dir])
1182
1183    testplan = TestPlan(env=env)
1184
1185    testplan.test_config = {
1186        'platforms': {
1187            'override_default_platforms': override_default_platforms,
1188            'default_platforms': ['p3', 'p1e1']
1189        }
1190    }
1191
1192    with pytest.raises(Exception) if create_duplicate else nullcontext():
1193        testplan.add_configurations()
1194
1195    if expected_defaults is not None:
1196        assert sorted(expected_defaults) == sorted(testplan.default_platforms)
1197    if expected_platform_names is not None:
1198        assert sorted(expected_platform_names) == sorted(testplan.platform_names)
1199
1200
1201def test_testplan_get_all_tests():
1202    testplan = TestPlan(env=mock.Mock())
1203    tc1 = mock.Mock()
1204    tc1.name = 'tc1'
1205    tc2 = mock.Mock()
1206    tc2.name = 'tc2'
1207    tc3 = mock.Mock()
1208    tc3.name = 'tc3'
1209    tc4 = mock.Mock()
1210    tc4.name = 'tc4'
1211    tc5 = mock.Mock()
1212    tc5.name = 'tc5'
1213    ts1 = mock.Mock(testcases=[tc1, tc2])
1214    ts2 = mock.Mock(testcases=[tc3, tc4, tc5])
1215    testplan.testsuites = {
1216        'ts1': ts1,
1217        'ts2': ts2
1218    }
1219
1220    res = testplan.get_all_tests()
1221
1222    assert sorted(res) == ['tc1', 'tc2', 'tc3', 'tc4', 'tc5']
1223
1224
1225TESTDATA_9 = [
1226    ([], False, 7),
1227    ([], True, 5),
1228    (['good_test/dummy.common.1', 'good_test/dummy.common.2', 'good_test/dummy.common.3'], False, 3),
1229    (['good_test/dummy.common.1', 'good_test/dummy.common.2', 'good_test/dummy.common.3'], True, 0),
1230]
1231
1232@pytest.mark.parametrize(
1233    'testsuite_filter, use_alt_root, expected_suite_count',
1234    TESTDATA_9,
1235    ids=['no testsuite filter', 'no testsuite filter, alt root',
1236         'testsuite filter', 'testsuite filter, alt root']
1237)
1238def test_testplan_add_testsuites(tmp_path, testsuite_filter, use_alt_root, expected_suite_count):
1239    # tmp_path
1240    # ├ tests  <- test root
1241    # │ ├ good_test
1242    # │ │ └ testcase.yaml
1243    # │ ├ wrong_test
1244    # │ │ └ testcase.yaml
1245    # │ ├ good_sample
1246    # │ │ └ sample.yaml
1247    # │ └ others
1248    # │   └ other.txt
1249    # └ other_tests  <- alternate test root
1250    #   └ good_test
1251    #     └ testcase.yaml
1252    tmp_test_root_dir = tmp_path / 'tests'
1253    tmp_test_root_dir.mkdir()
1254
1255    tmp_good_test_dir = tmp_test_root_dir / 'good_test'
1256    tmp_good_test_dir.mkdir()
1257    testcase_yaml_1 = """\
1258tests:
1259  dummy.common.1:
1260    build_on_all: true
1261  dummy.common.2:
1262    build_on_all: true
1263  dummy.common.3:
1264    build_on_all: true
1265  dummy.special:
1266    build_on_all: false
1267"""
1268    testfile_1 = tmp_good_test_dir / 'testcase.yaml'
1269    testfile_1.write_text(testcase_yaml_1)
1270
1271    tmp_bad_test_dir = tmp_test_root_dir / 'wrong_test'
1272    tmp_bad_test_dir.mkdir()
1273    testcase_yaml_2 = """\
1274tests:
1275 wrong:
1276  yaml: {]}
1277"""
1278    testfile_2 = tmp_bad_test_dir / 'testcase.yaml'
1279    testfile_2.write_text(testcase_yaml_2)
1280
1281    tmp_good_sample_dir = tmp_test_root_dir / 'good_sample'
1282    tmp_good_sample_dir.mkdir()
1283    samplecase_yaml_1 = """\
1284tests:
1285  sample.dummy.common.1:
1286    tags:
1287    - samples
1288  sample.dummy.common.2:
1289    tags:
1290    - samples
1291  sample.dummy.special.1:
1292    tags:
1293    - samples
1294"""
1295    samplefile_1 = tmp_good_sample_dir / 'sample.yaml'
1296    samplefile_1.write_text(samplecase_yaml_1)
1297
1298    tmp_other_dir = tmp_test_root_dir / 'others'
1299    tmp_other_dir.mkdir()
1300    _ = tmp_other_dir / 'other.txt'
1301
1302    tmp_alt_test_root_dir = tmp_path / 'other_tests'
1303    tmp_alt_test_root_dir.mkdir()
1304
1305    tmp_alt_good_test_dir = tmp_alt_test_root_dir / 'good_test'
1306    tmp_alt_good_test_dir.mkdir()
1307    testcase_yaml_3 = """\
1308tests:
1309  dummy.alt.1:
1310    build_on_all: true
1311  dummy.alt.2:
1312    build_on_all: true
1313"""
1314    testfile_3 = tmp_alt_good_test_dir / 'testcase.yaml'
1315    testfile_3.write_text(testcase_yaml_3)
1316
1317    env = mock.Mock(
1318        test_roots=[tmp_test_root_dir],
1319        alt_config_root=[tmp_alt_test_root_dir] if use_alt_root else []
1320    )
1321
1322    testplan = TestPlan(env=env)
1323
1324    res = testplan.add_testsuites(testsuite_filter)
1325
1326    assert res == expected_suite_count
1327
1328
1329def test_testplan_str():
1330    testplan = TestPlan(env=mock.Mock())
1331    testplan.name = 'my name'
1332
1333    res = testplan.__str__()
1334
1335    assert res == 'my name'
1336
1337
1338TESTDATA_10 = [
1339    ('a platform', True),
1340    ('other platform', False),
1341]
1342
1343@pytest.mark.parametrize(
1344    'name, expect_found',
1345    TESTDATA_10,
1346    ids=['platform exists', 'no platform']
1347)
1348def test_testplan_get_platform(name, expect_found):
1349    testplan = TestPlan(env=mock.Mock())
1350    p1 = mock.Mock()
1351    p1.name = 'some platform'
1352    p2 = mock.Mock()
1353    p2.name = 'a platform'
1354    testplan.platforms = [p1, p2]
1355
1356    res = testplan.get_platform(name)
1357
1358    if expect_found:
1359        assert res.name == name
1360    else:
1361        assert res is None
1362
1363
1364TESTDATA_11 = [
1365    (True, 'runnable'),
1366    (False, 'buildable'),
1367]
1368
1369@pytest.mark.parametrize(
1370    'device_testing, expected_tfilter',
1371    TESTDATA_11,
1372    ids=['device testing', 'no device testing']
1373)
1374def test_testplan_load_from_file(caplog, device_testing, expected_tfilter):
1375    def get_platform(name):
1376        p = mock.Mock()
1377        p.name = name
1378        return p
1379
1380    ts1tc1 = mock.Mock()
1381    ts1tc1.name = 'TS1.tc1'
1382    ts1 = mock.Mock(testcases=[ts1tc1])
1383    ts1.name = 'TestSuite 1'
1384    ts2 = mock.Mock(testcases=[])
1385    ts2.name = 'TestSuite 2'
1386    ts3tc1 = mock.Mock()
1387    ts3tc1.name = 'TS3.tc1'
1388    ts3tc2 = mock.Mock()
1389    ts3tc2.name = 'TS3.tc2'
1390    ts3 = mock.Mock(testcases=[ts3tc1, ts3tc2])
1391    ts3.name = 'TestSuite 3'
1392    ts4tc1 = mock.Mock()
1393    ts4tc1.name = 'TS4.tc1'
1394    ts4 = mock.Mock(testcases=[ts4tc1])
1395    ts4.name = 'TestSuite 4'
1396    ts5 = mock.Mock(testcases=[])
1397    ts5.name = 'TestSuite 5'
1398
1399    testplan = TestPlan(env=mock.Mock(outdir=os.path.join('out', 'dir')))
1400    testplan.options = mock.Mock(device_testing=device_testing, test_only=True)
1401    testplan.testsuites = {
1402        'TestSuite 1': ts1,
1403        'TestSuite 2': ts2,
1404        'TestSuite 3': ts3,
1405        'TestSuite 4': ts4,
1406        'TestSuite 5': ts5
1407    }
1408
1409    testplan.get_platform = mock.Mock(side_effect=get_platform)
1410
1411    testplan_data = """\
1412{
1413    "testsuites": [
1414        {
1415            "name": "TestSuite 1",
1416            "platform": "Platform 1",
1417            "run_id": 1,
1418            "execution_time": 60.00,
1419            "used_ram": 4096,
1420            "available_ram": 12278,
1421            "used_rom": 1024,
1422            "available_rom": 1047552,
1423            "status": "passed",
1424            "reason": "OK",
1425            "testcases": [
1426                {
1427                    "identifier": "TS1.tc1",
1428                    "status": "passed",
1429                    "reason": "passed",
1430                    "execution_time": 60.00,
1431                    "log": ""
1432                }
1433            ]
1434        },
1435        {
1436            "name": "TestSuite 2",
1437            "platform": "Platform 1"
1438        },
1439        {
1440            "name": "TestSuite 3",
1441            "platform": "Platform 1",
1442            "run_id": 1,
1443            "execution_time": 360.00,
1444            "used_ram": 4096,
1445            "available_ram": 12278,
1446            "used_rom": 1024,
1447            "available_rom": 1047552,
1448            "status": "error",
1449            "reason": "File Not Found Error",
1450            "testcases": [
1451                {
1452                    "identifier": "TS3.tc1",
1453                    "status": "error",
1454                    "reason": "File Not Found Error.",
1455                    "execution_time": 360.00,
1456                    "log": "[ERROR]: File 'dummy.yaml' not found!\\nClosing..."
1457                },
1458                {
1459                    "identifier": "TS3.tc2"
1460                }
1461            ]
1462        },
1463        {
1464            "name": "TestSuite 4",
1465            "platform": "Platform 1",
1466            "execution_time": 360.00,
1467            "used_ram": 4096,
1468            "available_ram": 12278,
1469            "used_rom": 1024,
1470            "available_rom": 1047552,
1471            "status": "skipped",
1472            "reason": "Not in requested test list.",
1473            "testcases": [
1474                {
1475                    "identifier": "TS4.tc1",
1476                    "status": "skipped",
1477                    "reason": "Not in requested test list.",
1478                    "execution_time": 360.00,
1479                    "log": "[INFO] Parsing..."
1480                },
1481                {
1482                    "identifier": "TS3.tc2"
1483                }
1484            ]
1485        },
1486        {
1487            "name": "TestSuite 5",
1488            "platform": "Platform 2"
1489        }
1490    ]
1491}
1492"""
1493
1494    filter_platform = ['Platform 1']
1495
1496    check_runnable_mock = mock.Mock(return_value=True)
1497
1498    with mock.patch('builtins.open', mock.mock_open(read_data=testplan_data)), \
1499         mock.patch('twisterlib.testinstance.TestInstance.check_runnable', check_runnable_mock), \
1500         mock.patch('twisterlib.testinstance.TestInstance.create_overlay', mock.Mock()):
1501        testplan.load_from_file('dummy.yaml', filter_platform)
1502
1503    expected_instances = {
1504        'Platform 1/TestSuite 1': {
1505            'metrics': {
1506                'handler_time': 60.0,
1507                'used_ram': 4096,
1508                'used_rom': 1024,
1509                'available_ram': 12278,
1510                'available_rom': 1047552
1511            },
1512            'retries': 0,
1513            'testcases': {
1514                'TS1.tc1': {
1515                    'status': 'passed',
1516                    'reason': None,
1517                    'duration': 60.0,
1518                    'output': ''
1519                }
1520            }
1521        },
1522        'Platform 1/TestSuite 2': {
1523            'metrics': {
1524                'handler_time': 0,
1525                'used_ram': 0,
1526                'used_rom': 0,
1527                'available_ram': 0,
1528                'available_rom': 0
1529            },
1530            'retries': 0,
1531            'testcases': []
1532        },
1533        'Platform 1/TestSuite 3': {
1534            'metrics': {
1535                'handler_time': 360.0,
1536                'used_ram': 4096,
1537                'used_rom': 1024,
1538                'available_ram': 12278,
1539                'available_rom': 1047552
1540            },
1541            'retries': 1,
1542            'testcases': {
1543                    'TS3.tc1': {
1544                        'status': 'error',
1545                        'reason': None,
1546                        'duration': 360.0,
1547                        'output': '[ERROR]: File \'dummy.yaml\' not found!\nClosing...'
1548                    },
1549                    'TS3.tc2': {
1550                        'status': None,
1551                        'reason': None,
1552                        'duration': 0,
1553                        'output': ''
1554                    }
1555            }
1556        },
1557        'Platform 1/TestSuite 4': {
1558            'metrics': {
1559                'handler_time': 360.0,
1560                'used_ram': 4096,
1561                'used_rom': 1024,
1562                'available_ram': 12278,
1563                'available_rom': 1047552
1564            },
1565            'retries': 0,
1566            'testcases': {
1567                'TS4.tc1': {
1568                    'status': 'skipped',
1569                    'reason': 'Not in requested test list.',
1570                    'duration': 360.0,
1571                    'output': '[INFO] Parsing...'
1572                }
1573            }
1574        },
1575    }
1576
1577    for n, i in testplan.instances.items():
1578        assert expected_instances[n]['metrics'] == i.metrics
1579        assert expected_instances[n]['retries'] == i.retries
1580        for t in i.testcases:
1581            assert expected_instances[n]['testcases'][str(t)]['status'] == t.status
1582            assert expected_instances[n]['testcases'][str(t)]['reason'] == t.reason
1583            assert expected_instances[n]['testcases'][str(t)]['duration'] == t.duration
1584            assert expected_instances[n]['testcases'][str(t)]['output'] == t.output
1585
1586    check_runnable_mock.assert_called_with(mock.ANY, expected_tfilter, mock.ANY, mock.ANY)
1587
1588    expected_logs = [
1589        'loading TestSuite 1...',
1590        'loading TestSuite 2...',
1591        'loading TestSuite 3...',
1592        'loading TestSuite 4...',
1593    ]
1594    assert all([log in caplog.text for log in expected_logs])
1595
1596
1597def test_testplan_add_instances():
1598    testplan = TestPlan(env=mock.Mock())
1599    instance1 = mock.Mock()
1600    instance1.name = 'instance 1'
1601    instance2 = mock.Mock()
1602    instance2.name = 'instance 2'
1603    instance_list = [instance1, instance2]
1604
1605    testplan.add_instances(instance_list)
1606
1607    assert testplan.instances == {
1608        'instance 1': instance1,
1609        'instance 2': instance2,
1610    }
1611
1612
1613def test_testplan_get_testsuite():
1614    testplan = TestPlan(env=mock.Mock())
1615    testplan.testsuites = {
1616        'testsuite0': mock.Mock(testcases=[mock.Mock(), mock.Mock()]),
1617        'testsuite1': mock.Mock(testcases=[mock.Mock()]),
1618        'testsuite2': mock.Mock(testcases=[mock.Mock(), mock.Mock()]),
1619        'testsuite3': mock.Mock(testcases=[])
1620    }
1621    testplan.testsuites['testsuite0'].testcases[0].name = 'testcase name 0'
1622    testplan.testsuites['testsuite0'].testcases[1].name = 'testcase name 1'
1623    testplan.testsuites['testsuite1'].testcases[0].name = 'sample id'
1624    testplan.testsuites['testsuite2'].testcases[0].name = 'dummy id'
1625    testplan.testsuites['testsuite2'].testcases[1].name = 'sample id'
1626
1627    id = 'sample id'
1628
1629    res = testplan.get_testsuite(id)
1630
1631    assert len(res) == 2
1632    assert testplan.testsuites['testsuite1'] in res
1633    assert testplan.testsuites['testsuite2'] in res
1634
1635
1636def test_testplan_verify_platforms_existence(caplog):
1637    testplan = TestPlan(env=mock.Mock())
1638    testplan.platform_names = ['a platform', 'other platform']
1639
1640    platform_names = ['other platform', 'some platform']
1641    log_info = 'PLATFORM ERROR'
1642
1643    with pytest.raises(SystemExit) as se:
1644        testplan.verify_platforms_existence(platform_names, log_info)
1645
1646    assert str(se.value) == '2'
1647    assert 'PLATFORM ERROR - unrecognized platform - some platform'
1648
1649
1650TESTDATA_12 = [
1651    (True),
1652    (False)
1653]
1654
1655@pytest.mark.parametrize(
1656    'exists',
1657    TESTDATA_12,
1658    ids=['links dir exists', 'links dir does not exist']
1659)
1660def test_testplan_create_build_dir_links(exists):
1661    outdir = os.path.join('out', 'dir')
1662    instances_linked = []
1663
1664    def mock_link(links_dir_path, instance):
1665        assert links_dir_path == os.path.join(outdir, 'twister_links')
1666        instances_linked.append(instance)
1667
1668    instances = {
1669        'inst0': mock.Mock(status='passed'),
1670        'inst1': mock.Mock(status='skipped'),
1671        'inst2': mock.Mock(status='error'),
1672    }
1673    expected_instances = [instances['inst0'], instances['inst2']]
1674
1675    testplan = TestPlan(env=mock.Mock(outdir=outdir))
1676    testplan._create_build_dir_link = mock.Mock(side_effect=mock_link)
1677    testplan.instances = instances
1678
1679    with mock.patch('os.path.exists', return_value=exists), \
1680         mock.patch('os.mkdir', mock.Mock()) as mkdir_mock:
1681        testplan.create_build_dir_links()
1682
1683    if not exists:
1684        mkdir_mock.assert_called_once()
1685
1686    assert expected_instances == instances_linked
1687
1688
1689TESTDATA_13 = [
1690    ('nt'),
1691    ('Linux')
1692]
1693
1694@pytest.mark.parametrize(
1695    'os_name',
1696    TESTDATA_13,
1697)
1698def test_testplan_create_build_dir_link(os_name):
1699    def mock_makedirs(path, exist_ok=False):
1700        assert exist_ok
1701        assert path == instance_build_dir
1702
1703    def mock_symlink(source, target):
1704        assert source == instance_build_dir
1705        assert target == os.path.join('links', 'path', 'test_0')
1706
1707    def mock_call(cmd, shell=False):
1708        assert shell
1709        assert cmd == ['mklink', '/J', os.path.join('links', 'path', 'test_0'),
1710                       instance_build_dir]
1711
1712    def mock_join(*paths):
1713        slash = "\\" if os.name == 'nt' else "/"
1714        return slash.join(paths)
1715
1716    with mock.patch('os.name', os_name), \
1717         mock.patch('os.symlink', side_effect=mock_symlink), \
1718         mock.patch('os.makedirs', side_effect=mock_makedirs), \
1719         mock.patch('subprocess.call', side_effect=mock_call), \
1720         mock.patch('os.path.join', side_effect=mock_join):
1721
1722        testplan = TestPlan(env=mock.Mock())
1723        links_dir_path = os.path.join('links', 'path')
1724        instance_build_dir = os.path.join('some', 'far', 'off', 'build', 'dir')
1725        instance = mock.Mock(build_dir=instance_build_dir)
1726        testplan._create_build_dir_link(links_dir_path, instance)
1727
1728        assert instance.build_dir == os.path.join('links', 'path', 'test_0')
1729        assert testplan.link_dir_counter == 1
1730
1731
1732TESTDATA_14 = [
1733    ('bad platform', 'dummy reason', [],
1734     'dummy status', 'dummy reason'),
1735    ('good platform', 'quarantined', [],
1736     'error', 'quarantined but is one of the integration platforms'),
1737    ('good platform', 'dummy reason', [{'type': 'command line filter'}],
1738     'dummy status', 'dummy reason'),
1739    ('good platform', 'dummy reason', [{'type': 'Skip filter'}],
1740     'dummy status', 'dummy reason'),
1741    ('good platform', 'dummy reason', [{'type': 'platform key filter'}],
1742     'dummy status', 'dummy reason'),
1743    ('good platform', 'dummy reason', [{'type': 'Toolchain filter'}],
1744     'dummy status', 'dummy reason'),
1745    ('good platform', 'dummy reason', [{'type': 'Module filter'}],
1746     'dummy status', 'dummy reason'),
1747    ('good platform', 'dummy reason', [{'type': 'testsuite filter'}],
1748     'error', 'dummy reason but is one of the integration platforms'),
1749]
1750
1751@pytest.mark.parametrize(
1752    'platform_name, reason, filters,' \
1753    ' expected_status, expected_reason',
1754    TESTDATA_14,
1755    ids=['wrong platform', 'quarantined', 'command line filtered',
1756         'skip filtered', 'platform key filtered', 'toolchain filtered',
1757         'module filtered', 'skip to error change']
1758)
1759def test_change_skip_to_error_if_integration(
1760    platform_name,
1761    reason,
1762    filters,
1763    expected_status,
1764    expected_reason
1765):
1766    options = mock.Mock()
1767    platform = mock.Mock()
1768    platform.name = platform_name
1769    testsuite = mock.Mock(integration_platforms=['good platform', 'a platform'])
1770    instance = mock.Mock(
1771        testsuite=testsuite,
1772        platform=platform,
1773        filters=filters,
1774        status='dummy status',
1775        reason=reason
1776    )
1777
1778    change_skip_to_error_if_integration(options, instance)
1779
1780    assert instance.status == expected_status
1781    assert instance.reason == expected_reason
1782