#!/usr/bin/env python3
# Copyright (c) 2020 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=line-too-long
"""
Tests for the TestInstance class
"""

import os
from contextlib import nullcontext
from unittest import mock

import pytest
from expr_parser import reserved
from twisterlib.error import BuildError
from twisterlib.handlers import QEMUHandler
from twisterlib.platform import Simulator
from twisterlib.runner import TwisterRunner
from twisterlib.statuses import TwisterStatus
from twisterlib.testinstance import TestInstance

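# Each TESTDATA_PART_1 entry:
# (build_only, slow, harness, platform_type, platform_sim, device_testing, fixture, expected),
# where only the second element of `expected` is checked against check_runnable() below.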
TESTDATA_PART_1 = [
    (False, False, "console", None, "qemu", False, [], (False, True)),
    (False, False, "console", "native", "qemu", False, [], (False, True)),
    (True, False, "console", "native", "nsim", False, [], (True, False)),
    (True, True, "console", "native", "renode", False, [], (True, False)),
    (False, False, "sensor", "native", "", False, [], (True, False)),
    (False, False, "sensor", None, "", False, [], (True, False)),
    (False, True, "sensor", "native", "", True, [], (True, False)),
]
@pytest.mark.parametrize(
    "build_only, slow, harness, platform_type, platform_sim, device_testing, fixture, expected",
    TESTDATA_PART_1
)
def test_check_build_or_run(
    class_testplan,
    all_testsuites_dict,
    platforms_list,
    build_only,
    slow,
    harness,
    platform_type,
    platform_sim,
    device_testing,
    fixture,
    expected
):
    """Check the conditions deciding between build-only and run scenarios.

    Scenario 1: with different parameters passed, build_only and run are set correctly.
    Scenario 2: on Windows, the test is only runnable when QEMU_BIN_PATH is set.
    """

    class_testplan.testsuites = all_testsuites_dict
    testsuite = class_testplan.testsuites.get('scripts/tests/twister/test_data/testsuites/tests/'
                                              'test_a/test_a.check_1')
    print(testsuite)

    class_testplan.platforms = platforms_list
    platform = class_testplan.get_platform("demo_board_2")
    platform.type = platform_type
    platform.simulators = [Simulator({"name": platform_sim})] if platform_sim else []
    testsuite.harness = harness
    testsuite.build_only = build_only
    testsuite.slow = slow

    testinstance = TestInstance(testsuite, platform, 'zephyr', class_testplan.env.outdir)
    env = mock.Mock(
        options=mock.Mock(
            device_testing=False,
            enable_slow=slow,
            fixtures=fixture,
            filter="",
            sim_name=platform_sim
        )
    )
    run = testinstance.check_runnable(env.options)
    _, r = expected
    assert run == r

    with mock.patch('os.name', 'nt'):
        # path to the QEMU binary is not in the QEMU_BIN_PATH environment variable
        run = testinstance.check_runnable(env.options)
        assert not run

        # mock a QEMU_BIN_PATH environment variable pointing at a QEMU binary
        with mock.patch('os.environ', {'QEMU_BIN_PATH': ''}):
            run = testinstance.check_runnable(env.options)
            _, r = expected
            assert run == r


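# Each TESTDATA_PART_2 entry:
# (enable_asan, enable_ubsan, enable_coverage, coverage_platform, platform_type,
#  extra_configs, expected_content of the generated overlay)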
TESTDATA_PART_2 = [
    (True, True, True, ["demo_board_2/unit_testing"], "native",
     None, '\nCONFIG_COVERAGE=y\nCONFIG_ASAN=y\nCONFIG_UBSAN=y'),
    (True, False, True, ["demo_board_2/unit_testing"], "native",
     None, '\nCONFIG_COVERAGE=y\nCONFIG_ASAN=y'),
    (False, False, True, ["demo_board_2/unit_testing"], 'native',
     None, '\nCONFIG_COVERAGE=y'),
    (True, False, True, ["demo_board_2/unit_testing"], 'mcu',
     None, '\nCONFIG_COVERAGE=y'),
    (False, False, False, ["demo_board_2/unit_testing"], 'native', None, ''),
    (False, False, True, ['demo_board_1'], 'native', None, ''),
    (True, False, False, ["demo_board_2"], 'native', None, '\nCONFIG_ASAN=y'),
    (False, True, False, ["demo_board_2"], 'native', None, '\nCONFIG_UBSAN=y'),
    (False, False, False, ["demo_board_2"], 'native',
     ["CONFIG_LOG=y"], 'CONFIG_LOG=y'),
    (False, False, False, ["demo_board_2"], 'native',
     ["arch:x86:CONFIG_LOG=y"], 'CONFIG_LOG=y'),
    (False, False, False, ["demo_board_2"], 'native',
     ["arch:arm:CONFIG_LOG=y"], ''),
    (False, False, False, ["demo_board_2"], 'native',
     ["platform:demo_board_2/unit_testing:CONFIG_LOG=y"], 'CONFIG_LOG=y'),
    (False, False, False, ["demo_board_2"], 'native',
     ["platform:demo_board_1:CONFIG_LOG=y"], ''),
]

@pytest.mark.parametrize(
    'enable_asan, enable_ubsan, enable_coverage, coverage_platform, platform_type,'
    ' extra_configs, expected_content',
    TESTDATA_PART_2
)
def test_create_overlay(
    class_testplan,
    all_testsuites_dict,
    platforms_list,
    enable_asan,
    enable_ubsan,
    enable_coverage,
    coverage_platform,
    platform_type,
    extra_configs,
    expected_content
):
    """Check that the correct content is written to testcase_extra.conf for each option combination."""
    class_testplan.testsuites = all_testsuites_dict
    testcase = class_testplan.testsuites.get('scripts/tests/twister/test_data/testsuites/samples/'
                                             'test_app/sample_test.app')

    if extra_configs:
        testcase.extra_configs = extra_configs

    class_testplan.platforms = platforms_list
    platform = class_testplan.get_platform("demo_board_2")

    testinstance = TestInstance(testcase, platform, 'zephyr', class_testplan.env.outdir)
    platform.type = platform_type
    assert testinstance.create_overlay(platform, enable_asan, enable_ubsan, enable_coverage, coverage_platform) == expected_content

def test_calculate_sizes(class_testplan, all_testsuites_dict, platforms_list):
    """Check that calculate_sizes() raises a BuildError when the Zephyr ELF binary is missing or not unique."""
    class_testplan.testsuites = all_testsuites_dict
    testcase = class_testplan.testsuites.get('scripts/tests/twister/test_data/testsuites/samples/'
                                             'test_app/sample_test.app')
    class_testplan.platforms = platforms_list
    platform = class_testplan.get_platform("demo_board_2")
    testinstance = TestInstance(testcase, platform, 'zephyr', class_testplan.env.outdir)

    with pytest.raises(BuildError):
        assert testinstance.calculate_sizes() == "Missing/multiple output ELF binary"

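# Each TESTDATA_PART_3 entry: (filter_expr, expected CMake filter stages).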
TESTDATA_PART_3 = [
    (
        'CONFIG_ARCH_HAS_THREAD_LOCAL_STORAGE and' \
        ' CONFIG_TOOLCHAIN_SUPPORTS_THREAD_LOCAL_STORAGE and' \
        ' not (CONFIG_TOOLCHAIN_ARCMWDT_SUPPORTS_THREAD_LOCAL_STORAGE and CONFIG_USERSPACE)',
        ['kconfig']
    ),
    (
        '(dt_compat_enabled("st,stm32-flash-controller") or' \
        ' dt_compat_enabled("st,stm32h7-flash-controller")) and' \
        ' dt_label_with_parent_compat_enabled("storage_partition", "fixed-partitions")',
        ['dts']
    ),
    (
        '((CONFIG_FLASH_HAS_DRIVER_ENABLED and not CONFIG_TRUSTED_EXECUTION_NONSECURE) and' \
        ' dt_label_with_parent_compat_enabled("storage_partition", "fixed-partitions")) or' \
        ' (CONFIG_FLASH_HAS_DRIVER_ENABLED and CONFIG_TRUSTED_EXECUTION_NONSECURE and' \
        ' dt_label_with_parent_compat_enabled("slot1_ns_partition", "fixed-partitions"))',
        ['dts', 'kconfig']
    ),
    (
        '((CONFIG_CPU_AARCH32_CORTEX_R or CONFIG_CPU_CORTEX_M) and' \
        ' CONFIG_CPU_HAS_FPU and TOOLCHAIN_HAS_NEWLIB == 1) or CONFIG_ARCH_POSIX',
        ['full']
    )
]

@pytest.mark.parametrize("filter_expr, expected_stages", TESTDATA_PART_3)
def test_which_filter_stages(filter_expr, expected_stages):
    logic_keys = reserved.keys()
    stages = TwisterRunner.get_cmake_filter_stages(filter_expr, logic_keys)
    assert sorted(stages) == sorted(expected_stages)


@pytest.fixture(name='testinstance')
def sample_testinstance(all_testsuites_dict, class_testplan, platforms_list, request):
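    """Return a TestInstance built from the sample or tests testsuite selected via request.param."""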
    testsuite_path = 'scripts/tests/twister/test_data/testsuites'
    if request.param['testsuite_kind'] == 'sample':
        testsuite_path += '/samples/test_app/sample_test.app'
    elif request.param['testsuite_kind'] == 'tests':
        testsuite_path += '/tests/test_a/test_a.check_1'

    class_testplan.testsuites = all_testsuites_dict
    testsuite = class_testplan.testsuites.get(testsuite_path)
    class_testplan.platforms = platforms_list
    platform = class_testplan.get_platform(request.param.get('board_name', 'demo_board_2'))

    testinstance = TestInstance(testsuite, platform, 'zephyr', class_testplan.env.outdir)
    return testinstance


TESTDATA_1 = [
    (False),
    (True),
]

@pytest.mark.parametrize('detailed_test_id', TESTDATA_1)
def test_testinstance_init(all_testsuites_dict, class_testplan, platforms_list, detailed_test_id):
    testsuite_path = 'scripts/tests/twister/test_data/testsuites/samples/test_app/sample_test.app'
    class_testplan.testsuites = all_testsuites_dict
    testsuite = class_testplan.testsuites.get(testsuite_path)
    testsuite.detailed_test_id = detailed_test_id
    class_testplan.platforms = platforms_list
    platform = class_testplan.get_platform("demo_board_2/unit_testing")

    testinstance = TestInstance(testsuite, platform, 'zephyr', class_testplan.env.outdir)

    if detailed_test_id:
        assert testinstance.build_dir == os.path.join(class_testplan.env.outdir, platform.normalized_name, 'zephyr', testsuite_path)
    else:
        assert testinstance.build_dir == os.path.join(class_testplan.env.outdir, platform.normalized_name, 'zephyr', testsuite.source_dir_rel, testsuite.name)


@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'sample'}], indirect=True)
def test_testinstance_record(testinstance):
    testinstance.testcases = [mock.Mock()]
    recording = [
        {'field_1': 'recording_1_1', 'field_2': 'recording_1_2'},
        {'field_1': 'recording_2_1', 'field_2': 'recording_2_2'},
    ]
    with mock.patch(
        'builtins.open',
        mock.mock_open(read_data='')
    ) as mock_file, \
        mock.patch(
        'csv.DictWriter.writerow',
        mock.Mock()
    ) as mock_writeheader, \
        mock.patch(
        'csv.DictWriter.writerows',
        mock.Mock()
    ) as mock_writerows:
        testinstance.record(recording)

    print(mock_file.mock_calls)

    mock_file.assert_called_with(
        os.path.join(testinstance.build_dir, 'recording.csv'),
        'w'
    )

    mock_writeheader.assert_has_calls([mock.call({k: k for k in recording[0]})])
    mock_writerows.assert_has_calls([mock.call(recording)])


@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'sample'}], indirect=True)
def test_testinstance_add_filter(testinstance):
    reason = 'dummy reason'
    filter_type = 'dummy type'

    testinstance.add_filter(reason, filter_type)

    assert {'type': filter_type, 'reason': reason} in testinstance.filters
    assert testinstance.status == TwisterStatus.FILTER
    assert testinstance.reason == reason
    assert testinstance.filter_type == filter_type


def test_testinstance_init_cases(all_testsuites_dict, class_testplan, platforms_list):
    testsuite_path = 'scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1'
    class_testplan.testsuites = all_testsuites_dict
    testsuite = class_testplan.testsuites.get(testsuite_path)
    class_testplan.platforms = platforms_list
    platform = class_testplan.get_platform("demo_board_2")

    testinstance = TestInstance(testsuite, platform, 'zephyr', class_testplan.env.outdir)

    testinstance.init_cases()

    assert all(
        [
            any(
                [
                    tcc.name == tc.name and tcc.freeform == tc.freeform \
                        for tcc in testinstance.testsuite.testcases
                ]
            ) for tc in testsuite.testcases
        ]
    )


@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'sample'}], indirect=True)
def test_testinstance_get_run_id(testinstance):
    res = testinstance._get_run_id()

    assert isinstance(res, str)


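# Each TESTDATA_2 entry: (reason passed to add_missing_case_status(), expected reason set on the previously-missing case).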
TESTDATA_2 = [
    ('another reason', 'another reason'),
    (None, 'dummy reason'),
]

@pytest.mark.parametrize('reason, expected_reason', TESTDATA_2)
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_add_missing_case_status(testinstance, reason, expected_reason):
    testinstance.reason = 'dummy reason'

    status = TwisterStatus.PASS

    assert len(testinstance.testcases) > 1, 'Selected testsuite does not have enough testcases.'

    testinstance.testcases[0].status = TwisterStatus.STARTED
    testinstance.testcases[-1].status = TwisterStatus.NONE

    testinstance.add_missing_case_status(status, reason)

    assert testinstance.testcases[0].status == TwisterStatus.FAIL
    assert testinstance.testcases[-1].status == TwisterStatus.PASS
    assert testinstance.testcases[-1].reason == expected_reason


def test_testinstance_dunders(all_testsuites_dict, class_testplan, platforms_list):
    testsuite_path = 'scripts/tests/twister/test_data/testsuites/samples/test_app/sample_test.app'
    class_testplan.testsuites = all_testsuites_dict
    testsuite = class_testplan.testsuites.get(testsuite_path)
    class_testplan.platforms = platforms_list
    platform = class_testplan.get_platform("demo_board_2")

    testinstance = TestInstance(testsuite, platform, 'zephyr', class_testplan.env.outdir)
    testinstance_copy = TestInstance(testsuite, platform, 'zephyr', class_testplan.env.outdir)

    d = testinstance.__getstate__()

    d['name'] = 'dummy name'
    testinstance_copy.__setstate__(d)

    d['name'] = 'another name'
    testinstance.__setstate__(d)

    assert testinstance < testinstance_copy

    testinstance_copy.__setstate__(d)

    assert not testinstance < testinstance_copy
    assert not testinstance_copy < testinstance

    assert testinstance.__repr__() == f'<TestSuite {testsuite_path} on demo_board_2/unit_testing>'


@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_set_case_status_by_name(testinstance):
    name = 'test_a.check_1.2a'
    status = TwisterStatus.PASS
    reason = 'dummy reason'

    tc = testinstance.set_case_status_by_name(name, status, reason)

    assert tc.name == name
    assert tc.status == status
    assert tc.reason == reason

    tc = testinstance.set_case_status_by_name(name, status, None)

    assert tc.reason == reason


@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_add_testcase(testinstance):
    name = 'test_a.check_1.3a'
    freeform = True

    tc = testinstance.add_testcase(name, freeform)

    assert tc in testinstance.testcases


@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_get_case_by_name(testinstance):
    name = 'test_a.check_1.2a'

    tc = testinstance.get_case_by_name(name)

    assert tc.name == name

    name = 'test_a.check_1.3a'

    tc = testinstance.get_case_by_name(name)

    assert tc is None


@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_get_case_or_create(caplog, testinstance):
    name = 'test_a.check_1.2a'

    tc = testinstance.get_case_or_create(name)

    assert tc.name == name

    name = 'test_a.check_1.3a'

    tc = testinstance.get_case_or_create(name)

    assert tc.name == name
    assert 'Could not find a matching testcase for test_a.check_1.3a' in caplog.text


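# Each TESTDATA_3 entry: (required fixture, harness, expected testsuite_runnable() result).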
TESTDATA_3 = [
    (None, 'nonexistent harness', False),
    ('nonexistent fixture', 'console', False),
    (None, 'console', True),
    ('dummy fixture', 'console', True),
]

@pytest.mark.parametrize(
    'fixture, harness, expected_can_run',
    TESTDATA_3,
    ids=['improper harness', 'fixture not in list', 'no fixture specified', 'fixture in list']
)
def test_testinstance_testsuite_runnable(
    all_testsuites_dict,
    class_testplan,
    fixture,
    harness,
    expected_can_run
):
    testsuite_path = 'scripts/tests/twister/test_data/testsuites/samples/test_app/sample_test.app'
    class_testplan.testsuites = all_testsuites_dict
    testsuite = class_testplan.testsuites.get(testsuite_path)

    testsuite.harness = harness
    testsuite.harness_config['fixture'] = fixture

    fixtures = ['dummy fixture']

    can_run = TestInstance.testsuite_runnable(testsuite, fixtures)

    assert can_run == expected_can_run


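# Each TESTDATA_4 entry:
# (preexisting_handler, device_testing, platform_sim, testsuite_type,
#  expected_handler_type, expected_handler_args, expected_handler_ready)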
TESTDATA_4 = [
    (True, mock.ANY, mock.ANY, mock.ANY, None, [], False),
    (False, True, mock.ANY, mock.ANY, 'device', [], True),
    (False, False, 'qemu', mock.ANY, 'qemu', ['QEMU_PIPE=1'], True),
    (False, False, 'armfvp', mock.ANY, 'armfvp', [], True),
    (False, False, None, 'unit', 'unit', ['COVERAGE=1'], True),
    (False, False, None, 'dummy type', '', [], False),
]

@pytest.mark.parametrize(
    'preexisting_handler, device_testing, platform_sim, testsuite_type,' \
    ' expected_handler_type, expected_handler_args, expected_handler_ready',
    TESTDATA_4,
    ids=['preexisting handler', 'device testing', 'qemu simulation',
         'non-qemu simulation with exec', 'unit testing', 'no handler']
)
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_setup_handler(
    testinstance,
    preexisting_handler,
    device_testing,
    platform_sim,
    testsuite_type,
    expected_handler_type,
    expected_handler_args,
    expected_handler_ready
):
    testinstance.handler = mock.Mock() if preexisting_handler else None
    testinstance.platform.simulators = [Simulator({"name": platform_sim, "exec": 'dummy exec'})] if platform_sim else []
    testinstance.testsuite.type = testsuite_type
    env = mock.Mock(
        options=mock.Mock(
            device_testing=device_testing,
            enable_coverage=True,
            sim_name=platform_sim,
        )
    )

    with mock.patch.object(QEMUHandler, 'get_fifo', return_value=1), \
         mock.patch('shutil.which', return_value=True):
        testinstance.setup_handler(env)

    if expected_handler_type:
        assert testinstance.handler.type_str == expected_handler_type
        assert testinstance.handler.ready == expected_handler_ready
    assert all([arg in testinstance.handler.args for arg in expected_handler_args])


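# Each TESTDATA_5 entry:
# (os_name, platform_sim, platform_sim_exec, exec_exists,
#  testsuite_build_only, testsuite_slow, testsuite_harness,
#  enable_slow, filter, fixtures, hardware_map, expected check_runnable() result)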
TESTDATA_5 = [
    ('nt', 'renode', mock.ANY, mock.ANY,
     mock.ANY, mock.ANY, mock.ANY,
     mock.ANY, mock.ANY, mock.ANY, mock.ANY, False),
    ('linux', mock.ANY, mock.ANY, mock.ANY,
     True, mock.ANY, mock.ANY,
     mock.ANY, mock.ANY, mock.ANY, mock.ANY, False),
    ('linux', mock.ANY, mock.ANY, mock.ANY,
     False, True, mock.ANY,
     False, mock.ANY, mock.ANY, mock.ANY, False),
    ('linux', 'qemu', mock.ANY, mock.ANY,
     False, mock.ANY, 'pytest',
     mock.ANY, 'not runnable', mock.ANY, None, True),
    ('linux', 'renode', 'renode', True,
     False, mock.ANY, 'console',
     mock.ANY, 'not runnable', [], None, True),
    ('linux', 'renode', 'renode', False,
     False, mock.ANY, 'not pytest',
     mock.ANY, 'not runnable', mock.ANY, None, False),
    ('linux', 'qemu', mock.ANY, mock.ANY,
     False, mock.ANY, 'console',
     mock.ANY, 'not runnable', ['?'], mock.Mock(duts=[mock.Mock(platform='demo_board_2', fixtures=[])]), True),
]

@pytest.mark.parametrize(
    'os_name, platform_sim, platform_sim_exec, exec_exists,' \
    ' testsuite_build_only, testsuite_slow, testsuite_harness,' \
    ' enable_slow, filter, fixtures, hardware_map, expected',
    TESTDATA_5,
    ids=['windows', 'build only', 'skip slow', 'pytest harness', 'sim', 'no sim', 'hardware map']
)
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_check_runnable(
    testinstance,
    os_name,
    platform_sim,
    platform_sim_exec,
    exec_exists,
    testsuite_build_only,
    testsuite_slow,
    testsuite_harness,
    enable_slow,
    filter,
    fixtures,
    hardware_map,
    expected
):
    testinstance.platform.simulators = [Simulator({"name": platform_sim, "exec": platform_sim_exec})]
    testinstance.testsuite.build_only = testsuite_build_only
    testinstance.testsuite.slow = testsuite_slow
    testinstance.testsuite.harness = testsuite_harness

    env = mock.Mock(
        options=mock.Mock(
            device_testing=False,
            enable_slow=enable_slow,
            fixtures=fixtures,
            filter=filter,
            sim_name=platform_sim
        )
    )
    with mock.patch('os.name', os_name), \
         mock.patch('shutil.which', return_value=exec_exists):
        res = testinstance.check_runnable(env.options, hardware_map)

    assert res == expected


TESTDATA_6 = [
    (True, 'build.log'),
    (False, ''),
]

@pytest.mark.parametrize('from_buildlog, expected_buildlog_filepath', TESTDATA_6)
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_calculate_sizes(testinstance, from_buildlog, expected_buildlog_filepath):
    expected_elf_filepath = 'dummy.elf'
    expected_extra_sections = []
    expected_warning = True
    testinstance.get_elf_file = mock.Mock(return_value='dummy.elf')
    testinstance.get_buildlog_file = mock.Mock(return_value='build.log')

    sc_mock = mock.Mock()
    mock_sc = mock.Mock(return_value=sc_mock)

    with mock.patch('twisterlib.testinstance.SizeCalculator', mock_sc):
        res = testinstance.calculate_sizes(from_buildlog, expected_warning)

    assert res == sc_mock
    mock_sc.assert_called_once_with(
        elf_filename=expected_elf_filepath,
        extra_sections=expected_extra_sections,
        buildlog_filepath=expected_buildlog_filepath,
        generate_warning=expected_warning
    )


TESTDATA_7 = [
    (True, None),
    (False, BuildError),
]

@pytest.mark.parametrize('sysbuild, expected_error', TESTDATA_7)
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_get_elf_file(caplog, tmp_path, testinstance, sysbuild, expected_error):
    sysbuild_dir = tmp_path / 'sysbuild'
    sysbuild_dir.mkdir()
    zephyr_dir = sysbuild_dir / 'zephyr'
    zephyr_dir.mkdir()
    sysbuild_elf = zephyr_dir / 'dummy.elf'
    sysbuild_elf.write_bytes(b'0')
    sysbuild_elf2 = zephyr_dir / 'dummy2.elf'
    sysbuild_elf2.write_bytes(b'0')

    testinstance.sysbuild = sysbuild
    testinstance.domains = mock.Mock(
        get_default_domain=mock.Mock(
            return_value=mock.Mock(
                build_dir=sysbuild_dir
            )
        )
    )

    with pytest.raises(expected_error) if expected_error else nullcontext():
        testinstance.get_elf_file()

    if expected_error is None:
        assert 'multiple ELF files detected: ' in caplog.text


TESTDATA_8 = [
    (True, None),
    (False, BuildError),
]

@pytest.mark.parametrize('create_build_log, expected_error', TESTDATA_8)
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_get_buildlog_file(tmp_path, testinstance, create_build_log, expected_error):
    if create_build_log:
        build_dir = tmp_path / 'build'
        build_dir.mkdir()
        build_log = build_dir / 'build.log'
        build_log.write_text('')
        testinstance.build_dir = build_dir

    with pytest.raises(expected_error) if expected_error else nullcontext():
        res = testinstance.get_buildlog_file()

    if expected_error is None:
        assert res == str(build_log)


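# Each TESTDATA_9 entry: (harness_config, expected_content of the generated overlay).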
TESTDATA_9 = [
    (
        {'ztest_suite_repeat': 5, 'ztest_test_repeat': 10, 'ztest_test_shuffle': True},
        '\nCONFIG_ZTEST_REPEAT=y\nCONFIG_ZTEST_SUITE_REPEAT_COUNT=5\nCONFIG_ZTEST_TEST_REPEAT_COUNT=10\nCONFIG_ZTEST_SHUFFLE=y'
    ),
    (
        {'ztest_suite_repeat': 3},
        '\nCONFIG_ZTEST_REPEAT=y\nCONFIG_ZTEST_SUITE_REPEAT_COUNT=3'
    ),
    (
        {'ztest_test_repeat': 7},
        '\nCONFIG_ZTEST_REPEAT=y\nCONFIG_ZTEST_TEST_REPEAT_COUNT=7'
    ),
    (
        {'ztest_test_shuffle': True},
        '\nCONFIG_ZTEST_REPEAT=y\nCONFIG_ZTEST_SHUFFLE=y'
    ),
    (
        {},
        ''
    ),
]

@pytest.mark.parametrize('harness_config, expected_content', TESTDATA_9)
def test_create_overlay_with_harness_config(class_testplan, all_testsuites_dict, platforms_list, harness_config, expected_content):
    testsuite_path = 'scripts/tests/twister/test_data/testsuites/samples/test_app/sample_test.app'
    class_testplan.testsuites = all_testsuites_dict
    testsuite = class_testplan.testsuites.get(testsuite_path)
    testsuite.harness_config = harness_config
    class_testplan.platforms = platforms_list
    platform = class_testplan.get_platform("demo_board_2")
    testinstance = TestInstance(testsuite, platform, 'zephyr', class_testplan.env.outdir)
    assert testinstance.create_overlay(platform) == expected_content
