#!/usr/bin/env python3
# Copyright (c) 2020 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=line-too-long
"""
Tests for testinstance class
"""

from contextlib import nullcontext
import os
import sys
import pytest
import mock

ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))

from twisterlib.platform import Simulator
from twisterlib.statuses import TwisterStatus
from twisterlib.testinstance import TestInstance
from twisterlib.error import BuildError
from twisterlib.runner import TwisterRunner
from twisterlib.handlers import QEMUHandler
from expr_parser import reserved


TESTDATA_PART_1 = [
    (False, False, "console", None, "qemu", False, [], (False, True)),
    (False, False, "console", "native", "qemu", False, [], (False, True)),
    (True, False, "console", "native", "nsim", False, [], (True, False)),
    (True, True, "console", "native", "renode", False, [], (True, False)),
    (False, False, "sensor", "native", "", False, [], (True, False)),
    (False, False, "sensor", None, "", False, [], (True, False)),
    (False, True, "sensor", "native", "", True, [], (True, False)),
]
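# Fields per entry above: (build_only, slow, harness, platform_type, platform_sim,
# device_testing, fixture, expected), where expected is the
# (expected_build_only, expected_run) pair; the test below only asserts the run element.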
@pytest.mark.parametrize(
    "build_only, slow, harness, platform_type, platform_sim, device_testing, fixture, expected",
    TESTDATA_PART_1
)
def test_check_build_or_run(
    class_testplan,
    all_testsuites_dict,
    platforms_list,
    build_only,
    slow,
    harness,
    platform_type,
    platform_sim,
    device_testing,
    fixture,
    expected
):
    """Check the conditions for the build_only and run scenarios.

    Scenario 1: with different parameter combinations, build_only and run are set correctly.
    Scenario 2: on Windows, the instance is only runnable when QEMU_BIN_PATH is set.
    """

    class_testplan.testsuites = all_testsuites_dict
    testsuite = class_testplan.testsuites.get('scripts/tests/twister/test_data/testsuites/tests/'
                                              'test_a/test_a.check_1')
    print(testsuite)

    class_testplan.platforms = platforms_list
    platform = class_testplan.get_platform("demo_board_2")
    platform.type = platform_type
    platform.simulators = [Simulator({"name": platform_sim})] if platform_sim else []
    testsuite.harness = harness
    testsuite.build_only = build_only
    testsuite.slow = slow

    testinstance = TestInstance(testsuite, platform, class_testplan.env.outdir)
    env = mock.Mock(
        options=mock.Mock(
            device_testing=False,
            enable_slow=slow,
            fixtures=fixture,
            filter="",
            sim_name=platform_sim
        )
    )
    run = testinstance.check_runnable(env.options)
    _, r = expected
    assert run == r

    with mock.patch('os.name', 'nt'):
        # path to QEMU binary is not in QEMU_BIN_PATH environment variable
        run = testinstance.check_runnable(env.options)
        assert not run

        # mock path to QEMU binary in QEMU_BIN_PATH environment variable
        with mock.patch('os.environ', {'QEMU_BIN_PATH': ''}):
            run = testinstance.check_runnable(env.options)
            _, r = expected
            assert run == r


TESTDATA_PART_2 = [
    (True, True, True, ["demo_board_2/unit_testing"], "native",
     None, '\nCONFIG_COVERAGE=y\nCONFIG_COVERAGE_DUMP=y\nCONFIG_ASAN=y\nCONFIG_UBSAN=y'),
    (True, False, True, ["demo_board_2/unit_testing"], "native",
     None, '\nCONFIG_COVERAGE=y\nCONFIG_COVERAGE_DUMP=y\nCONFIG_ASAN=y'),
    (False, False, True, ["demo_board_2/unit_testing"], 'native',
     None, '\nCONFIG_COVERAGE=y\nCONFIG_COVERAGE_DUMP=y'),
    (True, False, True, ["demo_board_2/unit_testing"], 'mcu',
     None, '\nCONFIG_COVERAGE=y\nCONFIG_COVERAGE_DUMP=y'),
    (False, False, False, ["demo_board_2/unit_testing"], 'native', None, ''),
    (False, False, True, ['demo_board_1'], 'native', None, ''),
    (True, False, False, ["demo_board_2"], 'native', None, '\nCONFIG_ASAN=y'),
    (False, True, False, ["demo_board_2"], 'native', None, '\nCONFIG_UBSAN=y'),
    (False, False, False, ["demo_board_2"], 'native',
     ["CONFIG_LOG=y"], 'CONFIG_LOG=y'),
    (False, False, False, ["demo_board_2"], 'native',
     ["arch:x86:CONFIG_LOG=y"], 'CONFIG_LOG=y'),
    (False, False, False, ["demo_board_2"], 'native',
     ["arch:arm:CONFIG_LOG=y"], ''),
    (False, False, False, ["demo_board_2"], 'native',
     ["platform:demo_board_2/unit_testing:CONFIG_LOG=y"], 'CONFIG_LOG=y'),
    (False, False, False, ["demo_board_2"], 'native',
     ["platform:demo_board_1:CONFIG_LOG=y"], ''),
]
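# Fields per entry above: (enable_asan, enable_ubsan, enable_coverage,
# coverage_platform, platform_type, extra_configs, expected_content of the
# generated overlay).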

@pytest.mark.parametrize(
    'enable_asan, enable_ubsan, enable_coverage, coverage_platform, platform_type,'
    ' extra_configs, expected_content',
    TESTDATA_PART_2
)
def test_create_overlay(
    class_testplan,
    all_testsuites_dict,
    platforms_list,
    enable_asan,
    enable_ubsan,
    enable_coverage,
    coverage_platform,
    platform_type,
    extra_configs,
    expected_content
):
    """Test that the correct content is written to testcase_extra.conf for each combination of options."""
    class_testplan.testsuites = all_testsuites_dict
    testcase = class_testplan.testsuites.get('scripts/tests/twister/test_data/testsuites/samples/'
                                             'test_app/sample_test.app')

    if extra_configs:
        testcase.extra_configs = extra_configs

    class_testplan.platforms = platforms_list
    platform = class_testplan.get_platform("demo_board_2")

    testinstance = TestInstance(testcase, platform, class_testplan.env.outdir)
    platform.type = platform_type
    assert testinstance.create_overlay(platform, enable_asan, enable_ubsan, enable_coverage, coverage_platform) == expected_content

def test_calculate_sizes(class_testplan, all_testsuites_dict, platforms_list):
    """Test the calculate_sizes method for the Zephyr ELF."""
    class_testplan.testsuites = all_testsuites_dict
    testcase = class_testplan.testsuites.get('scripts/tests/twister/test_data/testsuites/samples/'
                                             'test_app/sample_test.app')
    class_testplan.platforms = platforms_list
    platform = class_testplan.get_platform("demo_board_2")
    testinstance = TestInstance(testcase, platform, class_testplan.env.outdir)

    with pytest.raises(BuildError):
        assert testinstance.calculate_sizes() == "Missing/multiple output ELF binary"

TESTDATA_PART_3 = [
    (
        'CONFIG_ARCH_HAS_THREAD_LOCAL_STORAGE and' \
        ' CONFIG_TOOLCHAIN_SUPPORTS_THREAD_LOCAL_STORAGE and' \
        ' not (CONFIG_TOOLCHAIN_ARCMWDT_SUPPORTS_THREAD_LOCAL_STORAGE and CONFIG_USERSPACE)',
        ['kconfig']
    ),
    (
        '(dt_compat_enabled("st,stm32-flash-controller") or' \
        ' dt_compat_enabled("st,stm32h7-flash-controller")) and' \
        ' dt_label_with_parent_compat_enabled("storage_partition", "fixed-partitions")',
        ['dts']
    ),
    (
        '((CONFIG_FLASH_HAS_DRIVER_ENABLED and not CONFIG_TRUSTED_EXECUTION_NONSECURE) and' \
        ' dt_label_with_parent_compat_enabled("storage_partition", "fixed-partitions")) or' \
        ' (CONFIG_FLASH_HAS_DRIVER_ENABLED and CONFIG_TRUSTED_EXECUTION_NONSECURE and' \
        ' dt_label_with_parent_compat_enabled("slot1_ns_partition", "fixed-partitions"))',
        ['dts', 'kconfig']
    ),
    (
        '((CONFIG_CPU_AARCH32_CORTEX_R or CONFIG_CPU_CORTEX_M) and' \
        ' CONFIG_CPU_HAS_FPU and TOOLCHAIN_HAS_NEWLIB == 1) or CONFIG_ARCH_POSIX',
        ['full']
    )
]
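# Fields per entry above: (filter expression, expected CMake filter stages
# returned by TwisterRunner.get_cmake_filter_stages()).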

@pytest.mark.parametrize("filter_expr, expected_stages", TESTDATA_PART_3)
def test_which_filter_stages(filter_expr, expected_stages):
    logic_keys = reserved.keys()
    stages = TwisterRunner.get_cmake_filter_stages(filter_expr, logic_keys)
    assert sorted(stages) == sorted(expected_stages)


@pytest.fixture(name='testinstance')
def sample_testinstance(all_testsuites_dict, class_testplan, platforms_list, request):
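    """Return a TestInstance for the testsuite selected via indirect parametrization.

    request.param['testsuite_kind'] picks the sample or tests testsuite path;
    an optional request.param['board_name'] selects the platform (default 'demo_board_2').
    """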
    testsuite_path = 'scripts/tests/twister/test_data/testsuites'
    if request.param['testsuite_kind'] == 'sample':
        testsuite_path += '/samples/test_app/sample_test.app'
    elif request.param['testsuite_kind'] == 'tests':
        testsuite_path += '/tests/test_a/test_a.check_1'

    class_testplan.testsuites = all_testsuites_dict
    testsuite = class_testplan.testsuites.get(testsuite_path)
    class_testplan.platforms = platforms_list
    platform = class_testplan.get_platform(request.param.get('board_name', 'demo_board_2'))

    testinstance = TestInstance(testsuite, platform, class_testplan.env.outdir)
    return testinstance


TESTDATA_1 = [
    (False),
    (True),
]

@pytest.mark.parametrize('detailed_test_id', TESTDATA_1)
def test_testinstance_init(all_testsuites_dict, class_testplan, platforms_list, detailed_test_id):
    testsuite_path = 'scripts/tests/twister/test_data/testsuites/samples/test_app/sample_test.app'
    class_testplan.testsuites = all_testsuites_dict
    testsuite = class_testplan.testsuites.get(testsuite_path)
    testsuite.detailed_test_id = detailed_test_id
    class_testplan.platforms = platforms_list
    platform = class_testplan.get_platform("demo_board_2/unit_testing")

    testinstance = TestInstance(testsuite, platform, class_testplan.env.outdir)

    if detailed_test_id:
        assert testinstance.build_dir == os.path.join(class_testplan.env.outdir, platform.normalized_name, testsuite_path)
    else:
        assert testinstance.build_dir == os.path.join(class_testplan.env.outdir, platform.normalized_name, testsuite.source_dir_rel, testsuite.name)


@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'sample'}], indirect=True)
def test_testinstance_record(testinstance):
    testinstance.testcases = [mock.Mock()]
    recording = [ {'field_1':  'recording_1_1', 'field_2': 'recording_1_2'},
                  {'field_1':  'recording_2_1', 'field_2': 'recording_2_2'}
                ]
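    # Note: csv.DictWriter.writeheader() delegates to writerow(), so the
    # writerow patch below also captures the header row ({field: field}).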
    with mock.patch(
        'builtins.open',
        mock.mock_open(read_data='')
    ) as mock_file, \
        mock.patch(
        'csv.DictWriter.writerow',
        mock.Mock()
    ) as mock_writeheader, \
        mock.patch(
        'csv.DictWriter.writerows',
        mock.Mock()
    ) as mock_writerows:
        testinstance.record(recording)

    print(mock_file.mock_calls)

    mock_file.assert_called_with(
        os.path.join(testinstance.build_dir, 'recording.csv'),
        'w'
    )

    mock_writeheader.assert_has_calls([mock.call({ k:k for k in recording[0]})])
    mock_writerows.assert_has_calls([mock.call(recording)])


@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'sample'}], indirect=True)
def test_testinstance_add_filter(testinstance):
    reason = 'dummy reason'
    filter_type = 'dummy type'

    testinstance.add_filter(reason, filter_type)

    assert {'type': filter_type, 'reason': reason} in testinstance.filters
    assert testinstance.status == TwisterStatus.FILTER
    assert testinstance.reason == reason
    assert testinstance.filter_type == filter_type


def test_testinstance_init_cases(all_testsuites_dict, class_testplan, platforms_list):
    testsuite_path = 'scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1'
    class_testplan.testsuites = all_testsuites_dict
    testsuite = class_testplan.testsuites.get(testsuite_path)
    class_testplan.platforms = platforms_list
    platform = class_testplan.get_platform("demo_board_2")

    testinstance = TestInstance(testsuite, platform, class_testplan.env.outdir)

    testinstance.init_cases()

    assert all(
        [
            any(
                [
                    tcc.name == tc.name and tcc.freeform == tc.freeform \
                        for tcc in testinstance.testsuite.testcases
                ]
            ) for tc in testsuite.testcases
        ]
    )


@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'sample'}], indirect=True)
def test_testinstance_get_run_id(testinstance):
    res = testinstance._get_run_id()

    assert isinstance(res, str)


TESTDATA_2 = [
    ('another reason', 'another reason'),
    (None, 'dummy reason'),
]
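# Fields per entry above: (reason passed to add_missing_case_status,
# reason expected on the testcase left with status NONE).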

@pytest.mark.parametrize('reason, expected_reason', TESTDATA_2)
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_add_missing_case_status(testinstance, reason, expected_reason):
    testinstance.reason = 'dummy reason'

    status = TwisterStatus.PASS

    assert len(testinstance.testcases) > 1, 'Selected testsuite does not have enough testcases.'

    testinstance.testcases[0].status = TwisterStatus.STARTED
    testinstance.testcases[-1].status = TwisterStatus.NONE

    testinstance.add_missing_case_status(status, reason)

    assert testinstance.testcases[0].status == TwisterStatus.FAIL
    assert testinstance.testcases[-1].status == TwisterStatus.PASS
    assert testinstance.testcases[-1].reason == expected_reason


def test_testinstance_dunders(all_testsuites_dict, class_testplan, platforms_list):
    testsuite_path = 'scripts/tests/twister/test_data/testsuites/samples/test_app/sample_test.app'
    class_testplan.testsuites = all_testsuites_dict
    testsuite = class_testplan.testsuites.get(testsuite_path)
    class_testplan.platforms = platforms_list
    platform = class_testplan.get_platform("demo_board_2")

    testinstance = TestInstance(testsuite, platform, class_testplan.env.outdir)
    testinstance_copy = TestInstance(testsuite, platform, class_testplan.env.outdir)

    d = testinstance.__getstate__()

    d['name'] = 'dummy name'
    testinstance_copy.__setstate__(d)

    d['name'] = 'another name'
    testinstance.__setstate__(d)
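    # Instance ordering (__lt__) is by name: 'another name' sorts before 'dummy name'.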

    assert testinstance < testinstance_copy

    testinstance_copy.__setstate__(d)

    assert not testinstance < testinstance_copy
    assert not testinstance_copy < testinstance

    assert testinstance.__repr__() == f'<TestSuite {testsuite_path} on demo_board_2/unit_testing>'


@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_set_case_status_by_name(testinstance):
    name = 'test_a.check_1.2a'
    status = TwisterStatus.PASS
    reason = 'dummy reason'

    tc = testinstance.set_case_status_by_name(name, status, reason)

    assert tc.name == name
    assert tc.status == status
    assert tc.reason == reason

    tc = testinstance.set_case_status_by_name(name, status, None)

    assert tc.reason == reason


@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_add_testcase(testinstance):
    name = 'test_a.check_1.3a'
    freeform = True

    tc = testinstance.add_testcase(name, freeform)

    assert tc in testinstance.testcases


@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_get_case_by_name(testinstance):
    name = 'test_a.check_1.2a'

    tc = testinstance.get_case_by_name(name)

    assert tc.name == name

    name = 'test_a.check_1.3a'

    tc = testinstance.get_case_by_name(name)

    assert tc is None


@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_get_case_or_create(caplog, testinstance):
    name = 'test_a.check_1.2a'

    tc = testinstance.get_case_or_create(name)

    assert tc.name == name

    name = 'test_a.check_1.3a'

    tc = testinstance.get_case_or_create(name)

    assert tc.name == name
    assert 'Could not find a matching testcase for test_a.check_1.3a' in caplog.text


TESTDATA_3 = [
    (None, 'nonexistent harness', False),
    ('nonexistent fixture', 'console', False),
    (None, 'console', True),
    ('dummy fixture', 'console', True),
]

@pytest.mark.parametrize(
    'fixture, harness, expected_can_run',
    TESTDATA_3,
    ids=['improper harness', 'fixture not in list', 'no fixture specified', 'fixture in list']
)
def test_testinstance_testsuite_runnable(
    all_testsuites_dict,
    class_testplan,
    fixture,
    harness,
    expected_can_run
):
    testsuite_path = 'scripts/tests/twister/test_data/testsuites/samples/test_app/sample_test.app'
    class_testplan.testsuites = all_testsuites_dict
    testsuite = class_testplan.testsuites.get(testsuite_path)

    testsuite.harness = harness
    testsuite.harness_config['fixture'] = fixture

    fixtures = ['dummy fixture']

    can_run = TestInstance.testsuite_runnable(testsuite, fixtures)

    assert can_run == expected_can_run


TESTDATA_4 = [
    (True, mock.ANY, mock.ANY, mock.ANY, None, [], False),
    (False, True, mock.ANY, mock.ANY, 'device', [], True),
    (False, False, 'qemu', mock.ANY, 'qemu', ['QEMU_PIPE=1'], True),
    (False, False, 'armfvp', mock.ANY, 'armfvp', [], True),
    (False, False, None, 'unit', 'unit', ['COVERAGE=1'], True),
    (False, False, None, 'dummy type', '', [], False),
]
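# Fields per entry above: (preexisting_handler, device_testing, platform_sim,
# testsuite_type, expected_handler_type, expected_handler_args,
# expected_handler_ready).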

@pytest.mark.parametrize(
    'preexisting_handler, device_testing, platform_sim, testsuite_type,' \
    ' expected_handler_type, expected_handler_args, expected_handler_ready',
    TESTDATA_4,
    ids=['preexisting handler', 'device testing', 'qemu simulation',
         'non-qemu simulation with exec', 'unit testing', 'no handler']
)
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_setup_handler(
    testinstance,
    preexisting_handler,
    device_testing,
    platform_sim,
    testsuite_type,
    expected_handler_type,
    expected_handler_args,
    expected_handler_ready
):
    testinstance.handler = mock.Mock() if preexisting_handler else None
    testinstance.platform.simulators = [Simulator({"name": platform_sim, "exec": 'dummy exec'})] if platform_sim else []
    testinstance.testsuite.type = testsuite_type
    env = mock.Mock(
        options=mock.Mock(
            device_testing=device_testing,
            enable_coverage=True,
            sim_name=platform_sim,
        )
    )

    with mock.patch.object(QEMUHandler, 'get_fifo', return_value=1), \
         mock.patch('shutil.which', return_value=True):
        testinstance.setup_handler(env)

    if expected_handler_type:
        assert testinstance.handler.type_str == expected_handler_type
        assert testinstance.handler.ready == expected_handler_ready
    assert all([arg in testinstance.handler.args for arg in expected_handler_args])


TESTDATA_5 = [
    ('nt', 'renode', mock.ANY, mock.ANY,
     mock.ANY, mock.ANY, mock.ANY,
     mock.ANY, mock.ANY, mock.ANY, mock.ANY, False),
    ('linux', mock.ANY, mock.ANY, mock.ANY,
     True, mock.ANY, mock.ANY,
     mock.ANY, mock.ANY, mock.ANY, mock.ANY, False),
    ('linux', mock.ANY, mock.ANY, mock.ANY,
     False, True, mock.ANY,
     False, mock.ANY, mock.ANY, mock.ANY, False),
    ('linux', 'qemu', mock.ANY, mock.ANY,
     False, mock.ANY, 'pytest',
     mock.ANY, 'not runnable', mock.ANY, None, True),
    ('linux', 'renode', 'renode', True,
     False, mock.ANY, 'console',
     mock.ANY, 'not runnable', [], None, True),
    ('linux', 'renode', 'renode', False,
     False, mock.ANY, 'not pytest',
     mock.ANY, 'not runnable', mock.ANY, None, False),
    ('linux', 'qemu', mock.ANY, mock.ANY,
     False, mock.ANY, 'console',
     mock.ANY, 'not runnable', ['?'], mock.Mock(duts=[mock.Mock(platform='demo_board_2', fixtures=[])]), True),
]
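# Fields per entry above: (os_name, platform_sim, platform_sim_exec, exec_exists,
# testsuite_build_only, testsuite_slow, testsuite_harness,
# enable_slow, filter, fixtures, hardware_map, expected runnability).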

@pytest.mark.parametrize(
    'os_name, platform_sim, platform_sim_exec, exec_exists,' \
    ' testsuite_build_only, testsuite_slow, testsuite_harness,' \
    ' enable_slow, filter, fixtures, hardware_map, expected',
    TESTDATA_5,
    ids=['windows', 'build only', 'skip slow', 'pytest harness', 'sim', 'no sim', 'hardware map']
)
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_check_runnable(
    testinstance,
    os_name,
    platform_sim,
    platform_sim_exec,
    exec_exists,
    testsuite_build_only,
    testsuite_slow,
    testsuite_harness,
    enable_slow,
    filter,
    fixtures,
    hardware_map,
    expected
):
    testinstance.platform.simulators = [Simulator({"name": platform_sim, "exec": platform_sim_exec})]
    testinstance.testsuite.build_only = testsuite_build_only
    testinstance.testsuite.slow = testsuite_slow
    testinstance.testsuite.harness = testsuite_harness

    env = mock.Mock(
        options=mock.Mock(
            device_testing=False,
            enable_slow=enable_slow,
            fixtures=fixtures,
            filter=filter,
            sim_name=platform_sim
        )
    )
    with mock.patch('os.name', os_name), \
         mock.patch('shutil.which', return_value=exec_exists):
        res = testinstance.check_runnable(env.options, hardware_map)

    assert res == expected


TESTDATA_6 = [
    (True, 'build.log'),
    (False, ''),
]

@pytest.mark.parametrize('from_buildlog, expected_buildlog_filepath', TESTDATA_6)
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_calculate_sizes(testinstance, from_buildlog, expected_buildlog_filepath):
    expected_elf_filepath = 'dummy.elf'
    expected_extra_sections = []
    expected_warning = True
    testinstance.get_elf_file = mock.Mock(return_value='dummy.elf')
    testinstance.get_buildlog_file = mock.Mock(return_value='build.log')

    sc_mock = mock.Mock()
    mock_sc = mock.Mock(return_value=sc_mock)

    with mock.patch('twisterlib.testinstance.SizeCalculator', mock_sc):
        res = testinstance.calculate_sizes(from_buildlog, expected_warning)

    assert res == sc_mock
    mock_sc.assert_called_once_with(
        elf_filename=expected_elf_filepath,
        extra_sections=expected_extra_sections,
        buildlog_filepath=expected_buildlog_filepath,
        generate_warning=expected_warning
    )


TESTDATA_7 = [
    (True, None),
    (False, BuildError),
]

@pytest.mark.parametrize('sysbuild, expected_error', TESTDATA_7)
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_get_elf_file(caplog, tmp_path, testinstance, sysbuild, expected_error):
    sysbuild_dir = tmp_path / 'sysbuild'
    sysbuild_dir.mkdir()
    zephyr_dir = sysbuild_dir / 'zephyr'
    zephyr_dir.mkdir()
    sysbuild_elf = zephyr_dir / 'dummy.elf'
    sysbuild_elf.write_bytes(b'0')
    sysbuild_elf2 = zephyr_dir / 'dummy2.elf'
    sysbuild_elf2.write_bytes(b'0')

    testinstance.sysbuild = sysbuild
    testinstance.domains = mock.Mock(
        get_default_domain=mock.Mock(
            return_value=mock.Mock(
                build_dir=sysbuild_dir
            )
        )
    )

    with pytest.raises(expected_error) if expected_error else nullcontext():
        testinstance.get_elf_file()

    if expected_error is None:
        assert 'multiple ELF files detected: ' in caplog.text


TESTDATA_8 = [
    (True, None),
    (False, BuildError),
]

@pytest.mark.parametrize('create_build_log, expected_error', TESTDATA_8)
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_get_buildlog_file(tmp_path, testinstance, create_build_log, expected_error):
    if create_build_log:
        build_dir = tmp_path / 'build'
        build_dir.mkdir()
        build_log = build_dir / 'build.log'
        build_log.write_text('')
        testinstance.build_dir = build_dir

    with pytest.raises(expected_error) if expected_error else nullcontext():
        res = testinstance.get_buildlog_file()

    if expected_error is None:
        assert res == str(build_log)