#!/usr/bin/env python3
# Copyright (c) 2024 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
Blackbox tests for twister's command line functions related to Zephyr platforms.
"""

import importlib
import json
import os
import re
import sys

import mock
import pytest
# pylint: disable=no-name-in-module
from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock
from twisterlib.testplan import TestPlan


@mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
class TestPlatform:
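    # Each TESTDATA_1 entry: (path to a dummy test suite, platforms passed via
    # '-p', expected counts parsed from twister's end-of-run summary).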
    TESTDATA_1 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86', 'qemu_x86_64', 'intel_adl_crb'],
            {
                'selected_test_scenarios': 3,
                'selected_test_instances': 9,
                'executed_test_instances': 6,
                'skipped_configurations': 3,
                'skipped_by_static_filter': 3,
                'skipped_at_runtime': 0,
                'passed_configurations': 4,
                'built_configurations': 2,
                'failed_configurations': 0,
                'errored_configurations': 0,
                'executed_test_cases': 10,
                'skipped_test_cases': 2,
                'platform_count': 2,
                'executed_on_platform': 4,
                'only_built': 2
            }
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'device'),
            ['qemu_x86', 'qemu_x86_64', 'intel_adl_crb'],
            {
                'selected_test_scenarios': 1,
                'selected_test_instances': 3,
                'executed_test_instances': 0,
                'skipped_configurations': 3,
                'skipped_by_static_filter': 3,
                'skipped_at_runtime': 0,
                'passed_configurations': 0,
                'built_configurations': 0,
                'failed_configurations': 0,
                'errored_configurations': 0,
                'executed_test_cases': 0,
                'skipped_test_cases': 0,
                'platform_count': 0,
                'executed_on_platform': 0,
                'only_built': 0
            }
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic_cpp'),
            ['native_sim'],
            {
                'selected_test_scenarios': 3,
                'selected_test_instances': 3,
                'executed_test_instances': 3,
                'skipped_configurations': 0,
                'skipped_by_static_filter': 0,
                'skipped_at_runtime': 0,
                'passed_configurations': 2,
                'built_configurations': 1,
                'failed_configurations': 0,
                'errored_configurations': 0,
                'executed_test_cases': 5,
                'skipped_test_cases': 0,
                'platform_count': 1,
                'executed_on_platform': 2,
                'only_built': 1
            }
        ),
    ]

    @classmethod
    def setup_class(cls):
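        # Load the twister entry script as an importable module so each test
        # can run it in-process with a mocked sys.argv.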
        apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)

    @classmethod
    def teardown_class(cls):
        pass

    @pytest.mark.parametrize(
        'board_root, expected_returncode',
        [(True, '0'), (False, '2')],
        ids=['dummy in additional board root', 'no additional board root, crash']
    )
    def test_board_root(self, out_path, board_root, expected_returncode):
        test_platforms = ['qemu_x86', 'dummy/unit_testing']
        board_root_path = os.path.join(TEST_DATA, 'boards')
        path = os.path.join(TEST_DATA, 'tests', 'dummy')
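        # Build the CLI arguments, interleaving '-p' with each platform,
        # e.g. ['-p', 'qemu_x86', '-p', 'dummy/unit_testing'].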
        args = ['-i', '--outdir', out_path, '-T', path, '-y'] + \
               (['--board-root', board_root_path] if board_root else []) + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

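        # Twister finishes via sys.exit(), so run it under pytest.raises and
        # read the return code from the SystemExit value.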
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        # Checking twister.log increases coupling,
        # but we need to differentiate crashes.
        with open(os.path.join(out_path, 'twister.log')) as f:
            log = f.read()
            error_regex = r'ERROR.*platform_filter\s+-\s+unrecognized\s+platform\s+-\s+dummy/unit_testing$'
            board_error = re.search(error_regex, log)
            assert board_error if not board_root else not board_error

        assert str(sys_exit.value) == expected_returncode

    def test_force_platform(self, out_path):
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy')
        args = ['-i', '--outdir', out_path, '-T', path, '-y'] + \
               ['--force-platform'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        with open(os.path.join(out_path, 'testplan.json')) as f:
            j = json.load(f)
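        # Collect (platform, suite, case) tuples for test cases that were not
        # filtered out (filtered cases carry a 'reason' field in testplan.json).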
        filtered_j = [
            (ts['platform'], ts['name'], tc['identifier']) \
               for ts in j['testsuites'] \
               for tc in ts['testcases'] if 'reason' not in tc
        ]

        assert str(sys_exit.value) == '0'

        assert len(filtered_j) == 26

    def test_platform(self, out_path):
        path = os.path.join(TEST_DATA, 'tests', 'dummy')
        args = ['-i', '--outdir', out_path, '-T', path, '-y'] + \
               ['--platform', 'qemu_x86']

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        with open(os.path.join(out_path, 'testplan.json')) as f:
            j = json.load(f)
        filtered_j = [
            (ts['platform'], ts['name'], tc['identifier']) \
               for ts in j['testsuites'] \
               for tc in ts['testcases'] if 'reason' not in tc
        ]

        assert str(sys_exit.value) == '0'

        assert all([platform == 'qemu_x86/atom' for platform, _, _ in filtered_j])

    @pytest.mark.parametrize(
        'test_path, test_platforms',
        [
            (
                os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
                ['qemu_x86'],
            ),
        ],
        ids=[
            'any_platform',
        ],
    )
    @pytest.mark.parametrize(
        'flag',
        ['-l', '--all']
    )
    def test_any_platform(self, capfd, out_path, test_path, test_platforms, flag):
        args = ['--outdir', out_path, '-T', test_path, '-y'] + \
               [flag] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        assert str(sys_exit.value) == '0'

    @pytest.mark.parametrize(
        'test_path, test_platforms, expected',
        [
            (
                os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
                ['qemu_x86', 'qemu_x86_64'],
                {
                    'selected_test_instances': 6,
                    'passed_configurations': 2,
                    'executed_test_instances': 3,
                    'executed_on_platform': 2,
                    'only_built': 1,
                }
            ),
        ],
        ids=[
            'exclude_platform',
        ],
    )
    def test_exclude_platform(self, capfd, out_path, test_path, test_platforms, expected):
        args = ['--outdir', out_path, '-T', test_path] + \
               ['--exclude-platform', "qemu_x86"] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

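        # Parse the pass/built summary lines twister logs at the end of a run
        # and compare the captured counts with the expected values.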
        pass_regex = r'^INFO    - (?P<passed_configurations>[0-9]+) of' \
                     r' (?P<test_instances>[0-9]+) executed test configurations passed'

        built_regex = r'^INFO    - (?P<executed_on_platform>[0-9]+)' \
                      r' test configurations executed on platforms, (?P<only_built>[0-9]+)' \
                      r' test configurations were only built.$'

        pass_search = re.search(pass_regex, err, re.MULTILINE)

        assert pass_search
        assert int(pass_search.group('passed_configurations')) == \
               expected['passed_configurations']
        assert int(pass_search.group('test_instances')) == \
               expected['executed_test_instances']

        built_search = re.search(built_regex, err, re.MULTILINE)

        assert built_search
        assert int(built_search.group('executed_on_platform')) == \
               expected['executed_on_platform']
        assert int(built_search.group('only_built')) == \
               expected['only_built']

        assert str(sys_exit.value) == '0'

    @pytest.mark.parametrize(
        'test_path, test_platforms, expected',
        TESTDATA_1,
        ids=[
            'emulation_only tests/dummy/agnostic',
            'emulation_only tests/dummy/device',
            'native_sim_only tests/dummy/agnostic_cpp',
        ]
    )
    def test_emulation_only(self, capfd, out_path, test_path, test_platforms, expected):
        args = ['-i', '--outdir', out_path, '-T', test_path] + \
               ['--emulation-only'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

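        # Twister's summary spans several log lines: scenario selection,
        # configuration results, test case results, skipped cases and
        # built-only counts. Each regex below captures one of those lines.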
        select_regex = r'^INFO    - (?P<test_scenarios>[0-9]+) test scenarios' \
                       r' \((?P<test_instances>[0-9]+) configurations\) selected,' \
                       r' (?P<skipped_configurations>[0-9]+) configurations filtered' \
                       r' \((?P<skipped_by_static_filter>[0-9]+) by static filter,' \
                       r' (?P<skipped_at_runtime>[0-9]+) at runtime\)\.$'

        pass_regex = r'^INFO    - (?P<passed_configurations>[0-9]+) of' \
                     r' (?P<executed_test_instances>[0-9]+) executed test configurations passed' \
                     r' \([0-9]+\.[0-9]+%\), (?P<built_configurations>[0-9]+) built \(not run\),' \
                     r' (?P<failed_configurations>[0-9]+) failed,' \
                     r' (?P<errored_configurations>[0-9]+) errored,' \
                     r' with (?:[0-9]+|no) warnings in [0-9]+\.[0-9]+ seconds.$'

        case_regex = r'^INFO    - (?P<passed_cases>[0-9]+) of' \
                     r' (?P<executed_test_cases>[0-9]+) executed test cases passed' \
                     r' \([0-9]+\.[0-9]+%\)' \
                     r'(?:, (?P<blocked_cases>[0-9]+) blocked)?' \
                     r'(?:, (?P<failed_cases>[0-9]+) failed)?' \
                     r'(?:, (?P<errored_cases>[0-9]+) errored)?' \
                     r'(?:, (?P<none_cases>[0-9]+) without a status)?' \
                     r' on (?P<platform_count>[0-9]+) out of total' \
                     r' (?P<total_platform_count>[0-9]+) platforms \([0-9]+\.[0-9]+%\)'

        skip_regex = r'(?P<skipped_test_cases>[0-9]+) selected test cases not executed:' \
                     r'(?: (?P<skipped_cases>[0-9]+) skipped)?' \
                     r'(?:, (?P<filtered_cases>[0-9]+) filtered)?' \
                     r'.'

        built_regex = r'^INFO    - (?P<executed_on_platform>[0-9]+)' \
                      r' test configurations executed on platforms, (?P<only_built>[0-9]+)' \
                      r' test configurations were only built.$'

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        select_search = re.search(select_regex, err, re.MULTILINE)

        assert select_search
        assert int(select_search.group('test_scenarios')) == \
               expected['selected_test_scenarios']
        assert int(select_search.group('test_instances')) == \
               expected['selected_test_instances']
        assert int(select_search.group('skipped_configurations')) == \
               expected['skipped_configurations']
        assert int(select_search.group('skipped_by_static_filter')) == \
               expected['skipped_by_static_filter']
        assert int(select_search.group('skipped_at_runtime')) == \
               expected['skipped_at_runtime']

        pass_search = re.search(pass_regex, err, re.MULTILINE)

        assert pass_search
        assert int(pass_search.group('passed_configurations')) == \
               expected['passed_configurations']
        assert int(pass_search.group('built_configurations')) == \
               expected['built_configurations']
        assert int(pass_search.group('executed_test_instances')) == \
               expected['executed_test_instances']
        if expected['failed_configurations']:
            assert int(pass_search.group('failed_configurations')) == \
                   expected['failed_configurations']
        if expected['errored_configurations']:
            assert int(pass_search.group('errored_configurations')) == \
                   expected['errored_configurations']

        case_search = re.search(case_regex, err, re.MULTILINE)

        assert case_search
        assert int(case_search.group('executed_test_cases')) == \
               expected['executed_test_cases']
        assert int(case_search.group('platform_count')) == \
               expected['platform_count']

        if expected['skipped_test_cases']:
            skip_search = re.search(skip_regex, err, re.MULTILINE)

            assert skip_search
            assert int(skip_search.group('skipped_test_cases')) == \
                   expected['skipped_test_cases']

        built_search = re.search(built_regex, err, re.MULTILINE)

        assert built_search
        assert int(built_search.group('executed_on_platform')) == \
               expected['executed_on_platform']
        assert int(built_search.group('only_built')) == \
               expected['only_built']

        assert str(sys_exit.value) == '0'