#!/usr/bin/env python3
# Copyright (c) 2023-2024 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
Blackbox tests for twister's command line functions
"""

import importlib
import json
import mock
import os
import pytest
import re
import shutil
import sys

from lxml import etree

# pylint: disable=no-name-in-module
from conftest import TEST_DATA, ZEPHYR_BASE, testsuite_filename_mock, clear_log_in_test
from twisterlib.statuses import TwisterStatus
from twisterlib.testplan import TestPlan


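# Patch twister's TESTSUITE_FILENAME at class level so test plan discovery
# matches the blackbox sample suites under TEST_DATA instead of the real
# in-tree testcase files.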
@mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
class TestReport:
    TESTDATA_1 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86/atom', 'mps2/an385'],
            [
                'qemu_x86_atom.xml', 'mps2_an385.xml',
                'testplan.json', 'twister.json',
                'twister.log', 'twister_report.xml',
                'twister_suite_report.xml', 'twister.xml'
            ]
        ),
    ]
    TESTDATA_2 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86/atom', 'mps2/an385'],
            [
                'mps2_an385_TEST.xml', 'qemu_x86_atom_TEST.xml',
                'twister_TEST.json', 'twister_TEST_report.xml',
                'twister_TEST_suite_report.xml', 'twister_TEST.xml'
            ]
        ),
    ]
    TESTDATA_3 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86/atom', 'mps2/an385'],
            ['--report-name', 'abcd'],
            [
                'abcd.json', 'abcd_report.xml',
                'abcd_suite_report.xml', 'abcd.xml'
            ]
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86/atom', 'mps2/an385'],
            ['--report-name', '1234', '--platform-reports'],
            [
                'mps2_an385.xml', 'qemu_x86_atom.xml',
                '1234.json', '1234_report.xml',
                '1234_suite_report.xml', '1234.xml'
            ]
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86/atom', 'mps2/an385'],
            ['--report-name', 'Final', '--platform-reports', '--report-suffix=Test'],
            [
                'mps2_an385_Test.xml', 'qemu_x86_atom_Test.xml',
                'Final_Test.json', 'Final_Test_report.xml',
                'Final_Test_suite_report.xml', 'Final_Test.xml'
            ]
        ),
    ]
    TESTDATA_4 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86/atom'],
            [
                'twister.json', 'twister_report.xml',
                'twister_suite_report.xml', 'twister.xml'
            ],
            "TEST_DIR"
        ),
    ]
    TESTDATA_5 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86/atom'],
            [
                'testplan.json', 'twister.log',
                'twister.json', 'twister_report.xml',
                'twister_suite_report.xml', 'twister.xml'
            ],
            "OUT_DIR"
        ),
    ]
    TESTDATA_6 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86/atom'],
            "TEST_LOG_FILE.log"
        ),
    ]
    TESTDATA_7 = [
        (
            os.path.join(TEST_DATA, 'tests', 'one_fail_two_error_one_pass'),
            ['qemu_x86/atom'],
            [r'one_fail_two_error_one_pass.agnostic.group1.subgroup2 on qemu_x86/atom FAILED \(.*\)',
             r'one_fail_two_error_one_pass.agnostic.group1.subgroup3 on qemu_x86/atom ERROR \(Build failure\)',
             r'one_fail_two_error_one_pass.agnostic.group1.subgroup4 on qemu_x86/atom ERROR \(Build failure\)'],
        )
    ]

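    # Load the twister script as a module so each test can execute it
    # in-process via exec_module() instead of spawning a subprocess.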
    @classmethod
    def setup_class(cls):
        apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)

    @classmethod
    def teardown_class(cls):
        pass

    @pytest.mark.parametrize(
        'test_path, test_platforms, file_name',
        TESTDATA_1,
        ids=[
            'platform_reports'
        ]
    )
    def test_platform_reports(self, capfd, out_path, test_path, test_platforms, file_name):
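        # Interleave '-p' before each platform name, producing e.g.
        # ['-p', 'qemu_x86/atom', '-p', 'mps2/an385'].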
        args = ['-i', '--outdir', out_path, '-T', test_path, '--platform-reports'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        for f_name in file_name:
            path = os.path.join(out_path, f_name)
            assert os.path.exists(path), f'file not found: {f_name}'

            if path.endswith(".json"):
                with open(path, "r") as json_file:
                    data = json.load(json_file)
                    assert data, f"JSON file '{path}' is empty"

            elif path.endswith(".xml"):
                tree = etree.parse(path)
                xml_text = etree.tostring(tree, encoding="utf-8").decode("utf-8")
                assert xml_text.strip(), f"XML file '{path}' is empty"

            elif path.endswith(".log"):
                with open(path, "r") as log_file:
                    text_content = log_file.read()
                    assert text_content.strip(), f"LOG file '{path}' is empty"

            else:
                pytest.fail(f"Unsupported file type: '{path}'")

        # --platform-reports also emits a per-platform JSON report.
        for f_platform in test_platforms:
            platform_path = os.path.join(out_path, f_platform.replace("/", "_") + ".json")
            assert os.path.exists(platform_path), f'file not found: {f_platform}'

        assert str(sys_exit.value) == '0'

    @pytest.mark.parametrize(
        'test_path, test_platforms, file_name',
        TESTDATA_2,
        ids=[
            'report_suffix',
        ]
    )
    def test_report_suffix(self, capfd, out_path, test_path, test_platforms, file_name):
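        # With '--report-suffix=TEST', every generated report name should carry
        # a '_TEST' suffix (see TESTDATA_2).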
        args = ['-i', '--outdir', out_path, '-T', test_path,
                '--platform-reports', '--report-suffix=TEST'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        for f_name in file_name:
            path = os.path.join(out_path, f_name)
            assert os.path.exists(path), f'file not found: {f_name}'

        assert str(sys_exit.value) == '0'

    @pytest.mark.parametrize(
        'test_path, test_platforms, report_arg, file_name',
        TESTDATA_3,
        ids=[
            'only_report_name',
            'report_name + platform_reports',
            'report_name + platform_reports + report_suffix'
        ]
    )
    def test_report_name(self, capfd, out_path, test_path, test_platforms, report_arg, file_name):
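        # report_arg is already a flat list of CLI tokens, so append it as-is.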
        args = ['-i', '--outdir', out_path, '-T', test_path] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair] + \
               report_arg

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        for f_name in file_name:
            path = os.path.join(out_path, f_name)
            assert os.path.exists(path), f'file not found: {f_name}'

        assert str(sys_exit.value) == '0'

    @pytest.mark.parametrize(
        'test_path, test_platforms, file_name, dir_name',
        TESTDATA_4,
        ids=[
            'report_dir',
        ]
    )
    def test_report_dir(self, capfd, out_path, test_path, test_platforms, file_name, dir_name):
        args = ['-i', '--outdir', out_path, '-T', test_path, '--report-dir', dir_name] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        twister_path = os.path.join(ZEPHYR_BASE, dir_name)
        if os.path.exists(twister_path):
            shutil.rmtree(twister_path)

        try:
            with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                    pytest.raises(SystemExit) as sys_exit:
                self.loader.exec_module(self.twister_module)

            out, err = capfd.readouterr()
            sys.stdout.write(out)
            sys.stderr.write(err)

            for f_name in file_name:
                path = os.path.join(twister_path, f_name)
                assert os.path.exists(path), f'file not found: {f_name}'

            assert str(sys_exit.value) == '0'
        finally:
            if os.path.exists(twister_path):
                shutil.rmtree(twister_path)

    @pytest.mark.noclearout
    @pytest.mark.parametrize(
        'test_path, test_platforms, file_name, dir_name',
        TESTDATA_5,
        ids=[
            'outdir',
        ]
    )
    def test_outdir(self, capfd, test_path, test_platforms, file_name, dir_name):
        args = ['-i', '-T', test_path, '--outdir', dir_name] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        twister_path = os.path.join(ZEPHYR_BASE, dir_name)
        if os.path.exists(twister_path):
            shutil.rmtree(twister_path)

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        try:
            for f_name in file_name:
                path = os.path.join(twister_path, f_name)
                assert os.path.exists(path), f'file not found: {f_name}'

            for f_platform in test_platforms:
                platform_path = os.path.join(twister_path, f_platform.replace("/", "_"))
                assert os.path.exists(platform_path), f'file not found: {f_platform}'

            assert str(sys_exit.value) == '0'
        finally:
            if os.path.exists(twister_path):
                shutil.rmtree(twister_path)

    @pytest.mark.parametrize(
        'test_path, test_platforms, file_name',
        TESTDATA_6,
        ids=[
            'log_file',
        ]
    )
    def test_log_file(self, capfd, test_path, test_platforms, out_path, file_name):
        args = ['-i', '--outdir', out_path, '-T', test_path, '--log-file', file_name] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        file_path = os.path.join(ZEPHYR_BASE, file_name)
        if os.path.exists(file_path):
            os.remove(file_path)

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        assert os.path.exists(file_path), f'file not found: {file_name}'

        assert str(sys_exit.value) == '0'

    @pytest.mark.parametrize(
        'test_path, flags, expected_testcase_counts',
        [
            (
                os.path.join(TEST_DATA, 'tests', 'dummy'),
                ['--detailed-skipped-report'],
                {'qemu_x86/atom': 6, 'intel_adl_crb/alder_lake': 1}
            ),
            (
                os.path.join(TEST_DATA, 'tests', 'dummy'),
                ['--detailed-skipped-report', '--report-filtered'],
                {'qemu_x86/atom': 13, 'intel_adl_crb/alder_lake': 13}
            ),
        ],
        ids=['dummy tests', 'dummy tests with filtered']
    )
    def test_detailed_skipped_report(self, out_path, test_path, flags, expected_testcase_counts):
        test_platforms = ['qemu_x86/atom', 'intel_adl_crb/alder_lake']
        args = ['-i', '--outdir', out_path, '-T', test_path] + \
               flags + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        assert str(sys_exit.value) == '0'

        testsuite_counter = 0
        xml_data = etree.parse(os.path.join(out_path, 'twister_report.xml')).getroot()
        for ts in xml_data.iter('testsuite'):
            testsuite_counter += 1
            # Without the tested flag, filtered testcases would be missing from the report.
            testcase_count = len(list(ts.iter('testcase')))
            expected_tc_count = expected_testcase_counts[ts.get('name')]
            assert testcase_count == expected_tc_count, \
                   'Not all expected testcases appear in the report.' \
                   f' (In {ts.get("name")}, expected {expected_tc_count}, got {testcase_count}.)'

        assert testsuite_counter == len(test_platforms), \
               'Some platforms are missing from the XML report.'

    @pytest.mark.parametrize(
        'test_path, report_filtered, expected_filtered_count',
        [
            (os.path.join(TEST_DATA, 'tests', 'dummy'), False, 0),
            (os.path.join(TEST_DATA, 'tests', 'dummy'), True, 10),
        ],
        ids=['no filtered', 'with filtered']
    )
    def test_report_filtered(self, out_path, test_path, report_filtered, expected_filtered_count):
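        # '--report-filtered' adds filtered testsuites to twister.json; without
        # it, no testsuite should carry the filtered status.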
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        args = ['-i', '--outdir', out_path, '-T', test_path] + \
               (['--report-filtered'] if report_filtered else []) + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        assert str(sys_exit.value) == '0'

        with open(os.path.join(out_path, 'twister.json')) as f:
            j = json.load(f)

        testsuites = j.get('testsuites')
        assert testsuites, 'No testsuites found.'
        statuses = [TwisterStatus(testsuite.get('status')) for testsuite in testsuites]
        filtered_status_count = statuses.count(TwisterStatus.FILTERED)
        assert filtered_status_count == expected_filtered_count, \
            f'Expected {expected_filtered_count} filtered statuses, got {filtered_status_count}.'

    def test_enable_size_report(self, out_path):
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy', 'device', 'group')
        args = ['-i', '--outdir', out_path, '-T', path] + \
               ['--enable-size-report'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        assert str(sys_exit.value) == '0'

        with open(os.path.join(out_path, 'twister.json')) as f:
            j = json.load(f)

        expected_rel_path = os.path.relpath(os.path.join(path, 'dummy.device.group'), ZEPHYR_BASE)

        # If the flag works, twister.json contains the [used/available]_[ram/rom]
        # keys, except for keys whose values would be 0. In this testcase the
        # 'available' values are 0, so only the 'used' keys are expected.
        assert all(
            'used_ram' in ts for ts in j['testsuites']
            if ts['name'] == expected_rel_path and 'reason' not in ts
        )
        assert all(
            'used_rom' in ts for ts in j['testsuites']
            if ts['name'] == expected_rel_path and 'reason' not in ts
        )

    @pytest.mark.parametrize(
        'test_path, test_platforms, expected_content',
        TESTDATA_7,
        ids=[
            'Report summary test'
        ]
    )
    def test_report_summary(self, out_path, capfd, test_path, test_platforms, expected_content):
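        # Three runs: first, a normal run that is expected to fail (exit code 1);
        # second, a rerun with '--report-summary' that should list every failure;
        # third, a fresh run with '--report-summary 2' that should cap the list
        # at two entries.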
        args = ['-i', '--outdir', out_path, '-T', test_path] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        assert str(sys_exit.value) == '1'

        capfd.readouterr()

        clear_log_in_test()

        args += ['--report-summary']

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        for line in expected_content:
            result = re.search(line, err)
            assert result, f'missing information in log: {line}'

        capfd.readouterr()

        clear_log_in_test()

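        # With '--report-summary 2', only two of the expected failure lines
        # should appear in the summary.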
        args = ['-i', '--outdir', out_path, '-T', test_path] + \
               ['--report-summary', '2'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        matched_lines = 0
        for line in expected_content:
            if re.search(line, err):
                matched_lines += 1
        assert matched_lines == 2, \
            f'expected exactly 2 matching summary lines, got {matched_lines}'