#!/usr/bin/env python3
# Copyright (c) 2023-2024 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
Blackbox tests for twister's command line functions
"""
# pylint: disable=duplicate-code

import importlib
import mock
import os
import pytest
import re
import sys
import time

# pylint: disable=no-name-in-module
from conftest import TEST_DATA, ZEPHYR_BASE, testsuite_filename_mock, clear_log_in_test
from twisterlib.testplan import TestPlan


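# Each test in this class runs the twister entry point in-process: sys.argv is patched
# to the desired command line, scripts/twister is executed as __main__, and the
# SystemExit code plus the summary lines printed to stderr are checked afterwards.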
@mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
class TestRunner:
    TESTDATA_1 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86', 'qemu_x86_64', 'intel_adl_crb'],
            {
                'executed_on_platform': 0,
                'only_built': 6
            }
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'device'),
            ['qemu_x86', 'qemu_x86_64', 'intel_adl_crb'],
            {
                'executed_on_platform': 0,
                'only_built': 1
            }
        ),
    ]
    TESTDATA_2 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86/atom', 'qemu_x86_64/atom', 'intel_adl_crb/alder_lake'],
            {
                'selected_test_scenarios': 3,
                'selected_test_instances': 4,
                'skipped_configurations': 0,
                'skipped_by_static_filter': 0,
                'skipped_at_runtime': 0,
                'passed_configurations': 4,
                'built_configurations': 0,
                'failed_configurations': 0,
                'errored_configurations': 0,
                'executed_test_cases': 10,
                'skipped_test_cases': 0,
                'platform_count': 2,
                'executed_on_platform': 4,
                'only_built': 0
            }
        )
    ]
    TESTDATA_3 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86'],
        ),
    ]
    TESTDATA_4 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86', 'qemu_x86_64'],
            {
                'passed_configurations': 0,
                'selected_test_instances': 6,
                'executed_on_platform': 0,
                'only_built': 6,
            }
        ),
    ]
    TESTDATA_5 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86'],
            os.path.join(TEST_DATA, "pre_script.sh")
        ),
    ]
    TESTDATA_6 = [
        (
            os.path.join(TEST_DATA, 'tests', 'always_fail', 'dummy'),
            ['qemu_x86_64'],
            '1',
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'always_fail', 'dummy'),
            ['qemu_x86'],
            '2',
        ),
    ]
    TESTDATA_7 = [
        (
            os.path.join(TEST_DATA, 'tests', 'always_fail', 'dummy'),
            ['qemu_x86'],
            '15',
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'always_fail', 'dummy'),
            ['qemu_x86'],
            '30',
        ),
    ]
    TESTDATA_8 = [
        (
            os.path.join(TEST_DATA, 'tests', 'always_timeout', 'dummy'),
            ['qemu_x86'],
            '2',
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'always_timeout', 'dummy'),
            ['qemu_x86'],
            '0.5',
        ),
    ]
    TESTDATA_9 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy'),
            ['qemu_x86/atom'],
            ['device'],
            ['dummy.agnostic.group2 FILTERED: Command line testsuite tag filter',
             'dummy.agnostic.group1.subgroup2 FILTERED: Command line testsuite tag filter',
             'dummy.agnostic.group1.subgroup1 FILTERED: Command line testsuite tag filter',
             r'0 of 0 executed test configurations passed \(0.00%\), 0 built \(not run\), 0 failed, 0 errored'
             ]
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy'),
            ['qemu_x86/atom'],
            ['subgrouped'],
            ['dummy.agnostic.group2 FILTERED: Command line testsuite tag filter',
             r'1 of 2 executed test configurations passed \(50.00%\), 1 built \(not run\), 0 failed, 0 errored'
             ]
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy'),
            ['qemu_x86/atom'],
            ['agnostic', 'device'],
            [r'2 of 3 executed test configurations passed \(66.67%\), 1 built \(not run\), 0 failed, 0 errored']
        ),
    ]
    TESTDATA_10 = [
        (
            os.path.join(TEST_DATA, 'tests', 'one_fail_one_pass'),
            ['qemu_x86/atom'],
            {
                'selected_test_instances': 2,
                'skipped_configurations': 0,
                'passed_configurations': 0,
                'built_configurations': 0,
                'failed_configurations': 1,
                'errored_configurations': 0,
            }
        )
    ]
    TESTDATA_11 = [
        (
            os.path.join(TEST_DATA, 'tests', 'always_build_error'),
            ['qemu_x86_64'],
            '1',
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'always_build_error'),
            ['qemu_x86'],
            '4',
        ),
    ]

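    # setup_class() only prepares an importable module object for scripts/twister;
    # the script itself is (re-)executed by each test via self.loader.exec_module().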
    @classmethod
    def setup_class(cls):
        apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)

    @classmethod
    def teardown_class(cls):
        pass

    @pytest.mark.parametrize(
        'test_path, test_platforms, expected',
        TESTDATA_1,
        ids=[
            'build_only tests/dummy/agnostic',
            'build_only tests/dummy/device',
        ],
    )
    @pytest.mark.parametrize(
        'flag',
        ['--build-only', '-b']
    )
    def test_build_only(self, capfd, out_path, test_path, test_platforms, expected, flag):
        args = ['-i', '--outdir', out_path, '-T', test_path, flag] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        built_regex = r'^INFO    - (?P<executed_on_platform>[0-9]+)' \
                      r' test configurations executed on platforms, (?P<only_built>[0-9]+)' \
                      r' test configurations were only built.$'

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        built_search = re.search(built_regex, err, re.MULTILINE)

        assert built_search
        assert int(built_search.group('executed_on_platform')) == \
               expected['executed_on_platform']
        assert int(built_search.group('only_built')) == \
               expected['only_built']

        assert str(sys_exit.value) == '0'

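    # Build everything with --build-only first, then rerun the same selection with
    # --test-only and check the selection/pass/case/platform summary lines.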
    @pytest.mark.parametrize(
        'test_path, test_platforms, expected',
        TESTDATA_2,
        ids=[
            'test_only'
        ],
    )
    def test_runtest_only(self, capfd, out_path, test_path, test_platforms, expected):
        args = ['--outdir', out_path, '-i', '-T', test_path, '--build-only'] + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        capfd.readouterr()

        clear_log_in_test()

        args = ['--outdir', out_path, '-i', '-T', test_path, '--test-only'] + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        select_regex = r'^INFO    - (?P<test_scenarios>[0-9]+) test scenarios' \
                       r' \((?P<test_instances>[0-9]+) configurations\) selected,' \
                       r' (?P<skipped_configurations>[0-9]+) configurations filtered' \
                       r' \((?P<skipped_by_static_filter>[0-9]+) by static filter,' \
                       r' (?P<skipped_at_runtime>[0-9]+) at runtime\)\.$'

        pass_regex = r'^INFO    - (?P<passed_configurations>[0-9]+) of' \
                     r' (?P<test_instances>[0-9]+) executed test configurations passed' \
                     r' \([0-9]+\.[0-9]+%\), (?P<built_configurations>[0-9]+) built \(not run\),' \
                     r' (?P<failed_configurations>[0-9]+) failed,' \
                     r' (?P<errored_configurations>[0-9]+) errored, with' \
                     r' (?:[0-9]+|no) warnings in [0-9]+\.[0-9]+ seconds.$'

        case_regex = r'^INFO    - (?P<passed_cases>[0-9]+) of' \
                     r' (?P<executed_test_cases>[0-9]+) executed test cases passed' \
                     r' \([0-9]+\.[0-9]+%\)' \
                     r'(?:, (?P<blocked_cases>[0-9]+) blocked)?' \
                     r'(?:, (?P<failed_cases>[0-9]+) failed)?' \
                     r'(?:, (?P<errored_cases>[0-9]+) errored)?' \
                     r'(?:, (?P<none_cases>[0-9]+) without a status)?' \
                     r' on (?P<platform_count>[0-9]+) out of total' \
                     r' (?P<total_platform_count>[0-9]+) platforms \([0-9]+\.[0-9]+%\)'

        skip_regex = r'(?P<skipped_test_cases>[0-9]+) selected test cases not executed:' \
                     r'(?: (?P<skipped_cases>[0-9]+) skipped)?' \
                     r'(?:, (?P<filtered_cases>[0-9]+) filtered)?' \
                     r'.'

        built_regex = r'^INFO    - (?P<executed_on_platform>[0-9]+)' \
                      r' test configurations executed on platforms, (?P<only_built>[0-9]+)' \
                      r' test configurations were only built.$'

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        select_search = re.search(select_regex, err, re.MULTILINE)

        assert select_search
        assert int(select_search.group('test_scenarios')) == \
            expected['selected_test_scenarios']
        assert int(select_search.group('test_instances')) == \
            expected['selected_test_instances']
        assert int(select_search.group('skipped_configurations')) == \
            expected['skipped_configurations']
        assert int(select_search.group('skipped_by_static_filter')) == \
            expected['skipped_by_static_filter']
        assert int(select_search.group('skipped_at_runtime')) == \
            expected['skipped_at_runtime']

        pass_search = re.search(pass_regex, err, re.MULTILINE)

        assert pass_search
        assert int(pass_search.group('passed_configurations')) == \
            expected['passed_configurations']
        assert int(pass_search.group('test_instances')) == \
            expected['selected_test_instances']
        assert int(pass_search.group('built_configurations')) == \
            expected['built_configurations']
        assert int(pass_search.group('failed_configurations')) == \
            expected['failed_configurations']
        assert int(pass_search.group('errored_configurations')) == \
            expected['errored_configurations']

        case_search = re.search(case_regex, err, re.MULTILINE)

        assert case_search
        assert int(case_search.group('executed_test_cases')) == \
            expected['executed_test_cases']
        assert int(case_search.group('platform_count')) == \
            expected['platform_count']

        if expected['skipped_test_cases']:
            skip_search = re.search(skip_regex, err, re.MULTILINE)
            assert skip_search
            assert int(skip_search.group('skipped_test_cases')) == \
                expected['skipped_test_cases']

        built_search = re.search(built_regex, err, re.MULTILINE)

        assert built_search
        assert int(built_search.group('executed_on_platform')) == \
               expected['executed_on_platform']
        assert int(built_search.group('only_built')) == \
               expected['only_built']

        assert str(sys_exit.value) == '0'

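    # A dry run (--dry-run / -y) is only expected to exit cleanly; no output is inspected.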
    @pytest.mark.parametrize(
        'test_path, test_platforms',
        TESTDATA_3,
        ids=[
            'dry_run',
        ],
    )
    @pytest.mark.parametrize(
        'flag',
        ['--dry-run', '-y']
    )
    def test_dry_run(self, capfd, out_path, test_path, test_platforms, flag):
        args = ['--outdir', out_path, '-T', test_path, flag] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        assert str(sys_exit.value) == '0'

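    # With --cmake-only, the summary should report zero passed and zero executed-on-platform
    # configurations; everything selected ends up as "only built".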
    @pytest.mark.parametrize(
        'test_path, test_platforms, expected',
        TESTDATA_4,
        ids=[
            'cmake_only',
        ],
    )
    def test_cmake_only(self, capfd, out_path, test_path, test_platforms, expected):
        args = ['--outdir', out_path, '-T', test_path, '--cmake-only'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        pass_regex = r'^INFO    - (?P<passed_configurations>[0-9]+) of' \
                     r' (?P<test_instances>[0-9]+) executed test configurations passed'

        built_regex = r'^INFO    - (?P<executed_on_platform>[0-9]+)' \
                      r' test configurations executed on platforms, (?P<only_built>[0-9]+)' \
                      r' test configurations were only built.$'

        pass_search = re.search(pass_regex, err, re.MULTILINE)

        assert pass_search
        assert int(pass_search.group('passed_configurations')) == \
               expected['passed_configurations']
        assert int(pass_search.group('test_instances')) == \
               expected['selected_test_instances']

        built_search = re.search(built_regex, err, re.MULTILINE)

        assert built_search
        assert int(built_search.group('executed_on_platform')) == \
               expected['executed_on_platform']
        assert int(built_search.group('only_built')) == \
               expected['only_built']

        assert str(sys_exit.value) == '0'

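    # Passing --pre-script with an existing script is only expected to exit cleanly.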
    @pytest.mark.parametrize(
        'test_path, test_platforms, file_name',
        TESTDATA_5,
        ids=[
            'pre_script',
        ],
    )
    def test_pre_script(self, capfd, out_path, test_path, test_platforms, file_name):
        args = ['--outdir', out_path, '-T', test_path, '--pre-script', file_name] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        assert str(sys_exit.value) == '0'

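    # --device-flash-timeout only needs to be accepted on the command line here;
    # the run is expected to exit cleanly.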
    @pytest.mark.parametrize(
        'test_path, test_platforms',
        TESTDATA_3,
        ids=[
            'device_flash_timeout',
        ],
    )
    def test_device_flash_timeout(self, capfd, out_path, test_path, test_platforms):
        args = ['--outdir', out_path, '-T', test_path, '--device-flash-timeout', "240"] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        assert str(sys_exit.value) == '0'

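    # With --retry-failed N, an always-failing test should be attempted N + 1 times;
    # the last "Iteration" block in the log is checked for the attempt count and platform.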
    @pytest.mark.parametrize(
        'test_path, test_platforms, iterations',
        TESTDATA_6,
        ids=[
            'retry 2',
            'retry 3'
        ],
    )
    def test_retry(self, capfd, out_path, test_path, test_platforms, iterations):
        args = ['--outdir', out_path, '-T', test_path, '--retry-failed', iterations, '--retry-interval', '1'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        pattern = re.compile(r'INFO\s+-\s+(\d+)\s+Iteration:[\s\S]*?ERROR\s+-\s+(\w+)')
        matches = pattern.findall(err)

        if matches:
            last_iteration = max(int(match[0]) for match in matches)
            last_match = next(match for match in matches if int(match[0]) == last_iteration)
            iteration_number, platform_name = int(last_match[0]), last_match[1]
            assert iteration_number == int(iterations) + 1
            assert [platform_name] == test_platforms
        else:
            assert False, 'Iteration pattern not found in the output'

        assert str(sys_exit.value) == '1'

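    # A single retry with --retry-interval N must keep the whole run going
    # for at least N seconds.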
    @pytest.mark.parametrize(
        'test_path, test_platforms, interval',
        TESTDATA_7,
        ids=[
            'retry interval 15',
            'retry interval 30'
        ],
    )
    def test_retry_interval(self, capfd, out_path, test_path, test_platforms, interval):
        args = ['--outdir', out_path, '-T', test_path, '--retry-failed', '1', '--retry-interval', interval] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        start_time = time.time()

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        end_time = time.time()
        elapsed_time = end_time - start_time
        assert elapsed_time >= int(interval), 'retry interval was too short'

        assert str(sys_exit.value) == '1'

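    # The QEMU timeout reported in the log is expected to be roughly
    # 10 s (the base timeout used here) multiplied by --timeout-multiplier.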
    @pytest.mark.parametrize(
        'test_path, test_platforms, timeout',
        TESTDATA_8,
        ids=[
            'timeout-multiplier 2 - 20s',
            'timeout-multiplier 0.5 - 5s'
        ],
    )
    def test_timeout_multiplier(self, capfd, out_path, test_path, test_platforms, timeout):
        args = ['--outdir', out_path, '-T', test_path, '--timeout-multiplier', timeout, '-v'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        tolerance = 1.0

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        timeout_search = re.search(r'Timeout \(qemu (\d+\.\d+)s\)', err)

        assert timeout_search, 'QEMU timeout information not found in the output'

        elapsed_time = float(timeout_search.group(1))

        assert abs(elapsed_time - float(timeout) * 10) <= tolerance, \
            'Time is different from expected'

        assert str(sys_exit.value) == '1'

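    # Tag filtering (-t): non-matching scenarios must show up as FILTERED in the debug
    # log and the final summary line must match the expected counts.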
    @pytest.mark.parametrize(
        'test_path, test_platforms, tags, expected',
        TESTDATA_9,
        ids=[
            'tags device',
            'tags subgrouped',
            'tags agnostic and device'
        ],
    )
    def test_tag(self, capfd, out_path, test_path, test_platforms, tags, expected):
        args = ['--outdir', out_path, '-T', test_path, '-vv', '-ll', 'DEBUG'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair] + \
               [val for pair in zip(
                   ['-t'] * len(tags), tags
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        for line in expected:
            assert re.search(line, err), f"expected pattern '{line}' not found in '{err}'"

        assert str(sys_exit.value) == '0'

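    # Run the one_fail_one_pass suite once, then rerun it with --only-failed and verify
    # that the previously failed configuration fails again and the summary counts match.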
    @pytest.mark.parametrize(
        'test_path, test_platforms, expected',
        TESTDATA_10,
        ids=[
            'only_failed'
        ],
    )
    def test_only_failed(self, capfd, out_path, test_path, test_platforms, expected):
        args = ['--outdir', out_path, '-i', '-T', test_path, '-v'] + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        capfd.readouterr()

        clear_log_in_test()

        args = ['--outdir', out_path, '-i', '-T', test_path, '--only-failed'] + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        select_regex = r'^INFO    - (?P<test_scenarios>[0-9]+) test scenarios' \
                       r' \((?P<test_instances>[0-9]+) configurations\) selected,' \
                       r' (?P<skipped_configurations>[0-9]+) configurations filtered' \
                       r' \((?P<skipped_by_static_filter>[0-9]+) by static filter,' \
                       r' (?P<skipped_at_runtime>[0-9]+) at runtime\)\.$'

        pass_regex = r'^INFO    - (?P<passed_configurations>[0-9]+) of' \
                     r' (?P<test_instances>[0-9]+) executed test configurations passed' \
                     r' \([0-9]+\.[0-9]+%\), (?P<built_configurations>[0-9]+) built \(not run\),' \
                     r' (?P<failed_configurations>[0-9]+) failed,' \
                     r' (?P<errored_configurations>[0-9]+) errored, with' \
                     r' (?:[0-9]+|no) warnings in [0-9]+\.[0-9]+ seconds.$'

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        assert re.search(
            r'one_fail_one_pass.agnostic.group1.subgroup2 on qemu_x86/atom failed \(.*\)', err)

        select_search = re.search(select_regex, err, re.MULTILINE)

        assert select_search
        assert int(select_search.group('skipped_configurations')) == \
                expected['skipped_configurations']

        pass_search = re.search(pass_regex, err, re.MULTILINE)

        assert pass_search
        assert int(pass_search.group('passed_configurations')) == \
                expected['passed_configurations']
        assert int(pass_search.group('test_instances')) == \
                expected['selected_test_instances']
        assert int(pass_search.group('built_configurations')) == \
                expected['built_configurations']
        assert int(pass_search.group('failed_configurations')) == \
                expected['failed_configurations']
        assert int(pass_search.group('errored_configurations')) == \
                expected['errored_configurations']

        assert str(sys_exit.value) == '1'

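    # Same retry logic as test_retry, but with --retry-build-errors so that
    # configurations from always_build_error are retried as well.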
    @pytest.mark.parametrize(
        'test_path, test_platforms, iterations',
        TESTDATA_11,
        ids=[
            'retry 2',
            'retry 5'
        ],
    )
    def test_retry_build_errors(self, capfd, out_path, test_path, test_platforms, iterations):
        args = ['--outdir', out_path, '-T', test_path, '--retry-build-errors', '--retry-failed', iterations,
                '--retry-interval', '10'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        pattern = re.compile(r'INFO\s+-\s+(\d+)\s+Iteration:[\s\S]*?ERROR\s+-\s+(\w+)')
        matches = pattern.findall(err)

        if matches:
            last_iteration = max(int(match[0]) for match in matches)
            last_match = next(match for match in matches if int(match[0]) == last_iteration)
            iteration_number, platform_name = int(last_match[0]), last_match[1]
            assert iteration_number == int(iterations) + 1
            assert [platform_name] == test_platforms
        else:
            assert False, 'Iteration pattern not found in the output'

        assert str(sys_exit.value) == '1'