#!/usr/bin/env python3

# Copyright (c) 2023 Google LLC
# SPDX-License-Identifier: Apache-2.0

"""
Tests for the Harness classes of twister.
"""
import logging as logger
import os
import re
import sys

import mock
import pytest

ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))

from twisterlib.harness import (
    Bsim,
    Console,
    Gtest,
    Harness,
    HarnessImporter,
    Pytest,
    PytestHarnessException,
    Robot,
    Test,
)
from twisterlib.testinstance import TestInstance

GTEST_START_STATE = " RUN      "
GTEST_PASS_STATE = "       OK "
GTEST_SKIP_STATE = " DISABLED "
GTEST_FAIL_STATE = "  FAILED  "
SAMPLE_GTEST_START = (
    "[00:00:00.000,000] <inf> label:  [==========] Running all tests."
)
SAMPLE_GTEST_FMT = "[00:00:00.000,000] <inf> label:  [{state}] {suite}.{test} (0ms)"
SAMPLE_GTEST_END = (
    "[00:00:00.000,000] <inf> label:  [==========] Done running all tests."
)


def process_logs(harness, logs):
    """Feed each console log line to the harness under test."""
    for line in logs:
        harness.handle(line)


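# Each TEST_DATA_RECORDING entry is: (console lines fed to parse_record(),
# the 'regex' record pattern, the expected harness.recording contents, and the
# 'as_json' field list or None).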
TEST_DATA_RECORDING = [
    ([''], "^START:(?P<foo>.*):END", [], None),
    (['START:bar:STOP'], "^START:(?P<foo>.*):END", [], None),
    (['START:bar:END'], "^START:(?P<foo>.*):END", [{'foo': 'bar'}], None),
    (['START:bar:baz:END'], "^START:(?P<foo>.*):(?P<boo>.*):END", [{'foo': 'bar', 'boo': 'baz'}], None),
    (['START:bar:baz:END', 'START:may:jun:END'], "^START:(?P<foo>.*):(?P<boo>.*):END",
     [{'foo': 'bar', 'boo': 'baz'}, {'foo': 'may', 'boo': 'jun'}], None),
    (['START:bar:END'], "^START:(?P<foo>.*):END", [{'foo': 'bar'}], []),
    (['START:bar:END'], "^START:(?P<foo>.*):END", [{'foo': 'bar'}], ['boo']),
    (['START:bad_json:END'], "^START:(?P<foo>.*):END",
     [{'foo': {'ERROR': {'msg': 'Expecting value: line 1 column 1 (char 0)', 'doc': 'bad_json'}}}], ['foo']),
    (['START::END'], "^START:(?P<foo>.*):END", [{'foo': {}}], ['foo']),
    (['START: {"one":1, "two":2} :END'], "^START:(?P<foo>.*):END", [{'foo': {'one': 1, 'two': 2}}], ['foo']),
    (['START: {"one":1, "two":2} :STOP:oops:END'], "^START:(?P<foo>.*):STOP:(?P<boo>.*):END",
     [{'foo': {'one': 1, 'two': 2}, 'boo': 'oops'}], ['foo']),
    (['START: {"one":1, "two":2} :STOP:{"oops":0}:END'], "^START:(?P<foo>.*):STOP:(?P<boo>.*):END",
     [{'foo': {'one': 1, 'two': 2}, 'boo': {'oops': 0}}], ['foo', 'boo']),
]
@pytest.mark.parametrize(
    "lines, pattern, expected_records, as_json",
    TEST_DATA_RECORDING,
    ids=["empty", "no match", "match 1 field", "match 2 fields", "match 2 records",
         "as_json empty", "as_json no such field", "error parsing json", "empty json value",
         "simple json", "plain field and json field", "two json fields"
        ]
)
def test_harness_parse_record(lines, pattern, expected_records, as_json):
    harness = Harness()
    harness.record = {'regex': pattern}
    harness.record_pattern = re.compile(pattern)

    harness.record_as_json = as_json
    if as_json is not None:
        harness.record['as_json'] = as_json

    assert not harness.recording

    for line in lines:
        harness.parse_record(line)

    assert harness.recording == expected_records


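# Each TEST_DATA_1 entry is: (console line, initial fault flag, fail_on_fault
# flag, expected capture_coverage, expected harness state, expected matched_run_id).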
TEST_DATA_1 = [('RunID: 12345', False, False, False, None, True),
               ('PROJECT EXECUTION SUCCESSFUL', False, False, False, 'passed', False),
               ('PROJECT EXECUTION SUCCESSFUL', True, False, False, 'failed', False),
               ('PROJECT EXECUTION FAILED', False, False, False, 'failed', False),
               ('ZEPHYR FATAL ERROR', False, True, False, None, False),
               ('GCOV_COVERAGE_DUMP_START', None, None, True, None, False),
               ('GCOV_COVERAGE_DUMP_END', None, None, False, None, False),]
@pytest.mark.parametrize(
    "line, fault, fail_on_fault, cap_cov, exp_stat, exp_id",
    TEST_DATA_1,
    ids=["match id", "passed passed", "passed failed", "failed failed",
         "fail on fault", "GCOV START", "GCOV END"]
)
def test_harness_process_test(line, fault, fail_on_fault, cap_cov, exp_stat, exp_id):
    # Arrange
    harness = Harness()
    harness.run_id = 12345
    harness.state = None
    harness.fault = fault
    harness.fail_on_fault = fail_on_fault

    # Act
    # Stub parse_record out so only process_test's own state handling is exercised.
    with mock.patch.object(Harness, 'parse_record', return_value=None):
        harness.process_test(line)

    # Assert
    assert harness.matched_run_id == exp_id
    assert harness.state == exp_stat
    assert harness.capture_coverage == cap_cov
    assert harness.recording == []


def test_robot_configure(tmp_path):
    # Arrange
    mock_platform = mock.Mock()
    mock_platform.name = "mock_platform"
    mock_platform.normalized_name = "mock_platform"

    mock_testsuite = mock.Mock(id='id', testcases=[])
    mock_testsuite.name = "mock_testsuite"
    mock_testsuite.harness_config = {}

    outdir = tmp_path / 'gtest_out'
    outdir.mkdir()

    instance = TestInstance(testsuite=mock_testsuite, platform=mock_platform, outdir=outdir)
    instance.testsuite.harness_config = {
        'robot_testsuite': '/path/to/robot/test',
        'robot_option': 'test_option'
    }
    robot_harness = Robot()

    # Act
    robot_harness.configure(instance)

    # Assert
    assert robot_harness.instance == instance
    assert robot_harness.path == '/path/to/robot/test'
    assert robot_harness.option == 'test_option'


def test_robot_handle(tmp_path):
    # Arrange
    mock_platform = mock.Mock()
    mock_platform.name = "mock_platform"
    mock_platform.normalized_name = "mock_platform"

    mock_testsuite = mock.Mock(id='id', testcases=[])
    mock_testsuite.name = "mock_testsuite"
    mock_testsuite.harness_config = {}

    outdir = tmp_path / 'gtest_out'
    outdir.mkdir()

    instance = TestInstance(testsuite=mock_testsuite, platform=mock_platform, outdir=outdir)

    handler = Robot()
    handler.instance = instance
    handler.id = 'test_case_1'

    line = 'Test case passed'

    # Act
    handler.handle(line)
    tc = instance.get_case_or_create('test_case_1')

    # Assert
    assert instance.state == "passed"
    assert tc.status == "passed"


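# Each TEST_DATA_2 entry is: (expected log output, mocked Popen return code,
# expected instance status after run_robot_test()).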
TEST_DATA_2 = [("", 0, "passed"), ("Robot test failure: sourcedir for mock_platform", 1, "failed"),]
@pytest.mark.parametrize(
    "exp_out, returncode, expected_status",
    TEST_DATA_2,
    ids=["passed", "failed"]
)
def test_robot_run_robot_test(tmp_path, caplog, exp_out, returncode, expected_status):
    # Arrange
    command = ["command"]

    handler = mock.Mock()
    handler.sourcedir = "sourcedir"
    handler.log = "handler.log"

    path = "path"
    option = "option"

    mock_platform = mock.Mock()
    mock_platform.name = "mock_platform"
    mock_platform.normalized_name = "mock_platform"

    mock_testsuite = mock.Mock(id='id', testcases=[mock.Mock()])
    mock_testsuite.name = "mock_testsuite"
    mock_testsuite.harness_config = {}

    outdir = tmp_path / 'gtest_out'
    outdir.mkdir()

    instance = TestInstance(testsuite=mock_testsuite, platform=mock_platform, outdir=outdir)
    instance.build_dir = "build_dir"

    open_mock = mock.mock_open()

    robot = Robot()
    robot.path = path
    robot.option = option
    robot.instance = instance
    proc_mock = mock.Mock(
        returncode=returncode,
        communicate=mock.Mock(return_value=(b"output", None))
    )
    popen_mock = mock.Mock(return_value=mock.Mock(
        __enter__=mock.Mock(return_value=proc_mock),
        __exit__=mock.Mock()
    ))

    # Act
    with mock.patch("subprocess.Popen", popen_mock), \
         mock.patch("builtins.open", open_mock):
        robot.run_robot_test(command, handler)

    # Assert
    assert instance.status == expected_status
    open_mock().write.assert_called_once_with("output")
    assert exp_out in caplog.text


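# Each TEST_DATA_3 entry is: (harness_config 'type', expected number of compiled
# patterns, or None for the single-pattern one_line case).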
TEST_DATA_3 = [('one_line', None), ('multi_line', 2),]
@pytest.mark.parametrize(
    "type, num_patterns",
    TEST_DATA_3,
    ids=["one line", "multi line"]
)
def test_console_configure(tmp_path, type, num_patterns):
    # Arrange
    mock_platform = mock.Mock()
    mock_platform.name = "mock_platform"
    mock_platform.normalized_name = "mock_platform"

    mock_testsuite = mock.Mock(id='id', testcases=[])
    mock_testsuite.name = "mock_testsuite"
    mock_testsuite.harness_config = {}

    outdir = tmp_path / 'gtest_out'
    outdir.mkdir()

    instance = TestInstance(testsuite=mock_testsuite, platform=mock_platform, outdir=outdir)
    instance.testsuite.harness_config = {
        'type': type,
        'regex': ['pattern1', 'pattern2']
    }
    console = Console()

    # Act
    console.configure(instance)

    # Assert
    if num_patterns == 2:
        assert len(console.patterns) == num_patterns
        assert [pattern.pattern for pattern in console.patterns] == ['pattern1', 'pattern2']
    else:
        assert console.pattern.pattern == 'pattern1'


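# Each TEST_DATA_4 entry is: (console harness type, 'ordered' flag, expected
# state, line fed to handle(), expected fault flag, expected capture_coverage).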
TEST_DATA_4 = [("one_line", True, "passed", "line", False, False),
               ("multi_line", True, "passed", "line", False, False),
               ("multi_line", False, "passed", "line", False, False),
               ("invalid_type", False, None, "line", False, False),
               ("invalid_type", False, None, "ERROR", True, False),
               ("invalid_type", False, None, "COVERAGE_START", False, True),
               ("invalid_type", False, None, "COVERAGE_END", False, False)]
@pytest.mark.parametrize(
    "line_type, ordered_val, exp_state, line, exp_fault, exp_capture",
    TEST_DATA_4,
    ids=["one line", "multi line ordered", "multi line not ordered", "logger error",
         "fail on fault", "GCOV START", "GCOV END"]
)
def test_console_handle(tmp_path, line_type, ordered_val, exp_state, line, exp_fault, exp_capture):
    mock_platform = mock.Mock()
    mock_platform.name = "mock_platform"
    mock_platform.normalized_name = "mock_platform"

    mock_testsuite = mock.Mock(id='id', testcases=[])
    mock_testsuite.name = "mock_testsuite"
    mock_testsuite.harness_config = {}

    outdir = tmp_path / 'gtest_out'
    outdir.mkdir()

    instance = TestInstance(testsuite=mock_testsuite, platform=mock_platform, outdir=outdir)

    console = Console()
    console.instance = instance
    console.type = line_type
    console.patterns = [re.compile("pattern1"), re.compile("pattern2")]
    console.pattern = re.compile("pattern")
    console.patterns_expected = 0
    console.state = None
    console.fail_on_fault = True
    console.FAULT = "ERROR"
    console.GCOV_START = "COVERAGE_START"
    console.GCOV_END = "COVERAGE_END"
    console.record = {"regex": "RESULT: (.*)"}
    console.fieldnames = []
    console.recording = []
    console.regex = ["regex1", "regex2"]
    console.id = "test_case_1"

    instance.get_case_or_create('test_case_1')
    instance.testsuite.id = "test_suite_1"

    console.next_pattern = 0
    console.ordered = ordered_val
    console.handle(line)

    line1 = "pattern1"
    line2 = "pattern2"
    console.handle(line1)
    console.handle(line2)
    assert console.state == exp_state
    with pytest.raises(Exception):
        console.handle(line)
        assert logger.error.called
    assert console.fault == exp_fault
    assert console.capture_coverage == exp_capture


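# Each TEST_DATA_5 entry is: (serial_pty value of the mocked hardware, 0 if
# handler.get_hardware() should return that hardware or 1 if it should return None).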
TEST_DATA_5 = [("serial_pty", 0), (None, 0), (None, 1)]
@pytest.mark.parametrize(
    "pty_value, hardware_value",
    TEST_DATA_5,
    ids=["hardware pty", "hardware", "non hardware"]
)
def test_pytest__generate_parameters_for_hardware(tmp_path, pty_value, hardware_value):
    # Arrange
    mock_platform = mock.Mock()
    mock_platform.name = "mock_platform"
    mock_platform.normalized_name = "mock_platform"

    mock_testsuite = mock.Mock(id='id', testcases=[])
    mock_testsuite.name = "mock_testsuite"
    mock_testsuite.harness_config = {}

    outdir = tmp_path / 'gtest_out'
    outdir.mkdir()

    instance = TestInstance(testsuite=mock_testsuite, platform=mock_platform, outdir=outdir)

    handler = mock.Mock()
    handler.instance = instance

    hardware = mock.Mock()
    hardware.serial_pty = pty_value
    hardware.serial = 'serial'
    hardware.baud = 115200
    hardware.runner = "runner"
    hardware.runner_params = ["--runner-param1", "runner-param2"]
    hardware.fixtures = ['fixture1:option1', 'fixture2']

    options = handler.options
    options.west_flash = "args"

    hardware.probe_id = '123'
    hardware.product = 'product'
    hardware.pre_script = 'pre_script'
    hardware.post_flash_script = 'post_flash_script'
    hardware.post_script = 'post_script'

    pytest_test = Pytest()
    pytest_test.configure(instance)

    # Act
    if hardware_value == 0:
        handler.get_hardware.return_value = hardware
        command = pytest_test._generate_parameters_for_hardware(handler)
    else:
        handler.get_hardware.return_value = None

    # Assert
    if hardware_value == 1:
        with pytest.raises(PytestHarnessException) as exinfo:
            pytest_test._generate_parameters_for_hardware(handler)
        assert str(exinfo.value) == 'Hardware is not available'
    else:
        assert '--device-type=hardware' in command
        if pty_value == "serial_pty":
            assert '--device-serial-pty=serial_pty' in command
        else:
            assert '--device-serial=serial' in command
            assert '--device-serial-baud=115200' in command
        assert '--runner=runner' in command
        assert '--runner-params=--runner-param1' in command
        assert '--runner-params=runner-param2' in command
        assert '--west-flash-extra-args=args' in command
        assert '--device-id=123' in command
        assert '--device-product=product' in command
        assert '--pre-script=pre_script' in command
        assert '--post-flash-script=post_flash_script' in command
        assert '--post-script=post_script' in command
        assert '--twister-fixture=fixture1:option1' in command
        assert '--twister-fixture=fixture2' in command


def test__update_command_with_env_dependencies():
    # Arrange
    cmd = ['cmd']
    pytest_test = Pytest()
    mock.patch.object(Pytest, 'PYTEST_PLUGIN_INSTALLED', False)

    # Act
    result_cmd, _ = pytest_test._update_command_with_env_dependencies(cmd)

    # Assert
    assert result_cmd == ['cmd', '-p', 'twister_harness.plugin']


def test_pytest_run(tmp_path, caplog):
    # Arrange
    timeout = 10
    cmd = ['command']
    exp_out = 'Support for handler handler_type not implemented yet'

    harness = Pytest()
    harness = mock.create_autospec(harness)

    mock.patch.object(Pytest, 'generate_command', return_value=cmd)
    mock.patch.object(Pytest, 'run_command')

    mock_platform = mock.Mock()
    mock_platform.name = "mock_platform"
    mock_platform.normalized_name = "mock_platform"

    mock_testsuite = mock.Mock(id='id', testcases=[], source_dir='source_dir', harness_config={})
    mock_testsuite.name = "mock_testsuite"
    mock_testsuite.harness_config = {}

    handler = mock.Mock(
        options=mock.Mock(verbose=0),
        type_str='handler_type'
    )

    outdir = tmp_path / 'gtest_out'
    outdir.mkdir()

    instance = TestInstance(testsuite=mock_testsuite, platform=mock_platform, outdir=outdir)
    instance.handler = handler

    test_obj = Pytest()
    test_obj.configure(instance)

    # Act
    test_obj.pytest_run(timeout)

    # Assert
    assert test_obj.state == 'failed'
    assert exp_out in caplog.text


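# Each TEST_DATA_6 entry is the harness name passed to HarnessImporter.get_harness();
# a Test instance is expected back in both cases.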
TEST_DATA_6 = [None, 'Test']
@pytest.mark.parametrize(
    "name",
    TEST_DATA_6,
    ids=["no name", "provided name"]
)
def test_get_harness(name):
    # Arrange
    harnessimporter = HarnessImporter()
    harness_name = name

    # Act
    harness_class = harnessimporter.get_harness(harness_name)

    # Assert
    assert isinstance(harness_class, Test)


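# Each TEST_DATA_7 entry is: (expected log output, console line fed to handle(),
# expected detected suite names, expected testcase status, ztest flag, initial state).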
TEST_DATA_7 = [("", "Running TESTSUITE suite_name", ['suite_name'], None, True, None),
               ("", "START - test_testcase", [], "started", True, None),
               ("", "PASS - test_example in 0 seconds", [], "passed", True, None),
               ("", "SKIP - test_example in 0 seconds", [], "skipped", True, None),
               ("", "FAIL - test_example in 0 seconds", [], "failed", True, None),
               ("not a ztest and no state for test_id", "START - test_testcase", [], "passed", False, "passed"),
               ("not a ztest and no state for test_id", "START - test_testcase", [], "failed", False, "failed")]
@pytest.mark.parametrize(
    "exp_out, line, exp_suite_name, exp_status, ztest, state",
    TEST_DATA_7,
    ids=['testsuite', 'testcase', 'pass', 'skip', 'failed', 'ztest pass', 'ztest fail']
)
def test_test_handle(tmp_path, caplog, exp_out, line, exp_suite_name, exp_status, ztest, state):
    # Arrange
    mock_platform = mock.Mock()
    mock_platform.name = "mock_platform"
    mock_platform.normalized_name = "mock_platform"

    mock_testsuite = mock.Mock(id='id', testcases=[])
    mock_testsuite.name = "mock_testsuite"
    mock_testsuite.harness_config = {}

    outdir = tmp_path / 'gtest_out'
    outdir.mkdir()

    instance = TestInstance(testsuite=mock_testsuite, platform=mock_platform, outdir=outdir)

    test_obj = Test()
    test_obj.configure(instance)
    test_obj.id = 'test_id'
    test_obj.ztest = ztest
    test_obj.state = state

    # Act
    test_obj.handle(line)

    # Assert
    assert test_obj.detected_suite_names == exp_suite_name
    assert exp_out in caplog.text
    if "Running" not in line and exp_out == "":
        assert test_obj.instance.testcases[0].status == exp_status
    if "ztest" in exp_out:
        assert test_obj.instance.testcases[1].status == exp_status


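# Fixture providing a Gtest harness configured for a mocked TestInstance; the
# gTest parsing tests below feed it lines built from the SAMPLE_GTEST_* templates.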
@pytest.fixture
def gtest(tmp_path):
    mock_platform = mock.Mock()
    mock_platform.name = "mock_platform"
    mock_platform.normalized_name = "mock_platform"
    mock_testsuite = mock.Mock()
    mock_testsuite.name = "mock_testsuite"
    mock_testsuite.detailed_test_id = True
    mock_testsuite.id = "id"
    mock_testsuite.testcases = []
    mock_testsuite.harness_config = {}
    outdir = tmp_path / 'gtest_out'
    outdir.mkdir()

    instance = TestInstance(testsuite=mock_testsuite, platform=mock_platform, outdir=outdir)

    harness = Gtest()
    harness.configure(instance)
    return harness


def test_gtest_start_test_no_suites_detected(gtest):
    process_logs(gtest, [SAMPLE_GTEST_START])
    assert len(gtest.detected_suite_names) == 0
    assert gtest.state is None


def test_gtest_start_test(gtest):
    process_logs(
        gtest,
        [
            SAMPLE_GTEST_START,
            SAMPLE_GTEST_FMT.format(
                state=GTEST_START_STATE, suite="suite_name", test="test_name"
            ),
        ],
    )
    assert gtest.state is None
    assert len(gtest.detected_suite_names) == 1
    assert gtest.detected_suite_names[0] == "suite_name"
    assert gtest.instance.get_case_by_name("id.suite_name.test_name") is not None
    assert (
        gtest.instance.get_case_by_name("id.suite_name.test_name").status == "started"
    )


def test_gtest_pass(gtest):
    process_logs(
        gtest,
        [
            SAMPLE_GTEST_START,
            SAMPLE_GTEST_FMT.format(
                state=GTEST_START_STATE, suite="suite_name", test="test_name"
            ),
            SAMPLE_GTEST_FMT.format(
                state=GTEST_PASS_STATE, suite="suite_name", test="test_name"
            ),
        ],
    )
    assert gtest.state is None
    assert len(gtest.detected_suite_names) == 1
    assert gtest.detected_suite_names[0] == "suite_name"
    assert gtest.instance.get_case_by_name("id.suite_name.test_name") is not None
    assert gtest.instance.get_case_by_name("id.suite_name.test_name").status == "passed"


def test_gtest_failed(gtest):
    process_logs(
        gtest,
        [
            SAMPLE_GTEST_START,
            SAMPLE_GTEST_FMT.format(
                state=GTEST_START_STATE, suite="suite_name", test="test_name"
            ),
            SAMPLE_GTEST_FMT.format(
                state=GTEST_FAIL_STATE, suite="suite_name", test="test_name"
            ),
        ],
    )
    assert gtest.state is None
    assert len(gtest.detected_suite_names) == 1
    assert gtest.detected_suite_names[0] == "suite_name"
    assert gtest.instance.get_case_by_name("id.suite_name.test_name") is not None
    assert gtest.instance.get_case_by_name("id.suite_name.test_name").status == "failed"


def test_gtest_skipped(gtest):
    process_logs(
        gtest,
        [
            SAMPLE_GTEST_START,
            SAMPLE_GTEST_FMT.format(
                state=GTEST_START_STATE, suite="suite_name", test="test_name"
            ),
            SAMPLE_GTEST_FMT.format(
                state=GTEST_SKIP_STATE, suite="suite_name", test="test_name"
            ),
        ],
    )
    assert gtest.state is None
    assert len(gtest.detected_suite_names) == 1
    assert gtest.detected_suite_names[0] == "suite_name"
    assert gtest.instance.get_case_by_name("id.suite_name.test_name") is not None
    assert gtest.instance.get_case_by_name("id.suite_name.test_name").status == "skipped"


def test_gtest_all_pass(gtest):
    process_logs(
        gtest,
        [
            SAMPLE_GTEST_START,
            SAMPLE_GTEST_FMT.format(
                state=GTEST_START_STATE, suite="suite_name", test="test_name"
            ),
            SAMPLE_GTEST_FMT.format(
                state=GTEST_PASS_STATE, suite="suite_name", test="test_name"
            ),
            SAMPLE_GTEST_END,
        ],
    )
    assert gtest.state == "passed"
    assert len(gtest.detected_suite_names) == 1
    assert gtest.detected_suite_names[0] == "suite_name"
    assert gtest.instance.get_case_by_name("id.suite_name.test_name") is not None
    assert gtest.instance.get_case_by_name("id.suite_name.test_name").status == "passed"


def test_gtest_one_skipped(gtest):
    process_logs(
        gtest,
        [
            SAMPLE_GTEST_START,
            SAMPLE_GTEST_FMT.format(
                state=GTEST_START_STATE, suite="suite_name", test="test_name"
            ),
            SAMPLE_GTEST_FMT.format(
                state=GTEST_PASS_STATE, suite="suite_name", test="test_name"
            ),
            SAMPLE_GTEST_FMT.format(
                state=GTEST_START_STATE, suite="suite_name", test="test_name1"
            ),
            SAMPLE_GTEST_FMT.format(
                state=GTEST_SKIP_STATE, suite="suite_name", test="test_name1"
            ),
            SAMPLE_GTEST_END,
        ],
    )
    assert gtest.state == "passed"
    assert len(gtest.detected_suite_names) == 1
    assert gtest.detected_suite_names[0] == "suite_name"
    assert gtest.instance.get_case_by_name("id.suite_name.test_name") is not None
    assert gtest.instance.get_case_by_name("id.suite_name.test_name").status == "passed"
    assert gtest.instance.get_case_by_name("id.suite_name.test_name1") is not None
    assert gtest.instance.get_case_by_name("id.suite_name.test_name1").status == "skipped"


def test_gtest_one_fail(gtest):
    process_logs(
        gtest,
        [
            SAMPLE_GTEST_START,
            SAMPLE_GTEST_FMT.format(
                state=GTEST_START_STATE, suite="suite_name", test="test0"
            ),
            SAMPLE_GTEST_FMT.format(
                state=GTEST_PASS_STATE, suite="suite_name", test="test0"
            ),
            SAMPLE_GTEST_FMT.format(
                state=GTEST_START_STATE, suite="suite_name", test="test1"
            ),
            SAMPLE_GTEST_FMT.format(
                state=GTEST_FAIL_STATE, suite="suite_name", test="test1"
            ),
            SAMPLE_GTEST_END,
        ],
    )
    assert gtest.state == "failed"
    assert len(gtest.detected_suite_names) == 1
    assert gtest.detected_suite_names[0] == "suite_name"
    assert gtest.instance.get_case_by_name("id.suite_name.test0") is not None
    assert gtest.instance.get_case_by_name("id.suite_name.test0").status == "passed"
    assert gtest.instance.get_case_by_name("id.suite_name.test1") is not None
    assert gtest.instance.get_case_by_name("id.suite_name.test1").status == "failed"


def test_gtest_missing_result(gtest):
    with pytest.raises(
        AssertionError,
        match=r"gTest error, id.suite_name.test0 didn't finish",
    ):
        process_logs(
            gtest,
            [
                SAMPLE_GTEST_START,
                SAMPLE_GTEST_FMT.format(
                    state=GTEST_START_STATE, suite="suite_name", test="test0"
                ),
                SAMPLE_GTEST_FMT.format(
                    state=GTEST_START_STATE, suite="suite_name", test="test1"
                ),
            ],
        )


def test_gtest_mismatch_result(gtest):
    with pytest.raises(
        AssertionError,
        match=r"gTest error, mismatched tests. Expected id.suite_name.test0 but got None",
    ):
        process_logs(
            gtest,
            [
                SAMPLE_GTEST_START,
                SAMPLE_GTEST_FMT.format(
                    state=GTEST_START_STATE, suite="suite_name", test="test0"
                ),
                SAMPLE_GTEST_FMT.format(
                    state=GTEST_PASS_STATE, suite="suite_name", test="test1"
                ),
            ],
        )


def test_gtest_repeated_result(gtest):
    with pytest.raises(
        AssertionError,
        match=r"gTest error, mismatched tests. Expected id.suite_name.test1 but got id.suite_name.test0",
    ):
        process_logs(
            gtest,
            [
                SAMPLE_GTEST_START,
                SAMPLE_GTEST_FMT.format(
                    state=GTEST_START_STATE, suite="suite_name", test="test0"
                ),
                SAMPLE_GTEST_FMT.format(
                    state=GTEST_PASS_STATE, suite="suite_name", test="test0"
                ),
                SAMPLE_GTEST_FMT.format(
                    state=GTEST_START_STATE, suite="suite_name", test="test1"
                ),
                SAMPLE_GTEST_FMT.format(
                    state=GTEST_PASS_STATE, suite="suite_name", test="test0"
                ),
            ],
        )


def test_gtest_repeated_run(gtest):
    with pytest.raises(
        AssertionError,
        match=r"gTest error, id.suite_name.test0 running twice",
    ):
        process_logs(
            gtest,
            [
                SAMPLE_GTEST_START,
                SAMPLE_GTEST_FMT.format(
                    state=GTEST_START_STATE, suite="suite_name", test="test0"
                ),
                SAMPLE_GTEST_FMT.format(
                    state=GTEST_PASS_STATE, suite="suite_name", test="test0"
                ),
                SAMPLE_GTEST_FMT.format(
                    state=GTEST_START_STATE, suite="suite_name", test="test0"
                ),
            ],
        )


def test_bsim_build(monkeypatch, tmp_path):
    mocked_instance = mock.Mock()
    build_dir = tmp_path / 'build_dir'
    os.makedirs(build_dir)
    mocked_instance.build_dir = str(build_dir)
    mocked_instance.name = 'platform_name/test/dummy.test'
    mocked_instance.testsuite.harness_config = {}

    harness = Bsim()
    harness.instance = mocked_instance

    monkeypatch.setenv('BSIM_OUT_PATH', str(tmp_path))
    os.makedirs(os.path.join(tmp_path, 'bin'), exist_ok=True)
    zephyr_exe_path = os.path.join(build_dir, 'zephyr', 'zephyr.exe')
    os.makedirs(os.path.dirname(zephyr_exe_path), exist_ok=True)
    with open(zephyr_exe_path, 'w') as file:
        file.write('TEST_EXE')

    harness.build()

    new_exe_path = os.path.join(tmp_path, 'bin', 'bs_platform_name_test_dummy_test')
    assert os.path.exists(new_exe_path)
    with open(new_exe_path, 'r') as file:
        exe_content = file.read()
    assert 'TEST_EXE' in exe_content