#!/usr/bin/env python3
# Copyright (c) 2020 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=line-too-long
"""
Tests for testinstance class
"""

import os
import sys
import pytest

ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
from twisterlib import TestInstance, BuildError, TestCase, TwisterException


# Each TESTDATA_1 row maps onto the parametrize signature below:
# (build_only, slow, harness, platform_type, platform_sim,
#  device_testing, fixture, (expected_build_only, expected_run))
TESTDATA_1 = [
    (False, False, "console", "na", "qemu", False, [], (False, True)),
    (False, False, "console", "native", "qemu", False, [], (False, True)),
    (True, False, "console", "native", "nsim", False, [], (True, False)),
    (True, True, "console", "native", "renode", False, [], (True, False)),
    (False, False, "sensor", "native", "", False, [], (True, False)),
    (False, False, "sensor", "na", "", False, [], (True, False)),
    (False, True, "sensor", "native", "", True, [], (True, False)),
]
@pytest.mark.parametrize("build_only, slow, harness, platform_type, platform_sim, device_testing, fixture, expected", TESTDATA_1)
def test_check_build_or_run(class_testsuite, monkeypatch, all_testcases_dict, platforms_list, build_only, slow, harness, platform_type, platform_sim, device_testing, fixture, expected):
    """Test the conditions for the build_only and run scenarios
    Scenario 1: Test that build_only and run are set correctly when different parameters are passed
    Scenario 2: Test that build_only is enforced when the OS is Windows"""

    class_testsuite.testcases = all_testcases_dict
    testcase = class_testsuite.testcases.get('scripts/tests/twister/test_data/testcases/tests/test_a/test_a.check_1')

    class_testsuite.platforms = platforms_list
    platform = class_testsuite.get_platform("demo_board_2")
    platform.type = platform_type
    platform.simulation = platform_sim
    testcase.harness = harness
    testcase.build_only = build_only
    testcase.slow = slow

    testinstance = TestInstance(testcase, platform, class_testsuite.outdir)
    run = testinstance.check_runnable(slow, device_testing, fixture)
    _, expected_run = expected
    assert run == expected_run

    # Scenario 2: on Windows (os.name == "nt") nothing is runnable.
    monkeypatch.setattr("os.name", "nt")
    run = testinstance.check_runnable()
    assert not run

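# Each TESTDATA_2 row below maps onto the parametrize signature:
# (enable_asan, enable_ubsan, enable_coverage, coverage_platform,
#  platform_type, expected overlay content). Reading the expectations,
# the coverage options should only be emitted when the platform appears
# in coverage_platform, and the ASAN/UBSAN options only for "native"
# platform types.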
TESTDATA_2 = [
    (True, True, True, ["demo_board_2"], "native", '\nCONFIG_COVERAGE=y\nCONFIG_COVERAGE_DUMP=y\nCONFIG_ASAN=y\nCONFIG_UBSAN=y'),
    (True, False, True, ["demo_board_2"], "native", '\nCONFIG_COVERAGE=y\nCONFIG_COVERAGE_DUMP=y\nCONFIG_ASAN=y'),
    (False, False, True, ["demo_board_2"], 'native', '\nCONFIG_COVERAGE=y\nCONFIG_COVERAGE_DUMP=y'),
    (True, False, True, ["demo_board_2"], 'mcu', '\nCONFIG_COVERAGE=y\nCONFIG_COVERAGE_DUMP=y'),
    (False, False, False, ["demo_board_2"], 'native', ''),
    (False, False, True, ['demo_board_1'], 'native', ''),
    (True, False, False, ["demo_board_2"], 'native', '\nCONFIG_ASAN=y'),
    (False, True, False, ["demo_board_2"], 'native', '\nCONFIG_UBSAN=y'),
]

@pytest.mark.parametrize("enable_asan, enable_ubsan, enable_coverage, coverage_platform, platform_type, expected_content", TESTDATA_2)
def test_create_overlay(class_testsuite, all_testcases_dict, platforms_list, enable_asan, enable_ubsan, enable_coverage, coverage_platform, platform_type, expected_content):
    """Test that the correct content is written to testcase_extra.conf for each combination of options
    TODO: Add extra_configs to the input list"""
    class_testsuite.testcases = all_testcases_dict
    testcase = class_testsuite.testcases.get('scripts/tests/twister/test_data/testcases/samples/test_app/sample_test.app')
    class_testsuite.platforms = platforms_list
    platform = class_testsuite.get_platform("demo_board_2")

    testinstance = TestInstance(testcase, platform, class_testsuite.outdir)
    platform.type = platform_type
    assert testinstance.create_overlay(platform, enable_asan, enable_ubsan, enable_coverage, coverage_platform) == expected_content

def test_calculate_sizes(class_testsuite, all_testcases_dict, platforms_list):
    """Test the calculate_sizes method when the Zephyr ELF binary is missing"""
    class_testsuite.testcases = all_testcases_dict
    testcase = class_testsuite.testcases.get('scripts/tests/twister/test_data/testcases/samples/test_app/sample_test.app')
    class_testsuite.platforms = platforms_list
    platform = class_testsuite.get_platform("demo_board_2")
    testinstance = TestInstance(testcase, platform, class_testsuite.outdir)

    # No build has been performed, so the ELF lookup must fail with a BuildError.
    with pytest.raises(BuildError, match="Missing/multiple output ELF binary"):
        testinstance.calculate_sizes()

# Each TESTDATA_3 row: (testcase_root, workdir, name, expected unique name).
TESTDATA_3 = [
    (ZEPHYR_BASE + '/scripts/tests/twister/test_data/testcases', ZEPHYR_BASE, '/scripts/tests/twister/test_data/testcases/tests/test_a/test_a.check_1', '/scripts/tests/twister/test_data/testcases/tests/test_a/test_a.check_1'),
    (ZEPHYR_BASE, '.', 'test_a.check_1', 'test_a.check_1'),
    (ZEPHYR_BASE, '/scripts/tests/twister/test_data/testcases/test_b', 'test_b.check_1', '/scripts/tests/twister/test_data/testcases/test_b/test_b.check_1'),
    (os.path.join(ZEPHYR_BASE, '/scripts/tests'), '.', 'test_b.check_1', 'test_b.check_1'),
    (os.path.join(ZEPHYR_BASE, '/scripts/tests'), '.', '.', '.'),
    (ZEPHYR_BASE, '.', 'test_a.check_1.check_2', 'test_a.check_1.check_2'),
]
@pytest.mark.parametrize("testcase_root, workdir, name, expected", TESTDATA_3)
def test_get_unique(testcase_root, workdir, name, expected):
    '''Test that a unique name is derived from each testcase root, workdir, and name'''
    unique = TestCase(testcase_root, workdir, name)
    assert unique.name == expected

TESTDATA_4 = [
    (ZEPHYR_BASE, '.', 'test_c', 'Tests should reference the category and subsystem with a dot as a separator.'),
    (os.path.join(ZEPHYR_BASE, '/scripts/tests'), '.', '', 'Tests should reference the category and subsystem with a dot as a separator.'),
]
@pytest.mark.parametrize("testcase_root, workdir, name, exception", TESTDATA_4)
def test_get_unique_exception(testcase_root, workdir, name, exception):
    '''Test that a TwisterException is raised when a test name does not reference the category and subsystem with a dot as a separator'''

    with pytest.raises(TwisterException) as exc_info:
        TestCase(testcase_root, workdir, name)
    assert str(exc_info.value) == exception

# Each TESTDATA_5 row: (ztest source file, expected warning, expected subcases).
TESTDATA_5 = [
    ("testcases/tests/test_ztest.c", None, ['a', 'c', 'unit_a', 'newline', 'test_test_aa', 'user', 'last']),
    ("testcases/tests/test_a/test_ztest_error.c", "Found a test that does not start with test_", ['1a', '1c', '2a', '2b']),
    ("testcases/tests/test_a/test_ztest_error_1.c", "found invalid #ifdef, #endif in ztest_test_suite()", ['unit_1a', 'unit_1b', 'Unit_1c']),
]

@pytest.mark.parametrize("test_file, expected_warnings, expected_subcases", TESTDATA_5)
def test_scan_file(test_data, test_file, expected_warnings, expected_subcases):
    '''Test the scan_file method with different ztest files for the expected warnings and subcases'''

    testcase = TestCase("/scripts/tests/twister/test_data/testcases/tests", ".", "test_a.check_1")

    results, warnings = testcase.scan_file(os.path.join(test_data, test_file))
    assert sorted(results) == sorted(expected_subcases)
    assert warnings == expected_warnings


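# Each TESTDATA_6 row below is (directory under test_data, expected
# subcases). scan_path() is expected to aggregate what scan_file()
# reports for every ztest source file in the directory, so these lists
# are unions of the per-file lists in TESTDATA_5.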
TESTDATA_6 = [
    ("testcases/tests", ['a', 'c', 'unit_a', 'newline', 'test_test_aa', 'user', 'last']),
    ("testcases/tests/test_a", ['unit_1a', 'unit_1b', 'Unit_1c', '1a', '1c', '2a', '2b']),
]

@pytest.mark.parametrize("test_path, expected_subcases", TESTDATA_6)
def test_subcases(test_data, test_path, expected_subcases):
    '''Test the scan_path and parse_subcases methods for the expected subcases'''
    testcase = TestCase("/scripts/tests/twister/test_data/testcases/tests", ".", "test_a.check_1")

    subcases = testcase.scan_path(os.path.join(test_data, test_path))
    assert sorted(subcases) == sorted(expected_subcases)

    # parse_subcases() should prefix every discovered subcase with the testcase id.
    testcase.id = "test_id"
    testcase.parse_subcases(os.path.join(test_data, test_path))
    assert sorted(testcase.cases) == [testcase.id + '.' + x for x in sorted(expected_subcases)]
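
# Note: with testcase.id == "test_id" and a discovered subcase "1a", the
# fully qualified case name checked above is "test_id.1a". Running these
# tests assumes ZEPHYR_BASE is exported and that the fixtures used here
# (class_testsuite, all_testcases_dict, platforms_list, test_data) come
# from a local conftest.py; a typical invocation (path assumed) would be:
#   ZEPHYR_BASE=$PWD pytest scripts/tests/twister/test_testinstance.py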