1#!/usr/bin/env python3 2# Copyright (c) 2020-2024 Intel Corporation 3# 4# SPDX-License-Identifier: Apache-2.0 5 6''' 7This test file contains testsuites for testsuite.py module of twister 8''' 9import sys 10import os 11import mock 12import pytest 13 14from contextlib import nullcontext 15 16ZEPHYR_BASE = os.getenv("ZEPHYR_BASE") 17sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister")) 18 19from twisterlib.statuses import TwisterStatus 20from twisterlib.testplan import TestPlan, change_skip_to_error_if_integration 21from twisterlib.testinstance import TestInstance 22from twisterlib.testsuite import TestSuite 23from twisterlib.platform import Platform 24from twisterlib.quarantine import Quarantine 25from twisterlib.error import TwisterRuntimeError 26 27 28def test_testplan_add_testsuites_short(class_testplan): 29 """ Testing add_testcase function of Testsuite class in twister """ 30 # Test 1: Check the list of testsuites after calling add testsuites function is as expected 31 class_testplan.SAMPLE_FILENAME = 'test_sample_app.yaml' 32 class_testplan.TESTSUITE_FILENAME = 'test_data.yaml' 33 class_testplan.add_testsuites() 34 35 tests_rel_dir = 'scripts/tests/twister/test_data/testsuites/tests/' 36 expected_testsuites = ['test_b.check_1', 37 'test_b.check_2', 38 'test_c.check_1', 39 'test_c.check_2', 40 'test_a.check_1', 41 'test_a.check_2', 42 'test_d.check_1', 43 'test_e.check_1', 44 'sample_test.app', 45 'test_config.main'] 46 testsuite_list = [] 47 for key in sorted(class_testplan.testsuites.keys()): 48 testsuite_list.append(os.path.basename(os.path.normpath(key))) 49 assert sorted(testsuite_list) == sorted(expected_testsuites) 50 51 # Test 2 : Assert Testcase name is expected & all testsuites values are testcase class objects 52 suite = class_testplan.testsuites.get(tests_rel_dir + 'test_a/test_a.check_1') 53 assert suite.name == tests_rel_dir + 'test_a/test_a.check_1' 54 assert all(isinstance(n, TestSuite) for n in class_testplan.testsuites.values()) 55 56@pytest.mark.parametrize("board_root_dir", [("board_config_file_not_exist"), ("board_config")]) 57def test_add_configurations_short(test_data, class_env, board_root_dir): 58 """ Testing add_configurations function of TestPlan class in Twister 59 Test : Asserting on default platforms list 60 """ 61 class_env.board_roots = [os.path.abspath(test_data + board_root_dir)] 62 plan = TestPlan(class_env) 63 plan.parse_configuration(config_file=class_env.test_config) 64 if board_root_dir == "board_config": 65 plan.add_configurations() 66 print(sorted(plan.default_platforms)) 67 assert sorted(plan.default_platforms) == sorted(['demo_board_1/unit_testing', 'demo_board_3/unit_testing']) 68 elif board_root_dir == "board_config_file_not_exist": 69 plan.add_configurations() 70 assert sorted(plan.default_platforms) != sorted(['demo_board_1']) 71 72 73def test_get_all_testsuites_short(class_testplan, all_testsuites_dict): 74 """ Testing get_all_testsuites function of TestPlan class in Twister """ 75 plan = class_testplan 76 plan.testsuites = all_testsuites_dict 77 expected_tests = ['sample_test.app', 'test_a.check_1.1a', 78 'test_a.check_1.1c', 79 'test_a.check_1.2a', 'test_a.check_1.2b', 80 'test_a.check_1.Unit_1c', 'test_a.check_1.unit_1a', 81 'test_a.check_1.unit_1b', 'test_a.check_2.1a', 82 'test_a.check_2.1c', 'test_a.check_2.2a', 83 'test_a.check_2.2b', 'test_a.check_2.Unit_1c', 84 'test_a.check_2.unit_1a', 'test_a.check_2.unit_1b', 85 'test_b.check_1', 'test_b.check_2', 'test_c.check_1', 86 'test_c.check_2', 'test_d.check_1.unit_1a', 87 'test_d.check_1.unit_1b', 88 'test_e.check_1.feature5.1a', 89 'test_e.check_1.feature5.1b', 90 'test_config.main'] 91 92 assert sorted(plan.get_all_tests()) == sorted(expected_tests) 93 94def test_get_platforms_short(class_testplan, platforms_list): 95 """ Testing get_platforms function of TestPlan class in Twister """ 96 plan = class_testplan 97 plan.platforms = platforms_list 98 platform = plan.get_platform("demo_board_1") 99 assert isinstance(platform, Platform) 100 assert platform.name == "demo_board_1/unit_testing" 101 102TESTDATA_PART1 = [ 103 ("toolchain_allow", ['gcc'], None, None, "Not in testsuite toolchain allow list"), 104 ("platform_allow", ['demo_board_1/unit_testing'], None, None, "Not in testsuite platform allow list"), 105 ("toolchain_exclude", ['zephyr'], None, None, "In test case toolchain exclude"), 106 ("platform_exclude", ['demo_board_2'], None, None, "In test case platform exclude"), 107 ("arch_exclude", ['x86'], None, None, "In test case arch exclude"), 108 ("arch_allow", ['arm'], None, None, "Not in test case arch allow list"), 109 ("skip", True, None, None, "Skip filter"), 110 ("tags", set(['sensor', 'bluetooth']), "ignore_tags", ['bluetooth'], "Excluded tags per platform (exclude_tags)"), 111 ("min_flash", "2024", "flash", "1024", "Not enough FLASH"), 112 ("min_ram", "500", "ram", "256", "Not enough RAM"), 113 ("None", "None", "env", ['BSIM_OUT_PATH', 'demo_env'], "Environment (BSIM_OUT_PATH, demo_env) not satisfied"), 114 ("build_on_all", True, None, None, "Platform is excluded on command line."), 115 ("build_on_all", True, "level", "foobar", "Unknown test level 'foobar'"), 116 (None, None, "supported_toolchains", ['gcc', 'xcc', 'xt-clang'], "Not supported by the toolchain"), 117] 118 119 120@pytest.mark.parametrize("tc_attribute, tc_value, plat_attribute, plat_value, expected_discards", 121 TESTDATA_PART1) 122def test_apply_filters_part1(class_testplan, all_testsuites_dict, platforms_list, 123 tc_attribute, tc_value, plat_attribute, plat_value, expected_discards): 124 """ Testing apply_filters function of TestPlan class in Twister 125 Part 1: Response of apply_filters function have 126 appropriate values according to the filters 127 """ 128 plan = class_testplan 129 if tc_attribute is None and plat_attribute is None: 130 plan.apply_filters() 131 132 plan.platforms = platforms_list 133 plan.platform_names = [p.name for p in platforms_list] 134 plan.testsuites = all_testsuites_dict 135 for plat in plan.platforms: 136 if plat_attribute == "ignore_tags": 137 plat.ignore_tags = plat_value 138 if plat_attribute == "flash": 139 plat.flash = plat_value 140 if plat_attribute == "ram": 141 plat.ram = plat_value 142 if plat_attribute == "env": 143 plat.env = plat_value 144 plat.env_satisfied = False 145 if plat_attribute == "supported_toolchains": 146 plat.supported_toolchains = plat_value 147 for _, testcase in plan.testsuites.items(): 148 if tc_attribute == "toolchain_allow": 149 testcase.toolchain_allow = tc_value 150 if tc_attribute == "platform_allow": 151 testcase.platform_allow = tc_value 152 if tc_attribute == "toolchain_exclude": 153 testcase.toolchain_exclude = tc_value 154 if tc_attribute == "platform_exclude": 155 testcase.platform_exclude = tc_value 156 if tc_attribute == "arch_exclude": 157 testcase.arch_exclude = tc_value 158 if tc_attribute == "arch_allow": 159 testcase.arch_allow = tc_value 160 if tc_attribute == "skip": 161 testcase.skip = tc_value 162 if tc_attribute == "tags": 163 testcase.tags = tc_value 164 if tc_attribute == "min_flash": 165 testcase.min_flash = tc_value 166 if tc_attribute == "min_ram": 167 testcase.min_ram = tc_value 168 169 if plat_attribute == "level": 170 plan.options.level = plat_value 171 172 if tc_attribute == "build_on_all": 173 for _, testcase in plan.testsuites.items(): 174 testcase.build_on_all = tc_value 175 plan.apply_filters(exclude_platform=['demo_board_1']) 176 elif plat_attribute == "supported_toolchains": 177 plan.apply_filters(force_toolchain=False, 178 exclude_platform=['demo_board_1'], 179 platform=['demo_board_2/unit_testing']) 180 elif tc_attribute is None and plat_attribute is None: 181 plan.apply_filters() 182 else: 183 plan.apply_filters(exclude_platform=['demo_board_1'], 184 platform=['demo_board_2/unit_testing']) 185 186 filtered_instances = list(filter(lambda item: item.status == TwisterStatus.FILTER, plan.instances.values())) 187 for d in filtered_instances: 188 assert d.reason == expected_discards 189 190TESTDATA_PART2 = [ 191 ("runnable", "True", "Not runnable on device"), 192 ("exclude_tag", ['test_a'], "Command line testsuite exclude filter"), 193 ("run_individual_tests", ['scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1'], "TestSuite name filter"), 194 ("arch", ['arm_test'], "Command line testsuite arch filter"), 195 ("tag", ['test_d'], "Command line testsuite tag filter") 196 ] 197 198 199@pytest.mark.parametrize("extra_filter, extra_filter_value, expected_discards", TESTDATA_PART2) 200def test_apply_filters_part2(class_testplan, all_testsuites_dict, 201 platforms_list, extra_filter, extra_filter_value, expected_discards): 202 """ Testing apply_filters function of TestPlan class in Twister 203 Part 2 : Response of apply_filters function (discard dictionary) have 204 appropriate values according to the filters 205 """ 206 207 class_testplan.platforms = platforms_list 208 class_testplan.platform_names = [p.name for p in platforms_list] 209 class_testplan.testsuites = all_testsuites_dict 210 kwargs = { 211 extra_filter : extra_filter_value, 212 "exclude_platform" : [ 213 'demo_board_1' 214 ], 215 "platform" : [ 216 'demo_board_2' 217 ] 218 } 219 class_testplan.apply_filters(**kwargs) 220 filtered_instances = list(filter(lambda item: item.status == TwisterStatus.FILTER, class_testplan.instances.values())) 221 for d in filtered_instances: 222 assert d.reason == expected_discards 223 224 225TESTDATA_PART3 = [ 226 (20, 20, -1, 0), 227 (-2, -1, 10, 20), 228 (0, 0, 0, 0) 229 ] 230 231@pytest.mark.parametrize("tc_min_flash, plat_flash, tc_min_ram, plat_ram", 232 TESTDATA_PART3) 233def test_apply_filters_part3(class_testplan, all_testsuites_dict, platforms_list, 234 tc_min_flash, plat_flash, tc_min_ram, plat_ram): 235 """ Testing apply_filters function of TestPlan class in Twister 236 Part 3 : Testing edge cases for ram and flash values of platforms & testsuites 237 """ 238 class_testplan.platforms = platforms_list 239 class_testplan.platform_names = [p.name for p in platforms_list] 240 class_testplan.testsuites = all_testsuites_dict 241 242 for plat in class_testplan.platforms: 243 plat.flash = plat_flash 244 plat.ram = plat_ram 245 for _, testcase in class_testplan.testsuites.items(): 246 testcase.min_ram = tc_min_ram 247 testcase.min_flash = tc_min_flash 248 class_testplan.apply_filters(exclude_platform=['demo_board_1'], 249 platform=['demo_board_2']) 250 251 filtered_instances = list(filter(lambda item: item.status == TwisterStatus.FILTER, class_testplan.instances.values())) 252 assert not filtered_instances 253 254def test_add_instances_short(tmp_path, class_env, all_testsuites_dict, platforms_list): 255 """ Testing add_instances() function of TestPlan class in Twister 256 Test 1: instances dictionary keys have expected values (Platform Name + Testcase Name) 257 Test 2: Values of 'instances' dictionary in Testsuite class are an 258 instance of 'TestInstance' class 259 Test 3: Values of 'instances' dictionary have expected values. 260 """ 261 class_env.outdir = tmp_path 262 plan = TestPlan(class_env) 263 plan.platforms = platforms_list 264 platform = plan.get_platform("demo_board_2") 265 instance_list = [] 266 for _, testcase in all_testsuites_dict.items(): 267 instance = TestInstance(testcase, platform, class_env.outdir) 268 instance_list.append(instance) 269 plan.add_instances(instance_list) 270 assert list(plan.instances.keys()) == \ 271 [platform.name + '/' + s for s in list(all_testsuites_dict.keys())] 272 assert all(isinstance(n, TestInstance) for n in list(plan.instances.values())) 273 assert list(plan.instances.values()) == instance_list 274 275 276QUARANTINE_BASIC = { 277 'demo_board_1/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1' : 'a1 on board_1 and board_3', 278 'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1' : 'a1 on board_1 and board_3' 279} 280 281QUARANTINE_WITH_REGEXP = { 282 'demo_board_2/unit_testing/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_2' : 'a2 and c2 on x86', 283 'demo_board_1/unit_testing/scripts/tests/twister/test_data/testsuites/tests/test_d/test_d.check_1' : 'all test_d', 284 'demo_board_3/unit_testing/scripts/tests/twister/test_data/testsuites/tests/test_d/test_d.check_1' : 'all test_d', 285 'demo_board_2/unit_testing/scripts/tests/twister/test_data/testsuites/tests/test_d/test_d.check_1' : 'all test_d', 286 'demo_board_2/unit_testing/scripts/tests/twister/test_data/testsuites/tests/test_c/test_c.check_2' : 'a2 and c2 on x86' 287} 288 289QUARANTINE_PLATFORM = { 290 'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1' : 'all on board_3', 291 'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_2' : 'all on board_3', 292 'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_d/test_d.check_1' : 'all on board_3', 293 'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_b/test_b.check_1' : 'all on board_3', 294 'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_b/test_b.check_2' : 'all on board_3', 295 'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_c/test_c.check_1' : 'all on board_3', 296 'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_c/test_c.check_2' : 'all on board_3', 297 'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_e/test_e.check_1' : 'all on board_3', 298 'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_config/test_config.main' : 'all on board_3' 299} 300 301QUARANTINE_MULTIFILES = { 302 **QUARANTINE_BASIC, 303 **QUARANTINE_WITH_REGEXP 304} 305 306@pytest.mark.parametrize( 307 ("quarantine_files, quarantine_verify, expected_val"), 308 [ 309 (['basic.yaml'], False, QUARANTINE_BASIC), 310 (['with_regexp.yaml'], False, QUARANTINE_WITH_REGEXP), 311 (['with_regexp.yaml'], True, QUARANTINE_WITH_REGEXP), 312 (['platform.yaml'], False, QUARANTINE_PLATFORM), 313 (['basic.yaml', 'with_regexp.yaml'], False, QUARANTINE_MULTIFILES), 314 (['empty.yaml'], False, {}) 315 ], 316 ids=[ 317 'basic', 318 'with_regexp', 319 'quarantine_verify', 320 'platform', 321 'multifiles', 322 'empty' 323 ]) 324def test_quarantine_short(class_testplan, platforms_list, test_data, 325 quarantine_files, quarantine_verify, expected_val): 326 """ Testing quarantine feature in Twister 327 """ 328 class_testplan.options.all = True 329 class_testplan.platforms = platforms_list 330 class_testplan.platform_names = [p.name for p in platforms_list] 331 class_testplan.TESTSUITE_FILENAME = 'test_data.yaml' 332 class_testplan.add_testsuites() 333 334 quarantine_list = [ 335 os.path.join(test_data, 'quarantines', quarantine_file) for quarantine_file in quarantine_files 336 ] 337 class_testplan.quarantine = Quarantine(quarantine_list) 338 class_testplan.options.quarantine_verify = quarantine_verify 339 class_testplan.apply_filters() 340 for testname, instance in class_testplan.instances.items(): 341 if quarantine_verify: 342 if testname in expected_val: 343 assert instance.status == TwisterStatus.NONE 344 else: 345 assert instance.status == TwisterStatus.FILTER 346 assert instance.reason == "Not under quarantine" 347 else: 348 if testname in expected_val: 349 assert instance.status == TwisterStatus.FILTER 350 assert instance.reason == "Quarantine: " + expected_val[testname] 351 else: 352 assert instance.status == TwisterStatus.NONE 353 354 355TESTDATA_PART4 = [ 356 (os.path.join('test_d', 'test_d.check_1'), ['dummy'], 357 None, 'Snippet not supported'), 358 (os.path.join('test_c', 'test_c.check_1'), ['cdc-acm-console'], 359 0, None), 360 (os.path.join('test_d', 'test_d.check_1'), ['dummy', 'cdc-acm-console'], 361 2, 'Snippet not supported'), 362] 363 364@pytest.mark.parametrize( 365 'testpath, required_snippets, expected_filtered_len, expected_filtered_reason', 366 TESTDATA_PART4, 367 ids=['app', 'global', 'multiple'] 368) 369def test_required_snippets_short( 370 class_testplan, 371 all_testsuites_dict, 372 platforms_list, 373 testpath, 374 required_snippets, 375 expected_filtered_len, 376 expected_filtered_reason 377): 378 """ Testing required_snippets function of TestPlan class in Twister """ 379 plan = class_testplan 380 testpath = os.path.join('scripts', 'tests', 'twister', 'test_data', 381 'testsuites', 'tests', testpath) 382 testsuite = class_testplan.testsuites.get(testpath) 383 plan.platforms = platforms_list 384 print(platforms_list) 385 plan.platform_names = [p.name for p in platforms_list] 386 plan.testsuites = {testpath: testsuite} 387 388 for _, testcase in plan.testsuites.items(): 389 testcase.exclude_platform = [] 390 testcase.required_snippets = required_snippets 391 testcase.build_on_all = True 392 393 plan.apply_filters() 394 395 filtered_instances = list( 396 filter(lambda item: item.status == TwisterStatus.FILTER, plan.instances.values()) 397 ) 398 if expected_filtered_len is not None: 399 assert len(filtered_instances) == expected_filtered_len 400 if expected_filtered_reason is not None: 401 for d in filtered_instances: 402 assert d.reason == expected_filtered_reason 403 404 405def test_testplan_get_level(): 406 testplan = TestPlan(env=mock.Mock()) 407 lvl1 = mock.Mock() 408 lvl1.name = 'a lvl' 409 lvl2 = mock.Mock() 410 lvl2.name = 'a lvl' 411 lvl3 = mock.Mock() 412 lvl3.name = 'other lvl' 413 testplan.levels.append(lvl1) 414 testplan.levels.append(lvl2) 415 testplan.levels.append(lvl3) 416 417 name = 'a lvl' 418 419 res = testplan.get_level(name) 420 assert res == lvl1 421 422 res = testplan.get_level(name) 423 assert res == lvl1 424 425 lvl_missed = mock.Mock() 426 lvl_missed.name = 'missed lvl' 427 res = testplan.get_level('missed_lvl') 428 assert res is None 429 430 testplan.levels.remove(lvl1) 431 testplan.levels.remove(lvl2) 432 433 res = testplan.get_level(name) 434 assert res is None 435 436 437TESTDATA_1 = [ 438 ('', {}), 439 ( 440"""\ 441levels: 442 - name: lvl1 443 adds: 444 - sc1 445 - sc2 446 inherits: [] 447 - name: lvl2 448 adds: 449 - sc1-1 450 - sc1-2 451 inherits: [lvl1] 452""", 453 { 454 'lvl1': ['sc1', 'sc2'], 455 'lvl2': ['sc1-1', 'sc1-2', 'sc1', 'sc2'] 456 } 457 ), 458] 459 460@pytest.mark.parametrize( 461 'config_yaml, expected_scenarios', 462 TESTDATA_1, 463 ids=['no config', 'valid config'] 464) 465def test_testplan_parse_configuration(tmp_path, config_yaml, expected_scenarios): 466 testplan = TestPlan(env=mock.Mock()) 467 testplan.scenarios = ['sc1', 'sc1-1', 'sc1-2', 'sc2'] 468 469 tmp_config_file = tmp_path / 'config_file.yaml' 470 if config_yaml: 471 tmp_config_file.write_text(config_yaml) 472 473 with pytest.raises(TwisterRuntimeError) if not config_yaml else nullcontext(): 474 testplan.parse_configuration(tmp_config_file) 475 476 if not testplan.levels: 477 assert expected_scenarios == {} 478 for level in testplan.levels: 479 assert sorted(level.scenarios) == sorted(expected_scenarios[level.name]) 480 481 482TESTDATA_2 = [ 483 ([], [], False), 484 (['ts1.tc3'], [], True), 485 (['ts2.tc2'], ['- ts2'], False), 486] 487 488@pytest.mark.parametrize( 489 'sub_tests, expected_outs, expect_error', 490 TESTDATA_2, 491 ids=['no subtests', 'subtests not found', 'valid subtests'] 492) 493def test_testplan_find_subtests( 494 capfd, 495 sub_tests, 496 expected_outs, 497 expect_error 498): 499 testplan = TestPlan(env=mock.Mock()) 500 testplan.options = mock.Mock(sub_test=sub_tests) 501 testplan.run_individual_testsuite = [] 502 testplan.testsuites = { 503 'ts1': mock.Mock( 504 testcases=[ 505 mock.Mock(), 506 mock.Mock(), 507 ] 508 ), 509 'ts2': mock.Mock( 510 testcases=[ 511 mock.Mock(), 512 mock.Mock(), 513 mock.Mock(), 514 ] 515 ) 516 } 517 testplan.testsuites['ts1'].name = 'ts1' 518 testplan.testsuites['ts1'].testcases[0].name = 'ts1.tc1' 519 testplan.testsuites['ts1'].testcases[1].name = 'ts1.tc2' 520 testplan.testsuites['ts2'].name = 'ts2' 521 testplan.testsuites['ts2'].testcases[0].name = 'ts2.tc1' 522 testplan.testsuites['ts2'].testcases[1].name = 'ts2.tc2' 523 testplan.testsuites['ts2'].testcases[2].name = 'ts2.tc3' 524 525 with pytest.raises(TwisterRuntimeError) if expect_error else nullcontext(): 526 testplan.find_subtests() 527 528 out, err = capfd.readouterr() 529 sys.stdout.write(out) 530 sys.stdout.write(err) 531 532 assert all([printout in out for printout in expected_outs]) 533 534 535TESTDATA_3 = [ 536 (0, 0, [], False, [], TwisterRuntimeError, []), 537 (1, 1, [], False, [], TwisterRuntimeError, []), 538 (1, 0, [], True, [], TwisterRuntimeError, ['No quarantine list given to be verified']), 539# (1, 0, ['qfile.yaml'], False, ['# empty'], None, ['Quarantine file qfile.yaml is empty']), 540 (1, 0, ['qfile.yaml'], False, ['- platforms:\n - demo_board_3\n comment: "board_3"'], None, []), 541] 542 543@pytest.mark.parametrize( 544 'added_testsuite_count, load_errors, ql, qv, ql_data, exception, expected_logs', 545 TESTDATA_3, 546 ids=['no tests', 'load errors', 'quarantine verify without quarantine list', 547# 'empty quarantine file', 548 'valid quarantine file'] 549) 550def test_testplan_discover( 551 tmp_path, 552 caplog, 553 added_testsuite_count, 554 load_errors, 555 ql, 556 qv, 557 ql_data, 558 exception, 559 expected_logs 560): 561 for qf, data in zip(ql, ql_data): 562 tmp_qf = tmp_path / qf 563 tmp_qf.write_text(data) 564 565 testplan = TestPlan(env=mock.Mock()) 566 testplan.options = mock.Mock( 567 test='ts1', 568 quarantine_list=[tmp_path / qf for qf in ql], 569 quarantine_verify=qv, 570 ) 571 testplan.testsuites = { 572 'ts1': mock.Mock(id=1), 573 'ts2': mock.Mock(id=2), 574 } 575 testplan.run_individual_testsuite = 'ts0' 576 testplan.load_errors = load_errors 577 testplan.add_testsuites = mock.Mock(return_value=added_testsuite_count) 578 testplan.find_subtests = mock.Mock() 579 testplan.report_duplicates = mock.Mock() 580 testplan.parse_configuration = mock.Mock() 581 testplan.add_configurations = mock.Mock() 582 583 with pytest.raises(exception) if exception else nullcontext(): 584 testplan.discover() 585 586 testplan.add_testsuites.assert_called_once_with(testsuite_filter='ts1') 587 assert all([log in caplog.text for log in expected_logs]) 588 589 590TESTDATA_4 = [ 591 (None, None, None, None, '00', 592 TwisterRuntimeError, [], []), 593 (None, True, None, None, '6/4', 594 TwisterRuntimeError, set(['t-p3', 't-p4', 't-p1', 't-p2']), []), 595 (None, None, 'load_tests.json', None, '0/4', 596 TwisterRuntimeError, set(['lt-p1', 'lt-p3', 'lt-p4', 'lt-p2']), []), 597 ('suffix', None, None, True, '2/4', 598 None, set(['ts-p4', 'ts-p2', 'ts-p1', 'ts-p3']), [2, 4]), 599] 600 601@pytest.mark.parametrize( 602 'report_suffix, only_failed, load_tests, test_only, subset,' \ 603 ' exception, expected_selected_platforms, expected_generate_subset_args', 604 TESTDATA_4, 605 ids=['apply_filters only', 'only failed', 'load tests', 'test only'] 606) 607def test_testplan_load( 608 tmp_path, 609 report_suffix, 610 only_failed, 611 load_tests, 612 test_only, 613 subset, 614 exception, 615 expected_selected_platforms, 616 expected_generate_subset_args 617): 618 twister_json = """\ 619{ 620 "testsuites": [ 621 { 622 "name": "ts1", 623 "platform": "t-p1", 624 "testcases": [] 625 }, 626 { 627 "name": "ts1", 628 "platform": "t-p2", 629 "testcases": [] 630 }, 631 { 632 "name": "ts2", 633 "platform": "t-p3", 634 "testcases": [] 635 }, 636 { 637 "name": "ts2", 638 "platform": "t-p4", 639 "testcases": [] 640 } 641 ] 642} 643""" 644 twister_file = tmp_path / 'twister.json' 645 twister_file.write_text(twister_json) 646 647 twister_suffix_json = """\ 648{ 649 "testsuites": [ 650 { 651 "name": "ts1", 652 "platform": "ts-p1", 653 "testcases": [] 654 }, 655 { 656 "name": "ts1", 657 "platform": "ts-p2", 658 "testcases": [] 659 }, 660 { 661 "name": "ts2", 662 "platform": "ts-p3", 663 "testcases": [] 664 }, 665 { 666 "name": "ts2", 667 "platform": "ts-p4", 668 "testcases": [] 669 } 670 ] 671} 672""" 673 twister_suffix_file = tmp_path / 'twister_suffix.json' 674 twister_suffix_file.write_text(twister_suffix_json) 675 676 load_tests_json = """\ 677{ 678 "testsuites": [ 679 { 680 "name": "ts1", 681 "platform": "lt-p1", 682 "testcases": [] 683 }, 684 { 685 "name": "ts1", 686 "platform": "lt-p2", 687 "testcases": [] 688 }, 689 { 690 "name": "ts2", 691 "platform": "lt-p3", 692 \"testcases": [] 693 }, 694 { 695 "name": "ts2", 696 "platform": "lt-p4", 697 "testcases": [] 698 } 699 ] 700} 701""" 702 load_tests_file = tmp_path / 'load_tests.json' 703 load_tests_file.write_text(load_tests_json) 704 705 testplan = TestPlan(env=mock.Mock(outdir=tmp_path)) 706 testplan.testsuites = { 707 'ts1': mock.Mock(testcases=[], extra_configs=[]), 708 'ts2': mock.Mock(testcases=[], extra_configs=[]), 709 } 710 testplan.testsuites['ts1'].name = 'ts1' 711 testplan.testsuites['ts2'].name = 'ts2' 712 testplan.options = mock.Mock( 713 report_summary=None, 714 outdir=tmp_path, 715 report_suffix=report_suffix, 716 only_failed=only_failed, 717 load_tests=tmp_path / load_tests if load_tests else None, 718 test_only=test_only, 719 exclude_platform=['t-p0', 't-p1', 720 'ts-p0', 'ts-p1', 721 'lt-p0', 'lt-p1'], 722 platform=['t-p1', 't-p2', 't-p3', 't-p4', 723 'ts-p1', 'ts-p2', 'ts-p3', 'ts-p4', 724 'lt-p1', 'lt-p2', 'lt-p3', 'lt-p4'], 725 subset=subset 726 ) 727 testplan.platforms=[mock.Mock(), mock.Mock(), mock.Mock(), mock.Mock(), 728 mock.Mock(), mock.Mock(), mock.Mock(), mock.Mock(), 729 mock.Mock(), mock.Mock(), mock.Mock(), mock.Mock()] 730 testplan.platforms[0].name = 't-p1' 731 testplan.platforms[1].name = 't-p2' 732 testplan.platforms[2].name = 't-p3' 733 testplan.platforms[3].name = 't-p4' 734 testplan.platforms[4].name = 'ts-p1' 735 testplan.platforms[5].name = 'ts-p2' 736 testplan.platforms[6].name = 'ts-p3' 737 testplan.platforms[7].name = 'ts-p4' 738 testplan.platforms[8].name = 'lt-p1' 739 testplan.platforms[9].name = 'lt-p2' 740 testplan.platforms[10].name = 'lt-p3' 741 testplan.platforms[11].name = 'lt-p4' 742 testplan.platforms[0].aliases = ['t-p1'] 743 testplan.platforms[1].aliases = ['t-p2'] 744 testplan.platforms[2].aliases = ['t-p3'] 745 testplan.platforms[3].aliases = ['t-p4'] 746 testplan.platforms[4].aliases = ['ts-p1'] 747 testplan.platforms[5].aliases = ['ts-p2'] 748 testplan.platforms[6].aliases = ['ts-p3'] 749 testplan.platforms[7].aliases = ['ts-p4'] 750 testplan.platforms[8].aliases = ['lt-p1'] 751 testplan.platforms[9].aliases = ['lt-p2'] 752 testplan.platforms[10].aliases = ['lt-p3'] 753 testplan.platforms[11].aliases = ['lt-p4'] 754 testplan.platforms[0].normalized_name = 't-p1' 755 testplan.platforms[1].normalized_name = 't-p2' 756 testplan.platforms[2].normalized_name = 't-p3' 757 testplan.platforms[3].normalized_name = 't-p4' 758 testplan.platforms[4].normalized_name = 'ts-p1' 759 testplan.platforms[5].normalized_name = 'ts-p2' 760 testplan.platforms[6].normalized_name = 'ts-p3' 761 testplan.platforms[7].normalized_name = 'ts-p4' 762 testplan.platforms[8].normalized_name = 'lt-p1' 763 testplan.platforms[9].normalized_name = 'lt-p2' 764 testplan.platforms[10].normalized_name = 'lt-p3' 765 testplan.platforms[11].normalized_name = 'lt-p4' 766 testplan.generate_subset = mock.Mock() 767 testplan.apply_filters = mock.Mock() 768 769 with mock.patch('twisterlib.testinstance.TestInstance.create_overlay', mock.Mock()), \ 770 mock.patch('twisterlib.testinstance.TestInstance.check_runnable', return_value=True), \ 771 pytest.raises(exception) if exception else nullcontext(): 772 testplan.load() 773 774 assert testplan.selected_platforms == expected_selected_platforms 775 if expected_generate_subset_args: 776 testplan.generate_subset.assert_called_once_with(*expected_generate_subset_args) 777 else: 778 testplan.generate_subset.assert_not_called() 779 780 781TESTDATA_5 = [ 782 (False, False, None, 1, 2, 783 ['plat1/testA', 'plat1/testB', 'plat1/testC', 784 'plat3/testA', 'plat3/testB', 'plat3/testC']), 785 (False, False, None, 1, 5, 786 ['plat1/testA', 787 'plat3/testA', 'plat3/testB', 'plat3/testC']), 788 (False, False, None, 2, 2, 789 ['plat2/testA', 'plat2/testB']), 790 (True, False, None, 1, 2, 791 ['plat1/testA', 'plat2/testA', 'plat1/testB', 792 'plat3/testA', 'plat3/testB', 'plat3/testC']), 793 (True, False, None, 2, 2, 794 ['plat2/testB', 'plat1/testC']), 795 (True, True, 123, 1, 2, 796 ['plat2/testA', 'plat2/testB', 'plat1/testC', 797 'plat3/testB', 'plat3/testA', 'plat3/testC']), 798 (True, True, 123, 2, 2, 799 ['plat1/testB', 'plat1/testA']), 800] 801 802@pytest.mark.parametrize( 803 'device_testing, shuffle, seed, subset, sets, expected_subset', 804 TESTDATA_5, 805 ids=['subset 1', 'subset 1 out of 5', 'subset 2', 806 'device testing, subset 1', 'device testing, subset 2', 807 'device testing, shuffle with seed, subset 1', 808 'device testing, shuffle with seed, subset 2'] 809) 810def test_testplan_generate_subset( 811 device_testing, 812 shuffle, 813 seed, 814 subset, 815 sets, 816 expected_subset 817): 818 testplan = TestPlan(env=mock.Mock()) 819 testplan.options = mock.Mock( 820 device_testing=device_testing, 821 shuffle_tests=shuffle, 822 shuffle_tests_seed=seed 823 ) 824 testplan.instances = { 825 'plat1/testA': mock.Mock(status=TwisterStatus.NONE), 826 'plat1/testB': mock.Mock(status=TwisterStatus.NONE), 827 'plat1/testC': mock.Mock(status=TwisterStatus.NONE), 828 'plat2/testA': mock.Mock(status=TwisterStatus.NONE), 829 'plat2/testB': mock.Mock(status=TwisterStatus.NONE), 830 'plat3/testA': mock.Mock(status=TwisterStatus.SKIP), 831 'plat3/testB': mock.Mock(status=TwisterStatus.SKIP), 832 'plat3/testC': mock.Mock(status=TwisterStatus.ERROR), 833 } 834 835 testplan.generate_subset(subset, sets) 836 837 assert [instance for instance in testplan.instances.keys()] == \ 838 expected_subset 839 840 841def test_testplan_handle_modules(): 842 testplan = TestPlan(env=mock.Mock()) 843 844 modules = [mock.Mock(meta={'name': 'name1'}), 845 mock.Mock(meta={'name': 'name2'})] 846 847 with mock.patch('twisterlib.testplan.parse_modules', return_value=modules): 848 testplan.handle_modules() 849 850 assert testplan.modules == ['name1', 'name2'] 851 852 853TESTDATA_6 = [ 854 (True, False, False, 0, 'report_test_tree'), 855 (True, True, False, 0, 'report_test_tree'), 856 (True, False, True, 0, 'report_test_tree'), 857 (True, True, True, 0, 'report_test_tree'), 858 (False, True, False, 0, 'report_test_list'), 859 (False, True, True, 0, 'report_test_list'), 860 (False, False, True, 0, 'report_tag_list'), 861 (False, False, False, 1, None), 862] 863 864@pytest.mark.parametrize( 865 'test_tree, list_tests, list_tags, expected_res, expected_method', 866 TESTDATA_6, 867 ids=['test tree', 'test tree + test list', 'test tree + tag list', 868 'test tree + test list + tag list', 'test list', 869 'test list + tag list', 'tag list', 'no report'] 870) 871def test_testplan_report( 872 test_tree, 873 list_tests, 874 list_tags, 875 expected_res, 876 expected_method 877): 878 testplan = TestPlan(env=mock.Mock()) 879 testplan.report_test_tree = mock.Mock() 880 testplan.report_test_list = mock.Mock() 881 testplan.report_tag_list = mock.Mock() 882 883 testplan.options = mock.Mock( 884 test_tree=test_tree, 885 list_tests=list_tests, 886 list_tags=list_tags, 887 ) 888 889 res = testplan.report() 890 891 assert res == expected_res 892 893 methods = ['report_test_tree', 'report_test_list', 'report_tag_list'] 894 if expected_method: 895 methods.remove(expected_method) 896 getattr(testplan, expected_method).assert_called_once() 897 for method in methods: 898 getattr(testplan, method).assert_not_called() 899 900 901TESTDATA_7 = [ 902 ( 903 [ 904 mock.Mock( 905 yamlfile='a.yaml', 906 scenarios=['scenario1', 'scenario2'] 907 ), 908 mock.Mock( 909 yamlfile='b.yaml', 910 scenarios=['scenario1'] 911 ) 912 ], 913 TwisterRuntimeError, 914 'Duplicated test scenarios found:\n' \ 915 '- scenario1 found in:\n' \ 916 ' - a.yaml\n' \ 917 ' - b.yaml\n', 918 [] 919 ), 920 ( 921 [ 922 mock.Mock( 923 yamlfile='a.yaml', 924 scenarios=['scenario.a.1', 'scenario.a.2'] 925 ), 926 mock.Mock( 927 yamlfile='b.yaml', 928 scenarios=['scenario.b.1'] 929 ) 930 ], 931 None, 932 None, 933 ['No duplicates found.'] 934 ), 935] 936 937@pytest.mark.parametrize( 938 'testsuites, expected_error, error_msg, expected_logs', 939 TESTDATA_7, 940 ids=['a duplicate', 'no duplicates'] 941) 942def test_testplan_report_duplicates( 943 capfd, 944 caplog, 945 testsuites, 946 expected_error, 947 error_msg, 948 expected_logs 949): 950 def mock_get(name): 951 return list(filter(lambda x: name in x.scenarios, testsuites)) 952 953 testplan = TestPlan(env=mock.Mock()) 954 testplan.scenarios = [scenario for testsuite in testsuites \ 955 for scenario in testsuite.scenarios] 956 testplan.get_testsuite = mock.Mock(side_effect=mock_get) 957 958 with pytest.raises(expected_error) if expected_error is not None else \ 959 nullcontext() as err: 960 testplan.report_duplicates() 961 962 if expected_error: 963 assert str(err._excinfo[1]) == error_msg 964 965 assert all([log in caplog.text for log in expected_logs]) 966 967 968def test_testplan_report_tag_list(capfd): 969 testplan = TestPlan(env=mock.Mock()) 970 testplan.testsuites = { 971 'testsuite0': mock.Mock(tags=set(['tag1', 'tag2'])), 972 'testsuite1': mock.Mock(tags=set(['tag1', 'tag2', 'tag3'])), 973 'testsuite2': mock.Mock(tags=set(['tag1', 'tag3'])), 974 'testsuite3': mock.Mock(tags=set(['tag'])) 975 } 976 977 testplan.report_tag_list() 978 979 out,err = capfd.readouterr() 980 sys.stdout.write(out) 981 sys.stderr.write(err) 982 983 assert '- tag' in out 984 assert '- tag1' in out 985 assert '- tag2' in out 986 assert '- tag3' in out 987 988 989def test_testplan_report_test_tree(capfd): 990 testplan = TestPlan(env=mock.Mock()) 991 testplan.get_tests_list = mock.Mock( 992 return_value=['1.dummy.case.1', '1.dummy.case.2', 993 '2.dummy.case.1', '2.dummy.case.2', 994 '3.dummy.case.1', '3.dummy.case.2', 995 '4.dummy.case.1', '4.dummy.case.2', 996 '5.dummy.case.1', '5.dummy.case.2', 997 'sample.group1.case1', 'sample.group1.case2', 998 'sample.group2.case', 'sample.group3.case1', 999 'sample.group3.case2', 'sample.group3.case3'] 1000 ) 1001 1002 testplan.report_test_tree() 1003 1004 out,err = capfd.readouterr() 1005 sys.stdout.write(out) 1006 sys.stderr.write(err) 1007 1008 expected = """ 1009Testsuite 1010├── Samples 1011│ ├── group1 1012│ │ ├── sample.group1.case1 1013│ │ └── sample.group1.case2 1014│ ├── group2 1015│ │ └── sample.group2.case 1016│ └── group3 1017│ ├── sample.group3.case1 1018│ ├── sample.group3.case2 1019│ └── sample.group3.case3 1020└── Tests 1021 ├── 1 1022 │ └── dummy 1023 │ ├── 1.dummy.case.1 1024 │ └── 1.dummy.case.2 1025 ├── 2 1026 │ └── dummy 1027 │ ├── 2.dummy.case.1 1028 │ └── 2.dummy.case.2 1029 ├── 3 1030 │ └── dummy 1031 │ ├── 3.dummy.case.1 1032 │ └── 3.dummy.case.2 1033 ├── 4 1034 │ └── dummy 1035 │ ├── 4.dummy.case.1 1036 │ └── 4.dummy.case.2 1037 └── 5 1038 └── dummy 1039 ├── 5.dummy.case.1 1040 └── 5.dummy.case.2 1041""" 1042 expected = expected[1:] 1043 1044 assert expected in out 1045 1046 1047def test_testplan_report_test_list(capfd): 1048 testplan = TestPlan(env=mock.Mock()) 1049 testplan.get_tests_list = mock.Mock( 1050 return_value=['4.dummy.case.1', '4.dummy.case.2', 1051 '3.dummy.case.2', '2.dummy.case.2', 1052 '1.dummy.case.1', '1.dummy.case.2', 1053 '3.dummy.case.1', '2.dummy.case.1', 1054 '5.dummy.case.1', '5.dummy.case.2'] 1055 ) 1056 1057 testplan.report_test_list() 1058 1059 out,err = capfd.readouterr() 1060 sys.stdout.write(out) 1061 sys.stderr.write(err) 1062 1063 assert ' - 1.dummy.case.1\n' \ 1064 ' - 1.dummy.case.2\n' \ 1065 ' - 2.dummy.case.1\n' \ 1066 ' - 2.dummy.case.2\n' \ 1067 ' - 3.dummy.case.1\n' \ 1068 ' - 3.dummy.case.2\n' \ 1069 ' - 4.dummy.case.1\n' \ 1070 ' - 4.dummy.case.2\n' \ 1071 ' - 5.dummy.case.1\n' \ 1072 ' - 5.dummy.case.2\n' \ 1073 '10 total.' in out 1074 1075 1076def test_testplan_info(capfd): 1077 TestPlan.info('dummy text') 1078 1079 out, err = capfd.readouterr() 1080 sys.stdout.write(out) 1081 sys.stderr.write(err) 1082 1083 assert 'dummy text\n' in out 1084 1085 1086TESTDATA_8 = [ 1087 (False, ['p1e2/unit_testing', 'p2/unit_testing', 'p3/unit_testing'], ['p2/unit_testing', 'p3/unit_testing']), 1088 (True, ['p1e2/unit_testing', 'p2/unit_testing', 'p3/unit_testing'], ['p3/unit_testing']), 1089] 1090 1091@pytest.mark.parametrize( 1092 'override_default_platforms, expected_platform_names, expected_defaults', 1093 TESTDATA_8, 1094 ids=['no override defaults', 'override defaults'] 1095) 1096def test_testplan_add_configurations( 1097 tmp_path, 1098 override_default_platforms, 1099 expected_platform_names, 1100 expected_defaults 1101): 1102 # tmp_path 1103 # └ boards <- board root 1104 # ├ zephyr 1105 # │ ├ p1 1106 # │ | ├ p1e1.yaml 1107 # │ | └ p1e2.yaml 1108 # │ └ p2 1109 # │ ├ p2.yaml 1110 # │ └ p2-1.yaml <- duplicate 1111 # │ └ p2-2.yaml <- load error 1112 # └ arm 1113 # └ p3 1114 # ├ p3.yaml 1115 # └ p3_B.conf 1116 tmp_soc_root_dir = tmp_path / 'soc' 1117 tmp_soc_root_dir.mkdir() 1118 1119 tmp_vend1_dir = tmp_soc_root_dir / 'zephyr' 1120 tmp_vend1_dir.mkdir() 1121 1122 tmp_soc1_dir = tmp_vend1_dir / 's1' 1123 tmp_soc1_dir.mkdir() 1124 1125 soc1_yaml = """\ 1126family: 1127 - name: zephyr 1128 series: 1129 - name: zephyr_testing 1130 socs: 1131 - name: unit_testing 1132""" 1133 soc1_yamlfile = tmp_soc1_dir / 'soc.yml' 1134 soc1_yamlfile.write_text(soc1_yaml) 1135 1136 tmp_board_root_dir = tmp_path / 'boards' 1137 tmp_board_root_dir.mkdir() 1138 1139 tmp_vend1_dir = tmp_board_root_dir / 'zephyr' 1140 tmp_vend1_dir.mkdir() 1141 1142 tmp_p1_dir = tmp_vend1_dir / 'p1' 1143 tmp_p1_dir.mkdir() 1144 1145 p1e1_bs_yaml = """\ 1146boards: 1147 1148 - name: p1e1 1149 vendor: zephyr 1150 socs: 1151 - name: unit_testing 1152 - name: p1e2 1153 vendor: zephyr 1154 socs: 1155 - name: unit_testing 1156""" 1157 p1e1_yamlfile = tmp_p1_dir / 'board.yml' 1158 p1e1_yamlfile.write_text(p1e1_bs_yaml) 1159 1160 p1e1_yaml = """\ 1161identifier: p1e1 1162name: Platform 1 Edition 1 1163type: native 1164arch: x86 1165vendor: zephyr 1166toolchain: 1167 - zephyr 1168twister: False 1169""" 1170 p1e1_yamlfile = tmp_p1_dir / 'p1e1.yaml' 1171 p1e1_yamlfile.write_text(p1e1_yaml) 1172 1173 p1e2_yaml = """\ 1174identifier: p1e2 1175name: Platform 1 Edition 2 1176type: native 1177arch: x86 1178vendor: zephyr 1179toolchain: 1180 - zephyr 1181""" 1182 p1e2_yamlfile = tmp_p1_dir / 'p1e2.yaml' 1183 p1e2_yamlfile.write_text(p1e2_yaml) 1184 1185 tmp_p2_dir = tmp_vend1_dir / 'p2' 1186 tmp_p2_dir.mkdir() 1187 1188 p2_bs_yaml = """\ 1189boards: 1190 1191 - name: p2 1192 vendor: zephyr 1193 socs: 1194 - name: unit_testing 1195 - name: p2_2 1196 vendor: zephyr 1197 socs: 1198 - name: unit_testing 1199""" 1200 p2_yamlfile = tmp_p2_dir / 'board.yml' 1201 p2_yamlfile.write_text(p2_bs_yaml) 1202 1203 p2_yaml = """\ 1204identifier: p2/unit_testing 1205name: Platform 2 1206type: sim 1207arch: x86 1208vendor: vendor2 1209toolchain: 1210 - zephyr 1211testing: 1212 default: True 1213""" 1214 p2_yamlfile = tmp_p2_dir / 'p2.yaml' 1215 p2_yamlfile.write_text(p2_yaml) 1216 1217 1218 p2_2_yaml = """\ 1219testing: 1220 ć#@%!#!#^#@%@:1.0 1221identifier: p2_2 1222name: Platform 2 2 1223type: sim 1224arch: x86 1225vendor: vendor2 1226toolchain: 1227 - zephyr 1228""" 1229 p2_2_yamlfile = tmp_p2_dir / 'p2-2.yaml' 1230 p2_2_yamlfile.write_text(p2_2_yaml) 1231 1232 tmp_vend2_dir = tmp_board_root_dir / 'arm' 1233 tmp_vend2_dir.mkdir() 1234 1235 tmp_p3_dir = tmp_vend2_dir / 'p3' 1236 tmp_p3_dir.mkdir() 1237 1238 p3_bs_yaml = """\ 1239boards: 1240 - name: p3 1241 vendor: zephyr 1242 socs: 1243 - name: unit_testing 1244""" 1245 p3_yamlfile = tmp_p3_dir / 'board.yml' 1246 p3_yamlfile.write_text(p3_bs_yaml) 1247 1248 p3_yaml = """\ 1249identifier: p3 1250name: Platform 3 1251type: unit 1252arch: arm 1253vendor: vendor3 1254toolchain: 1255 - zephyr 1256testing: 1257 default: True 1258""" 1259 p3_yamlfile = tmp_p3_dir / 'p3.yaml' 1260 p3_yamlfile.write_text(p3_yaml) 1261 1262 env = mock.Mock(board_roots=[tmp_board_root_dir],soc_roots=[tmp_path], arch_roots=[tmp_path]) 1263 1264 testplan = TestPlan(env=env) 1265 1266 testplan.test_config = { 1267 'platforms': { 1268 'override_default_platforms': override_default_platforms, 1269 'default_platforms': ['p3', 'p1e1'] 1270 } 1271 } 1272 1273 1274 testplan.add_configurations() 1275 1276 if expected_defaults is not None: 1277 print(expected_defaults) 1278 print(testplan.default_platforms) 1279 assert sorted(expected_defaults) == sorted(testplan.default_platforms) 1280 if expected_platform_names is not None: 1281 print(expected_platform_names) 1282 print(testplan.platform_names) 1283 platform_names = [p.name for p in testplan.platforms] 1284 assert sorted(expected_platform_names) == sorted(platform_names) 1285 1286 1287def test_testplan_get_all_tests(): 1288 testplan = TestPlan(env=mock.Mock()) 1289 tc1 = mock.Mock() 1290 tc1.name = 'tc1' 1291 tc2 = mock.Mock() 1292 tc2.name = 'tc2' 1293 tc3 = mock.Mock() 1294 tc3.name = 'tc3' 1295 tc4 = mock.Mock() 1296 tc4.name = 'tc4' 1297 tc5 = mock.Mock() 1298 tc5.name = 'tc5' 1299 ts1 = mock.Mock(testcases=[tc1, tc2]) 1300 ts2 = mock.Mock(testcases=[tc3, tc4, tc5]) 1301 testplan.testsuites = { 1302 'ts1': ts1, 1303 'ts2': ts2 1304 } 1305 1306 res = testplan.get_all_tests() 1307 1308 assert sorted(res) == ['tc1', 'tc2', 'tc3', 'tc4', 'tc5'] 1309 1310 1311TESTDATA_9 = [ 1312 ([], False, True, 11, 1), 1313 ([], False, False, 7, 2), 1314 ([], True, False, 9, 1), 1315 ([], True, True, 9, 1), 1316 ([], True, False, 9, 1), 1317 (['good_test/dummy.common.1', 'good_test/dummy.common.2', 'good_test/dummy.common.3'], False, True, 3, 1), 1318 (['good_test/dummy.common.1', 'good_test/dummy.common.2', 1319 'duplicate_test/dummy.common.1', 'duplicate_test/dummy.common.2'], False, True, 4, 1), 1320 (['dummy.common.1', 'dummy.common.2'], False, False, 2, 1), 1321 (['good_test/dummy.common.1', 'good_test/dummy.common.2', 'good_test/dummy.common.3'], True, True, 0, 1), 1322] 1323 1324@pytest.mark.parametrize( 1325 'testsuite_filter, use_alt_root, detailed_id, expected_suite_count, expected_errors', 1326 TESTDATA_9, 1327 ids=[ 1328 'no testsuite filter, detailed id', 1329 'no testsuite filter, short id', 1330 'no testsuite filter, alt root, detailed id', 1331 'no filter, alt root, detailed id', 1332 'no filter, alt root, short id', 1333 'testsuite filter', 1334 'testsuite filter and valid duplicate', 1335 'testsuite filter, short id and duplicate', 1336 'testsuite filter, alt root', 1337 ] 1338) 1339def test_testplan_add_testsuites(tmp_path, testsuite_filter, use_alt_root, detailed_id, 1340 expected_errors, expected_suite_count): 1341 # tmp_path 1342 # ├ tests <- test root 1343 # │ ├ good_test 1344 # │ │ └ testcase.yaml 1345 # │ ├ wrong_test 1346 # │ │ └ testcase.yaml 1347 # │ ├ good_sample 1348 # │ │ └ sample.yaml 1349 # │ ├ duplicate_test 1350 # │ │ └ testcase.yaml 1351 # │ └ others 1352 # │ └ other.txt 1353 # └ other_tests <- alternate test root 1354 # └ good_test 1355 # └ testcase.yaml 1356 tmp_test_root_dir = tmp_path / 'tests' 1357 tmp_test_root_dir.mkdir() 1358 1359 tmp_good_test_dir = tmp_test_root_dir / 'good_test' 1360 tmp_good_test_dir.mkdir() 1361 testcase_yaml_1 = """\ 1362tests: 1363 dummy.common.1: 1364 build_on_all: true 1365 dummy.common.2: 1366 build_on_all: true 1367 dummy.common.3: 1368 build_on_all: true 1369 dummy.special: 1370 build_on_all: false 1371""" 1372 testfile_1 = tmp_good_test_dir / 'testcase.yaml' 1373 testfile_1.write_text(testcase_yaml_1) 1374 1375 tmp_bad_test_dir = tmp_test_root_dir / 'wrong_test' 1376 tmp_bad_test_dir.mkdir() 1377 testcase_yaml_2 = """\ 1378tests: 1379 wrong: 1380 yaml: {]} 1381""" 1382 testfile_2 = tmp_bad_test_dir / 'testcase.yaml' 1383 testfile_2.write_text(testcase_yaml_2) 1384 1385 tmp_good_sample_dir = tmp_test_root_dir / 'good_sample' 1386 tmp_good_sample_dir.mkdir() 1387 samplecase_yaml_1 = """\ 1388tests: 1389 sample.dummy.common.1: 1390 tags: 1391 - samples 1392 sample.dummy.common.2: 1393 tags: 1394 - samples 1395 sample.dummy.special.1: 1396 tags: 1397 - samples 1398""" 1399 samplefile_1 = tmp_good_sample_dir / 'sample.yaml' 1400 samplefile_1.write_text(samplecase_yaml_1) 1401 1402 tmp_duplicate_test_dir = tmp_test_root_dir / 'duplicate_test' 1403 tmp_duplicate_test_dir.mkdir() 1404 # The duplicate needs to have the same number of tests as these configurations 1405 # can be read either with duplicate_test first, or good_test first, so number 1406 # of selected tests needs to be the same in both situations. 1407 testcase_yaml_4 = """\ 1408tests: 1409 dummy.common.1: 1410 build_on_all: true 1411 dummy.common.2: 1412 build_on_all: true 1413 dummy.common.3: 1414 build_on_all: true 1415 dummy.special: 1416 build_on_all: false 1417""" 1418 testfile_4 = tmp_duplicate_test_dir / 'testcase.yaml' 1419 testfile_4.write_text(testcase_yaml_4) 1420 1421 tmp_other_dir = tmp_test_root_dir / 'others' 1422 tmp_other_dir.mkdir() 1423 _ = tmp_other_dir / 'other.txt' 1424 1425 tmp_alt_test_root_dir = tmp_path / 'other_tests' 1426 tmp_alt_test_root_dir.mkdir() 1427 1428 tmp_alt_good_test_dir = tmp_alt_test_root_dir / 'good_test' 1429 tmp_alt_good_test_dir.mkdir() 1430 testcase_yaml_3 = """\ 1431tests: 1432 dummy.alt.1: 1433 build_on_all: true 1434 dummy.alt.2: 1435 build_on_all: true 1436""" 1437 testfile_3 = tmp_alt_good_test_dir / 'testcase.yaml' 1438 testfile_3.write_text(testcase_yaml_3) 1439 1440 env = mock.Mock( 1441 test_roots=[tmp_test_root_dir], 1442 options=mock.Mock(detailed_test_id=detailed_id), 1443 alt_config_root=[tmp_alt_test_root_dir] if use_alt_root else [] 1444 ) 1445 1446 testplan = TestPlan(env=env) 1447 1448 res = testplan.add_testsuites(testsuite_filter) 1449 1450 assert res == expected_suite_count 1451 assert testplan.load_errors == expected_errors 1452 1453 1454def test_testplan_str(): 1455 testplan = TestPlan(env=mock.Mock()) 1456 testplan.name = 'my name' 1457 1458 res = testplan.__str__() 1459 1460 assert res == 'my name' 1461 1462 1463TESTDATA_10 = [ 1464 ('a platform', True), 1465 ('other platform', False), 1466] 1467 1468@pytest.mark.parametrize( 1469 'name, expect_found', 1470 TESTDATA_10, 1471 ids=['platform exists', 'no platform'] 1472) 1473def test_testplan_get_platform(name, expect_found): 1474 testplan = TestPlan(env=mock.Mock()) 1475 p1 = mock.Mock() 1476 p1.name = 'some platform' 1477 p1.aliases = [p1.name] 1478 p2 = mock.Mock() 1479 p2.name = 'a platform' 1480 p2.aliases = [p2.name] 1481 testplan.platforms = [p1, p2] 1482 1483 res = testplan.get_platform(name) 1484 1485 if expect_found: 1486 assert res.name == name 1487 else: 1488 assert res is None 1489 1490 1491TESTDATA_11 = [ 1492 (True, 'runnable'), 1493 (False, 'buildable'), 1494] 1495 1496@pytest.mark.parametrize( 1497 'device_testing, expected_tfilter', 1498 TESTDATA_11, 1499 ids=['device testing', 'no device testing'] 1500) 1501def test_testplan_load_from_file(caplog, device_testing, expected_tfilter): 1502 def get_platform(name): 1503 p = mock.Mock() 1504 p.name = name 1505 p.normalized_name = name 1506 return p 1507 1508 ts1tc1 = mock.Mock() 1509 ts1tc1.name = 'TS1.tc1' 1510 ts1 = mock.Mock(testcases=[ts1tc1]) 1511 ts1.name = 'TestSuite 1' 1512 ts2 = mock.Mock(testcases=[]) 1513 ts2.name = 'TestSuite 2' 1514 ts3tc1 = mock.Mock() 1515 ts3tc1.name = 'TS3.tc1' 1516 ts3tc2 = mock.Mock() 1517 ts3tc2.name = 'TS3.tc2' 1518 ts3 = mock.Mock(testcases=[ts3tc1, ts3tc2]) 1519 ts3.name = 'TestSuite 3' 1520 ts4tc1 = mock.Mock() 1521 ts4tc1.name = 'TS4.tc1' 1522 ts4 = mock.Mock(testcases=[ts4tc1]) 1523 ts4.name = 'TestSuite 4' 1524 ts5 = mock.Mock(testcases=[]) 1525 ts5.name = 'TestSuite 5' 1526 1527 testplan = TestPlan(env=mock.Mock(outdir=os.path.join('out', 'dir'))) 1528 testplan.options = mock.Mock(device_testing=device_testing, test_only=True, report_summary=None) 1529 testplan.testsuites = { 1530 'TestSuite 1': ts1, 1531 'TestSuite 2': ts2, 1532 'TestSuite 3': ts3, 1533 'TestSuite 4': ts4, 1534 'TestSuite 5': ts5 1535 } 1536 1537 testplan.get_platform = mock.Mock(side_effect=get_platform) 1538 1539 testplan_data = """\ 1540{ 1541 "testsuites": [ 1542 { 1543 "name": "TestSuite 1", 1544 "platform": "Platform 1", 1545 "run_id": 1, 1546 "execution_time": 60.00, 1547 "used_ram": 4096, 1548 "available_ram": 12278, 1549 "used_rom": 1024, 1550 "available_rom": 1047552, 1551 "status": "passed", 1552 "reason": "OK", 1553 "testcases": [ 1554 { 1555 "identifier": "TS1.tc1", 1556 "status": "passed", 1557 "reason": "passed", 1558 "execution_time": 60.00, 1559 "log": "" 1560 } 1561 ] 1562 }, 1563 { 1564 "name": "TestSuite 2", 1565 "platform": "Platform 1" 1566 }, 1567 { 1568 "name": "TestSuite 3", 1569 "platform": "Platform 1", 1570 "run_id": 1, 1571 "execution_time": 360.00, 1572 "used_ram": 4096, 1573 "available_ram": 12278, 1574 "used_rom": 1024, 1575 "available_rom": 1047552, 1576 "status": "error", 1577 "reason": "File Not Found Error", 1578 "testcases": [ 1579 { 1580 "identifier": "TS3.tc1", 1581 "status": "error", 1582 "reason": "File Not Found Error.", 1583 "execution_time": 360.00, 1584 "log": "[ERROR]: File 'dummy.yaml' not found!\\nClosing..." 1585 }, 1586 { 1587 "identifier": "TS3.tc2" 1588 } 1589 ] 1590 }, 1591 { 1592 "name": "TestSuite 4", 1593 "platform": "Platform 1", 1594 "execution_time": 360.00, 1595 "used_ram": 4096, 1596 "available_ram": 12278, 1597 "used_rom": 1024, 1598 "available_rom": 1047552, 1599 "status": "skipped", 1600 "reason": "Not in requested test list.", 1601 "testcases": [ 1602 { 1603 "identifier": "TS4.tc1", 1604 "status": "skipped", 1605 "reason": "Not in requested test list.", 1606 "execution_time": 360.00, 1607 "log": "[INFO] Parsing..." 1608 }, 1609 { 1610 "identifier": "TS3.tc2" 1611 } 1612 ] 1613 }, 1614 { 1615 "name": "TestSuite 5", 1616 "platform": "Platform 2" 1617 } 1618 ] 1619} 1620""" 1621 1622 filter_platform = ['Platform 1'] 1623 1624 check_runnable_mock = mock.Mock(return_value=True) 1625 1626 with mock.patch('builtins.open', mock.mock_open(read_data=testplan_data)), \ 1627 mock.patch('twisterlib.testinstance.TestInstance.check_runnable', check_runnable_mock), \ 1628 mock.patch('twisterlib.testinstance.TestInstance.create_overlay', mock.Mock()): 1629 testplan.load_from_file('dummy.yaml', filter_platform) 1630 1631 expected_instances = { 1632 'Platform 1/TestSuite 1': { 1633 'metrics': { 1634 'handler_time': 60.0, 1635 'used_ram': 4096, 1636 'used_rom': 1024, 1637 'available_ram': 12278, 1638 'available_rom': 1047552 1639 }, 1640 'retries': 0, 1641 'testcases': { 1642 'TS1.tc1': { 1643 'status': TwisterStatus.PASS, 1644 'reason': 'passed', 1645 'duration': 60.0, 1646 'output': '' 1647 } 1648 } 1649 }, 1650 'Platform 1/TestSuite 2': { 1651 'metrics': { 1652 'handler_time': 0, 1653 'used_ram': 0, 1654 'used_rom': 0, 1655 'available_ram': 0, 1656 'available_rom': 0 1657 }, 1658 'retries': 0, 1659 'testcases': [] 1660 }, 1661 'Platform 1/TestSuite 3': { 1662 'metrics': { 1663 'handler_time': 360.0, 1664 'used_ram': 4096, 1665 'used_rom': 1024, 1666 'available_ram': 12278, 1667 'available_rom': 1047552 1668 }, 1669 'retries': 1, 1670 'testcases': { 1671 'TS3.tc1': { 1672 'status': TwisterStatus.ERROR, 1673 'reason': None, 1674 'duration': 360.0, 1675 'output': '[ERROR]: File \'dummy.yaml\' not found!\nClosing...' 1676 }, 1677 'TS3.tc2': { 1678 'status': TwisterStatus.NONE, 1679 'reason': None, 1680 'duration': 0, 1681 'output': '' 1682 } 1683 } 1684 }, 1685 'Platform 1/TestSuite 4': { 1686 'metrics': { 1687 'handler_time': 360.0, 1688 'used_ram': 4096, 1689 'used_rom': 1024, 1690 'available_ram': 12278, 1691 'available_rom': 1047552 1692 }, 1693 'retries': 0, 1694 'testcases': { 1695 'TS4.tc1': { 1696 'status': TwisterStatus.SKIP, 1697 'reason': 'Not in requested test list.', 1698 'duration': 360.0, 1699 'output': '[INFO] Parsing...' 1700 } 1701 } 1702 }, 1703 } 1704 1705 for n, i in testplan.instances.items(): 1706 assert expected_instances[n]['metrics'] == i.metrics 1707 assert expected_instances[n]['retries'] == i.retries 1708 for t in i.testcases: 1709 assert expected_instances[n]['testcases'][str(t)]['status'] == t.status 1710 assert expected_instances[n]['testcases'][str(t)]['reason'] == t.reason 1711 assert expected_instances[n]['testcases'][str(t)]['duration'] == t.duration 1712 assert expected_instances[n]['testcases'][str(t)]['output'] == t.output 1713 1714 check_runnable_mock.assert_called_with(mock.ANY, mock.ANY) 1715 1716 expected_logs = [ 1717 'loading TestSuite 1...', 1718 'loading TestSuite 2...', 1719 'loading TestSuite 3...', 1720 'loading TestSuite 4...', 1721 ] 1722 assert all([log in caplog.text for log in expected_logs]) 1723 1724 1725def test_testplan_add_instances(): 1726 testplan = TestPlan(env=mock.Mock()) 1727 instance1 = mock.Mock() 1728 instance1.name = 'instance 1' 1729 instance2 = mock.Mock() 1730 instance2.name = 'instance 2' 1731 instance_list = [instance1, instance2] 1732 1733 testplan.add_instances(instance_list) 1734 1735 assert testplan.instances == { 1736 'instance 1': instance1, 1737 'instance 2': instance2, 1738 } 1739 1740 1741def test_testplan_get_testcase(): 1742 testplan = TestPlan(env=mock.Mock()) 1743 testplan.testsuites = { 1744 'test1.suite0': mock.Mock(testcases=[mock.Mock(), mock.Mock()]), 1745 'test1.suite1': mock.Mock(testcases=[mock.Mock(), mock.Mock()]), 1746 'test1.suite2': mock.Mock(testcases=[mock.Mock(), mock.Mock()]), 1747 'test1.suite3': mock.Mock(testcases=[]) 1748 } 1749 1750 testplan.testsuites['test1.suite0'].testcases[0].name = 'test1.suite0.case0' 1751 testplan.testsuites['test1.suite0'].testcases[1].name = 'test1.suite0.case1' 1752 # 1753 testplan.testsuites['test1.suite1'].testcases[0].name = 'test1.suite1.case0' 1754 testplan.testsuites['test1.suite1'].testcases[1].name = 'test1.suite1.case0' # in suite duplicate 1755 # 1756 testplan.testsuites['test1.suite2'].testcases[0].name = 'test1.suite2.case0' 1757 testplan.testsuites['test1.suite2'].testcases[1].name = 'test1.suite1.case0' # out suite duplicate 1758 1759 id = 'test1.suite1.case0' 1760 1761 res = testplan.get_testcase(id) 1762 1763 assert len(res) == 3 1764 assert testplan.testsuites['test1.suite1'] in res 1765 assert testplan.testsuites['test1.suite2'] in res 1766 1767 1768def test_testplan_verify_platforms_existence(caplog): 1769 testplan = TestPlan(env=mock.Mock()) 1770 testplan.platform_names = ['a platform', 'other platform'] 1771 1772 platform_names = ['other platform', 'some platform'] 1773 log_info = 'PLATFORM ERROR' 1774 1775 with pytest.raises(SystemExit) as se: 1776 testplan.verify_platforms_existence(platform_names, log_info) 1777 1778 assert str(se.value) == '2' 1779 assert 'PLATFORM ERROR - unrecognized platform - some platform' 1780 1781 1782TESTDATA_12 = [ 1783 (True), 1784 (False) 1785] 1786 1787@pytest.mark.parametrize( 1788 'exists', 1789 TESTDATA_12, 1790 ids=['links dir exists', 'links dir does not exist'] 1791) 1792def test_testplan_create_build_dir_links(exists): 1793 outdir = os.path.join('out', 'dir') 1794 instances_linked = [] 1795 1796 def mock_link(links_dir_path, instance): 1797 assert links_dir_path == os.path.join(outdir, 'twister_links') 1798 instances_linked.append(instance) 1799 1800 instances = { 1801 'inst0': mock.Mock(status=TwisterStatus.PASS), 1802 'inst1': mock.Mock(status=TwisterStatus.SKIP), 1803 'inst2': mock.Mock(status=TwisterStatus.ERROR), 1804 } 1805 expected_instances = [instances['inst0'], instances['inst2']] 1806 1807 testplan = TestPlan(env=mock.Mock(outdir=outdir)) 1808 testplan._create_build_dir_link = mock.Mock(side_effect=mock_link) 1809 testplan.instances = instances 1810 1811 with mock.patch('os.path.exists', return_value=exists), \ 1812 mock.patch('os.mkdir', mock.Mock()) as mkdir_mock: 1813 testplan.create_build_dir_links() 1814 1815 if not exists: 1816 mkdir_mock.assert_called_once() 1817 1818 assert expected_instances == instances_linked 1819 1820 1821TESTDATA_13 = [ 1822 ('nt'), 1823 ('Linux') 1824] 1825 1826@pytest.mark.parametrize( 1827 'os_name', 1828 TESTDATA_13, 1829) 1830def test_testplan_create_build_dir_link(os_name): 1831 def mock_makedirs(path, exist_ok=False): 1832 assert exist_ok 1833 assert path == instance_build_dir 1834 1835 def mock_symlink(source, target): 1836 assert source == instance_build_dir 1837 assert target == os.path.join('links', 'path', 'test_0') 1838 1839 def mock_call(cmd, shell=False): 1840 assert shell 1841 assert cmd == ['mklink', '/J', os.path.join('links', 'path', 'test_0'), 1842 instance_build_dir] 1843 1844 def mock_join(*paths): 1845 slash = "\\" if os.name == 'nt' else "/" 1846 return slash.join(paths) 1847 1848 with mock.patch('os.name', os_name), \ 1849 mock.patch('os.symlink', side_effect=mock_symlink), \ 1850 mock.patch('os.makedirs', side_effect=mock_makedirs), \ 1851 mock.patch('subprocess.call', side_effect=mock_call), \ 1852 mock.patch('os.path.join', side_effect=mock_join): 1853 1854 testplan = TestPlan(env=mock.Mock()) 1855 links_dir_path = os.path.join('links', 'path') 1856 instance_build_dir = os.path.join('some', 'far', 'off', 'build', 'dir') 1857 instance = mock.Mock(build_dir=instance_build_dir) 1858 testplan._create_build_dir_link(links_dir_path, instance) 1859 1860 assert instance.build_dir == os.path.join('links', 'path', 'test_0') 1861 assert testplan.link_dir_counter == 1 1862 1863 1864TESTDATA_14 = [ 1865 ('bad platform', 'dummy reason', [], 1866 'dummy status', 'dummy reason'), 1867 ('good platform', 'quarantined', [], 1868 TwisterStatus.ERROR, 'quarantined but is one of the integration platforms'), 1869 ('good platform', 'dummy reason', [{'type': 'command line filter'}], 1870 'dummy status', 'dummy reason'), 1871 ('good platform', 'dummy reason', [{'type': 'Skip filter'}], 1872 'dummy status', 'dummy reason'), 1873 ('good platform', 'dummy reason', [{'type': 'platform key filter'}], 1874 'dummy status', 'dummy reason'), 1875 ('good platform', 'dummy reason', [{'type': 'Toolchain filter'}], 1876 'dummy status', 'dummy reason'), 1877 ('good platform', 'dummy reason', [{'type': 'Module filter'}], 1878 'dummy status', 'dummy reason'), 1879 ('good platform', 'dummy reason', [{'type': 'testsuite filter'}], 1880 TwisterStatus.ERROR, 'dummy reason but is one of the integration platforms'), 1881] 1882 1883@pytest.mark.parametrize( 1884 'platform_name, reason, filters,' \ 1885 ' expected_status, expected_reason', 1886 TESTDATA_14, 1887 ids=['wrong platform', 'quarantined', 'command line filtered', 1888 'skip filtered', 'platform key filtered', 'toolchain filtered', 1889 'module filtered', 'skip to error change'] 1890) 1891def test_change_skip_to_error_if_integration( 1892 platform_name, 1893 reason, 1894 filters, 1895 expected_status, 1896 expected_reason 1897): 1898 options = mock.Mock() 1899 platform = mock.Mock() 1900 platform.name = platform_name 1901 testsuite = mock.Mock(integration_platforms=['good platform', 'a platform']) 1902 instance = mock.Mock( 1903 testsuite=testsuite, 1904 platform=platform, 1905 filters=filters, 1906 status='dummy status', 1907 reason=reason 1908 ) 1909 1910 change_skip_to_error_if_integration(options, instance) 1911 1912 assert instance.status == expected_status 1913 assert instance.reason == expected_reason 1914