#!/usr/bin/env python3
# Copyright (c) 2020 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
Tests for testinstance class
"""

import mmap
import mock
import os
import pytest
import sys

from contextlib import nullcontext

# Make twisterlib importable. Assumes ZEPHYR_BASE is set in the environment;
# if it is not, os.path.join(None, ...) below raises TypeError at import time.
ZEPHYR_BASE = os.getenv('ZEPHYR_BASE')
sys.path.insert(0, os.path.join(ZEPHYR_BASE, 'scripts', 'pylib', 'twister'))

from twisterlib.statuses import TwisterStatus
from twisterlib.testsuite import (
    _find_src_dir_path,
    _get_search_area_boundary,
    find_c_files_in,
    scan_file,
    scan_testsuite_path,
    ScanPathResult,
    TestCase,
    TestSuite
)
from twisterlib.error import TwisterException, TwisterRuntimeError


# Each entry: (original ScanPathResult, value compared against it,
#              expected result of the == comparison).
TESTDATA_1 = [
    (
        ScanPathResult(
            ['a', 'b'],
            'Found a test that does not start with test_',
            False,
            False,
            True,
            ['feature_a', 'feature_b']
        ),
        ScanPathResult(
            ['a', 'b'],
            'Found a test that does not start with test_',
            False,
            False,
            True,
            ['feature_a', 'feature_b']
        ),
        True
    ),
#    (
#        ScanPathResult(),
#        ScanPathResult(),
#        True
#    ),
    (
        ScanPathResult(
            ['a', 'b'],
            'Found a test that does not start with test_',
            False,
            False,
            True,
            ['feature_a', 'feature_b']
        ),
        'I am not a ScanPathResult.',
        False
    ),
#    (
#        ScanPathResult(
#            ['a', 'b'],
#            'Found a test that does not start with test_',
#            False,
#            False,
#            True,
#            ['feature_a', 'feature_b']
#        ),
#        ScanPathResult(),
#        False
#    ),
]


@pytest.mark.parametrize(
    'original, provided, expected',
    TESTDATA_1,
    ids=[
        'identical',
#        'empties',
        'wrong type',
#        'different with empty'
    ]
)
def test_scanpathresults_dunders(original, provided, expected):
    # Exercises ScanPathResult.__eq__ (and implicitly __ne__) against both
    # equal instances and a value of a different type.
    result = original == provided

    assert result == expected

# Each entry: (path of a ztest source file relative to the test_data fixture,
#              ScanPathResult expected from scan_file for that file).
TESTDATA_2 = [
    (
        os.path.join('testsuites', 'tests', 'test_ztest.c'),
        ScanPathResult(
            warnings=None,
            matches=[
                'a',
                'c',
                'unit_a',
                'newline',
                'test_test_aa',
                'user',
                'last'
            ],
            has_registered_test_suites=False,
            has_run_registered_test_suites=False,
            has_test_main=False,
            ztest_suite_names = ['test_api']
        )
    ),
    (
        os.path.join('testsuites', 'tests', 'test_a', 'test_ztest_error.c'),
        ScanPathResult(
            warnings='Found a test that does not start with test_',
            matches=['1a', '1c', '2a', '2b'],
            has_registered_test_suites=False,
            has_run_registered_test_suites=False,
            has_test_main=True,
            ztest_suite_names = ['feature1', 'feature2']
        )
    ),
    (
        os.path.join('testsuites', 'tests', 'test_a', 'test_ztest_error_1.c'),
        ScanPathResult(
            warnings='found invalid #ifdef, #endif in ztest_test_suite()',
            matches=['unit_1a', 'unit_1b', 'Unit_1c'],
            has_registered_test_suites=False,
            has_run_registered_test_suites=False,
            has_test_main=False,
            ztest_suite_names = ['feature3']
        )
    ),
    (
        os.path.join(
            'testsuites',
            'tests',
            'test_d',
            'test_ztest_error_register_test_suite.c'
        ),
        ScanPathResult(
            warnings=None,
            matches=['unit_1a', 'unit_1b'],
            has_registered_test_suites=True,
            has_run_registered_test_suites=False,
            has_test_main=False,
            ztest_suite_names = ['feature4']
        )
    ),
    (
        os.path.join(
            'testsuites',
            'tests',
            'test_e',
            'test_ztest_new_suite.c'
        ),
        ScanPathResult(
            warnings=None,
            matches=['feature5.1a', 'feature5.1b'],
            has_registered_test_suites=False,
            has_run_registered_test_suites=True,
            has_test_main=False,
            ztest_suite_names = ['feature5']
        )
    ),
#    (
#        os.path.join(
#            'testsuites',
#            'tests',
#            'test_e',
#            'test_ztest_no_suite.c'
#        ),
#        ScanPathResult(
#            warnings=None,
#            matches=None,
#            has_registered_test_suites=False,
#            has_run_registered_test_suites=False,
#            has_test_main=False,
#            ztest_suite_names = []
#        )
#    ),
]
@pytest.mark.parametrize(
    'test_file, expected',
    TESTDATA_2,
    ids=[
        'valid',
        'test not starting with test_',
        'invalid ifdef with test_main',
        'registered testsuite',
        'new testsuite with registered run',
#        'empty testsuite'
    ]
)
def test_scan_file(test_data, test_file, class_env, expected: ScanPathResult):
    """
    Testing scan_file method with different
    ztest files for warnings and results

    test_data and class_env are fixtures — presumably provided by the
    local conftest; verify there if they change.
    """

    result: ScanPathResult = scan_file(os.path.join(test_data, test_file))

    assert result == expected


# Generate testcases depending on available mmap attributes.
# mmap.ACCESS_READ exists on Windows builds, MAP_PRIVATE/PROT_READ only on
# POSIX builds; the AttributeError guards skip the cases the current host
# cannot express.
TESTIDS_3 = []
TESTDATA_3 = []

try:
    TESTDATA_3.append(
        (
            'nt',
            {'access': mmap.ACCESS_READ}
        )
    )
    TESTIDS_3.append('windows')
except AttributeError:
    pass

try:
    TESTDATA_3.append(
        (
            'posix',
            {
                'flags': mmap.MAP_PRIVATE,
                'prot': mmap.PROT_READ,
                'offset': 0
            }
        )
    )
    TESTIDS_3.append('linux')
except AttributeError:
    pass


@pytest.mark.parametrize(
    'os_name, expected',
    TESTDATA_3,
    ids=TESTIDS_3
)
def test_scan_file_mmap(os_name, expected):
    """Check that scan_file maps its input with OS-appropriate mmap kwargs."""
    class TestException(Exception):
        pass

    def assert_mmap(*args, **kwargs):
        # The platform-specific kwargs must be a subset of what
        # scan_file actually passed to mmap.mmap.
        assert expected.items() <= kwargs.items()

    # We do this to skip the rest of scan_file
    def raise_exception(*args, **kwargs):
        raise TestException('')

    with mock.patch('mmap.mmap', mock.Mock(side_effect=assert_mmap)), \
         mock.patch('builtins.open', mock.mock_open(read_data='dummy data')), \
         mock.patch('os.name', os_name), \
         mock.patch('contextlib.closing', raise_exception):
        # pytest.raises fails the test if scan_file never reaches
        # contextlib.closing (i.e. never called mmap.mmap). This replaces
        # the previous try/except + assert True/assert False pattern.
        with pytest.raises(TestException):
            scan_file('dummy/path')


# Each entry: (testsuite root, workdir, suite name, expected exception text).
TESTDATA_4 = [
    (
        ZEPHYR_BASE,
        '.',
        'test_c',
        'Tests should reference the category and subsystem' \
        ' with a dot as a separator.'
    ),
    (
        os.path.join(ZEPHYR_BASE, 'scripts', 'tests'),
        '.',
        '',
        'Tests should reference the category and subsystem' \
        ' with a dot as a separator.'),
]


@pytest.mark.parametrize(
    'testsuite_root, workdir, name, exception',
    TESTDATA_4
)
def test_get_unique_exception(testsuite_root, workdir, name, exception):
    """
    Test to check if tests reference the category and subsystem
    with a dot as a separator
    """

    with pytest.raises(TwisterException):
        # NOTE(review): the assert below is unreachable when the constructor
        # raises, as this test expects — kept for fidelity with the original.
        unique = TestSuite(testsuite_root, workdir, name)
        assert unique == exception


TEST_DATA_REL_PATH = os.path.join(
    'scripts',
    'tests',
    'twister',
    'test_data',
    'testsuites'
)


# Each entry: (testsuite root, suite path, raw name, expected unique name).
TESTDATA_5 = [
    (
        os.path.join(ZEPHYR_BASE, TEST_DATA_REL_PATH),
        os.path.join(ZEPHYR_BASE, TEST_DATA_REL_PATH, 'tests', 'test_a'),
        os.path.join(
            os.sep,
            TEST_DATA_REL_PATH,
            'tests',
            'test_a',
            'test_a.check_1'
        ),
        os.path.join(
            os.sep,
            TEST_DATA_REL_PATH,
            'tests',
            'test_a',
            'test_a.check_1'
        ),
    ),
    (
        ZEPHYR_BASE,
        ZEPHYR_BASE,
        'test_a.check_1',
        'test_a.check_1'
    ),
    (
        ZEPHYR_BASE,
        os.path.join(
            ZEPHYR_BASE,
            TEST_DATA_REL_PATH,
            'test_b'
        ),
        os.path.join(os.sep, TEST_DATA_REL_PATH, 'test_b', 'test_b.check_1'),
        os.path.join(os.sep, TEST_DATA_REL_PATH, 'test_b', 'test_b.check_1')
    ),
    (
        os.path.join(ZEPHYR_BASE, 'scripts', 'tests'),
        os.path.join(ZEPHYR_BASE, 'scripts', 'tests'),
        'test_b.check_1',
        os.path.join('scripts', 'tests', 'test_b.check_1')
    ),
    (
        ZEPHYR_BASE,
        ZEPHYR_BASE,
        'test_a.check_1.check_2',
        'test_a.check_1.check_2'
    ),
]


@pytest.mark.parametrize(
    'testsuite_root, suite_path, name, expected',
    TESTDATA_5
)
def test_get_unique(testsuite_root, suite_path, name, expected):
    """
    Test to check if the unique name is given
    for each testsuite root and workdir
    """

    suite = TestSuite(testsuite_root, suite_path, name)
    assert suite.name == expected


# Each entry: (search area bytes, suite regex match mocks,
#              is_registered_test_suite flag, expected boundary tuple
#              or expected exception type).
TESTDATA_6 = [
    (
        b'/* dummy */\r\n ztest_run_test_suite(feature)',
        [
            mock.Mock(
                start=mock.Mock(return_value=0),
                end=mock.Mock(return_value=0)
            )
        ],
        False,
        (0, 13)
    ),
    (
        b'ztest_register_test_suite(featureX, NULL, ztest_unit_test(test_a));',
        [
            mock.Mock(
                start=mock.Mock(return_value=0),
                end=mock.Mock(return_value=26)
            )
        ],
        True,
        (26, 67)
    ),
    (
        b'dummy text',
        [
            mock.Mock(
                start=mock.Mock(return_value=0),
                end=mock.Mock(return_value=0)
            )
        ],
        False,
        ValueError
    )
]

@pytest.mark.parametrize(
    'search_area, suite_regex_matches, is_registered_test_suite, expected',
    TESTDATA_6,
    ids=['run suite', 'registered suite', 'error']
)
def test_get_search_area_boundary(
    search_area,
    suite_regex_matches,
    is_registered_test_suite,
    expected
):
    """Check boundary calculation and its ValueError on a missing run call."""
    with pytest.raises(expected) if \
            isinstance(expected, type) and issubclass(expected, Exception) \
            else nullcontext() as exception:
        result = _get_search_area_boundary(
            search_area,
            suite_regex_matches,
            is_registered_test_suite
        )

    if exception:
        assert str(exception.value) == 'can\'t find ztest_run_test_suite'
        return

    assert result == expected


# Each entry: (os.path.isdir result, expected list of discovered files).
TESTDATA_7 = [
    (True, [os.path.join('', 'home', 'user', 'dummy_path', 'dummy.c'),
            os.path.join('', 'home', 'user', 'dummy_path', 'dummy.cpp')]),
    (False, [])
]

@pytest.mark.parametrize(
    'isdir, expected',
    TESTDATA_7,
    ids=['valid', 'not a directory']
)
def test_find_c_files_in(isdir, expected):
    """Check that find_c_files_in globs the right dir and restores the cwd."""
    old_dir = os.path.join('', 'home', 'user', 'dummy_base_dir')
    new_path = os.path.join('', 'home', 'user', 'dummy_path')
    cur_dir = old_dir

    def mock_chdir(path, *args, **kwargs):
        nonlocal cur_dir
        cur_dir = path

    # We simulate such a structure:
    # <new_path>
    # ┣ dummy.c
    # ┣ wrong_dummy.h
    # ┗ dummy_dir
    #   ┣ dummy.cpp
    #   ┗ wrong_dummy.hpp
    # <old_dir>
    # ┗ wrong_dummy.c
    new_path_base = ['dummy.c', 'wrong_dummy.h']
    new_path_subs = ['dummy.cpp', 'wrong_dummy.hpp']
    old_dir_base = ['wrong_dummy.c']

    def format_tester(fmt):
        # Classify a glob pattern as a recursive ('**/*.<ext>') or flat
        # ('*.<ext>') search and return (kind, extension).
        formats = [
            {'name': 'subdirs', 'fmt': '**/*.'},
            {'name': 'base', 'fmt': '*.'}
        ]

        for format in formats:
            if fmt.startswith(format['fmt']):
                return format['name'], fmt[len(format['fmt']):]

        raise ValueError('This test wasn\'t designed for those globs.'
                         ' Please fix the test before PR!')

    def mock_glob(fmt, *args, **kwargs):
        from_where, extension = format_tester(fmt)

        if cur_dir == old_dir:
            if from_where == 'subdirs':
                return []
            elif from_where == 'base':
                return list(filter(lambda fn: fn.endswith(extension),
                                   old_dir_base))
            else:
                return []
        if cur_dir == new_path:
            if from_where == 'subdirs':
                return list(filter(lambda fn: fn.endswith(extension),
                                   new_path_subs))
            elif from_where == 'base':
                return list(filter(lambda fn: fn.endswith(extension),
                                   new_path_base))
            else:
                return []

        raise ValueError('This test wasn\'t designed for those dirs.'
                         'Please fix the test before PR!')

    with mock.patch('os.path.isdir', return_value=isdir), \
         mock.patch('os.getcwd', return_value=cur_dir), \
         mock.patch('glob.glob', mock_glob), \
         mock.patch('os.chdir', side_effect=mock_chdir) as chdir_mock:
        filenames = find_c_files_in(new_path)

    assert sorted(filenames) == sorted(expected)

    # find_c_files_in must either never chdir or end up back in old_dir.
    assert chdir_mock.call_args is None or \
           chdir_mock.call_args == mock.call(old_dir)


# Each entry: (testsuite path, testsuite glob, src dir glob, file sizes,
#              per-file scan_file results/exceptions, expected log fragments,
#              expected exception or None, expected (subcases, suite names)).
TESTDATA_8 = [
    (
        os.path.join('dummy', 'path'),
        ['testsuite_file_1', 'testsuite_file_2'],
        ['src_dir_file_1', 'src_dir_file_2', 'src_dir_file_3'],
        {'src_dir_file_1': 1000, 'src_dir_file_2': 2000, 'src_dir_file_3': 0},
        {
            'testsuite_file_1': ScanPathResult(
                matches=['test_a', 'b'],
                warnings='dummy warning',
                has_registered_test_suites=True,
                has_run_registered_test_suites=True,
                has_test_main=True,
                ztest_suite_names=['feature_a']
            ),
            'testsuite_file_2': ValueError,
            'src_dir_file_1': ScanPathResult(
                matches=['test_b', 'a'],
                warnings=None,
                has_registered_test_suites=True,
                has_run_registered_test_suites=True,
                has_test_main=True,
                ztest_suite_names=['feature_b']
            ),
            'src_dir_file_2': ValueError,
            'src_dir_file_3': ValueError,
        },
        [
            'testsuite_file_2: can\'t find: dummy exception',
            'testsuite_file_1: dummy warning',
            'src_dir_file_2: error parsing source file: dummy exception',
        ],
        None,
        (['a', 'b', 'test_a', 'test_b'], ['feature_a', 'feature_b'])
    ),
    (
        os.path.join('dummy', 'path'),
        [],
        ['src_dir_file'],
        {'src_dir_file': 1000},
        {
            'src_dir_file': ScanPathResult(
                matches=['test_b', 'a'],
                warnings=None,
                has_registered_test_suites=True,
                has_run_registered_test_suites=False,
                has_test_main=True,
                ztest_suite_names=['feature_b']
            ),
        },
        [
            'Found call to \'ztest_register_test_suite()\'' \
            ' but no call to \'ztest_run_registered_test_suites()\''
        ],
        TwisterRuntimeError(
            'Found call to \'ztest_register_test_suite()\'' \
            ' but no call to \'ztest_run_registered_test_suites()\''
        ),
        None
    ),
    (
        os.path.join('dummy', 'path'),
        [],
        ['src_dir_file'],
        {'src_dir_file': 100},
        {
            'src_dir_file': ScanPathResult(
                matches=['test_b', 'a'],
                warnings='dummy warning',
                has_registered_test_suites=True,
                has_run_registered_test_suites=True,
                has_test_main=True,
                ztest_suite_names=['feature_b']
            ),
        },
        ['src_dir_file: dummy warning'],
        TwisterRuntimeError('src_dir_file: dummy warning'),
        None
    ),
]


@pytest.mark.parametrize(
    'testsuite_path, testsuite_glob, src_dir_glob, sizes, scanpathresults,' \
    ' expected_logs, expected_exception, expected',
    TESTDATA_8,
    ids=[
        'valid',
        'warning in src dir',
        'register with run error',
    ]
)
def test_scan_testsuite_path(
    caplog,
    testsuite_path,
    testsuite_glob,
    src_dir_glob,
    sizes,
    scanpathresults,
    expected_logs,
    expected_exception,
    expected
):
    """Check subcase/suite-name aggregation, logging and error propagation."""
    src_dir_path = os.path.join(testsuite_path, 'src')

    def mock_fsdp(path, *args, **kwargs):
        return src_dir_path

    def mock_find(path, *args, **kwargs):
        if path == src_dir_path:
            return src_dir_glob
        elif path == testsuite_path:
            return testsuite_glob
        else:
            return []

    def mock_sf(filename, *args, **kwargs):
        # A per-file Exception subclass in scanpathresults means scan_file
        # should fail for that file; otherwise return the canned result.
        if isinstance(scanpathresults[filename], type) and \
           issubclass(scanpathresults[filename], Exception):
            raise scanpathresults[filename]('dummy exception')
        return scanpathresults[filename]

    def mock_stat(filename, *args, **kwargs):
        result = mock.Mock()
        # os.stat may be called on files we did not size;
        # only set st_size for the files this test knows about.
        if filename in sizes:
            type(result).st_size = sizes[filename]

        return result

    with mock.patch('twisterlib.testsuite._find_src_dir_path', mock_fsdp), \
         mock.patch('twisterlib.testsuite.find_c_files_in', mock_find), \
         mock.patch('twisterlib.testsuite.scan_file', mock_sf), \
         mock.patch('os.stat', mock_stat), \
         pytest.raises(type(expected_exception)) if \
         expected_exception else nullcontext() as exception:
        result = scan_testsuite_path(testsuite_path)

    assert all(
        [expected_log in " ".join(caplog.text.split()) \
         for expected_log in expected_logs]
    )

    if expected_exception:
        assert str(expected_exception) == str(exception.value)
        return

    # Order-insensitive comparison of (subcases, ztest_suite_names).
    assert len(result[0]) == len(expected[0])
    assert all(
        [expected_subcase in result[0] for expected_subcase in expected[0]]
    )
    assert len(result[1]) == len(expected[1])
    assert all(
        [expected_subcase in result[1] for expected_subcase in expected[1]]
    )


# Each entry: (test dir path, the one path isdir reports True for,
#              expected src dir result).
TESTDATA_9 = [
    ('dummy/path', 'dummy/path/src', 'dummy/path/src'),
    ('dummy/path', 'dummy/src', 'dummy/src'),
    ('dummy/path', 'another/path', '')
]


@pytest.mark.parametrize(
    'test_dir_path, isdir_path, expected',
    TESTDATA_9,
    ids=['src here', 'src in parent', 'no path']
)
def test_find_src_dir_path(test_dir_path, isdir_path, expected):
    def mock_isdir(path, *args, **kwargs):
        return os.path.normpath(path) == isdir_path

    with mock.patch('os.path.isdir', mock_isdir):
        result = _find_src_dir_path(test_dir_path)

    assert os.path.normpath(result) == expected or result == expected


# Each entry: (testsuite root, suite path, name, yaml data dict,
#              parsed subcases, ztest suite names,
#              expected (testcase name, freeform flag) pairs).
TESTDATA_10 = [
    (
        ZEPHYR_BASE,
        ZEPHYR_BASE,
        'test_a.check_1',
        {
            'testcases': ['testcase1', 'testcase2']
        },
        ['subcase1', 'subcase2'],
        ['testsuite_a', 'testsuite_b'],
        [
            ('test_a.check_1.testcase1', False),
            ('test_a.check_1.testcase2', False)
        ],
    ),
    (
        ZEPHYR_BASE,
        ZEPHYR_BASE,
        'test_a.check_1',
        {},
        ['subcase_repeat', 'subcase_repeat', 'subcase_alone'],
        ['testsuite_a'],
        [
            ('test_a.check_1.subcase_repeat', False),
            ('test_a.check_1.subcase_alone', False)
        ],
    ),
    (
        ZEPHYR_BASE,
        ZEPHYR_BASE,
        'test_a.check_1',
        {},
        [],
        ['testsuite_a', 'testsuite_a'],
        [
            ('test_a.check_1', True)
        ],
    ),
]


@pytest.mark.parametrize(
    'testsuite_root, suite_path, name, data,' \
    ' parsed_subcases, suite_names, expected',
    TESTDATA_10,
    ids=['data', 'subcases', 'empty']
)
def test_testsuite_add_subcases(
    testsuite_root,
    suite_path,
    name,
    data,
    parsed_subcases,
    suite_names,
    expected
):
    """
    Test to check if the unique name is given
    for each testsuite root and workdir
    """

    suite = TestSuite(testsuite_root, suite_path, name)
    suite.add_subcases(data, parsed_subcases, suite_names)

    assert sorted(suite.ztest_suite_names) == sorted(suite_names)

    # Every created testcase must match one expected (name, freeform) pair.
    assert len(suite.testcases) == len(expected)
    for testcase in suite.testcases:
        for expected_value in expected:
            if expected_value[0] == testcase.name and \
               expected_value[1] == testcase.freeform:
                break
        else:
            assert False


# Each entry: (testsuite root, suite path, name, yaml data dict,
#              expected (attribute, value) pairs after load).
TESTDATA_11 = [
#    (
#        ZEPHYR_BASE,
#        ZEPHYR_BASE,
#        'test_a.check_1',
#        {
#            'testcases': ['testcase1', 'testcase2']
#        },
#        [],
#    ),
    (
        ZEPHYR_BASE,
        ZEPHYR_BASE,
        'test_a.check_1',
        {
            'testcases': ['testcase1', 'testcase2'],
            'harness': 'console',
            'harness_config': { 'dummy': 'config' }
        },
        [
            ('harness', 'console'),
            ('harness_config', { 'dummy': 'config' })
        ],
    ),
#    (
#        ZEPHYR_BASE,
#        ZEPHYR_BASE,
#        'test_a.check_1',
#        {
#            'harness': 'console'
#        },
#        Exception,
#    )
]


@pytest.mark.parametrize(
    'testsuite_root, suite_path, name, data, expected',
    TESTDATA_11,
    ids=[
#        'no harness',
        'proper harness',
#        'harness error'
    ]
)
def test_testsuite_load(
    testsuite_root,
    suite_path,
    name,
    data,
    expected
):
    suite = TestSuite(testsuite_root, suite_path, name)

    with pytest.raises(expected) if \
            isinstance(expected, type) and issubclass(expected, Exception) \
            else nullcontext() as exception:
        suite.load(data)

    if exception:
        assert str(exception.value) == 'Harness config error: console harness' \
                                       ' defined without a configuration.'
        return

    for attr_name, value in expected:
        assert getattr(suite, attr_name) == value


def test_testcase_dunders():
    """Check TestCase ordering (__lt__), __str__ and __repr__."""
    case_lesser = TestCase(name='A lesser name')
    case_greater = TestCase(name='a greater name')
    case_greater.status = TwisterStatus.FAIL

    assert case_lesser < case_greater
    assert str(case_greater) == 'a greater name'
    assert repr(case_greater) == f'<TestCase a greater name with {str(TwisterStatus.FAIL)}>'


# Each entry: (testsuite root, suite path, name, expected short name).
# Renamed from a second TESTDATA_11, which shadowed the table above.
TESTDATA_12 = [
    (
        ZEPHYR_BASE + '/scripts/tests/twister/test_data/testsuites',
        ZEPHYR_BASE + '/scripts/tests/twister/test_data/testsuites/tests/test_a',
        'test_a.check_1',
        'test_a.check_1'
    ),
    (
        ZEPHYR_BASE,
        ZEPHYR_BASE,
        'test_a.check_1',
        'test_a.check_1'
    ),
    (
        ZEPHYR_BASE,
        ZEPHYR_BASE + '/scripts/tests/twister/test_data/testsuites/test_b',
        'test_b.check_1',
        'test_b.check_1'
    ),
    (
        os.path.join(ZEPHYR_BASE, 'scripts/tests'),
        os.path.join(ZEPHYR_BASE, 'scripts/tests'),
        'test_b.check_1',
        'test_b.check_1'
    ),
    (
        ZEPHYR_BASE,
        ZEPHYR_BASE,
        'test_a.check_1.check_2',
        'test_a.check_1.check_2'
    ),
]
@pytest.mark.parametrize("testsuite_root, suite_path, name, expected", TESTDATA_12)
def test_get_no_detailed_test_id(testsuite_root, suite_path, name, expected):
    '''Test to check if the name without path is given for each testsuite'''
    suite = TestSuite(testsuite_root, suite_path, name, detailed_test_id=False)
    assert suite.name == expected