#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import json
import logging
import os
import string
import xml.etree.ElementTree as ET
from datetime import datetime
from enum import Enum
from pathlib import Path

from colorama import Fore
from twisterlib.statuses import TwisterStatus

logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)


class ReportStatus(str, Enum):
    def __str__(self):
        return str(self.value)

    ERROR = 'error'
    FAIL = 'failure'
    SKIP = 'skipped'


class ReportingJSONEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, Path):
            return str(obj)
        return super().default(obj)


class Reporting:

    json_filters = {
        'twister.json': {
            'deny_suite': ['footprint']
        },
        'footprint.json': {
            'deny_status': ['FILTER'],
            'deny_suite': ['testcases', 'execution_time', 'recording', 'retries', 'runnable']
        }
    }

    def __init__(self, plan, env) -> None:
        self.plan = plan  # FIXME
        self.instances = plan.instances
        self.platforms = plan.platforms
        self.selected_platforms = plan.selected_platforms
        self.env = env
        self.timestamp = datetime.now().isoformat()
        self.outdir = os.path.abspath(env.options.outdir)
        self.instance_fail_count = plan.instance_fail_count
        self.footprint = None
        self.coverage_status = None

    @staticmethod
    def process_log(log_file):
        filtered_string = ""
        if os.path.exists(log_file):
            with open(log_file, "rb") as f:
                log = f.read().decode("utf-8")
                filtered_string = ''.join(filter(lambda x: x in string.printable, log))

        return filtered_string

    @staticmethod
    def xunit_testcase(
        eleTestsuite,
        name,
        classname,
        status: TwisterStatus,
        ts_status: TwisterStatus,
        reason,
        duration,
        runnable,
        stats,
        log,
        build_only_as_skip
    ):
        fails, passes, errors, skips = stats

        if status in [TwisterStatus.SKIP, TwisterStatus.FILTER]:
            duration = 0

        eleTestcase = ET.SubElement(
            eleTestsuite, "testcase",
            classname=classname,
            name=f"{name}",
            time=f"{duration}")

        if status in [TwisterStatus.SKIP, TwisterStatus.FILTER]:
            skips += 1
            # temporarily add build_only_as_skip to restore existing CI report behaviour
            if ts_status == TwisterStatus.PASS and not runnable:
                tc_type = "build"
            else:
                tc_type = status
            ET.SubElement(eleTestcase, ReportStatus.SKIP, type=f"{tc_type}", message=f"{reason}")
        elif status in [TwisterStatus.FAIL, TwisterStatus.BLOCK]:
            fails += 1
            el = ET.SubElement(eleTestcase, ReportStatus.FAIL, type="failure", message=f"{reason}")
            if log:
                el.text = log
        elif status == TwisterStatus.ERROR:
            errors += 1
            el = ET.SubElement(eleTestcase, ReportStatus.ERROR, type="failure", message=f"{reason}")
            if log:
                el.text = log
        elif status == TwisterStatus.PASS:
            passes += 1
        elif status == TwisterStatus.NOTRUN:
            if build_only_as_skip:
                ET.SubElement(eleTestcase, ReportStatus.SKIP, type="build", message="built only")
                skips += 1
            else:
                passes += 1
        else:
            if status == TwisterStatus.NONE:
                logger.debug(f"{name}: No status")
                ET.SubElement(
                    eleTestcase,
                    ReportStatus.SKIP,
                    type="untested",
                    message="No results captured, testsuite misconfiguration?"
                )
            else:
                logger.error(f"{name}: Unknown status '{status}'")

        return (fails, passes, errors, skips)

    # Generate a report with all testsuites instead of doing this per platform
    def xunit_report_suites(self, json_file, filename):
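        """Write a JUnit XML report with one <testsuite> element per test suite.

        Results are read from ``json_file`` (as produced by ``json_report``)
        and the XML document is written to ``filename``. Unless the
        ``detailed_skipped_report`` option is set, suites whose status is
        FILTER are omitted entirely.
        """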
        json_data = {}
        with open(json_file) as json_results:
            json_data = json.load(json_results)

        env = json_data.get('environment', {})
        version = env.get('zephyr_version', None)

        eleTestsuites = ET.Element('testsuites')
        all_suites = json_data.get("testsuites", [])

        suites_to_report = all_suites
        # do not create entry if everything is filtered out
        if not self.env.options.detailed_skipped_report:
            suites_to_report = list(
                filter(lambda d: TwisterStatus(d.get('status')) != TwisterStatus.FILTER, all_suites)
            )

        for suite in suites_to_report:
            duration = 0
            eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                         name=suite.get("name"), time="0",
                                         timestamp=self.timestamp,
                                         tests="0",
                                         failures="0",
                                         errors="0", skipped="0")
            eleTSPropetries = ET.SubElement(eleTestsuite, 'properties')
            # Multiple 'property' can be added to 'properties'
            # differing by name and value
            ET.SubElement(eleTSPropetries, 'property', name="version", value=version)
            ET.SubElement(eleTSPropetries, 'property', name="platform", value=suite.get("platform"))
            ET.SubElement(eleTSPropetries, 'property', name="architecture", value=suite.get("arch"))

            total = 0
            fails = passes = errors = skips = 0
            handler_time = suite.get('execution_time', 0)
            runnable = suite.get('runnable', 0)
            duration += float(handler_time)
            ts_status = TwisterStatus(suite.get('status'))
            classname = Path(suite.get("name", "")).name
            for tc in suite.get("testcases", []):
                status = TwisterStatus(tc.get('status'))
                reason = tc.get('reason', suite.get('reason', 'Unknown'))
                log = tc.get("log", suite.get("log"))

                tc_duration = tc.get('execution_time', handler_time)
                name = tc.get("identifier")
                fails, passes, errors, skips = self.xunit_testcase(eleTestsuite,
                    name, classname, status, ts_status, reason, tc_duration, runnable,
                    (fails, passes, errors, skips), log, True)

            total = errors + passes + fails + skips

            eleTestsuite.attrib['time'] = f"{duration}"
            eleTestsuite.attrib['failures'] = f"{fails}"
            eleTestsuite.attrib['errors'] = f"{errors}"
            eleTestsuite.attrib['skipped'] = f"{skips}"
            eleTestsuite.attrib['tests'] = f"{total}"

        ET.indent(eleTestsuites, space="\t", level=0)
        result = ET.tostring(eleTestsuites)
        with open(filename, 'wb') as report:
            report.write(result)

    def xunit_report(self, json_file, filename, selected_platform=None, full_report=False):
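        """Write a JUnit XML report with one <testsuite> element per platform.

        Results are read from ``json_file``. With ``selected_platform`` the
        report is limited to that platform; otherwise all selected platforms
        are included. When ``full_report`` is set, individual test cases are
        reported; otherwise each suite is reported as a single entry.
        """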
        if selected_platform:
            selected = [selected_platform]
            logger.info(f"Writing target report for {selected_platform}...")
        else:
            logger.info(f"Writing xunit report {filename}...")
            selected = self.selected_platforms

        json_data = {}
        with open(json_file) as json_results:
            json_data = json.load(json_results)

        env = json_data.get('environment', {})
        version = env.get('zephyr_version', None)

        eleTestsuites = ET.Element('testsuites')
        all_suites = json_data.get("testsuites", [])

        for platform in selected:
            suites = list(filter(lambda d: d['platform'] == platform, all_suites))
            # do not create entry if everything is filtered out
            if not self.env.options.detailed_skipped_report:
                non_filtered = list(
                    filter(lambda d: TwisterStatus(d.get('status')) != TwisterStatus.FILTER, suites)
                )
                if not non_filtered:
                    continue

            duration = 0
            eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                         name=platform,
                                         timestamp=self.timestamp,
                                         time="0",
                                         tests="0",
                                         failures="0",
                                         errors="0", skipped="0")
            eleTSPropetries = ET.SubElement(eleTestsuite, 'properties')
            # Multiple 'property' can be added to 'properties'
            # differing by name and value
            ET.SubElement(eleTSPropetries, 'property', name="version", value=version)

            total = 0
            fails = passes = errors = skips = 0
            for ts in suites:
                handler_time = ts.get('execution_time', 0)
                runnable = ts.get('runnable', 0)
                duration += float(handler_time)

                ts_status = TwisterStatus(ts.get('status'))
                # Do not report filtered testcases
                if (
                    ts_status == TwisterStatus.FILTER
                    and not self.env.options.detailed_skipped_report
                ):
                    continue
                if full_report:
                    classname = Path(ts.get("name", "")).name
                    for tc in ts.get("testcases", []):
                        status = TwisterStatus(tc.get('status'))
                        reason = tc.get('reason', ts.get('reason', 'Unknown'))
                        log = tc.get("log", ts.get("log"))

                        tc_duration = tc.get('execution_time', handler_time)
                        name = tc.get("identifier")
                        fails, passes, errors, skips = self.xunit_testcase(eleTestsuite,
                            name, classname, status, ts_status, reason, tc_duration, runnable,
                            (fails, passes, errors, skips), log, True)
                else:
                    reason = ts.get('reason', 'Unknown')
                    name = ts.get("name")
                    classname = f"{platform}:{name}"
                    log = ts.get("log")
                    fails, passes, errors, skips = self.xunit_testcase(eleTestsuite,
                        name, classname, ts_status, ts_status, reason, handler_time, runnable,
                        (fails, passes, errors, skips), log, False)

            total = errors + passes + fails + skips

            eleTestsuite.attrib['time'] = f"{duration}"
            eleTestsuite.attrib['failures'] = f"{fails}"
            eleTestsuite.attrib['errors'] = f"{errors}"
            eleTestsuite.attrib['skipped'] = f"{skips}"
            eleTestsuite.attrib['tests'] = f"{total}"

        ET.indent(eleTestsuites, space="\t", level=0)
        result = ET.tostring(eleTestsuites)
        with open(filename, 'wb') as report:
            report.write(result)

    def json_report(self, filename, version="NA", platform=None, filters=None):
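        """Write the JSON report to ``filename``.

        The report contains an ``environment`` header and a ``testsuites``
        list built from the current test instances. ``platform`` restricts
        the report to a single platform. ``filters`` holds allow/deny filters
        for statuses and suite properties (see the class-level
        ``json_filters``) and controls what ends up in the output.
        """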
        logger.info(f"Writing JSON report {filename}")

        if self.env.options.report_all_options:
            report_options = vars(self.env.options)
        else:
            report_options = self.env.non_default_options()

        report = {}
        report["environment"] = {"os": os.name,
                                 "zephyr_version": version,
                                 "toolchain": self.env.toolchain,
                                 "commit_date": self.env.commit_date,
                                 "run_date": self.env.run_date,
                                 "options": report_options
                                 }
        suites = []

        for instance in self.instances.values():
            if platform and platform != instance.platform.name:
                continue
            if instance.status == TwisterStatus.FILTER and not self.env.options.report_filtered:
                continue
            if (filters and 'allow_status' in filters and
                    instance.status not in [TwisterStatus[s] for s in filters['allow_status']]):
                logger.debug(
                    f"Skip test suite '{instance.testsuite.name}'"
                    f" status '{instance.status}' not allowed for {filename}"
                )
                continue
            if (filters and 'deny_status' in filters and
                    instance.status in [TwisterStatus[s] for s in filters['deny_status']]):
                logger.debug(
                    f"Skip test suite '{instance.testsuite.name}'"
                    f" status '{instance.status}' denied for {filename}"
                )
                continue
            suite = {}
            handler_log = os.path.join(instance.build_dir, "handler.log")
            pytest_log = os.path.join(instance.build_dir, "twister_harness.log")
            build_log = os.path.join(instance.build_dir, "build.log")
            device_log = os.path.join(instance.build_dir, "device.log")

            handler_time = instance.metrics.get('handler_time', 0)
            used_ram = instance.metrics.get("used_ram", 0)
            used_rom = instance.metrics.get("used_rom", 0)
            available_ram = instance.metrics.get("available_ram", 0)
            available_rom = instance.metrics.get("available_rom", 0)
            suite = {
                "name": instance.testsuite.name,
                "arch": instance.platform.arch,
                "platform": instance.platform.name,
                "path": instance.testsuite.source_dir_rel
            }
            if instance.run_id:
                suite['run_id'] = instance.run_id

            suite["runnable"] = False
            if instance.status != TwisterStatus.FILTER:
                suite["runnable"] = instance.run

            if used_ram:
                suite["used_ram"] = used_ram
            if used_rom:
                suite["used_rom"] = used_rom

            suite['retries'] = instance.retries
            if instance.toolchain:
                suite['toolchain'] = instance.toolchain

            if instance.dut:
                suite["dut"] = instance.dut
            if available_ram:
                suite["available_ram"] = available_ram
            if available_rom:
                suite["available_rom"] = available_rom
            if instance.status in [TwisterStatus.ERROR, TwisterStatus.FAIL]:
                suite['status'] = instance.status
                # FIXME
                if os.path.exists(pytest_log):
                    suite["log"] = self.process_log(pytest_log)
                elif os.path.exists(handler_log):
                    suite["log"] = self.process_log(handler_log)
                elif os.path.exists(device_log):
                    suite["log"] = self.process_log(device_log)
                else:
                    suite["log"] = self.process_log(build_log)

                suite["reason"] = self.get_detailed_reason(instance.reason, suite["log"])
                # update the reason to get more details also in other reports (e.g. junit)
                # where the build log is not available
                instance.reason = suite["reason"]
            elif instance.status == TwisterStatus.FILTER:
                suite["status"] = TwisterStatus.FILTER
                suite["reason"] = instance.reason
            elif instance.status == TwisterStatus.PASS:
                suite["status"] = TwisterStatus.PASS
            elif instance.status == TwisterStatus.SKIP:
                suite["status"] = TwisterStatus.SKIP
                suite["reason"] = instance.reason
            elif instance.status == TwisterStatus.NOTRUN:
                suite["status"] = TwisterStatus.NOTRUN
                suite["reason"] = instance.reason
            else:
                suite["status"] = TwisterStatus.NONE
                suite["reason"] = 'Unknown Instance status.'

            if instance.status != TwisterStatus.NONE:
                suite["execution_time"] = f"{float(handler_time):.2f}"
                suite["build_time"] = f"{float(instance.build_time):.2f}"
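            # Collect per-testcase results. With a single test case, the suite's
            # handler time is reused as that case's duration; otherwise each case
            # reports its own measured duration.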
            testcases = []

            if len(instance.testcases) == 1:
                single_case_duration = f"{float(handler_time):.2f}"
            else:
                single_case_duration = 0

            for case in instance.testcases:
                # freeform was set when no sub testcases were parsed; however,
                # if we discover those at runtime, the fallback testcase won't be
                # needed anymore and can be removed from the output. It does
                # not have a status and would otherwise be reported as skipped.
                if (
                    case.freeform
                    and case.status == TwisterStatus.NONE
                    and len(instance.testcases) > 1
                ):
                    continue
                testcase = {}
                testcase['identifier'] = case.name
                if instance.status != TwisterStatus.NONE:
                    if single_case_duration:
                        testcase['execution_time'] = single_case_duration
                    else:
                        testcase['execution_time'] = f"{float(case.duration):.2f}"

                if case.output != "":
                    testcase['log'] = case.output

                if case.status == TwisterStatus.SKIP:
                    if instance.status == TwisterStatus.FILTER:
                        testcase["status"] = TwisterStatus.FILTER
                    else:
                        testcase["status"] = TwisterStatus.SKIP
                        testcase["reason"] = case.reason or instance.reason
                else:
                    testcase["status"] = case.status
                    if case.reason:
                        testcase["reason"] = case.reason

                testcases.append(testcase)

            suite['testcases'] = testcases

            if instance.recording is not None:
                suite['recording'] = instance.recording

            if (
                instance.status not in [
                    TwisterStatus.NONE,
                    TwisterStatus.ERROR,
                    TwisterStatus.FILTER
                ]
                and self.env.options.create_rom_ram_report
                and self.env.options.footprint_report is not None
            ):
                # Init as empty data preparing for filtering properties.
                suite['footprint'] = {}

            # Pass suite properties through the context filters.
            if filters and 'allow_suite' in filters:
                suite = {k: v for k, v in suite.items() if k in filters['allow_suite']}

            if filters and 'deny_suite' in filters:
                suite = {k: v for k, v in suite.items() if k not in filters['deny_suite']}

            # Compose external data only to these properties which pass filtering.
            if 'footprint' in suite:
                do_all = 'all' in self.env.options.footprint_report
                footprint_files = {'ROM': 'rom.json', 'RAM': 'ram.json'}
                for k, v in footprint_files.items():
                    if do_all or k in self.env.options.footprint_report:
                        footprint_fname = os.path.join(instance.build_dir, v)
                        try:
                            with open(footprint_fname) as footprint_json:
                                logger.debug(f"Collect footprint.{k} for '{instance.name}'")
                                suite['footprint'][k] = json.load(footprint_json)
                        except FileNotFoundError:
                            logger.error(f"Missing footprint.{k} for '{instance.name}'")

            suites.append(suite)

        report["testsuites"] = suites
        with open(filename, 'w') as json_file:
            json.dump(report, json_file, indent=4, separators=(',', ':'), cls=ReportingJSONEncoder)

    def compare_metrics(self, filename):
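        """Compare current footprint metrics against a previous JSON report.

        ``filename`` is an earlier twister JSON report used as the baseline.
        Returns a list of (instance, metric, value, delta, lower_better)
        tuples for every tracked metric that changed.
        """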
        # name, datatype, lower results better
        interesting_metrics = [("used_ram", int, True),
                               ("used_rom", int, True)]

        if not os.path.exists(filename):
            logger.error(f"Cannot compare metrics, {filename} not found")
            return []

        results = []
        saved_metrics = {}
        with open(filename) as fp:
            jt = json.load(fp)
            for ts in jt.get("testsuites", []):
                d = {}
                for m, _, _ in interesting_metrics:
                    d[m] = ts.get(m, 0)
                ts_name = ts.get('name')
                ts_platform = ts.get('platform')
                saved_metrics[(ts_name, ts_platform)] = d

        for instance in self.instances.values():
            mkey = (instance.testsuite.name, instance.platform.name)
            if mkey not in saved_metrics:
                continue
            sm = saved_metrics[mkey]
            for metric, mtype, lower_better in interesting_metrics:
                if metric not in instance.metrics:
                    continue
                if sm[metric] == "":
                    continue
                delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
                if delta == 0:
                    continue
                results.append((instance, metric, instance.metrics.get(metric, 0), delta,
                                lower_better))
        return results

    def footprint_reports(self, report, show_footprint, all_deltas,
                          footprint_threshold, last_metrics):
        if not report:
            return

        logger.debug("running footprint_reports")
        deltas = self.compare_metrics(report)
        warnings = 0
        if deltas:
            for i, metric, value, delta, lower_better in deltas:
                if not all_deltas and ((delta < 0 and lower_better) or
                                       (delta > 0 and not lower_better)):
                    continue

                percentage = 0
                if value > delta:
                    percentage = (float(delta) / float(value - delta))

                if not all_deltas and (percentage < (footprint_threshold / 100.0)):
                    continue

                if show_footprint:
                    logger.log(
                        logging.INFO if all_deltas else logging.WARNING,
                        f"{i.platform.name:<25} {i.testsuite.name:<60} {metric} {delta:<+4},"
                        f" is now {value:6} {percentage:+.2%}"
                    )

                warnings += 1

        if warnings:
            logger.warning("Found {} footprint deltas to {} as a baseline.".format(
                warnings,
                (report if not last_metrics else "the last twister run")))

    def synopsis(self):
        if self.env.options.report_summary == 0:
            count = self.instance_fail_count
            log_txt = f"The following issues were found (showing all {count} items):"
        elif self.env.options.report_summary:
            count = self.env.options.report_summary
            log_txt = "The following issues were found "
            if count > self.instance_fail_count:
                log_txt += (
                    f"(presenting {self.instance_fail_count} out of the {count} items requested):"
                )
            else:
                log_txt += f"(showing {count} of {self.instance_fail_count} items):"
        else:
            count = 10
            log_txt = f"The following issues were found (showing the top {count} items):"
        cnt = 0
        example_instance = None
        detailed_test_id = self.env.options.detailed_test_id
        for instance in self.instances.values():
            if instance.status not in [
                TwisterStatus.PASS,
                TwisterStatus.FILTER,
                TwisterStatus.SKIP,
                TwisterStatus.NOTRUN
            ]:
                cnt += 1
                if cnt == 1:
                    logger.info("-+" * 40)
                    logger.info(log_txt)

                status = instance.status
                if self.env.options.report_summary is not None and \
                   status in [TwisterStatus.ERROR, TwisterStatus.FAIL]:
                    status = Fore.RED + status.upper() + Fore.RESET
                logger.info(
                    f"{cnt}) {instance.testsuite.name} on {instance.platform.name}"
                    f" {status} ({instance.reason})"
                )
                example_instance = instance
            if cnt == count:
                break
        if cnt == 0 and self.env.options.report_summary is not None:
            logger.info("-+" * 40)
            logger.info("No errors/fails found")

        if cnt and example_instance:
            cwd_rel_path = os.path.relpath(example_instance.testsuite.source_dir, start=os.getcwd())

            logger.info("")
            logger.info("To rerun the tests, call twister using the following commandline:")
            extra_parameters = '' if detailed_test_id else ' --no-detailed-test-id'
            logger.info(f"west twister -p <PLATFORM> -s <TEST ID>{extra_parameters}, for example:")
            logger.info("")
            logger.info(
                f"west twister -p {example_instance.platform.name}"
                f" -s {example_instance.testsuite.name}"
                f"{extra_parameters}"
            )
            logger.info("or with west:")
            logger.info(
                f"west build -p -b {example_instance.platform.name} {cwd_rel_path}"
                f" -T {example_instance.testsuite.id}"
            )
            logger.info("-+" * 40)

    def summary(self, results, ignore_unrecognized_sections, duration):
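        """Log a human-readable summary of the run.

        Reports pass/fail/error counts for test configurations and test cases,
        platform coverage, and how many configurations were only built.
        Unless ``ignore_unrecognized_sections`` is set, instances with
        unrecognized binary sections are counted as failures. ``duration`` is
        the total run time in seconds.
        """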
        failed = 0
        run = 0
        for instance in self.instances.values():
            if instance.status == TwisterStatus.FAIL:
                failed += 1
            elif not ignore_unrecognized_sections and instance.metrics.get("unrecognized"):
                logger.error(
                    f"{Fore.RED}FAILED{Fore.RESET}:"
                    f" {instance.name} has unrecognized binary sections:"
                    f" {instance.metrics.get('unrecognized', [])!s}"
                )
                failed += 1

            # FIXME: need a better way to identify executed tests
            handler_time = instance.metrics.get('handler_time', 0)
            if float(handler_time) > 0:
                run += 1

        if results.total and results.total != results.filtered_configs:
            pass_rate = (float(results.passed) / float(results.total - results.filtered_configs))
        else:
            pass_rate = 0

        passed_color = (
            TwisterStatus.get_color(TwisterStatus.FAIL)
            if failed
            else TwisterStatus.get_color(TwisterStatus.PASS)
        )
        unfiltered_configs = results.total - results.filtered_configs
        notrun_number_section = (
            f'{TwisterStatus.get_color(TwisterStatus.NOTRUN)}{results.notrun}{Fore.RESET}'
            if results.notrun
            else f'{results.notrun}'
        )
        failed_number_section = (
            f'{TwisterStatus.get_color(TwisterStatus.FAIL)}{results.failed}{Fore.RESET}'
            if results.failed
            else f'{results.failed}'
        )
        error_number_section = (
            f'{TwisterStatus.get_color(TwisterStatus.ERROR)}{results.error}{Fore.RESET}'
            if results.error
            else f'{results.error}'
        )
        warnings_number_section = (
            f'{Fore.YELLOW}{self.plan.warnings + results.warnings}{Fore.RESET}'
            if (self.plan.warnings + results.warnings)
            else 'no'
        )
        logger.info(
            f"{passed_color}{results.passed} of {unfiltered_configs}{Fore.RESET}"
            f" executed test configurations passed ({pass_rate:.2%}),"
            f" {notrun_number_section} built (not run),"
            f" {failed_number_section} failed,"
            f" {error_number_section} errored,"
            f" with {warnings_number_section} warnings"
            f" in {duration:.2f} seconds."
        )

        total_platforms = len(self.platforms)
        filtered_platforms = set(
            instance.platform.name for instance in self.instances.values()
            if instance.status not in [
                TwisterStatus.FILTER,
                TwisterStatus.NOTRUN,
                TwisterStatus.SKIP
            ]
        )
        # if we are only building, do not report about tests being executed.
        if self.platforms and not self.env.options.build_only:
            executed_cases = (
                results.cases
                - results.filtered_cases
                - results.skipped_cases
                - results.notrun_cases
            )
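            # The rates below are percentages in the 0-100 range (with a literal
            # '%' in the message), unlike pass_rate above, which is a fraction
            # formatted with ':.2%'.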
            pass_rate = 100 * (float(results.passed_cases) / float(executed_cases)) \
                if executed_cases != 0 else 0
            platform_rate = (100 * len(filtered_platforms) / len(self.platforms))
            blocked_after_comma = ", " + str(results.blocked_cases) + " blocked"
            failed_after_comma = ", " + str(results.failed_cases) + " failed"
            error_after_comma = ", " + str(results.error_cases) + " errored"
            none_after_comma = ", " + str(results.none_cases) + " without a status"
            logger.info(
                f'{results.passed_cases} of {executed_cases} executed test cases passed'
                f' ({pass_rate:02.2f}%)'
                f'{blocked_after_comma if results.blocked_cases else ""}'
                f'{failed_after_comma if results.failed_cases else ""}'
                f'{error_after_comma if results.error_cases else ""}'
                f'{none_after_comma if results.none_cases else ""}'
                f' on {len(filtered_platforms)} out of total {total_platforms} platforms'
                f' ({platform_rate:02.2f}%).'
            )
            if results.skipped_cases or results.notrun_cases:
                not_executed = results.skipped_cases + results.notrun_cases
                skipped_after_colon = " " + str(results.skipped_cases) + " skipped"
                notrun_after_comma = (
                    (", " if results.skipped_cases else " ")
                    + str(results.notrun_cases)
                    + " not run (built only)"
                )
                logger.info(
                    f'{not_executed} selected test cases not executed:'
                    f'{skipped_after_colon if results.skipped_cases else ""}'
                    f'{notrun_after_comma if results.notrun_cases else ""}'
                    f'.'
                )

        built_only = results.total - run - results.filtered_configs
        logger.info(
            f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms,"
            f" {TwisterStatus.get_color(TwisterStatus.NOTRUN)}{built_only}{Fore.RESET}"
            " test configurations were only built."
        )

    def save_reports(self, name, suffix, report_dir, no_update, platform_reports):
        if not self.instances:
            return

        logger.info("Saving reports...")
        if name:
            report_name = name
        else:
            report_name = "twister"

        if report_dir:
            os.makedirs(report_dir, exist_ok=True)
            filename = os.path.join(report_dir, report_name)
            outdir = report_dir
        else:
            outdir = self.outdir
            filename = os.path.join(outdir, report_name)

        if suffix:
            filename = f"{filename}_{suffix}"

        if not no_update:
            json_file = filename + ".json"
            self.json_report(json_file, version=self.env.version,
                             filters=self.json_filters['twister.json'])
            if self.env.options.footprint_report is not None:
                self.json_report(filename + "_footprint.json", version=self.env.version,
                                 filters=self.json_filters['footprint.json'])
            self.xunit_report(json_file, filename + ".xml", full_report=False)
            self.xunit_report(json_file, filename + "_report.xml", full_report=True)
            self.xunit_report_suites(json_file, filename + "_suite_report.xml")

            if platform_reports:
                self.target_report(json_file, outdir, suffix)

    def target_report(self, json_file, outdir, suffix):
        platforms = {repr(inst.platform): inst.platform for _, inst in self.instances.items()}
        for platform in platforms.values():
            if suffix:
                filename = os.path.join(outdir, f"{platform.normalized_name}_{suffix}.xml")
                json_platform_file = os.path.join(outdir, f"{platform.normalized_name}_{suffix}")
            else:
                filename = os.path.join(outdir, f"{platform.normalized_name}.xml")
                json_platform_file = os.path.join(outdir, platform.normalized_name)
            self.xunit_report(json_file, filename, platform.name, full_report=True)
            self.json_report(json_platform_file + ".json",
                             version=self.env.version, platform=platform.name,
                             filters=self.json_filters['twister.json'])
            if self.env.options.footprint_report is not None:
                self.json_report(json_platform_file + "_footprint.json",
                                 version=self.env.version, platform=platform.name,
                                 filters=self.json_filters['footprint.json'])

    def get_detailed_reason(self, reason: str, log: str) -> str:
        if reason == 'CMake build failure':
            if error_key := self._parse_cmake_build_failure(log):
                return f"{reason} - {error_key}"
        elif reason == 'Build failure':  # noqa SIM102
            if error_key := self._parse_build_failure(log):
                return f"{reason} - {error_key}"
        return reason

    @staticmethod
    def _parse_cmake_build_failure(log: str) -> str | None:
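        """Extract a short error description from a CMake build log.

        Scans the log for devicetree errors, fatal errors, Kconfig/CMake
        errors and related warnings, and returns a condensed key suitable
        for appending to the failure reason, or None if nothing was
        recognized.
        """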
        last_warning = 'no warning found'
        lines = log.splitlines()
        for i, line in enumerate(lines):
            if "warning: " in line:
                last_warning = line
            elif "devicetree error: " in line:
                return "devicetree error"
            elif "fatal error: " in line:
                return line[line.index('fatal error: ') :].strip()
            elif "error: " in line:  # error: Aborting due to Kconfig warnings
                if "undefined symbol" in last_warning:
                    return last_warning[last_warning.index('undefined symbol') :].strip()
                return last_warning
            elif "CMake Error at" in line:
                for next_line in lines[i + 1 :]:
                    if next_line.strip():
                        return line + ' ' + next_line
                return line
        return None

    @staticmethod
    def _parse_build_failure(log: str) -> str | None:
        last_warning = ''
        lines = log.splitlines()
        for i, line in enumerate(lines):
            if "undefined reference" in line:
                return line[line.index('undefined reference') :].strip()
            elif "error: ld returned" in line:
                if last_warning:
                    return last_warning
                elif "overflowed by" in lines[i - 1]:
                    return "ld.bfd: region overflowed"
                elif "ld.bfd: warning: " in lines[i - 1]:
                    return "ld.bfd:" + lines[i - 1].split("ld.bfd:", 1)[-1]
                return line
            elif "error: " in line:
                return line[line.index('error: ') :].strip()
            elif ": in function " in line:
                last_warning = line[line.index('in function') :].strip()
        return None