#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

"""Report generation for twister runs: JSON reports, xUnit/JUnit XML reports,
footprint comparison, and console summaries."""

import json
import logging
import os
import string
import xml.etree.ElementTree as ET
from datetime import datetime
from enum import Enum
from pathlib import Path

from colorama import Fore
from twisterlib.statuses import TwisterStatus

logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)


class ReportStatus(str, Enum):
    """XML element tags used for non-passing testcase results in xUnit output."""

    def __str__(self):
        return str(self.value)

    ERROR = 'error'
    FAIL = 'failure'
    SKIP = 'skipped'


class Reporting:
    """Produces all twister output reports from a completed test plan.

    Holds a reference to the test plan (instances, platforms) and the
    environment (options), and writes JSON / xUnit reports plus console
    summaries derived from them.
    """

    # Per-output-file filters applied to each suite dict in json_report():
    # 'allow_*' keeps only listed keys/statuses, 'deny_*' drops them.
    json_filters = {
        'twister.json': {
            'deny_suite': ['footprint']
        },
        'footprint.json': {
            'deny_status': ['FILTER'],
            'deny_suite': ['testcases', 'execution_time', 'recording', 'retries', 'runnable']
        }
    }

    def __init__(self, plan, env) -> None:
        self.plan = plan #FIXME
        self.instances = plan.instances
        self.platforms = plan.platforms
        self.selected_platforms = plan.selected_platforms
        self.env = env
        # Single timestamp shared by all testsuite elements of this run.
        self.timestamp = datetime.now().isoformat()
        self.outdir = os.path.abspath(env.options.outdir)
        self.instance_fail_count = plan.instance_fail_count
        self.footprint = None


    @staticmethod
    def process_log(log_file):
        """Read *log_file* and return its content restricted to printable
        characters; return an empty string if the file does not exist."""
        filtered_string = ""
        if os.path.exists(log_file):
            with open(log_file, "rb") as f:
                log = f.read().decode("utf-8")
                # Strip non-printable bytes so the log embeds safely in XML/JSON.
                filtered_string = ''.join(filter(lambda x: x in string.printable, log))

        return filtered_string


    @staticmethod
    def xunit_testcase(
        eleTestsuite,
        name,
        classname,
        status: TwisterStatus,
        ts_status: TwisterStatus,
        reason,
        duration,
        runnable,
        stats,
        log,
        build_only_as_skip
    ):
        """Append one <testcase> element to *eleTestsuite* and update counters.

        *stats* is the running (fails, passes, errors, skips) tuple; the
        updated tuple is returned. *ts_status* is the parent testsuite status,
        used to label build-only skips. With *build_only_as_skip*, NOTRUN
        cases are reported as skipped instead of passed.
        """
        fails, passes, errors, skips = stats

        # Skipped/filtered cases consumed no execution time.
        if status in [TwisterStatus.SKIP, TwisterStatus.FILTER]:
            duration = 0

        eleTestcase = ET.SubElement(
            eleTestsuite, "testcase",
            classname=classname,
            name=f"{name}",
            time=f"{duration}")

        if status in [TwisterStatus.SKIP, TwisterStatus.FILTER]:
            skips += 1
            # temporarily add build_only_as_skip to restore existing CI report behaviour
            if ts_status == TwisterStatus.PASS and not runnable:
                tc_type = "build"
            else:
                tc_type = status
            ET.SubElement(eleTestcase, ReportStatus.SKIP, type=f"{tc_type}", message=f"{reason}")
        elif status in [TwisterStatus.FAIL, TwisterStatus.BLOCK]:
            fails += 1
            el = ET.SubElement(eleTestcase, ReportStatus.FAIL, type="failure", message=f"{reason}")
            if log:
                el.text = log
        elif status == TwisterStatus.ERROR:
            errors += 1
            el = ET.SubElement(eleTestcase, ReportStatus.ERROR, type="failure", message=f"{reason}")
            if log:
                el.text = log
        elif status == TwisterStatus.PASS:
            passes += 1
        elif status == TwisterStatus.NOTRUN:
            if build_only_as_skip:
                ET.SubElement(eleTestcase, ReportStatus.SKIP, type="build", message="built only")
                skips += 1
            else:
                passes += 1
        else:
            if status == TwisterStatus.NONE:
                logger.debug(f"{name}: No status")
                ET.SubElement(
                    eleTestcase,
                    ReportStatus.SKIP,
                    type="untested",
                    message="No results captured, testsuite misconfiguration?"
                )
            else:
                logger.error(f"{name}: Unknown status '{status}'")

        return (fails, passes, errors, skips)

    # Generate a report with all testsuites instead of doing this per platform
    def xunit_report_suites(self, json_file, filename):
        """Write an xUnit XML file with one <testsuite> per twister testsuite,
        reading results from the JSON report *json_file*."""

        json_data = {}
        with open(json_file) as json_results:
            json_data = json.load(json_results)


        env = json_data.get('environment', {})
        version = env.get('zephyr_version', None)

        eleTestsuites = ET.Element('testsuites')
        all_suites = json_data.get("testsuites", [])

        suites_to_report = all_suites
        # do not create entry if everything is filtered out
        if not self.env.options.detailed_skipped_report:
            suites_to_report = list(
                filter(lambda d: TwisterStatus(d.get('status')) != TwisterStatus.FILTER, all_suites)
            )

        for suite in suites_to_report:
            duration = 0
            # Counter attributes start at zero and are patched after the
            # testcases have been accumulated below.
            eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                         name=suite.get("name"), time="0",
                                         timestamp = self.timestamp,
                                         tests="0",
                                         failures="0",
                                         errors="0", skipped="0")
            eleTSPropetries = ET.SubElement(eleTestsuite, 'properties')
            # Multiple 'property' can be added to 'properties'
            # differing by name and value
            ET.SubElement(eleTSPropetries, 'property', name="version", value=version)
            ET.SubElement(eleTSPropetries, 'property', name="platform", value=suite.get("platform"))
            ET.SubElement(eleTSPropetries, 'property', name="architecture", value=suite.get("arch"))

            total = 0
            fails = passes = errors = skips = 0
            handler_time = suite.get('execution_time', 0)
            runnable = suite.get('runnable', 0)
            duration += float(handler_time)
            ts_status = TwisterStatus(suite.get('status'))
            classname = Path(suite.get("name","")).name
            for tc in suite.get("testcases", []):
                status = TwisterStatus(tc.get('status'))
                reason = tc.get('reason', suite.get('reason', 'Unknown'))
                log = tc.get("log", suite.get("log"))

                tc_duration = tc.get('execution_time', handler_time)
                name = tc.get("identifier")
                fails, passes, errors, skips = self.xunit_testcase(eleTestsuite,
                    name, classname, status, ts_status, reason, tc_duration, runnable,
                    (fails, passes, errors, skips), log, True)

            total = errors + passes + fails + skips

            eleTestsuite.attrib['time'] = f"{duration}"
            eleTestsuite.attrib['failures'] = f"{fails}"
            eleTestsuite.attrib['errors'] = f"{errors}"
            eleTestsuite.attrib['skipped'] = f"{skips}"
            eleTestsuite.attrib['tests'] = f"{total}"

        ET.indent(eleTestsuites, space="\t", level=0)
        result = ET.tostring(eleTestsuites)
        with open(filename, 'wb') as report:
            report.write(result)

    def xunit_report(self, json_file, filename, selected_platform=None, full_report=False):
        """Write an xUnit XML file with one <testsuite> per platform.

        With *full_report*, one <testcase> per individual testcase is emitted;
        otherwise one per testsuite. *selected_platform* restricts the report
        to a single platform (used by target_report()).
        """
        if selected_platform:
            selected = [selected_platform]
            logger.info(f"Writing target report for {selected_platform}...")
        else:
            logger.info(f"Writing xunit report {filename}...")
            selected = self.selected_platforms

        json_data = {}
        with open(json_file) as json_results:
            json_data = json.load(json_results)


        env = json_data.get('environment', {})
        version = env.get('zephyr_version', None)

        eleTestsuites = ET.Element('testsuites')
        all_suites = json_data.get("testsuites", [])

        for platform in selected:
            suites = list(filter(lambda d: d['platform'] == platform, all_suites))
            # do not create entry if everything is filtered out
            if not self.env.options.detailed_skipped_report:
                non_filtered = list(
                    filter(lambda d: TwisterStatus(d.get('status')) != TwisterStatus.FILTER, suites)
                )
                if not non_filtered:
                    continue

            duration = 0
            # Counter attributes start at zero and are patched after the loop.
            eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                         name=platform,
                                         timestamp = self.timestamp,
                                         time="0",
                                         tests="0",
                                         failures="0",
                                         errors="0", skipped="0")
            eleTSPropetries = ET.SubElement(eleTestsuite, 'properties')
            # Multiple 'property' can be added to 'properties'
            # differing by name and value
            ET.SubElement(eleTSPropetries, 'property', name="version", value=version)

            total = 0
            fails = passes = errors = skips = 0
            for ts in suites:
                handler_time = ts.get('execution_time', 0)
                runnable = ts.get('runnable', 0)
                duration += float(handler_time)

                ts_status = TwisterStatus(ts.get('status'))
                # Do not report filtered testcases
                if (
                    ts_status == TwisterStatus.FILTER
                    and not self.env.options.detailed_skipped_report
                ):
                    continue
                if full_report:
                    classname = Path(ts.get("name","")).name
                    for tc in ts.get("testcases", []):
                        status = TwisterStatus(tc.get('status'))
                        reason = tc.get('reason', ts.get('reason', 'Unknown'))
                        log = tc.get("log", ts.get("log"))

                        tc_duration = tc.get('execution_time', handler_time)
                        name = tc.get("identifier")
                        fails, passes, errors, skips = self.xunit_testcase(eleTestsuite,
                            name, classname, status, ts_status, reason, tc_duration, runnable,
                            (fails, passes, errors, skips), log, True)
                else:
                    reason = ts.get('reason', 'Unknown')
                    name = ts.get("name")
                    classname = f"{platform}:{name}"
                    log = ts.get("log")
                    fails, passes, errors, skips = self.xunit_testcase(eleTestsuite,
                        name, classname, ts_status, ts_status, reason, handler_time, runnable,
                        (fails, passes, errors, skips), log, False)

            total = errors + passes + fails + skips

            eleTestsuite.attrib['time'] = f"{duration}"
            eleTestsuite.attrib['failures'] = f"{fails}"
            eleTestsuite.attrib['errors'] = f"{errors}"
            eleTestsuite.attrib['skipped'] = f"{skips}"
            eleTestsuite.attrib['tests'] = f"{total}"

        ET.indent(eleTestsuites, space="\t", level=0)
        result = ET.tostring(eleTestsuites)
        with open(filename, 'wb') as report:
            report.write(result)

    def json_report(self, filename, version="NA", platform=None, filters=None):
        """Write the JSON report for the run to *filename*.

        *platform* restricts output to one platform; *filters* is one of the
        json_filters entries controlling which statuses/keys are emitted.
        """
        logger.info(f"Writing JSON report {filename}")

        if self.env.options.report_all_options:
            report_options = vars(self.env.options)
        else:
            report_options = self.env.non_default_options()

        # Resolve known JSON serialization problems.
        # Path objects (PosixPath/WindowsPath) are not JSON-serializable.
        for k,v in report_options.items():
            report_options[k] = str(v) if isinstance(v, Path) else v

        report = {}
        report["environment"] = {"os": os.name,
                                 "zephyr_version": version,
                                 "toolchain": self.env.toolchain,
                                 "commit_date": self.env.commit_date,
                                 "run_date": self.env.run_date,
                                 "options": report_options
                                 }
        suites = []

        for instance in self.instances.values():
            if platform and platform != instance.platform.name:
                continue
            if instance.status == TwisterStatus.FILTER and not self.env.options.report_filtered:
                continue
            if (filters and 'allow_status' in filters and \
                instance.status not in [TwisterStatus[s] for s in filters['allow_status']]):
                logger.debug(
                    f"Skip test suite '{instance.testsuite.name}'"
                    f" status '{instance.status}' not allowed for {filename}"
                )
                continue
            if (filters and 'deny_status' in filters and \
                instance.status in [TwisterStatus[s] for s in filters['deny_status']]):
                logger.debug(
                    f"Skip test suite '{instance.testsuite.name}'"
                    f" status '{instance.status}' denied for {filename}"
                )
                continue
            handler_log = os.path.join(instance.build_dir, "handler.log")
            pytest_log = os.path.join(instance.build_dir, "twister_harness.log")
            build_log = os.path.join(instance.build_dir, "build.log")
            device_log = os.path.join(instance.build_dir, "device.log")

            handler_time = instance.metrics.get('handler_time', 0)
            used_ram = instance.metrics.get ("used_ram", 0)
            used_rom = instance.metrics.get("used_rom",0)
            available_ram = instance.metrics.get("available_ram", 0)
            available_rom = instance.metrics.get("available_rom", 0)
            suite = {
                "name": instance.testsuite.name,
                "arch": instance.platform.arch,
                "platform": instance.platform.name,
                "path": instance.testsuite.source_dir_rel
            }
            if instance.run_id:
                suite['run_id'] = instance.run_id

            suite["runnable"] = False
            if instance.status != TwisterStatus.FILTER:
                suite["runnable"] = instance.run

            if used_ram:
                suite["used_ram"] = used_ram
            if used_rom:
                suite["used_rom"] = used_rom

            suite['retries'] = instance.retries

            if instance.dut:
                suite["dut"] = instance.dut
            if available_ram:
                suite["available_ram"] = available_ram
            if available_rom:
                suite["available_rom"] = available_rom
            if instance.status in [TwisterStatus.ERROR, TwisterStatus.FAIL]:
                suite['status'] = instance.status
                suite["reason"] = instance.reason
                # FIXME
                # Attach the most specific log available for the failure.
                if os.path.exists(pytest_log):
                    suite["log"] = self.process_log(pytest_log)
                elif os.path.exists(handler_log):
                    suite["log"] = self.process_log(handler_log)
                elif os.path.exists(device_log):
                    suite["log"] = self.process_log(device_log)
                else:
                    suite["log"] = self.process_log(build_log)
            elif instance.status == TwisterStatus.FILTER:
                suite["status"] = TwisterStatus.FILTER
                suite["reason"] = instance.reason
            elif instance.status == TwisterStatus.PASS:
                suite["status"] = TwisterStatus.PASS
            elif instance.status == TwisterStatus.SKIP:
                suite["status"] = TwisterStatus.SKIP
                suite["reason"] = instance.reason
            elif instance.status == TwisterStatus.NOTRUN:
                suite["status"] = TwisterStatus.NOTRUN
                suite["reason"] = instance.reason
            else:
                suite["status"] = TwisterStatus.NONE
                suite["reason"] = 'Unknown Instance status.'

            if instance.status != TwisterStatus.NONE:
                suite["execution_time"] = f"{float(handler_time):.2f}"
            suite["build_time"] = f"{float(instance.build_time):.2f}"

            testcases = []

            if len(instance.testcases) == 1:
                single_case_duration = f"{float(handler_time):.2f}"
            else:
                single_case_duration = 0

            for case in instance.testcases:
                # freeform was set when no sub testcases were parsed, however,
                # if we discover those at runtime, the fallback testcase wont be
                # needed anymore and can be removed from the output, it does
                # not have a status and would otherwise be reported as skipped.
                if (
                    case.freeform
                    and case.status == TwisterStatus.NONE
                    and len(instance.testcases) > 1
                ):
                    continue
                testcase = {}
                testcase['identifier'] = case.name
                if instance.status != TwisterStatus.NONE:
                    if single_case_duration:
                        testcase['execution_time'] = single_case_duration
                    else:
                        testcase['execution_time'] = f"{float(case.duration):.2f}"

                if case.output != "":
                    testcase['log'] = case.output

                if case.status == TwisterStatus.SKIP:
                    if instance.status == TwisterStatus.FILTER:
                        testcase["status"] = TwisterStatus.FILTER
                    else:
                        testcase["status"] = TwisterStatus.SKIP
                        testcase["reason"] = case.reason or instance.reason
                else:
                    testcase["status"] = case.status
                    if case.reason:
                        testcase["reason"] = case.reason

                testcases.append(testcase)

            suite['testcases'] = testcases

            if instance.recording is not None:
                suite['recording'] = instance.recording

            if (
                instance.status not in [
                    TwisterStatus.NONE,
                    TwisterStatus.ERROR,
                    TwisterStatus.FILTER
                ]
                and self.env.options.create_rom_ram_report
                and self.env.options.footprint_report is not None
            ):
                # Init as empty data preparing for filtering properties.
                suite['footprint'] = {}

            # Pass suite properties through the context filters.
            if filters and 'allow_suite' in filters:
                suite = {k:v for k,v in suite.items() if k in filters['allow_suite']}

            if filters and 'deny_suite' in filters:
                suite = {k:v for k,v in suite.items() if k not in filters['deny_suite']}

            # Compose external data only to these properties which pass filtering.
            if 'footprint' in suite:
                do_all = 'all' in self.env.options.footprint_report
                footprint_files = { 'ROM': 'rom.json', 'RAM': 'ram.json' }
                for k,v in footprint_files.items():
                    if do_all or k in self.env.options.footprint_report:
                        footprint_fname = os.path.join(instance.build_dir, v)
                        try:
                            with open(footprint_fname) as footprint_json:
                                logger.debug(f"Collect footprint.{k} for '{instance.name}'")
                                suite['footprint'][k] = json.load(footprint_json)
                        except FileNotFoundError:
                            logger.error(f"Missing footprint.{k} for '{instance.name}'")
            #
            #

            suites.append(suite)

        report["testsuites"] = suites
        with open(filename, 'w') as json_file:
            json.dump(report, json_file, indent=4, separators=(',',':'))


    def compare_metrics(self, filename):
        """Compare current instance metrics against a previous JSON report.

        Returns a list of (instance, metric, value, delta, lower_better)
        tuples for metrics that changed; empty list if *filename* is missing.
        """
        # name, datatype, lower results better
        interesting_metrics = [("used_ram", int, True),
                               ("used_rom", int, True)]

        if not os.path.exists(filename):
            logger.error(f"Cannot compare metrics, {filename} not found")
            return []

        results = []
        saved_metrics = {}
        with open(filename) as fp:
            jt = json.load(fp)
            for ts in jt.get("testsuites", []):
                d = {}
                for m, _, _ in interesting_metrics:
                    d[m] = ts.get(m, 0)
                ts_name = ts.get('name')
                ts_platform = ts.get('platform')
                saved_metrics[(ts_name, ts_platform)] = d

        for instance in self.instances.values():
            mkey = (instance.testsuite.name, instance.platform.name)
            if mkey not in saved_metrics:
                continue
            sm = saved_metrics[mkey]
            for metric, mtype, lower_better in interesting_metrics:
                if metric not in instance.metrics:
                    continue
                if sm[metric] == "":
                    continue
                delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
                if delta == 0:
                    continue
                results.append((instance, metric, instance.metrics.get(metric, 0), delta,
                                lower_better))
        return results

    def footprint_reports(self, report, show_footprint, all_deltas,
                          footprint_threshold, last_metrics):
        """Log footprint (RAM/ROM) deltas against the baseline *report*.

        With *all_deltas*, improvements are reported too; otherwise only
        regressions above *footprint_threshold* percent are shown.
        """
        if not report:
            return

        logger.debug("running footprint_reports")
        deltas = self.compare_metrics(report)
        warnings = 0
        if deltas:
            for i, metric, value, delta, lower_better in deltas:
                if not all_deltas and ((delta < 0 and lower_better) or
                                       (delta > 0 and not lower_better)):
                    continue

                percentage = 0
                if value > delta:
                    percentage = (float(delta) / float(value - delta))

                if not all_deltas and (percentage < (footprint_threshold / 100.0)):
                    continue

                if show_footprint:
                    logger.log(
                        logging.INFO if all_deltas else logging.WARNING,
                        f"{i.platform.name:<25} {i.testsuite.name:<60} {metric} {delta:<+4},"
                        f" is now {value:6} {percentage:+.2%}"
                    )

                warnings += 1

        if warnings:
            logger.warning("Found {} footprint deltas to {} as a baseline.".format(
                warnings,
                (report if not last_metrics else "the last twister run.")))

    def synopsis(self):
        """Log a summary of failing/erroring instances and an example
        command line for re-running one of them."""
        if self.env.options.report_summary == 0:
            count = self.instance_fail_count
            log_txt = f"The following issues were found (showing the all {count} items):"
        elif self.env.options.report_summary:
            count = self.env.options.report_summary
            log_txt = "The following issues were found "
            if count > self.instance_fail_count:
                log_txt += (
                    f"(presenting {self.instance_fail_count} out of the {count} items requested):"
                )
            else:
                log_txt += f"(showing the {count} of {self.instance_fail_count} items):"
        else:
            count = 10
            log_txt = f"The following issues were found (showing the top {count} items):"
        cnt = 0
        example_instance = None
        detailed_test_id = self.env.options.detailed_test_id
        for instance in self.instances.values():
            if instance.status not in [
                TwisterStatus.PASS,
                TwisterStatus.FILTER,
                TwisterStatus.SKIP,
                TwisterStatus.NOTRUN
            ]:
                cnt += 1
                if cnt == 1:
                    logger.info("-+" * 40)
                    logger.info(log_txt)

                status = instance.status
                if self.env.options.report_summary is not None and \
                   status in [TwisterStatus.ERROR, TwisterStatus.FAIL]:
                    status = Fore.RED + status.upper() + Fore.RESET
                logger.info(
                    f"{cnt}) {instance.testsuite.name} on {instance.platform.name}"
                    f" {status} ({instance.reason})"
                )
                example_instance = instance
            if cnt == count:
                break
        if cnt == 0 and self.env.options.report_summary is not None:
            logger.info("-+" * 40)
            logger.info("No errors/fails found")

        if cnt and example_instance:
            cwd_rel_path = os.path.relpath(example_instance.testsuite.source_dir, start=os.getcwd())

            logger.info("")
            logger.info("To rerun the tests, call twister using the following commandline:")
            extra_parameters = '' if detailed_test_id else ' --no-detailed-test-id'
            logger.info(f"west twister -p <PLATFORM> -s <TEST ID>{extra_parameters}, for example:")
            logger.info("")
            logger.info(
                f"west twister -p {example_instance.platform.name}"
                f" -s {example_instance.testsuite.name}"
                f"{extra_parameters}"
            )
            logger.info("or with west:")
            logger.info(
                f"west build -p -b {example_instance.platform.name} {cwd_rel_path}"
                f" -T {example_instance.testsuite.id}"
            )
            logger.info("-+" * 40)

    def summary(self, results, ignore_unrecognized_sections, duration):
        """Log the final run summary: configuration and testcase pass rates,
        platform coverage, and executed-vs-built counts."""
        failed = 0
        run = 0
        for instance in self.instances.values():
            if instance.status == TwisterStatus.FAIL:
                failed += 1
            elif not ignore_unrecognized_sections and instance.metrics.get("unrecognized"):
                logger.error(
                    f"{Fore.RED}FAILED{Fore.RESET}:"
                    f" {instance.name} has unrecognized binary sections:"
                    f" {instance.metrics.get('unrecognized', [])!s}"
                )
                failed += 1

            # FIXME: need a better way to identify executed tests
            handler_time = instance.metrics.get('handler_time', 0)
            if float(handler_time) > 0:
                run += 1

        if results.total and results.total != results.filtered_configs:
            pass_rate = (float(results.passed) / float(results.total - results.filtered_configs))
        else:
            pass_rate = 0

        passed_color = (
            TwisterStatus.get_color(TwisterStatus.FAIL)
            if failed
            else TwisterStatus.get_color(TwisterStatus.PASS)
        )
        unfiltered_configs = results.total - results.filtered_configs
        notrun_number_section = (
            f'{TwisterStatus.get_color(TwisterStatus.NOTRUN)}{results.notrun}{Fore.RESET}'
            if results.notrun
            else f'{results.notrun}'
        )
        failed_number_section = (
            f'{TwisterStatus.get_color(TwisterStatus.FAIL)}{results.failed}{Fore.RESET}'
            if results.failed
            else f'{results.failed}'
        )
        error_number_section = (
            f'{TwisterStatus.get_color(TwisterStatus.ERROR)}{results.error}{Fore.RESET}'
            if results.error
            else f'{results.error}'
        )
        warnings_number_section = (
            f'{Fore.YELLOW}{self.plan.warnings + results.warnings}{Fore.RESET}'
            if (self.plan.warnings + results.warnings)
            else 'no'
        )
        logger.info(
            f"{passed_color}{results.passed} of {unfiltered_configs}{Fore.RESET}"
            f" executed test configurations passed ({pass_rate:.2%}),"
            f" {notrun_number_section} built (not run),"
            f" {failed_number_section} failed,"
            f" {error_number_section} errored,"
            f" with {warnings_number_section} warnings"
            f" in {duration:.2f} seconds."
        )

        total_platforms = len(self.platforms)
        filtered_platforms = set(
            instance.platform.name for instance in self.instances.values()
            if instance.status not in [
                TwisterStatus.FILTER,
                TwisterStatus.NOTRUN,
                TwisterStatus.SKIP
            ]
        )
        # if we are only building, do not report about tests being executed.
        if self.platforms and not self.env.options.build_only:
            executed_cases = (
                results.cases
                - results.filtered_cases
                - results.skipped_cases
                - results.notrun_cases
            )
            pass_rate = 100 * (float(results.passed_cases) / float(executed_cases)) \
                if executed_cases != 0 else 0
            platform_rate = (100 * len(filtered_platforms) / len(self.platforms))
            blocked_after_comma = ", " + str(results.blocked_cases) + " blocked"
            failed_after_comma = ", " + str(results.failed_cases) + " failed"
            error_after_comma = ", " + str(results.error_cases) + " errored"
            none_after_comma = ", " + str(results.none_cases) + " without a status"
            logger.info(
                f'{results.passed_cases} of {executed_cases} executed test cases passed'
                f' ({pass_rate:02.2f}%)'
                f'{blocked_after_comma if results.blocked_cases else ""}'
                f'{failed_after_comma if results.failed_cases else ""}'
                f'{error_after_comma if results.error_cases else ""}'
                f'{none_after_comma if results.none_cases else ""}'
                f' on {len(filtered_platforms)} out of total {total_platforms} platforms'
                f' ({platform_rate:02.2f}%).'
            )
            if results.skipped_cases or results.notrun_cases:
                not_executed = results.skipped_cases + results.notrun_cases
                skipped_after_colon = " " + str(results.skipped_cases) + " skipped"
                notrun_after_comma = (
                    (", " if results.skipped_cases else " ")
                    + str(results.notrun_cases)
                    + " not run (built only)"
                )
                logger.info(
                    f'{not_executed} selected test cases not executed:' \
                    f'{skipped_after_colon if results.skipped_cases else ""}' \
                    f'{notrun_after_comma if results.notrun_cases else ""}' \
                    f'.'
                )

        built_only = results.total - run - results.filtered_configs
        logger.info(
            f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms,"
            f" {TwisterStatus.get_color(TwisterStatus.NOTRUN)}{built_only}{Fore.RESET}"
            " test configurations were only built."
        )

    def save_reports(self, name, suffix, report_dir, no_update, platform_reports):
        """Write all enabled reports (JSON, xUnit variants, per-platform).

        *name* overrides the default 'twister' basename, *suffix* is appended
        to filenames, *no_update* skips writing, *platform_reports* also emits
        per-platform reports via target_report().
        """
        if not self.instances:
            return

        logger.info("Saving reports...")
        if name:
            report_name = name
        else:
            report_name = "twister"

        if report_dir:
            os.makedirs(report_dir, exist_ok=True)
            filename = os.path.join(report_dir, report_name)
            outdir = report_dir
        else:
            outdir = self.outdir
            filename = os.path.join(outdir, report_name)

        if suffix:
            # Keep the computed path; only append the suffix to the basename.
            filename = f"{filename}_{suffix}"

        if not no_update:
            json_file = filename + ".json"
            self.json_report(json_file, version=self.env.version,
                             filters=self.json_filters['twister.json'])
            if self.env.options.footprint_report is not None:
                self.json_report(filename + "_footprint.json", version=self.env.version,
                                 filters=self.json_filters['footprint.json'])
            self.xunit_report(json_file, filename + ".xml", full_report=False)
            self.xunit_report(json_file, filename + "_report.xml", full_report=True)
            self.xunit_report_suites(json_file, filename + "_suite_report.xml")

            if platform_reports:
                self.target_report(json_file, outdir, suffix)


    def target_report(self, json_file, outdir, suffix):
        """Write one xUnit XML and one JSON report per platform used in the run."""
        platforms = {repr(inst.platform):inst.platform for _, inst in self.instances.items()}
        for platform in platforms.values():
            if suffix:
                filename = os.path.join(outdir,f"{platform.normalized_name}_{suffix}.xml")
                json_platform_file = os.path.join(outdir,f"{platform.normalized_name}_{suffix}")
            else:
                filename = os.path.join(outdir,f"{platform.normalized_name}.xml")
                json_platform_file = os.path.join(outdir, platform.normalized_name)
            self.xunit_report(json_file, filename, platform.name, full_report=True)
            self.json_report(json_platform_file + ".json",
                             version=self.env.version, platform=platform.name,
                             filters=self.json_filters['twister.json'])
            if self.env.options.footprint_report is not None:
                self.json_report(json_platform_file + "_footprint.json",
                                 version=self.env.version, platform=platform.name,
                                 filters=self.json_filters['footprint.json'])