#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import os
import json
import logging
from colorama import Fore
import xml.etree.ElementTree as ET
import string
from datetime import datetime

logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)


class Reporting:
    """Produce reports for a twister run.

    Consumes the test plan's instances and emits:
      * a JSON report (``json_report``),
      * xUnit XML reports, either one <testsuite> per platform
        (``xunit_report``) or one per testsuite (``xunit_report_suites``),
      * console summaries (``summary``, ``synopsis``) and footprint deltas
        (``footprint_reports``).
    """

    def __init__(self, plan, env) -> None:
        # FIXME: keep only the plan fields we actually need instead of the
        # whole plan object.
        self.plan = plan
        self.instances = plan.instances
        self.platforms = plan.platforms
        self.selected_platforms = plan.selected_platforms
        self.filtered_platforms = plan.filtered_platforms
        self.env = env
        self.timestamp = datetime.now().isoformat()
        self.outdir = os.path.abspath(env.options.outdir)

    @staticmethod
    def process_log(log_file):
        """Return the contents of *log_file* restricted to printable characters.

        Returns an empty string when the file does not exist.
        """
        filtered_string = ""
        if os.path.exists(log_file):
            with open(log_file, "rb") as f:
                # NOTE(review): raises UnicodeDecodeError on truncated or
                # non-UTF-8 logs; consider errors="replace" if that occurs.
                log = f.read().decode("utf-8")
                filtered_string = ''.join(filter(lambda x: x in string.printable, log))

        return filtered_string

    @staticmethod
    def xunit_testcase(eleTestsuite, name, classname, status, ts_status, reason,
                       duration, runnable, stats, log, build_only_as_skip):
        """Append one <testcase> element to *eleTestsuite* and update counters.

        *stats* is a ``(fails, passes, errors, skips)`` tuple; the updated
        tuple is returned so callers can accumulate totals across test cases.
        *ts_status* is the status of the enclosing testsuite, used to decide
        whether a skip represents a build-only run.
        """
        fails, passes, errors, skips = stats

        # Skipped/filtered cases did not actually run, so report no duration.
        if status in ['skipped', 'filtered']:
            duration = 0

        eleTestcase = ET.SubElement(
            eleTestsuite, "testcase",
            classname=classname,
            name=f"{name}",
            time=f"{duration}")

        if status in ['skipped', 'filtered']:
            skips += 1
            # temporarily add build_only_as_skip to restore existing CI report behaviour
            if ts_status == "passed" and not runnable:
                tc_type = "build"
            else:
                tc_type = status
            ET.SubElement(eleTestcase, 'skipped', type=tc_type, message=f"{reason}")
        elif status in ["failed", "blocked"]:
            fails += 1
            el = ET.SubElement(eleTestcase, 'failure', type="failure", message=f"{reason}")
            if log:
                el.text = log
        elif status == "error":
            errors += 1
            el = ET.SubElement(eleTestcase, 'error', type="failure", message=f"{reason}")
            if log:
                el.text = log
        elif status == 'passed':
            if not runnable and build_only_as_skip:
                ET.SubElement(eleTestcase, 'skipped', type="build", message="built only")
                skips += 1
            else:
                passes += 1
        else:
            if not status:
                logger.debug(f"{name}: No status")
                ET.SubElement(eleTestcase, 'skipped', type="untested",
                              message="No results captured, testsuite misconfiguration?")
            else:
                logger.error(f"{name}: Unknown status '{status}'")

        return (fails, passes, errors, skips)

    # Generate a report with all testsuites instead of doing this per platform
    def xunit_report_suites(self, json_file, filename):
        """Write an xUnit XML file with one <testsuite> element per testsuite.

        Reads the twister JSON report at *json_file* and writes XML to
        *filename*.
        """
        with open(json_file, "r", encoding="utf-8") as json_results:
            json_data = json.load(json_results)

        env = json_data.get('environment', {})
        # NOTE(review): if 'zephyr_version' is absent, value=None will fail at
        # ET.tostring() serialization time — confirm the JSON always has it.
        version = env.get('zephyr_version', None)

        eleTestsuites = ET.Element('testsuites')
        all_suites = json_data.get("testsuites", [])

        suites_to_report = all_suites
        # do not create entry if everything is filtered out
        if not self.env.options.detailed_skipped_report:
            suites_to_report = list(filter(lambda d: d.get('status') != "filtered", all_suites))

        for suite in suites_to_report:
            duration = 0
            # Counter attributes are placeholders here and patched up after
            # all testcases have been walked.
            eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                         name=suite.get("name"), time="0",
                                         timestamp=self.timestamp,
                                         tests="0",
                                         failures="0",
                                         errors="0", skipped="0")
            eleTSPropetries = ET.SubElement(eleTestsuite, 'properties')
            # Multiple 'property' can be added to 'properties'
            # differing by name and value
            ET.SubElement(eleTSPropetries, 'property', name="version", value=version)
            ET.SubElement(eleTSPropetries, 'property', name="platform", value=suite.get("platform"))
            ET.SubElement(eleTSPropetries, 'property', name="architecture", value=suite.get("arch"))

            total = 0
            fails = passes = errors = skips = 0
            handler_time = suite.get('execution_time', 0)
            runnable = suite.get('runnable', 0)
            duration += float(handler_time)
            ts_status = suite.get('status')
            for tc in suite.get("testcases", []):
                status = tc.get('status')
                reason = tc.get('reason', suite.get('reason', 'Unknown'))
                log = tc.get("log", suite.get("log"))

                tc_duration = tc.get('execution_time', handler_time)
                name = tc.get("identifier")
                classname = ".".join(name.split(".")[:2])
                fails, passes, errors, skips = self.xunit_testcase(eleTestsuite,
                    name, classname, status, ts_status, reason, tc_duration, runnable,
                    (fails, passes, errors, skips), log, True)

            total = errors + passes + fails + skips

            eleTestsuite.attrib['time'] = f"{duration}"
            eleTestsuite.attrib['failures'] = f"{fails}"
            eleTestsuite.attrib['errors'] = f"{errors}"
            eleTestsuite.attrib['skipped'] = f"{skips}"
            eleTestsuite.attrib['tests'] = f"{total}"

        result = ET.tostring(eleTestsuites)
        with open(filename, 'wb') as report:
            report.write(result)

    def xunit_report(self, json_file, filename, selected_platform=None, full_report=False):
        """Write an xUnit XML file with one <testsuite> element per platform.

        When *selected_platform* is given only that platform is reported,
        otherwise all selected platforms are. With *full_report* each
        testcase gets its own <testcase>; otherwise one <testcase> per
        testsuite is emitted.
        """
        if selected_platform:
            selected = [selected_platform]
            logger.info(f"Writing target report for {selected_platform}...")
        else:
            logger.info(f"Writing xunit report {filename}...")
            selected = self.selected_platforms

        with open(json_file, "r", encoding="utf-8") as json_results:
            json_data = json.load(json_results)

        env = json_data.get('environment', {})
        version = env.get('zephyr_version', None)

        eleTestsuites = ET.Element('testsuites')
        all_suites = json_data.get("testsuites", [])

        for platform in selected:
            suites = list(filter(lambda d: d['platform'] == platform, all_suites))
            # do not create entry if everything is filtered out
            if not self.env.options.detailed_skipped_report:
                non_filtered = list(filter(lambda d: d.get('status') != "filtered", suites))
                if not non_filtered:
                    continue

            duration = 0
            # Counter attributes are placeholders and patched up below.
            eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                         name=platform,
                                         timestamp=self.timestamp,
                                         time="0",
                                         tests="0",
                                         failures="0",
                                         errors="0", skipped="0")
            eleTSPropetries = ET.SubElement(eleTestsuite, 'properties')
            # Multiple 'property' can be added to 'properties'
            # differing by name and value
            ET.SubElement(eleTSPropetries, 'property', name="version", value=version)

            total = 0
            fails = passes = errors = skips = 0
            for ts in suites:
                handler_time = ts.get('execution_time', 0)
                runnable = ts.get('runnable', 0)
                duration += float(handler_time)

                ts_status = ts.get('status')
                # Do not report filtered testcases
                if ts_status == 'filtered' and not self.env.options.detailed_skipped_report:
                    continue
                if full_report:
                    for tc in ts.get("testcases", []):
                        status = tc.get('status')
                        reason = tc.get('reason', ts.get('reason', 'Unknown'))
                        log = tc.get("log", ts.get("log"))

                        tc_duration = tc.get('execution_time', handler_time)
                        name = tc.get("identifier")
                        classname = ".".join(name.split(".")[:2])
                        fails, passes, errors, skips = self.xunit_testcase(eleTestsuite,
                            name, classname, status, ts_status, reason, tc_duration, runnable,
                            (fails, passes, errors, skips), log, True)
                else:
                    reason = ts.get('reason', 'Unknown')
                    name = ts.get("name")
                    classname = f"{platform}:{name}"
                    log = ts.get("log")
                    fails, passes, errors, skips = self.xunit_testcase(eleTestsuite,
                        name, classname, ts_status, ts_status, reason, duration, runnable,
                        (fails, passes, errors, skips), log, False)

            total = errors + passes + fails + skips

            eleTestsuite.attrib['time'] = f"{duration}"
            eleTestsuite.attrib['failures'] = f"{fails}"
            eleTestsuite.attrib['errors'] = f"{errors}"
            eleTestsuite.attrib['skipped'] = f"{skips}"
            eleTestsuite.attrib['tests'] = f"{total}"

        result = ET.tostring(eleTestsuites)
        with open(filename, 'wb') as report:
            report.write(result)

    def json_report(self, filename, version="NA"):
        """Serialize all test instances into a twister JSON report at *filename*."""
        logger.info(f"Writing JSON report {filename}")
        report = {}
        report["environment"] = {"os": os.name,
                                 "zephyr_version": version,
                                 "toolchain": self.env.toolchain,
                                 "commit_date": self.env.commit_date,
                                 "run_date": self.env.run_date
                                 }
        suites = []

        for instance in self.instances.values():
            handler_log = os.path.join(instance.build_dir, "handler.log")
            build_log = os.path.join(instance.build_dir, "build.log")
            device_log = os.path.join(instance.build_dir, "device.log")

            handler_time = instance.metrics.get('handler_time', 0)
            used_ram = instance.metrics.get("used_ram", 0)
            used_rom = instance.metrics.get("used_rom", 0)
            available_ram = instance.metrics.get("available_ram", 0)
            available_rom = instance.metrics.get("available_rom", 0)
            suite = {
                "name": instance.testsuite.name,
                "arch": instance.platform.arch,
                "platform": instance.platform.name,
                "path": instance.testsuite.source_dir_rel
            }
            if instance.run_id:
                suite['run_id'] = instance.run_id

            suite["runnable"] = False
            if instance.status != 'filtered':
                suite["runnable"] = instance.run

            # Only record footprint metrics that were actually measured.
            if used_ram:
                suite["used_ram"] = used_ram
            if used_rom:
                suite["used_rom"] = used_rom

            suite['retries'] = instance.retries

            if instance.dut:
                suite["dut"] = instance.dut
            if available_ram:
                suite["available_ram"] = available_ram
            if available_rom:
                suite["available_rom"] = available_rom
            if instance.status in ["error", "failed"]:
                suite['status'] = instance.status
                suite["reason"] = instance.reason
                # FIXME: pick the log matching how the instance actually ran
                # instead of probing files in a fixed order.
                if os.path.exists(handler_log):
                    suite["log"] = self.process_log(handler_log)
                elif os.path.exists(device_log):
                    suite["log"] = self.process_log(device_log)
                else:
                    suite["log"] = self.process_log(build_log)
            elif instance.status == 'filtered':
                suite["status"] = "filtered"
                suite["reason"] = instance.reason
            elif instance.status == 'passed':
                suite["status"] = "passed"
            elif instance.status == 'skipped':
                suite["status"] = "skipped"
                suite["reason"] = instance.reason

            if instance.status is not None:
                suite["execution_time"] = f"{float(handler_time):.2f}"

            testcases = []

            # With a single testcase the whole handler time is attributed to
            # it; otherwise per-case durations are used.
            if len(instance.testcases) == 1:
                single_case_duration = f"{float(handler_time):.2f}"
            else:
                single_case_duration = 0

            for case in instance.testcases:
                # freeform was set when no sub testcases were parsed, however,
                # if we discover those at runtime, the fallback testcase wont be
                # needed anymore and can be removed from the output, it does
                # not have a status and would otherwise be reported as skipped.
                if case.freeform and case.status is None and len(instance.testcases) > 1:
                    continue
                testcase = {}
                testcase['identifier'] = case.name
                if instance.status:
                    if single_case_duration:
                        testcase['execution_time'] = single_case_duration
                    else:
                        testcase['execution_time'] = f"{float(case.duration):.2f}"

                if case.output != "":
                    testcase['log'] = case.output

                if case.status == "skipped":
                    if instance.status == "filtered":
                        testcase["status"] = "filtered"
                    else:
                        testcase["status"] = "skipped"
                        testcase["reason"] = case.reason or instance.reason
                else:
                    testcase["status"] = case.status
                    if case.reason:
                        testcase["reason"] = case.reason

                testcases.append(testcase)

            suite['testcases'] = testcases
            suites.append(suite)

        report["testsuites"] = suites
        with open(filename, "wt", encoding="utf-8") as json_file:
            json.dump(report, json_file, indent=4, separators=(',', ':'))

    def compare_metrics(self, filename):
        """Compare current footprint metrics against a saved JSON report.

        Returns a list of ``(instance, metric, value, delta, lower_better)``
        tuples for every metric that changed; empty list when *filename*
        does not exist.
        """
        # name, datatype, lower results better
        interesting_metrics = [("used_ram", int, True),
                               ("used_rom", int, True)]

        if not os.path.exists(filename):
            logger.error("Cannot compare metrics, %s not found" % filename)
            return []

        results = []
        saved_metrics = {}
        with open(filename, encoding="utf-8") as fp:
            jt = json.load(fp)
            for ts in jt.get("testsuites", []):
                d = {}
                for m, _, _ in interesting_metrics:
                    d[m] = ts.get(m, 0)
                ts_name = ts.get('name')
                ts_platform = ts.get('platform')
                saved_metrics[(ts_name, ts_platform)] = d

        for instance in self.instances.values():
            mkey = (instance.testsuite.name, instance.platform.name)
            if mkey not in saved_metrics:
                continue
            sm = saved_metrics[mkey]
            for metric, mtype, lower_better in interesting_metrics:
                if metric not in instance.metrics:
                    continue
                if sm[metric] == "":
                    continue
                delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
                if delta == 0:
                    continue
                results.append((instance, metric, instance.metrics.get(metric, 0), delta,
                                lower_better))
        return results

    def footprint_reports(self, report, show_footprint, all_deltas,
                          footprint_threshold, last_metrics):
        """Log footprint deltas against *report*.

        Only regressions above *footprint_threshold* percent are shown
        unless *all_deltas* is set.
        """
        if not report:
            return

        logger.debug("running footprint_reports")
        deltas = self.compare_metrics(report)
        warnings = 0
        if deltas and show_footprint:
            for i, metric, value, delta, lower_better in deltas:
                # Skip improvements unless all deltas were requested.
                if not all_deltas and ((delta < 0 and lower_better) or
                                       (delta > 0 and not lower_better)):
                    continue

                percentage = 0
                if value > delta:
                    percentage = (float(delta) / float(value - delta))

                if not all_deltas and (percentage < (footprint_threshold / 100.0)):
                    continue

                logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
                    i.platform.name, i.testsuite.name, Fore.YELLOW,
                    "INFO" if all_deltas else "WARNING", Fore.RESET,
                    metric, delta, value, percentage))
                warnings += 1

        if warnings:
            logger.warning("Deltas based on metrics from last %s" %
                           ("release" if not last_metrics else "run"))

    def synopsis(self):
        """Log up to ten problematic instances and a rerun command example."""
        cnt = 0
        example_instance = None
        detailed_test_id = self.env.options.detailed_test_id
        for instance in self.instances.values():
            if instance.status not in ["passed", "filtered", "skipped"]:
                cnt = cnt + 1
                if cnt == 1:
                    logger.info("-+" * 40)
                    logger.info("The following issues were found (showing the top 10 items):")

                logger.info(f"{cnt}) {instance.testsuite.name} on {instance.platform.name} {instance.status} ({instance.reason})")
                example_instance = instance
            if cnt == 10:
                break

        if cnt and example_instance:
            logger.info("")
            logger.info("To rerun the tests, call twister using the following commandline:")
            extra_parameters = '' if detailed_test_id else ' --no-detailed-test-id'
            logger.info(f"west twister -p <PLATFORM> -s <TEST ID>{extra_parameters}, for example:")
            logger.info("")
            logger.info(f"west twister -p {example_instance.platform.name} -s {example_instance.testsuite.name}"
                        f"{extra_parameters}")
            logger.info("or with west:")
            logger.info(f"west build -p -b {example_instance.platform.name} "
                        f"{example_instance.testsuite.source_dir_rel} -T {example_instance.testsuite.id}")
            logger.info("-+" * 40)

    def summary(self, results, unrecognized_sections, duration):
        """Log the end-of-run summary: pass rate, failures, executed counts."""
        failed = 0
        run = 0
        for instance in self.instances.values():
            if instance.status == "failed":
                failed += 1
            elif instance.metrics.get("unrecognized") and not unrecognized_sections:
                logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
                             (Fore.RED, Fore.RESET, instance.name,
                              str(instance.metrics.get("unrecognized", []))))
                failed += 1

            # FIXME: need a better way to identify executed tests
            handler_time = instance.metrics.get('handler_time', 0)
            if float(handler_time) > 0:
                run += 1

        if results.total and results.total != results.skipped_configs:
            pass_rate = (float(results.passed) / float(results.total - results.skipped_configs))
        else:
            pass_rate = 0

        logger.info(
            "{}{} of {}{} test configurations passed ({:.2%}), {}{}{} failed, {}{}{} errored, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
                Fore.RED if failed else Fore.GREEN,
                results.passed,
                results.total,
                Fore.RESET,
                pass_rate,
                Fore.RED if results.failed else Fore.RESET,
                results.failed,
                Fore.RESET,
                Fore.RED if results.error else Fore.RESET,
                results.error,
                Fore.RESET,
                results.skipped_configs,
                Fore.YELLOW if self.plan.warnings else Fore.RESET,
                self.plan.warnings,
                Fore.RESET,
                duration))

        total_platforms = len(self.platforms)
        # if we are only building, do not report about tests being executed.
        if self.platforms and not self.env.options.build_only:
            logger.info("In total {} test cases were executed, {} skipped on {} out of total {} platforms ({:02.2f}%)".format(
                results.cases - results.skipped_cases,
                results.skipped_cases,
                len(self.filtered_platforms),
                total_platforms,
                (100 * len(self.filtered_platforms) / len(self.platforms))
            ))

        built_only = results.total - run - results.skipped_configs
        logger.info(
            f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms, "
            f"{Fore.RED}{built_only}{Fore.RESET} test configurations were only built.")

    def save_reports(self, name, suffix, report_dir, no_update, platform_reports):
        """Write all report files (JSON + xUnit variants) for this run.

        *name* overrides the default "twister" basename, *suffix* is appended
        to filenames, *report_dir* overrides the output directory, *no_update*
        skips writing entirely, and *platform_reports* additionally emits one
        XML report per platform.
        """
        if not self.instances:
            return

        logger.info("Saving reports...")
        if name:
            report_name = name
        else:
            report_name = "twister"

        if report_dir:
            os.makedirs(report_dir, exist_ok=True)
            filename = os.path.join(report_dir, report_name)
            outdir = report_dir
        else:
            outdir = self.outdir
            filename = os.path.join(outdir, report_name)

        if suffix:
            filename = "{}_{}".format(filename, suffix)

        if not no_update:
            json_file = filename + ".json"
            self.json_report(json_file, version=self.env.version)
            self.xunit_report(json_file, filename + ".xml", full_report=False)
            self.xunit_report(json_file, filename + "_report.xml", full_report=True)
            self.xunit_report_suites(json_file, filename + "_suite_report.xml")

            if platform_reports:
                self.target_report(json_file, outdir, suffix)

    def target_report(self, json_file, outdir, suffix):
        """Write one full xUnit report per platform present in the run."""
        platforms = {inst.platform.name for inst in self.instances.values()}
        for platform in platforms:
            if suffix:
                filename = os.path.join(outdir, "{}_{}.xml".format(platform, suffix))
            else:
                filename = os.path.join(outdir, "{}.xml".format(platform))
            self.xunit_report(json_file, filename, platform, full_report=True)