Lines Matching +full:0 +full:f
68 with open(log_file, "rb") as f:
69 log = f.read().decode("utf-8")
92 duration = 0
97 name=f"{name}",
98 time=f"{duration}")
107 ET.SubElement(eleTestcase, ReportStatus.SKIP, type=f"{tc_type}", message=f"{reason}")
110 el = ET.SubElement(eleTestcase, ReportStatus.FAIL, type="failure", message=f"{reason}")
115 el = ET.SubElement(eleTestcase, ReportStatus.ERROR, type="failure", message=f"{reason}")
128 logger.debug(f"{name}: No status")
136 logger.error(f"{name}: Unknown status '{status}'")
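
The matches at 92-136 come from the code that emits one <testcase> element per test case and attaches its outcome as a child element. A minimal sketch of that ElementTree pattern, assuming the ReportStatus constants resolve to the JUnit tag names "skipped", "failure" and "error" (the helper name and status strings here are illustrative):

    import xml.etree.ElementTree as ET

    def add_testcase(parent, name, duration, status, reason=""):
        # One <testcase> per test; "time" is conventionally seconds as a string.
        tc = ET.SubElement(parent, "testcase", name=name, time=f"{duration}")
        # A child element encodes the outcome; JUnit consumers recognise
        # the "skipped", "failure" and "error" tags.
        if status == "skipped":
            ET.SubElement(tc, "skipped", type="skipped", message=reason)
        elif status == "failed":
            ET.SubElement(tc, "failure", type="failure", message=reason)
        elif status == "error":
            ET.SubElement(tc, "error", type="failure", message=reason)
        # A passing case needs no child element at all.
        return tc
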
162 duration = 0
164 name=suite.get("name"), time="0",
166 tests="0",
167 failures="0",
168 errors="0", skipped="0")
176 total = 0
177 fails = passes = errors = skips = 0
178 handler_time = suite.get('execution_time', 0)
179 runnable = suite.get('runnable', 0)
196 eleTestsuite.attrib['time'] = f"{duration}"
197 eleTestsuite.attrib['failures'] = f"{fails}"
198 eleTestsuite.attrib['errors'] = f"{errors}"
199 eleTestsuite.attrib['skipped'] = f"{skips}"
200 eleTestsuite.attrib['tests'] = f"{total}"
202 ET.indent(eleTestsuites, space="\t", level=0)
210 logger.info(f"Writing target report for {selected_platform}...")
212 logger.info(f"Writing xunit report {filename}...")
236 duration = 0
240 time="0",
241 tests="0",
242 failures="0",
243 errors="0", skipped="0")
249 total = 0
250 fails = passes = errors = skips = 0
252 handler_time = ts.get('execution_time', 0)
253 runnable = ts.get('runnable', 0)
278 classname = f"{platform}:{name}"
286 eleTestsuite.attrib['time'] = f"{duration}"
287 eleTestsuite.attrib['failures'] = f"{fails}"
288 eleTestsuite.attrib['errors'] = f"{errors}"
289 eleTestsuite.attrib['skipped'] = f"{skips}"
290 eleTestsuite.attrib['tests'] = f"{total}"
292 ET.indent(eleTestsuites, space="\t", level=0)
298 logger.info(f"Writing JSON report {filename}")
323 f"Skip test suite '{instance.testsuite.name}'"
324 f" status '{instance.status}' not allowed for {filename}"
330 f"Skip test suite '{instance.testsuite.name}'"
331 f" status '{instance.status}' denied for {filename}"
340 handler_time = instance.metrics.get('handler_time', 0)
341 used_ram = instance.metrics.get("used_ram", 0)
342 used_rom = instance.metrics.get("used_rom", 0)
343 available_ram = instance.metrics.get("available_ram", 0)
344 available_rom = instance.metrics.get("available_rom", 0)
405 suite["execution_time"] = f"{float(handler_time):.2f}"
406 suite["build_time"] = f"{float(instance.build_time):.2f}"
411 single_case_duration = f"{float(handler_time):.2f}"
413 single_case_duration = 0
432 testcase['execution_time'] = f"{float(case.duration):.2f}"
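
The JSON-report matches around 405-432 show the convention of storing every duration as a string fixed to two decimals, with a plain 0 as the fallback when no duration is available. A small illustrative sketch, assuming a simplified dict layout rather than twister's exact schema:

    import json

    handler_time = 1.23456  # placeholder value
    suite = {
        # Durations are serialised as two-decimal strings so the JSON
        # output stays stable and easy to diff between runs.
        "execution_time": f"{float(handler_time):.2f}",
    }
    # A missing duration falls back to 0 rather than an empty string.
    single_case_duration = f"{float(handler_time):.2f}" if handler_time else 0

    with open("twister.json", "w") as report:
        json.dump({"testsuites": [suite]}, report, indent=4)
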
483 logger.debug(f"Collect footprint.{k} for '{instance.name}'")
486 logger.error(f"Missing footprint.{k} for '{instance.name}'")
503 logger.error(f"Cannot compare metrics, {filename} not found")
513 d[m] = ts.get(m, 0)
528 delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
529 if delta == 0:
531 results.append((instance, metric, instance.metrics.get(metric, 0), delta,
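
Matches 503-531 are part of the comparison against a previously saved report: each saved metric is cast back to its numeric type, subtracted from the current value, and only non-zero deltas are collected. A minimal sketch of that step (the saved-report dict and the return shape are assumptions for illustration):

    def compare_metric(current_metrics, saved_metrics, metric, mtype=int):
        # Delta between this run and the value stored in the earlier report;
        # the saved value arrives as a string/JSON number, hence the cast.
        delta = current_metrics.get(metric, 0) - mtype(saved_metrics[metric])
        if delta == 0:
            return None  # unchanged metrics produce no result entry
        return (metric, current_metrics.get(metric, 0), delta)
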
542 warnings = 0
545 if not all_deltas and ((delta < 0 and lower_better) or
546 (delta > 0 and not lower_better)):
549 percentage = 0
559 f"{i.platform.name:<25} {i.testsuite.name:<60} {metric} {delta:<+4},"
560 f" is now {value:6} {percentage:+.2%}"
571 if self.env.options.report_summary == 0:
573 log_txt = f"The following issues were found (showing the all {count} items):"
579 f"(presenting {self.instance_fail_count} out of the {count} items requested):"
582 log_txt += f"(showing the {count} of {self.instance_fail_count} items):"
585 log_txt = f"The following issues were found (showing the top {count} items):"
586 cnt = 0
606 f"{cnt}) {instance.testsuite.name} on {instance.platform.name}"
607 f" {status} ({instance.reason})"
612 if cnt == 0 and self.env.options.report_summary is not None:
622 logger.info(f"west twister -p <PLATFORM> -s <TEST ID>{extra_parameters}, for example:")
625 f"west twister -p {example_instance.platform.name}"
626 f" -s {example_instance.testsuite.name}"
627 f"{extra_parameters}"
631 f"west build -p -b {example_instance.platform.name} {cwd_rel_path}"
632 f" -T {example_instance.testsuite.id}"
637 failed = 0
638 run = 0
644 f"{Fore.RED}FAILED{Fore.RESET}:"
645 f" {instance.name} has unrecognized binary sections:"
646 f" {instance.metrics.get('unrecognized', [])!s}"
651 handler_time = instance.metrics.get('handler_time', 0)
652 if float(handler_time) > 0:
658 pass_rate = 0
667 f'{TwisterStatus.get_color(TwisterStatus.NOTRUN)}{results.notrun}{Fore.RESET}'
669 else f'{results.notrun}'
672 f'{TwisterStatus.get_color(TwisterStatus.FAIL)}{results.failed}{Fore.RESET}'
674 else f'{results.failed}'
677 f'{TwisterStatus.get_color(TwisterStatus.ERROR)}{results.error}{Fore.RESET}'
679 else f'{results.error}'
682 f'{Fore.YELLOW}{self.plan.warnings + results.warnings}{Fore.RESET}'
687 f"{passed_color}{results.passed} of {unfiltered_configs}{Fore.RESET}"
688 f" executed test configurations passed ({pass_rate:.2%}),"
689 f" {notrun_number_section} built (not run),"
690 f" {failed_number_section} failed,"
691 f" {error_number_section} errored,"
692 f" with {warnings_number_section} warnings"
693 f" in {duration:.2f} seconds."
714 if executed_cases != 0 else 0
721 f'{results.passed_cases} of {executed_cases} executed test cases passed'
722 f' ({pass_rate:02.2f}%)'
723 f'{blocked_after_comma if results.blocked_cases else ""}'
724 f'{failed_after_comma if results.failed_cases else ""}'
725 f'{error_after_comma if results.error_cases else ""}'
726 f'{none_after_comma if results.none_cases else ""}'
727 f' on {len(filtered_platforms)} out of total {total_platforms} platforms'
728 f' ({platform_rate:02.2f}%).'
739 f'{not_executed} selected test cases not executed:' \
740 f'{skipped_after_colon if results.skipped_cases else ""}' \
741 f'{notrun_after_comma if results.notrun_cases else ""}' \
742 f'.'
747 f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms,"
748 f" {TwisterStatus.get_color(TwisterStatus.NOTRUN)}{built_only}{Fore.RESET}"
771 filename = f"{filename}_{suffix}"
792 filename = os.path.join(outdir, f"{platform.normalized_name}_{suffix}.xml")
793 json_platform_file = os.path.join(outdir, f"{platform.normalized_name}_{suffix}")
795 filename = os.path.join(outdir, f"{platform.normalized_name}.xml")
809 return f"{reason} - {error_key}"
812 return f"{reason} - {error_key}"