# SPDX-License-Identifier: GPL-2.0-only
#
# Architectures that offer a FUNCTION_TRACER implementation should
#  select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE_WITH_REGS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	bool

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FENTRY
	bool
	help
	  Arch supports the gcc options -pg with -mfentry

config HAVE_NOP_MCOUNT
	bool
	help
	  Arch supports the gcc options -pg with -mrecord-mcount and -mnop-mcount

config HAVE_C_RECORDMCOUNT
	bool
	help
	  C version of recordmcount available?

config TRACER_MAX_TRACE
	bool

config TRACE_CLOCK
	bool

config RING_BUFFER
	bool
	select TRACE_CLOCK
	select IRQ_WORK

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	select GLOB
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	  Allow the use of ring_buffer_swap_cpu.
	  Adds a very slight overhead to tracing when enabled.

config PREEMPTIRQ_TRACEPOINTS
	bool
	depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS
	select TRACING
	default y
	help
	  Create preempt/irq toggle tracepoints if needed, so that other parts
	  of the kernel can use them to generate or add hooks to them.

# All tracer options should select GENERIC_TRACER. For those options that are
# enabled by all tracers (context switch and event tracer), they select TRACING.
# This allows those options to appear when no other tracer is selected. But the
# options do not appear when something else selects them. We need the two options
# GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
# hiding of the automatic options.

config TRACING
	bool
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on STACKTRACE_SUPPORT
	default y

if TRACING_SUPPORT

menuconfig FTRACE
	bool "Tracers"
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config BOOTTIME_TRACING
	bool "Boot-time Tracing support"
	depends on TRACING
	select BOOT_CONFIG
	help
	  Enable developers to set up the ftrace subsystem via a supplemental
	  kernel cmdline at boot time for debugging (tracing) driver
	  initialization and the boot process.

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select GLOB
	select TASKS_RCU if PREEMPTION
	select TASKS_RUDE_RCU
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function; this NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it's runtime disabled
	  (the bootup default), then the overhead of the instructions is very
	  small and not measurable even in micro-benchmarks.
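
# Example: a minimal way to exercise the function tracer above (paths assume
# tracefs is mounted at /sys/kernel/debug/tracing; the mount point may differ
# on your system):
#
#   echo function > /sys/kernel/debug/tracing/current_tracer
#   cat /sys/kernel/debug/tracing/trace | head
#   echo nop > /sys/kernel/debug/tracing/current_tracer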

config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its return
	  and its entry.
	  Its first purpose is to trace the duration of functions and
	  draw a call graph for each thread with some information like
	  the return value. This is done by setting the current return
	  address on the current task structure into a stack of calls.

config DYNAMIC_FTRACE
	bool "enable/disable function tracing dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to function tracing
	  dynamically (will patch them out of the binary image and
	  replace them with a No-Op instruction) on boot up. During
	  compile time, a table is made of all the locations that ftrace
	  can function trace, and this table is linked into the kernel
	  image. When this is enabled, functions can be individually
	  enabled, and the functions not enabled will not affect
	  performance of the system.

	  See the files in /sys/kernel/debug/tracing:
	    available_filter_functions
	    set_ftrace_filter
	    set_ftrace_notrace

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.

config DYNAMIC_FTRACE_WITH_REGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	def_bool y
	depends on DYNAMIC_FTRACE_WITH_REGS
	depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file profiling begins, and when a
	  zero is entered, profiling stops. A "functions" file is created in
	  the trace_stat directory; this file shows the list of functions that
	  have been hit and their counters.

	  If in doubt, say N.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved. If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled

	  Say N if unsure.
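
# Example: the stack tracer above can be toggled at run time via its sysctl and
# the results read back from tracefs (mount point assumed to be
# /sys/kernel/debug/tracing):
#
#   sysctl kernel.stack_tracer_enabled=1
#   cat /sys/kernel/debug/tracing/stack_trace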

config TRACE_PREEMPT_TOGGLE
	bool
	help
	  Enables hooks which will be called when preemption is first disabled,
	  and last enabled.

config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on !ARCH_USES_GETTIMEOFFSET
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on !ARCH_USES_GETTIMEOFFSET
	depends on PREEMPTION
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	select TRACE_PREEMPT_TOGGLE
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	select TRACER_SNAPSHOT
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.

config HWLAT_TRACER
	bool "Tracer to detect hardware latencies (like SMIs)"
	select GENERIC_TRACER
	help
	  This tracer, when enabled, will create one or more kernel threads,
	  depending on what the cpumask file is set to, with each thread
	  spinning in a loop looking for interruptions caused by
	  something other than the kernel. For example, if a
	  System Management Interrupt (SMI) takes a noticeable amount of
	  time, this tracer will detect it. This is useful for testing
	  if a system is reliable for Real Time tasks.

	  Some files are created in the tracing directory when this
	  is enabled:

	    hwlat_detector/width  - time in usecs for how long to spin for
	    hwlat_detector/window - time in usecs between the start of each
				    iteration

	  A kernel thread is created that will spin with interrupts disabled
	  for "width" microseconds in every "window" cycle. It will not spin
	  for "window - width" microseconds, where the system can
	  continue to operate.

	  The output will appear in the trace and trace_pipe files.

	  When the tracer is not running, it has no effect on the system,
	  but when it is running, it can cause the system to be
	  periodically non-responsive. Do not run this tracer on a
	  production system.

	  To enable this tracer, echo "hwlat" into the current_tracer
	  file. Every time a latency is greater than tracing_thresh, it will
	  be recorded into the ring buffer.
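
# Example: a typical way to run the hardware latency detector above (the width
# and window values are only illustrative; paths assume tracefs at
# /sys/kernel/debug/tracing):
#
#   echo hwlat > /sys/kernel/debug/tracing/current_tracer
#   echo 500000 > /sys/kernel/debug/tracing/hwlat_detector/width
#   echo 1000000 > /sys/kernel/debug/tracing/hwlat_detector/window
#   cat /sys/kernel/debug/tracing/trace_pipe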

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.

	  See Documentation/trace/mmiotrace.rst.
	  If you are not helping to develop drivers, say N.

config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks to various trace points in the kernel,
	  allowing the user to pick and choose which trace point they
	  want to trace. It also includes the sched_switch tracer plugin.

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.
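
# Example: with syscall tracing enabled, individual syscall events can be
# turned on through the event interface (sys_enter_openat is just one such
# event; paths assume tracefs at /sys/kernel/debug/tracing):
#
#   echo 1 > /sys/kernel/debug/tracing/events/syscalls/sys_enter_openat/enable
#   cat /sys/kernel/debug/tracing/trace_pipe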

config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take a snapshot of the current buffer using the
	  ftrace interface, e.g.:

	      echo 1 > /sys/kernel/debug/tracing/snapshot
	      cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
	bool "Allow snapshot to swap per CPU"
	depends on TRACER_SNAPSHOT
	select RING_BUFFER_ALLOW_SWAP
	help
	  Allow doing a snapshot of a single CPU buffer instead of a
	  full swap (all buffers). If this is set, then the following is
	  allowed:

	      echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot

	  After which, only the tracing buffer for CPU 2 will be swapped with
	  the main tracing buffer, and the other CPU buffers remain the same.

	  When this is enabled, this adds a little more overhead to the
	  trace recording, as it needs to add some checks to synchronize
	  recording with swaps. But this does not affect the performance
	  of the overall system. This is enabled by default when the preempt
	  or irq latency tracers are enabled, as those need to swap as well
	  and already add the overhead (plus a lot more).

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	  The branch profiling is a software profiler. It will add hooks
	  into the C conditionals to test which path a branch takes.

	  The likely/unlikely profiler only looks at the conditions that
	  are annotated with a likely or unlikely macro.

	  The "all branch" profiler will profile every if-statement in the
	  kernel. This profiler will also enable the likely/unlikely
	  profiler.

	  Either of the above profilers adds a bit of overhead to the system.
	  If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyse the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

	  /sys/kernel/debug/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals" if !FORTIFY_SOURCE
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  taken in the kernel is recorded whether it hit or missed.
	  The results will be displayed in:

	  /sys/kernel/debug/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a great overhead
	  on the system. This should only be enabled when the system
	  is to be analyzed in much detail.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel. The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	      git://git.kernel.dk/blktrace.git

	  Tracing also is possible using the ftrace interface, e.g.:

	      echo 1 > /sys/block/sda/sda1/trace/enable
	      echo blk > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config KPROBE_EVENTS
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.rst for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by the perf-probe subcommand of perf
	  tools. If you want to use perf tools, this option is strongly
	  recommended.

config KPROBE_EVENTS_ON_NOTRACE
	bool "Do NOT protect notrace function from kprobe events"
	depends on KPROBE_EVENTS
	depends on KPROBES_ON_FTRACE
	default n
	help
	  This is only for the developers who want to debug ftrace itself
	  using kprobe events.

	  If kprobes can use ftrace instead of a breakpoint, ftrace-related
	  functions are protected from kprobe events to prevent an infinite
	  recursion or any unexpected execution path which leads to a kernel
	  crash.

	  This option disables such protection and allows you to put kprobe
	  events on ftrace functions for debugging ftrace by itself.
	  Note that this might let you shoot yourself in the foot.

	  If unsure, say N.
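
# Example: a kprobe event can be defined and enabled entirely from tracefs
# ("myprobe" and the probed function are only illustrative; see
# Documentation/trace/kprobetrace.rst for the full syntax):
#
#   echo 'p:myprobe do_sys_open dfd=$arg1 flags=$arg3' >> /sys/kernel/debug/tracing/kprobe_events
#   echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable
#   cat /sys/kernel/debug/tracing/trace_pipe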

config UPROBE_EVENTS
	bool "Enable uprobes-based dynamic events"
	depends on ARCH_SUPPORTS_UPROBES
	depends on MMU
	depends on PERF_EVENTS
	select UPROBES
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	select TRACING
	default y
	help
	  This allows the user to add tracing events on top of userspace
	  dynamic events (similar to tracepoints) on the fly via the trace
	  events interface. Those events can be inserted wherever uprobes
	  can probe, and record various registers.
	  This option is required if you plan to use the perf-probe
	  subcommand of perf tools on user space applications.

config BPF_EVENTS
	depends on BPF_SYSCALL
	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
	bool
	default y
	help
	  This allows the user to attach BPF programs to kprobe, uprobe, and
	  tracepoint events.

config DYNAMIC_EVENTS
	def_bool n

config PROBE_EVENTS
	def_bool n

config BPF_KPROBE_OVERRIDE
	bool "Enable BPF programs to override a kprobed function"
	depends on BPF_EVENTS
	depends on FUNCTION_ERROR_INJECTION
	default n
	help
	  Allows BPF to override the execution of a probed function and
	  set a different return value. This is used for error injection.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config TRACING_MAP
	bool
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	help
	  tracing_map is a special-purpose lock-free map for tracing,
	  separated out as a stand-alone facility in order to allow it
	  to be shared between multiple tracers. It isn't meant to be
	  generally used outside of that context, and is normally
	  selected by tracers that use it.

config SYNTH_EVENTS
	bool "Synthetic trace events"
	select TRACING
	select DYNAMIC_EVENTS
	default n
	help
	  Synthetic events are user-defined trace events that can be
	  used to combine data from other trace events or in fact any
	  data source. Synthetic events can be generated indirectly
	  via the trace() action of histogram triggers or directly
	  by way of an in-kernel API.

	  See Documentation/trace/events.rst or
	  Documentation/trace/histogram.rst for details and examples.

	  If in doubt, say N.

config HIST_TRIGGERS
	bool "Histogram triggers"
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	select TRACING_MAP
	select TRACING
	select DYNAMIC_EVENTS
	select SYNTH_EVENTS
	default n
	help
	  Hist triggers allow one or more arbitrary trace event fields
	  to be aggregated into hash tables and dumped to stdout by
	  reading a debugfs/tracefs file. They're useful for
	  gathering quick and dirty (though precise) summaries of
	  event activity as an initial guide for further investigation
	  using more advanced tools.

	  Inter-event tracing of quantities such as latencies is also
	  supported using hist triggers under this option.

	  See Documentation/trace/histogram.rst.
	  If in doubt, say N.
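
# Example: a simple histogram trigger on an existing event (sched_switch and
# the key field are only illustrative; see Documentation/trace/histogram.rst):
#
#   echo 'hist:keys=common_pid' >> /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
#   cat /sys/kernel/debug/tracing/events/sched/sched_switch/hist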

config TRACE_EVENT_INJECT
	bool "Trace event injection"
	depends on TRACING
	help
	  Allow user-space to inject a specific trace event into the ring
	  buffer. This is mainly used for testing purposes.

	  If unsure, say N.

config TRACEPOINT_BENCHMARK
	bool "Add tracepoint that benchmarks tracepoints"
	help
	  This option creates the tracepoint "benchmark:benchmark_event".
	  When the tracepoint is enabled, it kicks off a kernel thread that
	  goes into an infinite loop (calling cond_resched() to let other tasks
	  run), and calls the tracepoint. Each iteration will record the time
	  it took to write to the tracepoint, and on the next iteration that
	  data will be passed to the tracepoint itself. That is, the tracepoint
	  will report the time it took to do the previous tracepoint.
	  The string written to the tracepoint is a static string of 128 bytes
	  to keep the time the same. The initial string is simply a write of
	  "START". The second string records the cold cache time of the first
	  write which is not added to the rest of the calculations.

	  As it is a tight loop, it benchmarks as hot cache. That's fine because
	  we care most about hot paths that are probably in cache already.

	  An example of the output:

	      START
	      first=3672 [COLD CACHED]
	      last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
	      last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
	      last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
	      last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
	      last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
	      last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666

config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.
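
# Example: exercising the two benchmarks above. The benchmark tracepoint is
# enabled through the event interface, and the ring buffer benchmark (when
# built as a module) is simply loaded; where its results show up depends on
# the kernel configuration (trace buffer and/or kernel log):
#
#   echo 1 > /sys/kernel/debug/tracing/events/benchmark/benchmark_event/enable
#   modprobe ring_buffer_benchmark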

config TRACE_EVAL_MAP_FILE
	bool "Show eval mappings for trace events"
	depends on TRACING
	help
	  The "print fmt" of the trace events will show the enum/sizeof names
	  instead of their values. This can cause problems for user space tools
	  that use this string to parse the raw data as user space does not know
	  how to convert the string to its value.

	  To fix this, there's a special macro in the kernel that can be used
	  to convert an enum/sizeof into its value. If this macro is used, then
	  the print fmt strings will be converted to their values.

	  If something does not get converted properly, this option can be
	  used to show what enums/sizeof the kernel tried to convert.

	  This option is for debugging the conversions. A file is created
	  in the tracing directory called "eval_map" that will show the
	  names matched with their values and what trace event system they
	  belong to.

	  Normally, the mapping of the strings to values will be freed after
	  boot up or module load. With this option, they will not be freed, as
	  they are needed for the "eval_map" file. Enabling this option will
	  increase the memory footprint of the running kernel.

	  If unsure, say N.
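
# Example: with the option above enabled, the recorded mappings can be
# inspected directly (path assumes tracefs at /sys/kernel/debug/tracing):
#
#   cat /sys/kernel/debug/tracing/eval_map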

config GCOV_PROFILE_FTRACE
	bool "Enable GCOV profiling on ftrace subsystem"
	depends on GCOV_KERNEL
	help
	  Enable GCOV profiling on the ftrace subsystem for checking
	  which functions/lines are tested.

	  If unsure, say N.

	  Note that on a kernel compiled with this config, ftrace will
	  run significantly slower.

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup
	  a series of tests are made to verify that the tracer is
	  functioning properly. It will do tests on all the configured
	  tracers of ftrace.

config EVENT_TRACE_STARTUP_TEST
	bool "Run selftest on trace events"
	depends on FTRACE_STARTUP_TEST
	default y
	help
	  This option performs a test on all trace events in the system.
	  It basically just enables each event and runs some code that
	  will trigger events (not necessarily the event it enables).
	  This may take some time to run as there are a lot of events.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on EVENT_TRACE_STARTUP_TEST
	help
	  This option will also enable testing every syscall event.
	  It only enables the event and disables it and runs various loads
	  with the event enabled. This adds a bit more time for kernel boot
	  up since it runs this on every system call defined.

	  TBD - enable a way to actually call the syscalls as we test their
	  events

config RING_BUFFER_STARTUP_TEST
	bool "Ring buffer startup self test"
	depends on RING_BUFFER
	help
	  Run a simple self test on the ring buffer on boot up. Late in the
	  kernel boot sequence, the test will start and kick off
	  a thread per cpu. Each thread will write various size events
	  into the ring buffer. Another thread is created to send IPIs
	  to each of the threads, where the IPI handler will also write
	  to the ring buffer, to test/stress the nesting ability.
	  If any anomalies are discovered, a warning will be displayed
	  and all ring buffers will be disabled.

	  The test runs for 10 seconds. This will slow your boot time
	  by at least 10 more seconds.

	  At the end of the test, statistics and more checks are done.
	  It will output the stats of each per-cpu buffer: what
	  was written, the sizes, what was read, what was lost, and
	  other similar details.

	  If unsure, say N

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config PREEMPTIRQ_DELAY_TEST
	tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers"
	depends on m
	help
	  Select this option to build a test module that can help test latency
	  tracers by executing a preempt or irq disable section with a user
	  configurable delay. The module busy waits for the duration of the
	  critical section.

	  For example, the following invocation generates a burst of three
	  irq-disabled critical sections for 500us:

	      modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3

	  If unsure, say N

config SYNTH_EVENT_GEN_TEST
	tristate "Test module for in-kernel synthetic event generation"
	depends on SYNTH_EVENTS
	help
	  This option creates a test module to check the base
	  functionality of in-kernel synthetic event definition and
	  generation.

	  To test, insert the module, and then check the trace buffer
	  for the generated sample events.

	  If unsure, say N.

config KPROBE_EVENT_GEN_TEST
	tristate "Test module for in-kernel kprobe event generation"
	depends on KPROBE_EVENTS
	help
	  This option creates a test module to check the base
	  functionality of in-kernel kprobe event definition.

	  To test, insert the module, and then check the trace buffer
	  for the generated kprobe events.

	  If unsure, say N.

config HIST_TRIGGERS_DEBUG
	bool "Hist trigger debug support"
	depends on HIST_TRIGGERS
	help
	  Add a "hist_debug" file for each event, which when read will
	  dump out a bunch of internal details about the hist triggers
	  defined on that event.

	  The hist_debug file serves a couple of purposes:

	    - Helps developers verify that nothing is broken.

	    - Provides educational information to support the details
	      of the hist trigger internals as described by
	      Documentation/trace/histogram-design.rst.

	  The hist_debug output only covers the data structures
	  related to the histogram definitions themselves and doesn't
	  display the internals of map buckets or variable values of
	  running histograms.

	  If unsure, say N.

endif # FTRACE

endif # TRACING_SUPPORT