Searched refs:call_site (Results 1 – 6 of 6), sorted by relevance
/Linux-v4.19/include/trace/events/kmem.h
    14: TP_PROTO(unsigned long call_site,
    20: TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
    23: __field( unsigned long, call_site )
    31: __entry->call_site = call_site;
    39: __entry->call_site,
    48: TP_PROTO(unsigned long call_site, const void *ptr,
    51: TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
    56: TP_PROTO(unsigned long call_site, const void *ptr,
    59: TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
    64: TP_PROTO(unsigned long call_site,
    [all …]
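
For orientation, these hits come from the kmalloc allocation tracepoint, whose first argument is the caller's address. Below is a hedged sketch of the shape those fragments imply; the real header builds the events through DECLARE_EVENT_CLASS/DEFINE_EVENT and this collapses that into a single TRACE_EVENT for readability, so it is illustrative only and not compilable outside the include/trace/events machinery.

/* Sketch of the kmalloc tracepoint shape implied by the hits above.
 * Simplified reconstruction, not a verbatim copy of kmem.h. */
TRACE_EVENT(kmalloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
	),

	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
		  __entry->call_site, __entry->ptr,
		  __entry->bytes_req, __entry->bytes_alloc,
		  show_gfp_flags(__entry->gfp_flags))
);
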
/Linux-v4.19/Documentation/trace/histogram.rst
   233: field:unsigned long call_site; offset:8; size:8; signed:0;
   243: # echo 'hist:key=call_site:val=bytes_req' > \
   247: call_site field of the kmalloc event as the key for the table, which
   248: just means that each unique call_site address will have an entry
   250: the hist trigger that for each unique entry (call_site) in the
   252: requested by that call_site.
   259: # trigger info: hist:keys=call_site:vals=bytes_req:sort=hitcount:size=2048 [active]
   261: { call_site: 18446744072106379007 } hitcount: 1 bytes_req: 176
   262: { call_site: 18446744071579557049 } hitcount: 1 bytes_req: 1024
   263: { call_site: 18446744071580608289 } hitcount: 1 bytes_req: 16384
   [all …]
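
The histogram.rst excerpt programs a hist trigger keyed on call_site and summing bytes_req. As a hedged illustration, the same trigger can be applied and read back from a small userspace program; the tracefs paths below follow the documentation excerpt (/sys/kernel/debug/tracing) and are an assumption on systems that mount tracefs elsewhere, e.g. /sys/kernel/tracing. Requires root.

/* Hedged sketch: set the hist trigger shown in histogram.rst and print
 * the resulting per-call_site table. Paths assumed from the excerpt. */
#include <stdio.h>

#define TRIGGER "/sys/kernel/debug/tracing/events/kmem/kmalloc/trigger"
#define HIST    "/sys/kernel/debug/tracing/events/kmem/kmalloc/hist"

int main(void)
{
	FILE *f = fopen(TRIGGER, "w");
	char line[512];

	if (!f) {
		perror("open trigger");
		return 1;
	}
	/* Same trigger string as the documentation's echo: one histogram
	 * bucket per unique call_site, accumulating bytes_req. */
	if (fputs("hist:key=call_site:val=bytes_req", f) == EOF) {
		perror("write trigger");
		fclose(f);
		return 1;
	}
	fclose(f);

	f = fopen(HIST, "r");
	if (!f) {
		perror("open hist");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
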
/Linux-v4.19/Documentation/trace/events-kmem.rst
    21: kmalloc call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s
    22: kmalloc_node call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d
    23: kfree call_site=%lx ptr=%p
    36: kmem_cache_alloc call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s
    37: kmem_cache_alloc_node call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d
    38: kmem_cache_free call_site=%lx ptr=%p
    43: but the call_site can usually be used to extrapolate that information.
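
events-kmem.rst notes that the call_site address can be used to work out which caller an allocation or free belongs to. One hedged way to do that from userspace is to map the raw address onto the nearest preceding symbol in /proc/kallsyms; the helper below is illustrative only (not part of the kernel or perf sources) and assumes real addresses are visible, i.e. root or kptr_restrict=0.

/* Hedged sketch: resolve a call_site address, as printed by the kmalloc
 * tracepoint, to the nearest preceding symbol in /proc/kallsyms. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(int argc, char **argv)
{
	unsigned long target, addr, best_addr = 0;
	char type, name[256], best_name[256] = "?";
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <call_site hex address>\n", argv[0]);
		return 1;
	}
	target = strtoul(argv[1], NULL, 16);

	f = fopen("/proc/kallsyms", "r");
	if (!f) {
		perror("/proc/kallsyms");
		return 1;
	}
	/* Each line is "<address> <type> <symbol> [module]"; remember the
	 * highest symbol address that is still <= the call_site. */
	while (fscanf(f, "%lx %c %255s%*[^\n]", &addr, &type, name) == 3) {
		if (addr <= target && addr >= best_addr) {
			best_addr = addr;
			strcpy(best_name, name);
		}
	}
	fclose(f);

	printf("call_site 0x%lx => %s+0x%lx\n", target, best_name,
	       target - best_addr);
	return 0;
}
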
/Linux-v4.19/tools/perf/builtin-kmem.c
    55: u64 call_site;                                                              [member]
    80: static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,    [in insert_alloc_stat(), argument]
   118: data->call_site = call_site;                                                [in insert_alloc_stat()]
   125: static int insert_caller_stat(unsigned long call_site,                      [in insert_caller_stat(), argument]
   136: if (call_site > data->call_site)                                            [in insert_caller_stat()]
   138: else if (call_site < data->call_site)                                       [in insert_caller_stat()]
   144: if (data && data->call_site == call_site) {                                 [in insert_caller_stat()]
   154: data->call_site = call_site;                                                [in insert_caller_stat()]
   171: call_site = perf_evsel__intval(evsel, sample, "call_site");                 [in perf_evsel__process_alloc_event(), local]
   175: if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) || [in perf_evsel__process_alloc_event()]
    [all …]
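
builtin-kmem.c keeps per-caller statistics in a tree ordered by call_site: insert_caller_stat() walks right for larger keys, left for smaller ones, and updates the existing node when the key already exists. Below is a simplified standalone sketch of that idea; it uses a plain unbalanced binary search tree instead of perf's rbtree helpers, and the struct and field names here (caller_stat, hit, etc.) are illustrative assumptions, not perf's exact definitions.

/* Hedged, simplified model of perf's insert_caller_stat(): one node per
 * unique call_site, accumulating byte counts across samples. */
#include <stdio.h>
#include <stdlib.h>

struct caller_stat {
	unsigned long call_site;
	unsigned long long bytes_req;
	unsigned long long bytes_alloc;
	unsigned int hit;
	struct caller_stat *left, *right;
};

static struct caller_stat *root;

static int insert_caller_stat(unsigned long call_site,
			      unsigned long bytes_req,
			      unsigned long bytes_alloc)
{
	struct caller_stat **link = &root, *node;

	/* Walk the tree the same way the perf version does: right for a
	 * larger key, left for a smaller one, accumulate on a match. */
	while (*link) {
		node = *link;
		if (call_site > node->call_site)
			link = &node->right;
		else if (call_site < node->call_site)
			link = &node->left;
		else {
			node->hit++;
			node->bytes_req += bytes_req;
			node->bytes_alloc += bytes_alloc;
			return 0;
		}
	}

	node = calloc(1, sizeof(*node));
	if (!node)
		return -1;
	node->call_site = call_site;
	node->bytes_req = bytes_req;
	node->bytes_alloc = bytes_alloc;
	node->hit = 1;
	*link = node;
	return 0;
}

int main(void)
{
	/* Fake samples; in perf these values come from the tracepoint via
	 * perf_evsel__intval(evsel, sample, "call_site") and friends. */
	insert_caller_stat(0xffffffff81234560UL, 176, 192);
	insert_caller_stat(0xffffffff81234560UL, 176, 192);
	insert_caller_stat(0xffffffff815678a0UL, 1024, 1024);

	printf("root call_site=0x%lx hits=%u bytes_req=%llu\n",
	       root->call_site, root->hit, root->bytes_req);
	return 0;
}
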
/Linux-v4.19/tools/perf/scripts/python/check-perf-trace.py
    41: common_callchain, call_site, ptr, bytes_req, bytes_alloc,   [argument]
    50: (call_site, ptr, bytes_req, bytes_alloc,
/Linux-v4.19/tools/perf/scripts/perl/check-perf-trace.pl
    47: $call_site, $ptr, $bytes_req, $bytes_alloc,
    57: $call_site, $ptr, $bytes_req, $bytes_alloc,