/Linux-v4.19/tools/perf/arch/x86/util/ |
D | kvm-stat.c
  |   32  key->key = perf_evsel__intval(evsel, sample, "gpa");   in mmio_event_get_key()
  |   33  key->info = perf_evsel__intval(evsel, sample, "type");   in mmio_event_get_key()
  |   49  perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) {   in mmio_event_begin()
  |   66  perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) {   in mmio_event_end()
  |   95  key->key = perf_evsel__intval(evsel, sample, "port");   in ioport_event_get_key()
  |   96  key->info = perf_evsel__intval(evsel, sample, "rw");   in ioport_event_get_key()
|
/Linux-v4.19/tools/perf/arch/s390/util/ |
D | kvm-stat.c
  |   34  insn = perf_evsel__intval(evsel, sample, "instruction");   in event_icpt_insn_get_key()
  |   43  key->key = perf_evsel__intval(evsel, sample, "order_code");   in event_sigp_get_key()
  |   51  key->key = perf_evsel__intval(evsel, sample, "code");   in event_diag_get_key()
  |   59  key->key = perf_evsel__intval(evsel, sample, "code");   in event_icpt_prog_get_key()
|
/Linux-v4.19/tools/perf/ |
D | builtin-timechart.c
  |  584  u32 state = perf_evsel__intval(evsel, sample, "state");   in process_sample_cpu_idle()
  |  585  u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");   in process_sample_cpu_idle()
  |  600  u32 state = perf_evsel__intval(evsel, sample, "state");   in process_sample_cpu_frequency()
  |  601  u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");   in process_sample_cpu_frequency()
  |  613  u8 flags = perf_evsel__intval(evsel, sample, "common_flags");   in process_sample_sched_wakeup()
  |  614  int waker = perf_evsel__intval(evsel, sample, "common_pid");   in process_sample_sched_wakeup()
  |  615  int wakee = perf_evsel__intval(evsel, sample, "pid");   in process_sample_sched_wakeup()
  |  627  int prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");   in process_sample_sched_switch()
  |  628  int next_pid = perf_evsel__intval(evsel, sample, "next_pid");   in process_sample_sched_switch()
  |  629  u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");   in process_sample_sched_switch()
  |  [all …]
|
D | builtin-kmem.c
  |  170  unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"),   in perf_evsel__process_alloc_event()
  |  171  call_site = perf_evsel__intval(evsel, sample, "call_site");   in perf_evsel__process_alloc_event()
  |  172  int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"),   in perf_evsel__process_alloc_event()
  |  173  bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc");   in perf_evsel__process_alloc_event()
  |  193  node2 = perf_evsel__intval(evsel, sample, "node");   in perf_evsel__process_alloc_node_event()
  |  233  unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr");   in perf_evsel__process_free_event()
  |  784  unsigned int order = perf_evsel__intval(evsel, sample, "order");   in perf_evsel__process_page_alloc_event()
  |  785  unsigned int gfp_flags = perf_evsel__intval(evsel, sample, "gfp_flags");   in perf_evsel__process_page_alloc_event()
  |  786  unsigned int migrate_type = perf_evsel__intval(evsel, sample,   in perf_evsel__process_page_alloc_event()
  |  798  page = perf_evsel__intval(evsel, sample, "pfn");   in perf_evsel__process_page_alloc_event()
  |  [all …]
|
D | builtin-sched.c
  |   805  const u32 pid = perf_evsel__intval(evsel, sample, "pid");   in replay_wakeup_event()
  |   828  const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),   in replay_switch_event()
  |   829  next_pid = perf_evsel__intval(evsel, sample, "next_pid");   in replay_switch_event()
  |   830  const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");   in replay_switch_event()
  |  1096  const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),   in latency_switch_event()
  |  1097  next_pid = perf_evsel__intval(evsel, sample, "next_pid");   in latency_switch_event()
  |  1098  const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");   in latency_switch_event()
  |  1166  const u32 pid = perf_evsel__intval(evsel, sample, "pid");   in latency_runtime_event()
  |  1167  const u64 runtime = perf_evsel__intval(evsel, sample, "runtime");   in latency_runtime_event()
  |  1201  const u32 pid = perf_evsel__intval(evsel, sample, "pid");   in latency_wakeup_event()
  |  [all …]
|
D | builtin-lock.c
  |  407  u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");   in report_lock_acquire_event()
  |  408  int flag = perf_evsel__intval(evsel, sample, "flag");   in report_lock_acquire_event()
  |  480  u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");   in report_lock_acquired_event()
  |  542  u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");   in report_lock_contended_event()
  |  597  u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");   in report_lock_release_event()
|
D | builtin-kvm.c
  |   65  key->key = perf_evsel__intval(evsel, sample, kvm_exit_reason);   in exit_event_get_key()
  |  412  vcpu_record->vcpu_id = perf_evsel__intval(evsel, sample,   in per_vcpu_record()
|
D | builtin-inject.c | 522 u32 pid = perf_evsel__intval(evsel, sample, "pid"); in perf_inject__sched_stat()
|
D | builtin-trace.c
  |  1964  u64 runtime = perf_evsel__intval(evsel, sample, "runtime");   in trace__sched_stat_runtime()
  |  1984  (pid_t)perf_evsel__intval(evsel, sample, "pid"),   in trace__sched_stat_runtime()
  |  1986  perf_evsel__intval(evsel, sample, "vruntime"));   in trace__sched_stat_runtime()
|
/Linux-v4.19/tools/perf/tests/ |
D | openat-syscall-tp-fields.c | 113 tp_flags = perf_evsel__intval(evsel, &sample, "flags"); in test__syscall_openat_tp_fields()
|
D | switch-tracking.c
  |  131  next_tid = perf_evsel__intval(evsel, &sample, "next_pid");   in process_sample_event()
  |  132  prev_tid = perf_evsel__intval(evsel, &sample, "prev_pid");   in process_sample_event()
|
/Linux-v4.19/tools/perf/arch/powerpc/util/ |
D | kvm-stat.c | 38 key->key = perf_evsel__intval(evsel, sample, "req"); in hcall_event_get_key()
|
/Linux-v4.19/tools/perf/util/ |
D | evsel.h | 289 u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
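The search output cuts the declaration at the line break. Judging from the call sites above, which all pass a tracepoint field name as the last argument, the full prototype at evsel.h:289 is presumably:

    u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
                           const char *name);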
|
D | intel-pt.c | 1873 tid = perf_evsel__intval(evsel, sample, "next_pid"); in intel_pt_process_switch()
|
D | evsel.c | 2751 u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample, in perf_evsel__intval() function
|
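Taken together, the callers above all follow one pattern: a sample handler decodes named fields of a tracepoint from the raw sample payload. Below is a minimal sketch of that pattern, assuming it is compiled inside tools/perf where struct perf_evsel, struct perf_sample, and the u32 typedef are available; the handler name is hypothetical, while the "pid" field mirrors the sched:sched_wakeup usage at builtin-sched.c:805 in the listing.

    #include "util/evsel.h"   /* perf_evsel__intval() declaration (evsel.h:289) */
    #include "util/event.h"   /* struct perf_sample (assumed location in tools/perf) */

    /* Hypothetical handler in the style of the callers listed above:
     * each named field of the tracepoint format is decoded from the raw
     * sample data as a u64 and then narrowed to the wanted type.
     */
    static int sketch__sched_wakeup(struct perf_evsel *evsel,
                                    struct perf_sample *sample)
    {
            /* "pid" is a field of sched:sched_wakeup, cf. builtin-sched.c:805 */
            const u32 pid = perf_evsel__intval(evsel, sample, "pid");

            return pid ? 0 : -1;    /* placeholder: real handlers update per-task state */
    }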