Lines matching "min-sample-time" (tokens: min, -, sample, -, time) in tools/perf/builtin-kvm.c

1 // SPDX-License-Identifier: GPL-2.0
5 #include "util/build-id.h"
16 #include <subcmd/parse-options.h>
17 #include "util/trace-event.h"
21 #include "util/synthetic-events.h"
24 #include "util/ordered-events.h"
25 #include "util/kvm-stat.h"
32 #include <sys/time.h>
68 struct perf_sample *sample, in exit_event_get_key() argument
71 key->info = 0; in exit_event_get_key()
72 key->key = evsel__intval(evsel, sample, kvm_exit_reason); in exit_event_get_key()
77 return !strcmp(evsel->name, kvm_exit_trace); in kvm_exit_event()
81 struct perf_sample *sample, struct event_key *key) in exit_event_begin() argument
84 exit_event_get_key(evsel, sample, key); in exit_event_begin()
93 return !strcmp(evsel->name, kvm_entry_trace); in kvm_entry_event()
97 struct perf_sample *sample __maybe_unused, in exit_event_end()
107 while (tbl->reason != NULL) { in get_exit_reason()
108 if (tbl->exit_code == exit_code) in get_exit_reason()
109 return tbl->reason; in get_exit_reason()
114 (unsigned long long)exit_code, kvm->exit_reasons_isa); in get_exit_reason()
122 const char *exit_reason = get_exit_reason(kvm, key->exit_reasons, in exit_event_decode_key()
123 key->key); in exit_event_decode_key()
132 for (events_ops = kvm_reg_events_ops; events_ops->name; events_ops++) { in register_kvm_events_ops()
133 if (!strcmp(events_ops->name, kvm->report_event)) { in register_kvm_events_ops()
134 kvm->events_ops = events_ops->ops; in register_kvm_events_ops()
154 INIT_LIST_HEAD(&kvm->kvm_events_cache[i]); in init_kvm_event_record()
169 event->total.time = 0; in clear_events_cache_stats()
170 init_stats(&event->total.stats); in clear_events_cache_stats()
172 for (j = 0; j < event->max_vcpu; ++j) { in clear_events_cache_stats()
173 event->vcpu[j].time = 0; in clear_events_cache_stats()
174 init_stats(&event->vcpu[j].stats); in clear_events_cache_stats()
183 return key & (EVENTS_CACHE_SIZE - 1); in kvm_events_hash_fn()
188 int old_max_vcpu = event->max_vcpu; in kvm_event_expand()
191 if (vcpu_id < event->max_vcpu) in kvm_event_expand()
194 while (event->max_vcpu <= vcpu_id) in kvm_event_expand()
195 event->max_vcpu += DEFAULT_VCPU_NUM; in kvm_event_expand()
197 prev = event->vcpu; in kvm_event_expand()
198 event->vcpu = realloc(event->vcpu, in kvm_event_expand()
199 event->max_vcpu * sizeof(*event->vcpu)); in kvm_event_expand()
200 if (!event->vcpu) { in kvm_event_expand()
206 memset(event->vcpu + old_max_vcpu, 0, in kvm_event_expand()
207 (event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu)); in kvm_event_expand()
221 event->key = *key; in kvm_alloc_init_event()
222 init_stats(&event->total.stats); in kvm_alloc_init_event()
232 BUG_ON(key->key == INVALID_KEY); in find_create_kvm_event()
234 head = &kvm->kvm_events_cache[kvm_events_hash_fn(key->key)]; in find_create_kvm_event()
236 if (event->key.key == key->key && event->key.info == key->info) in find_create_kvm_event()
244 list_add(&event->hash_entry, head); in find_create_kvm_event()
254 if (key->key != INVALID_KEY) in handle_begin_event()
257 vcpu_record->last_event = event; in handle_begin_event()
258 vcpu_record->start_time = timestamp; in handle_begin_event()
265 kvm_stats->time += time_diff; in kvm_update_event_stats()
266 update_stats(&kvm_stats->stats, time_diff); in kvm_update_event_stats()
271 struct kvm_event_stats *kvm_stats = &event->total; in kvm_event_rel_stddev()
273 if (vcpu_id != -1) in kvm_event_rel_stddev()
274 kvm_stats = &event->vcpu[vcpu_id]; in kvm_event_rel_stddev()
276 return rel_stddev_stats(stddev_stats(&kvm_stats->stats), in kvm_event_rel_stddev()
277 avg_stats(&kvm_stats->stats)); in kvm_event_rel_stddev()
283 if (vcpu_id == -1) { in update_kvm_event()
284 kvm_update_event_stats(&event->total, time_diff); in update_kvm_event()
291 kvm_update_event_stats(&event->vcpu[vcpu_id], time_diff); in update_kvm_event()
297 struct perf_sample *sample, in is_child_event() argument
302 child_ops = kvm->events_ops->child_ops; in is_child_event()
307 for (; child_ops->name; child_ops++) { in is_child_event()
308 if (!strcmp(evsel->name, child_ops->name)) { in is_child_event()
309 child_ops->get_key(evsel, sample, key); in is_child_event()
320 struct perf_sample *sample __maybe_unused) in handle_child_event()
324 if (key->key != INVALID_KEY) in handle_child_event()
327 vcpu_record->last_event = event; in handle_child_event()
346 struct perf_sample *sample) in handle_end_event() argument
352 if (kvm->trace_vcpu == -1) in handle_end_event()
353 vcpu = -1; in handle_end_event()
355 vcpu = vcpu_record->vcpu_id; in handle_end_event()
357 event = vcpu_record->last_event; in handle_end_event()
358 time_begin = vcpu_record->start_time; in handle_end_event()
366 * the actual event is recognized in the 'end event' (e.g. mmio-event). in handle_end_event()
370 if (!event && key->key == INVALID_KEY) in handle_end_event()
379 vcpu_record->last_event = NULL; in handle_end_event()
380 vcpu_record->start_time = 0; in handle_end_event()
383 if (sample->time < time_begin) { in handle_end_event()
384 pr_debug("End time before begin time; skipping event.\n"); in handle_end_event()
388 time_diff = sample->time - time_begin; in handle_end_event()
390 if (kvm->duration && time_diff > kvm->duration) { in handle_end_event()
393 kvm->events_ops->decode_key(kvm, &event->key, decode); in handle_end_event()
396 sample->time, sample->pid, vcpu_record->vcpu_id, in handle_end_event()
407 struct perf_sample *sample) in per_vcpu_record() argument
419 vcpu_record->vcpu_id = evsel__intval(evsel, sample, vcpu_id_str); in per_vcpu_record()
429 struct perf_sample *sample) in handle_kvm_event() argument
433 .exit_reasons = kvm->exit_reasons }; in handle_kvm_event()
435 vcpu_record = per_vcpu_record(thread, evsel, sample); in handle_kvm_event()
440 if ((kvm->trace_vcpu != -1) && in handle_kvm_event()
441 (kvm->trace_vcpu != vcpu_record->vcpu_id)) in handle_kvm_event()
444 if (kvm->events_ops->is_begin_event(evsel, sample, &key)) in handle_kvm_event()
445 return handle_begin_event(kvm, vcpu_record, &key, sample->time); in handle_kvm_event()
447 if (is_child_event(kvm, evsel, sample, &key)) in handle_kvm_event()
448 return handle_child_event(kvm, vcpu_record, &key, sample); in handle_kvm_event()
450 if (kvm->events_ops->is_end_event(evsel, sample, &key)) in handle_kvm_event()
451 return handle_end_event(kvm, vcpu_record, &key, sample); in handle_kvm_event()
459 if (vcpu == -1) \
460 return event->total.field; \
462 if (vcpu >= event->max_vcpu) \
465 return event->vcpu[vcpu].field; \
477 GET_EVENT_KEY(time, time);
481 GET_EVENT_KEY(min, stats.min);
487 DEF_SORT_NAME_KEY(sample, count),
488 DEF_SORT_NAME_KEY(time, mean),
497 if (!strcmp(keys[i].name, kvm->sort_key)) { in select_key()
498 kvm->compare = keys[i].key; in select_key()
503 pr_err("Unknown compare key:%s\n", kvm->sort_key); in select_key()
510 struct rb_node **rb = &result->rb_node; in insert_to_result()
519 rb = &(*rb)->rb_left; in insert_to_result()
521 rb = &(*rb)->rb_right; in insert_to_result()
524 rb_link_node(&event->rb, parent, rb); in insert_to_result()
525 rb_insert_color(&event->rb, result); in insert_to_result()
531 int vcpu = kvm->trace_vcpu; in update_total_count()
533 kvm->total_count += get_event_count(event, vcpu); in update_total_count()
534 kvm->total_time += get_event_time(event, vcpu); in update_total_count()
545 int vcpu = kvm->trace_vcpu; in sort_result()
549 list_for_each_entry(event, &kvm->kvm_events_cache[i], hash_entry) { in sort_result()
552 insert_to_result(&kvm->result, event, in sort_result()
553 kvm->compare, vcpu); in sort_result()
573 int vcpu = kvm->trace_vcpu; in print_vcpu_info()
577 if (kvm->opts.target.system_wide) in print_vcpu_info()
579 else if (kvm->opts.target.pid) in print_vcpu_info()
580 pr_info("pid(s) %s, ", kvm->opts.target.pid); in print_vcpu_info()
584 if (vcpu == -1) in print_vcpu_info()
610 int vcpu = kvm->trace_vcpu; in print_result()
612 if (kvm->live) { in print_result()
619 pr_info("%*s ", decode_str_len, kvm->events_ops->name); in print_result()
623 pr_info("%9s ", "Time%"); in print_result()
624 pr_info("%11s ", "Min Time"); in print_result()
625 pr_info("%11s ", "Max Time"); in print_result()
626 pr_info("%16s ", "Avg time"); in print_result()
629 while ((event = pop_from_result(&kvm->result))) { in print_result()
630 u64 ecount, etime, max, min; in print_result() local
635 min = get_event_min(event, vcpu); in print_result()
637 kvm->events_ops->decode_key(kvm, &event->key, decode); in print_result()
640 pr_info("%8.2f%% ", (double)ecount / kvm->total_count * 100); in print_result()
641 pr_info("%8.2f%% ", (double)etime / kvm->total_time * 100); in print_result()
642 pr_info("%9.2fus ", (double)min / NSEC_PER_USEC); in print_result()
644 pr_info("%9.2fus ( +-%7.2f%% )", (double)etime / ecount / NSEC_PER_USEC, in print_result()
649 pr_info("\nTotal Samples:%" PRIu64 ", Total events handled time:%.2fus.\n\n", in print_result()
650 kvm->total_count, kvm->total_time / (double)NSEC_PER_USEC); in print_result()
652 if (kvm->lost_events) in print_result()
653 pr_info("\nLost events: %" PRIu64 "\n\n", kvm->lost_events); in print_result()
659 struct perf_sample *sample __maybe_unused, in process_lost_event()
664 kvm->lost_events++; in process_lost_event()
670 struct perf_sample *sample) in skip_sample() argument
672 if (kvm->pid_list && intlist__find(kvm->pid_list, sample->pid) == NULL) in skip_sample()
680 struct perf_sample *sample, in process_sample_event() argument
689 if (skip_sample(kvm, sample)) in process_sample_event()
692 thread = machine__findnew_thread(machine, sample->pid, sample->tid); in process_sample_event()
695 event->header.type); in process_sample_event()
696 return -1; in process_sample_event()
699 if (!handle_kvm_event(kvm, thread, evsel, sample)) in process_sample_event()
700 err = -1; in process_sample_event()
711 if (kvm->live) { in cpu_isa_config()
716 return -err; in cpu_isa_config()
720 cpuid = kvm->session->header.env.cpuid; in cpu_isa_config()
724 return -EINVAL; in cpu_isa_config()
728 if (err == -ENOTSUP) in cpu_isa_config()
736 if (vcpu != -1 && vcpu < 0) { in verify_vcpu()
753 struct evlist *evlist = kvm->evlist; in perf_kvm__mmap_read_idx()
761 md = &evlist->mmap[idx]; in perf_kvm__mmap_read_idx()
762 err = perf_mmap__read_init(&md->core); in perf_kvm__mmap_read_idx()
764 return (err == -EAGAIN) ? 0 : -1; in perf_kvm__mmap_read_idx()
766 while ((event = perf_mmap__read_event(&md->core)) != NULL) { in perf_kvm__mmap_read_idx()
769 perf_mmap__consume(&md->core); in perf_kvm__mmap_read_idx()
770 pr_err("Failed to parse sample\n"); in perf_kvm__mmap_read_idx()
771 return -1; in perf_kvm__mmap_read_idx()
774 err = perf_session__queue_event(kvm->session, event, timestamp, 0); in perf_kvm__mmap_read_idx()
779 perf_mmap__consume(&md->core); in perf_kvm__mmap_read_idx()
782 pr_err("Failed to enqueue sample: %d\n", err); in perf_kvm__mmap_read_idx()
783 return -1; in perf_kvm__mmap_read_idx()
786 /* save time stamp of our first sample for this mmap */ in perf_kvm__mmap_read_idx()
796 perf_mmap__read_done(&md->core); in perf_kvm__mmap_read_idx()
806 for (i = 0; i < kvm->evlist->core.nr_mmaps; i++) { in perf_kvm__mmap_read()
809 return -1; in perf_kvm__mmap_read()
811 /* flush time is going to be the minimum of all the individual in perf_kvm__mmap_read()
813 * from the last pass under our minimal start time -- that leaves in perf_kvm__mmap_read()
828 struct ordered_events *oe = &kvm->session->ordered_events; in perf_kvm__mmap_read()
830 oe->next_flush = flush_time; in perf_kvm__mmap_read()
833 if (kvm->lost_events) in perf_kvm__mmap_read()
835 kvm->lost_events); in perf_kvm__mmap_read()
853 int rc = -1; in perf_kvm__timerfd_create()
855 kvm->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK); in perf_kvm__timerfd_create()
856 if (kvm->timerfd < 0) { in perf_kvm__timerfd_create()
861 new_value.it_value.tv_sec = kvm->display_time; in perf_kvm__timerfd_create()
863 new_value.it_interval.tv_sec = kvm->display_time; in perf_kvm__timerfd_create()
866 if (timerfd_settime(kvm->timerfd, 0, &new_value, NULL) != 0) { in perf_kvm__timerfd_create()
868 close(kvm->timerfd); in perf_kvm__timerfd_create()
882 rc = read(kvm->timerfd, &c, sizeof(uint64_t)); in perf_kvm__handle_timerfd()
888 return -1; in perf_kvm__handle_timerfd()
892 pr_err("Error reading timer fd - invalid size returned\n"); in perf_kvm__handle_timerfd()
893 return -1; in perf_kvm__handle_timerfd()
897 pr_debug("Missed timer beats: %" PRIu64 "\n", c-1); in perf_kvm__handle_timerfd()
904 clear_events_cache_stats(kvm->kvm_events_cache); in perf_kvm__handle_timerfd()
905 kvm->total_count = 0; in perf_kvm__handle_timerfd()
906 kvm->total_time = 0; in perf_kvm__handle_timerfd()
907 kvm->lost_events = 0; in perf_kvm__handle_timerfd()
919 return -1; in fd_set_nonblock()
923 pr_err("Failed to set non-block option on fd %d\n", fd); in fd_set_nonblock()
924 return -1; in fd_set_nonblock()
943 int nr_stdin, ret, err = -EINVAL; in kvm_events_live_report()
947 kvm->live = true; in kvm_events_live_report()
953 if (!verify_vcpu(kvm->trace_vcpu) || in kvm_events_live_report()
967 err = -1; in kvm_events_live_report()
971 if (evlist__add_pollfd(kvm->evlist, kvm->timerfd) < 0) in kvm_events_live_report()
974 nr_stdin = evlist__add_pollfd(kvm->evlist, fileno(stdin)); in kvm_events_live_report()
981 /* everything is good - enable the events and process */ in kvm_events_live_report()
982 evlist__enable(kvm->evlist); in kvm_events_live_report()
985 struct fdarray *fda = &kvm->evlist->core.pollfd; in kvm_events_live_report()
996 if (fda->entries[nr_stdin].revents & POLLIN) in kvm_events_live_report()
1000 err = evlist__poll(kvm->evlist, 100); in kvm_events_live_report()
1003 evlist__disable(kvm->evlist); in kvm_events_live_report()
1011 if (kvm->timerfd >= 0) in kvm_events_live_report()
1012 close(kvm->timerfd); in kvm_events_live_report()
1020 int err, rc = -1; in kvm_live_open_events()
1022 struct evlist *evlist = kvm->evlist; in kvm_live_open_events()
1025 perf_evlist__config(evlist, &kvm->opts, NULL); in kvm_live_open_events()
1032 struct perf_event_attr *attr = &pos->core.attr; in kvm_live_open_events()
1036 evsel__set_sample_bit(pos, TIME); in kvm_live_open_events()
1039 /* make sure these are *not*; want as small a sample as possible */ in kvm_live_open_events()
1045 attr->mmap = 0; in kvm_live_open_events()
1046 attr->comm = 0; in kvm_live_open_events()
1047 attr->task = 0; in kvm_live_open_events()
1049 attr->sample_period = 1; in kvm_live_open_events()
1051 attr->watermark = 0; in kvm_live_open_events()
1052 attr->wakeup_events = 1000; in kvm_live_open_events()
1055 attr->disabled = 1; in kvm_live_open_events()
1065 if (evlist__mmap(evlist, kvm->opts.mmap_pages) < 0) { in kvm_live_open_events()
1084 .sample = process_sample_event, in read_events()
1090 .path = kvm->file_name, in read_events()
1092 .force = kvm->force, in read_events()
1095 kvm->tool = eops; in read_events()
1096 kvm->session = perf_session__new(&file, false, &kvm->tool); in read_events()
1097 if (IS_ERR(kvm->session)) { in read_events()
1099 return PTR_ERR(kvm->session); in read_events()
1102 symbol__init(&kvm->session->header.env); in read_events()
1104 if (!perf_session__has_traces(kvm->session, "kvm record")) { in read_events()
1105 ret = -EINVAL; in read_events()
1117 ret = perf_session__process_events(kvm->session); in read_events()
1120 perf_session__delete(kvm->session); in read_events()
1126 if (kvm->opts.target.pid) { in parse_target_str()
1127 kvm->pid_list = intlist__new(kvm->opts.target.pid); in parse_target_str()
1128 if (kvm->pid_list == NULL) { in parse_target_str()
1130 return -EINVAL; in parse_target_str()
1139 int ret = -EINVAL; in kvm_events_report_vcpu()
1140 int vcpu = kvm->trace_vcpu; in kvm_events_report_vcpu()
1172 return -ENOMEM; \
1188 "-R", in kvm_events_record()
1189 "-m", "1024", in kvm_events_record()
1190 "-c", "1", in kvm_events_record()
1214 return -ENOMEM; in kvm_events_record()
1220 rec_argv[i++] = "-e"; in kvm_events_record()
1224 rec_argv[i++] = STRDUP_FAIL_EXIT("-o"); in kvm_events_record()
1225 rec_argv[i++] = STRDUP_FAIL_EXIT(kvm->file_name); in kvm_events_record()
1232 set_option_flag(record_options, 'R', "raw-samples", PARSE_OPT_HIDDEN); in kvm_events_record()
1237 set_option_flag(record_options, 0, "call-graph", PARSE_OPT_DISABLED); in kvm_events_record()
1241 set_option_flag(record_options, 'n', "no-samples", PARSE_OPT_DISABLED); in kvm_events_record()
1242 set_option_flag(record_options, 'N', "no-buildid-cache", PARSE_OPT_DISABLED); in kvm_events_record()
1243 set_option_flag(record_options, 'B', "no-buildid", PARSE_OPT_DISABLED); in kvm_events_record()
1245 set_option_flag(record_options, 'b', "branch-any", PARSE_OPT_DISABLED); in kvm_events_record()
1246 set_option_flag(record_options, 'j', "branch-filter", PARSE_OPT_DISABLED); in kvm_events_record()
1258 OPT_STRING(0, "event", &kvm->report_event, "report event", in kvm_events_report()
1261 OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu, in kvm_events_report()
1263 OPT_STRING('k', "key", &kvm->sort_key, "sort-key", in kvm_events_report()
1264 "key for sorting: sample(sort by samples number)" in kvm_events_report()
1265 " time (sort by avg time)"), in kvm_events_report()
1266 OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid", in kvm_events_report()
1268 OPT_BOOLEAN('f', "force", &kvm->force, "don't complain, do it"), in kvm_events_report()
1286 if (!kvm->opts.target.pid) in kvm_events_report()
1287 kvm->opts.target.system_wide = true; in kvm_events_report()
1297 int err = -1; in kvm_live_event_list()
1349 OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid", in kvm_events_live()
1351 OPT_CALLBACK('m', "mmap-pages", &kvm->opts.mmap_pages, "pages", in kvm_events_live()
1356 OPT_BOOLEAN('a', "all-cpus", &kvm->opts.target.system_wide, in kvm_events_live()
1357 "system-wide collection from all CPUs"), in kvm_events_live()
1358 OPT_UINTEGER('d', "display", &kvm->display_time, in kvm_events_live()
1359 "time in seconds between display updates"), in kvm_events_live()
1360 OPT_STRING(0, "event", &kvm->report_event, "report event", in kvm_events_live()
1363 OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu, in kvm_events_live()
1365 OPT_STRING('k', "key", &kvm->sort_key, "sort-key", in kvm_events_live()
1366 "key for sorting: sample(sort by samples number)" in kvm_events_live()
1367 " time (sort by avg time)"), in kvm_events_live()
1368 OPT_U64(0, "duration", &kvm->duration, in kvm_events_live()
1372 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout, in kvm_events_live()
1386 kvm->tool.sample = process_sample_event; in kvm_events_live()
1387 kvm->tool.comm = perf_event__process_comm; in kvm_events_live()
1388 kvm->tool.exit = perf_event__process_exit; in kvm_events_live()
1389 kvm->tool.fork = perf_event__process_fork; in kvm_events_live()
1390 kvm->tool.lost = process_lost_event; in kvm_events_live()
1391 kvm->tool.namespaces = perf_event__process_namespaces; in kvm_events_live()
1392 kvm->tool.ordered_events = true; in kvm_events_live()
1393 perf_tool__fill_defaults(&kvm->tool); in kvm_events_live()
1396 kvm->display_time = 1; in kvm_events_live()
1397 kvm->opts.user_interval = 1; in kvm_events_live()
1398 kvm->opts.mmap_pages = 512; in kvm_events_live()
1399 kvm->opts.target.uses_mmap = false; in kvm_events_live()
1400 kvm->opts.target.uid_str = NULL; in kvm_events_live()
1401 kvm->opts.target.uid = UINT_MAX; in kvm_events_live()
1415 kvm->duration *= NSEC_PER_USEC; /* convert usec to nsec */ in kvm_events_live()
1420 err = target__validate(&kvm->opts.target); in kvm_events_live()
1422 target__strerror(&kvm->opts.target, err, errbuf, BUFSIZ); in kvm_events_live()
1426 if (target__none(&kvm->opts.target)) in kvm_events_live()
1427 kvm->opts.target.system_wide = true; in kvm_events_live()
1439 kvm->evlist = kvm_live_event_list(); in kvm_events_live()
1440 if (kvm->evlist == NULL) { in kvm_events_live()
1441 err = -1; in kvm_events_live()
1445 if (perf_evlist__create_maps(kvm->evlist, &kvm->opts.target) < 0) in kvm_events_live()
1451 kvm->session = perf_session__new(&data, false, &kvm->tool); in kvm_events_live()
1452 if (IS_ERR(kvm->session)) { in kvm_events_live()
1453 err = PTR_ERR(kvm->session); in kvm_events_live()
1456 kvm->session->evlist = kvm->evlist; in kvm_events_live()
1457 perf_session__set_id_hdr_size(kvm->session); in kvm_events_live()
1458 ordered_events__set_copy_on_queue(&kvm->session->ordered_events, true); in kvm_events_live()
1459 machine__synthesize_threads(&kvm->session->machines.host, &kvm->opts.target, in kvm_events_live()
1460 kvm->evlist->core.threads, false, 1); in kvm_events_live()
1468 perf_session__delete(kvm->session); in kvm_events_live()
1469 kvm->session = NULL; in kvm_events_live()
1470 evlist__delete(kvm->evlist); in kvm_events_live()
1493 .trace_vcpu = -1, in kvm_cmd_stat()
1495 .sort_key = "sample", in kvm_cmd_stat()
1505 return kvm_events_record(&kvm, argc - 1, argv + 1); in kvm_cmd_stat()
1508 return kvm_events_report(&kvm, argc - 1 , argv + 1); in kvm_cmd_stat()
1512 return kvm_events_live(&kvm, argc - 1 , argv + 1); in kvm_cmd_stat()
1533 return -EINVAL; in __cmd_record()
1538 rec_argv[i++] = strdup("-o"); in __cmd_record()
1556 rec_argv[i++] = strdup("-i"); in __cmd_report()
1574 rec_argv[i++] = strdup("buildid-list"); in __cmd_buildid_list()
1575 rec_argv[i++] = strdup("-i"); in __cmd_buildid_list()
1612 "buildid-list", "stat", NULL }; in cmd_kvm()
1631 return -ENOMEM; in cmd_kvm()
1643 else if (!strncmp(argv[0], "buildid-list", 12)) in cmd_kvm()