1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2017, Intel Corporation.
4 */
5
6 /* Manage metrics and groups of metrics from JSON files */
7
8 #include "metricgroup.h"
9 #include "debug.h"
10 #include "evlist.h"
11 #include "evsel.h"
12 #include "strbuf.h"
13 #include "pmu.h"
14 #include "pmus.h"
15 #include "print-events.h"
16 #include "smt.h"
17 #include "expr.h"
18 #include "rblist.h"
19 #include <string.h>
20 #include <errno.h>
21 #include "strlist.h"
22 #include <assert.h>
23 #include <linux/ctype.h>
24 #include <linux/list_sort.h>
25 #include <linux/string.h>
26 #include <linux/zalloc.h>
27 #include <perf/cpumap.h>
28 #include <subcmd/parse-options.h>
29 #include <api/fs/fs.h>
30 #include "util.h"
31 #include <asm/bug.h>
32 #include "cgroup.h"
33 #include "util/hashmap.h"
34
35 struct metric_event *metricgroup__lookup(struct rblist *metric_events,
36 struct evsel *evsel,
37 bool create)
38 {
39 struct rb_node *nd;
40 struct metric_event me = {
41 .evsel = evsel
42 };
43
44 if (!metric_events)
45 return NULL;
46
47 nd = rblist__find(metric_events, &me);
48 if (nd)
49 return container_of(nd, struct metric_event, nd);
50 if (create) {
51 rblist__add_node(metric_events, &me);
52 nd = rblist__find(metric_events, &me);
53 if (nd)
54 return container_of(nd, struct metric_event, nd);
55 }
56 return NULL;
57 }
58
59 static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
60 {
61 struct metric_event *a = container_of(rb_node,
62 struct metric_event,
63 nd);
64 const struct metric_event *b = entry;
65
66 if (a->evsel == b->evsel)
67 return 0;
68 if ((char *)a->evsel < (char *)b->evsel)
69 return -1;
70 return +1;
71 }
72
73 static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
74 const void *entry)
75 {
76 struct metric_event *me = malloc(sizeof(struct metric_event));
77
78 if (!me)
79 return NULL;
80 memcpy(me, entry, sizeof(struct metric_event));
81 me->evsel = ((struct metric_event *)entry)->evsel;
82 me->is_default = false;
83 INIT_LIST_HEAD(&me->head);
84 return &me->nd;
85 }
86
87 static void metric_event_delete(struct rblist *rblist __maybe_unused,
88 struct rb_node *rb_node)
89 {
90 struct metric_event *me = container_of(rb_node, struct metric_event, nd);
91 struct metric_expr *expr, *tmp;
92
93 list_for_each_entry_safe(expr, tmp, &me->head, nd) {
94 zfree(&expr->metric_name);
95 zfree(&expr->metric_refs);
96 zfree(&expr->metric_events);
97 free(expr);
98 }
99
100 free(me);
101 }
102
103 static void metricgroup__rblist_init(struct rblist *metric_events)
104 {
105 rblist__init(metric_events);
106 metric_events->node_cmp = metric_event_cmp;
107 metric_events->node_new = metric_event_new;
108 metric_events->node_delete = metric_event_delete;
109 }
110
111 void metricgroup__rblist_exit(struct rblist *metric_events)
112 {
113 rblist__exit(metric_events);
114 }
115
116 /**
117 * The metric under construction. The data held here will be placed in a
118 * metric_expr.
119 */
120 struct metric {
121 struct list_head nd;
122 /**
123 * The expression parse context importantly holding the IDs contained
124 * within the expression.
125 */
126 struct expr_parse_ctx *pctx;
127 const char *pmu;
128 /** The name of the metric such as "IPC". */
129 const char *metric_name;
130 /** Modifier on the metric such as "u" or NULL for none. */
131 const char *modifier;
132 /** The expression to parse, for example, "instructions/cycles". */
133 const char *metric_expr;
134 /** Optional threshold expression where zero value is green, otherwise red. */
135 const char *metric_threshold;
136 /**
137 * The "ScaleUnit" that scales and adds a unit to the metric during
138 * output.
139 */
140 const char *metric_unit;
141 /**
142 * Optional name of the metric group reported
143 * if the Default metric group is being processed.
144 */
145 const char *default_metricgroup_name;
146 /** Optional null terminated array of referenced metrics. */
147 struct metric_ref *metric_refs;
148 /**
149 * Should events of the metric be grouped?
150 */
151 bool group_events;
152 /**
153 * Parsed events for the metric. Optional as events may be taken from a
154 * different metric whose group contains all the IDs necessary for this
155 * one.
156 */
157 struct evlist *evlist;
158 };
159
160 static void metric__watchdog_constraint_hint(const char *name, bool foot)
161 {
162 static bool violate_nmi_constraint;
163
164 if (!foot) {
165 pr_warning("Not grouping metric %s's events.\n", name);
166 violate_nmi_constraint = true;
167 return;
168 }
169
170 if (!violate_nmi_constraint)
171 return;
172
173 pr_warning("Try disabling the NMI watchdog to comply with the NO_NMI_WATCHDOG metric constraint:\n"
174 " echo 0 > /proc/sys/kernel/nmi_watchdog\n"
175 " perf stat ...\n"
176 " echo 1 > /proc/sys/kernel/nmi_watchdog\n");
177 }
178
179 static bool metric__group_events(const struct pmu_metric *pm)
180 {
181 switch (pm->event_grouping) {
182 case MetricNoGroupEvents:
183 return false;
184 case MetricNoGroupEventsNmi:
185 if (!sysctl__nmi_watchdog_enabled())
186 return true;
187 metric__watchdog_constraint_hint(pm->metric_name, /*foot=*/false);
188 return false;
189 case MetricNoGroupEventsSmt:
190 return !smt_on();
191 case MetricGroupEvents:
192 default:
193 return true;
194 }
195 }
196
197 static void metric__free(struct metric *m)
198 {
199 if (!m)
200 return;
201
202 zfree(&m->metric_refs);
203 expr__ctx_free(m->pctx);
204 zfree(&m->modifier);
205 evlist__delete(m->evlist);
206 free(m);
207 }
208
209 static struct metric *metric__new(const struct pmu_metric *pm,
210 const char *modifier,
211 bool metric_no_group,
212 int runtime,
213 const char *user_requested_cpu_list,
214 bool system_wide)
215 {
216 struct metric *m;
217
218 m = zalloc(sizeof(*m));
219 if (!m)
220 return NULL;
221
222 m->pctx = expr__ctx_new();
223 if (!m->pctx)
224 goto out_err;
225
226 m->pmu = pm->pmu ?: "cpu";
227 m->metric_name = pm->metric_name;
228 m->default_metricgroup_name = pm->default_metricgroup_name;
229 m->modifier = NULL;
230 if (modifier) {
231 m->modifier = strdup(modifier);
232 if (!m->modifier)
233 goto out_err;
234 }
235 m->metric_expr = pm->metric_expr;
236 m->metric_threshold = pm->metric_threshold;
237 m->metric_unit = pm->unit;
238 m->pctx->sctx.user_requested_cpu_list = NULL;
239 if (user_requested_cpu_list) {
240 m->pctx->sctx.user_requested_cpu_list = strdup(user_requested_cpu_list);
241 if (!m->pctx->sctx.user_requested_cpu_list)
242 goto out_err;
243 }
244 m->pctx->sctx.runtime = runtime;
245 m->pctx->sctx.system_wide = system_wide;
246 m->group_events = !metric_no_group && metric__group_events(pm);
247 m->metric_refs = NULL;
248 m->evlist = NULL;
249
250 return m;
251 out_err:
252 metric__free(m);
253 return NULL;
254 }
255
256 static bool contains_metric_id(struct evsel **metric_events, int num_events,
257 const char *metric_id)
258 {
259 int i;
260
261 for (i = 0; i < num_events; i++) {
262 if (!strcmp(evsel__metric_id(metric_events[i]), metric_id))
263 return true;
264 }
265 return false;
266 }
267
268 /**
269 * setup_metric_events - Find a group of events in metric_evlist that correspond
270 * to the IDs from a parsed metric expression.
271 * @pmu: The PMU for the IDs.
272 * @ids: the metric IDs to match.
273 * @metric_evlist: the list of perf events.
274 * @out_metric_events: holds the created metric events array.
275 */
276 static int setup_metric_events(const char *pmu, struct hashmap *ids,
277 struct evlist *metric_evlist,
278 struct evsel ***out_metric_events)
279 {
280 struct evsel **metric_events;
281 const char *metric_id;
282 struct evsel *ev;
283 size_t ids_size, matched_events, i;
284 bool all_pmus = !strcmp(pmu, "all") || perf_pmus__num_core_pmus() == 1 || !is_pmu_core(pmu);
285
286 *out_metric_events = NULL;
287 ids_size = hashmap__size(ids);
288
289 metric_events = calloc(ids_size + 1, sizeof(void *));
290 if (!metric_events)
291 return -ENOMEM;
292
293 matched_events = 0;
294 evlist__for_each_entry(metric_evlist, ev) {
295 struct expr_id_data *val_ptr;
296
297 /* Don't match events for the wrong hybrid PMU. */
298 if (!all_pmus && ev->pmu_name && evsel__is_hybrid(ev) &&
299 strcmp(ev->pmu_name, pmu))
300 continue;
301 /*
302 * Check for duplicate events with the same name. For
303 * example, uncore_imc/cas_count_read/ will turn into 6
304 * events per socket on skylakex. Only the first such
305 * event is placed in metric_events.
306 */
307 metric_id = evsel__metric_id(ev);
308 if (contains_metric_id(metric_events, matched_events, metric_id))
309 continue;
310 /*
311 * Does this event belong to the parse context? For
312 * combined or shared groups, this metric may not care
313 * about this event.
314 */
315 if (hashmap__find(ids, metric_id, &val_ptr)) {
316 pr_debug("Matched metric-id %s to %s\n", metric_id, evsel__name(ev));
317 metric_events[matched_events++] = ev;
318
319 if (matched_events >= ids_size)
320 break;
321 }
322 }
323 if (matched_events < ids_size) {
324 free(metric_events);
325 return -EINVAL;
326 }
327 for (i = 0; i < ids_size; i++) {
328 ev = metric_events[i];
329 ev->collect_stat = true;
330
331 /*
332 * The metric leader points to the identically named
333 * event in metric_events.
334 */
335 ev->metric_leader = ev;
336 /*
337 * Mark two events with identical names in the same
338 * group (or globally) as being in use as uncore events
339 * may be duplicated for each pmu. Set the metric leader
340 * of such events to be the event that appears in
341 * metric_events.
342 */
343 metric_id = evsel__metric_id(ev);
344 evlist__for_each_entry_continue(metric_evlist, ev) {
345 if (!strcmp(evsel__metric_id(ev), metric_id))
346 ev->metric_leader = metric_events[i];
347 }
348 }
349 *out_metric_events = metric_events;
350 return 0;
351 }
352
353 static bool match_metric(const char *n, const char *list)
354 {
355 int len;
356 char *m;
357
358 if (!list)
359 return false;
360 if (!strcmp(list, "all"))
361 return true;
362 if (!n)
363 return !strcasecmp(list, "No_group");
364 len = strlen(list);
365 m = strcasestr(n, list);
366 if (!m)
367 return false;
368 if ((m == n || m[-1] == ';' || m[-1] == ' ') &&
369 (m[len] == 0 || m[len] == ';'))
370 return true;
371 return false;
372 }
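/*
 * Illustrative examples of the matching rules above (derived from the checks
 * in match_metric(), not an exhaustive specification):
 *   match_metric("TopdownL1;Default", "topdownl1") matches (case-insensitive,
 *   bounded by the start of the string and ';');
 *   match_metric("TopdownL1", "Topdown") does not match (no ';' or
 *   end-of-string boundary after the hit);
 *   match_metric(NULL, "No_group") and match_metric("IPC", "all") both match.
 */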
373
374 static bool match_pm_metric(const struct pmu_metric *pm, const char *pmu, const char *metric)
375 {
376 const char *pm_pmu = pm->pmu ?: "cpu";
377
378 if (strcmp(pmu, "all") && strcmp(pm_pmu, pmu))
379 return false;
380
381 return match_metric(pm->metric_group, metric) ||
382 match_metric(pm->metric_name, metric);
383 }
384
385 /** struct mep - RB-tree node for building printing information. */
386 struct mep {
387 /** @nd: RB-tree element. */
388 struct rb_node nd;
389 /** @metric_group: Owned metric group name, with multiple groups separated by ';'. */
390 char *metric_group;
391 const char *metric_name;
392 const char *metric_desc;
393 const char *metric_long_desc;
394 const char *metric_expr;
395 const char *metric_threshold;
396 const char *metric_unit;
397 };
398
399 static int mep_cmp(struct rb_node *rb_node, const void *entry)
400 {
401 struct mep *a = container_of(rb_node, struct mep, nd);
402 struct mep *b = (struct mep *)entry;
403 int ret;
404
405 ret = strcmp(a->metric_group, b->metric_group);
406 if (ret)
407 return ret;
408
409 return strcmp(a->metric_name, b->metric_name);
410 }
411
412 static struct rb_node *mep_new(struct rblist *rl __maybe_unused, const void *entry)
413 {
414 struct mep *me = malloc(sizeof(struct mep));
415
416 if (!me)
417 return NULL;
418
419 memcpy(me, entry, sizeof(struct mep));
420 return &me->nd;
421 }
422
423 static void mep_delete(struct rblist *rl __maybe_unused,
424 struct rb_node *nd)
425 {
426 struct mep *me = container_of(nd, struct mep, nd);
427
428 zfree(&me->metric_group);
429 free(me);
430 }
431
432 static struct mep *mep_lookup(struct rblist *groups, const char *metric_group,
433 const char *metric_name)
434 {
435 struct rb_node *nd;
436 struct mep me = {
437 .metric_group = strdup(metric_group),
438 .metric_name = metric_name,
439 };
440 nd = rblist__find(groups, &me);
441 if (nd) {
442 free(me.metric_group);
443 return container_of(nd, struct mep, nd);
444 }
445 rblist__add_node(groups, &me);
446 nd = rblist__find(groups, &me);
447 if (nd)
448 return container_of(nd, struct mep, nd);
449 return NULL;
450 }
451
452 static int metricgroup__add_to_mep_groups(const struct pmu_metric *pm,
453 struct rblist *groups)
454 {
455 const char *g;
456 char *omg, *mg;
457
458 mg = strdup(pm->metric_group ?: "No_group");
459 if (!mg)
460 return -ENOMEM;
461 omg = mg;
462 while ((g = strsep(&mg, ";")) != NULL) {
463 struct mep *me;
464
465 g = skip_spaces(g);
466 if (strlen(g))
467 me = mep_lookup(groups, g, pm->metric_name);
468 else
469 me = mep_lookup(groups, "No_group", pm->metric_name);
470
471 if (me) {
472 me->metric_desc = pm->desc;
473 me->metric_long_desc = pm->long_desc;
474 me->metric_expr = pm->metric_expr;
475 me->metric_threshold = pm->metric_threshold;
476 me->metric_unit = pm->unit;
477 }
478 }
479 free(omg);
480
481 return 0;
482 }
483
484 struct metricgroup_iter_data {
485 pmu_metric_iter_fn fn;
486 void *data;
487 };
488
489 static int metricgroup__sys_event_iter(const struct pmu_metric *pm,
490 const struct pmu_metrics_table *table,
491 void *data)
492 {
493 struct metricgroup_iter_data *d = data;
494 struct perf_pmu *pmu = NULL;
495
496 if (!pm->metric_expr || !pm->compat)
497 return 0;
498
499 while ((pmu = perf_pmus__scan(pmu))) {
500
501 if (!pmu->id || strcmp(pmu->id, pm->compat))
502 continue;
503
504 return d->fn(pm, table, d->data);
505 }
506 return 0;
507 }
508
509 static int metricgroup__add_to_mep_groups_callback(const struct pmu_metric *pm,
510 const struct pmu_metrics_table *table __maybe_unused,
511 void *vdata)
512 {
513 struct rblist *groups = vdata;
514
515 return metricgroup__add_to_mep_groups(pm, groups);
516 }
517
518 void metricgroup__print(const struct print_callbacks *print_cb, void *print_state)
519 {
520 struct rblist groups;
521 const struct pmu_metrics_table *table;
522 struct rb_node *node, *next;
523
524 rblist__init(&groups);
525 groups.node_new = mep_new;
526 groups.node_cmp = mep_cmp;
527 groups.node_delete = mep_delete;
528 table = pmu_metrics_table__find();
529 if (table) {
530 pmu_metrics_table__for_each_metric(table,
531 metricgroup__add_to_mep_groups_callback,
532 &groups);
533 }
534 {
535 struct metricgroup_iter_data data = {
536 .fn = metricgroup__add_to_mep_groups_callback,
537 .data = &groups,
538 };
539 pmu_for_each_sys_metric(metricgroup__sys_event_iter, &data);
540 }
541
542 for (node = rb_first_cached(&groups.entries); node; node = next) {
543 struct mep *me = container_of(node, struct mep, nd);
544
545 print_cb->print_metric(print_state,
546 me->metric_group,
547 me->metric_name,
548 me->metric_desc,
549 me->metric_long_desc,
550 me->metric_expr,
551 me->metric_threshold,
552 me->metric_unit);
553 next = rb_next(node);
554 rblist__remove_node(&groups, node);
555 }
556 }
557
558 static const char *code_characters = ",-=@";
559
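/*
 * Illustrative mapping implied by code_characters above: ',' -> "!0",
 * '-' -> "!1", '=' -> "!2", '@' -> "!3". For example, the metric ID
 * "msr@tsc@" is encoded as "msr!3tsc!3" by encode_metric_id() and turned
 * back into "msr@tsc@" by decode_metric_id().
 */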
560 static int encode_metric_id(struct strbuf *sb, const char *x)
561 {
562 char *c;
563 int ret = 0;
564
565 for (; *x; x++) {
566 c = strchr(code_characters, *x);
567 if (c) {
568 ret = strbuf_addch(sb, '!');
569 if (ret)
570 break;
571
572 ret = strbuf_addch(sb, '0' + (c - code_characters));
573 if (ret)
574 break;
575 } else {
576 ret = strbuf_addch(sb, *x);
577 if (ret)
578 break;
579 }
580 }
581 return ret;
582 }
583
584 static int decode_metric_id(struct strbuf *sb, const char *x)
585 {
586 const char *orig = x;
587 size_t i;
588 char c;
589 int ret;
590
591 for (; *x; x++) {
592 c = *x;
593 if (*x == '!') {
594 x++;
595 i = *x - '0';
596 if (i > strlen(code_characters)) {
597 pr_err("Bad metric-id encoding in: '%s'", orig);
598 return -1;
599 }
600 c = code_characters[i];
601 }
602 ret = strbuf_addch(sb, c);
603 if (ret)
604 return ret;
605 }
606 return 0;
607 }
608
609 static int decode_all_metric_ids(struct evlist *perf_evlist, const char *modifier)
610 {
611 struct evsel *ev;
612 struct strbuf sb = STRBUF_INIT;
613 char *cur;
614 int ret = 0;
615
616 evlist__for_each_entry(perf_evlist, ev) {
617 if (!ev->metric_id)
618 continue;
619
620 ret = strbuf_setlen(&sb, 0);
621 if (ret)
622 break;
623
624 ret = decode_metric_id(&sb, ev->metric_id);
625 if (ret)
626 break;
627
628 free((char *)ev->metric_id);
629 ev->metric_id = strdup(sb.buf);
630 if (!ev->metric_id) {
631 ret = -ENOMEM;
632 break;
633 }
634 /*
635 * If the name is just the parsed event, use the metric-id to
636 * give a more friendly display version.
637 */
638 if (strstr(ev->name, "metric-id=")) {
639 bool has_slash = false;
640
641 zfree(&ev->name);
642 for (cur = strchr(sb.buf, '@') ; cur; cur = strchr(++cur, '@')) {
643 *cur = '/';
644 has_slash = true;
645 }
646
647 if (modifier) {
648 if (!has_slash && !strchr(sb.buf, ':')) {
649 ret = strbuf_addch(&sb, ':');
650 if (ret)
651 break;
652 }
653 ret = strbuf_addstr(&sb, modifier);
654 if (ret)
655 break;
656 }
657 ev->name = strdup(sb.buf);
658 if (!ev->name) {
659 ret = -ENOMEM;
660 break;
661 }
662 }
663 }
664 strbuf_release(&sb);
665 return ret;
666 }
667
668 static int metricgroup__build_event_string(struct strbuf *events,
669 const struct expr_parse_ctx *ctx,
670 const char *modifier,
671 bool group_events)
672 {
673 struct hashmap_entry *cur;
674 size_t bkt;
675 bool no_group = true, has_tool_events = false;
676 bool tool_events[PERF_TOOL_MAX] = {false};
677 int ret = 0;
678
679 #define RETURN_IF_NON_ZERO(x) do { if (x) return x; } while (0)
680
681 hashmap__for_each_entry(ctx->ids, cur, bkt) {
682 const char *sep, *rsep, *id = cur->pkey;
683 enum perf_tool_event ev;
684
685 pr_debug("found event %s\n", id);
686
687 /* Always move tool events outside of the group. */
688 ev = perf_tool_event__from_str(id);
689 if (ev != PERF_TOOL_NONE) {
690 has_tool_events = true;
691 tool_events[ev] = true;
692 continue;
693 }
694 /* Separate events with commas and open the group if necessary. */
695 if (no_group) {
696 if (group_events) {
697 ret = strbuf_addch(events, '{');
698 RETURN_IF_NON_ZERO(ret);
699 }
700
701 no_group = false;
702 } else {
703 ret = strbuf_addch(events, ',');
704 RETURN_IF_NON_ZERO(ret);
705 }
706 /*
707 * Encode the ID as an event string. Add a qualifier for
708 * metric_id that is the original name except with characters
709 * that parse-events can't parse replaced. For example,
710 * 'msr@tsc@' gets added as msr/tsc,metric-id=msr!3tsc!3/
711 */
712 sep = strchr(id, '@');
713 if (sep != NULL) {
714 ret = strbuf_add(events, id, sep - id);
715 RETURN_IF_NON_ZERO(ret);
716 ret = strbuf_addch(events, '/');
717 RETURN_IF_NON_ZERO(ret);
718 rsep = strrchr(sep, '@');
719 ret = strbuf_add(events, sep + 1, rsep - sep - 1);
720 RETURN_IF_NON_ZERO(ret);
721 ret = strbuf_addstr(events, ",metric-id=");
722 RETURN_IF_NON_ZERO(ret);
723 sep = rsep;
724 } else {
725 sep = strchr(id, ':');
726 if (sep != NULL) {
727 ret = strbuf_add(events, id, sep - id);
728 RETURN_IF_NON_ZERO(ret);
729 } else {
730 ret = strbuf_addstr(events, id);
731 RETURN_IF_NON_ZERO(ret);
732 }
733 ret = strbuf_addstr(events, "/metric-id=");
734 RETURN_IF_NON_ZERO(ret);
735 }
736 ret = encode_metric_id(events, id);
737 RETURN_IF_NON_ZERO(ret);
738 ret = strbuf_addstr(events, "/");
739 RETURN_IF_NON_ZERO(ret);
740
741 if (sep != NULL) {
742 ret = strbuf_addstr(events, sep + 1);
743 RETURN_IF_NON_ZERO(ret);
744 }
745 if (modifier) {
746 ret = strbuf_addstr(events, modifier);
747 RETURN_IF_NON_ZERO(ret);
748 }
749 }
750 if (!no_group && group_events) {
751 ret = strbuf_addf(events, "}:W");
752 RETURN_IF_NON_ZERO(ret);
753 }
754 if (has_tool_events) {
755 int i;
756
757 perf_tool_event__for_each_event(i) {
758 if (tool_events[i]) {
759 if (!no_group) {
760 ret = strbuf_addch(events, ',');
761 RETURN_IF_NON_ZERO(ret);
762 }
763 no_group = false;
764 ret = strbuf_addstr(events, perf_tool_event__to_str(i));
765 RETURN_IF_NON_ZERO(ret);
766 }
767 }
768 }
769
770 return ret;
771 #undef RETURN_IF_NON_ZERO
772 }
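/*
 * For illustration, with the IDs "instructions" and "cycles", no modifier and
 * group_events set, the string built above would look like (the order of IDs
 * follows the hashmap and may vary):
 *   {instructions/metric-id=instructions/,cycles/metric-id=cycles/}:W
 * Tool events such as "duration_time" are always appended outside the group.
 */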
773
774 int __weak arch_get_runtimeparam(const struct pmu_metric *pm __maybe_unused)
775 {
776 return 1;
777 }
778
779 /*
780 * A singly linked list on the stack of the names of metrics being
781 * processed. Used to identify recursion.
782 */
783 struct visited_metric {
784 const char *name;
785 const struct visited_metric *parent;
786 };
787
788 struct metricgroup_add_iter_data {
789 struct list_head *metric_list;
790 const char *pmu;
791 const char *metric_name;
792 const char *modifier;
793 int *ret;
794 bool *has_match;
795 bool metric_no_group;
796 bool metric_no_threshold;
797 const char *user_requested_cpu_list;
798 bool system_wide;
799 struct metric *root_metric;
800 const struct visited_metric *visited;
801 const struct pmu_metrics_table *table;
802 };
803
804 static bool metricgroup__find_metric(const char *pmu,
805 const char *metric,
806 const struct pmu_metrics_table *table,
807 struct pmu_metric *pm);
808
809 static int add_metric(struct list_head *metric_list,
810 const struct pmu_metric *pm,
811 const char *modifier,
812 bool metric_no_group,
813 bool metric_no_threshold,
814 const char *user_requested_cpu_list,
815 bool system_wide,
816 struct metric *root_metric,
817 const struct visited_metric *visited,
818 const struct pmu_metrics_table *table);
819
820 /**
821 * resolve_metric - Locate metrics within the root metric and recursively add
822 * references to them.
823 * @metric_list: The list the metric is added to.
824 * @pmu: The PMU name to resolve metrics on, or "all" for all PMUs.
825 * @modifier: if non-null event modifiers like "u".
826 * @metric_no_group: Should events written to the events string be grouped
827 * "{}" or left global. Grouping is the default but due to
828 * multiplexing the user may override.
829 * @user_requested_cpu_list: Command line specified CPUs to record on.
830 * @system_wide: Are events for all processes recorded.
831 * @root_metric: Metrics may reference other metrics to form a tree. In this
832 * case the root_metric holds all the IDs and a list of referenced
833 * metrics. When adding a root this argument is NULL.
834 * @visited: A singly linked list of metric names being added that is used to
835 * detect recursion.
836 * @table: The table that is searched for metrics, most commonly the table for the
837 * architecture perf is running upon.
838 */
839 static int resolve_metric(struct list_head *metric_list,
840 const char *pmu,
841 const char *modifier,
842 bool metric_no_group,
843 bool metric_no_threshold,
844 const char *user_requested_cpu_list,
845 bool system_wide,
846 struct metric *root_metric,
847 const struct visited_metric *visited,
848 const struct pmu_metrics_table *table)
849 {
850 struct hashmap_entry *cur;
851 size_t bkt;
852 struct to_resolve {
853 /* The metric to resolve. */
854 struct pmu_metric pm;
855 /*
856 * The key in the IDs map; this may differ from
857 * pm->metric_name, for example in letter case.
858 */
859 const char *key;
860 } *pending = NULL;
861 int i, ret = 0, pending_cnt = 0;
862
863 /*
864 * Iterate over all the parsed IDs and, if there's a matching metric,
865 * add it to the pending array.
866 */
867 hashmap__for_each_entry(root_metric->pctx->ids, cur, bkt) {
868 struct pmu_metric pm;
869
870 if (metricgroup__find_metric(pmu, cur->pkey, table, &pm)) {
871 pending = realloc(pending,
872 (pending_cnt + 1) * sizeof(struct to_resolve));
873 if (!pending)
874 return -ENOMEM;
875
876 memcpy(&pending[pending_cnt].pm, &pm, sizeof(pm));
877 pending[pending_cnt].key = cur->pkey;
878 pending_cnt++;
879 }
880 }
881
882 /* Remove the metric IDs from the context. */
883 for (i = 0; i < pending_cnt; i++)
884 expr__del_id(root_metric->pctx, pending[i].key);
885
886 /*
887 * Recursively add all the metrics, IDs are added to the root metric's
888 * context.
889 */
890 for (i = 0; i < pending_cnt; i++) {
891 ret = add_metric(metric_list, &pending[i].pm, modifier, metric_no_group,
892 metric_no_threshold, user_requested_cpu_list, system_wide,
893 root_metric, visited, table);
894 if (ret)
895 break;
896 }
897
898 free(pending);
899 return ret;
900 }
901
902 /**
903 * __add_metric - Add a metric to metric_list.
904 * @metric_list: The list the metric is added to.
905 * @pm: The pmu_metric containing the metric to be added.
906 * @modifier: if non-null event modifiers like "u".
907 * @metric_no_group: Should events written to the events string be grouped
908 * "{}" or left global. Grouping is the default but due to
909 * multiplexing the user may override.
910 * @metric_no_threshold: Should threshold expressions be ignored?
911 * @runtime: A special argument for the parser only known at runtime.
912 * @user_requested_cpu_list: Command line specified CPUs to record on.
913 * @system_wide: Are events for all processes recorded.
914 * @root_metric: Metrics may reference other metrics to form a tree. In this
915 * case the root_metric holds all the IDs and a list of referenced
916 * metrics. When adding a root this argument is NULL.
917 * @visited: A singly linked list of metric names being added that is used to
918 * detect recursion.
919 * @table: The table that is searched for metrics, most commonly the table for the
920 * architecture perf is running upon.
921 */
922 static int __add_metric(struct list_head *metric_list,
923 const struct pmu_metric *pm,
924 const char *modifier,
925 bool metric_no_group,
926 bool metric_no_threshold,
927 int runtime,
928 const char *user_requested_cpu_list,
929 bool system_wide,
930 struct metric *root_metric,
931 const struct visited_metric *visited,
932 const struct pmu_metrics_table *table)
933 {
934 const struct visited_metric *vm;
935 int ret;
936 bool is_root = !root_metric;
937 const char *expr;
938 struct visited_metric visited_node = {
939 .name = pm->metric_name,
940 .parent = visited,
941 };
942
943 for (vm = visited; vm; vm = vm->parent) {
944 if (!strcmp(pm->metric_name, vm->name)) {
945 pr_err("failed: recursion detected for %s\n", pm->metric_name);
946 return -1;
947 }
948 }
949
950 if (is_root) {
951 /*
952 * This metric is the root of a tree and may reference other
953 * metrics that are added recursively.
954 */
955 root_metric = metric__new(pm, modifier, metric_no_group, runtime,
956 user_requested_cpu_list, system_wide);
957 if (!root_metric)
958 return -ENOMEM;
959
960 } else {
961 int cnt = 0;
962
963 /*
964 * This metric was referenced in a metric higher in the
965 * tree. Check if the same metric is already resolved in the
966 * metric_refs list.
967 */
968 if (root_metric->metric_refs) {
969 for (; root_metric->metric_refs[cnt].metric_name; cnt++) {
970 if (!strcmp(pm->metric_name,
971 root_metric->metric_refs[cnt].metric_name))
972 return 0;
973 }
974 }
975
976 /* Create reference. Need space for the entry and the terminator. */
977 root_metric->metric_refs = realloc(root_metric->metric_refs,
978 (cnt + 2) * sizeof(struct metric_ref));
979 if (!root_metric->metric_refs)
980 return -ENOMEM;
981
982 /*
983 * Intentionally pass just the const char pointers
984 * from the 'pm' object, so they never go away. We don't
985 * need to change them, so there's no need to create
986 * our own copy.
987 */
988 root_metric->metric_refs[cnt].metric_name = pm->metric_name;
989 root_metric->metric_refs[cnt].metric_expr = pm->metric_expr;
990
991 /* Null terminate array. */
992 root_metric->metric_refs[cnt+1].metric_name = NULL;
993 root_metric->metric_refs[cnt+1].metric_expr = NULL;
994 }
995
996 /*
997 * For both the parent and referenced metrics, we parse
998 * all the metric's IDs and add it to the root context.
999 */
1000 ret = 0;
1001 expr = pm->metric_expr;
1002 if (is_root && pm->metric_threshold) {
1003 /*
1004 * Threshold expressions are built off the actual metric. Switch
1005 * to use that in case of additional necessary events. Change
1006 * the visited node name to avoid this being flagged as
1007 * recursion. If the threshold events are disabled, just use the
1008 * metric's name as a reference. This allows metric threshold
1009 * computation if there are sufficient events.
1010 */
1011 assert(strstr(pm->metric_threshold, pm->metric_name));
1012 expr = metric_no_threshold ? pm->metric_name : pm->metric_threshold;
1013 visited_node.name = "__threshold__";
1014 }
1015 if (expr__find_ids(expr, NULL, root_metric->pctx) < 0) {
1016 /* Broken metric. */
1017 ret = -EINVAL;
1018 }
1019 if (!ret) {
1020 /* Resolve referenced metrics. */
1021 const char *pmu = pm->pmu ?: "cpu";
1022
1023 ret = resolve_metric(metric_list, pmu, modifier, metric_no_group,
1024 metric_no_threshold, user_requested_cpu_list,
1025 system_wide, root_metric, &visited_node,
1026 table);
1027 }
1028 if (ret) {
1029 if (is_root)
1030 metric__free(root_metric);
1031
1032 } else if (is_root)
1033 list_add(&root_metric->nd, metric_list);
1034
1035 return ret;
1036 }
1037
1038 struct metricgroup__find_metric_data {
1039 const char *pmu;
1040 const char *metric;
1041 struct pmu_metric *pm;
1042 };
1043
1044 static int metricgroup__find_metric_callback(const struct pmu_metric *pm,
1045 const struct pmu_metrics_table *table __maybe_unused,
1046 void *vdata)
1047 {
1048 struct metricgroup__find_metric_data *data = vdata;
1049 const char *pm_pmu = pm->pmu ?: "cpu";
1050
1051 if (strcmp(data->pmu, "all") && strcmp(pm_pmu, data->pmu))
1052 return 0;
1053
1054 if (!match_metric(pm->metric_name, data->metric))
1055 return 0;
1056
1057 memcpy(data->pm, pm, sizeof(*pm));
1058 return 1;
1059 }
1060
1061 static bool metricgroup__find_metric(const char *pmu,
1062 const char *metric,
1063 const struct pmu_metrics_table *table,
1064 struct pmu_metric *pm)
1065 {
1066 struct metricgroup__find_metric_data data = {
1067 .pmu = pmu,
1068 .metric = metric,
1069 .pm = pm,
1070 };
1071
1072 return pmu_metrics_table__for_each_metric(table, metricgroup__find_metric_callback, &data)
1073 ? true : false;
1074 }
1075
1076 static int add_metric(struct list_head *metric_list,
1077 const struct pmu_metric *pm,
1078 const char *modifier,
1079 bool metric_no_group,
1080 bool metric_no_threshold,
1081 const char *user_requested_cpu_list,
1082 bool system_wide,
1083 struct metric *root_metric,
1084 const struct visited_metric *visited,
1085 const struct pmu_metrics_table *table)
1086 {
1087 int ret = 0;
1088
1089 pr_debug("metric expr %s for %s\n", pm->metric_expr, pm->metric_name);
1090
1091 if (!strstr(pm->metric_expr, "?")) {
1092 ret = __add_metric(metric_list, pm, modifier, metric_no_group,
1093 metric_no_threshold, 0, user_requested_cpu_list,
1094 system_wide, root_metric, visited, table);
1095 } else {
1096 int j, count;
1097
1098 count = arch_get_runtimeparam(pm);
1099
1100 /*
1101 * This loop creates multiple events, depending on the count
1102 * value, and adds those events to metric_list.
1103 */
1104
1105 for (j = 0; j < count && !ret; j++)
1106 ret = __add_metric(metric_list, pm, modifier, metric_no_group,
1107 metric_no_threshold, j, user_requested_cpu_list,
1108 system_wide, root_metric, visited, table);
1109 }
1110
1111 return ret;
1112 }
1113
1114 static int metricgroup__add_metric_sys_event_iter(const struct pmu_metric *pm,
1115 const struct pmu_metrics_table *table __maybe_unused,
1116 void *data)
1117 {
1118 struct metricgroup_add_iter_data *d = data;
1119 int ret;
1120
1121 if (!match_pm_metric(pm, d->pmu, d->metric_name))
1122 return 0;
1123
1124 ret = add_metric(d->metric_list, pm, d->modifier, d->metric_no_group,
1125 d->metric_no_threshold, d->user_requested_cpu_list,
1126 d->system_wide, d->root_metric, d->visited, d->table);
1127 if (ret)
1128 goto out;
1129
1130 *(d->has_match) = true;
1131
1132 out:
1133 *(d->ret) = ret;
1134 return ret;
1135 }
1136
1137 /**
1138 * metric_list_cmp - list_sort comparator that sorts metrics with more events to
1139 * the front. Tool events are excluded from the count.
1140 */
1141 static int metric_list_cmp(void *priv __maybe_unused, const struct list_head *l,
1142 const struct list_head *r)
1143 {
1144 const struct metric *left = container_of(l, struct metric, nd);
1145 const struct metric *right = container_of(r, struct metric, nd);
1146 struct expr_id_data *data;
1147 int i, left_count, right_count;
1148
1149 left_count = hashmap__size(left->pctx->ids);
1150 perf_tool_event__for_each_event(i) {
1151 if (!expr__get_id(left->pctx, perf_tool_event__to_str(i), &data))
1152 left_count--;
1153 }
1154
1155 right_count = hashmap__size(right->pctx->ids);
1156 perf_tool_event__for_each_event(i) {
1157 if (!expr__get_id(right->pctx, perf_tool_event__to_str(i), &data))
1158 right_count--;
1159 }
1160
1161 return right_count - left_count;
1162 }
1163
1164 /**
1165 * default_metricgroup_cmp - Implements complex key for the Default metricgroup
1166 * that first sorts by default_metricgroup_name, then
1167 * metric_name.
1168 */
1169 static int default_metricgroup_cmp(void *priv __maybe_unused,
1170 const struct list_head *l,
1171 const struct list_head *r)
1172 {
1173 const struct metric *left = container_of(l, struct metric, nd);
1174 const struct metric *right = container_of(r, struct metric, nd);
1175 int diff = strcmp(right->default_metricgroup_name, left->default_metricgroup_name);
1176
1177 if (diff)
1178 return diff;
1179
1180 return strcmp(right->metric_name, left->metric_name);
1181 }
1182
1183 struct metricgroup__add_metric_data {
1184 struct list_head *list;
1185 const char *pmu;
1186 const char *metric_name;
1187 const char *modifier;
1188 const char *user_requested_cpu_list;
1189 bool metric_no_group;
1190 bool metric_no_threshold;
1191 bool system_wide;
1192 bool has_match;
1193 };
1194
1195 static int metricgroup__add_metric_callback(const struct pmu_metric *pm,
1196 const struct pmu_metrics_table *table,
1197 void *vdata)
1198 {
1199 struct metricgroup__add_metric_data *data = vdata;
1200 int ret = 0;
1201
1202 if (pm->metric_expr && match_pm_metric(pm, data->pmu, data->metric_name)) {
1203 bool metric_no_group = data->metric_no_group ||
1204 match_metric(pm->metricgroup_no_group, data->metric_name);
1205
1206 data->has_match = true;
1207 ret = add_metric(data->list, pm, data->modifier, metric_no_group,
1208 data->metric_no_threshold, data->user_requested_cpu_list,
1209 data->system_wide, /*root_metric=*/NULL,
1210 /*visited_metrics=*/NULL, table);
1211 }
1212 return ret;
1213 }
1214
1215 /**
1216 * metricgroup__add_metric - Find and add a metric, or a metric group.
1217 * @pmu: The PMU name to search for metrics on, or "all" for all PMUs.
1218 * @metric_name: The name of the metric or metric group. For example, "IPC"
1219 * could be the name of a metric and "TopDownL1" the name of a
1220 * metric group.
1221 * @modifier: if non-null event modifiers like "u".
1222 * @metric_no_group: Should events written to the events string be grouped
1223 * "{}" or left global. Grouping is the default but due to
1224 * multiplexing the user may override.
1225 * @user_requested_cpu_list: Command line specified CPUs to record on.
1226 * @system_wide: Are events for all processes recorded.
1227 * @metric_list: The list that the metric or metric group are added to.
1228 * @table: The table that is searched for metrics, most commonly the table for the
1229 * architecture perf is running upon.
1230 */
1231 static int metricgroup__add_metric(const char *pmu, const char *metric_name, const char *modifier,
1232 bool metric_no_group, bool metric_no_threshold,
1233 const char *user_requested_cpu_list,
1234 bool system_wide,
1235 struct list_head *metric_list,
1236 const struct pmu_metrics_table *table)
1237 {
1238 LIST_HEAD(list);
1239 int ret;
1240 bool has_match = false;
1241
1242 {
1243 struct metricgroup__add_metric_data data = {
1244 .list = &list,
1245 .pmu = pmu,
1246 .metric_name = metric_name,
1247 .modifier = modifier,
1248 .metric_no_group = metric_no_group,
1249 .metric_no_threshold = metric_no_threshold,
1250 .user_requested_cpu_list = user_requested_cpu_list,
1251 .system_wide = system_wide,
1252 .has_match = false,
1253 };
1254 /*
1255 * Iterate over all metrics checking whether the metric matches
1256 * either the name or group. When it does, add the metric to the list.
1257 */
1258 ret = pmu_metrics_table__for_each_metric(table, metricgroup__add_metric_callback,
1259 &data);
1260 if (ret)
1261 goto out;
1262
1263 has_match = data.has_match;
1264 }
1265 {
1266 struct metricgroup_iter_data data = {
1267 .fn = metricgroup__add_metric_sys_event_iter,
1268 .data = (void *) &(struct metricgroup_add_iter_data) {
1269 .metric_list = &list,
1270 .pmu = pmu,
1271 .metric_name = metric_name,
1272 .modifier = modifier,
1273 .metric_no_group = metric_no_group,
1274 .user_requested_cpu_list = user_requested_cpu_list,
1275 .system_wide = system_wide,
1276 .has_match = &has_match,
1277 .ret = &ret,
1278 .table = table,
1279 },
1280 };
1281
1282 pmu_for_each_sys_metric(metricgroup__sys_event_iter, &data);
1283 }
1284 /* End of pmu events. */
1285 if (!has_match)
1286 ret = -EINVAL;
1287
1288 out:
1289 /*
1290 * Add the metrics to metric_list so that they can be
1291 * released even on failure.
1292 */
1293 list_splice(&list, metric_list);
1294 return ret;
1295 }
1296
1297 /**
1298 * metricgroup__add_metric_list - Find and add metrics, or metric groups,
1299 * specified in a list.
1300 * @pmu: A pmu to restrict the metrics to, or "all" for all PMUS.
1301 * @list: the list of metrics or metric groups. For example, "IPC,CPI,TopDownL1"
1302 * would match the IPC and CPI metrics, and TopDownL1 would match all
1303 * the metrics in the TopDownL1 group.
1304 * @metric_no_group: Should events written to the events string be grouped
1305 * "{}" or left global. Grouping is the default but due to
1306 * multiplexing the user may override.
1307 * @user_requested_cpu_list: Command line specified CPUs to record on.
1308 * @system_wide: Are events for all processes recorded.
1309 * @metric_list: The list that metrics are added to.
1310 * @table: The table that is searched for metrics, most commonly the table for the
1311 * architecture perf is running upon.
1312 */
1313 static int metricgroup__add_metric_list(const char *pmu, const char *list,
1314 bool metric_no_group,
1315 bool metric_no_threshold,
1316 const char *user_requested_cpu_list,
1317 bool system_wide, struct list_head *metric_list,
1318 const struct pmu_metrics_table *table)
1319 {
1320 char *list_itr, *list_copy, *metric_name, *modifier;
1321 int ret, count = 0;
1322
1323 list_copy = strdup(list);
1324 if (!list_copy)
1325 return -ENOMEM;
1326 list_itr = list_copy;
1327
1328 while ((metric_name = strsep(&list_itr, ",")) != NULL) {
1329 modifier = strchr(metric_name, ':');
1330 if (modifier)
1331 *modifier++ = '\0';
1332
1333 ret = metricgroup__add_metric(pmu, metric_name, modifier,
1334 metric_no_group, metric_no_threshold,
1335 user_requested_cpu_list,
1336 system_wide, metric_list, table);
1337 if (ret == -EINVAL)
1338 pr_err("Cannot find metric or group `%s'\n", metric_name);
1339
1340 if (ret)
1341 break;
1342
1343 count++;
1344 }
1345 free(list_copy);
1346
1347 if (!ret) {
1348 /*
1349 * Warn about nmi_watchdog if any parsed metrics had the
1350 * NO_NMI_WATCHDOG constraint.
1351 */
1352 metric__watchdog_constraint_hint(NULL, /*foot=*/true);
1353 /* No metrics. */
1354 if (count == 0)
1355 return -EINVAL;
1356 }
1357 return ret;
1358 }
1359
1360 static void metricgroup__free_metrics(struct list_head *metric_list)
1361 {
1362 struct metric *m, *tmp;
1363
1364 list_for_each_entry_safe (m, tmp, metric_list, nd) {
1365 list_del_init(&m->nd);
1366 metric__free(m);
1367 }
1368 }
1369
1370 /**
1371 * find_tool_events - Search for the presence of tool events in metric_list.
1372 * @metric_list: List to take metrics from.
1373 * @tool_events: Array of false values, indices corresponding to tool events set
1374 * to true if tool event is found.
1375 */
1376 static void find_tool_events(const struct list_head *metric_list,
1377 bool tool_events[PERF_TOOL_MAX])
1378 {
1379 struct metric *m;
1380
1381 list_for_each_entry(m, metric_list, nd) {
1382 int i;
1383
1384 perf_tool_event__for_each_event(i) {
1385 struct expr_id_data *data;
1386
1387 if (!tool_events[i] &&
1388 !expr__get_id(m->pctx, perf_tool_event__to_str(i), &data))
1389 tool_events[i] = true;
1390 }
1391 }
1392 }
1393
1394 /**
1395 * build_combined_expr_ctx - Make an expr_parse_ctx with all !group_events
1396 * metric IDs; as the IDs are held in a set,
1397 * duplicates will be removed.
1398 * @metric_list: List to take metrics from.
1399 * @combined: Out argument for result.
1400 */
1401 static int build_combined_expr_ctx(const struct list_head *metric_list,
1402 struct expr_parse_ctx **combined)
1403 {
1404 struct hashmap_entry *cur;
1405 size_t bkt;
1406 struct metric *m;
1407 char *dup;
1408 int ret;
1409
1410 *combined = expr__ctx_new();
1411 if (!*combined)
1412 return -ENOMEM;
1413
1414 list_for_each_entry(m, metric_list, nd) {
1415 if (!m->group_events && !m->modifier) {
1416 hashmap__for_each_entry(m->pctx->ids, cur, bkt) {
1417 dup = strdup(cur->pkey);
1418 if (!dup) {
1419 ret = -ENOMEM;
1420 goto err_out;
1421 }
1422 ret = expr__add_id(*combined, dup);
1423 if (ret)
1424 goto err_out;
1425 }
1426 }
1427 }
1428 return 0;
1429 err_out:
1430 expr__ctx_free(*combined);
1431 *combined = NULL;
1432 return ret;
1433 }
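/*
 * For example, if one metric's IDs are {instructions, cycles} and another's
 * are {cycles, branches}, and neither groups its events, the combined context
 * built above holds {instructions, cycles, branches}: the hashmap acts as a
 * set, so the shared "cycles" ID is only present once.
 */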
1434
1435 /**
1436 * parse_ids - Build the event string for the ids and parse them creating an
1437 * evlist. The encoded metric_ids are decoded.
1438 * @metric_no_merge: is metric sharing explicitly disabled.
1439 * @fake_pmu: used when testing metrics not supported by the current CPU.
1440 * @ids: the event identifiers parsed from a metric.
1441 * @modifier: any modifiers added to the events.
1442 * @group_events: should events be placed in a weak group.
1443 * @tool_events: entries set true if the tool event of index could be present in
1444 * the overall list of metrics.
1445 * @out_evlist: the created list of events.
1446 */
1447 static int parse_ids(bool metric_no_merge, struct perf_pmu *fake_pmu,
1448 struct expr_parse_ctx *ids, const char *modifier,
1449 bool group_events, const bool tool_events[PERF_TOOL_MAX],
1450 struct evlist **out_evlist)
1451 {
1452 struct parse_events_error parse_error;
1453 struct evlist *parsed_evlist;
1454 struct strbuf events = STRBUF_INIT;
1455 int ret;
1456
1457 *out_evlist = NULL;
1458 if (!metric_no_merge || hashmap__size(ids->ids) == 0) {
1459 bool added_event = false;
1460 int i;
1461 /*
1462 * We may fail to share events between metrics because a tool
1463 * event isn't present in one metric. For example, a ratio of
1464 * cache misses doesn't need duration_time but the same events
1465 * may be used for a misses-per-second metric. Events without sharing
1466 * imply multiplexing, which is best avoided, so place
1467 * all tool events in every group.
1468 *
1469 * Also, there may be no ids/events in the expression parsing
1470 * context because of constant evaluation, e.g.:
1471 * event1 if #smt_on else 0
1472 * Add a tool event to avoid a parse error on an empty string.
1473 */
1474 perf_tool_event__for_each_event(i) {
1475 if (tool_events[i]) {
1476 char *tmp = strdup(perf_tool_event__to_str(i));
1477
1478 if (!tmp)
1479 return -ENOMEM;
1480 ids__insert(ids->ids, tmp);
1481 added_event = true;
1482 }
1483 }
1484 if (!added_event && hashmap__size(ids->ids) == 0) {
1485 char *tmp = strdup("duration_time");
1486
1487 if (!tmp)
1488 return -ENOMEM;
1489 ids__insert(ids->ids, tmp);
1490 }
1491 }
1492 ret = metricgroup__build_event_string(&events, ids, modifier,
1493 group_events);
1494 if (ret)
1495 return ret;
1496
1497 parsed_evlist = evlist__new();
1498 if (!parsed_evlist) {
1499 ret = -ENOMEM;
1500 goto err_out;
1501 }
1502 pr_debug("Parsing metric events '%s'\n", events.buf);
1503 parse_events_error__init(&parse_error);
1504 ret = __parse_events(parsed_evlist, events.buf, /*pmu_filter=*/NULL,
1505 &parse_error, fake_pmu, /*warn_if_reordered=*/false);
1506 if (ret) {
1507 parse_events_error__print(&parse_error, events.buf);
1508 goto err_out;
1509 }
1510 ret = decode_all_metric_ids(parsed_evlist, modifier);
1511 if (ret)
1512 goto err_out;
1513
1514 *out_evlist = parsed_evlist;
1515 parsed_evlist = NULL;
1516 err_out:
1517 parse_events_error__exit(&parse_error);
1518 evlist__delete(parsed_evlist);
1519 strbuf_release(&events);
1520 return ret;
1521 }
1522
1523 static int parse_groups(struct evlist *perf_evlist,
1524 const char *pmu, const char *str,
1525 bool metric_no_group,
1526 bool metric_no_merge,
1527 bool metric_no_threshold,
1528 const char *user_requested_cpu_list,
1529 bool system_wide,
1530 struct perf_pmu *fake_pmu,
1531 struct rblist *metric_events_list,
1532 const struct pmu_metrics_table *table)
1533 {
1534 struct evlist *combined_evlist = NULL;
1535 LIST_HEAD(metric_list);
1536 struct metric *m;
1537 bool tool_events[PERF_TOOL_MAX] = {false};
1538 bool is_default = !strcmp(str, "Default");
1539 int ret;
1540
1541 if (metric_events_list->nr_entries == 0)
1542 metricgroup__rblist_init(metric_events_list);
1543 ret = metricgroup__add_metric_list(pmu, str, metric_no_group, metric_no_threshold,
1544 user_requested_cpu_list,
1545 system_wide, &metric_list, table);
1546 if (ret)
1547 goto out;
1548
1549 /* Sort metrics from largest to smallest. */
1550 list_sort(NULL, &metric_list, metric_list_cmp);
1551
1552 if (!metric_no_merge) {
1553 struct expr_parse_ctx *combined = NULL;
1554
1555 find_tool_events(&metric_list, tool_events);
1556
1557 ret = build_combined_expr_ctx(&metric_list, &combined);
1558
1559 if (!ret && combined && hashmap__size(combined->ids)) {
1560 ret = parse_ids(metric_no_merge, fake_pmu, combined,
1561 /*modifier=*/NULL,
1562 /*group_events=*/false,
1563 tool_events,
1564 &combined_evlist);
1565 }
1566 if (combined)
1567 expr__ctx_free(combined);
1568
1569 if (ret)
1570 goto out;
1571 }
1572
1573 if (is_default)
1574 list_sort(NULL, &metric_list, default_metricgroup_cmp);
1575
1576 list_for_each_entry(m, &metric_list, nd) {
1577 struct metric_event *me;
1578 struct evsel **metric_events;
1579 struct evlist *metric_evlist = NULL;
1580 struct metric *n;
1581 struct metric_expr *expr;
1582
1583 if (combined_evlist && !m->group_events) {
1584 metric_evlist = combined_evlist;
1585 } else if (!metric_no_merge) {
1586 /*
1587 * See if the IDs for this metric are a subset of an
1588 * earlier metric.
1589 */
1590 list_for_each_entry(n, &metric_list, nd) {
1591 if (m == n)
1592 break;
1593
1594 if (n->evlist == NULL)
1595 continue;
1596
1597 if ((!m->modifier && n->modifier) ||
1598 (m->modifier && !n->modifier) ||
1599 (m->modifier && n->modifier &&
1600 strcmp(m->modifier, n->modifier)))
1601 continue;
1602
1603 if ((!m->pmu && n->pmu) ||
1604 (m->pmu && !n->pmu) ||
1605 (m->pmu && n->pmu && strcmp(m->pmu, n->pmu)))
1606 continue;
1607
1608 if (expr__subset_of_ids(n->pctx, m->pctx)) {
1609 pr_debug("Events in '%s' fully contained within '%s'\n",
1610 m->metric_name, n->metric_name);
1611 metric_evlist = n->evlist;
1612 break;
1613 }
1614
1615 }
1616 }
1617 if (!metric_evlist) {
1618 ret = parse_ids(metric_no_merge, fake_pmu, m->pctx, m->modifier,
1619 m->group_events, tool_events, &m->evlist);
1620 if (ret)
1621 goto out;
1622
1623 metric_evlist = m->evlist;
1624 }
1625 ret = setup_metric_events(fake_pmu ? "all" : m->pmu, m->pctx->ids,
1626 metric_evlist, &metric_events);
1627 if (ret) {
1628 pr_err("Cannot resolve IDs for %s: %s\n",
1629 m->metric_name, m->metric_expr);
1630 goto out;
1631 }
1632
1633 me = metricgroup__lookup(metric_events_list, metric_events[0], true);
1634
1635 expr = malloc(sizeof(struct metric_expr));
1636 if (!expr) {
1637 ret = -ENOMEM;
1638 free(metric_events);
1639 goto out;
1640 }
1641
1642 expr->metric_refs = m->metric_refs;
1643 m->metric_refs = NULL;
1644 expr->metric_expr = m->metric_expr;
1645 if (m->modifier) {
1646 char *tmp;
1647
1648 if (asprintf(&tmp, "%s:%s", m->metric_name, m->modifier) < 0)
1649 expr->metric_name = NULL;
1650 else
1651 expr->metric_name = tmp;
1652 } else
1653 expr->metric_name = strdup(m->metric_name);
1654
1655 if (!expr->metric_name) {
1656 ret = -ENOMEM;
1657 free(metric_events);
1658 goto out;
1659 }
1660 expr->metric_threshold = m->metric_threshold;
1661 expr->metric_unit = m->metric_unit;
1662 expr->metric_events = metric_events;
1663 expr->runtime = m->pctx->sctx.runtime;
1664 expr->default_metricgroup_name = m->default_metricgroup_name;
1665 me->is_default = is_default;
1666 list_add(&expr->nd, &me->head);
1667 }
1668
1669
1670 if (combined_evlist) {
1671 evlist__splice_list_tail(perf_evlist, &combined_evlist->core.entries);
1672 evlist__delete(combined_evlist);
1673 }
1674
1675 list_for_each_entry(m, &metric_list, nd) {
1676 if (m->evlist)
1677 evlist__splice_list_tail(perf_evlist, &m->evlist->core.entries);
1678 }
1679
1680 out:
1681 metricgroup__free_metrics(&metric_list);
1682 return ret;
1683 }
1684
1685 int metricgroup__parse_groups(struct evlist *perf_evlist,
1686 const char *pmu,
1687 const char *str,
1688 bool metric_no_group,
1689 bool metric_no_merge,
1690 bool metric_no_threshold,
1691 const char *user_requested_cpu_list,
1692 bool system_wide,
1693 struct rblist *metric_events)
1694 {
1695 const struct pmu_metrics_table *table = pmu_metrics_table__find();
1696
1697 if (!table)
1698 return -EINVAL;
1699
1700 return parse_groups(perf_evlist, pmu, str, metric_no_group, metric_no_merge,
1701 metric_no_threshold, user_requested_cpu_list, system_wide,
1702 /*fake_pmu=*/NULL, metric_events, table);
1703 }
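/*
 * A minimal usage sketch for metricgroup__parse_groups() (a hypothetical
 * caller, loosely mirroring what perf stat does; the local variable names
 * are assumptions, only the functions referenced are real):
 *
 *	struct rblist metric_events = {};
 *	int err;
 *
 *	err = metricgroup__parse_groups(evlist, "all", "IPC",
 *					false, false, false,
 *					NULL, false,
 *					&metric_events);
 *	if (err)
 *		pr_err("failed to set up metric\n");
 *	...
 *	metricgroup__rblist_exit(&metric_events);
 *
 * The three booleans are metric_no_group, metric_no_merge and
 * metric_no_threshold; NULL and false are user_requested_cpu_list and
 * system_wide. The parsed events end up on the passed evlist and the metric
 * expressions are attached to metric_events for evaluation at display time.
 */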
1704
1705 int metricgroup__parse_groups_test(struct evlist *evlist,
1706 const struct pmu_metrics_table *table,
1707 const char *str,
1708 struct rblist *metric_events)
1709 {
1710 return parse_groups(evlist, "all", str,
1711 /*metric_no_group=*/false,
1712 /*metric_no_merge=*/false,
1713 /*metric_no_threshold=*/false,
1714 /*user_requested_cpu_list=*/NULL,
1715 /*system_wide=*/false,
1716 &perf_pmu__fake, metric_events, table);
1717 }
1718
1719 struct metricgroup__has_metric_data {
1720 const char *pmu;
1721 const char *metric;
1722 };
1723 static int metricgroup__has_metric_callback(const struct pmu_metric *pm,
1724 const struct pmu_metrics_table *table __maybe_unused,
1725 void *vdata)
1726 {
1727 struct metricgroup__has_metric_data *data = vdata;
1728
1729 return match_pm_metric(pm, data->pmu, data->metric) ? 1 : 0;
1730 }
1731
1732 bool metricgroup__has_metric(const char *pmu, const char *metric)
1733 {
1734 const struct pmu_metrics_table *table = pmu_metrics_table__find();
1735 struct metricgroup__has_metric_data data = {
1736 .pmu = pmu,
1737 .metric = metric,
1738 };
1739
1740 if (!table)
1741 return false;
1742
1743 return pmu_metrics_table__for_each_metric(table, metricgroup__has_metric_callback, &data)
1744 ? true : false;
1745 }
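/*
 * For example, metricgroup__has_metric("cpu", "IPC") reports whether a metric
 * or metric group named "IPC" exists for the default "cpu" PMU, while passing
 * "all" as the PMU checks the metrics of every PMU.
 */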
1746
1747 static int metricgroup__topdown_max_level_callback(const struct pmu_metric *pm,
1748 const struct pmu_metrics_table *table __maybe_unused,
1749 void *data)
1750 {
1751 unsigned int *max_level = data;
1752 unsigned int level;
1753 const char *p = strstr(pm->metric_group ?: "", "TopdownL");
1754
1755 if (!p || p[8] == '\0')
1756 return 0;
1757
1758 level = p[8] - '0';
1759 if (level > *max_level)
1760 *max_level = level;
1761
1762 return 0;
1763 }
1764
1765 unsigned int metricgroups__topdown_max_level(void)
1766 {
1767 unsigned int max_level = 0;
1768 const struct pmu_metrics_table *table = pmu_metrics_table__find();
1769
1770 if (!table)
1771 return 0;
1772
1773 pmu_metrics_table__for_each_metric(table, metricgroup__topdown_max_level_callback,
1774 &max_level);
1775 return max_level;
1776 }
1777
1778 int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
1779 struct rblist *new_metric_events,
1780 struct rblist *old_metric_events)
1781 {
1782 unsigned int i;
1783
1784 for (i = 0; i < rblist__nr_entries(old_metric_events); i++) {
1785 struct rb_node *nd;
1786 struct metric_event *old_me, *new_me;
1787 struct metric_expr *old_expr, *new_expr;
1788 struct evsel *evsel;
1789 size_t alloc_size;
1790 int idx, nr;
1791
1792 nd = rblist__entry(old_metric_events, i);
1793 old_me = container_of(nd, struct metric_event, nd);
1794
1795 evsel = evlist__find_evsel(evlist, old_me->evsel->core.idx);
1796 if (!evsel)
1797 return -EINVAL;
1798 new_me = metricgroup__lookup(new_metric_events, evsel, true);
1799 if (!new_me)
1800 return -ENOMEM;
1801
1802 pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n",
1803 cgrp ? cgrp->name : "root", evsel->name, evsel->core.idx);
1804
1805 list_for_each_entry(old_expr, &old_me->head, nd) {
1806 new_expr = malloc(sizeof(*new_expr));
1807 if (!new_expr)
1808 return -ENOMEM;
1809
1810 new_expr->metric_expr = old_expr->metric_expr;
1811 new_expr->metric_threshold = old_expr->metric_threshold;
1812 new_expr->metric_name = strdup(old_expr->metric_name);
1813 if (!new_expr->metric_name)
1814 return -ENOMEM;
1815
1816 new_expr->metric_unit = old_expr->metric_unit;
1817 new_expr->runtime = old_expr->runtime;
1818
1819 if (old_expr->metric_refs) {
1820 /* calculate number of metric_refs */
1821 for (nr = 0; old_expr->metric_refs[nr].metric_name; nr++)
1822 continue;
1823 alloc_size = sizeof(*new_expr->metric_refs);
1824 new_expr->metric_refs = calloc(nr + 1, alloc_size);
1825 if (!new_expr->metric_refs) {
1826 free(new_expr);
1827 return -ENOMEM;
1828 }
1829
1830 memcpy(new_expr->metric_refs, old_expr->metric_refs,
1831 nr * alloc_size);
1832 } else {
1833 new_expr->metric_refs = NULL;
1834 }
1835
1836 /* calculate number of metric_events */
1837 for (nr = 0; old_expr->metric_events[nr]; nr++)
1838 continue;
1839 alloc_size = sizeof(*new_expr->metric_events);
1840 new_expr->metric_events = calloc(nr + 1, alloc_size);
1841 if (!new_expr->metric_events) {
1842 zfree(&new_expr->metric_refs);
1843 free(new_expr);
1844 return -ENOMEM;
1845 }
1846
1847 /* copy evsel in the same position */
1848 for (idx = 0; idx < nr; idx++) {
1849 evsel = old_expr->metric_events[idx];
1850 evsel = evlist__find_evsel(evlist, evsel->core.idx);
1851 if (evsel == NULL) {
1852 zfree(&new_expr->metric_events);
1853 zfree(&new_expr->metric_refs);
1854 free(new_expr);
1855 return -EINVAL;
1856 }
1857 new_expr->metric_events[idx] = evsel;
1858 }
1859
1860 list_add(&new_expr->nd, &new_me->head);
1861 }
1862 }
1863 return 0;
1864 }
1865