// SPDX-License-Identifier: GPL-2.0
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <subcmd/pager.h>
#include <sys/types.h>
#include <ctype.h>
#include <dirent.h>
#include <pthread.h>
#include <string.h>
#include <unistd.h>
#include "debug.h"
#include "evsel.h"
#include "pmus.h"
#include "pmu.h"
#include "print-events.h"

/*
 * core_pmus: A PMU belongs to core_pmus if its name is "cpu" or its sysfs
 *            directory contains a "cpus" file. All PMUs belonging to core_pmus
 *            must have pmu->is_core=1. If there is more than one PMU in
 *            this list, perf interprets it as a heterogeneous platform.
 *            (FWIW, certain ARM platforms with heterogeneous cores use a
 *            homogeneous PMU, and thus they are treated as a homogeneous
 *            platform by perf because core_pmus will have only one entry.)
 * other_pmus: All other PMUs which are not part of the core_pmus list. It
 *             doesn't matter whether a PMU is present per SMT-thread or
 *             outside of the core in the hw. E.g., an instance of the AMD
 *             ibs_fetch// and ibs_op// PMUs is present in each hw SMT thread,
 *             but they are captured under other_pmus. PMUs belonging to
 *             other_pmus must have pmu->is_core=0; pmu->is_uncore could be
 *             0 or 1.
 */
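/*
 * For example, on an Intel hybrid system core_pmus would typically hold
 * "cpu_core" and "cpu_atom" (making the platform heterogeneous from perf's
 * point of view), while PMUs such as "uncore_imc_0", "msr" or "breakpoint"
 * would all end up in other_pmus.
 */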
static LIST_HEAD(core_pmus);
static LIST_HEAD(other_pmus);
static bool read_sysfs_core_pmus;
static bool read_sysfs_all_pmus;

int pmu_name_len_no_suffix(const char *str, unsigned long *num)
{
	int orig_len, len;

	orig_len = len = strlen(str);

	/* Non-uncore PMUs have their full length, for example, i915. */
	if (!strstarts(str, "uncore_"))
		return len;

	/*
	 * Count trailing digits and '_'; if a '_{num}' suffix isn't present,
	 * use the full length.
	 */
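	/*
	 * For example, "uncore_imc_3" gives a length of 10 ("uncore_imc")
	 * with *num set to 3, while "uncore_imc" (no numeric suffix) and
	 * "i915" are returned at their full length.
	 */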
	while (len > 0 && isdigit(str[len - 1]))
		len--;

	if (len > 0 && len != orig_len && str[len - 1] == '_') {
		if (num)
			*num = strtoul(&str[len], NULL, 10);
		return len - 1;
	}
	return orig_len;
}

void perf_pmus__destroy(void)
{
	struct perf_pmu *pmu, *tmp;

	list_for_each_entry_safe(pmu, tmp, &core_pmus, list) {
		list_del(&pmu->list);

		perf_pmu__delete(pmu);
	}
	list_for_each_entry_safe(pmu, tmp, &other_pmus, list) {
		list_del(&pmu->list);

		perf_pmu__delete(pmu);
	}
	read_sysfs_core_pmus = false;
	read_sysfs_all_pmus = false;
}

static struct perf_pmu *pmu_find(const char *name)
{
	struct perf_pmu *pmu;

	list_for_each_entry(pmu, &core_pmus, list) {
		if (!strcmp(pmu->name, name) ||
		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
			return pmu;
	}
	list_for_each_entry(pmu, &other_pmus, list) {
		if (!strcmp(pmu->name, name) ||
		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
			return pmu;
	}

	return NULL;
}

struct perf_pmu *perf_pmus__find(const char *name)
{
	struct perf_pmu *pmu;
	int dirfd;
	bool core_pmu;

	/*
	 * Once a PMU is loaded it stays in the list; this keeps us from
	 * reading/parsing the PMU format definitions multiple times.
	 */
	pmu = pmu_find(name);
	if (pmu)
		return pmu;

	if (read_sysfs_all_pmus)
		return NULL;

	core_pmu = is_pmu_core(name);
	if (core_pmu && read_sysfs_core_pmus)
		return NULL;

	dirfd = perf_pmu__event_source_devices_fd();
	pmu = perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name);
	close(dirfd);

	return pmu;
}

static struct perf_pmu *perf_pmu__find2(int dirfd, const char *name)
{
	struct perf_pmu *pmu;
	bool core_pmu;

	/*
	 * Once a PMU is loaded it stays in the list; this keeps us from
	 * reading/parsing the PMU format definitions multiple times.
	 */
	pmu = pmu_find(name);
	if (pmu)
		return pmu;

	if (read_sysfs_all_pmus)
		return NULL;

	core_pmu = is_pmu_core(name);
	if (core_pmu && read_sysfs_core_pmus)
		return NULL;

	return perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name);
}

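/*
 * Order PMUs by name, with any trailing "_<num>" suffix compared numerically
 * rather than lexically so that, e.g., "uncore_imc_2" sorts before
 * "uncore_imc_10" (illustrative names).
 */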
static int pmus_cmp(void *priv __maybe_unused,
		    const struct list_head *lhs, const struct list_head *rhs)
{
	unsigned long lhs_num = 0, rhs_num = 0;
	struct perf_pmu *lhs_pmu = container_of(lhs, struct perf_pmu, list);
	struct perf_pmu *rhs_pmu = container_of(rhs, struct perf_pmu, list);
	const char *lhs_pmu_name = lhs_pmu->name ?: "";
	const char *rhs_pmu_name = rhs_pmu->name ?: "";
	int lhs_pmu_name_len = pmu_name_len_no_suffix(lhs_pmu_name, &lhs_num);
	int rhs_pmu_name_len = pmu_name_len_no_suffix(rhs_pmu_name, &rhs_num);
	int ret = strncmp(lhs_pmu_name, rhs_pmu_name,
			lhs_pmu_name_len < rhs_pmu_name_len ? lhs_pmu_name_len : rhs_pmu_name_len);

	if (lhs_pmu_name_len != rhs_pmu_name_len || ret != 0 || lhs_pmu_name_len == 0)
		return ret;

	return lhs_num < rhs_num ? -1 : (lhs_num > rhs_num ? 1 : 0);
}

/* Add all PMUs in sysfs to the core/other PMU lists: */
static void pmu_read_sysfs(bool core_only)
{
	int fd;
	DIR *dir;
	struct dirent *dent;

	if (read_sysfs_all_pmus || (core_only && read_sysfs_core_pmus))
		return;

	fd = perf_pmu__event_source_devices_fd();
	if (fd < 0)
		return;

	dir = fdopendir(fd);
	if (!dir) {
		close(fd);
		return;
	}

	while ((dent = readdir(dir))) {
		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
			continue;
		if (core_only && !is_pmu_core(dent->d_name))
			continue;
		/* add to static LIST_HEAD(core_pmus) or LIST_HEAD(other_pmus): */
		perf_pmu__find2(fd, dent->d_name);
	}

	closedir(dir);
	if (list_empty(&core_pmus)) {
		if (!perf_pmu__create_placeholder_core_pmu(&core_pmus))
			pr_err("Failure to set up any core PMUs\n");
	}
	list_sort(NULL, &core_pmus, pmus_cmp);
	list_sort(NULL, &other_pmus, pmus_cmp);
	if (!list_empty(&core_pmus)) {
		read_sysfs_core_pmus = true;
		if (!core_only)
			read_sysfs_all_pmus = true;
	}
}

static struct perf_pmu *__perf_pmus__find_by_type(unsigned int type)
{
	struct perf_pmu *pmu;

	list_for_each_entry(pmu, &core_pmus, list) {
		if (pmu->type == type)
			return pmu;
	}

	list_for_each_entry(pmu, &other_pmus, list) {
		if (pmu->type == type)
			return pmu;
	}
	return NULL;
}

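/*
 * Look up a PMU by its perf_event_attr type, e.g. the value exposed in a
 * PMU's sysfs "type" file. Already loaded PMUs are checked first; sysfs is
 * only read in full when the type is not yet known.
 */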
struct perf_pmu *perf_pmus__find_by_type(unsigned int type)
{
	struct perf_pmu *pmu = __perf_pmus__find_by_type(type);

	if (pmu || read_sysfs_all_pmus)
		return pmu;

	pmu_read_sysfs(/*core_only=*/false);
	pmu = __perf_pmus__find_by_type(type);
	return pmu;
}

/*
 * PMU iterator: if pmu is NULL, start at the beginning, otherwise return the
 * next PMU. Returns NULL at the end.
 */
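/*
 * A minimal usage sketch (illustrative):
 *
 *	struct perf_pmu *pmu = NULL;
 *
 *	while ((pmu = perf_pmus__scan(pmu)) != NULL)
 *		pr_debug("%s\n", pmu->name);
 */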
struct perf_pmu *perf_pmus__scan(struct perf_pmu *pmu)
{
	bool use_core_pmus = !pmu || pmu->is_core;

	if (!pmu) {
		pmu_read_sysfs(/*core_only=*/false);
		pmu = list_prepare_entry(pmu, &core_pmus, list);
	}
	if (use_core_pmus) {
		list_for_each_entry_continue(pmu, &core_pmus, list)
			return pmu;

		pmu = NULL;
		pmu = list_prepare_entry(pmu, &other_pmus, list);
	}
	list_for_each_entry_continue(pmu, &other_pmus, list)
		return pmu;
	return NULL;
}

struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu)
{
	if (!pmu) {
		pmu_read_sysfs(/*core_only=*/true);
		pmu = list_prepare_entry(pmu, &core_pmus, list);
	}
	list_for_each_entry_continue(pmu, &core_pmus, list)
		return pmu;

	return NULL;
}

static struct perf_pmu *perf_pmus__scan_skip_duplicates(struct perf_pmu *pmu)
{
	bool use_core_pmus = !pmu || pmu->is_core;
	int last_pmu_name_len = 0;
	const char *last_pmu_name = (pmu && pmu->name) ? pmu->name : "";

	if (!pmu) {
		pmu_read_sysfs(/*core_only=*/false);
		pmu = list_prepare_entry(pmu, &core_pmus, list);
	} else
		last_pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "", NULL);

	if (use_core_pmus) {
		list_for_each_entry_continue(pmu, &core_pmus, list) {
			int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "", /*num=*/NULL);

			if (last_pmu_name_len == pmu_name_len &&
			    !strncmp(last_pmu_name, pmu->name ?: "", pmu_name_len))
				continue;

			return pmu;
		}
		pmu = NULL;
		pmu = list_prepare_entry(pmu, &other_pmus, list);
	}
	list_for_each_entry_continue(pmu, &other_pmus, list) {
		int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "", /*num=*/NULL);

		if (last_pmu_name_len == pmu_name_len &&
		    !strncmp(last_pmu_name, pmu->name ?: "", pmu_name_len))
			continue;

		return pmu;
	}
	return NULL;
}

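/*
 * Match a user supplied PMU filter string against the known PMUs, ignoring
 * the "uncore_" and "cpu_" prefixes so that, e.g., "imc" can match
 * "uncore_imc" and "atom" can match "cpu_atom" (illustrative names).
 */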
const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str)
{
	struct perf_pmu *pmu = NULL;

	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		if (!strcmp(pmu->name, str))
			return pmu;
		/* Ignore "uncore_" prefix. */
		if (!strncmp(pmu->name, "uncore_", 7)) {
			if (!strcmp(pmu->name + 7, str))
				return pmu;
		}
		/* Ignore "cpu_" prefix on Intel hybrid PMUs. */
		if (!strncmp(pmu->name, "cpu_", 4)) {
			if (!strcmp(pmu->name + 4, str))
				return pmu;
		}
	}
	return NULL;
}

int __weak perf_pmus__num_mem_pmus(void)
{
	/* All core PMUs are for mem events. */
	return perf_pmus__num_core_pmus();
}

/** Struct for ordering events as output in perf list. */
struct sevent {
	/** PMU for event. */
	const struct perf_pmu *pmu;
	const char *name;
	const char *alias;
	const char *scale_unit;
	const char *desc;
	const char *long_desc;
	const char *encoding_desc;
	const char *topic;
	const char *pmu_name;
	bool deprecated;
};

static int cmp_sevent(const void *a, const void *b)
{
	const struct sevent *as = a;
	const struct sevent *bs = b;
	bool a_iscpu, b_iscpu;
	int ret;

	/* Put extra events last. */
	if (!!as->desc != !!bs->desc)
		return !!as->desc - !!bs->desc;

	/* Order by topics. */
	ret = strcmp(as->topic ?: "", bs->topic ?: "");
	if (ret)
		return ret;

	/* Order CPU core events to be first */
	a_iscpu = as->pmu ? as->pmu->is_core : true;
	b_iscpu = bs->pmu ? bs->pmu->is_core : true;
	if (a_iscpu != b_iscpu)
		return a_iscpu ? -1 : 1;

	/* Order by PMU name. */
	if (as->pmu != bs->pmu) {
		ret = strcmp(as->pmu_name ?: "", bs->pmu_name ?: "");
		if (ret)
			return ret;
	}

	/* Order by event name. */
	return strcmp(as->name, bs->name);
}

static bool pmu_alias_is_duplicate(struct sevent *a, struct sevent *b)
{
	/* Different names -> never duplicates */
	if (strcmp(a->name ?: "//", b->name ?: "//"))
		return false;

	/* Don't remove duplicates for different PMUs */
	return strcmp(a->pmu_name, b->pmu_name) == 0;
}

struct events_callback_state {
	struct sevent *aliases;
	size_t aliases_len;
	size_t index;
};

static int perf_pmus__print_pmu_events__callback(void *vstate,
						struct pmu_event_info *info)
{
	struct events_callback_state *state = vstate;
	struct sevent *s;

	if (state->index >= state->aliases_len) {
		pr_err("Unexpected event %s/%s/\n", info->pmu->name, info->name);
		return 1;
	}
	s = &state->aliases[state->index];
	s->pmu = info->pmu;
#define COPY_STR(str) s->str = info->str ? strdup(info->str) : NULL
	COPY_STR(name);
	COPY_STR(alias);
	COPY_STR(scale_unit);
	COPY_STR(desc);
	COPY_STR(long_desc);
	COPY_STR(encoding_desc);
	COPY_STR(topic);
	COPY_STR(pmu_name);
#undef COPY_STR
	s->deprecated = info->deprecated;
	state->index++;
	return 0;
}

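/*
 * Print events for "perf list": count the events of every (optionally
 * deduplicated) PMU, gather them into an array of struct sevent via the
 * callback above, sort them with cmp_sevent and print each entry once,
 * skipping adjacent duplicates.
 */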
void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *print_state)
{
	struct perf_pmu *pmu;
	int printed = 0;
	int len;
	struct sevent *aliases;
	struct events_callback_state state;
	bool skip_duplicate_pmus = print_cb->skip_duplicate_pmus(print_state);
	struct perf_pmu *(*scan_fn)(struct perf_pmu *);

	if (skip_duplicate_pmus)
		scan_fn = perf_pmus__scan_skip_duplicates;
	else
		scan_fn = perf_pmus__scan;

	pmu = NULL;
	len = 0;
	while ((pmu = scan_fn(pmu)) != NULL)
		len += perf_pmu__num_events(pmu);

	aliases = zalloc(sizeof(struct sevent) * len);
	if (!aliases) {
		pr_err("FATAL: not enough memory to print PMU events\n");
		return;
	}
	pmu = NULL;
	state = (struct events_callback_state) {
		.aliases = aliases,
		.aliases_len = len,
		.index = 0,
	};
	while ((pmu = scan_fn(pmu)) != NULL) {
		perf_pmu__for_each_event(pmu, skip_duplicate_pmus, &state,
					 perf_pmus__print_pmu_events__callback);
	}
	qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
	for (int j = 0; j < len; j++) {
		/* Skip duplicates */
		if (j > 0 && pmu_alias_is_duplicate(&aliases[j], &aliases[j - 1]))
			continue;

		print_cb->print_event(print_state,
				aliases[j].pmu_name,
				aliases[j].topic,
				aliases[j].name,
				aliases[j].alias,
				aliases[j].scale_unit,
				aliases[j].deprecated,
				"Kernel PMU event",
				aliases[j].desc,
				aliases[j].long_desc,
				aliases[j].encoding_desc);
		zfree(&aliases[j].name);
		zfree(&aliases[j].alias);
		zfree(&aliases[j].scale_unit);
		zfree(&aliases[j].desc);
		zfree(&aliases[j].long_desc);
		zfree(&aliases[j].encoding_desc);
		zfree(&aliases[j].topic);
		zfree(&aliases[j].pmu_name);
	}
	if (printed && pager_in_use())
		printf("\n");

	zfree(&aliases);
}

bool perf_pmus__have_event(const char *pname, const char *name)
{
	struct perf_pmu *pmu = perf_pmus__find(pname);

	return pmu && perf_pmu__have_event(pmu, name);
}

int perf_pmus__num_core_pmus(void)
{
	static int count;

	if (!count) {
		struct perf_pmu *pmu = NULL;

		while ((pmu = perf_pmus__scan_core(pmu)) != NULL)
			count++;
	}
	return count;
}

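/*
 * On heterogeneous systems the kernel can accept a core PMU's type encoded in
 * the upper bits of attr.config for legacy hardware events, e.g.
 * PERF_COUNT_HW_CPU_CYCLES | ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT).
 * Probe whether every core PMU accepts such an "extended type" encoding.
 */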
static bool __perf_pmus__supports_extended_type(void)
{
	struct perf_pmu *pmu = NULL;

	if (perf_pmus__num_core_pmus() <= 1)
		return false;

	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
		if (!is_event_supported(PERF_TYPE_HARDWARE,
					PERF_COUNT_HW_CPU_CYCLES | ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT)))
			return false;
	}

	return true;
}

static bool perf_pmus__do_support_extended_type;

static void perf_pmus__init_supports_extended_type(void)
{
	perf_pmus__do_support_extended_type = __perf_pmus__supports_extended_type();
}

bool perf_pmus__supports_extended_type(void)
{
	static pthread_once_t extended_type_once = PTHREAD_ONCE_INIT;

	pthread_once(&extended_type_once, perf_pmus__init_supports_extended_type);

	return perf_pmus__do_support_extended_type;
}

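/*
 * Return the name of the first core PMU, falling back to scanning sysfs
 * directly if the PMU lists haven't been populated and to "cpu" if nothing
 * suitable is found. The caller owns the returned string.
 */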
char *perf_pmus__default_pmu_name(void)
{
	int fd;
	DIR *dir;
	struct dirent *dent;
	char *result = NULL;

	if (!list_empty(&core_pmus))
		return strdup(list_first_entry(&core_pmus, struct perf_pmu, list)->name);

	fd = perf_pmu__event_source_devices_fd();
	if (fd < 0)
		return strdup("cpu");

	dir = fdopendir(fd);
	if (!dir) {
		close(fd);
		return strdup("cpu");
	}

	while ((dent = readdir(dir))) {
		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
			continue;
		if (is_pmu_core(dent->d_name)) {
			result = strdup(dent->d_name);
			break;
		}
	}

	closedir(dir);
	return result ?: strdup("cpu");
}

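/*
 * Find the PMU an evsel was opened on via its attr.type, caching the result
 * on the evsel (the const is cast away for the cache update) so repeated
 * lookups are cheap.
 */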
struct perf_pmu *evsel__find_pmu(const struct evsel *evsel)
{
	struct perf_pmu *pmu = evsel->pmu;

	if (!pmu) {
		pmu = perf_pmus__find_by_type(evsel->core.attr.type);
		((struct evsel *)evsel)->pmu = pmu;
	}
	return pmu;
}