// SPDX-License-Identifier: GPL-2.0
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <api/fs/fs.h>
#include <linux/kernel.h>
#include "map_symbol.h"
#include "mem-events.h"
#include "debug.h"
#include "symbol.h"
#include "pmu.h"
#include "pmus.h"

unsigned int perf_mem_events__loads_ldlat = 30;

#define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s }

static struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
	E("ldlat-loads",	"cpu/mem-loads,ldlat=%u/P",	"cpu/events/mem-loads"),
	E("ldlat-stores",	"cpu/mem-stores/P",		"cpu/events/mem-stores"),
	E(NULL,			NULL,				NULL),
};
#undef E

static char mem_loads_name[100];
static bool mem_loads_name__init;

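/*
 * Return the table entry for event index @i, or NULL if @i is out of
 * range. Weak so that architectures can override it with their own
 * event tables.
 */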
struct perf_mem_event * __weak perf_mem_events__ptr(int i)
{
	if (i >= PERF_MEM_EVENTS__MAX)
		return NULL;

	return &perf_mem_events[i];
}

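/*
 * Return the event string used to open event @i. For the load event,
 * the "%u" in the template is expanded once with the configured ldlat
 * value (e.g. "cpu/mem-loads,ldlat=30/P" for the default of 30) and
 * the result is cached in mem_loads_name.
 */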
const char * __weak perf_mem_events__name(int i, const char *pmu_name __maybe_unused)
{
	struct perf_mem_event *e = perf_mem_events__ptr(i);

	if (!e)
		return NULL;

	if (i == PERF_MEM_EVENTS__LOAD) {
		if (!mem_loads_name__init) {
			mem_loads_name__init = true;
			scnprintf(mem_loads_name, sizeof(mem_loads_name),
				  e->name, perf_mem_events__loads_ldlat);
		}
		return mem_loads_name;
	}

	return e->name;
}

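/*
 * Weak default: no auxiliary mem-loads event. Overridden on
 * architectures that pair mem-loads with an auxiliary event.
 */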
__weak bool is_mem_loads_aux_event(struct evsel *leader __maybe_unused)
{
	return false;
}

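/*
 * Parse a comma-separated list of event tags, e.g. "ldlat-loads" or
 * "ldlat-loads,ldlat-stores", and mark each matching entry for
 * recording. Returns 0 if at least one tag matched, -1 otherwise.
 */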
int perf_mem_events__parse(const char *str)
{
	char *tok, *saveptr = NULL;
	bool found = false;
	char *buf;
	int j;

	/* We need a buffer we know we can write to. */
	buf = malloc(strlen(str) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, str);

	tok = strtok_r(buf, ",", &saveptr);

	while (tok) {
		for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
			struct perf_mem_event *e = perf_mem_events__ptr(j);

			if (!e->tag)
				continue;

			if (strstr(e->tag, tok))
				e->record = found = true;
		}

		tok = strtok_r(NULL, ",", &saveptr);
	}

	free(buf);

	if (found)
		return 0;

	pr_err("failed: event '%s' not found, use '-e list' to get list of available events\n", str);
	return -1;
}

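/*
 * An event is considered supported if its sysfs node, e.g.
 * <mnt>/devices/cpu/events/mem-loads, exists.
 */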
static bool perf_mem_event__supported(const char *mnt, char *sysfs_name)
{
	char path[PATH_MAX];
	struct stat st;

	scnprintf(path, PATH_MAX, "%s/devices/%s", mnt, sysfs_name);
	return !stat(path, &st);
}

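/*
 * Probe sysfs for every event in the table and mark the supported
 * ones. Returns 0 if at least one event is supported, -ENOENT
 * otherwise.
 */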
int perf_mem_events__init(void)
{
	const char *mnt = sysfs__mount();
	bool found = false;
	int j;

	if (!mnt)
		return -ENOENT;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		struct perf_mem_event *e = perf_mem_events__ptr(j);
		char sysfs_name[100];
		struct perf_pmu *pmu = NULL;

		/*
		 * If the event entry isn't valid, skip initialization
		 * and "e->supported" stays false.
		 */
		if (!e->tag)
			continue;

		/*
		 * Scan all PMUs, not just core ones, since perf mem/c2c on
		 * platforms like AMD uses the IBS OP PMU, which is
		 * independent of the core PMU.
		 */
		while ((pmu = perf_pmus__scan(pmu)) != NULL) {
			scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name, pmu->name);
			e->supported |= perf_mem_event__supported(mnt, sysfs_name);
		}

		if (e->supported)
			found = true;
	}

	return found ? 0 : -ENOENT;
}

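/*
 * List the known events on stderr: the tag, the event string when
 * verbose, and whether the event is available on this system.
 */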
void perf_mem_events__list(void)
{
	int j;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		struct perf_mem_event *e = perf_mem_events__ptr(j);

		fprintf(stderr, "%-*s%-*s%s",
			e->tag ? 13 : 0,
			e->tag ? : "",
			e->tag && verbose > 0 ? 25 : 0,
			e->tag && verbose > 0 ? perf_mem_events__name(j, NULL) : "",
			e->supported ? ": available\n" : "");
	}
}

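/*
 * On systems with multiple PMUs, report each PMU for which the
 * event's sysfs node is missing.
 */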
static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e,
						    int idx)
{
	const char *mnt = sysfs__mount();
	char sysfs_name[100];
	struct perf_pmu *pmu = NULL;

	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name,
			  pmu->name);
		if (!perf_mem_event__supported(mnt, sysfs_name)) {
			pr_err("failed: event '%s' not supported\n",
			       perf_mem_events__name(idx, pmu->name));
		}
	}
}

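/*
 * Append "-e <event>" pairs for every event marked for recording to
 * @rec_argv. With a single mem PMU the static event string is used
 * directly; otherwise an event string is built per PMU and the
 * strdup'ed copies are also collected in @rec_tmp so the caller can
 * free them.
 */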
int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
				 char **rec_tmp, int *tmp_nr)
{
	int i = *argv_nr, k = 0;
	struct perf_mem_event *e;
	struct perf_pmu *pmu = NULL;

	for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		e = perf_mem_events__ptr(j);
		if (!e->record)
			continue;

		if (perf_pmus__num_mem_pmus() == 1) {
			if (!e->supported) {
				pr_err("failed: event '%s' not supported\n",
				       perf_mem_events__name(j, NULL));
				return -1;
			}

			rec_argv[i++] = "-e";
			rec_argv[i++] = perf_mem_events__name(j, NULL);
		} else {
			if (!e->supported) {
				perf_mem_events__print_unsupport_hybrid(e, j);
				return -1;
			}

			while ((pmu = perf_pmus__scan(pmu)) != NULL) {
				const char *s = perf_mem_events__name(j, pmu->name);

				rec_argv[i++] = "-e";
				if (s) {
					char *copy = strdup(s);

					if (!copy)
						return -1;

					rec_argv[i++] = copy;
					rec_tmp[k++] = copy;
				}
			}
		}
	}

	*argv_nr = i;
	*tmp_nr = k;
	return 0;
}

static const char * const tlb_access[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"L2",
	"Walker",
	"Fault",
};

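/*
 * Decode data_src.mem_dtlb into a human-readable string, OR-ing the
 * level names together and appending hit/miss, e.g. "L1 or L2 hit".
 */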
int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	size_t l = 0, i;
	u64 m = PERF_MEM_TLB_NA;
	u64 hit, miss;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		m = mem_info->data_src.mem_dtlb;

	hit = m & PERF_MEM_TLB_HIT;
	miss = m & PERF_MEM_TLB_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);

	for (i = 0; m && i < ARRAY_SIZE(tlb_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, tlb_access[i]);
	}
	if (*out == '\0')
		l += scnprintf(out, sz - l, "N/A");
	if (hit)
		l += scnprintf(out + l, sz - l, " hit");
	if (miss)
		l += scnprintf(out + l, sz - l, " miss");

	return l;
}

static const char * const mem_lvl[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"LFB/MAB",
	"L2",
	"L3",
	"Local RAM",
	"Remote RAM (1 hop)",
	"Remote RAM (2 hops)",
	"Remote Cache (1 hop)",
	"Remote Cache (2 hops)",
	"I/O",
	"Uncached",
};

static const char * const mem_lvlnum[] = {
	[PERF_MEM_LVLNUM_UNC] = "Uncached",
	[PERF_MEM_LVLNUM_CXL] = "CXL",
	[PERF_MEM_LVLNUM_IO] = "I/O",
	[PERF_MEM_LVLNUM_ANY_CACHE] = "Any cache",
	[PERF_MEM_LVLNUM_LFB] = "LFB/MAB",
	[PERF_MEM_LVLNUM_RAM] = "RAM",
	[PERF_MEM_LVLNUM_PMEM] = "PMEM",
	[PERF_MEM_LVLNUM_NA] = "N/A",
};

static const char * const mem_hops[] = {
	"N/A",
	/*
	 * While printing, 'Remote' will be prepended to represent
	 * 'Remote core, same node' accesses, since the remote field
	 * must be set along with the mem_hops field.
	 */
	"core, same node",
	"node, same socket",
	"socket, same board",
	"board",
};

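/* Decode data_src.mem_op into "LOAD", "STORE", "PFETCH", "EXEC" or "N/A". */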
static int perf_mem__op_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	u64 op = PERF_MEM_OP_NA;
	int l;

	if (mem_info)
		op = mem_info->data_src.mem_op;

	if (op & PERF_MEM_OP_NA)
		l = scnprintf(out, sz, "N/A");
	else if (op & PERF_MEM_OP_LOAD)
		l = scnprintf(out, sz, "LOAD");
	else if (op & PERF_MEM_OP_STORE)
		l = scnprintf(out, sz, "STORE");
	else if (op & PERF_MEM_OP_PFETCH)
		l = scnprintf(out, sz, "PFETCH");
	else if (op & PERF_MEM_OP_EXEC)
		l = scnprintf(out, sz, "EXEC");
	else
		l = scnprintf(out, sz, "No");

	return l;
}

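/*
 * Decode the memory hierarchy level. Prefer the newer mem_lvl_num
 * encoding, with remote/hops prefixes (e.g. "Remote core, same node
 * Any cache hit"); fall back to the legacy mem_lvl bitmap.
 */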
int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	union perf_mem_data_src data_src;
	int printed = 0;
	size_t l = 0;
	size_t i;
	int lvl;
	char hit_miss[5] = {0};

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (!mem_info)
		goto na;

	data_src = mem_info->data_src;

	if (data_src.mem_lvl & PERF_MEM_LVL_HIT)
		memcpy(hit_miss, "hit", 3);
	else if (data_src.mem_lvl & PERF_MEM_LVL_MISS)
		memcpy(hit_miss, "miss", 4);

	lvl = data_src.mem_lvl_num;
	if (lvl && lvl != PERF_MEM_LVLNUM_NA) {
		if (data_src.mem_remote) {
			strcat(out, "Remote ");
			l += 7;
		}

		if (data_src.mem_hops)
			l += scnprintf(out + l, sz - l, "%s ", mem_hops[data_src.mem_hops]);

		if (mem_lvlnum[lvl])
			l += scnprintf(out + l, sz - l, mem_lvlnum[lvl]);
		else
			l += scnprintf(out + l, sz - l, "L%d", lvl);

		l += scnprintf(out + l, sz - l, " %s", hit_miss);
		return l;
	}

	lvl = data_src.mem_lvl;
	if (!lvl)
		goto na;

	lvl &= ~(PERF_MEM_LVL_NA | PERF_MEM_LVL_HIT | PERF_MEM_LVL_MISS);
	if (!lvl)
		goto na;

	for (i = 0; lvl && i < ARRAY_SIZE(mem_lvl); i++, lvl >>= 1) {
		if (!(lvl & 0x1))
			continue;
		if (printed++) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, mem_lvl[i]);
	}

	if (printed) {
		l += scnprintf(out + l, sz - l, " %s", hit_miss);
		return l;
	}

na:
	strcat(out, "N/A");
	return 3;
}

static const char * const snoop_access[] = {
	"N/A",
	"None",
	"Hit",
	"Miss",
	"HitM",
};

static const char * const snoopx_access[] = {
	"Fwd",
	"Peer",
};

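/*
 * Decode the snoop result from both the mem_snoop and the extended
 * mem_snoopx bits, e.g. "HitM" or "Fwd or Peer".
 */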
int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	size_t i, l = 0;
	u64 m = PERF_MEM_SNOOP_NA;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		m = mem_info->data_src.mem_snoop;

	for (i = 0; m && i < ARRAY_SIZE(snoop_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, snoop_access[i]);
	}

	m = 0;
	if (mem_info)
		m = mem_info->data_src.mem_snoopx;

	for (i = 0; m && i < ARRAY_SIZE(snoopx_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;

		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, snoopx_access[i]);
	}

	if (*out == '\0')
		l += scnprintf(out, sz - l, "N/A");

	return l;
}

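/* Decode data_src.mem_lock: "Yes" if the access was part of a locked op. */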
int perf_mem__lck_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	u64 mask = PERF_MEM_LOCK_NA;
	int l;

	if (mem_info)
		mask = mem_info->data_src.mem_lock;

	if (mask & PERF_MEM_LOCK_NA)
		l = scnprintf(out, sz, "N/A");
	else if (mask & PERF_MEM_LOCK_LOCKED)
		l = scnprintf(out, sz, "Yes");
	else
		l = scnprintf(out, sz, "No");

	return l;
}

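/* Decode the access-blocked bits into " Data" and/or " Addr". */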
int perf_mem__blk_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	size_t l = 0;
	u64 mask = PERF_MEM_BLK_NA;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		mask = mem_info->data_src.mem_blk;

	if (!mask || (mask & PERF_MEM_BLK_NA)) {
		l += scnprintf(out + l, sz - l, " N/A");
		return l;
	}
	if (mask & PERF_MEM_BLK_DATA)
		l += scnprintf(out + l, sz - l, " Data");
	if (mask & PERF_MEM_BLK_ADDR)
		l += scnprintf(out + l, sz - l, " Addr");

	return l;
}

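/*
 * Format the full data_src decode for perf script, e.g.
 * "|OP LOAD|LVL L1 hit|SNP None|TLB L1 or L2 hit|LCK No|BLK  N/A".
 */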
int perf_script__meminfo_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	int i = 0;

	i += scnprintf(out, sz, "|OP ");
	i += perf_mem__op_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LVL ");
	i += perf_mem__lvl_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|SNP ");
	i += perf_mem__snp_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|TLB ");
	i += perf_mem__tlb_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LCK ");
	i += perf_mem__lck_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|BLK ");
	i += perf_mem__blk_scnprintf(out + i, sz - i, mem_info);

	return i;
}

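/*
 * Decode one sample's data_src into the c2c statistics counters.
 * Returns -1 when the sample cannot be fully accounted (no data
 * address, unparsable data_src or unresolved map), 0 otherwise.
 */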
int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
{
	union perf_mem_data_src *data_src = &mi->data_src;
	u64 daddr = mi->daddr.addr;
	u64 op = data_src->mem_op;
	u64 lvl = data_src->mem_lvl;
	u64 snoop = data_src->mem_snoop;
	u64 snoopx = data_src->mem_snoopx;
	u64 lock = data_src->mem_lock;
	u64 blk = data_src->mem_blk;
	/*
	 * Skylake might report an unknown remote level via this
	 * bit; consider it when evaluating remote HITMs.
	 *
	 * In the case of Power, the remote field can also denote cache
	 * accesses from another core on the same node. Hence, set mrem
	 * only when HOPS is zero along with the remote field being set.
	 */
	bool mrem = (data_src->mem_remote && !data_src->mem_hops);
	int err = 0;

#define HITM_INC(__f)		\
do {				\
	stats->__f++;		\
	stats->tot_hitm++;	\
} while (0)

#define PEER_INC(__f)		\
do {				\
	stats->__f++;		\
	stats->tot_peer++;	\
} while (0)

#define P(a, b) PERF_MEM_##a##_##b

	stats->nr_entries++;

	if (lock & P(LOCK, LOCKED)) stats->locks++;

	if (blk & P(BLK, DATA)) stats->blk_data++;
	if (blk & P(BLK, ADDR)) stats->blk_addr++;

	if (op & P(OP, LOAD)) {
		/* load */
		stats->load++;

		if (!daddr) {
			stats->ld_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->ld_uncache++;
			if (lvl & P(LVL, IO)) stats->ld_io++;
			if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
			if (lvl & P(LVL, L1 )) stats->ld_l1hit++;
			if (lvl & P(LVL, L2)) {
				stats->ld_l2hit++;

				if (snoopx & P(SNOOPX, PEER))
					PEER_INC(lcl_peer);
			}
			if (lvl & P(LVL, L3 )) {
				if (snoop & P(SNOOP, HITM))
					HITM_INC(lcl_hitm);
				else
					stats->ld_llchit++;

				if (snoopx & P(SNOOPX, PEER))
					PEER_INC(lcl_peer);
			}

			if (lvl & P(LVL, LOC_RAM)) {
				stats->lcl_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}

			if ((lvl & P(LVL, REM_RAM1)) ||
			    (lvl & P(LVL, REM_RAM2)) ||
			     mrem) {
				stats->rmt_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}
		}

		if ((lvl & P(LVL, REM_CCE1)) ||
		    (lvl & P(LVL, REM_CCE2)) ||
		     mrem) {
			if (snoop & P(SNOOP, HIT)) {
				stats->rmt_hit++;
			} else if (snoop & P(SNOOP, HITM)) {
				HITM_INC(rmt_hitm);
			} else if (snoopx & P(SNOOPX, PEER)) {
				stats->rmt_hit++;
				PEER_INC(rmt_peer);
			}
		}

		if (lvl & P(LVL, MISS))
			stats->ld_miss++;

	} else if (op & P(OP, STORE)) {
		/* store */
		stats->store++;

		if (!daddr) {
			stats->st_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->st_uncache++;
			if (lvl & P(LVL, L1 )) stats->st_l1hit++;
		}
		if (lvl & P(LVL, MISS))
			if (lvl & P(LVL, L1)) stats->st_l1miss++;
		if (lvl & P(LVL, NA))
			stats->st_na++;
	} else {
		/* unparsable data_src? */
		stats->noparse++;
		return -1;
	}

	if (!mi->daddr.ms.map || !mi->iaddr.ms.map) {
		stats->nomap++;
		return -1;
	}

#undef P
#undef HITM_INC
#undef PEER_INC
	return err;
}

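/* Accumulate the counters from @add into @stats field by field. */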
void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add)
{
	stats->nr_entries += add->nr_entries;

	stats->locks += add->locks;
	stats->store += add->store;
	stats->st_uncache += add->st_uncache;
	stats->st_noadrs += add->st_noadrs;
	stats->st_l1hit += add->st_l1hit;
	stats->st_l1miss += add->st_l1miss;
	stats->st_na += add->st_na;
	stats->load += add->load;
	stats->ld_excl += add->ld_excl;
	stats->ld_shared += add->ld_shared;
	stats->ld_uncache += add->ld_uncache;
	stats->ld_io += add->ld_io;
	stats->ld_miss += add->ld_miss;
	stats->ld_noadrs += add->ld_noadrs;
	stats->ld_fbhit += add->ld_fbhit;
	stats->ld_l1hit += add->ld_l1hit;
	stats->ld_l2hit += add->ld_l2hit;
	stats->ld_llchit += add->ld_llchit;
	stats->lcl_hitm += add->lcl_hitm;
	stats->rmt_hitm += add->rmt_hitm;
	stats->tot_hitm += add->tot_hitm;
	stats->lcl_peer += add->lcl_peer;
	stats->rmt_peer += add->rmt_peer;
	stats->tot_peer += add->tot_peer;
	stats->rmt_hit += add->rmt_hit;
	stats->lcl_dram += add->lcl_dram;
	stats->rmt_dram += add->rmt_dram;
	stats->blk_data += add->blk_data;
	stats->blk_addr += add->blk_addr;
	stats->nomap += add->nomap;
	stats->noparse += add->noparse;
}