// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 */

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/smp.h>

#define NUM_COUNTERS_NB		4
#define NUM_COUNTERS_L2		4
#define NUM_COUNTERS_L3		6
#define MAX_COUNTERS		6

#define RDPMC_BASE_NB		6
#define RDPMC_BASE_LLC		10

#define COUNTER_SHIFT		16

#undef pr_fmt
#define pr_fmt(fmt)	"amd_uncore: " fmt

static int num_counters_llc;
static int num_counters_nb;
static bool l3_mask;

static HLIST_HEAD(uncore_unused_list);

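/*
 * An amd_uncore instance describes one shared counter block (northbridge/
 * data fabric or last-level cache). All CPUs that share the hardware point
 * at the same instance; refcnt counts how many CPUs do, and cpu is the one
 * CPU on which events for this instance are collected.
 */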
struct amd_uncore {
	int id;
	int refcnt;
	int cpu;
	int num_counters;
	int rdpmc_base;
	u32 msr_base;
	cpumask_t *active_mask;
	struct pmu *pmu;
	struct perf_event *events[MAX_COUNTERS];
	struct hlist_node node;
};

static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_llc;

static struct pmu amd_nb_pmu;
static struct pmu amd_llc_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_llc_active_mask;

static bool is_nb_event(struct perf_event *event)
{
	return event->pmu->type == amd_nb_pmu.type;
}

static bool is_llc_event(struct perf_event *event)
{
	return event->pmu->type == amd_llc_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
	if (is_nb_event(event) && amd_uncore_nb)
		return *per_cpu_ptr(amd_uncore_nb, event->cpu);
	else if (is_llc_event(event) && amd_uncore_llc)
		return *per_cpu_ptr(amd_uncore_llc, event->cpu);

	return NULL;
}

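/*
 * Read the current counter value via RDPMC and accumulate the signed delta
 * into event->count. Shifting by COUNTER_SHIFT before the subtraction
 * sign-extends the 48-bit hardware counter so wraparound is handled.
 */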
static void amd_uncore_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;
	s64 delta;

	/*
	 * since we do not enable counter overflow interrupts,
	 * we do not have to worry about prev_count changing on us
	 */

	prev = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new);
	local64_set(&hwc->prev_count, new);
	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}

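/*
 * Optionally reprogram the counter with the last saved value, then set the
 * enable bit in the control MSR to start counting.
 */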
static void amd_uncore_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

	hwc->state = 0;
	wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
	perf_event_update_userpage(event);
}

static void amd_uncore_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		amd_uncore_read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

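/*
 * Claim a free counter slot for the event and derive its control/count MSR
 * addresses and RDPMC index from the slot number.
 */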
static int amd_uncore_add(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	/* are we already assigned? */
	if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
		goto out;

	for (i = 0; i < uncore->num_counters; i++) {
		if (uncore->events[i] == event) {
			hwc->idx = i;
			goto out;
		}
	}

	/* if not, take the first available counter */
	hwc->idx = -1;
	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
			hwc->idx = i;
			break;
		}
	}

out:
	if (hwc->idx == -1)
		return -EBUSY;

	hwc->config_base = uncore->msr_base + (2 * hwc->idx);
	hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
	hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		amd_uncore_start(event, PERF_EF_RELOAD);

	return 0;
}

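/* Stop the event and release its counter slot. */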
static void amd_uncore_del(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	amd_uncore_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], event, NULL) == event)
			break;
	}

	hwc->idx = -1;
}

static int amd_uncore_event_init(struct perf_event *event)
{
	struct amd_uncore *uncore;
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * NB and Last level cache counters (MSRs) are shared across all cores
	 * that share the same NB / Last level cache. Interrupts can be directed
	 * to a single target core, however, event counts generated by processes
	 * running on other cores cannot be masked out. So we do not support
	 * sampling and per-thread events.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	/* and we do not enable counter overflow interrupts */
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

	if (event->cpu < 0)
		return -EINVAL;

	/*
	 * SliceMask and ThreadMask need to be set for certain L3 events in
	 * Family 17h. For other events, the two fields do not affect the count.
	 */
	if (l3_mask && is_llc_event(event)) {
		int thread = 2 * (cpu_data(event->cpu).cpu_core_id % 4);

		if (smp_num_siblings > 1)
			thread += cpu_data(event->cpu).apicid & 1;

		hwc->config |= (1ULL << (AMD64_L3_THREAD_SHIFT + thread) &
				AMD64_L3_THREAD_MASK) | AMD64_L3_SLICE_MASK;
	}

	uncore = event_to_amd_uncore(event);
	if (!uncore)
		return -ENODEV;

	/*
	 * since request can come in to any of the shared cores, we will remap
	 * to a single common cpu.
	 */
	event->cpu = uncore->cpu;

	return 0;
}

static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	cpumask_t *active_mask;
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu->type == amd_nb_pmu.type)
		active_mask = &amd_nb_active_mask;
	else if (pmu->type == amd_llc_pmu.type)
		active_mask = &amd_llc_active_mask;
	else
		return 0;

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

static struct attribute *amd_uncore_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_uncore_attr_group = {
	.attrs = amd_uncore_attrs,
};

/*
 * Similar to PMU_FORMAT_ATTR but allowing for format_attr to be assigned based
 * on family
 */
#define AMD_FORMAT_ATTR(_dev, _name, _format) \
static ssize_t \
_dev##_show##_name(struct device *dev, \
		   struct device_attribute *attr, \
		   char *page) \
{ \
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
	return sprintf(page, _format "\n"); \
} \
static struct device_attribute format_attr_##_dev##_name = __ATTR_RO(_dev);

/* Used for each uncore counter type */
#define AMD_ATTRIBUTE(_name) \
static struct attribute *amd_uncore_format_attr_##_name[] = { \
	&format_attr_event_##_name.attr, \
	&format_attr_umask.attr, \
	NULL, \
}; \
static struct attribute_group amd_uncore_format_group_##_name = { \
	.name = "format", \
	.attrs = amd_uncore_format_attr_##_name, \
}; \
static const struct attribute_group *amd_uncore_attr_groups_##_name[] = { \
	&amd_uncore_attr_group, \
	&amd_uncore_format_group_##_name, \
	NULL, \
};

AMD_FORMAT_ATTR(event, , "config:0-7,32-35");
AMD_FORMAT_ATTR(umask, , "config:8-15");
AMD_FORMAT_ATTR(event, _df, "config:0-7,32-35,59-60");
AMD_FORMAT_ATTR(event, _l3, "config:0-7");
AMD_ATTRIBUTE(df);
AMD_ATTRIBUTE(l3);

static struct pmu amd_nb_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};

static struct pmu amd_llc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};

static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
			cpu_to_node(cpu));
}

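/*
 * CPU hotplug "prepare" callback: allocate the per-cpu NB/DF and LLC uncore
 * structures for an incoming CPU. They are merged with an online sibling's
 * instance later, in the "starting" callback.
 */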
static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
	struct amd_uncore *uncore_nb = NULL, *uncore_llc;

	if (amd_uncore_nb) {
		uncore_nb = amd_uncore_alloc(cpu);
		if (!uncore_nb)
			goto fail;
		uncore_nb->cpu = cpu;
		uncore_nb->num_counters = num_counters_nb;
		uncore_nb->rdpmc_base = RDPMC_BASE_NB;
		uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
		uncore_nb->active_mask = &amd_nb_active_mask;
		uncore_nb->pmu = &amd_nb_pmu;
		uncore_nb->id = -1;
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
	}

	if (amd_uncore_llc) {
		uncore_llc = amd_uncore_alloc(cpu);
		if (!uncore_llc)
			goto fail;
		uncore_llc->cpu = cpu;
		uncore_llc->num_counters = num_counters_llc;
		uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
		uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
		uncore_llc->active_mask = &amd_llc_active_mask;
		uncore_llc->pmu = &amd_llc_pmu;
		uncore_llc->id = -1;
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
	}

	return 0;

fail:
	if (amd_uncore_nb)
		*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
	kfree(uncore_nb);
	return -ENOMEM;
}

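/*
 * If an online CPU already owns an uncore instance with the same id, share
 * that instance and queue this CPU's redundant allocation on
 * uncore_unused_list to be freed later; otherwise keep our own. Either way,
 * take a reference on the instance that is returned.
 */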
static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
			       struct amd_uncore * __percpu *uncores)
{
	unsigned int cpu;
	struct amd_uncore *that;

	for_each_online_cpu(cpu) {
		that = *per_cpu_ptr(uncores, cpu);

		if (!that)
			continue;

		if (this == that)
			continue;

		if (this->id == that->id) {
			hlist_add_head(&this->node, &uncore_unused_list);
			this = that;
			break;
		}
	}

	this->refcnt++;
	return this;
}

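/*
 * CPU hotplug "starting" callback: determine which NB/DF and LLC domain this
 * CPU belongs to (CPUID Fn8000_001E ECX for the node id, cpu_llc_id for the
 * cache id) and fold its per-cpu pointers onto any sibling's instance.
 */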
static int amd_uncore_cpu_starting(unsigned int cpu)
{
	unsigned int eax, ebx, ecx, edx;
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		uncore->id = ecx & 0xff;

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_llc) {
		uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
		uncore->id = per_cpu(cpu_llc_id, cpu);

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
	}

	return 0;
}

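/* Free the uncore structures that were superseded by an online sibling. */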
static void uncore_clean_online(void)
{
	struct amd_uncore *uncore;
	struct hlist_node *n;

	hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
		hlist_del(&uncore->node);
		kfree(uncore);
	}
}

static void uncore_online(unsigned int cpu,
			  struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	uncore_clean_online();

	if (cpu == uncore->cpu)
		cpumask_set_cpu(cpu, uncore->active_mask);
}

static int amd_uncore_cpu_online(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_online(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_online(cpu, amd_uncore_llc);

	return 0;
}

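/*
 * If the CPU going offline is the one that events for this uncore instance
 * are collected on, hand that role (and any active perf contexts) over to
 * another online CPU in the same domain.
 */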
static void uncore_down_prepare(unsigned int cpu,
				struct amd_uncore * __percpu *uncores)
{
	unsigned int i;
	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

	if (this->cpu != cpu)
		return;

	/* this cpu is going down, migrate to a shared sibling if possible */
	for_each_online_cpu(i) {
		struct amd_uncore *that = *per_cpu_ptr(uncores, i);

		if (cpu == i)
			continue;

		if (this == that) {
			perf_pmu_migrate_context(this->pmu, cpu, i);
			cpumask_clear_cpu(cpu, that->active_mask);
			cpumask_set_cpu(i, that->active_mask);
			that->cpu = i;
			break;
		}
	}
}

static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_down_prepare(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_down_prepare(cpu, amd_uncore_llc);

	return 0;
}

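/*
 * Drop the dead CPU from the active mask and its reference on the shared
 * instance; free the instance once the last sharer is gone.
 */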
static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	if (cpu == uncore->cpu)
		cpumask_clear_cpu(cpu, uncore->active_mask);

	if (!--uncore->refcnt)
		kfree(uncore);
	*per_cpu_ptr(uncores, cpu) = NULL;
}

static int amd_uncore_cpu_dead(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_dead(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_dead(cpu, amd_uncore_llc);

	return 0;
}

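/*
 * Detect the supported counter blocks, register the PMUs under the
 * family-appropriate names (amd_nb/amd_l2 vs. amd_df/amd_l3) and install the
 * CPU hotplug callbacks that manage the per-cpu uncore structures.
 */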
static int __init amd_uncore_init(void)
{
	int ret = -ENODEV;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
		return -ENODEV;

	if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
		/*
		 * For F17h or F18h, the Northbridge counters are
		 * repurposed as Data Fabric counters. Also, L3
		 * counters are supported too. The PMUs are exported
		 * based on family as either L2 or L3 and NB or DF.
		 */
		num_counters_nb = NUM_COUNTERS_NB;
		num_counters_llc = NUM_COUNTERS_L3;
		amd_nb_pmu.name = "amd_df";
		amd_llc_pmu.name = "amd_l3";
		format_attr_event_df.show = &event_show_df;
		format_attr_event_l3.show = &event_show_l3;
		l3_mask = true;
	} else {
		num_counters_nb = NUM_COUNTERS_NB;
		num_counters_llc = NUM_COUNTERS_L2;
		amd_nb_pmu.name = "amd_nb";
		amd_llc_pmu.name = "amd_l2";
		format_attr_event_df = format_attr_event;
		format_attr_event_l3 = format_attr_event;
		l3_mask = false;
	}

	amd_nb_pmu.attr_groups = amd_uncore_attr_groups_df;
	amd_llc_pmu.attr_groups = amd_uncore_attr_groups_l3;

	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_nb) {
			ret = -ENOMEM;
			goto fail_nb;
		}
		ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
		if (ret)
			goto fail_nb;

		pr_info("%s NB counters detected\n",
			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
				"HYGON" : "AMD");
		ret = 0;
	}

	if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) {
		amd_uncore_llc = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_llc) {
			ret = -ENOMEM;
			goto fail_llc;
		}
		ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
		if (ret)
			goto fail_llc;

		pr_info("%s LLC counters detected\n",
			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
				"HYGON" : "AMD");
		ret = 0;
	}

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
			      "perf/x86/amd/uncore:prepare",
			      amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
		goto fail_llc;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
			      "perf/x86/amd/uncore:starting",
			      amd_uncore_cpu_starting, NULL))
		goto fail_prep;
	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
			      "perf/x86/amd/uncore:online",
			      amd_uncore_cpu_online,
			      amd_uncore_cpu_down_prepare))
		goto fail_start;
	return 0;

fail_start:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
	cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_llc:
	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
		perf_pmu_unregister(&amd_nb_pmu);
	if (amd_uncore_llc)
		free_percpu(amd_uncore_llc);
fail_nb:
	if (amd_uncore_nb)
		free_percpu(amd_uncore_nb);

	return ret;
}
device_initcall(amd_uncore_init);