/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/apicdef.h>

#include <linux/perf_event.h>
#include "../perf_event.h"

#define UNCORE_PMU_NAME_LEN		32
#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)

#define UNCORE_FIXED_EVENT		0xff
#define UNCORE_PMC_IDX_MAX_GENERIC	8
#define UNCORE_PMC_IDX_MAX_FIXED	1
#define UNCORE_PMC_IDX_MAX_FREERUNNING	1
#define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_FREERUNNING	(UNCORE_PMC_IDX_FIXED + \
					UNCORE_PMC_IDX_MAX_FIXED)
#define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FREERUNNING + \
					UNCORE_PMC_IDX_MAX_FREERUNNING)

#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx)	\
		((dev << 24) | (func << 16) | (type << 8) | idx)
#define UNCORE_PCI_DEV_DATA(type, idx)	((type << 8) | idx)
#define UNCORE_PCI_DEV_DEV(data)	((data >> 24) & 0xff)
#define UNCORE_PCI_DEV_FUNC(data)	((data >> 16) & 0xff)
#define UNCORE_PCI_DEV_TYPE(data)	((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data)	(data & 0xff)
#define UNCORE_EXTRA_PCI_DEV		0xff
#define UNCORE_EXTRA_PCI_DEV_MAX	4
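
/*
 * Worked example (illustrative values only): packing device 0x15,
 * function 1, type 2, index 0 gives
 *
 *	UNCORE_PCI_DEV_FULL_DATA(0x15, 1, 2, 0) == 0x15010200
 *
 * and the accessors recover each field:
 *
 *	UNCORE_PCI_DEV_DEV(0x15010200)  == 0x15
 *	UNCORE_PCI_DEV_FUNC(0x15010200) == 0x01
 *	UNCORE_PCI_DEV_TYPE(0x15010200) == 0x02
 *	UNCORE_PCI_DEV_IDX(0x15010200)  == 0x00
 */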

#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)

struct pci_extra_dev {
	struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
};

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;
struct freerunning_counters;

struct intel_uncore_type {
	const char *name;
	int num_counters;		/* generic counters per box */
	int num_boxes;			/* boxes of this type */
	int perf_ctr_bits;		/* generic counter width */
	int fixed_ctr_bits;		/* fixed counter width */
	int num_freerunning_types;
	unsigned perf_ctr;		/* first generic counter register */
	unsigned event_ctl;		/* first event select register */
	unsigned event_mask;
	unsigned event_mask_ext;
	unsigned fixed_ctr;
	unsigned fixed_ctl;
	unsigned box_ctl;
	unsigned msr_offset;		/* MSR stride between boxes */
	unsigned num_shared_regs:8;	/* size of shared_regs[] per box */
	unsigned single_fixed:1;
	unsigned pair_ctr_ctl:1;	/* ctr/ctl registers are interleaved */
	unsigned *msr_offsets;		/* per-box offsets, if the stride is not flat */
	struct event_constraint unconstrainted;
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;
	struct intel_uncore_ops *ops;
	struct uncore_event_desc *event_descs;
	struct freerunning_counters *freerunning;
	const struct attribute_group *attr_groups[4];
	struct pmu *pmu; /* for custom pmu ops */
};

#define pmu_group attr_groups[0]
#define format_group attr_groups[1]
#define events_group attr_groups[2]

struct intel_uncore_ops {
	void (*init_box)(struct intel_uncore_box *);
	void (*exit_box)(struct intel_uncore_box *);
	void (*disable_box)(struct intel_uncore_box *);
	void (*enable_box)(struct intel_uncore_box *);
	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
	void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
	u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
	int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
	struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
						   struct perf_event *);
	void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};

struct intel_uncore_pmu {
	struct pmu			pmu;
	char				name[UNCORE_PMU_NAME_LEN];
	int				pmu_idx;
	int				func_id;
	bool				registered;
	atomic_t			activeboxes;
	struct intel_uncore_type	*type;
	struct intel_uncore_box		**boxes;
};

struct intel_uncore_extra_reg {
	raw_spinlock_t lock;
	u64 config, config1, config2;
	atomic_t ref;
};

struct intel_uncore_box {
	int pci_phys_id;
	int pkgid;	/* Logical package ID */
	int n_active;	/* number of active events */
	int n_events;
	int cpu;	/* cpu to collect events */
	unsigned long flags;
	atomic_t refcnt;
	struct perf_event *events[UNCORE_PMC_IDX_MAX];
	struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
	struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
	unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	u64 tags[UNCORE_PMC_IDX_MAX];
	struct pci_dev *pci_dev;
	struct intel_uncore_pmu *pmu;
	u64 hrtimer_duration; /* hrtimer timeout for this box */
	struct hrtimer hrtimer;
	struct list_head list;
	struct list_head active_list;
	void *io_addr;
	struct intel_uncore_extra_reg shared_regs[];
};

#define UNCORE_BOX_FLAG_INITIATED	0
#define UNCORE_BOX_FLAG_CTL_OFFS8	1 /* event config registers are 8 bytes apart */

struct uncore_event_desc {
	struct kobj_attribute attr;
	const char *config;
};

struct freerunning_counters {
	unsigned int counter_base;	/* address of the first counter */
	unsigned int counter_offset;	/* offset between counters of this type */
	unsigned int box_offset;	/* offset between boxes */
	unsigned int num_counters;	/* counters of this type per box */
	unsigned int bits;		/* counter width */
};

struct pci2phy_map {
	struct list_head list;
	int segment;
	int pbus_to_physid[256];
};

struct pci2phy_map *__find_pci2phy_map(int segment);

ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf);

#define INTEL_UNCORE_EVENT_DESC(_name, _config)			\
{								\
	.attr	= __ATTR(_name, 0444, uncore_event_show, NULL),	\
	.config	= _config,					\
}
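
/*
 * Typical usage, modeled on the client IMC event tables in uncore_snb.c
 * (a sketch, not a complete event list); the array is zero-terminated:
 *
 *	static struct uncore_event_desc snb_uncore_imc_events[] = {
 *		INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
 *		INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
 *		{ },
 *	};
 */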

#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
static ssize_t __uncore_##_var##_show(struct kobject *kobj,		\
				struct kobj_attribute *attr,		\
				char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct kobj_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
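
/*
 * Typical usage (cf. the format attributes in uncore_snb.c); this expands
 * to format_attr_event plus a show routine that prints "config:0-7":
 *
 *	DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 */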

static inline bool uncore_pmc_fixed(int idx)
{
	return idx == UNCORE_PMC_IDX_FIXED;
}

static inline bool uncore_pmc_freerunning(int idx)
{
	return idx == UNCORE_PMC_IDX_FREERUNNING;
}

static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr;
}

static inline
unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
		return idx * 8 + box->pmu->type->event_ctl;

	return idx * 4 + box->pmu->type->event_ctl;
}

static inline
unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return idx * 8 + box->pmu->type->perf_ctr;
}

static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
{
	struct intel_uncore_pmu *pmu = box->pmu;

	return pmu->type->msr_offsets ?
		pmu->type->msr_offsets[pmu->pmu_idx] :
		pmu->type->msr_offset * pmu->pmu_idx;
}
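
/*
 * Worked example with made-up numbers: with a flat stride of
 * msr_offset = 0x10, the third box of a type (pmu_idx = 2) has its
 * registers 0x20 above the first box's. Types with an irregular layout
 * supply an msr_offsets[] array instead, and the offset is looked up
 * rather than computed.
 */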

static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->box_ctl)
		return 0;
	return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->fixed_ctl)
		return 0;
	return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}

/*
 * The uncore documentation assigns no event-code to free running counters,
 * so synthetic events must be defined to address them. These events are
 * encoded as event-code + umask-code.
 *
 * The event-code for all free running counters is 0xff, the same as for
 * the fixed counters.
 *
 * The umask-code distinguishes a fixed counter from a free running counter,
 * and the different types of free running counters from each other:
 * - For fixed counters, the umask-code is 0x0X.
 *   X is the index of the fixed counter, starting from 0.
 * - For free running counters, the umask-code uses the rest of the space
 *   and bears the format 0xXY.
 *   X is the type of free running counter, starting from 1.
 *   Y is the index of the free running counter within its type, starting
 *   from 0.
 *
 * For example, Skylake server has three types of IIO free running counters:
 * IO CLOCKS counters, BANDWIDTH counters and UTILIZATION counters.
 * The event-code for all of them is 0xff.
 * 'ioclk' is the first counter of IO CLOCKS. IO CLOCKS is the first type,
 * whose umask-codes start from 0x10.
 * So 'ioclk' is encoded as event=0xff,umask=0x10.
 * 'bw_in_port2' is the third counter of the BANDWIDTH type. BANDWIDTH is
 * the second type, whose umask-codes start from 0x20.
 * So 'bw_in_port2' is encoded as event=0xff,umask=0x22.
 */
static inline unsigned int uncore_freerunning_idx(u64 config)
{
	return ((config >> 8) & 0xf);
}

#define UNCORE_FREERUNNING_UMASK_START		0x10

static inline unsigned int uncore_freerunning_type(u64 config)
{
	return ((((config >> 8) - UNCORE_FREERUNNING_UMASK_START) >> 4) & 0xf);
}
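
/*
 * Worked decode of the 'bw_in_port2' example above (event=0xff,umask=0x22,
 * i.e. config == 0x22ff):
 *
 *	uncore_freerunning_idx(0x22ff)  == (0x22 & 0xf)               == 2
 *	uncore_freerunning_type(0x22ff) == ((0x22 - 0x10) >> 4) & 0xf == 1
 *
 * i.e. the third counter (idx 2) of the second type (BANDWIDTH).
 */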

static inline
unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
					struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->attr.config);
	unsigned int idx = uncore_freerunning_idx(event->attr.config);
	struct intel_uncore_pmu *pmu = box->pmu;

	return pmu->type->freerunning[type].counter_base +
	       pmu->type->freerunning[type].counter_offset * idx +
	       pmu->type->freerunning[type].box_offset * pmu->pmu_idx;
}

static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
	return box->pmu->type->event_ctl +
		(box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		uncore_msr_box_offset(box);
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return box->pmu->type->perf_ctr +
		(box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		uncore_msr_box_offset(box);
}

static inline
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
{
	if (box->pci_dev)
		return uncore_pci_fixed_ctl(box);
	else
		return uncore_msr_fixed_ctl(box);
}

static inline
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
{
	if (box->pci_dev)
		return uncore_pci_fixed_ctr(box);
	else
		return uncore_msr_fixed_ctr(box);
}

static inline
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev)
		return uncore_pci_event_ctl(box, idx);
	else
		return uncore_msr_event_ctl(box, idx);
}

static inline
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev)
		return uncore_pci_perf_ctr(box, idx);
	else
		return uncore_msr_perf_ctr(box, idx);
}

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr_bits;
}

static inline
unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
				     struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->attr.config);

	return box->pmu->type->freerunning[type].bits;
}

static inline int uncore_num_freerunning(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->attr.config);

	return box->pmu->type->freerunning[type].num_counters;
}

static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	return box->pmu->type->num_freerunning_types;
}

static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
						 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->attr.config);
	unsigned int idx = uncore_freerunning_idx(event->attr.config);

	return (type < uncore_num_freerunning_types(box, event)) &&
	       (idx < uncore_num_freerunning(box, event));
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
	return box->pmu->type->num_counters;
}

static inline bool is_freerunning_event(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	return ((cfg & UNCORE_FIXED_EVENT) == UNCORE_FIXED_EVENT) &&
	       (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
}
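
/*
 * Both fixed and free running events carry event-code 0xff; only the
 * umask separates them, per the encoding described above:
 *
 *	config == 0x00ff (umask 0x00) -> fixed counter, not free running
 *	config == 0x10ff (umask 0x10) -> free running, first type, idx 0
 */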

static inline void uncore_disable_box(struct intel_uncore_box *box)
{
	if (box->pmu->type->ops->disable_box)
		box->pmu->type->ops->disable_box(box);
}

static inline void uncore_enable_box(struct intel_uncore_box *box)
{
	if (box->pmu->type->ops->enable_box)
		box->pmu->type->ops->enable_box(box);
}

static inline void uncore_disable_event(struct intel_uncore_box *box,
				struct perf_event *event)
{
	box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
				struct perf_event *event)
{
	box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
				struct perf_event *event)
{
	return box->pmu->type->ops->read_counter(box, event);
}

static inline void uncore_box_init(struct intel_uncore_box *box)
{
	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->init_box)
			box->pmu->type->ops->init_box(box);
	}
}

static inline void uncore_box_exit(struct intel_uncore_box *box)
{
	if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->exit_box)
			box->pmu->type->ops->exit_box(box);
	}
}

static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
	return (box->pkgid < 0);
}

static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	return event->pmu_private;
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_event_start(struct perf_event *event, int flags);
void uncore_pmu_event_stop(struct perf_event *event, int flags);
int uncore_pmu_event_add(struct perf_event *event, int flags);
void uncore_pmu_event_del(struct perf_event *event, int flags);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);

extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct pci_driver *uncore_pci_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;

/* uncore_snb.c */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
int snb_pci2phy_map_init(int devid);

/* uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);
int skx_uncore_pci_init(void);
void skx_uncore_cpu_init(void);

/* uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);