/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Support Intel uncore PerfMon discovery mechanism.
 * Copyright(c) 2021 Intel Corporation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "uncore.h"
#include "uncore_discovery.h"

static struct rb_root discovery_tables = RB_ROOT;
static int num_discovered_types[UNCORE_ACCESS_MAX];

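/*
 * Some platforms expose a dedicated discovery-table PCI device that
 * carries the uncore discovery DVSEC. Probe for it, so the device
 * search below can be narrowed to that device ID.
 */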
static bool has_generic_discovery_table(void)
{
	struct pci_dev *dev;
	int dvsec;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, UNCORE_DISCOVERY_TABLE_DEVICE, NULL);
	if (!dev)
		return false;

	/* A discovery table device has a unique capability ID. */
	dvsec = pci_find_next_ext_capability(dev, 0, UNCORE_EXT_CAP_ID_DISCOVERY);
	pci_dev_put(dev);
	if (dvsec)
		return true;

	return false;
}

static int logical_die_id;

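/*
 * Map a discovery-table device to a logical die ID via the NUMA node of
 * its bus. If NUMA information is unavailable, hand out IDs in the
 * order the devices are detected.
 */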
static int get_device_die_id(struct pci_dev *dev)
{
	int cpu, node = pcibus_to_node(dev->bus);

	/*
	 * If the NUMA info is not available, assume that the logical die id is
	 * continuous in the order in which the discovery table devices are
	 * detected.
	 */
	if (node < 0)
		return logical_die_id++;

	for_each_cpu(cpu, cpumask_of_node(node)) {
		struct cpuinfo_x86 *c = &cpu_data(cpu);

		if (c->initialized && cpu_to_node(cpu) == node)
			return c->logical_die_id;
	}

	/*
	 * All CPUs of a node may be offline. In that case, the PCI and
	 * MMIO types of uncore blocks which are enumerated by the device
	 * will be unavailable.
	 */
	return -1;
}

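/*
 * Discovered uncore types are kept in an rb-tree keyed by box type ID,
 * so units parsed from later tables can be matched to an existing type.
 */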
#define __node_2_type(cur)	\
	rb_entry((cur), struct intel_uncore_discovery_type, node)

static inline int __type_cmp(const void *key, const struct rb_node *b)
{
	struct intel_uncore_discovery_type *type_b = __node_2_type(b);
	const u16 *type_id = key;

	if (type_b->type > *type_id)
		return -1;
	else if (type_b->type < *type_id)
		return 1;

	return 0;
}

static inline struct intel_uncore_discovery_type *
search_uncore_discovery_type(u16 type_id)
{
	struct rb_node *node = rb_find(&type_id, &discovery_tables, __type_cmp);

	return (node) ? __node_2_type(node) : NULL;
}

static inline bool __type_less(struct rb_node *a, const struct rb_node *b)
{
	return (__node_2_type(a)->type < __node_2_type(b)->type);
}

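/*
 * Allocate a discovery type for a not-yet-seen box type and insert it
 * into the rb-tree. The per-die box control array is sized for
 * __uncore_max_dies entries.
 */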
static struct intel_uncore_discovery_type *
add_uncore_discovery_type(struct uncore_unit_discovery *unit)
{
	struct intel_uncore_discovery_type *type;

	if (unit->access_type >= UNCORE_ACCESS_MAX) {
		pr_warn("Unsupported access type %d\n", unit->access_type);
		return NULL;
	}

	type = kzalloc(sizeof(struct intel_uncore_discovery_type), GFP_KERNEL);
	if (!type)
		return NULL;

	type->box_ctrl_die = kcalloc(__uncore_max_dies, sizeof(u64), GFP_KERNEL);
	if (!type->box_ctrl_die)
		goto free_type;

	type->access_type = unit->access_type;
	num_discovered_types[type->access_type]++;
	type->type = unit->box_type;

	rb_add(&type->node, &discovery_tables, __type_less);

	return type;

free_type:
	kfree(type);

	return NULL;
}

static struct intel_uncore_discovery_type *
get_uncore_discovery_type(struct uncore_unit_discovery *unit)
{
	struct intel_uncore_discovery_type *type;

	type = search_uncore_discovery_type(unit->box_type);
	if (type)
		return type;

	return add_uncore_discovery_type(unit);
}

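/*
 * Record one discovered unit. The first unit of a type seeds the
 * generic parameters (counter count/width, register offsets); each
 * further unit of the same type grows the ids[] and box_offset[] arrays
 * by one entry. For a table that was already parsed on another die
 * (parsed == true), only the per-die box control address is recorded.
 */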
static void
uncore_insert_box_info(struct uncore_unit_discovery *unit,
		       int die, bool parsed)
{
	struct intel_uncore_discovery_type *type;
	unsigned int *box_offset, *ids;
	int i;

	if (WARN_ON_ONCE(!unit->ctl || !unit->ctl_offset || !unit->ctr_offset))
		return;

	if (parsed) {
		type = search_uncore_discovery_type(unit->box_type);
		if (WARN_ON_ONCE(!type))
			return;
		/* Store the first box of each die */
		if (!type->box_ctrl_die[die])
			type->box_ctrl_die[die] = unit->ctl;
		return;
	}

	type = get_uncore_discovery_type(unit);
	if (!type)
		return;

	box_offset = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL);
	if (!box_offset)
		return;

	ids = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL);
	if (!ids)
		goto free_box_offset;

	/* Store generic information for the first box */
	if (!type->num_boxes) {
		type->box_ctrl = unit->ctl;
		type->box_ctrl_die[die] = unit->ctl;
		type->num_counters = unit->num_regs;
		type->counter_width = unit->bit_width;
		type->ctl_offset = unit->ctl_offset;
		type->ctr_offset = unit->ctr_offset;
		*ids = unit->box_id;
		goto end;
	}

	for (i = 0; i < type->num_boxes; i++) {
		ids[i] = type->ids[i];
		box_offset[i] = type->box_offset[i];

		if (WARN_ON_ONCE(unit->box_id == ids[i]))
			goto free_ids;
	}
	ids[i] = unit->box_id;
	box_offset[i] = unit->ctl - type->box_ctrl;
	kfree(type->ids);
	kfree(type->box_offset);
end:
	type->ids = ids;
	type->box_offset = box_offset;
	type->num_boxes++;
	return;

free_ids:
	kfree(ids);

free_box_offset:
	kfree(box_offset);
}

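/*
 * Layout of the discovery table behind the BAR selected by the DVSEC:
 *
 *   entry 0      : global discovery state
 *   entry 1 .. N : unit discovery state, one entry per unit
 *
 * Each entry occupies global.stride * 8 bytes, so unit i is read from
 * byte offset (i + 1) * global.stride * 8.
 */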
static int parse_discovery_table(struct pci_dev *dev, int die,
				 u32 bar_offset, bool *parsed)
{
	struct uncore_global_discovery global;
	struct uncore_unit_discovery unit;
	void __iomem *io_addr;
	resource_size_t addr;
	unsigned long size;
	u32 val;
	int i;

	pci_read_config_dword(dev, bar_offset, &val);

	if (val & UNCORE_DISCOVERY_MASK)
		return -EINVAL;

	addr = (resource_size_t)(val & ~UNCORE_DISCOVERY_MASK);
	size = UNCORE_DISCOVERY_GLOBAL_MAP_SIZE;
	io_addr = ioremap(addr, size);
	if (!io_addr)
		return -ENOMEM;

	/* Read Global Discovery State */
	memcpy_fromio(&global, io_addr, sizeof(struct uncore_global_discovery));
	if (uncore_discovery_invalid_unit(global)) {
		pr_info("Invalid Global Discovery State: 0x%llx 0x%llx 0x%llx\n",
			global.table1, global.ctl, global.table3);
		iounmap(io_addr);
		return -EINVAL;
	}
	iounmap(io_addr);

	size = (1 + global.max_units) * global.stride * 8;
	io_addr = ioremap(addr, size);
	if (!io_addr)
		return -ENOMEM;

	/* Parse the Unit Discovery State */
	for (i = 0; i < global.max_units; i++) {
		memcpy_fromio(&unit, io_addr + (i + 1) * (global.stride * 8),
			      sizeof(struct uncore_unit_discovery));

		if (uncore_discovery_invalid_unit(unit))
			continue;

		if (unit.access_type >= UNCORE_ACCESS_MAX)
			continue;

		uncore_insert_box_info(&unit, die, *parsed);
	}

	*parsed = true;
	iounmap(io_addr);
	return 0;
}

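/*
 * Walk every candidate PCI device, follow each PMON discovery DVSEC
 * entry to the BAR that holds its table, and parse that table for the
 * device's die. Returns true only if at least one table was parsed
 * successfully.
 */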
bool intel_uncore_has_discovery_tables(void)
{
	u32 device, val, entry_id, bar_offset;
	int die, dvsec = 0, ret = true;
	struct pci_dev *dev = NULL;
	bool parsed = false;

	if (has_generic_discovery_table())
		device = UNCORE_DISCOVERY_TABLE_DEVICE;
	else
		device = PCI_ANY_ID;

	/*
	 * Start a new search and iterate through the list of
	 * the discovery table devices.
	 */
	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, dev)) != NULL) {
		while ((dvsec = pci_find_next_ext_capability(dev, dvsec, UNCORE_EXT_CAP_ID_DISCOVERY))) {
			pci_read_config_dword(dev, dvsec + UNCORE_DISCOVERY_DVSEC_OFFSET, &val);
			entry_id = val & UNCORE_DISCOVERY_DVSEC_ID_MASK;
			if (entry_id != UNCORE_DISCOVERY_DVSEC_ID_PMON)
				continue;

			pci_read_config_dword(dev, dvsec + UNCORE_DISCOVERY_DVSEC2_OFFSET, &val);

			if (val & ~UNCORE_DISCOVERY_DVSEC2_BIR_MASK) {
				ret = false;
				goto err;
			}
			bar_offset = UNCORE_DISCOVERY_BIR_BASE +
				     (val & UNCORE_DISCOVERY_DVSEC2_BIR_MASK) * UNCORE_DISCOVERY_BIR_STEP;

			die = get_device_die_id(dev);
			if (die < 0)
				continue;

			parse_discovery_table(dev, die, bar_offset, &parsed);
		}
	}

	/* None of the discovery tables are available */
	if (!parsed)
		ret = false;
err:
	pci_dev_put(dev);

	return ret;
}

void intel_uncore_clear_discovery_tables(void)
{
	struct intel_uncore_discovery_type *type, *next;

	rbtree_postorder_for_each_entry_safe(type, next, &discovery_tables, node) {
		kfree(type->box_ctrl_die);
		kfree(type);
	}
}

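/*
 * Generic event format shared by all discovered PMON boxes. With these
 * fields a raw event can be specified directly on the perf command
 * line, e.g. (the PMU name here is illustrative):
 *
 *   perf stat -e uncore_type_0_0/event=0x1,umask=0x1/
 */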
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh, thresh, "config:24-31");

static struct attribute *generic_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh.attr,
	NULL,
};

static const struct attribute_group generic_uncore_format_group = {
	.name = "format",
	.attrs = generic_uncore_formats_attr,
};

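/*
 * Generic ops for boxes accessed via MSRs: writing the box control MSR
 * initializes (CTL_INT) or freezes (CTL_FRZ) the whole box; each event
 * is programmed through its own control register at hwc->config_base.
 */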
void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box)
{
	wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_INT);
}

void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ);
}

void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(uncore_msr_box_ctl(box), 0);
}

static void intel_generic_uncore_msr_enable_event(struct intel_uncore_box *box,
					    struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

static void intel_generic_uncore_msr_disable_event(struct intel_uncore_box *box,
					     struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, 0);
}

static struct intel_uncore_ops generic_uncore_msr_ops = {
	.init_box		= intel_generic_uncore_msr_init_box,
	.disable_box		= intel_generic_uncore_msr_disable_box,
	.enable_box		= intel_generic_uncore_msr_enable_box,
	.disable_event		= intel_generic_uncore_msr_disable_event,
	.enable_event		= intel_generic_uncore_msr_enable_event,
	.read_counter		= uncore_msr_read_counter,
};

void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_INT);
}

void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_FRZ);
}

void intel_generic_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}

static void intel_generic_uncore_pci_enable_event(struct intel_uncore_box *box,
					    struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

void intel_generic_uncore_pci_disable_event(struct intel_uncore_box *box,
					    struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, 0);
}

u64 intel_generic_uncore_pci_read_counter(struct intel_uncore_box *box,
					  struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}

static struct intel_uncore_ops generic_uncore_pci_ops = {
	.init_box	= intel_generic_uncore_pci_init_box,
	.disable_box	= intel_generic_uncore_pci_disable_box,
	.enable_box	= intel_generic_uncore_pci_enable_box,
	.disable_event	= intel_generic_uncore_pci_disable_event,
	.enable_event	= intel_generic_uncore_pci_enable_event,
	.read_counter	= intel_generic_uncore_pci_read_counter,
};

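/*
 * Generic ops for boxes accessed via MMIO. Each box maps
 * UNCORE_GENERIC_MMIO_SIZE bytes of register space at its discovered
 * control address; counters are read with the shared
 * uncore_mmio_read_counter() helper.
 */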
#define UNCORE_GENERIC_MMIO_SIZE		0x4000

static unsigned int generic_uncore_mmio_box_ctl(struct intel_uncore_box *box)
{
	struct intel_uncore_type *type = box->pmu->type;

	if (!type->box_ctls || !type->box_ctls[box->dieid] || !type->mmio_offsets)
		return 0;

	return type->box_ctls[box->dieid] + type->mmio_offsets[box->pmu->pmu_idx];
}

void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box)
{
	unsigned int box_ctl = generic_uncore_mmio_box_ctl(box);
	struct intel_uncore_type *type = box->pmu->type;
	resource_size_t addr;

	if (!box_ctl) {
		pr_warn("Uncore type %d box %d: Invalid box control address.\n",
			type->type_id, type->box_ids[box->pmu->pmu_idx]);
		return;
	}

	addr = box_ctl;
	box->io_addr = ioremap(addr, UNCORE_GENERIC_MMIO_SIZE);
	if (!box->io_addr) {
		pr_warn("Uncore type %d box %d: ioremap error for 0x%llx.\n",
			type->type_id, type->box_ids[box->pmu->pmu_idx],
			(unsigned long long)addr);
		return;
	}

	writel(GENERIC_PMON_BOX_CTL_INT, box->io_addr);
}

void intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(GENERIC_PMON_BOX_CTL_FRZ, box->io_addr);
}

void intel_generic_uncore_mmio_enable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(0, box->io_addr);
}

static void intel_generic_uncore_mmio_enable_event(struct intel_uncore_box *box,
					     struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	writel(hwc->config, box->io_addr + hwc->config_base);
}

void intel_generic_uncore_mmio_disable_event(struct intel_uncore_box *box,
					     struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	writel(0, box->io_addr + hwc->config_base);
}

static struct intel_uncore_ops generic_uncore_mmio_ops = {
	.init_box	= intel_generic_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= intel_generic_uncore_mmio_disable_box,
	.enable_box	= intel_generic_uncore_mmio_enable_box,
	.disable_event	= intel_generic_uncore_mmio_disable_event,
	.enable_event	= intel_generic_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

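/*
 * Fill an intel_uncore_type from a discovered type. The meaning of the
 * control/counter "offsets" depends on the access type: absolute MSR
 * addresses for MSR boxes, config-space offsets for PCI boxes, and
 * offsets from the per-die mapped base for MMIO boxes.
 */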
static bool uncore_update_uncore_type(enum uncore_access_type type_id,
				      struct intel_uncore_type *uncore,
				      struct intel_uncore_discovery_type *type)
{
	uncore->type_id = type->type;
	uncore->num_boxes = type->num_boxes;
	uncore->num_counters = type->num_counters;
	uncore->perf_ctr_bits = type->counter_width;
	uncore->box_ids = type->ids;

	switch (type_id) {
	case UNCORE_ACCESS_MSR:
		uncore->ops = &generic_uncore_msr_ops;
		uncore->perf_ctr = (unsigned int)type->box_ctrl + type->ctr_offset;
		uncore->event_ctl = (unsigned int)type->box_ctrl + type->ctl_offset;
		uncore->box_ctl = (unsigned int)type->box_ctrl;
		uncore->msr_offsets = type->box_offset;
		break;
	case UNCORE_ACCESS_PCI:
		uncore->ops = &generic_uncore_pci_ops;
		uncore->perf_ctr = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl) + type->ctr_offset;
		uncore->event_ctl = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl) + type->ctl_offset;
		uncore->box_ctl = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl);
		uncore->box_ctls = type->box_ctrl_die;
		uncore->pci_offsets = type->box_offset;
		break;
	case UNCORE_ACCESS_MMIO:
		uncore->ops = &generic_uncore_mmio_ops;
		uncore->perf_ctr = (unsigned int)type->ctr_offset;
		uncore->event_ctl = (unsigned int)type->ctl_offset;
		uncore->box_ctl = (unsigned int)type->box_ctrl;
		uncore->box_ctls = type->box_ctrl_die;
		uncore->mmio_offsets = type->box_offset;
		uncore->mmio_map_size = UNCORE_GENERIC_MMIO_SIZE;
		break;
	default:
		return false;
	}

	return true;
}

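/*
 * Build the NULL-terminated array of uncore types for one access type.
 * @num_extra slots are reserved so a caller can append additional types
 * after the discovered ones.
 */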
struct intel_uncore_type **
intel_uncore_generic_init_uncores(enum uncore_access_type type_id, int num_extra)
{
	struct intel_uncore_discovery_type *type;
	struct intel_uncore_type **uncores;
	struct intel_uncore_type *uncore;
	struct rb_node *node;
	int i = 0;

	uncores = kcalloc(num_discovered_types[type_id] + num_extra + 1,
			  sizeof(struct intel_uncore_type *), GFP_KERNEL);
	if (!uncores)
		return empty_uncore;

	for (node = rb_first(&discovery_tables); node; node = rb_next(node)) {
		type = rb_entry(node, struct intel_uncore_discovery_type, node);
		if (type->access_type != type_id)
			continue;

		uncore = kzalloc(sizeof(struct intel_uncore_type), GFP_KERNEL);
		if (!uncore)
			break;

		uncore->event_mask = GENERIC_PMON_RAW_EVENT_MASK;
		uncore->format_group = &generic_uncore_format_group;

		if (!uncore_update_uncore_type(type_id, uncore, type)) {
			kfree(uncore);
			continue;
		}
		uncores[i++] = uncore;
	}

	return uncores;
}

void intel_uncore_generic_uncore_cpu_init(void)
{
	uncore_msr_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MSR, 0);
}

int intel_uncore_generic_uncore_pci_init(void)
{
	uncore_pci_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_PCI, 0);

	return 0;
}

void intel_uncore_generic_uncore_mmio_init(void)
{
	uncore_mmio_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MMIO, 0);
}