1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Implement the AER root port service driver. The driver registers an IRQ
4  * handler. When a root port triggers an AER interrupt, the IRQ handler
5  * collects root port status and schedules work.
6  *
7  * Copyright (C) 2006 Intel Corp.
8  *	Tom Long Nguyen (tom.l.nguyen@intel.com)
9  *	Zhang Yanmin (yanmin.zhang@intel.com)
10  *
11  * (C) Copyright 2009 Hewlett-Packard Development Company, L.P.
12  *    Andrew Patterson <andrew.patterson@hp.com>
13  */
14 
15 #include <linux/cper.h>
16 #include <linux/pci.h>
17 #include <linux/pci-acpi.h>
18 #include <linux/sched.h>
19 #include <linux/kernel.h>
20 #include <linux/errno.h>
21 #include <linux/pm.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
24 #include <linux/delay.h>
25 #include <linux/kfifo.h>
26 #include <linux/slab.h>
27 #include <acpi/apei.h>
28 #include <ras/ras_event.h>
29 
30 #include "../pci.h"
31 #include "portdrv.h"
32 
33 #define AER_ERROR_SOURCES_MAX		100
34 
35 #define AER_MAX_TYPEOF_COR_ERRS		16	/* as per PCI_ERR_COR_STATUS */
36 #define AER_MAX_TYPEOF_UNCOR_ERRS	26	/* as per PCI_ERR_UNCOR_STATUS */
37 
38 struct aer_err_source {
39 	unsigned int status;
40 	unsigned int id;
41 };
42 
43 struct aer_rpc {
44 	struct pci_dev *rpd;		/* Root Port device */
45 	struct work_struct dpc_handler;
46 	struct aer_err_source e_sources[AER_ERROR_SOURCES_MAX];
47 	struct aer_err_info e_info;
48 	unsigned short prod_idx;	/* Error Producer Index */
49 	unsigned short cons_idx;	/* Error Consumer Index */
50 	int isr;
51 	spinlock_t e_lock;		/*
52 					 * Lock access to Error Status/ID Regs
53 					 * and error producer/consumer index
54 					 */
55 	struct mutex rpc_mutex;		/*
56 					 * only one thread could do
57 					 * recovery on the same
58 					 * root port hierarchy
59 					 */
60 };
61 
62 /* AER stats for the device */
63 struct aer_stats {
64 
65 	/*
66 	 * Fields for all AER capable devices. They indicate the errors
67 	 * "as seen by this device". Note that this may mean that if an
68 	 * end point is causing problems, the AER counters may increment
69 	 * at its link partner (e.g. root port) because the errors will be
70 	 * "seen" by the link partner and not the the problematic end point
71 	 * itself (which may report all counters as 0 as it never saw any
72 	 * problems).
73 	 */
74 	/* Counters for different types of correctable errors */
75 	u64 dev_cor_errs[AER_MAX_TYPEOF_COR_ERRS];
76 	/* Counters for different types of fatal uncorrectable errors */
77 	u64 dev_fatal_errs[AER_MAX_TYPEOF_UNCOR_ERRS];
78 	/* Counters for different types of nonfatal uncorrectable errors */
79 	u64 dev_nonfatal_errs[AER_MAX_TYPEOF_UNCOR_ERRS];
80 	/* Total number of ERR_COR sent by this device */
81 	u64 dev_total_cor_errs;
82 	/* Total number of ERR_FATAL sent by this device */
83 	u64 dev_total_fatal_errs;
84 	/* Total number of ERR_NONFATAL sent by this device */
85 	u64 dev_total_nonfatal_errs;
86 
87 	/*
88 	 * Fields for Root Ports and Root Complex Event Collectors only; these
89 	 * indicate the total number of ERR_COR, ERR_FATAL, and ERR_NONFATAL
90 	 * messages received by the Root Port / Event Collector, INCLUDING the
91 	 * ones that are generated internally (by the Root Port itself).
92 	 */
93 	u64 rootport_total_cor_errs;
94 	u64 rootport_total_fatal_errs;
95 	u64 rootport_total_nonfatal_errs;
96 };
97 
98 #define AER_LOG_TLP_MASKS		(PCI_ERR_UNC_POISON_TLP|	\
99 					PCI_ERR_UNC_ECRC|		\
100 					PCI_ERR_UNC_UNSUP|		\
101 					PCI_ERR_UNC_COMP_ABORT|		\
102 					PCI_ERR_UNC_UNX_COMP|		\
103 					PCI_ERR_UNC_MALF_TLP)
104 
105 #define SYSTEM_ERROR_INTR_ON_MESG_MASK	(PCI_EXP_RTCTL_SECEE|	\
106 					PCI_EXP_RTCTL_SENFEE|	\
107 					PCI_EXP_RTCTL_SEFEE)
108 #define ROOT_PORT_INTR_ON_MESG_MASK	(PCI_ERR_ROOT_CMD_COR_EN|	\
109 					PCI_ERR_ROOT_CMD_NONFATAL_EN|	\
110 					PCI_ERR_ROOT_CMD_FATAL_EN)
111 #define ERR_COR_ID(d)			(d & 0xffff)
112 #define ERR_UNCOR_ID(d)			(d >> 16)
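/*
 * Illustrative note (added, not in the original source): the Root Error
 * Source Identification register packs two Requester IDs into one dword -
 * the ERR_COR source in the low 16 bits and the ERR_FATAL/NONFATAL source
 * in the high 16 bits - which is what the two macros above extract.  For
 * example, a hypothetical register value of 0x00200010 would decode as:
 *
 *	ERR_COR_ID(0x00200010)   == 0x0010  (bus 00, device 02, function 0)
 *	ERR_UNCOR_ID(0x00200010) == 0x0020  (bus 00, device 04, function 0)
 */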
113 
114 static int pcie_aer_disable;
115 
116 void pci_no_aer(void)
117 {
118 	pcie_aer_disable = 1;
119 }
120 
121 bool pci_aer_available(void)
122 {
123 	return !pcie_aer_disable && pci_msi_enabled();
124 }
125 
126 #ifdef CONFIG_PCIE_ECRC
127 
128 #define ECRC_POLICY_DEFAULT 0		/* ECRC set by BIOS */
129 #define ECRC_POLICY_OFF     1		/* ECRC off for performance */
130 #define ECRC_POLICY_ON      2		/* ECRC on for data integrity */
131 
132 static int ecrc_policy = ECRC_POLICY_DEFAULT;
133 
134 static const char *ecrc_policy_str[] = {
135 	[ECRC_POLICY_DEFAULT] = "bios",
136 	[ECRC_POLICY_OFF] = "off",
137 	[ECRC_POLICY_ON] = "on"
138 };
139 
140 /**
141  * enable_ecrc_checking - enable PCIe ECRC checking for a device
142  * @dev: the PCI device
143  *
144  * Returns 0 on success, or negative on failure.
145  */
146 static int enable_ecrc_checking(struct pci_dev *dev)
147 {
148 	int pos;
149 	u32 reg32;
150 
151 	if (!pci_is_pcie(dev))
152 		return -ENODEV;
153 
154 	pos = dev->aer_cap;
155 	if (!pos)
156 		return -ENODEV;
157 
158 	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
159 	if (reg32 & PCI_ERR_CAP_ECRC_GENC)
160 		reg32 |= PCI_ERR_CAP_ECRC_GENE;
161 	if (reg32 & PCI_ERR_CAP_ECRC_CHKC)
162 		reg32 |= PCI_ERR_CAP_ECRC_CHKE;
163 	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
164 
165 	return 0;
166 }
167 
168 /**
169  * disable_ecrc_checking - disable PCIe ECRC checking for a device
170  * @dev: the PCI device
171  *
172  * Returns 0 on success, or negative on failure.
173  */
174 static int disable_ecrc_checking(struct pci_dev *dev)
175 {
176 	int pos;
177 	u32 reg32;
178 
179 	if (!pci_is_pcie(dev))
180 		return -ENODEV;
181 
182 	pos = dev->aer_cap;
183 	if (!pos)
184 		return -ENODEV;
185 
186 	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
187 	reg32 &= ~(PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
188 	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
189 
190 	return 0;
191 }
192 
193 /**
194  * pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based on global policy
195  * @dev: the PCI device
196  */
197 void pcie_set_ecrc_checking(struct pci_dev *dev)
198 {
199 	switch (ecrc_policy) {
200 	case ECRC_POLICY_DEFAULT:
201 		return;
202 	case ECRC_POLICY_OFF:
203 		disable_ecrc_checking(dev);
204 		break;
205 	case ECRC_POLICY_ON:
206 		enable_ecrc_checking(dev);
207 		break;
208 	default:
209 		return;
210 	}
211 }
212 
213 /**
214  * pcie_ecrc_get_policy - parse kernel command-line ecrc option
215  */
216 void pcie_ecrc_get_policy(char *str)
217 {
218 	int i;
219 
220 	for (i = 0; i < ARRAY_SIZE(ecrc_policy_str); i++)
221 		if (!strncmp(str, ecrc_policy_str[i],
222 			     strlen(ecrc_policy_str[i])))
223 			break;
224 	if (i >= ARRAY_SIZE(ecrc_policy_str))
225 		return;
226 
227 	ecrc_policy = i;
228 }
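/*
 * Usage sketch (added for illustration): this parser is normally reached
 * from the "pci=ecrc=" boot parameter, so a kernel command line such as
 *
 *	pci=ecrc=on
 *
 * selects ECRC_POLICY_ON, and pcie_set_ecrc_checking() above then enables
 * ECRC generation/checking on each device that advertises the capability.
 */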
229 #endif	/* CONFIG_PCIE_ECRC */
230 
231 #ifdef CONFIG_ACPI_APEI
232 static inline int hest_match_pci(struct acpi_hest_aer_common *p,
233 				 struct pci_dev *pci)
234 {
235 	return   ACPI_HEST_SEGMENT(p->bus) == pci_domain_nr(pci->bus) &&
236 		 ACPI_HEST_BUS(p->bus)     == pci->bus->number &&
237 		 p->device                 == PCI_SLOT(pci->devfn) &&
238 		 p->function               == PCI_FUNC(pci->devfn);
239 }
240 
241 static inline bool hest_match_type(struct acpi_hest_header *hest_hdr,
242 				struct pci_dev *dev)
243 {
244 	u16 hest_type = hest_hdr->type;
245 	u8 pcie_type = pci_pcie_type(dev);
246 
247 	if ((hest_type == ACPI_HEST_TYPE_AER_ROOT_PORT &&
248 		pcie_type == PCI_EXP_TYPE_ROOT_PORT) ||
249 	    (hest_type == ACPI_HEST_TYPE_AER_ENDPOINT &&
250 		pcie_type == PCI_EXP_TYPE_ENDPOINT) ||
251 	    (hest_type == ACPI_HEST_TYPE_AER_BRIDGE &&
252 		(dev->class >> 16) == PCI_BASE_CLASS_BRIDGE))
253 		return true;
254 	return false;
255 }
256 
257 struct aer_hest_parse_info {
258 	struct pci_dev *pci_dev;
259 	int firmware_first;
260 };
261 
262 static int hest_source_is_pcie_aer(struct acpi_hest_header *hest_hdr)
263 {
264 	if (hest_hdr->type == ACPI_HEST_TYPE_AER_ROOT_PORT ||
265 	    hest_hdr->type == ACPI_HEST_TYPE_AER_ENDPOINT ||
266 	    hest_hdr->type == ACPI_HEST_TYPE_AER_BRIDGE)
267 		return 1;
268 	return 0;
269 }
270 
271 static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data)
272 {
273 	struct aer_hest_parse_info *info = data;
274 	struct acpi_hest_aer_common *p;
275 	int ff;
276 
277 	if (!hest_source_is_pcie_aer(hest_hdr))
278 		return 0;
279 
280 	p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
281 	ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
282 
283 	/*
284 	 * If no specific device is supplied, determine whether
285 	 * FIRMWARE_FIRST is set for *any* PCIe device.
286 	 */
287 	if (!info->pci_dev) {
288 		info->firmware_first |= ff;
289 		return 0;
290 	}
291 
292 	/* Otherwise, check the specific device */
293 	if (p->flags & ACPI_HEST_GLOBAL) {
294 		if (hest_match_type(hest_hdr, info->pci_dev))
295 			info->firmware_first = ff;
296 	} else
297 		if (hest_match_pci(p, info->pci_dev))
298 			info->firmware_first = ff;
299 
300 	return 0;
301 }
302 
303 static void aer_set_firmware_first(struct pci_dev *pci_dev)
304 {
305 	int rc;
306 	struct aer_hest_parse_info info = {
307 		.pci_dev	= pci_dev,
308 		.firmware_first	= 0,
309 	};
310 
311 	rc = apei_hest_parse(aer_hest_parse, &info);
312 
313 	if (rc)
314 		pci_dev->__aer_firmware_first = 0;
315 	else
316 		pci_dev->__aer_firmware_first = info.firmware_first;
317 	pci_dev->__aer_firmware_first_valid = 1;
318 }
319 
320 int pcie_aer_get_firmware_first(struct pci_dev *dev)
321 {
322 	if (!pci_is_pcie(dev))
323 		return 0;
324 
325 	if (pcie_ports_native)
326 		return 0;
327 
328 	if (!dev->__aer_firmware_first_valid)
329 		aer_set_firmware_first(dev);
330 	return dev->__aer_firmware_first;
331 }
332 
333 static bool aer_firmware_first;
334 
335 /**
336  * aer_acpi_firmware_first - Check if APEI should control AER.
337  */
338 bool aer_acpi_firmware_first(void)
339 {
340 	static bool parsed = false;
341 	struct aer_hest_parse_info info = {
342 		.pci_dev	= NULL,	/* Check all PCIe devices */
343 		.firmware_first	= 0,
344 	};
345 
346 	if (pcie_ports_native)
347 		return false;
348 
349 	if (!parsed) {
350 		apei_hest_parse(aer_hest_parse, &info);
351 		aer_firmware_first = info.firmware_first;
352 		parsed = true;
353 	}
354 	return aer_firmware_first;
355 }
356 #endif
357 
358 #define	PCI_EXP_AER_FLAGS	(PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
359 				 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
360 
361 int pci_enable_pcie_error_reporting(struct pci_dev *dev)
362 {
363 	if (pcie_aer_get_firmware_first(dev))
364 		return -EIO;
365 
366 	if (!dev->aer_cap)
367 		return -EIO;
368 
369 	return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
370 }
371 EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
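/*
 * Usage sketch (illustrative only, not part of this driver): an endpoint
 * driver would typically opt in to AER reporting from its probe() routine,
 * e.g. with a hypothetical my_probe():
 *
 *	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		...
 *		if (pci_enable_pcie_error_reporting(pdev))
 *			dev_info(&pdev->dev, "AER reporting not enabled\n");
 *		...
 *	}
 *
 * The call returns -EIO when the platform keeps AER firmware-first or when
 * the device has no AER capability.
 */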
372 
373 int pci_disable_pcie_error_reporting(struct pci_dev *dev)
374 {
375 	if (pcie_aer_get_firmware_first(dev))
376 		return -EIO;
377 
378 	return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
379 					  PCI_EXP_AER_FLAGS);
380 }
381 EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
382 
383 void pci_aer_clear_device_status(struct pci_dev *dev)
384 {
385 	u16 sta;
386 
387 	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
388 	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
389 }
390 
391 int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
392 {
393 	int pos;
394 	u32 status, sev;
395 
396 	pos = dev->aer_cap;
397 	if (!pos)
398 		return -EIO;
399 
400 	if (pcie_aer_get_firmware_first(dev))
401 		return -EIO;
402 
403 	/* Clear status bits for ERR_NONFATAL errors only */
404 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
405 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
406 	status &= ~sev;
407 	if (status)
408 		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
409 
410 	return 0;
411 }
412 EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
413 
414 void pci_aer_clear_fatal_status(struct pci_dev *dev)
415 {
416 	int pos;
417 	u32 status, sev;
418 
419 	pos = dev->aer_cap;
420 	if (!pos)
421 		return;
422 
423 	if (pcie_aer_get_firmware_first(dev))
424 		return;
425 
426 	/* Clear status bits for ERR_FATAL errors only */
427 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
428 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
429 	status &= sev;
430 	if (status)
431 		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
432 }
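/*
 * Note (added for clarity): pci_cleanup_aer_uncorrect_error_status() and
 * pci_aer_clear_fatal_status() above are complementary - both read
 * PCI_ERR_UNCOR_STATUS and PCI_ERR_UNCOR_SEVER, but the former keeps only
 * the bits whose severity bit is clear (ERR_NONFATAL) while the latter
 * keeps only the bits whose severity bit is set (ERR_FATAL).  For example,
 * with status = 0x00004020 and sev = 0x00004000, the nonfatal path clears
 * only bit 5 (0x20) and the fatal path clears only bit 14 (0x4000).
 */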
433 
434 int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
435 {
436 	int pos;
437 	u32 status;
438 	int port_type;
439 
440 	if (!pci_is_pcie(dev))
441 		return -ENODEV;
442 
443 	pos = dev->aer_cap;
444 	if (!pos)
445 		return -EIO;
446 
447 	if (pcie_aer_get_firmware_first(dev))
448 		return -EIO;
449 
450 	port_type = pci_pcie_type(dev);
451 	if (port_type == PCI_EXP_TYPE_ROOT_PORT) {
452 		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
453 		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, status);
454 	}
455 
456 	pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
457 	pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status);
458 
459 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
460 	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
461 
462 	return 0;
463 }
464 
465 void pci_aer_init(struct pci_dev *dev)
466 {
467 	dev->aer_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
468 
469 	if (dev->aer_cap)
470 		dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL);
471 
472 	pci_cleanup_aer_error_status_regs(dev);
473 }
474 
475 void pci_aer_exit(struct pci_dev *dev)
476 {
477 	kfree(dev->aer_stats);
478 	dev->aer_stats = NULL;
479 }
480 
481 #define AER_AGENT_RECEIVER		0
482 #define AER_AGENT_REQUESTER		1
483 #define AER_AGENT_COMPLETER		2
484 #define AER_AGENT_TRANSMITTER		3
485 
486 #define AER_AGENT_REQUESTER_MASK(t)	((t == AER_CORRECTABLE) ?	\
487 	0 : (PCI_ERR_UNC_COMP_TIME|PCI_ERR_UNC_UNSUP))
488 #define AER_AGENT_COMPLETER_MASK(t)	((t == AER_CORRECTABLE) ?	\
489 	0 : PCI_ERR_UNC_COMP_ABORT)
490 #define AER_AGENT_TRANSMITTER_MASK(t)	((t == AER_CORRECTABLE) ?	\
491 	(PCI_ERR_COR_REP_ROLL|PCI_ERR_COR_REP_TIMER) : 0)
492 
493 #define AER_GET_AGENT(t, e)						\
494 	((e & AER_AGENT_COMPLETER_MASK(t)) ? AER_AGENT_COMPLETER :	\
495 	(e & AER_AGENT_REQUESTER_MASK(t)) ? AER_AGENT_REQUESTER :	\
496 	(e & AER_AGENT_TRANSMITTER_MASK(t)) ? AER_AGENT_TRANSMITTER :	\
497 	AER_AGENT_RECEIVER)
498 
499 #define AER_PHYSICAL_LAYER_ERROR	0
500 #define AER_DATA_LINK_LAYER_ERROR	1
501 #define AER_TRANSACTION_LAYER_ERROR	2
502 
503 #define AER_PHYSICAL_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ?	\
504 	PCI_ERR_COR_RCVR : 0)
505 #define AER_DATA_LINK_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ?	\
506 	(PCI_ERR_COR_BAD_TLP|						\
507 	PCI_ERR_COR_BAD_DLLP|						\
508 	PCI_ERR_COR_REP_ROLL|						\
509 	PCI_ERR_COR_REP_TIMER) : PCI_ERR_UNC_DLP)
510 
511 #define AER_GET_LAYER_ERROR(t, e)					\
512 	((e & AER_PHYSICAL_LAYER_ERROR_MASK(t)) ? AER_PHYSICAL_LAYER_ERROR : \
513 	(e & AER_DATA_LINK_LAYER_ERROR_MASK(t)) ? AER_DATA_LINK_LAYER_ERROR : \
514 	AER_TRANSACTION_LAYER_ERROR)
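/*
 * Worked example (added for illustration) of the two decode macros above:
 * an uncorrectable PCI_ERR_UNC_COMP_ABORT (Completer Abort) matches
 * AER_AGENT_COMPLETER_MASK() but neither layer mask, so
 *
 *	AER_GET_AGENT(AER_FATAL, PCI_ERR_UNC_COMP_ABORT) == AER_AGENT_COMPLETER
 *	AER_GET_LAYER_ERROR(AER_FATAL, PCI_ERR_UNC_COMP_ABORT)
 *					== AER_TRANSACTION_LAYER_ERROR
 *
 * while a correctable PCI_ERR_COR_RCVR (Receiver Error) decodes to
 * AER_AGENT_RECEIVER and AER_PHYSICAL_LAYER_ERROR.
 */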
515 
516 /*
517  * AER error strings
518  */
519 static const char *aer_error_severity_string[] = {
520 	"Uncorrected (Non-Fatal)",
521 	"Uncorrected (Fatal)",
522 	"Corrected"
523 };
524 
525 static const char *aer_error_layer[] = {
526 	"Physical Layer",
527 	"Data Link Layer",
528 	"Transaction Layer"
529 };
530 
531 static const char *aer_correctable_error_string[AER_MAX_TYPEOF_COR_ERRS] = {
532 	"RxErr",			/* Bit Position 0	*/
533 	NULL,
534 	NULL,
535 	NULL,
536 	NULL,
537 	NULL,
538 	"BadTLP",			/* Bit Position 6	*/
539 	"BadDLLP",			/* Bit Position 7	*/
540 	"Rollover",			/* Bit Position 8	*/
541 	NULL,
542 	NULL,
543 	NULL,
544 	"Timeout",			/* Bit Position 12	*/
545 	"NonFatalErr",			/* Bit Position 13	*/
546 	"CorrIntErr",			/* Bit Position 14	*/
547 	"HeaderOF",			/* Bit Position 15	*/
548 };
549 
550 static const char *aer_uncorrectable_error_string[AER_MAX_TYPEOF_UNCOR_ERRS] = {
551 	"Undefined",			/* Bit Position 0	*/
552 	NULL,
553 	NULL,
554 	NULL,
555 	"DLP",				/* Bit Position 4	*/
556 	"SDES",				/* Bit Position 5	*/
557 	NULL,
558 	NULL,
559 	NULL,
560 	NULL,
561 	NULL,
562 	NULL,
563 	"TLP",				/* Bit Position 12	*/
564 	"FCP",				/* Bit Position 13	*/
565 	"CmpltTO",			/* Bit Position 14	*/
566 	"CmpltAbrt",			/* Bit Position 15	*/
567 	"UnxCmplt",			/* Bit Position 16	*/
568 	"RxOF",				/* Bit Position 17	*/
569 	"MalfTLP",			/* Bit Position 18	*/
570 	"ECRC",				/* Bit Position 19	*/
571 	"UnsupReq",			/* Bit Position 20	*/
572 	"ACSViol",			/* Bit Position 21	*/
573 	"UncorrIntErr",			/* Bit Position 22	*/
574 	"BlockedTLP",			/* Bit Position 23	*/
575 	"AtomicOpBlocked",		/* Bit Position 24	*/
576 	"TLPBlockedErr",		/* Bit Position 25	*/
577 };
578 
579 static const char *aer_agent_string[] = {
580 	"Receiver ID",
581 	"Requester ID",
582 	"Completer ID",
583 	"Transmitter ID"
584 };
585 
586 #define aer_stats_dev_attr(name, stats_array, strings_array,		\
587 			   total_string, total_field)			\
588 	static ssize_t							\
589 	name##_show(struct device *dev, struct device_attribute *attr,	\
590 		     char *buf)						\
591 {									\
592 	unsigned int i;							\
593 	char *str = buf;						\
594 	struct pci_dev *pdev = to_pci_dev(dev);				\
595 	u64 *stats = pdev->aer_stats->stats_array;			\
596 									\
597 	for (i = 0; i < ARRAY_SIZE(strings_array); i++) {		\
598 		if (strings_array[i])					\
599 			str += sprintf(str, "%s %llu\n",		\
600 				       strings_array[i], stats[i]);	\
601 		else if (stats[i])					\
602 			str += sprintf(str, #stats_array "_bit[%d] %llu\n",\
603 				       i, stats[i]);			\
604 	}								\
605 	str += sprintf(str, "TOTAL_%s %llu\n", total_string,		\
606 		       pdev->aer_stats->total_field);			\
607 	return str-buf;							\
608 }									\
609 static DEVICE_ATTR_RO(name)
610 
611 aer_stats_dev_attr(aer_dev_correctable, dev_cor_errs,
612 		   aer_correctable_error_string, "ERR_COR",
613 		   dev_total_cor_errs);
614 aer_stats_dev_attr(aer_dev_fatal, dev_fatal_errs,
615 		   aer_uncorrectable_error_string, "ERR_FATAL",
616 		   dev_total_fatal_errs);
617 aer_stats_dev_attr(aer_dev_nonfatal, dev_nonfatal_errs,
618 		   aer_uncorrectable_error_string, "ERR_NONFATAL",
619 		   dev_total_nonfatal_errs);
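/*
 * Example (illustrative): with the attributes generated above, reading
 * /sys/bus/pci/devices/<B:D.F>/aer_dev_correctable might produce output
 * along the lines of:
 *
 *	RxErr 0
 *	BadTLP 2
 *	BadDLLP 1
 *	Rollover 0
 *	Timeout 0
 *	NonFatalErr 0
 *	CorrIntErr 0
 *	HeaderOF 0
 *	TOTAL_ERR_COR 3
 *
 * i.e. one "<string> <count>" line per named bit plus the running total.
 */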
620 
621 #define aer_stats_rootport_attr(name, field)				\
622 	static ssize_t							\
623 	name##_show(struct device *dev, struct device_attribute *attr,	\
624 		     char *buf)						\
625 {									\
626 	struct pci_dev *pdev = to_pci_dev(dev);				\
627 	return sprintf(buf, "%llu\n", pdev->aer_stats->field);		\
628 }									\
629 static DEVICE_ATTR_RO(name)
630 
631 aer_stats_rootport_attr(aer_rootport_total_err_cor,
632 			 rootport_total_cor_errs);
633 aer_stats_rootport_attr(aer_rootport_total_err_fatal,
634 			 rootport_total_fatal_errs);
635 aer_stats_rootport_attr(aer_rootport_total_err_nonfatal,
636 			 rootport_total_nonfatal_errs);
637 
638 static struct attribute *aer_stats_attrs[] __ro_after_init = {
639 	&dev_attr_aer_dev_correctable.attr,
640 	&dev_attr_aer_dev_fatal.attr,
641 	&dev_attr_aer_dev_nonfatal.attr,
642 	&dev_attr_aer_rootport_total_err_cor.attr,
643 	&dev_attr_aer_rootport_total_err_fatal.attr,
644 	&dev_attr_aer_rootport_total_err_nonfatal.attr,
645 	NULL
646 };
647 
648 static umode_t aer_stats_attrs_are_visible(struct kobject *kobj,
649 					   struct attribute *a, int n)
650 {
651 	struct device *dev = kobj_to_dev(kobj);
652 	struct pci_dev *pdev = to_pci_dev(dev);
653 
654 	if (!pdev->aer_stats)
655 		return 0;
656 
657 	if ((a == &dev_attr_aer_rootport_total_err_cor.attr ||
658 	     a == &dev_attr_aer_rootport_total_err_fatal.attr ||
659 	     a == &dev_attr_aer_rootport_total_err_nonfatal.attr) &&
660 	    pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT)
661 		return 0;
662 
663 	return a->mode;
664 }
665 
666 const struct attribute_group aer_stats_attr_group = {
667 	.attrs  = aer_stats_attrs,
668 	.is_visible = aer_stats_attrs_are_visible,
669 };
670 
671 static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
672 				   struct aer_err_info *info)
673 {
674 	int status, i, max = -1;
675 	u64 *counter = NULL;
676 	struct aer_stats *aer_stats = pdev->aer_stats;
677 
678 	if (!aer_stats)
679 		return;
680 
681 	switch (info->severity) {
682 	case AER_CORRECTABLE:
683 		aer_stats->dev_total_cor_errs++;
684 		counter = &aer_stats->dev_cor_errs[0];
685 		max = AER_MAX_TYPEOF_COR_ERRS;
686 		break;
687 	case AER_NONFATAL:
688 		aer_stats->dev_total_nonfatal_errs++;
689 		counter = &aer_stats->dev_nonfatal_errs[0];
690 		max = AER_MAX_TYPEOF_UNCOR_ERRS;
691 		break;
692 	case AER_FATAL:
693 		aer_stats->dev_total_fatal_errs++;
694 		counter = &aer_stats->dev_fatal_errs[0];
695 		max = AER_MAX_TYPEOF_UNCOR_ERRS;
696 		break;
697 	}
698 
699 	status = (info->status & ~info->mask);
700 	for (i = 0; i < max; i++)
701 		if (status & (1 << i))
702 			counter[i]++;
703 }
704 
705 static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
706 				 struct aer_err_source *e_src)
707 {
708 	struct aer_stats *aer_stats = pdev->aer_stats;
709 
710 	if (!aer_stats)
711 		return;
712 
713 	if (e_src->status & PCI_ERR_ROOT_COR_RCV)
714 		aer_stats->rootport_total_cor_errs++;
715 
716 	if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
717 		if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
718 			aer_stats->rootport_total_fatal_errs++;
719 		else
720 			aer_stats->rootport_total_nonfatal_errs++;
721 	}
722 }
723 
724 static void __print_tlp_header(struct pci_dev *dev,
725 			       struct aer_header_log_regs *t)
726 {
727 	pci_err(dev, "  TLP Header: %08x %08x %08x %08x\n",
728 		t->dw0, t->dw1, t->dw2, t->dw3);
729 }
730 
731 static void __aer_print_error(struct pci_dev *dev,
732 			      struct aer_err_info *info)
733 {
734 	int i, status;
735 	const char *errmsg = NULL;
736 	status = (info->status & ~info->mask);
737 
738 	for (i = 0; i < 32; i++) {
739 		if (!(status & (1 << i)))
740 			continue;
741 
742 		if (info->severity == AER_CORRECTABLE)
743 			errmsg = i < ARRAY_SIZE(aer_correctable_error_string) ?
744 				aer_correctable_error_string[i] : NULL;
745 		else
746 			errmsg = i < ARRAY_SIZE(aer_uncorrectable_error_string) ?
747 				aer_uncorrectable_error_string[i] : NULL;
748 
749 		if (errmsg)
750 			pci_err(dev, "   [%2d] %-22s%s\n", i, errmsg,
751 				info->first_error == i ? " (First)" : "");
752 		else
753 			pci_err(dev, "   [%2d] Unknown Error Bit%s\n",
754 				i, info->first_error == i ? " (First)" : "");
755 	}
756 	pci_dev_aer_stats_incr(dev, info);
757 }
758 
759 void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
760 {
761 	int layer, agent;
762 	int id = ((dev->bus->number << 8) | dev->devfn);
763 
764 	if (!info->status) {
765 		pci_err(dev, "PCIe Bus Error: severity=%s, type=Inaccessible, (Unregistered Agent ID)\n",
766 			aer_error_severity_string[info->severity]);
767 		goto out;
768 	}
769 
770 	layer = AER_GET_LAYER_ERROR(info->severity, info->status);
771 	agent = AER_GET_AGENT(info->severity, info->status);
772 
773 	pci_err(dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
774 		aer_error_severity_string[info->severity],
775 		aer_error_layer[layer], aer_agent_string[agent]);
776 
777 	pci_err(dev, "  device [%04x:%04x] error status/mask=%08x/%08x\n",
778 		dev->vendor, dev->device,
779 		info->status, info->mask);
780 
781 	__aer_print_error(dev, info);
782 
783 	if (info->tlp_header_valid)
784 		__print_tlp_header(dev, &info->tlp);
785 
786 out:
787 	if (info->id && info->error_dev_num > 1 && info->id == id)
788 		pci_err(dev, "  Error of this Agent is reported first\n");
789 
790 	trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
791 			info->severity, info->tlp_header_valid, &info->tlp);
792 }
793 
794 static void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
795 {
796 	u8 bus = info->id >> 8;
797 	u8 devfn = info->id & 0xff;
798 
799 	pci_info(dev, "AER: %s%s error received: %04x:%02x:%02x.%d\n",
800 		info->multi_error_valid ? "Multiple " : "",
801 		aer_error_severity_string[info->severity],
802 		pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
803 }
804 
805 #ifdef CONFIG_ACPI_APEI_PCIEAER
806 int cper_severity_to_aer(int cper_severity)
807 {
808 	switch (cper_severity) {
809 	case CPER_SEV_RECOVERABLE:
810 		return AER_NONFATAL;
811 	case CPER_SEV_FATAL:
812 		return AER_FATAL;
813 	default:
814 		return AER_CORRECTABLE;
815 	}
816 }
817 EXPORT_SYMBOL_GPL(cper_severity_to_aer);
818 
819 void cper_print_aer(struct pci_dev *dev, int aer_severity,
820 		    struct aer_capability_regs *aer)
821 {
822 	int layer, agent, tlp_header_valid = 0;
823 	u32 status, mask;
824 	struct aer_err_info info;
825 
826 	if (aer_severity == AER_CORRECTABLE) {
827 		status = aer->cor_status;
828 		mask = aer->cor_mask;
829 	} else {
830 		status = aer->uncor_status;
831 		mask = aer->uncor_mask;
832 		tlp_header_valid = status & AER_LOG_TLP_MASKS;
833 	}
834 
835 	layer = AER_GET_LAYER_ERROR(aer_severity, status);
836 	agent = AER_GET_AGENT(aer_severity, status);
837 
838 	memset(&info, 0, sizeof(info));
839 	info.severity = aer_severity;
840 	info.status = status;
841 	info.mask = mask;
842 	info.first_error = PCI_ERR_CAP_FEP(aer->cap_control);
843 
844 	pci_err(dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask);
845 	__aer_print_error(dev, &info);
846 	pci_err(dev, "aer_layer=%s, aer_agent=%s\n",
847 		aer_error_layer[layer], aer_agent_string[agent]);
848 
849 	if (aer_severity != AER_CORRECTABLE)
850 		pci_err(dev, "aer_uncor_severity: 0x%08x\n",
851 			aer->uncor_severity);
852 
853 	if (tlp_header_valid)
854 		__print_tlp_header(dev, &aer->header_log);
855 
856 	trace_aer_event(dev_name(&dev->dev), (status & ~mask),
857 			aer_severity, tlp_header_valid, &aer->header_log);
858 }
859 #endif
860 
861 /**
862  * add_error_device - list device to be handled
863  * @e_info: pointer to error info
864  * @dev: pointer to pci_dev to be added
865  */
866 static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
867 {
868 	if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
869 		e_info->dev[e_info->error_dev_num] = dev;
870 		e_info->error_dev_num++;
871 		return 0;
872 	}
873 	return -ENOSPC;
874 }
875 
876 /**
877  * is_error_source - check whether the device is the source of the reported error
878  * @dev: pointer to pci_dev to be checked
879  * @e_info: pointer to reported error info
880  */
881 static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
882 {
883 	int pos;
884 	u32 status, mask;
885 	u16 reg16;
886 
887 	/*
888 	 * When the bus ID is equal to 0, it might be a bad ID
889 	 * reported by the root port.
890 	 */
891 	if ((PCI_BUS_NUM(e_info->id) != 0) &&
892 	    !(dev->bus->bus_flags & PCI_BUS_FLAGS_NO_AERSID)) {
893 		/* Device ID match? */
894 		if (e_info->id == ((dev->bus->number << 8) | dev->devfn))
895 			return true;
896 
897 		/* Continue id comparing if there is no multiple error */
898 		if (!e_info->multi_error_valid)
899 			return false;
900 	}
901 
902 	/*
903 	 * We check the AER status registers to find the possible reporter
904 	 *      when any of the following holds:
905 	 *      1) the bus ID is equal to 0 (some ports might lose the bus
906 	 *              ID of the error source ID);
907 	 *      2) the bus flag PCI_BUS_FLAGS_NO_AERSID is set;
908 	 *      3) there are multiple errors and the prior ID comparison fails.
909 	 */
910 	if (atomic_read(&dev->enable_cnt) == 0)
911 		return false;
912 
913 	/* Check if AER is enabled */
914 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &reg16);
915 	if (!(reg16 & PCI_EXP_AER_FLAGS))
916 		return false;
917 
918 	pos = dev->aer_cap;
919 	if (!pos)
920 		return false;
921 
922 	/* Check if error is recorded */
923 	if (e_info->severity == AER_CORRECTABLE) {
924 		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
925 		pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask);
926 	} else {
927 		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
928 		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
929 	}
930 	if (status & ~mask)
931 		return true;
932 
933 	return false;
934 }
935 
936 static int find_device_iter(struct pci_dev *dev, void *data)
937 {
938 	struct aer_err_info *e_info = (struct aer_err_info *)data;
939 
940 	if (is_error_source(dev, e_info)) {
941 		/* List this device */
942 		if (add_error_device(e_info, dev)) {
943 			/* We cannot handle more... Stop iteration */
944 			/* TODO: Should print error message here? */
945 			return 1;
946 		}
947 
948 		/* If there is only a single error, stop iteration */
949 		if (!e_info->multi_error_valid)
950 			return 1;
951 	}
952 	return 0;
953 }
954 
955 /**
956  * find_source_device - search through device hierarchy for source device
957  * @parent: pointer to Root Port pci_dev data structure
958  * @e_info: detailed error information, including the error source ID
959  *
960  * Return true if found.
961  *
962  * Invoked by DPC when error is detected at the Root Port.
963  * The caller of this function must set id, severity, and multi_error_valid of
964  * the struct aer_err_info pointed to by @e_info properly.  This function must fill
965  * e_info->error_dev_num and e_info->dev[], based on the given information.
966  */
967 static bool find_source_device(struct pci_dev *parent,
968 		struct aer_err_info *e_info)
969 {
970 	struct pci_dev *dev = parent;
971 	int result;
972 
973 	/* Must reset in this function */
974 	e_info->error_dev_num = 0;
975 
976 	/* Is Root Port an agent that sends error message? */
977 	result = find_device_iter(dev, e_info);
978 	if (result)
979 		return true;
980 
981 	pci_walk_bus(parent->subordinate, find_device_iter, e_info);
982 
983 	if (!e_info->error_dev_num) {
984 		pci_printk(KERN_DEBUG, parent, "can't find device of ID%04x\n",
985 			   e_info->id);
986 		return false;
987 	}
988 	return true;
989 }
990 
991 /**
992  * handle_error_source - handle the logging of an error into an event log
993  * @dev: pointer to pci_dev data structure of error source device
994  * @info: comprehensive error information
995  *
996  * Invoked when an error is detected by the Root Port.
997  */
998 static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
999 {
1000 	int pos;
1001 
1002 	if (info->severity == AER_CORRECTABLE) {
1003 		/*
1004 		 * Correctable error does not need software intervention.
1005 		 * No need to go through error recovery process.
1006 		 */
1007 		pos = dev->aer_cap;
1008 		if (pos)
1009 			pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
1010 					info->status);
1011 		pci_aer_clear_device_status(dev);
1012 	} else if (info->severity == AER_NONFATAL)
1013 		pcie_do_nonfatal_recovery(dev);
1014 	else if (info->severity == AER_FATAL)
1015 		pcie_do_fatal_recovery(dev, PCIE_PORT_SERVICE_AER);
1016 }
1017 
1018 #ifdef CONFIG_ACPI_APEI_PCIEAER
1019 
1020 #define AER_RECOVER_RING_ORDER		4
1021 #define AER_RECOVER_RING_SIZE		(1 << AER_RECOVER_RING_ORDER)
1022 
1023 struct aer_recover_entry {
1024 	u8	bus;
1025 	u8	devfn;
1026 	u16	domain;
1027 	int	severity;
1028 	struct aer_capability_regs *regs;
1029 };
1030 
1031 static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
1032 		    AER_RECOVER_RING_SIZE);
1033 
1034 static void aer_recover_work_func(struct work_struct *work)
1035 {
1036 	struct aer_recover_entry entry;
1037 	struct pci_dev *pdev;
1038 
1039 	while (kfifo_get(&aer_recover_ring, &entry)) {
1040 		pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
1041 						   entry.devfn);
1042 		if (!pdev) {
1043 			pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
1044 			       entry.domain, entry.bus,
1045 			       PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
1046 			continue;
1047 		}
1048 		cper_print_aer(pdev, entry.severity, entry.regs);
1049 		if (entry.severity == AER_NONFATAL)
1050 			pcie_do_nonfatal_recovery(pdev);
1051 		else if (entry.severity == AER_FATAL)
1052 			pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_AER);
1053 		pci_dev_put(pdev);
1054 	}
1055 }
1056 
1057 /*
1058  * Mutual exclusion for writers of aer_recover_ring; the reader side doesn't
1059  * need a lock, because there is only one reader and no lock is needed
1060  * between the reader and a writer.
1061  */
1062 static DEFINE_SPINLOCK(aer_recover_ring_lock);
1063 static DECLARE_WORK(aer_recover_work, aer_recover_work_func);
1064 
1065 void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
1066 		       int severity, struct aer_capability_regs *aer_regs)
1067 {
1068 	unsigned long flags;
1069 	struct aer_recover_entry entry = {
1070 		.bus		= bus,
1071 		.devfn		= devfn,
1072 		.domain		= domain,
1073 		.severity	= severity,
1074 		.regs		= aer_regs,
1075 	};
1076 
1077 	spin_lock_irqsave(&aer_recover_ring_lock, flags);
1078 	if (kfifo_put(&aer_recover_ring, entry))
1079 		schedule_work(&aer_recover_work);
1080 	else
1081 		pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n",
1082 		       domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1083 	spin_unlock_irqrestore(&aer_recover_ring_lock, flags);
1084 }
1085 EXPORT_SYMBOL_GPL(aer_recover_queue);
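/*
 * Usage sketch (added for illustration): the APEI/GHES code is the expected
 * caller of aer_recover_queue().  After decoding a CPER PCIe error record it
 * queues the error roughly as follows (field names abbreviated):
 *
 *	devfn = PCI_DEVFN(device, function);
 *	aer_recover_queue(segment, bus, devfn,
 *			  cper_severity_to_aer(gdata_severity),
 *			  (struct aer_capability_regs *)aer_info);
 *
 * The work item above then looks up the pci_dev and runs the normal
 * nonfatal/fatal recovery paths in process context.
 */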
1086 #endif
1087 
1088 /**
1089  * aer_get_device_error_info - read error status from dev and store it in info
1090  * @dev: pointer to the device expected to have an error record
1091  * @info: pointer to structure to store the error record
1092  *
1093  * Return 1 on success, 0 on error.
1094  *
1095  * Note that @info is reused among all error devices. Clear fields properly.
1096  */
1097 int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
1098 {
1099 	int pos, temp;
1100 
1101 	/* Must reset in this function */
1102 	info->status = 0;
1103 	info->tlp_header_valid = 0;
1104 
1105 	pos = dev->aer_cap;
1106 
1107 	/* The device might not support AER */
1108 	if (!pos)
1109 		return 0;
1110 
1111 	if (info->severity == AER_CORRECTABLE) {
1112 		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS,
1113 			&info->status);
1114 		pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK,
1115 			&info->mask);
1116 		if (!(info->status & ~info->mask))
1117 			return 0;
1118 	} else if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
1119 		info->severity == AER_NONFATAL) {
1120 
1121 		/* Link is still healthy for IO reads */
1122 		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
1123 			&info->status);
1124 		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK,
1125 			&info->mask);
1126 		if (!(info->status & ~info->mask))
1127 			return 0;
1128 
1129 		/* Get First Error Pointer */
1130 		pci_read_config_dword(dev, pos + PCI_ERR_CAP, &temp);
1131 		info->first_error = PCI_ERR_CAP_FEP(temp);
1132 
1133 		if (info->status & AER_LOG_TLP_MASKS) {
1134 			info->tlp_header_valid = 1;
1135 			pci_read_config_dword(dev,
1136 				pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
1137 			pci_read_config_dword(dev,
1138 				pos + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
1139 			pci_read_config_dword(dev,
1140 				pos + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
1141 			pci_read_config_dword(dev,
1142 				pos + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
1143 		}
1144 	}
1145 
1146 	return 1;
1147 }
1148 
1149 static inline void aer_process_err_devices(struct aer_err_info *e_info)
1150 {
1151 	int i;
1152 
1153 	/* Report all errors before handling them, so records are not lost to resets etc. */
1154 	for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
1155 		if (aer_get_device_error_info(e_info->dev[i], e_info))
1156 			aer_print_error(e_info->dev[i], e_info);
1157 	}
1158 	for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
1159 		if (aer_get_device_error_info(e_info->dev[i], e_info))
1160 			handle_error_source(e_info->dev[i], e_info);
1161 	}
1162 }
1163 
1164 /**
1165  * aer_isr_one_error - consume an error detected by root port
1166  * @rpc: pointer to the aer_rpc data of the Root Port which holds an error
1167  * @e_src: pointer to an error source
1168  */
1169 static void aer_isr_one_error(struct aer_rpc *rpc,
1170 		struct aer_err_source *e_src)
1171 {
1172 	struct pci_dev *pdev = rpc->rpd;
1173 	struct aer_err_info *e_info = &rpc->e_info;
1174 
1175 	pci_rootport_aer_stats_incr(pdev, e_src);
1176 
1177 	/*
1178 	 * There is a possibility that both a correctable error and an
1179 	 * uncorrectable error are logged. Report the correctable error first.
1180 	 */
1181 	if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
1182 		e_info->id = ERR_COR_ID(e_src->id);
1183 		e_info->severity = AER_CORRECTABLE;
1184 
1185 		if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
1186 			e_info->multi_error_valid = 1;
1187 		else
1188 			e_info->multi_error_valid = 0;
1189 		aer_print_port_info(pdev, e_info);
1190 
1191 		if (find_source_device(pdev, e_info))
1192 			aer_process_err_devices(e_info);
1193 	}
1194 
1195 	if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
1196 		e_info->id = ERR_UNCOR_ID(e_src->id);
1197 
1198 		if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
1199 			e_info->severity = AER_FATAL;
1200 		else
1201 			e_info->severity = AER_NONFATAL;
1202 
1203 		if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
1204 			e_info->multi_error_valid = 1;
1205 		else
1206 			e_info->multi_error_valid = 0;
1207 
1208 		aer_print_port_info(pdev, e_info);
1209 
1210 		if (find_source_device(pdev, e_info))
1211 			aer_process_err_devices(e_info);
1212 	}
1213 }
1214 
1215 /**
1216  * get_e_source - retrieve an error source
1217  * @rpc: pointer to the aer_rpc data of the Root Port which holds an error
1218  * @e_src: pointer to store retrieved error source
1219  *
1220  * Return 1 if an error source is retrieved, otherwise 0.
1221  *
1222  * Invoked by DPC handler to consume an error.
1223  */
1224 static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src)
1225 {
1226 	unsigned long flags;
1227 
1228 	/* Lock access to Root error producer/consumer index */
1229 	spin_lock_irqsave(&rpc->e_lock, flags);
1230 	if (rpc->prod_idx == rpc->cons_idx) {
1231 		spin_unlock_irqrestore(&rpc->e_lock, flags);
1232 		return 0;
1233 	}
1234 
1235 	*e_src = rpc->e_sources[rpc->cons_idx];
1236 	rpc->cons_idx++;
1237 	if (rpc->cons_idx == AER_ERROR_SOURCES_MAX)
1238 		rpc->cons_idx = 0;
1239 	spin_unlock_irqrestore(&rpc->e_lock, flags);
1240 
1241 	return 1;
1242 }
1243 
1244 /**
1245  * aer_isr - consume errors detected by root port
1246  * @work: definition of this work item
1247  *
1248  * Invoked, as a DPC (deferred work item), when the Root Port records a newly detected error
1249  */
1250 static void aer_isr(struct work_struct *work)
1251 {
1252 	struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
1253 	struct aer_err_source uninitialized_var(e_src);
1254 
1255 	mutex_lock(&rpc->rpc_mutex);
1256 	while (get_e_source(rpc, &e_src))
1257 		aer_isr_one_error(rpc, &e_src);
1258 	mutex_unlock(&rpc->rpc_mutex);
1259 }
1260 
1261 /**
1262  * aer_irq - Root Port's ISR
1263  * @irq: IRQ assigned to Root Port
1264  * @context: pointer to Root Port data structure
1265  *
1266  * Invoked when Root Port detects AER messages.
1267  */
1268 irqreturn_t aer_irq(int irq, void *context)
1269 {
1270 	unsigned int status, id;
1271 	struct pcie_device *pdev = (struct pcie_device *)context;
1272 	struct aer_rpc *rpc = get_service_data(pdev);
1273 	int next_prod_idx;
1274 	unsigned long flags;
1275 	int pos;
1276 
1277 	pos = pdev->port->aer_cap;
1278 	/*
1279 	 * Must lock access to Root Error Status Reg, Root Error ID Reg,
1280 	 * and Root error producer/consumer index
1281 	 */
1282 	spin_lock_irqsave(&rpc->e_lock, flags);
1283 
1284 	/* Read error status */
1285 	pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, &status);
1286 	if (!(status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV))) {
1287 		spin_unlock_irqrestore(&rpc->e_lock, flags);
1288 		return IRQ_NONE;
1289 	}
1290 
1291 	/* Read error source and clear error status */
1292 	pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_ERR_SRC, &id);
1293 	pci_write_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, status);
1294 
1295 	/* Store error source for later DPC handler */
1296 	next_prod_idx = rpc->prod_idx + 1;
1297 	if (next_prod_idx == AER_ERROR_SOURCES_MAX)
1298 		next_prod_idx = 0;
1299 	if (next_prod_idx == rpc->cons_idx) {
1300 		/*
1301 		 * Error storm condition - possibly the same error keeps occurring.
1302 		 * Drop the error.
1303 		 */
1304 		spin_unlock_irqrestore(&rpc->e_lock, flags);
1305 		return IRQ_HANDLED;
1306 	}
1307 	rpc->e_sources[rpc->prod_idx].status =  status;
1308 	rpc->e_sources[rpc->prod_idx].id = id;
1309 	rpc->prod_idx = next_prod_idx;
1310 	spin_unlock_irqrestore(&rpc->e_lock, flags);
1311 
1312 	/*  Invoke DPC handler */
1313 	schedule_work(&rpc->dpc_handler);
1314 
1315 	return IRQ_HANDLED;
1316 }
1317 EXPORT_SYMBOL_GPL(aer_irq);
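/*
 * Note (added for clarity): aer_irq() above is the producer side of the
 * simple circular buffer embedded in struct aer_rpc; get_e_source() and
 * aer_isr() are the consumer side, running from the scheduled work item.
 * With AER_ERROR_SOURCES_MAX == 100 the ring holds at most 99 outstanding
 * error sources, because one slot is kept empty to distinguish "full" from
 * "empty"; when prod_idx would catch up with cons_idx, the new error is
 * dropped as an error storm.
 */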
1318 
1319 static int set_device_error_reporting(struct pci_dev *dev, void *data)
1320 {
1321 	bool enable = *((bool *)data);
1322 	int type = pci_pcie_type(dev);
1323 
1324 	if ((type == PCI_EXP_TYPE_ROOT_PORT) ||
1325 	    (type == PCI_EXP_TYPE_UPSTREAM) ||
1326 	    (type == PCI_EXP_TYPE_DOWNSTREAM)) {
1327 		if (enable)
1328 			pci_enable_pcie_error_reporting(dev);
1329 		else
1330 			pci_disable_pcie_error_reporting(dev);
1331 	}
1332 
1333 	if (enable)
1334 		pcie_set_ecrc_checking(dev);
1335 
1336 	return 0;
1337 }
1338 
1339 /**
1340  * set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports.
1341  * @dev: pointer to root port's pci_dev data structure
1342  * @enable: true = enable error reporting, false = disable error reporting.
1343  */
1344 static void set_downstream_devices_error_reporting(struct pci_dev *dev,
1345 						   bool enable)
1346 {
1347 	set_device_error_reporting(dev, &enable);
1348 
1349 	if (!dev->subordinate)
1350 		return;
1351 	pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
1352 }
1353 
1354 /**
1355  * aer_enable_rootport - enable Root Port's interrupts when receiving messages
1356  * @rpc: pointer to a Root Port data structure
1357  *
1358  * Invoked when PCIe bus loads AER service driver.
1359  */
1360 static void aer_enable_rootport(struct aer_rpc *rpc)
1361 {
1362 	struct pci_dev *pdev = rpc->rpd;
1363 	int aer_pos;
1364 	u16 reg16;
1365 	u32 reg32;
1366 
1367 	/* Clear PCIe Capability's Device Status */
1368 	pcie_capability_read_word(pdev, PCI_EXP_DEVSTA, &reg16);
1369 	pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, reg16);
1370 
1371 	/* Disable system error generation in response to error messages */
1372 	pcie_capability_clear_word(pdev, PCI_EXP_RTCTL,
1373 				   SYSTEM_ERROR_INTR_ON_MESG_MASK);
1374 
1375 	aer_pos = pdev->aer_cap;
1376 	/* Clear error status */
1377 	pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, &reg32);
1378 	pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32);
1379 	pci_read_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, &reg32);
1380 	pci_write_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, reg32);
1381 	pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, &reg32);
1382 	pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32);
1383 
1384 	/*
1385 	 * Enable error reporting for the root port device and downstream port
1386 	 * devices.
1387 	 */
1388 	set_downstream_devices_error_reporting(pdev, true);
1389 
1390 	/* Enable Root Port's interrupt in response to error messages */
1391 	pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, &reg32);
1392 	reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
1393 	pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, reg32);
1394 }
1395 
1396 /**
1397  * aer_disable_rootport - disable Root Port's interrupts when receiving messages
1398  * @rpc: pointer to a Root Port data structure
1399  *
1400  * Invoked when PCIe bus unloads AER service driver.
1401  */
1402 static void aer_disable_rootport(struct aer_rpc *rpc)
1403 {
1404 	struct pci_dev *pdev = rpc->rpd;
1405 	u32 reg32;
1406 	int pos;
1407 
1408 	/*
1409 	 * Disable error reporting for the root port device and downstream port
1410 	 * devices.
1411 	 */
1412 	set_downstream_devices_error_reporting(pdev, false);
1413 
1414 	pos = pdev->aer_cap;
1415 	/* Disable Root's interrupt in response to error messages */
1416 	pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
1417 	reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
1418 	pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, reg32);
1419 
1420 	/* Clear Root's error status reg */
1421 	pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, &reg32);
1422 	pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, reg32);
1423 }
1424 
1425 /**
1426  * aer_alloc_rpc - allocate Root Port data structure
1427  * @dev: pointer to the pcie_dev data structure
1428  *
1429  * Invoked when Root Port's AER service is loaded.
1430  */
1431 static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
1432 {
1433 	struct aer_rpc *rpc;
1434 
1435 	rpc = kzalloc(sizeof(struct aer_rpc), GFP_KERNEL);
1436 	if (!rpc)
1437 		return NULL;
1438 
1439 	/* Initialize Root lock access, e_lock, to Root Error Status Reg */
1440 	spin_lock_init(&rpc->e_lock);
1441 
1442 	rpc->rpd = dev->port;
1443 	INIT_WORK(&rpc->dpc_handler, aer_isr);
1444 	mutex_init(&rpc->rpc_mutex);
1445 
1446 	/* Use PCIe bus function to store rpc into PCIe device */
1447 	set_service_data(dev, rpc);
1448 
1449 	return rpc;
1450 }
1451 
1452 /**
1453  * aer_remove - clean up resources
1454  * @dev: pointer to the pcie_dev data structure
1455  *
1456  * Invoked when PCI Express bus unloads or AER probe fails.
1457  */
1458 static void aer_remove(struct pcie_device *dev)
1459 {
1460 	struct aer_rpc *rpc = get_service_data(dev);
1461 
1462 	if (rpc) {
1463 		/* If the interrupt service was registered, it must be freed. */
1464 		if (rpc->isr)
1465 			free_irq(dev->irq, dev);
1466 
1467 		flush_work(&rpc->dpc_handler);
1468 		aer_disable_rootport(rpc);
1469 		kfree(rpc);
1470 		set_service_data(dev, NULL);
1471 	}
1472 }
1473 
1474 /**
1475  * aer_probe - initialize resources
1476  * @dev: pointer to the pcie_dev data structure
1477  *
1478  * Invoked when PCI Express bus loads AER service driver.
1479  */
1480 static int aer_probe(struct pcie_device *dev)
1481 {
1482 	int status;
1483 	struct aer_rpc *rpc;
1484 	struct device *device = &dev->port->dev;
1485 
1486 	/* Alloc rpc data structure */
1487 	rpc = aer_alloc_rpc(dev);
1488 	if (!rpc) {
1489 		dev_printk(KERN_DEBUG, device, "alloc AER rpc failed\n");
1490 		aer_remove(dev);
1491 		return -ENOMEM;
1492 	}
1493 
1494 	/* Request IRQ ISR */
1495 	status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", dev);
1496 	if (status) {
1497 		dev_printk(KERN_DEBUG, device, "request AER IRQ %d failed\n",
1498 			   dev->irq);
1499 		aer_remove(dev);
1500 		return status;
1501 	}
1502 
1503 	rpc->isr = 1;
1504 
1505 	aer_enable_rootport(rpc);
1506 	dev_info(device, "AER enabled with IRQ %d\n", dev->irq);
1507 	return 0;
1508 }
1509 
1510 /**
1511  * aer_root_reset - reset link on Root Port
1512  * @dev: pointer to Root Port's pci_dev data structure
1513  *
1514  * Invoked by Port Bus driver when performing link reset at Root Port.
1515  */
1516 static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
1517 {
1518 	u32 reg32;
1519 	int pos;
1520 	int rc;
1521 
1522 	pos = dev->aer_cap;
1523 
1524 	/* Disable Root's interrupt in response to error messages */
1525 	pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
1526 	reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
1527 	pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
1528 
1529 	rc = pci_bridge_secondary_bus_reset(dev);
1530 	pci_printk(KERN_DEBUG, dev, "Root Port link has been reset\n");
1531 
1532 	/* Clear Root Error Status */
1533 	pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
1534 	pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, reg32);
1535 
1536 	/* Enable Root Port's interrupt in response to error messages */
1537 	pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
1538 	reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
1539 	pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
1540 
1541 	return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
1542 }
1543 
1544 /**
1545  * aer_error_resume - clean up corresponding error status bits
1546  * @dev: pointer to Root Port's pci_dev data structure
1547  *
1548  * Invoked by Port Bus driver during nonfatal recovery.
1549  */
1550 static void aer_error_resume(struct pci_dev *dev)
1551 {
1552 	pci_aer_clear_device_status(dev);
1553 	pci_cleanup_aer_uncorrect_error_status(dev);
1554 }
1555 
1556 static struct pcie_port_service_driver aerdriver = {
1557 	.name		= "aer",
1558 	.port_type	= PCI_EXP_TYPE_ROOT_PORT,
1559 	.service	= PCIE_PORT_SERVICE_AER,
1560 
1561 	.probe		= aer_probe,
1562 	.remove		= aer_remove,
1563 	.error_resume	= aer_error_resume,
1564 	.reset_link	= aer_root_reset,
1565 };
1566 
1567 /**
1568  * aer_service_init - register AER root service driver
1569  *
1570  * Invoked when AER root service driver is loaded.
1571  */
1572 static int __init aer_service_init(void)
1573 {
1574 	if (!pci_aer_available() || aer_acpi_firmware_first())
1575 		return -ENXIO;
1576 	return pcie_port_service_register(&aerdriver);
1577 }
1578 device_initcall(aer_service_init);
1579