1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * PCI Express PCI Hot Plug Driver
4  *
5  * Copyright (C) 1995,2001 Compaq Computer Corporation
6  * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
7  * Copyright (C) 2001 IBM Corp.
8  * Copyright (C) 2003-2004 Intel Corporation
9  *
10  * All rights reserved.
11  *
12  * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
13  */
14 
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/types.h>
18 #include <linux/signal.h>
19 #include <linux/jiffies.h>
20 #include <linux/kthread.h>
21 #include <linux/pci.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/interrupt.h>
24 #include <linux/time.h>
25 #include <linux/slab.h>
26 
27 #include "../pci.h"
28 #include "pciehp.h"
29 
/* Convenience accessor: the PCIe port device hosting the hotplug slot */
static inline struct pci_dev *ctrl_dev(struct controller *ctrl)
{
	return ctrl->pcie->port;
}
34 
35 static irqreturn_t pciehp_isr(int irq, void *dev_id);
36 static irqreturn_t pciehp_ist(int irq, void *dev_id);
37 static int pciehp_poll(void *data);
38 
/*
 * Set up event delivery for the controller: in poll mode spawn a
 * polling kthread, otherwise install a threaded interrupt handler
 * (pciehp_isr as the hardirq half, pciehp_ist as the thread half).
 *
 * Returns 0 on success, a negative errno otherwise.
 */
static inline int pciehp_request_irq(struct controller *ctrl)
{
	int retval, irq = ctrl->pcie->irq;

	if (pciehp_poll_mode) {
		ctrl->poll_thread = kthread_run(&pciehp_poll, ctrl,
						"pciehp_poll-%s",
						slot_name(ctrl->slot));
		return PTR_ERR_OR_ZERO(ctrl->poll_thread);
	}

	/* Installs the interrupt handler */
	retval = request_threaded_irq(irq, pciehp_isr, pciehp_ist,
				      IRQF_SHARED, MY_NAME, ctrl);
	if (retval)
		ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n",
			 irq);
	return retval;
}
58 
/* Undo pciehp_request_irq(): stop the poll thread or release the IRQ */
static inline void pciehp_free_irq(struct controller *ctrl)
{
	if (pciehp_poll_mode)
		kthread_stop(ctrl->poll_thread);
	else
		free_irq(ctrl->pcie->irq, ctrl);
}
66 
pcie_poll_cmd(struct controller * ctrl,int timeout)67 static int pcie_poll_cmd(struct controller *ctrl, int timeout)
68 {
69 	struct pci_dev *pdev = ctrl_dev(ctrl);
70 	u16 slot_status;
71 
72 	while (true) {
73 		pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
74 		if (slot_status == (u16) ~0) {
75 			ctrl_info(ctrl, "%s: no response from device\n",
76 				  __func__);
77 			return 0;
78 		}
79 
80 		if (slot_status & PCI_EXP_SLTSTA_CC) {
81 			pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
82 						   PCI_EXP_SLTSTA_CC);
83 			return 1;
84 		}
85 		if (timeout < 0)
86 			break;
87 		msleep(10);
88 		timeout -= 10;
89 	}
90 	return 0;	/* timeout */
91 }
92 
/*
 * Wait for a previously issued Slot Control command to complete, via
 * the Command Completed interrupt when enabled or by polling otherwise.
 * The budget is measured from ctrl->cmd_started: 1000 ms with
 * interrupts, 2500 ms in poll mode (polling only samples every 10 ms).
 */
static void pcie_wait_cmd(struct controller *ctrl)
{
	unsigned int msecs = pciehp_poll_mode ? 2500 : 1000;
	unsigned long duration = msecs_to_jiffies(msecs);
	unsigned long cmd_timeout = ctrl->cmd_started + duration;
	unsigned long now, timeout;
	int rc;

	/*
	 * If the controller does not generate notifications for command
	 * completions, we never need to wait between writes.
	 */
	if (NO_CMD_CMPL(ctrl))
		return;

	/* Nothing outstanding - the previous command already completed */
	if (!ctrl->cmd_busy)
		return;

	/*
	 * Even if the command has already timed out, we want to call
	 * pcie_poll_cmd() so it can clear PCI_EXP_SLTSTA_CC.
	 */
	now = jiffies;
	if (time_before_eq(cmd_timeout, now))
		timeout = 1;
	else
		timeout = cmd_timeout - now;

	/* Sleep on the waitqueue only if completion interrupts are enabled */
	if (ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE &&
	    ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE)
		rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
	else
		rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout));

	if (!rc)
		ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n",
			  ctrl->slot_ctrl,
			  jiffies_to_msecs(jiffies - ctrl->cmd_started));
}
132 
/*
 * Slot Control "Control" fields covered by the Command Completed
 * erratum below: affected controllers only set Command Completed when
 * one of these fields changes.
 */
#define CC_ERRATUM_MASK		(PCI_EXP_SLTCTL_PCC |	\
				 PCI_EXP_SLTCTL_PIC |	\
				 PCI_EXP_SLTCTL_AIC |	\
				 PCI_EXP_SLTCTL_EIC)

/*
 * pcie_do_write_cmd - read-modify-write the Slot Control register
 * @ctrl: controller to which the command is issued
 * @cmd:  new values for the bits selected by @mask
 * @mask: bitmask of Slot Control bits to be modified
 * @wait: whether to wait for the command to complete before returning
 */
static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
			      u16 mask, bool wait)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_ctrl_orig, slot_ctrl;

	mutex_lock(&ctrl->ctrl_lock);

	/*
	 * Always wait for any previous command that might still be in progress
	 */
	pcie_wait_cmd(ctrl);

	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
	if (slot_ctrl == (u16) ~0) {
		/* All-ones read: device is gone or inaccessible */
		ctrl_info(ctrl, "%s: no response from device\n", __func__);
		goto out;
	}

	slot_ctrl_orig = slot_ctrl;
	slot_ctrl &= ~mask;
	slot_ctrl |= (cmd & mask);
	ctrl->cmd_busy = 1;
	/* Make cmd_busy visible before the write that triggers completion */
	smp_mb();
	pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
	ctrl->cmd_started = jiffies;
	ctrl->slot_ctrl = slot_ctrl;

	/*
	 * Controllers with the Intel CF118 and similar errata advertise
	 * Command Completed support, but they only set Command Completed
	 * if we change the "Control" bits for power, power indicator,
	 * attention indicator, or interlock.  If we only change the
	 * "Enable" bits, they never set the Command Completed bit.
	 */
	if (pdev->broken_cmd_compl &&
	    (slot_ctrl_orig & CC_ERRATUM_MASK) == (slot_ctrl & CC_ERRATUM_MASK))
		ctrl->cmd_busy = 0;

	/*
	 * Optionally wait for the hardware to be ready for a new command,
	 * indicating completion of the above issued command.
	 */
	if (wait)
		pcie_wait_cmd(ctrl);

out:
	mutex_unlock(&ctrl->ctrl_lock);
}
187 
/**
 * pcie_write_cmd - Issue controller command
 * @ctrl: controller to which the command is issued
 * @cmd:  command value written to slot control register
 * @mask: bitmask of slot control register to be modified
 *
 * Blocks until the hardware signals command completion (or times out).
 */
static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
{
	pcie_do_write_cmd(ctrl, cmd, mask, true);
}

/* Same as above without waiting for the hardware to latch */
static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask)
{
	pcie_do_write_cmd(ctrl, cmd, mask, false);
}
204 
pciehp_check_link_active(struct controller * ctrl)205 bool pciehp_check_link_active(struct controller *ctrl)
206 {
207 	struct pci_dev *pdev = ctrl_dev(ctrl);
208 	u16 lnk_status;
209 	bool ret;
210 
211 	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
212 	ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
213 
214 	if (ret)
215 		ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
216 
217 	return ret;
218 }
219 
/* Block until the port reports the link up (pcie_wait_for_link handles
 * the timeout internally). */
static void pcie_wait_link_active(struct controller *ctrl)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);

	pcie_wait_for_link(pdev, true);
}
226 
pci_bus_check_dev(struct pci_bus * bus,int devfn)227 static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
228 {
229 	u32 l;
230 	int count = 0;
231 	int delay = 1000, step = 20;
232 	bool found = false;
233 
234 	do {
235 		found = pci_bus_read_dev_vendor_id(bus, devfn, &l, 0);
236 		count++;
237 
238 		if (found)
239 			break;
240 
241 		msleep(step);
242 		delay -= step;
243 	} while (delay > 0);
244 
245 	if (count > 1 && pciehp_debug)
246 		printk(KERN_DEBUG "pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get %08x\n",
247 			pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
248 			PCI_FUNC(devfn), count, step, l);
249 
250 	return found;
251 }
252 
/*
 * Verify that the link behind the port came up and a device answers at
 * 00.0 on the secondary bus. Returns 0 on success, -1 on link training
 * failure or if no device responds.
 */
int pciehp_check_link_status(struct controller *ctrl)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);
	bool found;
	u16 lnk_status;

	/*
	 * Data Link Layer Link Active Reporting must be capable for
	 * hot-plug capable downstream port. But old controller might
	 * not implement it. In this case, we wait for 1000 ms.
	 */
	if (ctrl->link_active_reporting)
		pcie_wait_link_active(ctrl);
	else
		msleep(1000);

	/* wait 100ms before read pci conf, and try in 1s */
	msleep(100);
	found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
					PCI_DEVFN(0, 0));

	/* ignore link or presence changes up to this point */
	if (found)
		atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
			   &ctrl->pending_events);

	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
	ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
	/* Link must have finished training and negotiated a nonzero width */
	if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
	    !(lnk_status & PCI_EXP_LNKSTA_NLW)) {
		ctrl_err(ctrl, "link training error: status %#06x\n",
			 lnk_status);
		return -1;
	}

	pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);

	if (!found)
		return -1;

	return 0;
}
295 
/*
 * Set or clear the Link Disable bit in Link Control: @enable true
 * clears it (link allowed to train), false sets it (link forced down).
 * Always returns 0.
 */
static int __pciehp_link_set(struct controller *ctrl, bool enable)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 lnk_ctrl;

	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnk_ctrl);

	/* Link Disable is the inverse of "enable" */
	lnk_ctrl = enable ? lnk_ctrl & ~PCI_EXP_LNKCTL_LD
			  : lnk_ctrl | PCI_EXP_LNKCTL_LD;

	pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnk_ctrl);
	ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl);
	return 0;
}
312 
/* Clear Link Disable so the link may train */
static int pciehp_link_enable(struct controller *ctrl)
{
	return __pciehp_link_set(ctrl, true);
}
317 
/*
 * Return the raw Attention/Power Indicator control bits from Slot
 * Control, right-shifted by 6 so the two fields selected by
 * PCI_EXP_SLTCTL_AIC|PIC land in the low bits of *status.
 * Always returns 0.
 */
int pciehp_get_raw_indicator_status(struct hotplug_slot *hotplug_slot,
				    u8 *status)
{
	struct slot *slot = hotplug_slot->private;
	struct pci_dev *pdev = ctrl_dev(slot->ctrl);
	u16 slot_ctrl;

	/* Keep config space accessible across the read */
	pci_config_pm_runtime_get(pdev);
	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
	pci_config_pm_runtime_put(pdev);
	*status = (slot_ctrl & (PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC)) >> 6;
	return 0;
}
331 
/*
 * Decode the Attention Indicator Control field of Slot Control into
 * *status: 0 = off, 1 = on, 2 = blink, 0xFF = unrecognized value.
 */
void pciehp_get_attention_status(struct slot *slot, u8 *status)
{
	struct controller *ctrl = slot->ctrl;
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_ctrl;
	u16 aic;

	/* Keep config space accessible across the read */
	pci_config_pm_runtime_get(pdev);
	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
	pci_config_pm_runtime_put(pdev);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);

	aic = slot_ctrl & PCI_EXP_SLTCTL_AIC;
	if (aic == PCI_EXP_SLTCTL_ATTN_IND_ON)
		*status = 1;	/* On */
	else if (aic == PCI_EXP_SLTCTL_ATTN_IND_BLINK)
		*status = 2;	/* Blink */
	else if (aic == PCI_EXP_SLTCTL_ATTN_IND_OFF)
		*status = 0;	/* Off */
	else
		*status = 0xFF;
}
359 
/*
 * Decode the Power Controller Control bit of Slot Control into
 * *status: 1 = power on, 0 = power off, 0xFF = unrecognized value.
 */
void pciehp_get_power_status(struct slot *slot, u8 *status)
{
	struct controller *ctrl = slot->ctrl;
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_ctrl;
	u16 pcc;

	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);

	pcc = slot_ctrl & PCI_EXP_SLTCTL_PCC;
	if (pcc == PCI_EXP_SLTCTL_PWR_ON)
		*status = 1;	/* On */
	else if (pcc == PCI_EXP_SLTCTL_PWR_OFF)
		*status = 0;	/* Off */
	else
		*status = 0xFF;
}
382 
/* Report the MRL Sensor State bit from Slot Status in *status */
void pciehp_get_latch_status(struct slot *slot, u8 *status)
{
	u16 sltsta;

	pcie_capability_read_word(ctrl_dev(slot->ctrl), PCI_EXP_SLTSTA,
				  &sltsta);
	*status = !!(sltsta & PCI_EXP_SLTSTA_MRLSS);
}
391 
/* Report the Presence Detect State bit from Slot Status in *status */
void pciehp_get_adapter_status(struct slot *slot, u8 *status)
{
	u16 sltsta;

	pcie_capability_read_word(ctrl_dev(slot->ctrl), PCI_EXP_SLTSTA,
				  &sltsta);
	*status = !!(sltsta & PCI_EXP_SLTSTA_PDS);
}
400 
pciehp_query_power_fault(struct slot * slot)401 int pciehp_query_power_fault(struct slot *slot)
402 {
403 	struct pci_dev *pdev = ctrl_dev(slot->ctrl);
404 	u16 slot_status;
405 
406 	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
407 	return !!(slot_status & PCI_EXP_SLTSTA_PFD);
408 }
409 
/*
 * Write raw indicator bits back to Slot Control: @status is the value
 * previously read via pciehp_get_raw_indicator_status(), so it is
 * shifted left by 6 to line up with the AIC/PIC fields.
 * Always returns 0.
 */
int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
				    u8 status)
{
	struct slot *slot = hotplug_slot->private;
	struct controller *ctrl = slot->ctrl;
	struct pci_dev *pdev = ctrl_dev(ctrl);

	/* Keep config space accessible across the command write */
	pci_config_pm_runtime_get(pdev);
	pcie_write_cmd_nowait(ctrl, status << 6,
			      PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC);
	pci_config_pm_runtime_put(pdev);
	return 0;
}
423 
/*
 * Drive the Attention Indicator: @value 0 = off, 1 = on, 2 = blink.
 * Silently ignored if the slot has no attention LED or @value is
 * outside the known range.
 */
void pciehp_set_attention_status(struct slot *slot, u8 value)
{
	static const u16 attn_cmd[] = {
		PCI_EXP_SLTCTL_ATTN_IND_OFF,	/* 0: turn off */
		PCI_EXP_SLTCTL_ATTN_IND_ON,	/* 1: turn on */
		PCI_EXP_SLTCTL_ATTN_IND_BLINK,	/* 2: turn blink */
	};
	struct controller *ctrl = slot->ctrl;
	u16 slot_cmd;

	if (!ATTN_LED(ctrl))
		return;

	if (value >= ARRAY_SIZE(attn_cmd))
		return;

	slot_cmd = attn_cmd[value];
	pcie_write_cmd_nowait(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
}
449 
/* Turn the Power Indicator on; no-op if the slot has no power LED */
void pciehp_green_led_on(struct slot *slot)
{
	struct controller *ctrl = slot->ctrl;

	if (!PWR_LED(ctrl))
		return;

	pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON,
			      PCI_EXP_SLTCTL_PIC);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
		 PCI_EXP_SLTCTL_PWR_IND_ON);
}
463 
/* Turn the Power Indicator off; no-op if the slot has no power LED */
void pciehp_green_led_off(struct slot *slot)
{
	struct controller *ctrl = slot->ctrl;

	if (!PWR_LED(ctrl))
		return;

	pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
			      PCI_EXP_SLTCTL_PIC);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
		 PCI_EXP_SLTCTL_PWR_IND_OFF);
}
477 
/* Set the Power Indicator blinking; no-op if the slot has no power LED */
void pciehp_green_led_blink(struct slot *slot)
{
	struct controller *ctrl = slot->ctrl;

	if (!PWR_LED(ctrl))
		return;

	pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK,
			      PCI_EXP_SLTCTL_PIC);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
		 PCI_EXP_SLTCTL_PWR_IND_BLINK);
}
491 
/*
 * Power the slot on and re-enable the link. Any stale Power Fault
 * Detected status from earlier failures is cleared first so it is not
 * misreported for the new card. Returns the pciehp_link_enable()
 * result (0 on success).
 */
int pciehp_power_on_slot(struct slot *slot)
{
	struct controller *ctrl = slot->ctrl;
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_status;
	int retval;

	/* Clear power-fault bit from previous power failures */
	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
	if (slot_status & PCI_EXP_SLTSTA_PFD)
		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
					   PCI_EXP_SLTSTA_PFD);
	ctrl->power_fault_detected = 0;

	/* Waits for command completion before enabling the link */
	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_ON, PCI_EXP_SLTCTL_PCC);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
		 PCI_EXP_SLTCTL_PWR_ON);

	retval = pciehp_link_enable(ctrl);
	if (retval)
		ctrl_err(ctrl, "%s: Can not enable the link!\n", __func__);

	return retval;
}
517 
/* Power the slot off, waiting for the command to complete */
void pciehp_power_off_slot(struct slot *slot)
{
	struct controller *ctrl = slot->ctrl;

	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
		 PCI_EXP_SLTCTL_PWR_OFF);
}
527 
/*
 * Hardirq half of the hotplug interrupt: read and acknowledge Slot
 * Status event bits, handle Command Completed inline, and defer all
 * other events to pciehp_ist() via ctrl->pending_events.
 *
 * Returns IRQ_NONE if the interrupt was not ours (or the port is
 * inaccessible), IRQ_HANDLED if fully serviced here, IRQ_WAKE_THREAD
 * to run pciehp_ist().
 */
static irqreturn_t pciehp_isr(int irq, void *dev_id)
{
	struct controller *ctrl = (struct controller *)dev_id;
	struct pci_dev *pdev = ctrl_dev(ctrl);
	struct device *parent = pdev->dev.parent;
	u16 status, events;

	/*
	 * Interrupts only occur in D3hot or shallower (PCIe r4.0, sec 6.7.3.4).
	 */
	if (pdev->current_state == PCI_D3cold)
		return IRQ_NONE;

	/*
	 * Keep the port accessible by holding a runtime PM ref on its parent.
	 * Defer resume of the parent to the IRQ thread if it's suspended.
	 * Mask the interrupt until then.
	 */
	if (parent) {
		pm_runtime_get_noresume(parent);
		if (!pm_runtime_active(parent)) {
			pm_runtime_put(parent);
			disable_irq_nosync(irq);
			atomic_or(RERUN_ISR, &ctrl->pending_events);
			return IRQ_WAKE_THREAD;
		}
	}

	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
	if (status == (u16) ~0) {
		/* All-ones read: device is gone or inaccessible */
		ctrl_info(ctrl, "%s: no response from device\n", __func__);
		if (parent)
			pm_runtime_put(parent);
		return IRQ_NONE;
	}

	/*
	 * Slot Status contains plain status bits as well as event
	 * notification bits; right now we only want the event bits.
	 */
	events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
			   PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
			   PCI_EXP_SLTSTA_DLLSC);

	/*
	 * If we've already reported a power fault, don't report it again
	 * until we've done something to handle it.
	 */
	if (ctrl->power_fault_detected)
		events &= ~PCI_EXP_SLTSTA_PFD;

	if (!events) {
		if (parent)
			pm_runtime_put(parent);
		return IRQ_NONE;
	}

	/* Acknowledge the events (RW1C bits) before processing them */
	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
	ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
	if (parent)
		pm_runtime_put(parent);

	/*
	 * Command Completed notifications are not deferred to the
	 * IRQ thread because it may be waiting for their arrival.
	 */
	if (events & PCI_EXP_SLTSTA_CC) {
		ctrl->cmd_busy = 0;
		/* Pair with the barrier in pcie_do_write_cmd() */
		smp_mb();
		wake_up(&ctrl->queue);

		if (events == PCI_EXP_SLTSTA_CC)
			return IRQ_HANDLED;

		events &= ~PCI_EXP_SLTSTA_CC;
	}

	if (pdev->ignore_hotplug) {
		ctrl_dbg(ctrl, "ignoring hotplug event %#06x\n", events);
		return IRQ_HANDLED;
	}

	/* Save pending events for consumption by IRQ thread. */
	atomic_or(events, &ctrl->pending_events);
	return IRQ_WAKE_THREAD;
}
614 
/*
 * Threaded half of the hotplug interrupt: consume the events queued by
 * pciehp_isr() and act on them (button press, power fault, slot
 * disable, presence/link change) with the port's config space held
 * accessible via runtime PM.
 */
static irqreturn_t pciehp_ist(int irq, void *dev_id)
{
	struct controller *ctrl = (struct controller *)dev_id;
	struct pci_dev *pdev = ctrl_dev(ctrl);
	struct slot *slot = ctrl->slot;
	irqreturn_t ret;
	u32 events;

	pci_config_pm_runtime_get(pdev);

	/* rerun pciehp_isr() if the port was inaccessible on interrupt */
	if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) {
		ret = pciehp_isr(irq, dev_id);
		enable_irq(irq);
		if (ret != IRQ_WAKE_THREAD) {
			pci_config_pm_runtime_put(pdev);
			return ret;
		}
	}

	/* Ensure a concurrent hardirq has finished queueing its events */
	synchronize_hardirq(irq);
	events = atomic_xchg(&ctrl->pending_events, 0);
	if (!events) {
		pci_config_pm_runtime_put(pdev);
		return IRQ_NONE;
	}

	/* Check Attention Button Pressed */
	if (events & PCI_EXP_SLTSTA_ABP) {
		ctrl_info(ctrl, "Slot(%s): Attention button pressed\n",
			  slot_name(slot));
		pciehp_handle_button_press(slot);
	}

	/* Check Power Fault Detected */
	if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
		ctrl->power_fault_detected = 1;
		ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
		pciehp_set_attention_status(slot, 1);
		pciehp_green_led_off(slot);
	}

	/*
	 * Disable requests have higher priority than Presence Detect Changed
	 * or Data Link Layer State Changed events.
	 */
	down_read(&ctrl->reset_lock);
	if (events & DISABLE_SLOT)
		pciehp_handle_disable_request(slot);
	else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC))
		pciehp_handle_presence_or_link_change(slot, events);
	up_read(&ctrl->reset_lock);

	pci_config_pm_runtime_put(pdev);
	wake_up(&ctrl->requester);
	return IRQ_HANDLED;
}
672 
/*
 * Kthread body used instead of an interrupt when pciehp_poll_mode is
 * set: periodically invoke the ISR/IST pair as if an interrupt had
 * fired, until the thread is told to stop.
 */
static int pciehp_poll(void *data)
{
	struct controller *ctrl = data;

	schedule_timeout_idle(10 * HZ); /* start with 10 sec delay */

	while (!kthread_should_stop()) {
		/* poll for interrupt events or user requests */
		while (pciehp_isr(IRQ_NOTCONNECTED, ctrl) == IRQ_WAKE_THREAD ||
		       atomic_read(&ctrl->pending_events))
			pciehp_ist(IRQ_NOTCONNECTED, ctrl);

		/* Clamp the module-parameter interval to 1..60 seconds */
		if (pciehp_poll_time <= 0 || pciehp_poll_time > 60)
			pciehp_poll_time = 2; /* clamp to sane value */

		schedule_timeout_idle(pciehp_poll_time * HZ);
	}

	return 0;
}
693 
/*
 * Enable the set of Slot Control event notifications this driver
 * relies on; interrupt enables are skipped in poll mode. PFDE is in
 * the mask but never in cmd, so power-fault notification stays off
 * (see the TBD note below).
 */
static void pcie_enable_notification(struct controller *ctrl)
{
	u16 cmd, mask;

	/*
	 * TBD: Power fault detected software notification support.
	 *
	 * Power fault detected software notification is not enabled
	 * now, because it caused power fault detected interrupt storm
	 * on some machines. On those machines, power fault detected
	 * bit in the slot status register was set again immediately
	 * when it is cleared in the interrupt service routine, and
	 * next power fault detected interrupt was notified again.
	 */

	/*
	 * Always enable link events: thus link-up and link-down shall
	 * always be treated as hotplug and unplug respectively. Enable
	 * presence detect only if Attention Button is not present.
	 */
	cmd = PCI_EXP_SLTCTL_DLLSCE;
	if (ATTN_BUTTN(ctrl))
		cmd |= PCI_EXP_SLTCTL_ABPE;
	else
		cmd |= PCI_EXP_SLTCTL_PDCE;
	if (!pciehp_poll_mode)
		cmd |= PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE;

	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
		PCI_EXP_SLTCTL_PFDE |
		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
		PCI_EXP_SLTCTL_DLLSCE);

	pcie_write_cmd_nowait(ctrl, cmd, mask);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
}
731 
pcie_disable_notification(struct controller * ctrl)732 static void pcie_disable_notification(struct controller *ctrl)
733 {
734 	u16 mask;
735 
736 	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
737 		PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
738 		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
739 		PCI_EXP_SLTCTL_DLLSCE);
740 	pcie_write_cmd(ctrl, 0, mask);
741 	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
742 		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
743 }
744 
/* Acknowledge (clear) pending presence-detect and link-state events */
void pcie_clear_hotplug_events(struct controller *ctrl)
{
	pcie_capability_write_word(ctrl_dev(ctrl), PCI_EXP_SLTSTA,
				   PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
}
750 
/*
 * pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
 * bus reset of the bridge, but at the same time we want to ensure that it is
 * not seen as a hot-unplug, followed by the hot-plug of the device. Thus,
 * disable link state notification and presence detection change notification
 * momentarily, if we see that they could interfere. Also, clear any spurious
 * events after.
 */
int pciehp_reset_slot(struct slot *slot, int probe)
{
	struct controller *ctrl = slot->ctrl;
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 stat_mask = 0, ctrl_mask = 0;
	int rc;

	/* probe-only calls just report that reset is supported */
	if (probe)
		return 0;

	/* Exclude the IRQ thread's event handling during the reset */
	down_write(&ctrl->reset_lock);

	if (!ATTN_BUTTN(ctrl)) {
		ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
		stat_mask |= PCI_EXP_SLTSTA_PDC;
	}
	ctrl_mask |= PCI_EXP_SLTCTL_DLLSCE;
	stat_mask |= PCI_EXP_SLTSTA_DLLSC;

	/* Mask the notifications that the reset would trigger */
	pcie_write_cmd(ctrl, 0, ctrl_mask);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);

	rc = pci_bridge_secondary_bus_reset(ctrl->pcie->port);

	/* Clear residual events, then re-enable the notifications */
	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
	pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);

	up_write(&ctrl->reset_lock);
	return rc;
}
792 
/*
 * Request the IRQ (or poll thread) and enable event notifications.
 * Returns 0 on success, -1 if the IRQ could not be obtained.
 */
int pcie_init_notification(struct controller *ctrl)
{
	if (pciehp_request_irq(ctrl))
		return -1;
	pcie_enable_notification(ctrl);
	ctrl->notification_enabled = 1;
	return 0;
}
801 
pcie_shutdown_notification(struct controller * ctrl)802 void pcie_shutdown_notification(struct controller *ctrl)
803 {
804 	if (ctrl->notification_enabled) {
805 		pcie_disable_notification(ctrl);
806 		pciehp_free_irq(ctrl);
807 		ctrl->notification_enabled = 0;
808 	}
809 }
810 
/*
 * Allocate and initialize the slot structure for the controller. The
 * initial state is derived from whether the secondary bus already has
 * devices. Returns 0 on success, -ENOMEM on allocation failure.
 */
static int pcie_init_slot(struct controller *ctrl)
{
	struct pci_bus *subordinate = ctrl_dev(ctrl)->subordinate;
	struct slot *slot;

	slot = kzalloc(sizeof(*slot), GFP_KERNEL);
	if (!slot)
		return -ENOMEM;

	/* Guard the devices list while inspecting it */
	down_read(&pci_bus_sem);
	slot->state = list_empty(&subordinate->devices) ? OFF_STATE : ON_STATE;
	up_read(&pci_bus_sem);

	slot->ctrl = ctrl;
	mutex_init(&slot->lock);
	INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
	ctrl->slot = slot;
	return 0;
}
830 
/* Counterpart of pcie_init_slot(): flush pending work and free the slot */
static void pcie_cleanup_slot(struct controller *ctrl)
{
	struct slot *slot = ctrl->slot;

	cancel_delayed_work_sync(&slot->work);
	kfree(slot);
}
838 
/* Dump Slot Capabilities/Status/Control when pciehp_debug is enabled */
static inline void dbg_ctrl(struct controller *ctrl)
{
	struct pci_dev *pdev = ctrl->pcie->port;
	u16 reg16;

	if (!pciehp_debug)
		return;

	ctrl_info(ctrl, "Slot Capabilities      : 0x%08x\n", ctrl->slot_cap);
	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &reg16);
	ctrl_info(ctrl, "Slot Status            : 0x%04x\n", reg16);
	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &reg16);
	ctrl_info(ctrl, "Slot Control           : 0x%04x\n", reg16);
}
853 
/* Render a capability bit as '+' (present) or '-' (absent) for logging */
#define FLAG(x, y)	(((x) & (y)) ? '+' : '-')

/*
 * Allocate and initialize the controller for a hotplug-capable port:
 * read and (for known quirks) adjust Slot Capabilities, init locks and
 * waitqueues, clear stale Slot Status events, create the slot, and
 * power off an empty-but-powered slot. Returns the controller or NULL
 * on failure.
 */
struct controller *pcie_init(struct pcie_device *dev)
{
	struct controller *ctrl;
	u32 slot_cap, link_cap;
	u8 occupied, poweron;
	struct pci_dev *pdev = dev->port;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto abort;

	ctrl->pcie = dev;
	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);

	/* Hide the indicators from pciehp when userspace drives them */
	if (pdev->hotplug_user_indicators)
		slot_cap &= ~(PCI_EXP_SLTCAP_AIP | PCI_EXP_SLTCAP_PIP);

	/*
	 * We assume no Thunderbolt controllers support Command Complete events,
	 * but some controllers falsely claim they do.
	 */
	if (pdev->is_thunderbolt)
		slot_cap |= PCI_EXP_SLTCAP_NCCS;

	ctrl->slot_cap = slot_cap;
	mutex_init(&ctrl->ctrl_lock);
	init_rwsem(&ctrl->reset_lock);
	init_waitqueue_head(&ctrl->requester);
	init_waitqueue_head(&ctrl->queue);
	dbg_ctrl(ctrl);

	/* Check if Data Link Layer Link Active Reporting is implemented */
	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
	if (link_cap & PCI_EXP_LNKCAP_DLLLARC)
		ctrl->link_active_reporting = 1;

	/* Clear all remaining event bits in Slot Status register. */
	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
		PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
		PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
		PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC);

	ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c%s\n",
		(slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
		FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_AIP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_PIP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_HPC),
		FLAG(slot_cap, PCI_EXP_SLTCAP_HPS),
		FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
		FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC),
		pdev->broken_cmd_compl ? " (with Cmd Compl erratum)" : "");

	if (pcie_init_slot(ctrl))
		goto abort_ctrl;

	/*
	 * If empty slot's power status is on, turn power off.  The IRQ isn't
	 * requested yet, so avoid triggering a notification with this command.
	 */
	if (POWER_CTRL(ctrl)) {
		pciehp_get_adapter_status(ctrl->slot, &occupied);
		pciehp_get_power_status(ctrl->slot, &poweron);
		if (!occupied && poweron) {
			pcie_disable_notification(ctrl);
			pciehp_power_off_slot(ctrl->slot);
		}
	}

	return ctrl;

abort_ctrl:
	kfree(ctrl);
abort:
	return NULL;
}
935 
/* Counterpart of pcie_init(): release the slot and the controller */
void pciehp_release_ctrl(struct controller *ctrl)
{
	pcie_cleanup_slot(ctrl);
	kfree(ctrl);
}
941 
quirk_cmd_compl(struct pci_dev * pdev)942 static void quirk_cmd_compl(struct pci_dev *pdev)
943 {
944 	u32 slot_cap;
945 
946 	if (pci_is_pcie(pdev)) {
947 		pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
948 		if (slot_cap & PCI_EXP_SLTCAP_HPC &&
949 		    !(slot_cap & PCI_EXP_SLTCAP_NCCS))
950 			pdev->broken_cmd_compl = 1;
951 	}
952 }
953 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
954 			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
955 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,
956 			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
957 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401,
958 			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
959