// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3hot_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3hot_delay;

	if (delay < pci_pm_d3hot_delay)
		delay = pci_pm_d3hot_delay;

	if (delay)
		msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)
/* hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
/*
 * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
 * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
 * pci=hpmemsize=nnM overrides both
 */
unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;


/* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
#ifdef CONFIG_PCIE_BUS_TUNE_OFF
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
#elif defined CONFIG_PCIE_BUS_SAFE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
#elif defined CONFIG_PCIE_BUS_PERFORMANCE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
#elif defined CONFIG_PCIE_BUS_PEER2PEER
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
#else
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
#endif

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;

bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}
EXPORT_SYMBOL_GPL(pci_ats_disabled);

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/* Time to wait after a reset for device to become responsive */
#define PCIE_RESET_READY_POLL_MS 60000

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
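
/*
 * Example (illustrative sketch only, not used in this file): a host
 * bridge driver could report the range of bus numbers in use below a
 * root bus "bus" (assumed to be a valid struct pci_bus from the caller):
 *
 *	unsigned char max = pci_bus_max_busnr(bus);
 *
 *	dev_info(&bus->dev, "buses %02x-%02x in use\n", bus->number, max);
 */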

/**
 * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
 * @pdev: the PCI device
 *
 * Returns error bits set in PCI_STATUS and clears them.
 */
int pci_status_get_and_clear_errors(struct pci_dev *pdev)
{
	u16 status;
	int ret;

	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
	if (ret != PCIBIOS_SUCCESSFUL)
		return -EIO;

	status &= PCI_STATUS_ERROR_BITS;
	if (status)
		pci_write_config_word(pdev, PCI_STATUS, status);

	return status;
}
EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
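
/*
 * Example (hedged sketch): a driver's error handler might sample and
 * clear the legacy error bits like this; "pdev" is a bound struct
 * pci_dev supplied by the caller, not anything defined in this file:
 *
 *	int err_bits = pci_status_get_and_clear_errors(pdev);
 *
 *	if (err_bits < 0)
 *		return err_bits;	(config read failed)
 *	if (err_bits & PCI_STATUS_SIG_TARGET_ABORT)
 *		pci_warn(pdev, "target abort signalled\n");
 */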

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	struct resource *res = &pdev->resource[bar];

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}
	return ioremap(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
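
/*
 * Example (assumption-laden sketch): typical probe-time use, with
 * "pdev" and the choice of BAR 0 assumed from the caller:
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *
 * The mapping is later undone with iounmap(); managed drivers can use
 * pcim_iomap() instead.
 */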

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_wc(pci_resource_start(pdev, bar),
			  pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif

/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'.  Using a path
 * is more robust against bus renumbering than using only a single bus,
 * device and function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
				  const char **endptr)
{
	int ret;
	int seg, bus, slot, func;
	char *wpath, *p;
	char end;

	*endptr = strchrnul(path, ';');

	wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
	if (!wpath)
		return -ENOMEM;

	while (1) {
		p = strrchr(wpath, '/');
		if (!p)
			break;
		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
		if (ret != 2) {
			ret = -EINVAL;
			goto free_and_exit;
		}

		if (dev->devfn != PCI_DEVFN(slot, func)) {
			ret = 0;
			goto free_and_exit;
		}

		/*
		 * Note: we don't need to get a reference to the upstream
		 * bridge because we hold a reference to the top level
		 * device which should hold a reference to the bridge,
		 * and so on.
		 */
		dev = pci_upstream_bridge(dev);
		if (!dev) {
			ret = 0;
			goto free_and_exit;
		}

		*p = 0;
	}

	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
		     &func, &end);
	if (ret != 4) {
		seg = 0;
		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
		if (ret != 3) {
			ret = -EINVAL;
			goto free_and_exit;
		}
	}

	ret = (seg == pci_domain_nr(dev->bus) &&
	       bus == dev->bus->number &&
	       dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
	kfree(wpath);
	return ret;
}

/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address which
 * may change if new hardware is inserted, if motherboard firmware changes,
 * or if kernel parameters change.  If the domain is left unspecified, it
 * is taken to be 0.  In order to be robust against bus renumbering
 * issues, a path of PCI device/function numbers may be used to address
 * the specific device.  The path for a device can be determined through
 * the use of 'lspci -t'.
 *
 * The second format matches devices using IDs in the configuration
 * space which may match multiple devices in the system.  A value of 0
 * for any field will match all devices.  (Note: this differs from
 * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
 * legacy reasons and convenience so users don't have to specify
 * FFFFFFFFs on the command line.)
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
			     const char **endptr)
{
	int ret;
	int count;
	unsigned short vendor, device, subsystem_vendor, subsystem_device;

	if (strncmp(p, "pci:", 4) == 0) {
		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
		p += 4;
		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
			     &subsystem_vendor, &subsystem_device, &count);
		if (ret != 4) {
			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
			if (ret != 2)
				return -EINVAL;

			subsystem_vendor = 0;
			subsystem_device = 0;
		}

		p += count;

		if ((!vendor || vendor == dev->vendor) &&
		    (!device || device == dev->device) &&
		    (!subsystem_vendor ||
			    subsystem_vendor == dev->subsystem_vendor) &&
		    (!subsystem_device ||
			    subsystem_device == dev->subsystem_device))
			goto found;
	} else {
		/*
		 * PCI Bus, Device, Function IDs are specified
		 * (optionally, may include a path of devfns following it)
		 */
		ret = pci_dev_str_match_path(dev, p, &p);
		if (ret < 0)
			return ret;
		else if (ret)
			goto found;
	}

	*endptr = p;
	return 0;

found:
	*endptr = p;
	return 1;
}
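
/*
 * Examples of strings accepted by pci_dev_str_match(), with made-up
 * addresses and IDs for illustration only:
 *
 *	0000:02:00.0		domain 0, bus 02, device 00, function 0
 *	01:10.0/02.1		device 02.1 reached through devfn 10.0 on bus 01
 *	pci:8086:1533		vendor 0x8086, device 0x1533, any subsystem
 *	pci:8086:0:1028:0	vendor 0x8086, any device, subvendor 0x1028
 */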

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap include:
 *
 *  %PCI_CAP_ID_PM	Power Management
 *  %PCI_CAP_ID_AGP	Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD	Vital Product Data
 *  %PCI_CAP_ID_SLOTID	Slot Identification
 *  %PCI_CAP_ID_MSI	Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP	CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX	PCI-X
 *  %PCI_CAP_ID_EXP	PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);
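
/*
 * Example (sketch; "pdev" comes from the caller): locating the Power
 * Management capability before reading its PMC register:
 *
 *	u16 pmc;
 *	int pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *
 *	if (pm)
 *		pci_read_config_word(pdev, pm + PCI_PM_PMC, &pmc);
 */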

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap include:
 *
 *  %PCI_EXT_CAP_ID_ERR	Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC	Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN	Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR	Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
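
/*
 * Example (sketch; "pdev" assumed from the caller): locating the AER
 * capability and reading its uncorrectable status register:
 *
 *	u32 sta;
 *	int aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 *
 *	if (aer)
 *		pci_read_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, &sta);
 *
 * For capabilities that may occur more than once (e.g. vendor-specific
 * ones), continue the walk with pci_find_next_ext_capability().
 */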

/**
 * pci_get_dsn - Read and return the 8-byte Device Serial Number
 * @dev: PCI device to query
 *
 * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
 * Number.
 *
 * Returns the DSN, or zero if the capability does not exist.
 */
u64 pci_get_dsn(struct pci_dev *dev)
{
	u32 dword;
	u64 dsn;
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
	if (!pos)
		return 0;

	/*
	 * The Device Serial Number is two dwords offset 4 bytes from the
	 * capability position.  The specification says that the first dword is
	 * the lower half, and the second dword is the upper half.
	 */
	pos += 4;
	pci_read_config_dword(dev, pos, &dword);
	dsn = (u64)dword;
	pci_read_config_dword(dev, pos + 4, &dword);
	dsn |= ((u64)dword) << 32;

	return dsn;
}
EXPORT_SYMBOL_GPL(pci_get_dsn);
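
/*
 * Example (sketch; "pdev" assumed): the DSN can serve as a stable board
 * identifier, e.g. for logging:
 *
 *	u64 dsn = pci_get_dsn(pdev);
 *
 *	if (dsn)
 *		pci_info(pdev, "device serial number %016llx\n", dsn);
 */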

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: HyperTransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @ht_cap: HyperTransport capability code
 *
 * Tell if a device supports a given HyperTransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a HyperTransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *			      region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource region of
 * parent bus the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them.  In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for Transaction Pending bit to clear */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}
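
/*
 * Example (sketch): draining outstanding transactions before an AF FLR,
 * where "pdev" and the AF capability offset "pos" are assumed to come
 * from the caller:
 *
 *	if (!pci_wait_for_pending(pdev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP))
 *		pci_err(pdev, "timed out waiting for pending transactions\n");
 */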

static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}

static const char *disable_acs_redir_param;

/**
 * pci_disable_acs_redir - disable ACS redirect capabilities
 * @dev: the PCI device
 *
 * For only devices specified in the disable_acs_redir parameter.
 */
static void pci_disable_acs_redir(struct pci_dev *dev)
{
	int ret = 0;
	const char *p;
	int pos;
	u16 ctrl;

	if (!disable_acs_redir_param)
		return;

	p = disable_acs_redir_param;
	while (*p) {
		ret = pci_dev_str_match(dev, p, &p);
		if (ret < 0) {
			pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
				     disable_acs_redir_param);

			break;
		} else if (ret == 1) {
			/* Found a match */
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}

	if (ret != 1)
		return;

	if (!pci_dev_specific_disable_acs_redir(dev))
		return;

	pos = dev->acs_cap;
	if (!pos) {
		pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
		return;
	}

	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* P2P Request & Completion Redirect */
	ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);

	pci_info(dev, "disabled ACS redirect\n");
}

/**
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 * @dev: the PCI device
 */
static void pci_std_enable_acs(struct pci_dev *dev)
{
	int pos;
	u16 cap;
	u16 ctrl;

	pos = dev->acs_cap;
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* Source Validation */
	ctrl |= (cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	ctrl |= (cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	ctrl |= (cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	ctrl |= (cap & PCI_ACS_UF);

	/* Enable Translation Blocking for external devices */
	if (dev->external_facing || dev->untrusted)
		ctrl |= (cap & PCI_ACS_TB);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}

/**
 * pci_enable_acs - enable ACS if hardware supports it
 * @dev: the PCI device
 */
static void pci_enable_acs(struct pci_dev *dev)
{
	if (!pci_acs_enable)
		goto disable_acs_redir;

	if (!pci_dev_specific_enable_acs(dev))
		goto disable_acs_redir;

	pci_std_enable_acs(dev);

disable_acs_redir:
	/*
	 * Note: pci_disable_acs_redir() must be called even if ACS was not
	 * enabled by the kernel because it may have been enabled by
	 * platform firmware.  So if we are told to disable it, we should
	 * always disable it after setting the kernel's default
	 * preferences.
	 */
	pci_disable_acs_redir(dev);
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static const struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->get_state ||
	    !ops->choose_state || !ops->set_wakeup || !ops->need_resume)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
}

static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
	if (pci_platform_pm && pci_platform_pm->refresh_state)
		pci_platform_pm->refresh_state(dev);
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
}

static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
	if (pci_platform_pm && pci_platform_pm->bridge_d3)
		return pci_platform_pm->bridge_d3(dev);
	return false;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *			     given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * Validate transition: We can enter D0 from any state, but if
	 * we're already in a low-power state, we can only go deeper.  E.g.,
	 * we can go from D1 to D3, but we can't go directly from D3 to D1;
	 * we'd have to go from D3 to D0, then to D1.
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		pci_err(dev, "invalid power transition (from %s to %s)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EINVAL;
	}

	/* Check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	    || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (pmcsr == (u16) ~0) {
		pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EIO;
	}

	/*
	 * If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		    && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		fallthrough;	/* force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* Enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * Mandatory power management transition delays; see PCI PM 1.1
	 * 5.6.1 table 18
	 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state)
		pci_info_ratelimited(dev, "refused to change power state from %s to %s\n",
				     pci_power_name(dev->current_state),
				     pci_power_name(state));

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold.  The platform firmware is therefore queried first
 * to detect accessibility of the register.  In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * vendor ID in config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold ||
	    !pci_device_is_present(dev)) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_refresh_power_state - Refresh the given device's power state data
 * @dev: Target PCI device.
 *
 * Ask the platform to refresh the device's power state information and invoke
 * pci_update_current_state() to update its current PCI power state.
 */
void pci_refresh_power_state(struct pci_dev *dev)
{
	if (platform_pci_power_manageable(dev))
		platform_pci_refresh_power_state(dev);

	pci_update_current_state(dev, dev->current_state);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else
		error = -ENODEV;

	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}
EXPORT_SYMBOL_GPL(pci_platform_power_transition);

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_wakeup, NULL);
}

static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
	int delay = 1;
	u32 id;

	/*
	 * After reset, the device should not silently discard config
	 * requests, but it may still indicate that it needs more time by
	 * responding to them with CRS completions.  The Root Port will
	 * generally synthesize ~0 data to complete the read (except when
	 * CRS SV is enabled and the read was for the Vendor ID; in that
	 * case it synthesizes 0x0001 data).
	 *
	 * Wait for the device to return a non-CRS completion.  Read the
	 * Command register instead of Vendor ID so we don't have to
	 * contend with the CRS SV value.
	 */
	pci_read_config_dword(dev, PCI_COMMAND, &id);
	while (id == ~0) {
		if (delay > timeout) {
			pci_warn(dev, "not ready %dms after %s; giving up\n",
				 delay - 1, reset_type);
			return -ENOTTY;
		}

		if (delay > 1000)
			pci_info(dev, "not ready %dms after %s; waiting\n",
				 delay - 1, reset_type);

		msleep(delay);
		delay *= 2;
		pci_read_config_dword(dev, PCI_COMMAND, &id);
	}

	if (delay > 1000)
		pci_info(dev, "ready %dms after %s\n", delay - 1,
			 reset_type);

	return 0;
}

/**
 * pci_power_up - Put the given device into D0
 * @dev: PCI device to power up
 */
int pci_power_up(struct pci_dev *dev)
{
	pci_platform_power_transition(dev, PCI_D0);

	/*
	 * Mandatory power management transition delays are handled in
	 * pci_pm_resume_noirq() and pci_pm_runtime_resume() of the
	 * corresponding bridge.
	 */
	if (dev->runtime_d3cold) {
		/*
		 * When powering on a bridge from D3cold, the whole hierarchy
		 * may be powered on into D0uninitialized state, resume them to
		 * give them a chance to suspend again
		 */
		pci_wakeup_bus(dev->subordinate);
	}

	return pci_raw_set_power_state(dev, PCI_D0);
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* Bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))

		/*
		 * If the device or the parent bridge do not support PCI
		 * PM, ignore the request if we're doing anything other
		 * than putting it into D0 (which would only happen on
		 * boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (state == PCI_D0)
		return pci_power_up(dev);

	/*
	 * This device is quirked not to be put into D3, so don't put it in
	 * D3
	 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put device in D3cold, we put device into D3hot in native
	 * way, then put device into D3cold with platform ops
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (pci_platform_power_transition(dev, state))
		return error;

	/* Powering off a bridge may power off the whole hierarchy */
	if (state == PCI_D3cold)
		pci_bus_set_current_state(dev->subordinate, PCI_D3cold);

	return 0;
}
EXPORT_SYMBOL(pci_set_power_state);
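
/*
 * Example (sketch; "pdev" from the caller): putting an idle device into
 * D3hot, tolerating devices without PM support (which return -EIO):
 *
 *	int rc = pci_set_power_state(pdev, PCI_D3hot);
 *
 *	if (rc && rc != -EIO)
 *		pci_dbg(pdev, "D3hot transition failed: %d\n", rc);
 */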

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	   that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!dev->pm_cap)
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		pci_info(dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);

#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
						       u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || !pos)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

static void pci_save_ltr_state(struct pci_dev *dev)
{
	int ltr;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return;

	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!ltr)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state) {
		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
		return;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++);
	pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++);
}

static void pci_restore_ltr_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	int ltr;
	u16 *cap;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state || !ltr)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++);
	pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++);
}

/**
 * pci_save_state - save the PCI configuration space of a device before
 *		    suspending
 * @dev: PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++) {
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
		pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n",
			i * 4, dev->saved_config_space[i]);
	}
	dev->state_saved = true;

	i = pci_save_pcie_state(dev);
	if (i != 0)
		return i;

	i = pci_save_pcix_state(dev);
	if (i != 0)
		return i;

	pci_save_ltr_state(dev);
	pci_save_dpc_state(dev);
	pci_save_aer_state(dev);
	return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);
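
/*
 * Example (sketch): the classic legacy suspend/resume pairing, with
 * "pdev" assumed from the caller:
 *
 *	suspend:	pci_save_state(pdev);
 *			pci_set_power_state(pdev, PCI_D3hot);
 *
 *	resume:		pci_set_power_state(pdev, PCI_D0);
 *			pci_restore_state(pdev);
 *
 * Modern drivers normally leave this to the PCI core's PM callbacks.
 */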
1572
pci_restore_config_dword(struct pci_dev * pdev,int offset,u32 saved_val,int retry,bool force)1573 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1574 u32 saved_val, int retry, bool force)
1575 {
1576 u32 val;
1577
1578 pci_read_config_dword(pdev, offset, &val);
1579 if (!force && val == saved_val)
1580 return;
1581
1582 for (;;) {
1583 pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
1584 offset, val, saved_val);
1585 pci_write_config_dword(pdev, offset, saved_val);
1586 if (retry-- <= 0)
1587 return;
1588
1589 pci_read_config_dword(pdev, offset, &val);
1590 if (val == saved_val)
1591 return;
1592
1593 mdelay(1);
1594 }
1595 }
1596
pci_restore_config_space_range(struct pci_dev * pdev,int start,int end,int retry,bool force)1597 static void pci_restore_config_space_range(struct pci_dev *pdev,
1598 int start, int end, int retry,
1599 bool force)
1600 {
1601 int index;
1602
1603 for (index = end; index >= start; index--)
1604 pci_restore_config_dword(pdev, 4 * index,
1605 pdev->saved_config_space[index],
1606 retry, force);
1607 }
1608
pci_restore_config_space(struct pci_dev * pdev)1609 static void pci_restore_config_space(struct pci_dev *pdev)
1610 {
1611 if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1612 pci_restore_config_space_range(pdev, 10, 15, 0, false);
1613 /* Restore BARs before the command register. */
1614 pci_restore_config_space_range(pdev, 4, 9, 10, false);
1615 pci_restore_config_space_range(pdev, 0, 3, 0, false);
1616 } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1617 pci_restore_config_space_range(pdev, 12, 15, 0, false);
1618
1619 /*
1620 * Force rewriting of prefetch registers to avoid S3 resume
1621 * issues on Intel PCI bridges that occur when these
1622 * registers are not explicitly written.
1623 */
1624 pci_restore_config_space_range(pdev, 9, 11, 0, true);
1625 pci_restore_config_space_range(pdev, 0, 8, 0, false);
1626 } else {
1627 pci_restore_config_space_range(pdev, 0, 15, 0, false);
1628 }
1629 }
1630
pci_restore_rebar_state(struct pci_dev * pdev)1631 static void pci_restore_rebar_state(struct pci_dev *pdev)
1632 {
1633 unsigned int pos, nbars, i;
1634 u32 ctrl;
1635
1636 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
1637 if (!pos)
1638 return;
1639
1640 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1641 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
1642 PCI_REBAR_CTRL_NBAR_SHIFT;
1643
1644 for (i = 0; i < nbars; i++, pos += 8) {
1645 struct resource *res;
1646 int bar_idx, size;
1647
1648 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1649 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
1650 res = pdev->resource + bar_idx;
1651 size = ilog2(resource_size(res)) - 20;
1652 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
1653 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
1654 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
1655 }
1656 }
1657
1658 /**
1659 * pci_restore_state - Restore the saved state of a PCI device
1660 * @dev: PCI device that we're dealing with
1661 */
pci_restore_state(struct pci_dev * dev)1662 void pci_restore_state(struct pci_dev *dev)
1663 {
1664 if (!dev->state_saved)
1665 return;
1666
1667 /*
1668 * Restore max latencies (in the LTR capability) before enabling
1669 * LTR itself (in the PCIe capability).
1670 */
1671 pci_restore_ltr_state(dev);
1672
1673 pci_restore_pcie_state(dev);
1674 pci_restore_pasid_state(dev);
1675 pci_restore_pri_state(dev);
1676 pci_restore_ats_state(dev);
1677 pci_restore_vc_state(dev);
1678 pci_restore_rebar_state(dev);
1679 pci_restore_dpc_state(dev);
1680
1681 pci_aer_clear_status(dev);
1682 pci_restore_aer_state(dev);
1683
1684 pci_restore_config_space(dev);
1685
1686 pci_restore_pcix_state(dev);
1687 pci_restore_msi_state(dev);
1688
1689 /* Restore ACS and IOV configuration state */
1690 pci_enable_acs(dev);
1691 pci_restore_iov_state(dev);
1692
1693 dev->state_saved = false;
1694 }
1695 EXPORT_SYMBOL(pci_restore_state);
1696
1697 struct pci_saved_state {
1698 u32 config_space[16];
1699 struct pci_cap_saved_data cap[];
1700 };
1701
1702 /**
1703 * pci_store_saved_state - Allocate and return an opaque struct containing
1704 * the device saved state.
1705 * @dev: PCI device that we're dealing with
1706 *
1707 * Return NULL if no state or error.
1708 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev,
			 struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
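
/*
 * Example (illustrative sketch, not from an in-tree driver): a driver that
 * performs a device-specific reset can use the helpers above to preserve
 * and reinstate the saved state across it; my_device_reset() is a
 * hypothetical placeholder, and pci_store_saved_state() may return NULL:
 *
 *	struct pci_saved_state *saved;
 *
 *	pci_save_state(pdev);
 *	saved = pci_store_saved_state(pdev);
 *
 *	my_device_reset(pdev);
 *
 *	if (saved)
 *		pci_load_and_free_saved_state(pdev, &saved);
 *	pci_restore_state(pdev);
 *
 * This mirrors how VFIO preserves device state around resets.
 */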

int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
{
	return pci_enable_resources(dev, bars);
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;
	struct pci_dev *bridge;
	u16 cmd;
	u8 pin;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pcie_aspm_powersave_config_link(bridge);

	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	if (dev->msi_enabled || dev->msix_enabled)
		return 0;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (pin) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & PCI_COMMAND_INTX_DISABLE)
			pci_write_config_word(dev, PCI_COMMAND,
					      cmd & ~PCI_COMMAND_INTX_DISABLE);
	}

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * NOTE: This function is a backend of pci_default_resume() and is not supposed
 * to be called by normal code; write a proper resume handler and use that
 * instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}
EXPORT_SYMBOL(pci_reenable_device);

static void pci_enable_bridge(struct pci_dev *dev)
{
	struct pci_dev *bridge;
	int retval;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	if (pci_is_enabled(dev)) {
		if (!dev->is_busmaster)
			pci_set_master(dev);
		return;
	}

	retval = pci_enable_device(dev);
	if (retval)
		pci_err(dev, "Error enabling bridge (%d), continuing\n",
			retval);
	pci_set_master(dev);
}

static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
	struct pci_dev *bridge;
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call. So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_inc_return(&dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	/* Skip only the SR-IOV resources (between ROM and bridge windows) */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device_io);

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM);
}
EXPORT_SYMBOL(pci_enable_device_mem);

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device);
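
/*
 * Example (illustrative sketch): the usual pairing of the unmanaged enable
 * interfaces above in a driver's probe()/remove(); the function names are
 * hypothetical:
 *
 *	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int err;
 *
 *		err = pci_enable_device_mem(pdev);
 *		if (err)
 *			return err;
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 *
 *	static void my_remove(struct pci_dev *pdev)
 *	{
 *		pci_disable_device(pdev);
 *	}
 */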

/*
 * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X
 * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so
 * there's no need to track it separately. pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	unsigned int mwi:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->mwi)
		pci_clear_mwi(dev);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
EXPORT_SYMBOL(pcim_enable_device);

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev. Pinned device won't be disabled on
 * driver detach. @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
EXPORT_SYMBOL(pcim_pin_device);
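
/*
 * Example (illustrative sketch): with the managed interface above, no
 * explicit disable is needed in remove() -- pcim_release() undoes the
 * enable on driver detach unless the device was pinned. The probe name is
 * hypothetical:
 *
 *	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int err;
 *
 *		err = pcim_enable_device(pdev);
 *		if (err)
 *			return err;
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 */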

/*
 * pcibios_add_device - provide arch specific hooks when adding device dev
 * @dev: the PCI device being added
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are added. This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pcibios_add_device(struct pci_dev *dev)
{
	return 0;
}

/**
 * pcibios_release_device - provide arch specific hooks when releasing
 *			    device dev
 * @dev: the PCI device being released
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are released. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_release_device(struct pci_dev *dev) {}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device(struct pci_dev *dev) {}

/**
 * pcibios_penalize_isa_irq - penalize an ISA IRQ
 * @irq: ISA IRQ to penalize
 * @active: IRQ active or not
 *
 * Permits the platform to provide architecture-specific functionality when
 * penalizing ISA IRQs. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_penalize_isa_irq(int irq, int active) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore. This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
		      "disabling already-disabled device");

	if (atomic_dec_return(&dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
EXPORT_SYMBOL(pci_disable_device);

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Set the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
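
/*
 * Example (illustrative sketch): a driver can hold its device in warm reset
 * around a recovery sequence, assuming the architecture implements
 * pcibios_set_pcie_reset_state(); the 100 ms delay is an arbitrary choice:
 *
 *	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
 *	msleep(100);
 *	pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
 */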

void pcie_clear_device_status(struct pci_dev *dev)
{
	u16 sta;

	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
}

/**
 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
 * @dev: PCIe root port or event collector.
 */
void pcie_clear_root_pme_status(struct pci_dev *dev)
{
	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set). Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}


/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}
EXPORT_SYMBOL(pci_pme_capable);

static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
		if (pme_dev->dev->pme_poll) {
			struct pci_dev *bridge;

			bridge = pme_dev->dev->bus->self;
			/*
			 * If the bridge is in a low-power state, the
			 * configuration space of its subordinate devices
			 * may not be accessible.
			 */
			if (bridge && bridge->current_state != PCI_D0)
				continue;
			/*
			 * If the device is in D3cold it should not be
			 * polled either.
			 */
			if (pme_dev->dev->current_state == PCI_D3cold)
				continue;

			pci_pme_wakeup(pme_dev->dev, NULL);
		} else {
			list_del(&pme_dev->list);
			kfree(pme_dev);
		}
	}
	if (!list_empty(&pci_pme_list))
		queue_delayed_work(system_freezable_wq, &pci_pme_work,
				   msecs_to_jiffies(PME_TIMEOUT));
	mutex_unlock(&pci_pme_list_mutex);
}

static void __pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

/**
 * pci_pme_restore - Restore PME configuration after config space restore.
 * @dev: PCI device to update.
 */
void pci_pme_restore(struct pci_dev *dev)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (dev->wakeup_prepared) {
		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
	} else {
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		pmcsr |= PCI_PM_CTRL_PME_STATUS;
	}
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	__pci_pme_active(dev, enable);

	/*
	 * PCI (as opposed to PCIe) PME requires that the device have
	 * its PME# line hooked up correctly. Not all hardware vendors
	 * do this, so the PME never gets delivered and the device
	 * remains asleep. The easiest way around this is to
	 * periodically walk the list of suspended devices and check
	 * whether any have their PME flag set. The assumption is that
	 * we'll wake up often enough anyway that this won't be a huge
	 * hit, and the power savings from the devices will still be a
	 * win.
	 *
	 * Although PCIe uses in-band PME message instead of PME# line
	 * to report PME, PME does not work for some PCIe devices in
	 * reality. For example, there are devices that set their PME
	 * status bits, but don't really bother to send a PME message;
	 * there are PCI Express Root Ports that don't bother to
	 * trigger interrupts when they receive PME messages from the
	 * devices below. So PME poll is used for PCIe devices too.
	 */

	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev) {
				pci_warn(dev, "can't enable PME#\n");
				return;
			}
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				queue_delayed_work(system_freezable_wq,
						   &pci_pme_work,
						   msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
EXPORT_SYMBOL(pci_pme_active);

/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
	int ret = 0;

	/*
	 * Bridges that are not power-manageable directly only signal
	 * wakeup on behalf of subordinate devices which is set up
	 * elsewhere, so skip them. However, bridges that are
	 * power-manageable may signal wakeup for themselves (for example,
	 * on a hotplug event) and they need to be covered here.
	 */
	if (!pci_power_manageable(dev))
		return 0;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable. To disable wake-up we call the platform first, for symmetry.
	 */

	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = platform_pci_set_wakeup(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		platform_pci_set_wakeup(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}

/**
 * pci_enable_wake - change wakeup settings for a PCI device
 * @pci_dev: Target device
 * @state: PCI state from which device will issue wakeup events
 * @enable: Whether or not to enable event generation
 *
 * If @enable is set, check device_may_wakeup() for the device before calling
 * __pci_enable_wake() for it.
 */
int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
{
	if (enable && !device_may_wakeup(&pci_dev->dev))
		return -EINVAL;

	return __pci_enable_wake(pci_dev, state, enable);
}
EXPORT_SYMBOL(pci_enable_wake);

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not allowed to wake
 * up the system from sleep or it is not capable of generating PME# from both
 * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}
EXPORT_SYMBOL(pci_wake_from_d3);
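
/*
 * Example (illustrative sketch): a system-suspend handler arming wakeup
 * according to the user's wakeup policy before entering D3hot; the callback
 * name is hypothetical:
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *
 *		pci_save_state(pdev);
 *		pci_wake_from_d3(pdev, device_may_wakeup(dev));
 *		return pci_set_power_state(pdev, PCI_D3hot);
 *	}
 */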

/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to find the target state for the device.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
			fallthrough;
		default:
			target_state = state;
		}

		return target_state;
	}

	if (!dev->pm_cap)
		target_state = PCI_D0;

	/*
	 * If the device is in D3cold even though it's not power-manageable by
	 * the platform, it may have been powered down by non-standard means.
	 * Best to let it slumber.
	 */
	if (dev->current_state == PCI_D3cold)
		target_state = PCI_D3cold;

	if (wakeup) {
		/*
		 * Find the deepest state from which the device can generate
		 * PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			       && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition
 *			  into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	bool wakeup = device_may_wakeup(&dev->dev);
	pci_power_t target_state = pci_target_state(dev, wakeup);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	pci_enable_wake(dev, target_state, wakeup);

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}
EXPORT_SYMBOL(pci_prepare_to_sleep);

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition
 *			 into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}
EXPORT_SYMBOL(pci_back_from_sleep);
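
/*
 * Example (illustrative sketch): legacy-style PCI suspend/resume callbacks
 * built on the two helpers above; the PCI core's default PM handling
 * follows the same shape. Callback names are hypothetical:
 *
 *	static int my_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		return pci_prepare_to_sleep(pdev);
 *	}
 *
 *	static int my_resume(struct pci_dev *pdev)
 *	{
 *		int err = pci_back_from_sleep(pdev);
 *
 *		if (err)
 *			return err;
 *		pci_restore_state(pdev);
 *		return 0;
 *	}
 */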

/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state;
	int error;

	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	dev->runtime_d3cold = target_state == PCI_D3cold;

	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	if (error) {
		pci_enable_wake(dev, target_state, false);
		dev->runtime_d3cold = false;
	}

	return error;
}

/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device supports
 * PME and one of its upstream bridges can generate wake-up events.
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;

	if (!dev->pme_support)
		return false;

	/* PME-capable in principle, but not from the target power state */
	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
		return false;

	if (device_can_wakeup(&dev->dev))
		return true;

	while (bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (device_can_wakeup(&bridge->dev))
			return true;

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge)
		return device_can_wakeup(bus->bridge);

	return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);
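
/*
 * Example (illustrative sketch): some network drivers drop their runtime PM
 * usage count at probe time only when run-time wake is possible, so the
 * device is runtime-suspended only if it can signal wakeup:
 *
 *	if (pci_dev_run_wake(pdev))
 *		pm_runtime_put_noidle(&pdev->dev);
 */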

/**
 * pci_dev_need_resume - Check if it is necessary to resume the device.
 * @pci_dev: Device to check.
 *
 * Return 'true' if the device is not runtime-suspended, if it has to be
 * reconfigured due to a difference in wakeup settings between system and
 * runtime suspend, or if its current power state is not suitable for the
 * upcoming (system-wide) transition.
 */
bool pci_dev_need_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;
	pci_power_t target_state;

	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
		return true;

	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));

	/*
	 * If the earlier platform check has not triggered, D3cold is just power
	 * removal on top of D3hot, so no need to resume the device in that
	 * case.
	 */
	return target_state != pci_dev->current_state &&
	       target_state != PCI_D3cold &&
	       pci_dev->current_state != PCI_D3hot;
}

/**
 * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
 * @pci_dev: Device to check.
 *
 * If the device is suspended and it is not configured for system wakeup,
 * disable PME for it to prevent it from waking up the system unnecessarily.
 *
 * Note that if the device's power state is D3cold and the platform check in
 * pci_dev_need_resume() has not triggered, the device's configuration need not
 * be changed.
 */
void pci_dev_adjust_pme(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
	    pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, false);

	spin_unlock_irq(&dev->power.lock);
}

/**
 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
 * @pci_dev: Device to handle.
 *
 * If the device is runtime suspended and wakeup-capable, enable PME for it as
 * it might have been disabled during the prepare phase of system suspend if
 * the device was not configured for system wakeup.
 */
void pci_dev_complete_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	if (!pci_dev_run_wake(pci_dev))
		return;

	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, true);

	spin_unlock_irq(&dev->power.lock);
}

void pci_config_pm_runtime_get(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	if (parent)
		pm_runtime_get_sync(parent);
	pm_runtime_get_noresume(dev);
	/*
	 * pdev->current_state is set to PCI_D3cold during suspending,
	 * so wait until suspending completes
	 */
	pm_runtime_barrier(dev);
	/*
	 * Only need to resume devices in D3cold, because config
	 * registers are still accessible for devices suspended but
	 * not in D3cold.
	 */
	if (pdev->current_state == PCI_D3cold)
		pm_runtime_resume(dev);
}

void pci_config_pm_runtime_put(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	pm_runtime_put(dev);
	if (parent)
		pm_runtime_put_sync(parent);
}

static const struct dmi_system_id bridge_d3_blacklist[] = {
#ifdef CONFIG_X86
	{
		/*
		 * Gigabyte X299 root port is not marked as hotplug capable
		 * which allows Linux to power manage it. However, this
		 * confuses the BIOS SMI handler so don't power manage root
		 * ports on that system.
		 */
		.ident = "X299 DESIGNARE EX-CF",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
		},
	},
#endif
	{ }
};

/**
 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
 * @bridge: Bridge to check
 *
 * This function checks if it is possible to move the bridge to D3.
 * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
 */
bool pci_bridge_d3_possible(struct pci_dev *bridge)
{
	if (!pci_is_pcie(bridge))
		return false;

	switch (pci_pcie_type(bridge)) {
	case PCI_EXP_TYPE_ROOT_PORT:
	case PCI_EXP_TYPE_UPSTREAM:
	case PCI_EXP_TYPE_DOWNSTREAM:
		if (pci_bridge_d3_disable)
			return false;

		/*
		 * Hotplug ports handled by firmware in System Management Mode
		 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
		 */
		if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
			return false;

		if (pci_bridge_d3_force)
			return true;

		/* Even the oldest 2010 Thunderbolt controller supports D3. */
		if (bridge->is_thunderbolt)
			return true;

		/* Platform might know better if the bridge supports D3 */
		if (platform_pci_bridge_d3(bridge))
			return true;

		/*
		 * Hotplug ports handled natively by the OS were not validated
		 * by vendors for runtime D3 at least until 2018 because there
		 * was no OS support.
		 */
		if (bridge->is_hotplug_bridge)
			return false;

		if (dmi_check_system(bridge_d3_blacklist))
			return false;

		/*
		 * It should be safe to put PCIe ports from 2015 or newer
		 * to D3.
		 */
		if (dmi_get_bios_year() >= 2015)
			return true;
		break;
	}

	return false;
}

static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
{
	bool *d3cold_ok = data;

	if (/* The device needs to be allowed to go D3cold ... */
	    dev->no_d3cold || !dev->d3cold_allowed ||

	    /* ... and if it is wakeup capable to do so from D3cold. */
	    (device_may_wakeup(&dev->dev) &&
	     !pci_pme_capable(dev, PCI_D3cold)) ||

	    /* If it is a bridge it must be allowed to go to D3. */
	    !pci_power_manageable(dev))

		*d3cold_ok = false;

	return !*d3cold_ok;
}

/*
 * pci_bridge_d3_update - Update bridge D3 capabilities
 * @dev: PCI device which is changed
 *
 * Update upstream bridge PM capabilities accordingly depending on if the
 * device PM configuration was changed or the device is being removed. The
 * change is also propagated upstream.
 */
void pci_bridge_d3_update(struct pci_dev *dev)
{
	bool remove = !device_is_registered(&dev->dev);
	struct pci_dev *bridge;
	bool d3cold_ok = true;

	bridge = pci_upstream_bridge(dev);
	if (!bridge || !pci_bridge_d3_possible(bridge))
		return;

	/*
	 * If D3 is currently allowed for the bridge, removing one of its
	 * children won't change that.
	 */
	if (remove && bridge->bridge_d3)
		return;

	/*
	 * If D3 is currently allowed for the bridge and a child is added or
	 * changed, disallowance of D3 can only be caused by that child, so
	 * we only need to check that single device, not any of its siblings.
	 *
	 * If D3 is currently not allowed for the bridge, checking the device
	 * first may allow us to skip checking its siblings.
	 */
	if (!remove)
		pci_dev_check_d3cold(dev, &d3cold_ok);

	/*
	 * If D3 is currently not allowed for the bridge, this may be caused
	 * either by the device being changed/removed or any of its siblings,
	 * so we need to go through all children to find out if one of them
	 * continues to block D3.
	 */
	if (d3cold_ok && !bridge->bridge_d3)
		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
			     &d3cold_ok);

	if (bridge->bridge_d3 != d3cold_ok) {
		bridge->bridge_d3 = d3cold_ok;
		/* Propagate change to upstream bridges */
		pci_bridge_d3_update(bridge);
	}
}

/**
 * pci_d3cold_enable - Enable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to enable D3cold from the device
 * they handle. It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */
void pci_d3cold_enable(struct pci_dev *dev)
{
	if (dev->no_d3cold) {
		dev->no_d3cold = false;
		pci_bridge_d3_update(dev);
	}
}
EXPORT_SYMBOL_GPL(pci_d3cold_enable);

/**
 * pci_d3cold_disable - Disable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to disable D3cold from the device
 * they handle. It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */
void pci_d3cold_disable(struct pci_dev *dev)
{
	if (!dev->no_d3cold) {
		dev->no_d3cold = true;
		pci_bridge_d3_update(dev);
	}
}
EXPORT_SYMBOL_GPL(pci_d3cold_disable);
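
/*
 * Example (illustrative sketch): a driver that knows a particular hardware
 * revision cannot survive D3cold can opt out at probe time; the revision
 * check (my_rev_is_broken()) is a hypothetical placeholder:
 *
 *	if (my_rev_is_broken(pdev))
 *		pci_d3cold_disable(pdev);
 */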

/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 status;
	u16 pmc;

	pm_runtime_forbid(&dev->dev);
	pm_runtime_set_active(&dev->dev);
	pm_runtime_enable(&dev->dev);
	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;

	dev->pm_cap = 0;
	dev->pme_support = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;
	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		pci_err(dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;
	dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
	dev->bridge_d3 = pci_bridge_d3_possible(dev);
	dev->d3cold_allowed = true;

	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			pci_info(dev, "supports%s%s\n",
				 dev->d1_support ? " D1" : "",
				 dev->d2_support ? " D2" : "");
	}

	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		pci_info(dev, "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		dev->pme_poll = true;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let the user space enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
	}

	pci_read_config_word(dev, PCI_STATUS, &status);
	if (status & PCI_STATUS_IMM_READY)
		dev->imm_ready = 1;
}

static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
{
	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;

	switch (prop) {
	case PCI_EA_P_MEM:
	case PCI_EA_P_VF_MEM:
		flags |= IORESOURCE_MEM;
		break;
	case PCI_EA_P_MEM_PREFETCH:
	case PCI_EA_P_VF_MEM_PREFETCH:
		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
		break;
	case PCI_EA_P_IO:
		flags |= IORESOURCE_IO;
		break;
	default:
		return 0;
	}

	return flags;
}

static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
					    u8 prop)
{
	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
		return &dev->resource[bei];
#ifdef CONFIG_PCI_IOV
	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
		return &dev->resource[PCI_IOV_RESOURCES +
				      bei - PCI_EA_BEI_VF_BAR0];
#endif
	else if (bei == PCI_EA_BEI_ROM)
		return &dev->resource[PCI_ROM_RESOURCE];
	else
		return NULL;
}

/* Read an Enhanced Allocation (EA) entry */
static int pci_ea_read(struct pci_dev *dev, int offset)
{
	struct resource *res;
	int ent_size, ent_offset = offset;
	resource_size_t start, end;
	unsigned long flags;
	u32 dw0, bei, base, max_offset;
	u8 prop;
	bool support_64 = (sizeof(resource_size_t) >= 8);

	pci_read_config_dword(dev, ent_offset, &dw0);
	ent_offset += 4;

	/* Entry size field indicates DWORDs after 1st */
	ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;

	if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
		goto out;

	bei = (dw0 & PCI_EA_BEI) >> 4;
	prop = (dw0 & PCI_EA_PP) >> 8;

	/*
	 * If the Property is in the reserved range, try the Secondary
	 * Property instead.
	 */
	if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
		prop = (dw0 & PCI_EA_SP) >> 16;
	if (prop > PCI_EA_P_BRIDGE_IO)
		goto out;

	res = pci_ea_get_resource(dev, bei, prop);
	if (!res) {
		pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
		goto out;
	}

	flags = pci_ea_flags(dev, prop);
	if (!flags) {
		pci_err(dev, "Unsupported EA properties: %#x\n", prop);
		goto out;
	}

	/* Read Base */
	pci_read_config_dword(dev, ent_offset, &base);
	start = (base & PCI_EA_FIELD_MASK);
	ent_offset += 4;

	/* Read MaxOffset */
	pci_read_config_dword(dev, ent_offset, &max_offset);
	ent_offset += 4;

	/* Read Base MSBs (if 64-bit entry) */
	if (base & PCI_EA_IS_64) {
		u32 base_upper;

		pci_read_config_dword(dev, ent_offset, &base_upper);
		ent_offset += 4;

		flags |= IORESOURCE_MEM_64;

		/* entry starts above 32-bit boundary, can't use */
		if (!support_64 && base_upper)
			goto out;

		if (support_64)
			start |= ((u64)base_upper << 32);
	}

	end = start + (max_offset | 0x03);

	/* Read MaxOffset MSBs (if 64-bit entry) */
	if (max_offset & PCI_EA_IS_64) {
		u32 max_offset_upper;

		pci_read_config_dword(dev, ent_offset, &max_offset_upper);
		ent_offset += 4;

		flags |= IORESOURCE_MEM_64;

		/* entry too big, can't use */
		if (!support_64 && max_offset_upper)
			goto out;

		if (support_64)
			end += ((u64)max_offset_upper << 32);
	}

	if (end < start) {
		pci_err(dev, "EA Entry crosses address boundary\n");
		goto out;
	}

	if (ent_size != ent_offset - offset) {
		pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
			ent_size, ent_offset - offset);
		goto out;
	}

	res->name = pci_name(dev);
	res->start = start;
	res->end = end;
	res->flags = flags;

	if (bei <= PCI_EA_BEI_BAR5)
		pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
			 bei, res, prop);
	else if (bei == PCI_EA_BEI_ROM)
		pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
			 res, prop);
	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
		pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
			 bei - PCI_EA_BEI_VF_BAR0, res, prop);
	else
		pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
			 bei, res, prop);

out:
	return offset + ent_size;
}

/* Enhanced Allocation Initialization */
void pci_ea_init(struct pci_dev *dev)
{
	int ea;
	u8 num_ent;
	int offset;
	int i;

	/* find PCI EA capability in list */
	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
	if (!ea)
		return;

	/* determine the number of entries */
	pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
				 &num_ent);
	num_ent &= PCI_EA_NUM_ENT_MASK;

	offset = ea + PCI_EA_FIRST_ENT;

	/* Skip DWORD 2 for type 1 functions */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
		offset += 4;

	/* parse each EA entry */
	for (i = 0; i < num_ent; ++i)
		offset = pci_ea_read(dev, offset);
}

static void pci_add_saved_cap(struct pci_dev *pci_dev,
			      struct pci_cap_saved_state *new_cap)
{
	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
}

/**
 * _pci_add_cap_save_buffer - allocate buffer for saving given
 *			      capability registers
 * @dev: the PCI device
 * @cap: the capability to allocate the buffer for
 * @extended: Standard or Extended capability ID
 * @size: requested size of the buffer
 */
static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
				    bool extended, unsigned int size)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	if (extended)
		pos = pci_find_ext_capability(dev, cap);
	else
		pos = pci_find_capability(dev, cap);

	if (!pos)
		return 0;

	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
	if (!save_state)
		return -ENOMEM;

	save_state->cap.cap_nr = cap;
	save_state->cap.cap_extended = extended;
	save_state->cap.size = size;
	pci_add_saved_cap(dev, save_state);

	return 0;
}

int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
{
	return _pci_add_cap_save_buffer(dev, cap, false, size);
}

int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
{
	return _pci_add_cap_save_buffer(dev, cap, true, size);
}

/**
 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
 * @dev: the PCI device
 */
void pci_allocate_cap_save_buffers(struct pci_dev *dev)
{
	int error;

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
					PCI_EXP_SAVE_REGS * sizeof(u16));
	if (error)
		pci_err(dev, "unable to preallocate PCI Express save buffer\n");

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
	if (error)
		pci_err(dev, "unable to preallocate PCI-X save buffer\n");

	error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
					    2 * sizeof(u16));
	if (error)
		pci_err(dev, "unable to allocate suspend buffer for LTR\n");

	pci_allocate_vc_save_buffers(dev);
}

void pci_free_cap_save_buffers(struct pci_dev *dev)
{
	struct pci_cap_saved_state *tmp;
	struct hlist_node *n;

	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
		kfree(tmp);
}

/**
 * pci_configure_ari - enable or disable ARI forwarding
 * @dev: the PCI device
 *
 * If @dev and its upstream bridge both support ARI, enable ARI in the
 * bridge. Otherwise, disable ARI in the bridge.
 */
void pci_configure_ari(struct pci_dev *dev)
{
	u32 cap;
	struct pci_dev *bridge;

	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
		return;

	bridge = dev->bus->self;
	if (!bridge)
		return;

	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_DEVCAP2_ARI))
		return;

	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
		pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
					 PCI_EXP_DEVCTL2_ARI);
		bridge->ari_enabled = 1;
	} else {
		pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
					   PCI_EXP_DEVCTL2_ARI);
		bridge->ari_enabled = 0;
	}
}

static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
{
	int pos;
	u16 cap, ctrl;

	pos = pdev->acs_cap;
	if (!pos)
		return false;

	/*
	 * Except for egress control, capabilities are either required
	 * or only required if controllable. Features missing from the
	 * capability field can therefore be assumed as hard-wired enabled.
	 */
	pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
	acs_flags &= (cap | PCI_ACS_EC);

	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
	return (ctrl & acs_flags) == acs_flags;
}

/**
 * pci_acs_enabled - test ACS against required flags for a given device
 * @pdev: device to test
 * @acs_flags: required PCI ACS flags
 *
 * Return true if the device supports the provided flags. Automatically
 * filters out flags that are not implemented on multifunction devices.
 *
 * Note that this interface checks the effective ACS capabilities of the
 * device rather than the actual capabilities. For instance, most single
 * function endpoints are not required to support ACS because they have no
 * opportunity for peer-to-peer access. We therefore return 'true'
 * regardless of whether the device exposes an ACS capability. This makes
 * it much easier for callers of this function to ignore the actual type
 * or topology of the device when testing ACS support.
 */
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
{
	int ret;

	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
	if (ret >= 0)
		return ret > 0;

	/*
	 * Conventional PCI and PCI-X devices never support ACS, either
	 * effectively or actually. The shared bus topology implies that
	 * any device on the bus can receive or snoop DMA.
	 */
	if (!pci_is_pcie(pdev))
		return false;

	switch (pci_pcie_type(pdev)) {
	/*
	 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
	 * but since their primary interface is PCI/X, we conservatively
	 * handle them as we would a non-PCIe device.
	 */
	case PCI_EXP_TYPE_PCIE_BRIDGE:
	/*
	 * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never
	 * applicable... must never implement an ACS Extended Capability...".
	 * This seems arbitrary, but we take a conservative interpretation
	 * of this statement.
	 */
	case PCI_EXP_TYPE_PCI_BRIDGE:
	case PCI_EXP_TYPE_RC_EC:
		return false;
	/*
	 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
	 * implement ACS in order to indicate their peer-to-peer capabilities,
	 * regardless of whether they are single- or multi-function devices.
	 */
	case PCI_EXP_TYPE_DOWNSTREAM:
	case PCI_EXP_TYPE_ROOT_PORT:
		return pci_acs_flags_enabled(pdev, acs_flags);
	/*
	 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
	 * implemented by the remaining PCIe types to indicate peer-to-peer
	 * capabilities, but only when they are part of a multifunction
	 * device. The footnote for section 6.12 indicates the specific
	 * PCIe types included here.
	 */
	case PCI_EXP_TYPE_ENDPOINT:
	case PCI_EXP_TYPE_UPSTREAM:
	case PCI_EXP_TYPE_LEG_END:
	case PCI_EXP_TYPE_RC_END:
		if (!pdev->multifunction)
			break;

		return pci_acs_flags_enabled(pdev, acs_flags);
	}

	/*
	 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
	 * to single function devices with the exception of downstream ports.
	 */
	return true;
}

/**
 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
 * @start: starting downstream device
 * @end: ending upstream device or NULL to search to the root bus
 * @acs_flags: required flags
 *
 * Walk up a device tree from start to end testing PCI ACS support. If
 * any step along the way does not support the required flags, return false.
 */
bool pci_acs_path_enabled(struct pci_dev *start,
			  struct pci_dev *end, u16 acs_flags)
{
	struct pci_dev *pdev, *parent = start;

	do {
		pdev = parent;

		if (!pci_acs_enabled(pdev, acs_flags))
			return false;

		if (pci_is_root_bus(pdev->bus))
			return (end == NULL);

		parent = pdev->bus->self;
	} while (pdev != end);

	return true;
}
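
/*
 * Example (illustrative sketch): before setting up peer-to-peer DMA, a
 * caller can require that request/completion redirect and upstream
 * forwarding are enforced on every bridge between the device and the root,
 * so TLPs cannot bypass the IOMMU; IOMMU grouping applies a similar test:
 *
 *	u16 flags = PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;
 *
 *	if (!pci_acs_path_enabled(client, NULL, flags))
 *		return -EPERM;
 */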
3510
3511 /**
3512 * pci_acs_init - Initialize ACS if hardware supports it
3513 * @dev: the PCI device
3514 */
pci_acs_init(struct pci_dev * dev)3515 void pci_acs_init(struct pci_dev *dev)
3516 {
3517 dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3518
3519 /*
3520 * Attempt to enable ACS regardless of capability because some Root
3521 * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
3522 * the standard ACS capability but still support ACS via those
3523 * quirks.
3524 */
3525 pci_enable_acs(dev);
3526 }
3527
3528 /**
3529 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3530 * @pdev: PCI device
3531 * @bar: BAR to find
3532 *
3533 * Helper to find the position of the ctrl register for a BAR.
3534 * Returns -ENOTSUPP if resizable BARs are not supported at all.
3535 * Returns -ENOENT if no ctrl register for the BAR could be found.
3536 */
pci_rebar_find_pos(struct pci_dev * pdev,int bar)3537 static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3538 {
3539 unsigned int pos, nbars, i;
3540 u32 ctrl;
3541
3542 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3543 if (!pos)
3544 return -ENOTSUPP;
3545
3546 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3547 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3548 PCI_REBAR_CTRL_NBAR_SHIFT;
3549
3550 for (i = 0; i < nbars; i++, pos += 8) {
3551 int bar_idx;
3552
3553 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3554 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3555 if (bar_idx == bar)
3556 return pos;
3557 }
3558
3559 return -ENOENT;
3560 }
3561
3562 /**
3563 * pci_rebar_get_possible_sizes - get possible sizes for BAR
3564 * @pdev: PCI device
3565 * @bar: BAR to query
3566 *
3567 * Get the possible sizes of a resizable BAR as bitmask defined in the spec
3568 * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
3569 */
3570 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3571 {
3572 int pos;
3573 u32 cap;
3574
3575 pos = pci_rebar_find_pos(pdev, bar);
3576 if (pos < 0)
3577 return 0;
3578
3579 pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3580 return (cap & PCI_REBAR_CAP_SIZES) >> 4;
3581 }
3582
3583 /**
3584 * pci_rebar_get_current_size - get the current size of a BAR
3585 * @pdev: PCI device
3586 * @bar: BAR to query
3587 *
3588 * Read the size of a BAR from the resizable BAR config.
3589 * Returns size if found or negative error code.
3590 */
3591 int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3592 {
3593 int pos;
3594 u32 ctrl;
3595
3596 pos = pci_rebar_find_pos(pdev, bar);
3597 if (pos < 0)
3598 return pos;
3599
3600 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3601 return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
3602 }
3603
3604 /**
3605 * pci_rebar_set_size - set a new size for a BAR
3606 * @pdev: PCI device
3607 * @bar: BAR to set size to
3608 * @size: new size as defined in the spec (0=1MB, 19=512GB)
3609 *
3610 * Set the new size of a BAR as defined in the spec.
3611 * Returns zero if resizing was successful, error code otherwise.
3612 */
3613 int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3614 {
3615 int pos;
3616 u32 ctrl;
3617
3618 pos = pci_rebar_find_pos(pdev, bar);
3619 if (pos < 0)
3620 return pos;
3621
3622 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3623 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3624 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
3625 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3626 return 0;
3627 }
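
/*
 * Usage example (an illustrative sketch): a driver that wants to grow
 * BAR 0 to the largest advertised size can combine the helpers above
 * ("pdev" stands for the driver's struct pci_dev):
 *
 *	u32 sizes = pci_rebar_get_possible_sizes(pdev, 0);
 *
 *	if (sizes)
 *		pci_rebar_set_size(pdev, 0, fls(sizes) - 1);
 *
 * Since the encoding is log2(size) - 20, fls() - 1 on the capability
 * bitmask yields the largest supported encoding directly. A real driver
 * must also release and re-assign the BAR's resource around the resize;
 * this fragment shows only the capability handshake.
 */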
3628
3629 /**
3630 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3631 * @dev: the PCI device
3632 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3633 * PCI_EXP_DEVCAP2_ATOMIC_COMP32
3634 * PCI_EXP_DEVCAP2_ATOMIC_COMP64
3635 * PCI_EXP_DEVCAP2_ATOMIC_COMP128
3636 *
3637 * Return 0 if all upstream bridges support AtomicOp routing, egress
3638 * blocking is disabled on all upstream ports, and the root port supports
3639 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3640 * AtomicOp completion), or negative otherwise.
3641 */
3642 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3643 {
3644 struct pci_bus *bus = dev->bus;
3645 struct pci_dev *bridge;
3646 u32 cap, ctl2;
3647
3648 if (!pci_is_pcie(dev))
3649 return -EINVAL;
3650
3651 /*
3652 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3653 * AtomicOp requesters. For now, we only support endpoints as
3654 * requesters and root ports as completers. No endpoints as
3655 * completers, and no peer-to-peer.
3656 */
3657
3658 switch (pci_pcie_type(dev)) {
3659 case PCI_EXP_TYPE_ENDPOINT:
3660 case PCI_EXP_TYPE_LEG_END:
3661 case PCI_EXP_TYPE_RC_END:
3662 break;
3663 default:
3664 return -EINVAL;
3665 }
3666
3667 while (bus->parent) {
3668 bridge = bus->self;
3669
3670 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3671
3672 switch (pci_pcie_type(bridge)) {
3673 /* Ensure switch ports support AtomicOp routing */
3674 case PCI_EXP_TYPE_UPSTREAM:
3675 case PCI_EXP_TYPE_DOWNSTREAM:
3676 if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3677 return -EINVAL;
3678 break;
3679
3680 /* Ensure root port supports all the sizes we care about */
3681 case PCI_EXP_TYPE_ROOT_PORT:
3682 if ((cap & cap_mask) != cap_mask)
3683 return -EINVAL;
3684 break;
3685 }
3686
3687 /* Ensure upstream ports don't block AtomicOps on egress */
3688 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3689 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3690 &ctl2);
3691 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3692 return -EINVAL;
3693 }
3694
3695 bus = bus->parent;
3696 }
3697
3698 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3699 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3700 return 0;
3701 }
3702 EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
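
/*
 * Usage example (an illustrative sketch): an endpoint driver that issues
 * 64-bit AtomicOp requests would typically make this call once during
 * probe and fall back to plain MMIO if the path does not qualify:
 *
 *	if (pci_enable_atomic_ops_to_root(pdev,
 *					  PCI_EXP_DEVCAP2_ATOMIC_COMP64))
 *		dev_info(&pdev->dev, "PCIe AtomicOps not available\n");
 */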
3703
3704 /**
3705 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
3706 * @dev: the PCI device
3707 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3708 *
3709 * Perform INTx swizzling for a device behind one level of bridge. This is
3710 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
3711 * behind bridges on add-in cards. For devices with ARI enabled, the slot
3712 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
3713 * the PCI Express Base Specification, Revision 2.1)
3714 */
3715 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3716 {
3717 int slot;
3718
3719 if (pci_ari_enabled(dev->bus))
3720 slot = 0;
3721 else
3722 slot = PCI_SLOT(dev->devfn);
3723
3724 return (((pin - 1) + slot) % 4) + 1;
3725 }
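
/*
 * Worked example: INTB (pin 2) on a device in slot 3 swizzles to
 * (((2 - 1) + 3) % 4) + 1 = 1, i.e. INTA on the upstream bridge.
 */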
3726
3727 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3728 {
3729 u8 pin;
3730
3731 pin = dev->pin;
3732 if (!pin)
3733 return -1;
3734
3735 while (!pci_is_root_bus(dev->bus)) {
3736 pin = pci_swizzle_interrupt_pin(dev, pin);
3737 dev = dev->bus->self;
3738 }
3739 *bridge = dev;
3740 return pin;
3741 }
3742
3743 /**
3744 * pci_common_swizzle - swizzle INTx all the way to root bridge
3745 * @dev: the PCI device
3746 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3747 *
3748 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
3749 * bridges all the way up to a PCI root bus.
3750 */
3751 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3752 {
3753 u8 pin = *pinp;
3754
3755 while (!pci_is_root_bus(dev->bus)) {
3756 pin = pci_swizzle_interrupt_pin(dev, pin);
3757 dev = dev->bus->self;
3758 }
3759 *pinp = pin;
3760 return PCI_SLOT(dev->devfn);
3761 }
3762 EXPORT_SYMBOL_GPL(pci_common_swizzle);
3763
3764 /**
3765 * pci_release_region - Release a PCI BAR
3766 * @pdev: PCI device whose resources were previously reserved by
3767 * pci_request_region()
3768 * @bar: BAR to release
3769 *
3770 * Releases the PCI I/O and memory resources previously reserved by a
3771 * successful call to pci_request_region(). Call this function only
3772 * after all use of the PCI regions has ceased.
3773 */
3774 void pci_release_region(struct pci_dev *pdev, int bar)
3775 {
3776 struct pci_devres *dr;
3777
3778 if (pci_resource_len(pdev, bar) == 0)
3779 return;
3780 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3781 release_region(pci_resource_start(pdev, bar),
3782 pci_resource_len(pdev, bar));
3783 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3784 release_mem_region(pci_resource_start(pdev, bar),
3785 pci_resource_len(pdev, bar));
3786
3787 dr = find_pci_dr(pdev);
3788 if (dr)
3789 dr->region_mask &= ~(1 << bar);
3790 }
3791 EXPORT_SYMBOL(pci_release_region);
3792
3793 /**
3794 * __pci_request_region - Reserve PCI I/O and memory resource
3795 * @pdev: PCI device whose resources are to be reserved
3796 * @bar: BAR to be reserved
3797 * @res_name: Name to be associated with resource.
3798 * @exclusive: whether the region access is exclusive or not
3799 *
3800 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3801 * being reserved by owner @res_name. Do not access any
3802 * address inside the PCI regions unless this call returns
3803 * successfully.
3804 *
3805 * If @exclusive is set, then the region is marked so that userspace
3806 * is explicitly not allowed to map the resource via /dev/mem or
3807 * sysfs MMIO access.
3808 *
3809 * Returns 0 on success, or %EBUSY on error. A warning
3810 * message is also printed on failure.
3811 */
3812 static int __pci_request_region(struct pci_dev *pdev, int bar,
3813 const char *res_name, int exclusive)
3814 {
3815 struct pci_devres *dr;
3816
3817 if (pci_resource_len(pdev, bar) == 0)
3818 return 0;
3819
3820 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3821 if (!request_region(pci_resource_start(pdev, bar),
3822 pci_resource_len(pdev, bar), res_name))
3823 goto err_out;
3824 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3825 if (!__request_mem_region(pci_resource_start(pdev, bar),
3826 pci_resource_len(pdev, bar), res_name,
3827 exclusive))
3828 goto err_out;
3829 }
3830
3831 dr = find_pci_dr(pdev);
3832 if (dr)
3833 dr->region_mask |= 1 << bar;
3834
3835 return 0;
3836
3837 err_out:
3838 pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3839 &pdev->resource[bar]);
3840 return -EBUSY;
3841 }
3842
3843 /**
3844 * pci_request_region - Reserve PCI I/O and memory resource
3845 * @pdev: PCI device whose resources are to be reserved
3846 * @bar: BAR to be reserved
3847 * @res_name: Name to be associated with resource
3848 *
3849 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3850 * being reserved by owner @res_name. Do not access any
3851 * address inside the PCI regions unless this call returns
3852 * successfully.
3853 *
3854 * Returns 0 on success, or %EBUSY on error. A warning
3855 * message is also printed on failure.
3856 */
3857 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3858 {
3859 return __pci_request_region(pdev, bar, res_name, 0);
3860 }
3861 EXPORT_SYMBOL(pci_request_region);
3862
3863 /**
3864 * pci_release_selected_regions - Release selected PCI I/O and memory resources
3865 * @pdev: PCI device whose resources were previously reserved
3866 * @bars: Bitmask of BARs to be released
3867 *
3868 * Release selected PCI I/O and memory resources previously reserved.
3869 * Call this function only after all use of the PCI regions has ceased.
3870 */
3871 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3872 {
3873 int i;
3874
3875 for (i = 0; i < PCI_STD_NUM_BARS; i++)
3876 if (bars & (1 << i))
3877 pci_release_region(pdev, i);
3878 }
3879 EXPORT_SYMBOL(pci_release_selected_regions);
3880
3881 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3882 const char *res_name, int excl)
3883 {
3884 int i;
3885
3886 for (i = 0; i < PCI_STD_NUM_BARS; i++)
3887 if (bars & (1 << i))
3888 if (__pci_request_region(pdev, i, res_name, excl))
3889 goto err_out;
3890 return 0;
3891
3892 err_out:
3893 while (--i >= 0)
3894 if (bars & (1 << i))
3895 pci_release_region(pdev, i);
3896
3897 return -EBUSY;
3898 }
3899
3901 /**
3902 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
3903 * @pdev: PCI device whose resources are to be reserved
3904 * @bars: Bitmask of BARs to be requested
3905 * @res_name: Name to be associated with resource
3906 */
3907 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3908 const char *res_name)
3909 {
3910 return __pci_request_selected_regions(pdev, bars, res_name, 0);
3911 }
3912 EXPORT_SYMBOL(pci_request_selected_regions);
3913
3914 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3915 const char *res_name)
3916 {
3917 return __pci_request_selected_regions(pdev, bars, res_name,
3918 IORESOURCE_EXCLUSIVE);
3919 }
3920 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3921
3922 /**
3923 * pci_release_regions - Release reserved PCI I/O and memory resources
3924 * @pdev: PCI device whose resources were previously reserved by
3925 * pci_request_regions()
3926 *
3927 * Releases all PCI I/O and memory resources previously reserved by a
3928 * successful call to pci_request_regions(). Call this function only
3929 * after all use of the PCI regions has ceased.
3930 */
3932 void pci_release_regions(struct pci_dev *pdev)
3933 {
3934 pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
3935 }
3936 EXPORT_SYMBOL(pci_release_regions);
3937
3938 /**
3939 * pci_request_regions - Reserve PCI I/O and memory resources
3940 * @pdev: PCI device whose resources are to be reserved
3941 * @res_name: Name to be associated with resource.
3942 *
3943 * Mark all PCI regions associated with PCI device @pdev as
3944 * being reserved by owner @res_name. Do not access any
3945 * address inside the PCI regions unless this call returns
3946 * successfully.
3947 *
3948 * Returns 0 on success, or %EBUSY on error. A warning
3949 * message is also printed on failure.
3950 */
3951 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
3952 {
3953 return pci_request_selected_regions(pdev,
3954 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
3955 }
3956 EXPORT_SYMBOL(pci_request_regions);
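
/*
 * Usage example (an illustrative sketch of a typical probe() sequence;
 * "my_driver" and the error labels are hypothetical):
 *
 *	void __iomem *regs;
 *	int err;
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *
 *	err = pci_request_regions(pdev, "my_driver");
 *	if (err)
 *		goto err_disable_device;
 *
 *	regs = pci_iomap(pdev, 0, 0);
 *	if (!regs) {
 *		err = -ENOMEM;
 *		goto err_release_regions;
 *	}
 */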
3957
3958 /**
3959 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
3960 * @pdev: PCI device whose resources are to be reserved
3961 * @res_name: Name to be associated with resource.
3962 *
3963 * Mark all PCI regions associated with PCI device @pdev as being reserved
3964 * by owner @res_name. Do not access any address inside the PCI regions
3965 * unless this call returns successfully.
3966 *
3967 * pci_request_regions_exclusive() will mark the region so that /dev/mem
3968 * and the sysfs MMIO access will not be allowed.
3969 *
3970 * Returns 0 on success, or %EBUSY on error. A warning message is also
3971 * printed on failure.
3972 */
3973 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
3974 {
3975 return pci_request_selected_regions_exclusive(pdev,
3976 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
3977 }
3978 EXPORT_SYMBOL(pci_request_regions_exclusive);
3979
3980 /*
3981 * Record the PCI IO range (expressed as CPU physical address + size).
3982 * Return a negative value if an error has occurred, zero otherwise
3983 */
3984 int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
3985 resource_size_t size)
3986 {
3987 int ret = 0;
3988 #ifdef PCI_IOBASE
3989 struct logic_pio_hwaddr *range;
3990
3991 if (!size || addr + size < addr)
3992 return -EINVAL;
3993
3994 range = kzalloc(sizeof(*range), GFP_ATOMIC);
3995 if (!range)
3996 return -ENOMEM;
3997
3998 range->fwnode = fwnode;
3999 range->size = size;
4000 range->hw_start = addr;
4001 range->flags = LOGIC_PIO_CPU_MMIO;
4002
4003 ret = logic_pio_register_range(range);
4004 if (ret)
4005 kfree(range);
4006 #endif
4007
4008 return ret;
4009 }
4010
4011 phys_addr_t pci_pio_to_address(unsigned long pio)
4012 {
4013 phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
4014
4015 #ifdef PCI_IOBASE
4016 if (pio >= MMIO_UPPER_LIMIT)
4017 return address;
4018
4019 address = logic_pio_to_hwaddr(pio);
4020 #endif
4021
4022 return address;
4023 }
4024
4025 unsigned long __weak pci_address_to_pio(phys_addr_t address)
4026 {
4027 #ifdef PCI_IOBASE
4028 return logic_pio_trans_cpuaddr(address);
4029 #else
4030 if (address > IO_SPACE_LIMIT)
4031 return (unsigned long)-1;
4032
4033 return (unsigned long) address;
4034 #endif
4035 }
4036
4037 /**
4038 * pci_remap_iospace - Remap the memory mapped I/O space
4039 * @res: Resource describing the I/O space
4040 * @phys_addr: physical address of range to be mapped
4041 *
4042 * Remap the memory mapped I/O space described by the @res and the CPU
4043 * physical address @phys_addr into virtual address space. Only
4044 * architectures that have memory mapped IO functions defined (and the
4045 * PCI_IOBASE value defined) should call this function.
4046 */
4047 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4048 {
4049 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4050 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4051
4052 if (!(res->flags & IORESOURCE_IO))
4053 return -EINVAL;
4054
4055 if (res->end > IO_SPACE_LIMIT)
4056 return -EINVAL;
4057
4058 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4059 pgprot_device(PAGE_KERNEL));
4060 #else
4061 /*
4062 * This architecture does not have memory mapped I/O space,
4063 * so this function should never be called
4064 */
4065 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4066 return -ENODEV;
4067 #endif
4068 }
4069 EXPORT_SYMBOL(pci_remap_iospace);
4070
4071 /**
4072 * pci_unmap_iospace - Unmap the memory mapped I/O space
4073 * @res: resource to be unmapped
4074 *
4075 * Unmap the CPU virtual address @res from virtual address space. Only
4076 * architectures that have memory mapped IO functions defined (and the
4077 * PCI_IOBASE value defined) should call this function.
4078 */
4079 void pci_unmap_iospace(struct resource *res)
4080 {
4081 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4082 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4083
4084 unmap_kernel_range(vaddr, resource_size(res));
4085 #endif
4086 }
4087 EXPORT_SYMBOL(pci_unmap_iospace);
4088
4089 static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
4090 {
4091 struct resource **res = ptr;
4092
4093 pci_unmap_iospace(*res);
4094 }
4095
4096 /**
4097 * devm_pci_remap_iospace - Managed pci_remap_iospace()
4098 * @dev: Generic device to remap IO address for
4099 * @res: Resource describing the I/O space
4100 * @phys_addr: physical address of range to be mapped
4101 *
4102 * Managed pci_remap_iospace(). Map is automatically unmapped on driver
4103 * detach.
4104 */
4105 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
4106 phys_addr_t phys_addr)
4107 {
4108 const struct resource **ptr;
4109 int error;
4110
4111 ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4112 if (!ptr)
4113 return -ENOMEM;
4114
4115 error = pci_remap_iospace(res, phys_addr);
4116 if (error) {
4117 devres_free(ptr);
4118 } else {
4119 *ptr = res;
4120 devres_add(dev, ptr);
4121 }
4122
4123 return error;
4124 }
4125 EXPORT_SYMBOL(devm_pci_remap_iospace);
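
/*
 * Usage example (an illustrative sketch): host bridge drivers commonly
 * map their I/O window this way once the range has been registered:
 *
 *	ret = devm_pci_remap_iospace(dev, res,
 *				     pci_pio_to_address(res->start));
 *	if (ret)
 *		dev_warn(dev, "error %d: failed to map resource %pR\n",
 *			 ret, res);
 */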
4126
4127 /**
4128 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
4129 * @dev: Generic device to remap IO address for
4130 * @offset: Resource address to map
4131 * @size: Size of map
4132 *
4133 * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver
4134 * detach.
4135 */
4136 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4137 resource_size_t offset,
4138 resource_size_t size)
4139 {
4140 void __iomem **ptr, *addr;
4141
4142 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4143 if (!ptr)
4144 return NULL;
4145
4146 addr = pci_remap_cfgspace(offset, size);
4147 if (addr) {
4148 *ptr = addr;
4149 devres_add(dev, ptr);
4150 } else
4151 devres_free(ptr);
4152
4153 return addr;
4154 }
4155 EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4156
4157 /**
4158 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
4159 * @dev: generic device to handle the resource for
4160 * @res: configuration space resource to be handled
4161 *
4162 * Checks that a resource is a valid memory region, requests the memory
4163 * region and ioremaps with pci_remap_cfgspace() API that ensures the
4164 * proper PCI configuration space memory attributes are guaranteed.
4165 *
4166 * All operations are managed and will be undone on driver detach.
4167 *
4168 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
4169 * on failure. Usage example::
4170 *
4171 * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4172 * base = devm_pci_remap_cfg_resource(&pdev->dev, res);
4173 * if (IS_ERR(base))
4174 * return PTR_ERR(base);
4175 */
4176 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
4177 struct resource *res)
4178 {
4179 resource_size_t size;
4180 const char *name;
4181 void __iomem *dest_ptr;
4182
4183 BUG_ON(!dev);
4184
4185 if (!res || resource_type(res) != IORESOURCE_MEM) {
4186 dev_err(dev, "invalid resource\n");
4187 return IOMEM_ERR_PTR(-EINVAL);
4188 }
4189
4190 size = resource_size(res);
4191 name = res->name ?: dev_name(dev);
4192
4193 if (!devm_request_mem_region(dev, res->start, size, name)) {
4194 dev_err(dev, "can't request region for resource %pR\n", res);
4195 return IOMEM_ERR_PTR(-EBUSY);
4196 }
4197
4198 dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4199 if (!dest_ptr) {
4200 dev_err(dev, "ioremap failed for resource %pR\n", res);
4201 devm_release_mem_region(dev, res->start, size);
4202 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4203 }
4204
4205 return dest_ptr;
4206 }
4207 EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
4208
4209 static void __pci_set_master(struct pci_dev *dev, bool enable)
4210 {
4211 u16 old_cmd, cmd;
4212
4213 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4214 if (enable)
4215 cmd = old_cmd | PCI_COMMAND_MASTER;
4216 else
4217 cmd = old_cmd & ~PCI_COMMAND_MASTER;
4218 if (cmd != old_cmd) {
4219 pci_dbg(dev, "%s bus mastering\n",
4220 enable ? "enabling" : "disabling");
4221 pci_write_config_word(dev, PCI_COMMAND, cmd);
4222 }
4223 dev->is_busmaster = enable;
4224 }
4225
4226 /**
4227 * pcibios_setup - process "pci=" kernel boot arguments
4228 * @str: string used to pass in "pci=" kernel boot arguments
4229 *
4230 * Process kernel boot arguments. This is the default implementation.
4231 * Architecture specific implementations can override this as necessary.
4232 */
4233 char * __weak __init pcibios_setup(char *str)
4234 {
4235 return str;
4236 }
4237
4238 /**
4239 * pcibios_set_master - enable PCI bus-mastering for device dev
4240 * @dev: the PCI device to enable
4241 *
4242 * Enables PCI bus-mastering for the device. This is the default
4243 * implementation. Architecture specific implementations can override
4244 * this if necessary.
4245 */
4246 void __weak pcibios_set_master(struct pci_dev *dev)
4247 {
4248 u8 lat;
4249
4250 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4251 if (pci_is_pcie(dev))
4252 return;
4253
4254 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4255 if (lat < 16)
4256 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4257 else if (lat > pcibios_max_latency)
4258 lat = pcibios_max_latency;
4259 else
4260 return;
4261
4262 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4263 }
4264
4265 /**
4266 * pci_set_master - enables bus-mastering for device dev
4267 * @dev: the PCI device to enable
4268 *
4269 * Enables bus-mastering on the device and calls pcibios_set_master()
4270 * to do the needed arch specific settings.
4271 */
4272 void pci_set_master(struct pci_dev *dev)
4273 {
4274 __pci_set_master(dev, true);
4275 pcibios_set_master(dev);
4276 }
4277 EXPORT_SYMBOL(pci_set_master);
4278
4279 /**
4280 * pci_clear_master - disables bus-mastering for device dev
4281 * @dev: the PCI device to disable
4282 */
4283 void pci_clear_master(struct pci_dev *dev)
4284 {
4285 __pci_set_master(dev, false);
4286 }
4287 EXPORT_SYMBOL(pci_clear_master);
4288
4289 /**
4290 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
4291 * @dev: the PCI device for which MWI is to be enabled
4292 *
4293 * Helper function for pci_set_mwi.
4294 * Originally copied from drivers/net/acenic.c.
4295 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
4296 *
4297 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4298 */
4299 int pci_set_cacheline_size(struct pci_dev *dev)
4300 {
4301 u8 cacheline_size;
4302
4303 if (!pci_cache_line_size)
4304 return -EINVAL;
4305
4306 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
4307 equal to or multiple of the right value. */
4308 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4309 if (cacheline_size >= pci_cache_line_size &&
4310 (cacheline_size % pci_cache_line_size) == 0)
4311 return 0;
4312
4313 /* Write the correct value. */
4314 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4315 /* Read it back. */
4316 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4317 if (cacheline_size == pci_cache_line_size)
4318 return 0;
4319
4320 pci_info(dev, "cache line size of %d is not supported\n",
4321 pci_cache_line_size << 2);
4322
4323 return -EINVAL;
4324 }
4325 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4326
4327 /**
4328 * pci_set_mwi - enables memory-write-invalidate PCI transaction
4329 * @dev: the PCI device for which MWI is enabled
4330 *
4331 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4332 *
4333 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4334 */
4335 int pci_set_mwi(struct pci_dev *dev)
4336 {
4337 #ifdef PCI_DISABLE_MWI
4338 return 0;
4339 #else
4340 int rc;
4341 u16 cmd;
4342
4343 rc = pci_set_cacheline_size(dev);
4344 if (rc)
4345 return rc;
4346
4347 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4348 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4349 pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4350 cmd |= PCI_COMMAND_INVALIDATE;
4351 pci_write_config_word(dev, PCI_COMMAND, cmd);
4352 }
4353 return 0;
4354 #endif
4355 }
4356 EXPORT_SYMBOL(pci_set_mwi);
4357
4358 /**
4359 * pcim_set_mwi - a device-managed pci_set_mwi()
4360 * @dev: the PCI device for which MWI is enabled
4361 *
4362 * Managed pci_set_mwi().
4363 *
4364 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4365 */
4366 int pcim_set_mwi(struct pci_dev *dev)
4367 {
4368 struct pci_devres *dr;
4369
4370 dr = find_pci_dr(dev);
4371 if (!dr)
4372 return -ENOMEM;
4373
4374 dr->mwi = 1;
4375 return pci_set_mwi(dev);
4376 }
4377 EXPORT_SYMBOL(pcim_set_mwi);
4378
4379 /**
4380 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
4381 * @dev: the PCI device for which MWI is enabled
4382 *
4383 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4384 * Callers are not required to check the return value.
4385 *
4386 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4387 */
4388 int pci_try_set_mwi(struct pci_dev *dev)
4389 {
4390 #ifdef PCI_DISABLE_MWI
4391 return 0;
4392 #else
4393 return pci_set_mwi(dev);
4394 #endif
4395 }
4396 EXPORT_SYMBOL(pci_try_set_mwi);
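
/*
 * Usage example (an illustrative sketch): since MWI is purely a
 * performance hint, drivers typically enable it opportunistically and
 * ignore the result:
 *
 *	pci_try_set_mwi(pdev);
 */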
4397
4398 /**
4399 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
4400 * @dev: the PCI device to disable
4401 *
4402 * Disables PCI Memory-Write-Invalidate transaction on the device
4403 */
4404 void pci_clear_mwi(struct pci_dev *dev)
4405 {
4406 #ifndef PCI_DISABLE_MWI
4407 u16 cmd;
4408
4409 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4410 if (cmd & PCI_COMMAND_INVALIDATE) {
4411 cmd &= ~PCI_COMMAND_INVALIDATE;
4412 pci_write_config_word(dev, PCI_COMMAND, cmd);
4413 }
4414 #endif
4415 }
4416 EXPORT_SYMBOL(pci_clear_mwi);
4417
4418 /**
4419 * pci_intx - enables/disables PCI INTx for device dev
4420 * @pdev: the PCI device to operate on
4421 * @enable: boolean: whether to enable or disable PCI INTx
4422 *
4423 * Enables/disables PCI INTx for device @pdev
4424 */
4425 void pci_intx(struct pci_dev *pdev, int enable)
4426 {
4427 u16 pci_command, new;
4428
4429 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4430
4431 if (enable)
4432 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4433 else
4434 new = pci_command | PCI_COMMAND_INTX_DISABLE;
4435
4436 if (new != pci_command) {
4437 struct pci_devres *dr;
4438
4439 pci_write_config_word(pdev, PCI_COMMAND, new);
4440
4441 dr = find_pci_dr(pdev);
4442 if (dr && !dr->restore_intx) {
4443 dr->restore_intx = 1;
4444 dr->orig_intx = !enable;
4445 }
4446 }
4447 }
4448 EXPORT_SYMBOL_GPL(pci_intx);
4449
4450 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4451 {
4452 struct pci_bus *bus = dev->bus;
4453 bool mask_updated = true;
4454 u32 cmd_status_dword;
4455 u16 origcmd, newcmd;
4456 unsigned long flags;
4457 bool irq_pending;
4458
4459 /*
4460 * We do a single dword read to retrieve both command and status.
4461 * Document assumptions that make this possible.
4462 */
4463 BUILD_BUG_ON(PCI_COMMAND % 4);
4464 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4465
4466 raw_spin_lock_irqsave(&pci_lock, flags);
4467
4468 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4469
4470 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
4471
4472 /*
4473 * Check interrupt status register to see whether our device
4474 * triggered the interrupt (when masking) or the next IRQ is
4475 * already pending (when unmasking).
4476 */
4477 if (mask != irq_pending) {
4478 mask_updated = false;
4479 goto done;
4480 }
4481
4482 origcmd = cmd_status_dword;
4483 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4484 if (mask)
4485 newcmd |= PCI_COMMAND_INTX_DISABLE;
4486 if (newcmd != origcmd)
4487 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4488
4489 done:
4490 raw_spin_unlock_irqrestore(&pci_lock, flags);
4491
4492 return mask_updated;
4493 }
4494
4495 /**
4496 * pci_check_and_mask_intx - mask INTx on pending interrupt
4497 * @dev: the PCI device to operate on
4498 *
4499 * Check if the device dev has its INTx line asserted, mask it and return
4500 * true in that case. False is returned if no interrupt was pending.
4501 */
4502 bool pci_check_and_mask_intx(struct pci_dev *dev)
4503 {
4504 return pci_check_and_set_intx_mask(dev, true);
4505 }
4506 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
4507
4508 /**
4509 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
4510 * @dev: the PCI device to operate on
4511 *
4512 * Check if the device dev has its INTx line asserted, unmask it if not and
4513 * return true. False is returned and the mask remains active if there was
4514 * still an interrupt pending.
4515 */
4516 bool pci_check_and_unmask_intx(struct pci_dev *dev)
4517 {
4518 return pci_check_and_set_intx_mask(dev, false);
4519 }
4520 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
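
/*
 * Usage example (an illustrative sketch): a driver servicing a shared
 * INTx line without device-specific masking can combine the two helpers
 * above in its interrupt handler ("struct my_dev" is hypothetical):
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_dev *md = data;
 *
 *		if (!pci_check_and_mask_intx(md->pdev))
 *			return IRQ_NONE;
 *
 *		queue_work(system_wq, &md->work);
 *		return IRQ_HANDLED;
 *	}
 *
 * The work item re-enables the interrupt with pci_intx(md->pdev, 1)
 * once the device has been serviced.
 */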
4521
4522 /**
4523 * pci_wait_for_pending_transaction - wait for pending transaction
4524 * @dev: the PCI device to operate on
4525 *
4526 * Returns 0 if a transaction is pending, 1 otherwise.
4527 */
4528 int pci_wait_for_pending_transaction(struct pci_dev *dev)
4529 {
4530 if (!pci_is_pcie(dev))
4531 return 1;
4532
4533 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4534 PCI_EXP_DEVSTA_TRPND);
4535 }
4536 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4537
4538 /**
4539 * pcie_has_flr - check if a device supports function level resets
4540 * @dev: device to check
4541 *
4542 * Returns true if the device advertises support for PCIe function level
4543 * resets.
4544 */
4545 bool pcie_has_flr(struct pci_dev *dev)
4546 {
4547 u32 cap;
4548
4549 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4550 return false;
4551
4552 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
4553 return cap & PCI_EXP_DEVCAP_FLR;
4554 }
4555 EXPORT_SYMBOL_GPL(pcie_has_flr);
4556
4557 /**
4558 * pcie_flr - initiate a PCIe function level reset
4559 * @dev: device to reset
4560 *
4561 * Initiate a function level reset on @dev. The caller should ensure the
4562 * device supports FLR before calling this function, e.g. by using the
4563 * pcie_has_flr() helper.
4564 */
4565 int pcie_flr(struct pci_dev *dev)
4566 {
4567 if (!pci_wait_for_pending_transaction(dev))
4568 pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4569
4570 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4571
4572 if (dev->imm_ready)
4573 return 0;
4574
4575 /*
4576 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4577 * 100ms, but may silently discard requests while the FLR is in
4578 * progress. Wait 100ms before trying to access the device.
4579 */
4580 msleep(100);
4581
4582 return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4583 }
4584 EXPORT_SYMBOL_GPL(pcie_flr);
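
/*
 * Usage example (an illustrative sketch): callers pair the two helpers,
 * probing for support first and only then issuing the reset:
 *
 *	if (pcie_has_flr(pdev))
 *		rc = pcie_flr(pdev);
 */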
4585
4586 static int pci_af_flr(struct pci_dev *dev, int probe)
4587 {
4588 int pos;
4589 u8 cap;
4590
4591 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4592 if (!pos)
4593 return -ENOTTY;
4594
4595 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4596 return -ENOTTY;
4597
4598 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4599 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4600 return -ENOTTY;
4601
4602 if (probe)
4603 return 0;
4604
4605 /*
4606 * Wait for Transaction Pending bit to clear. A word-aligned test
4607 * is used, so we use the control offset rather than status and shift
4608 * the test bit to match.
4609 */
4610 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4611 PCI_AF_STATUS_TP << 8))
4612 pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4613
4614 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4615
4616 if (dev->imm_ready)
4617 return 0;
4618
4619 /*
4620 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4621 * updated 27 July 2006; a device must complete an FLR within
4622 * 100ms, but may silently discard requests while the FLR is in
4623 * progress. Wait 100ms before trying to access the device.
4624 */
4625 msleep(100);
4626
4627 return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4628 }
4629
4630 /**
4631 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4632 * @dev: Device to reset.
4633 * @probe: If set, only check if the device can be reset this way.
4634 *
4635 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
4636 * unset, it will be reinitialized internally when going from PCI_D3hot to
4637 * PCI_D0. If that's the case and the device is not in a low-power state
4638 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
4639 *
4640 * NOTE: This causes the caller to sleep for twice the device power transition
4641 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
4642 * by default (i.e. unless the @dev's d3hot_delay field has a different value).
4643 * Moreover, only devices in D0 can be reset by this function.
4644 */
4645 static int pci_pm_reset(struct pci_dev *dev, int probe)
4646 {
4647 u16 csr;
4648
4649 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4650 return -ENOTTY;
4651
4652 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4653 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4654 return -ENOTTY;
4655
4656 if (probe)
4657 return 0;
4658
4659 if (dev->current_state != PCI_D0)
4660 return -EINVAL;
4661
4662 csr &= ~PCI_PM_CTRL_STATE_MASK;
4663 csr |= PCI_D3hot;
4664 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4665 pci_dev_d3_sleep(dev);
4666
4667 csr &= ~PCI_PM_CTRL_STATE_MASK;
4668 csr |= PCI_D0;
4669 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4670 pci_dev_d3_sleep(dev);
4671
4672 return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4673 }
4674
4675 /**
4676 * pcie_wait_for_link_delay - Wait until link is active or inactive
4677 * @pdev: Bridge device
4678 * @active: waiting for active or inactive?
4679 * @delay: Delay to wait after link has become active (in ms)
4680 *
4681 * Use this to wait till link becomes active or inactive.
4682 */
4683 static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4684 int delay)
4685 {
4686 int timeout = 1000;
4687 bool ret;
4688 u16 lnk_status;
4689
4690 /*
4691 * Some controllers might not implement link active reporting. In this
4692 * case, we wait for 1000 ms + any delay requested by the caller.
4693 */
4694 if (!pdev->link_active_reporting) {
4695 msleep(timeout + delay);
4696 return true;
4697 }
4698
4699 /*
4700 * Per PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms,
4701 * after which we should expect the link to become active if the reset was
4702 * successful. If so, software must wait a minimum of 100ms before sending
4703 * configuration requests to devices downstream of this port.
4704 *
4705 * If the link fails to activate, either the device was physically
4706 * removed or the link is permanently failed.
4707 */
4708 if (active)
4709 msleep(20);
4710 for (;;) {
4711 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
4712 ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
4713 if (ret == active)
4714 break;
4715 if (timeout <= 0)
4716 break;
4717 msleep(10);
4718 timeout -= 10;
4719 }
4720 if (active && ret)
4721 msleep(delay);
4722
4723 return ret == active;
4724 }
4725
4726 /**
4727 * pcie_wait_for_link - Wait until link is active or inactive
4728 * @pdev: Bridge device
4729 * @active: waiting for active or inactive?
4730 *
4731 * Use this to wait till link becomes active or inactive.
4732 */
4733 bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4734 {
4735 return pcie_wait_for_link_delay(pdev, active, 100);
4736 }
4737
4738 /*
4739 * Find maximum D3cold delay required by all the devices on the bus. The
4740 * spec says 100 ms, but firmware can lower it and we allow drivers to
4741 * increase it as well.
4742 *
4743 * Called with @pci_bus_sem locked for reading.
4744 */
4745 static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4746 {
4747 const struct pci_dev *pdev;
4748 int min_delay = 100;
4749 int max_delay = 0;
4750
4751 list_for_each_entry(pdev, &bus->devices, bus_list) {
4752 if (pdev->d3cold_delay < min_delay)
4753 min_delay = pdev->d3cold_delay;
4754 if (pdev->d3cold_delay > max_delay)
4755 max_delay = pdev->d3cold_delay;
4756 }
4757
4758 return max(min_delay, max_delay);
4759 }
4760
4761 /**
4762 * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
4763 * @dev: PCI bridge
4764 *
4765 * Handle necessary delays before access to the devices on the secondary
4766 * side of the bridge are permitted after D3cold to D0 transition.
4767 *
4768 * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
4769 * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
4770 * 4.3.2.
4771 */
4772 void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
4773 {
4774 struct pci_dev *child;
4775 int delay;
4776
4777 if (pci_dev_is_disconnected(dev))
4778 return;
4779
4780 if (!pci_is_bridge(dev) || !dev->bridge_d3)
4781 return;
4782
4783 down_read(&pci_bus_sem);
4784
4785 /*
4786 * We only deal with devices that are currently present on the bus.
4787 * For any hot-added devices the access delay is handled in pciehp
4788 * board_added(). In case of ACPI hotplug the firmware is expected
4789 * to configure the devices before OS is notified.
4790 */
4791 if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4792 up_read(&pci_bus_sem);
4793 return;
4794 }
4795
4796 /* Take d3cold_delay requirements into account */
4797 delay = pci_bus_max_d3cold_delay(dev->subordinate);
4798 if (!delay) {
4799 up_read(&pci_bus_sem);
4800 return;
4801 }
4802
4803 child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
4804 bus_list);
4805 up_read(&pci_bus_sem);
4806
4807 /*
4808 * For conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
4809 * accessing the device after reset (that is 1000 ms + 100 ms). In
4810 * practice this should not be needed because we don't do power
4811 * management for them (see pci_bridge_d3_possible()).
4812 */
4813 if (!pci_is_pcie(dev)) {
4814 pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4815 msleep(1000 + delay);
4816 return;
4817 }
4818
4819 /*
4820 * PCIe downstream and root ports that do not support speeds
4821 * greater than 5 GT/s need to wait a minimum of 100 ms. For higher
4822 * speeds (gen3) we need to wait first for the data link layer to
4823 * become active.
4824 *
4825 * However, 100 ms is the minimum and the PCIe spec says the
4826 * software must allow at least 1s before it can determine that the
4827 * device that did not respond is a broken device. There is
4828 * evidence that 100 ms is not always enough, for example certain
4829 * Titan Ridge xHCI controller does not always respond to
4830 * configuration requests if we only wait for 100 ms (see
4831 * https://bugzilla.kernel.org/show_bug.cgi?id=203885).
4832 *
4833 * Therefore we wait for 100 ms and check for the device presence.
4834 * If it is still not present give it an additional 100 ms.
4835 */
4836 if (!pcie_downstream_port(dev))
4837 return;
4838
4839 if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
4840 pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
4841 msleep(delay);
4842 } else {
4843 pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
4844 delay);
4845 if (!pcie_wait_for_link_delay(dev, true, delay)) {
4846 /* Did not train, no need to wait any further */
4847 pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
4848 return;
4849 }
4850 }
4851
4852 if (!pci_device_is_present(child)) {
4853 pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
4854 msleep(delay);
4855 }
4856 }
4857
4858 void pci_reset_secondary_bus(struct pci_dev *dev)
4859 {
4860 u16 ctrl;
4861
4862 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4863 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4864 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4865
4866 /*
4867 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
4868 * this to 2ms to ensure that we meet the minimum requirement.
4869 */
4870 msleep(2);
4871
4872 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4873 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4874
4875 /*
4876 * Trhfa for conventional PCI is 2^25 clock cycles.
4877 * Assuming a minimum 33MHz clock this results in a 1s
4878 * delay before we can consider subordinate devices to
4879 * be re-initialized. PCIe has some ways to shorten this,
4880 * but we don't make use of them yet.
4881 */
4882 ssleep(1);
4883 }
4884
4885 void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4886 {
4887 pci_reset_secondary_bus(dev);
4888 }
4889
4890 /**
4891 * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
4892 * @dev: Bridge device
4893 *
4894 * Use the bridge control register to assert reset on the secondary bus.
4895 * Devices on the secondary bus are left in power-on state.
4896 */
4897 int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4898 {
4899 pcibios_reset_secondary_bus(dev);
4900
4901 return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
4902 }
4903 EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4904
4905 static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4906 {
4907 struct pci_dev *pdev;
4908
4909 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4910 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4911 return -ENOTTY;
4912
4913 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4914 if (pdev != dev)
4915 return -ENOTTY;
4916
4917 if (probe)
4918 return 0;
4919
4920 return pci_bridge_secondary_bus_reset(dev->bus->self);
4921 }
4922
4923 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
4924 {
4925 int rc = -ENOTTY;
4926
4927 if (!hotplug || !try_module_get(hotplug->owner))
4928 return rc;
4929
4930 if (hotplug->ops->reset_slot)
4931 rc = hotplug->ops->reset_slot(hotplug, probe);
4932
4933 module_put(hotplug->owner);
4934
4935 return rc;
4936 }
4937
4938 static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
4939 {
4940 if (dev->multifunction || dev->subordinate || !dev->slot ||
4941 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4942 return -ENOTTY;
4943
4944 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4945 }
4946
4947 static void pci_dev_lock(struct pci_dev *dev)
4948 {
4949 pci_cfg_access_lock(dev);
4950 /* block PM suspend, driver probe, etc. */
4951 device_lock(&dev->dev);
4952 }
4953
4954 /* Return 1 on successful lock, 0 on contention */
4955 static int pci_dev_trylock(struct pci_dev *dev)
4956 {
4957 if (pci_cfg_access_trylock(dev)) {
4958 if (device_trylock(&dev->dev))
4959 return 1;
4960 pci_cfg_access_unlock(dev);
4961 }
4962
4963 return 0;
4964 }
4965
4966 static void pci_dev_unlock(struct pci_dev *dev)
4967 {
4968 device_unlock(&dev->dev);
4969 pci_cfg_access_unlock(dev);
4970 }
4971
4972 static void pci_dev_save_and_disable(struct pci_dev *dev)
4973 {
4974 const struct pci_error_handlers *err_handler =
4975 dev->driver ? dev->driver->err_handler : NULL;
4976
4977 /*
4978 * dev->driver->err_handler->reset_prepare() is protected against
4979 * races with ->remove() by the device lock, which must be held by
4980 * the caller.
4981 */
4982 if (err_handler && err_handler->reset_prepare)
4983 err_handler->reset_prepare(dev);
4984
4985 /*
4986 * Wake-up device prior to save. PM registers default to D0 after
4987 * reset and a simple register restore doesn't reliably return
4988 * to a non-D0 state anyway.
4989 */
4990 pci_set_power_state(dev, PCI_D0);
4991
4992 pci_save_state(dev);
4993 /*
4994 * Disable the device by clearing the Command register, except for
4995 * INTx-disable which is set. This not only disables MMIO and I/O port
4996 * BARs, but also prevents the device from being Bus Master, preventing
4997 * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3
4998 * compliant devices, INTx-disable prevents legacy interrupts.
4999 */
5000 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
5001 }
5002
5003 static void pci_dev_restore(struct pci_dev *dev)
5004 {
5005 const struct pci_error_handlers *err_handler =
5006 dev->driver ? dev->driver->err_handler : NULL;
5007
5008 pci_restore_state(dev);
5009
5010 /*
5011 * dev->driver->err_handler->reset_done() is protected against
5012 * races with ->remove() by the device lock, which must be held by
5013 * the caller.
5014 */
5015 if (err_handler && err_handler->reset_done)
5016 err_handler->reset_done(dev);
5017 }
5018
5019 /**
5020 * __pci_reset_function_locked - reset a PCI device function while holding
5021 * the @dev mutex lock.
5022 * @dev: PCI device to reset
5023 *
5024 * Some devices allow an individual function to be reset without affecting
5025 * other functions in the same device. The PCI device must be responsive
5026 * to PCI config space in order to use this function.
5027 *
5028 * The device function is presumed to be unused and the caller is holding
5029 * the device mutex lock when this function is called.
5030 *
5031 * Resetting the device will make the contents of PCI configuration space
5032 * random, so any caller of this must be prepared to reinitialise the
5033 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
5034 * etc.
5035 *
5036 * Returns 0 if the device function was successfully reset or negative if the
5037 * device doesn't support resetting a single function.
5038 */
5039 int __pci_reset_function_locked(struct pci_dev *dev)
5040 {
5041 int rc;
5042
5043 might_sleep();
5044
5045 /*
5046 * A reset method returns -ENOTTY if it doesn't support this device
5047 * and we should try the next method.
5048 *
5049 * If it returns 0 (success), we're finished. If it returns any
5050 * other error, we're also finished: this indicates that further
5051 * reset mechanisms might be broken on the device.
5052 */
5053 rc = pci_dev_specific_reset(dev, 0);
5054 if (rc != -ENOTTY)
5055 return rc;
5056 if (pcie_has_flr(dev)) {
5057 rc = pcie_flr(dev);
5058 if (rc != -ENOTTY)
5059 return rc;
5060 }
5061 rc = pci_af_flr(dev, 0);
5062 if (rc != -ENOTTY)
5063 return rc;
5064 rc = pci_pm_reset(dev, 0);
5065 if (rc != -ENOTTY)
5066 return rc;
5067 rc = pci_dev_reset_slot_function(dev, 0);
5068 if (rc != -ENOTTY)
5069 return rc;
5070 return pci_parent_bus_reset(dev, 0);
5071 }
5072 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
5073
5074 /**
5075 * pci_probe_reset_function - check whether the device can be safely reset
5076 * @dev: PCI device to reset
5077 *
5078 * Some devices allow an individual function to be reset without affecting
5079 * other functions in the same device. The PCI device must be responsive
5080 * to PCI config space in order to use this function.
5081 *
5082 * Returns 0 if the device function can be reset or negative if the
5083 * device doesn't support resetting a single function.
5084 */
5085 int pci_probe_reset_function(struct pci_dev *dev)
5086 {
5087 int rc;
5088
5089 might_sleep();
5090
5091 rc = pci_dev_specific_reset(dev, 1);
5092 if (rc != -ENOTTY)
5093 return rc;
5094 if (pcie_has_flr(dev))
5095 return 0;
5096 rc = pci_af_flr(dev, 1);
5097 if (rc != -ENOTTY)
5098 return rc;
5099 rc = pci_pm_reset(dev, 1);
5100 if (rc != -ENOTTY)
5101 return rc;
5102 rc = pci_dev_reset_slot_function(dev, 1);
5103 if (rc != -ENOTTY)
5104 return rc;
5105
5106 return pci_parent_bus_reset(dev, 1);
5107 }
5108
5109 /**
5110 * pci_reset_function - quiesce and reset a PCI device function
5111 * @dev: PCI device to reset
5112 *
5113 * Some devices allow an individual function to be reset without affecting
5114 * other functions in the same device. The PCI device must be responsive
5115 * to PCI config space in order to use this function.
5116 *
5117 * This function does not just reset the PCI portion of a device, but
5118 * clears all the state associated with the device. This function differs
5119 * from __pci_reset_function_locked() in that it saves and restores device state
5120 * over the reset and takes the PCI device lock.
5121 *
5122 * Returns 0 if the device function was successfully reset or negative if the
5123 * device doesn't support resetting a single function.
5124 */
5125 int pci_reset_function(struct pci_dev *dev)
5126 {
5127 int rc;
5128
5129 if (!dev->reset_fn)
5130 return -ENOTTY;
5131
5132 pci_dev_lock(dev);
5133 pci_dev_save_and_disable(dev);
5134
5135 rc = __pci_reset_function_locked(dev);
5136
5137 pci_dev_restore(dev);
5138 pci_dev_unlock(dev);
5139
5140 return rc;
5141 }
5142 EXPORT_SYMBOL_GPL(pci_reset_function);
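
/*
 * Usage example (an illustrative sketch): a driver recovering a hung
 * device can trigger a full function reset and report failure:
 *
 *	err = pci_reset_function(pdev);
 *	if (err)
 *		dev_err(&pdev->dev, "function reset failed: %d\n", err);
 */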
5143
5144 /**
5145 * pci_reset_function_locked - quiesce and reset a PCI device function
5146 * @dev: PCI device to reset
5147 *
5148 * Some devices allow an individual function to be reset without affecting
5149 * other functions in the same device. The PCI device must be responsive
5150 * to PCI config space in order to use this function.
5151 *
5152 * This function does not just reset the PCI portion of a device, but
5153 * clears all the state associated with the device. This function differs
5154 * from __pci_reset_function_locked() in that it saves and restores device state
5155 * over the reset. It also differs from pci_reset_function() in that it
5156 * requires the PCI device lock to be held.
5157 *
5158 * Returns 0 if the device function was successfully reset or negative if the
5159 * device doesn't support resetting a single function.
5160 */
5161 int pci_reset_function_locked(struct pci_dev *dev)
5162 {
5163 int rc;
5164
5165 if (!dev->reset_fn)
5166 return -ENOTTY;
5167
5168 pci_dev_save_and_disable(dev);
5169
5170 rc = __pci_reset_function_locked(dev);
5171
5172 pci_dev_restore(dev);
5173
5174 return rc;
5175 }
5176 EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5177
5178 /**
5179 * pci_try_reset_function - quiesce and reset a PCI device function
5180 * @dev: PCI device to reset
5181 *
5182 * Same as pci_reset_function(), except it returns -EAGAIN if the device
5182 * cannot be locked.
5183 */
5184 int pci_try_reset_function(struct pci_dev *dev)
5185 {
5186 int rc;
5187
5188 if (!dev->reset_fn)
5189 return -ENOTTY;
5190
5191 if (!pci_dev_trylock(dev))
5192 return -EAGAIN;
5193
5194 pci_dev_save_and_disable(dev);
5195 rc = __pci_reset_function_locked(dev);
5196 pci_dev_restore(dev);
5197 pci_dev_unlock(dev);
5198
5199 return rc;
5200 }
5201 EXPORT_SYMBOL_GPL(pci_try_reset_function);
5202
5203 /* Do any devices on or below this bus prevent a bus reset? */
5204 static bool pci_bus_resetable(struct pci_bus *bus)
5205 {
5206 struct pci_dev *dev;
5207
5209 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5210 return false;
5211
5212 list_for_each_entry(dev, &bus->devices, bus_list) {
5213 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5214 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5215 return false;
5216 }
5217
5218 return true;
5219 }
5220
5221 /* Lock devices from the top of the tree down */
5222 static void pci_bus_lock(struct pci_bus *bus)
5223 {
5224 struct pci_dev *dev;
5225
5226 list_for_each_entry(dev, &bus->devices, bus_list) {
5227 pci_dev_lock(dev);
5228 if (dev->subordinate)
5229 pci_bus_lock(dev->subordinate);
5230 }
5231 }
5232
5233 /* Unlock devices from the bottom of the tree up */
5234 static void pci_bus_unlock(struct pci_bus *bus)
5235 {
5236 struct pci_dev *dev;
5237
5238 list_for_each_entry(dev, &bus->devices, bus_list) {
5239 if (dev->subordinate)
5240 pci_bus_unlock(dev->subordinate);
5241 pci_dev_unlock(dev);
5242 }
5243 }
5244
5245 /* Return 1 on successful lock, 0 on contention */
5246 static int pci_bus_trylock(struct pci_bus *bus)
5247 {
5248 struct pci_dev *dev;
5249
5250 list_for_each_entry(dev, &bus->devices, bus_list) {
5251 if (!pci_dev_trylock(dev))
5252 goto unlock;
5253 if (dev->subordinate) {
5254 if (!pci_bus_trylock(dev->subordinate)) {
5255 pci_dev_unlock(dev);
5256 goto unlock;
5257 }
5258 }
5259 }
5260 return 1;
5261
5262 unlock:
5263 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5264 if (dev->subordinate)
5265 pci_bus_unlock(dev->subordinate);
5266 pci_dev_unlock(dev);
5267 }
5268 return 0;
5269 }
5270
5271 /* Do any devices on or below this slot prevent a bus reset? */
5272 static bool pci_slot_resetable(struct pci_slot *slot)
5273 {
5274 struct pci_dev *dev;
5275
5276 if (slot->bus->self &&
5277 (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5278 return false;
5279
5280 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5281 if (!dev->slot || dev->slot != slot)
5282 continue;
5283 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5284 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5285 return false;
5286 }
5287
5288 return true;
5289 }
5290
5291 /* Lock devices from the top of the tree down */
5292 static void pci_slot_lock(struct pci_slot *slot)
5293 {
5294 struct pci_dev *dev;
5295
5296 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5297 if (!dev->slot || dev->slot != slot)
5298 continue;
5299 pci_dev_lock(dev);
5300 if (dev->subordinate)
5301 pci_bus_lock(dev->subordinate);
5302 }
5303 }
5304
5305 /* Unlock devices from the bottom of the tree up */
5306 static void pci_slot_unlock(struct pci_slot *slot)
5307 {
5308 struct pci_dev *dev;
5309
5310 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5311 if (!dev->slot || dev->slot != slot)
5312 continue;
5313 if (dev->subordinate)
5314 pci_bus_unlock(dev->subordinate);
5315 pci_dev_unlock(dev);
5316 }
5317 }
5318
5319 /* Return 1 on successful lock, 0 on contention */
5320 static int pci_slot_trylock(struct pci_slot *slot)
5321 {
5322 struct pci_dev *dev;
5323
5324 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5325 if (!dev->slot || dev->slot != slot)
5326 continue;
5327 if (!pci_dev_trylock(dev))
5328 goto unlock;
5329 if (dev->subordinate) {
5330 if (!pci_bus_trylock(dev->subordinate)) {
5331 pci_dev_unlock(dev);
5332 goto unlock;
5333 }
5334 }
5335 }
5336 return 1;
5337
5338 unlock:
5339 list_for_each_entry_continue_reverse(dev,
5340 &slot->bus->devices, bus_list) {
5341 if (!dev->slot || dev->slot != slot)
5342 continue;
5343 if (dev->subordinate)
5344 pci_bus_unlock(dev->subordinate);
5345 pci_dev_unlock(dev);
5346 }
5347 return 0;
5348 }
5349
5350 /*
5351 * Save and disable devices from the top of the tree down while holding
5352 * the @dev mutex lock for the entire tree.
5353 */
5354 static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5355 {
5356 struct pci_dev *dev;
5357
5358 list_for_each_entry(dev, &bus->devices, bus_list) {
5359 pci_dev_save_and_disable(dev);
5360 if (dev->subordinate)
5361 pci_bus_save_and_disable_locked(dev->subordinate);
5362 }
5363 }
5364
5365 /*
5366 * Restore devices from top of the tree down while holding @dev mutex lock
5367 * for the entire tree. Parent bridges need to be restored before we can
5368 * get to subordinate devices.
5369 */
5370 static void pci_bus_restore_locked(struct pci_bus *bus)
5371 {
5372 struct pci_dev *dev;
5373
5374 list_for_each_entry(dev, &bus->devices, bus_list) {
5375 pci_dev_restore(dev);
5376 if (dev->subordinate)
5377 pci_bus_restore_locked(dev->subordinate);
5378 }
5379 }
5380
5381 /*
5382 * Save and disable devices from the top of the tree down while holding
5383 * the @dev mutex lock for the entire tree.
5384 */
5385 static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5386 {
5387 struct pci_dev *dev;
5388
5389 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5390 if (!dev->slot || dev->slot != slot)
5391 continue;
5392 pci_dev_save_and_disable(dev);
5393 if (dev->subordinate)
5394 pci_bus_save_and_disable_locked(dev->subordinate);
5395 }
5396 }
5397
5398 /*
5399 * Restore devices from top of the tree down while holding @dev mutex lock
5400 * for the entire tree. Parent bridges need to be restored before we can
5401 * get to subordinate devices.
5402 */
5403 static void pci_slot_restore_locked(struct pci_slot *slot)
5404 {
5405 struct pci_dev *dev;
5406
5407 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5408 if (!dev->slot || dev->slot != slot)
5409 continue;
5410 pci_dev_restore(dev);
5411 if (dev->subordinate)
5412 pci_bus_restore_locked(dev->subordinate);
5413 }
5414 }
5415
5416 static int pci_slot_reset(struct pci_slot *slot, int probe)
5417 {
5418 int rc;
5419
5420 if (!slot || !pci_slot_resetable(slot))
5421 return -ENOTTY;
5422
5423 if (!probe)
5424 pci_slot_lock(slot);
5425
5426 might_sleep();
5427
5428 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5429
5430 if (!probe)
5431 pci_slot_unlock(slot);
5432
5433 return rc;
5434 }
5435
5436 /**
5437 * pci_probe_reset_slot - probe whether a PCI slot can be reset
5438 * @slot: PCI slot to probe
5439 *
5440 * Return 0 if the slot can be reset, negative if a slot reset is not supported.
5441 */
5442 int pci_probe_reset_slot(struct pci_slot *slot)
5443 {
5444 return pci_slot_reset(slot, 1);
5445 }
5446 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5447
5448 /**
5449 * __pci_reset_slot - Try to reset a PCI slot
5450 * @slot: PCI slot to reset
5451 *
5452 * A PCI bus may host multiple slots; each slot may support a reset mechanism
5453 * independent of other slots. For instance, some slots may support slot power
5454 * control. In the case of a 1:1 bus to slot architecture, this function may
5455 * wrap the bus reset to avoid spurious slot-related events such as hotplug.
5456 * Generally a slot reset should be attempted before a bus reset. All of the
5457 * functions of the slot and any subordinate buses behind the slot are reset
5458 * through this function. PCI config space of all devices in the slot and
5459 * behind the slot is saved before and restored after reset.
5460 *
5461 * Same as above except return -EAGAIN if the slot cannot be locked
5462 */
5463 static int __pci_reset_slot(struct pci_slot *slot)
5464 {
5465 int rc;
5466
5467 rc = pci_slot_reset(slot, 1);
5468 if (rc)
5469 return rc;
5470
5471 if (pci_slot_trylock(slot)) {
5472 pci_slot_save_and_disable_locked(slot);
5473 might_sleep();
5474 rc = pci_reset_hotplug_slot(slot->hotplug, 0);
5475 pci_slot_restore_locked(slot);
5476 pci_slot_unlock(slot);
5477 } else
5478 rc = -EAGAIN;
5479
5480 return rc;
5481 }
5482
5483 static int pci_bus_reset(struct pci_bus *bus, int probe)
5484 {
5485 int ret;
5486
5487 if (!bus->self || !pci_bus_resetable(bus))
5488 return -ENOTTY;
5489
5490 if (probe)
5491 return 0;
5492
5493 pci_bus_lock(bus);
5494
5495 might_sleep();
5496
5497 ret = pci_bridge_secondary_bus_reset(bus->self);
5498
5499 pci_bus_unlock(bus);
5500
5501 return ret;
5502 }
5503
5504 /**
5505 * pci_bus_error_reset - reset the bridge's subordinate bus
5506 * @bridge: The parent device that connects to the bus to reset
5507 *
5508 * This function will first try to reset the slots on this bus if the method is
5509 * available. If slot reset fails or is not available, this will fall back to a
5510 * secondary bus reset.
5511 */
5512 int pci_bus_error_reset(struct pci_dev *bridge)
5513 {
5514 struct pci_bus *bus = bridge->subordinate;
5515 struct pci_slot *slot;
5516
5517 if (!bus)
5518 return -ENOTTY;
5519
5520 mutex_lock(&pci_slot_mutex);
5521 if (list_empty(&bus->slots))
5522 goto bus_reset;
5523
5524 list_for_each_entry(slot, &bus->slots, list)
5525 if (pci_probe_reset_slot(slot))
5526 goto bus_reset;
5527
5528 list_for_each_entry(slot, &bus->slots, list)
5529 if (pci_slot_reset(slot, 0))
5530 goto bus_reset;
5531
5532 mutex_unlock(&pci_slot_mutex);
5533 return 0;
5534 bus_reset:
5535 mutex_unlock(&pci_slot_mutex);
5536 return pci_bus_reset(bridge->subordinate, 0);
5537 }
5538
5539 /**
5540 * pci_probe_reset_bus - probe whether a PCI bus can be reset
5541 * @bus: PCI bus to probe
5542 *
5543 * Return 0 if the bus can be reset, negative if a bus reset is not supported.
5544 */
5545 int pci_probe_reset_bus(struct pci_bus *bus)
5546 {
5547 return pci_bus_reset(bus, 1);
5548 }
5549 EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5550
5551 /**
5552 * __pci_reset_bus - Try to reset a PCI bus
5553 * @bus: top level PCI bus to reset
5554 *
5555 * Same as above except return -EAGAIN if the bus cannot be locked
5556 */
5557 static int __pci_reset_bus(struct pci_bus *bus)
5558 {
5559 int rc;
5560
5561 rc = pci_bus_reset(bus, 1);
5562 if (rc)
5563 return rc;
5564
5565 if (pci_bus_trylock(bus)) {
5566 pci_bus_save_and_disable_locked(bus);
5567 might_sleep();
5568 rc = pci_bridge_secondary_bus_reset(bus->self);
5569 pci_bus_restore_locked(bus);
5570 pci_bus_unlock(bus);
5571 } else
5572 rc = -EAGAIN;
5573
5574 return rc;
5575 }
5576
5577 /**
5578 * pci_reset_bus - Try to reset a PCI bus
5579 * @pdev: top level PCI device to reset via slot/bus
5580 *
5581 * Same as above except return -EAGAIN if the bus cannot be locked
5582 */
5583 int pci_reset_bus(struct pci_dev *pdev)
5584 {
5585 return (!pci_probe_reset_slot(pdev->slot)) ?
5586 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5587 }
5588 EXPORT_SYMBOL_GPL(pci_reset_bus);
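
/*
 * Usage sketch (illustrative): a caller such as a device-assignment
 * driver with no function-level reset available can probe first, then
 * fall back to a slot/bus reset; "pdev" is assumed to be owned by the
 * caller.
 *
 *	if (!pci_probe_reset_bus(pdev->bus) && pci_reset_bus(pdev))
 *		pci_warn(pdev, "slot/bus reset failed\n");
 */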
5589
5590 /**
5591 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
5592 * @dev: PCI device to query
5593 *
5594 * Returns mmrbc: maximum designed memory read count in bytes or an
5595 * appropriate error value.
5596 */
5597 int pcix_get_max_mmrbc(struct pci_dev *dev)
5598 {
5599 int cap;
5600 u32 stat;
5601
5602 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5603 if (!cap)
5604 return -EINVAL;
5605
5606 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5607 return -EINVAL;
5608
5609 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
5610 }
5611 EXPORT_SYMBOL(pcix_get_max_mmrbc);
5612
5613 /**
5614 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
5615 * @dev: PCI device to query
5616 *
5617 * Returns mmrbc: maximum memory read count in bytes or an appropriate
5618 * error value.
5619 */
5620 int pcix_get_mmrbc(struct pci_dev *dev)
5621 {
5622 int cap;
5623 u16 cmd;
5624
5625 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5626 if (!cap)
5627 return -EINVAL;
5628
5629 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5630 return -EINVAL;
5631
5632 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
5633 }
5634 EXPORT_SYMBOL(pcix_get_mmrbc);
5635
5636 /**
5637 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
5638 * @dev: PCI device to query
5639 * @mmrbc: maximum memory read count in bytes
5640 * valid values are 512, 1024, 2048, 4096
5641 *
5642 * If possible, sets the maximum memory read byte count; some bridges have
5643 * errata that prevent this.
5644 */
5645 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5646 {
5647 int cap;
5648 u32 stat, v, o;
5649 u16 cmd;
5650
5651 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5652 return -EINVAL;
5653
5654 v = ffs(mmrbc) - 10;
5655
5656 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5657 if (!cap)
5658 return -EINVAL;
5659
5660 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5661 return -EINVAL;
5662
5663 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
5664 return -E2BIG;
5665
5666 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5667 return -EINVAL;
5668
5669 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
5670 if (o != v) {
5671 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5672 return -EIO;
5673
5674 cmd &= ~PCI_X_CMD_MAX_READ;
5675 cmd |= v << 2;
5676 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5677 return -EIO;
5678 }
5679 return 0;
5680 }
5681 EXPORT_SYMBOL(pcix_set_mmrbc);
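
/*
 * Example (sketch): clamp a requested MMRBC to the device's designed
 * maximum before programming it; the 2048-byte request is arbitrary.
 *
 *	int max = pcix_get_max_mmrbc(pdev);
 *
 *	if (max > 0 && pcix_set_mmrbc(pdev, min(2048, max)))
 *		pci_warn(pdev, "failed to set PCI-X MMRBC\n");
 */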
5682
5683 /**
5684 * pcie_get_readrq - get PCI Express read request size
5685 * @dev: PCI device to query
5686 *
5687 * Returns the maximum memory read request in bytes or an appropriate error value.
5688 */
5689 int pcie_get_readrq(struct pci_dev *dev)
5690 {
5691 u16 ctl;
5692
5693 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5694
5695 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5696 }
5697 EXPORT_SYMBOL(pcie_get_readrq);
5698
5699 /**
5700 * pcie_set_readrq - set PCI Express maximum memory read request
5701 * @dev: PCI device to query
5702 * @rq: maximum memory read count in bytes
5703 * valid values are 128, 256, 512, 1024, 2048, 4096
5704 *
5705 * If possible, sets the maximum memory read request in bytes.
5706 */
5707 int pcie_set_readrq(struct pci_dev *dev, int rq)
5708 {
5709 u16 v;
5710 int ret;
5711
5712 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
5713 return -EINVAL;
5714
5715 /*
5716 * If using the "performance" PCIe config, we clamp the read rq
5717 * size to the max packet size to keep the host bridge from
5718 * generating requests larger than we can cope with.
5719 */
5720 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
5721 int mps = pcie_get_mps(dev);
5722
5723 if (mps < rq)
5724 rq = mps;
5725 }
5726
5727 v = (ffs(rq) - 8) << 12;
5728
5729 ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5730 PCI_EXP_DEVCTL_READRQ, v);
5731
5732 return pcibios_err_to_errno(ret);
5733 }
5734 EXPORT_SYMBOL(pcie_set_readrq);
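
/*
 * Example (sketch): a driver tuning DMA efficiency might program a
 * specific MRRS; 512 here is an arbitrary choice from the valid powers
 * of two (128..4096) checked above.
 *
 *	if (pcie_set_readrq(pdev, 512))
 *		pci_warn(pdev, "failed to set MRRS, still %d bytes\n",
 *			 pcie_get_readrq(pdev));
 */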
5735
5736 /**
5737 * pcie_get_mps - get PCI Express maximum payload size
5738 * @dev: PCI device to query
5739 *
5740 * Returns the maximum payload size in bytes.
5741 */
5742 int pcie_get_mps(struct pci_dev *dev)
5743 {
5744 u16 ctl;
5745
5746 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5747
5748 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5749 }
5750 EXPORT_SYMBOL(pcie_get_mps);
5751
5752 /**
5753 * pcie_set_mps - set PCI Express maximum payload size
5754 * @dev: PCI device to query
5755 * @mps: maximum payload size in bytes
5756 * valid values are 128, 256, 512, 1024, 2048, 4096
5757 *
5758 * If possible, sets the maximum payload size.
5759 */
5760 int pcie_set_mps(struct pci_dev *dev, int mps)
5761 {
5762 u16 v;
5763 int ret;
5764
5765 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
5766 return -EINVAL;
5767
5768 v = ffs(mps) - 8;
5769 if (v > dev->pcie_mpss)
5770 return -EINVAL;
5771 v <<= 5;
5772
5773 ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5774 PCI_EXP_DEVCTL_PAYLOAD, v);
5775
5776 return pcibios_err_to_errno(ret);
5777 }
5778 EXPORT_SYMBOL(pcie_set_mps);
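
/*
 * Example (sketch): the programmable MPS is bounded by the device
 * capability, where dev->pcie_mpss encodes a maximum of
 * 128 << pcie_mpss bytes.
 *
 *	int mps_cap = 128 << pdev->pcie_mpss;
 *
 *	if (pcie_set_mps(pdev, min(256, mps_cap)))
 *		pci_warn(pdev, "failed to set MPS\n");
 */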
5779
5780 /**
5781 * pcie_bandwidth_available - determine minimum link settings of a PCIe
5782 * device and its bandwidth limitation
5783 * @dev: PCI device to query
5784 * @limiting_dev: storage for device causing the bandwidth limitation
5785 * @speed: storage for speed of limiting device
5786 * @width: storage for width of limiting device
5787 *
5788 * Walk up the PCI device chain and find the point where the minimum
5789 * bandwidth is available. Return the bandwidth available there and (if
5790 * limiting_dev, speed, and width pointers are supplied) information about
5791 * that point. The bandwidth returned is in Mb/s, i.e., megabits/second of
5792 * raw bandwidth.
5793 */
5794 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
5795 enum pci_bus_speed *speed,
5796 enum pcie_link_width *width)
5797 {
5798 u16 lnksta;
5799 enum pci_bus_speed next_speed;
5800 enum pcie_link_width next_width;
5801 u32 bw, next_bw;
5802
5803 if (speed)
5804 *speed = PCI_SPEED_UNKNOWN;
5805 if (width)
5806 *width = PCIE_LNK_WIDTH_UNKNOWN;
5807
5808 bw = 0;
5809
5810 while (dev) {
5811 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
5812
5813 next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
5814 next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
5815 PCI_EXP_LNKSTA_NLW_SHIFT;
5816
5817 next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
5818
5819 /* Check if current device limits the total bandwidth */
5820 if (!bw || next_bw <= bw) {
5821 bw = next_bw;
5822
5823 if (limiting_dev)
5824 *limiting_dev = dev;
5825 if (speed)
5826 *speed = next_speed;
5827 if (width)
5828 *width = next_width;
5829 }
5830
5831 dev = pci_upstream_bridge(dev);
5832 }
5833
5834 return bw;
5835 }
5836 EXPORT_SYMBOL(pcie_bandwidth_available);
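
/*
 * Example (sketch): report the link that throttles a device; the value
 * returned is raw bandwidth in Mb/s as documented above.
 *
 *	struct pci_dev *limit = NULL;
 *	enum pci_bus_speed speed;
 *	enum pcie_link_width width;
 *	u32 bw = pcie_bandwidth_available(pdev, &limit, &speed, &width);
 *
 *	pci_info(pdev, "%u Mb/s available, limited by %s\n", bw,
 *		 limit ? pci_name(limit) : "<unknown>");
 */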
5837
5838 /**
5839 * pcie_get_speed_cap - query for the PCI device's link speed capability
5840 * @dev: PCI device to query
5841 *
5842 * Query the PCI device speed capability. Return the maximum link speed
5843 * supported by the device.
5844 */
5845 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
5846 {
5847 u32 lnkcap2, lnkcap;
5848
5849 /*
5850 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. The
5851 * implementation note there recommends using the Supported Link
5852 * Speeds Vector in Link Capabilities 2 when supported.
5853 *
5854 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
5855 * should use the Supported Link Speeds field in Link Capabilities,
5856 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
5857 */
5858 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
5859
5860 /* PCIe r3.0-compliant */
5861 if (lnkcap2)
5862 return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
5863
5864 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5865 if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
5866 return PCIE_SPEED_5_0GT;
5867 else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
5868 return PCIE_SPEED_2_5GT;
5869
5870 return PCI_SPEED_UNKNOWN;
5871 }
5872 EXPORT_SYMBOL(pcie_get_speed_cap);
5873
5874 /**
5875 * pcie_get_width_cap - query for the PCI device's link width capability
5876 * @dev: PCI device to query
5877 *
5878 * Query the PCI device width capability. Return the maximum link width
5879 * supported by the device.
5880 */
5881 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
5882 {
5883 u32 lnkcap;
5884
5885 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5886 if (lnkcap)
5887 return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
5888
5889 return PCIE_LNK_WIDTH_UNKNOWN;
5890 }
5891 EXPORT_SYMBOL(pcie_get_width_cap);
5892
5893 /**
5894 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
5895 * @dev: PCI device
5896 * @speed: storage for link speed
5897 * @width: storage for link width
5898 *
5899 * Calculate a PCI device's link bandwidth by querying for its link speed
5900 * and width, multiplying them, and applying encoding overhead. The result
5901 * is in Mb/s, i.e., megabits/second of raw bandwidth.
5902 */
5903 u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
5904 enum pcie_link_width *width)
5905 {
5906 *speed = pcie_get_speed_cap(dev);
5907 *width = pcie_get_width_cap(dev);
5908
5909 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
5910 return 0;
5911
5912 return *width * PCIE_SPEED2MBS_ENC(*speed);
5913 }
5914
5915 /**
5916 * __pcie_print_link_status - Report the PCI device's link speed and width
5917 * @dev: PCI device to query
5918 * @verbose: Print info even when enough bandwidth is available
5919 *
5920 * If the available bandwidth at the device is less than the device is
5921 * capable of, report the device's maximum possible bandwidth and the
5922 * upstream link that limits its performance. If @verbose, always print
5923 * the available bandwidth, even if the device isn't constrained.
5924 */
5925 void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
5926 {
5927 enum pcie_link_width width, width_cap;
5928 enum pci_bus_speed speed, speed_cap;
5929 struct pci_dev *limiting_dev = NULL;
5930 u32 bw_avail, bw_cap;
5931
5932 bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
5933 bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
5934
5935 if (bw_avail >= bw_cap && verbose)
5936 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
5937 bw_cap / 1000, bw_cap % 1000,
5938 pci_speed_string(speed_cap), width_cap);
5939 else if (bw_avail < bw_cap)
5940 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
5941 bw_avail / 1000, bw_avail % 1000,
5942 pci_speed_string(speed), width,
5943 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
5944 bw_cap / 1000, bw_cap % 1000,
5945 pci_speed_string(speed_cap), width_cap);
5946 }
5947
5948 /**
5949 * pcie_print_link_status - Report the PCI device's link speed and width
5950 * @dev: PCI device to query
5951 *
5952 * Report the available bandwidth at the device.
5953 */
5954 void pcie_print_link_status(struct pci_dev *dev)
5955 {
5956 __pcie_print_link_status(dev, true);
5957 }
5958 EXPORT_SYMBOL(pcie_print_link_status);
5959
5960 /**
5961 * pci_select_bars - Make BAR mask from the type of resource
5962 * @dev: the PCI device for which BAR mask is made
5963 * @flags: resource type mask to be selected
5964 *
5965 * This helper routine makes a BAR mask from the type of resource.
5966 */
5967 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
5968 {
5969 int i, bars = 0;
5970 for (i = 0; i < PCI_NUM_RESOURCES; i++)
5971 if (pci_resource_flags(dev, i) & flags)
5972 bars |= (1 << i);
5973 return bars;
5974 }
5975 EXPORT_SYMBOL(pci_select_bars);
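
/*
 * Example (sketch): request only the memory BARs of a device, a common
 * probe-path pattern; "mydrv" is a placeholder driver name.
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	if (pci_request_selected_regions(pdev, bars, "mydrv"))
 *		return -EBUSY;
 */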
5976
5977 /* Some architectures require additional programming to enable VGA */
5978 static arch_set_vga_state_t arch_set_vga_state;
5979
5980 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
5981 {
5982 arch_set_vga_state = func; /* NULL disables */
5983 }
5984
5985 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
5986 unsigned int command_bits, u32 flags)
5987 {
5988 if (arch_set_vga_state)
5989 return arch_set_vga_state(dev, decode, command_bits,
5990 flags);
5991 return 0;
5992 }
5993
5994 /**
5995 * pci_set_vga_state - set VGA decode state on device and parents if requested
5996 * @dev: the PCI device
5997 * @decode: true = enable decoding, false = disable decoding
5998 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
5999 * @flags: traverse ancestors and change bridges
6000 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
6001 */
6002 int pci_set_vga_state(struct pci_dev *dev, bool decode,
6003 unsigned int command_bits, u32 flags)
6004 {
6005 struct pci_bus *bus;
6006 struct pci_dev *bridge;
6007 u16 cmd;
6008 int rc;
6009
6010 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
6011
6012 /* ARCH specific VGA enables */
6013 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
6014 if (rc)
6015 return rc;
6016
6017 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
6018 pci_read_config_word(dev, PCI_COMMAND, &cmd);
6019 if (decode)
6020 cmd |= command_bits;
6021 else
6022 cmd &= ~command_bits;
6023 pci_write_config_word(dev, PCI_COMMAND, cmd);
6024 }
6025
6026 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
6027 return 0;
6028
6029 bus = dev->bus;
6030 while (bus) {
6031 bridge = bus->self;
6032 if (bridge) {
6033 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
6034 &cmd);
6035 if (decode)
6036 cmd |= PCI_BRIDGE_CTL_VGA;
6037 else
6038 cmd &= ~PCI_BRIDGE_CTL_VGA;
6039 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
6040 cmd);
6041 }
6042 bus = bus->parent;
6043 }
6044 return 0;
6045 }
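
/*
 * Example (sketch): routing legacy VGA ranges to one device, in the
 * style of the VGA arbiter, might enable both I/O and memory decode on
 * the device and its upstream bridges:
 *
 *	pci_set_vga_state(pdev, true,
 *			  PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
 *			  PCI_VGA_STATE_CHANGE_DECODES |
 *			  PCI_VGA_STATE_CHANGE_BRIDGE);
 */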
6046
6047 #ifdef CONFIG_ACPI
6048 bool pci_pr3_present(struct pci_dev *pdev)
6049 {
6050 struct acpi_device *adev;
6051
6052 if (acpi_disabled)
6053 return false;
6054
6055 adev = ACPI_COMPANION(&pdev->dev);
6056 if (!adev)
6057 return false;
6058
6059 return adev->power.flags.power_resources &&
6060 acpi_has_method(adev->handle, "_PR3");
6061 }
6062 EXPORT_SYMBOL_GPL(pci_pr3_present);
6063 #endif
6064
6065 /**
6066 * pci_add_dma_alias - Add a DMA devfn alias for a device
6067 * @dev: the PCI device for which alias is added
6068 * @devfn_from: alias slot and function
6069 * @nr_devfns: number of subsequent devfns to alias
6070 *
6071 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
6072 * which is used to program permissible bus-devfn source addresses for DMA
6073 * requests in an IOMMU. These aliases factor into IOMMU group creation
6074 * and are useful for devices generating DMA requests beyond or different
6075 * from their logical bus-devfn. Examples include device quirks where the
6076 * device simply uses the wrong devfn, as well as non-transparent bridges
6077 * where the alias may be a proxy for devices in another domain.
6078 *
6079 * IOMMU group creation is performed during device discovery or addition,
6080 * prior to any potential DMA mapping and therefore prior to driver probing
6081 * (especially for userspace assigned devices where IOMMU group definition
6082 * cannot be left as a userspace activity). DMA aliases should therefore
6083 * be configured via quirks, such as the PCI fixup header quirk.
6084 */
6085 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns)
6086 {
6087 int devfn_to;
6088
6089 nr_devfns = min(nr_devfns, (unsigned) MAX_NR_DEVFNS - devfn_from);
6090 devfn_to = devfn_from + nr_devfns - 1;
6091
6092 if (!dev->dma_alias_mask)
6093 dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
6094 if (!dev->dma_alias_mask) {
6095 pci_warn(dev, "Unable to allocate DMA alias mask\n");
6096 return;
6097 }
6098
6099 bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
6100
6101 if (nr_devfns == 1)
6102 pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
6103 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
6104 else if (nr_devfns > 1)
6105 pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
6106 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
6107 PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
6108 }
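
/*
 * Example (sketch): a header fixup quirk for a device that issues DMA
 * from a phantom function; the 0x1234/0x5678 IDs are placeholders, not
 * a real device.
 *
 *	static void quirk_dma_alias(struct pci_dev *dev)
 *	{
 *		pci_add_dma_alias(dev, PCI_DEVFN(0, 1), 1);
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_dma_alias);
 */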
6109
6110 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
6111 {
6112 return (dev1->dma_alias_mask &&
6113 test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
6114 (dev2->dma_alias_mask &&
6115 test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
6116 pci_real_dma_dev(dev1) == dev2 ||
6117 pci_real_dma_dev(dev2) == dev1;
6118 }
6119
6120 bool pci_device_is_present(struct pci_dev *pdev)
6121 {
6122 u32 v;
6123
6124 if (pci_dev_is_disconnected(pdev))
6125 return false;
6126 return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
6127 }
6128 EXPORT_SYMBOL_GPL(pci_device_is_present);
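
/*
 * Usage sketch (illustrative): a surprise-removal-aware driver can bail
 * out of an access path once the device has gone away.
 *
 *	if (!pci_device_is_present(pdev))
 *		return -ENODEV;
 */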
6129
6130 void pci_ignore_hotplug(struct pci_dev *dev)
6131 {
6132 struct pci_dev *bridge = dev->bus->self;
6133
6134 dev->ignore_hotplug = 1;
6135 /* Propagate the "ignore hotplug" setting to the parent bridge. */
6136 if (bridge)
6137 bridge->ignore_hotplug = 1;
6138 }
6139 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
6140
6141 /**
6142 * pci_real_dma_dev - Get PCI DMA device for PCI device
6143 * @dev: the PCI device that may have a PCI DMA alias
6144 *
6145 * Permits the platform to provide architecture-specific functionality to
6146 * devices needing to alias DMA to another PCI device on another PCI bus. If
6147 * the PCI device is on the same bus, it is recommended to use
6148 * pci_add_dma_alias(). This is the default implementation. Architecture
6149 * implementations can override this.
6150 */
6151 struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
6152 {
6153 return dev;
6154 }
6155
6156 resource_size_t __weak pcibios_default_alignment(void)
6157 {
6158 return 0;
6159 }
6160
6161 /*
6162 * Arches that don't want to expose struct resource to userland as-is in
6163 * sysfs and /proc can implement their own pci_resource_to_user().
6164 */
6165 void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
6166 const struct resource *rsrc,
6167 resource_size_t *start, resource_size_t *end)
6168 {
6169 *start = rsrc->start;
6170 *end = rsrc->end;
6171 }
6172
6173 static char *resource_alignment_param;
6174 static DEFINE_SPINLOCK(resource_alignment_lock);
6175
6176 /**
6177 * pci_specified_resource_alignment - get resource alignment specified by user.
6178 * @dev: the PCI device to get
6179 * @resize: whether or not to change resources' size when reassigning alignment
6180 *
6181 * RETURNS: Resource alignment if it is specified.
6182 * Zero if it is not specified.
6183 */
6184 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
6185 bool *resize)
6186 {
6187 int align_order, count;
6188 resource_size_t align = pcibios_default_alignment();
6189 const char *p;
6190 int ret;
6191
6192 spin_lock(&resource_alignment_lock);
6193 p = resource_alignment_param;
6194 if (!p || !*p)
6195 goto out;
6196 if (pci_has_flag(PCI_PROBE_ONLY)) {
6197 align = 0;
6198 pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
6199 goto out;
6200 }
6201
6202 while (*p) {
6203 count = 0;
6204 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
6205 p[count] == '@') {
6206 p += count + 1;
6207 } else {
6208 align_order = -1;
6209 }
6210
6211 ret = pci_dev_str_match(dev, p, &p);
6212 if (ret == 1) {
6213 *resize = true;
6214 if (align_order == -1)
6215 align = PAGE_SIZE;
6216 else
6217 align = 1 << align_order;
6218 break;
6219 } else if (ret < 0) {
6220 pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
6221 p);
6222 break;
6223 }
6224
6225 if (*p != ';' && *p != ',') {
6226 /* End of param or invalid format */
6227 break;
6228 }
6229 p++;
6230 }
6231 out:
6232 spin_unlock(&resource_alignment_lock);
6233 return align;
6234 }
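
/*
 * Parameter format (sketch): entries are "[<order>@]<device spec>"
 * separated by ';' or ','. For example,
 * "pci=resource_alignment=12@0000:01:00.0" requests 1 << 12 (4 KB)
 * alignment for that device; with no order given, PAGE_SIZE is used.
 */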
6235
6236 static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
6237 resource_size_t align, bool resize)
6238 {
6239 struct resource *r = &dev->resource[bar];
6240 resource_size_t size;
6241
6242 if (!(r->flags & IORESOURCE_MEM))
6243 return;
6244
6245 if (r->flags & IORESOURCE_PCI_FIXED) {
6246 pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
6247 bar, r, (unsigned long long)align);
6248 return;
6249 }
6250
6251 size = resource_size(r);
6252 if (size >= align)
6253 return;
6254
6255 /*
6256 * Increase the alignment of the resource. There are two ways we
6257 * can do this:
6258 *
6259 * 1) Increase the size of the resource. BARs are aligned on their
6260 * size, so when we reallocate space for this resource, we'll
6261 * allocate it with the larger alignment. This also prevents
6262 * assignment of any other BARs inside the alignment region, so
6263 * if we're requesting page alignment, this means no other BARs
6264 * will share the page.
6265 *
6266 * The disadvantage is that this makes the resource larger than
6267 * the hardware BAR, which may break drivers that compute things
6268 * based on the resource size, e.g., to find registers at a
6269 * fixed offset before the end of the BAR.
6270 *
6271 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
6272 * set r->start to the desired alignment. By itself this
6273 * doesn't prevent other BARs being put inside the alignment
6274 * region, but if we realign *every* resource of every device in
6275 * the system, none of them will share an alignment region.
6276 *
6277 * When the user has requested alignment for only some devices via
6278 * the "pci=resource_alignment" argument, "resize" is true and we
6279 * use the first method. Otherwise we assume we're aligning all
6280 * devices and we use the second.
6281 */
6282
6283 pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
6284 bar, r, (unsigned long long)align);
6285
6286 if (resize) {
6287 r->start = 0;
6288 r->end = align - 1;
6289 } else {
6290 r->flags &= ~IORESOURCE_SIZEALIGN;
6291 r->flags |= IORESOURCE_STARTALIGN;
6292 r->start = align;
6293 r->end = r->start + size - 1;
6294 }
6295 r->flags |= IORESOURCE_UNSET;
6296 }
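
/*
 * Worked example (illustrative): with a requested alignment of 1 << 12
 * (4 KB), a 256-byte BAR is either grown to [0, 0xfff] when "resize" is
 * set (method 1 above), or kept at 256 bytes with IORESOURCE_STARTALIGN
 * and r->start = 0x1000 used as the allocation alignment (method 2).
 */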
6297
6298 /*
6299 * This function disables memory decoding and releases memory resources
6300 * of the device specified by the kernel's boot parameter
6301 * 'pci=resource_alignment='. It also rounds up the size to the specified
6302 * alignment. Later on, the kernel will assign page-aligned memory
6303 * resources back to the device.
6304 */
6305 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
6306 {
6307 int i;
6308 struct resource *r;
6309 resource_size_t align;
6310 u16 command;
6311 bool resize = false;
6312
6313 /*
6314 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
6315 * 3.4.1.11. Their resources are allocated from the space
6316 * described by the VF BARx register in the PF's SR-IOV capability.
6317 * We can't influence their alignment here.
6318 */
6319 if (dev->is_virtfn)
6320 return;
6321
6322 /* Check if the specified PCI device is a target for reassignment */
6323 align = pci_specified_resource_alignment(dev, &resize);
6324 if (!align)
6325 return;
6326
6327 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
6328 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
6329 pci_warn(dev, "Can't reassign resources to host bridge\n");
6330 return;
6331 }
6332
6333 pci_read_config_word(dev, PCI_COMMAND, &command);
6334 command &= ~PCI_COMMAND_MEMORY;
6335 pci_write_config_word(dev, PCI_COMMAND, command);
6336
6337 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
6338 pci_request_resource_alignment(dev, i, align, resize);
6339
6340 /*
6341 * Need to disable the bridge's resource window
6342 * so the kernel can reassign a new resource
6343 * window later on.
6344 */
6345 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
6346 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
6347 r = &dev->resource[i];
6348 if (!(r->flags & IORESOURCE_MEM))
6349 continue;
6350 r->flags |= IORESOURCE_UNSET;
6351 r->end = resource_size(r) - 1;
6352 r->start = 0;
6353 }
6354 pci_disable_bridge_window(dev);
6355 }
6356 }
6357
6358 static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
6359 {
6360 size_t count = 0;
6361
6362 spin_lock(&resource_alignment_lock);
6363 if (resource_alignment_param)
6364 count = scnprintf(buf, PAGE_SIZE, "%s", resource_alignment_param);
6365 spin_unlock(&resource_alignment_lock);
6366
6367 /*
6368 * When set by the command line, resource_alignment_param will not
6369 * have a trailing line feed, which is ugly. So conditionally add
6370 * it here.
6371 */
6372 if (count >= 2 && buf[count - 2] != '\n' && count < PAGE_SIZE - 1) {
6373 buf[count - 1] = '\n';
6374 buf[count++] = 0;
6375 }
6376
6377 return count;
6378 }
6379
6380 static ssize_t resource_alignment_store(struct bus_type *bus,
6381 const char *buf, size_t count)
6382 {
6383 char *param = kstrndup(buf, count, GFP_KERNEL);
6384
6385 if (!param)
6386 return -ENOMEM;
6387
6388 spin_lock(&resource_alignment_lock);
6389 kfree(resource_alignment_param);
6390 resource_alignment_param = param;
6391 spin_unlock(&resource_alignment_lock);
6392 return count;
6393 }
6394
6395 static BUS_ATTR_RW(resource_alignment);
6396
6397 static int __init pci_resource_alignment_sysfs_init(void)
6398 {
6399 return bus_create_file(&pci_bus_type,
6400 &bus_attr_resource_alignment);
6401 }
6402 late_initcall(pci_resource_alignment_sysfs_init);
6403
6404 static void pci_no_domains(void)
6405 {
6406 #ifdef CONFIG_PCI_DOMAINS
6407 pci_domains_supported = 0;
6408 #endif
6409 }
6410
6411 #ifdef CONFIG_PCI_DOMAINS_GENERIC
6412 static atomic_t __domain_nr = ATOMIC_INIT(-1);
6413
6414 static int pci_get_new_domain_nr(void)
6415 {
6416 return atomic_inc_return(&__domain_nr);
6417 }
6418
6419 static int of_pci_bus_find_domain_nr(struct device *parent)
6420 {
6421 static int use_dt_domains = -1;
6422 int domain = -1;
6423
6424 if (parent)
6425 domain = of_get_pci_domain_nr(parent->of_node);
6426
6427 /*
6428 * Check DT domain and use_dt_domains values.
6429 *
6430 * If DT domain property is valid (domain >= 0) and
6431 * use_dt_domains != 0, the DT assignment is valid since this means
6432 * we have not previously allocated a domain number by using
6433 * pci_get_new_domain_nr(); we should also update use_dt_domains to
6434 * 1, to indicate that we have just assigned a domain number from
6435 * DT.
6436 *
6437 * If the DT domain property value is not valid (i.e., domain < 0), and we
6438 * have not previously assigned a domain number from DT
6439 * (use_dt_domains != 1) we should assign a domain number by
6440 * using the:
6441 *
6442 * pci_get_new_domain_nr()
6443 *
6444 * API and update the use_dt_domains value to keep track of method we
6445 * are using to assign domain numbers (use_dt_domains = 0).
6446 *
6447 * All other combinations imply we have a platform that is trying
6448 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
6449 * which is a recipe for domain mishandling and it is prevented by
6450 * invalidating the domain value (domain = -1) and printing a
6451 * corresponding error.
6452 */
6453 if (domain >= 0 && use_dt_domains) {
6454 use_dt_domains = 1;
6455 } else if (domain < 0 && use_dt_domains != 1) {
6456 use_dt_domains = 0;
6457 domain = pci_get_new_domain_nr();
6458 } else {
6459 if (parent)
6460 pr_err("Node %pOF has ", parent->of_node);
6461 pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
6462 domain = -1;
6463 }
6464
6465 return domain;
6466 }
6467
6468 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
6469 {
6470 return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
6471 acpi_pci_bus_find_domain_nr(bus);
6472 }
6473 #endif
6474
6475 /**
6476 * pci_ext_cfg_avail - can we access extended PCI config space?
6477 *
6478 * Returns 1 if we can access PCI extended config space (offsets
6479 * greater than 0xff). This is the default implementation. Architecture
6480 * implementations can override this.
6481 */
6482 int __weak pci_ext_cfg_avail(void)
6483 {
6484 return 1;
6485 }
6486
6487 void __weak pci_fixup_cardbus(struct pci_bus *bus)
6488 {
6489 }
6490 EXPORT_SYMBOL(pci_fixup_cardbus);
6491
6492 static int __init pci_setup(char *str)
6493 {
6494 while (str) {
6495 char *k = strchr(str, ',');
6496 if (k)
6497 *k++ = 0;
6498 if (*str && (str = pcibios_setup(str)) && *str) {
6499 if (!strcmp(str, "nomsi")) {
6500 pci_no_msi();
6501 } else if (!strncmp(str, "noats", 5)) {
6502 pr_info("PCIe: ATS is disabled\n");
6503 pcie_ats_disabled = true;
6504 } else if (!strcmp(str, "noaer")) {
6505 pci_no_aer();
6506 } else if (!strcmp(str, "earlydump")) {
6507 pci_early_dump = true;
6508 } else if (!strncmp(str, "realloc=", 8)) {
6509 pci_realloc_get_opt(str + 8);
6510 } else if (!strncmp(str, "realloc", 7)) {
6511 pci_realloc_get_opt("on");
6512 } else if (!strcmp(str, "nodomains")) {
6513 pci_no_domains();
6514 } else if (!strncmp(str, "noari", 5)) {
6515 pcie_ari_disabled = true;
6516 } else if (!strncmp(str, "cbiosize=", 9)) {
6517 pci_cardbus_io_size = memparse(str + 9, &str);
6518 } else if (!strncmp(str, "cbmemsize=", 10)) {
6519 pci_cardbus_mem_size = memparse(str + 10, &str);
6520 } else if (!strncmp(str, "resource_alignment=", 19)) {
6521 resource_alignment_param = str + 19;
6522 } else if (!strncmp(str, "ecrc=", 5)) {
6523 pcie_ecrc_get_policy(str + 5);
6524 } else if (!strncmp(str, "hpiosize=", 9)) {
6525 pci_hotplug_io_size = memparse(str + 9, &str);
6526 } else if (!strncmp(str, "hpmmiosize=", 11)) {
6527 pci_hotplug_mmio_size = memparse(str + 11, &str);
6528 } else if (!strncmp(str, "hpmmioprefsize=", 15)) {
6529 pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
6530 } else if (!strncmp(str, "hpmemsize=", 10)) {
6531 pci_hotplug_mmio_size = memparse(str + 10, &str);
6532 pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
6533 } else if (!strncmp(str, "hpbussize=", 10)) {
6534 pci_hotplug_bus_size =
6535 simple_strtoul(str + 10, &str, 0);
6536 if (pci_hotplug_bus_size > 0xff)
6537 pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
6538 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
6539 pcie_bus_config = PCIE_BUS_TUNE_OFF;
6540 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
6541 pcie_bus_config = PCIE_BUS_SAFE;
6542 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
6543 pcie_bus_config = PCIE_BUS_PERFORMANCE;
6544 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
6545 pcie_bus_config = PCIE_BUS_PEER2PEER;
6546 } else if (!strncmp(str, "pcie_scan_all", 13)) {
6547 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
6548 } else if (!strncmp(str, "disable_acs_redir=", 18)) {
6549 disable_acs_redir_param = str + 18;
6550 } else {
6551 pr_err("PCI: Unknown option `%s'\n", str);
6552 }
6553 }
6554 str = k;
6555 }
6556 return 0;
6557 }
6558 early_param("pci", pci_setup);
6559
6560 /*
6561 * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
6562 * in pci_setup(), above, to point to data in the __initdata section which
6563 * will be freed after the init sequence is complete. We can't allocate memory
6564 * in pci_setup() because some architectures do not have any memory allocation
6565 * service available during an early_param() call. So we allocate memory and
6566 * copy the variables here before the init section is freed.
6568 */
6569 static int __init pci_realloc_setup_params(void)
6570 {
6571 resource_alignment_param = kstrdup(resource_alignment_param,
6572 GFP_KERNEL);
6573 disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
6574
6575 return 0;
6576 }
6577 pure_initcall(pci_realloc_setup_params);
6578