1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * pci.h
4 *
5 * PCI defines and function prototypes
6 * Copyright 1994, Drew Eckhardt
7 * Copyright 1997--1999 Martin Mares <mj@ucw.cz>
8 *
9 * PCI Express ASPM defines and function prototypes
10 * Copyright (c) 2007 Intel Corp.
11 * Zhang Yanmin (yanmin.zhang@intel.com)
12 * Shaohua Li (shaohua.li@intel.com)
13 *
14 * For more information, please consult the following manuals (look at
15 * http://www.pcisig.com/ for how to get them):
16 *
17 * PCI BIOS Specification
18 * PCI Local Bus Specification
19 * PCI to PCI Bridge Specification
20 * PCI Express Specification
21 * PCI System Design Guide
22 */
23 #ifndef LINUX_PCI_H
24 #define LINUX_PCI_H
25
26
27 #include <linux/mod_devicetable.h>
28
29 #include <linux/types.h>
30 #include <linux/init.h>
31 #include <linux/ioport.h>
32 #include <linux/list.h>
33 #include <linux/compiler.h>
34 #include <linux/errno.h>
35 #include <linux/kobject.h>
36 #include <linux/atomic.h>
37 #include <linux/device.h>
38 #include <linux/interrupt.h>
39 #include <linux/io.h>
40 #include <linux/resource_ext.h>
41 #include <uapi/linux/pci.h>
42
43 #include <linux/pci_ids.h>
44
45 /*
46 * The PCI interface treats multi-function devices as independent
47 * devices. The slot/function address of each device is encoded
48 * in a single byte as follows:
49 *
50 * 7:3 = slot
51 * 2:0 = function
52 *
53 * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
54 * In the interest of not exposing interfaces to user-space unnecessarily,
55 * the following kernel-only defines are being added here.
56 */
57 #define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
58 /* Return bus from PCI devid = (((u16)bus_number) << 8) | devfn */
59 #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
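/*
 * Usage sketch (illustrative only): composing and decomposing a 16-bit
 * requester ID with the helpers above plus PCI_SLOT()/PCI_FUNC() from
 * uapi/linux/pci.h ("dev" is a hypothetical, already-enumerated pci_dev;
 * the slot/function helpers take the devfn byte, i.e. the low byte):
 *
 *	u16 rid  = PCI_DEVID(dev->bus->number, dev->devfn);
 *	u8  bus  = PCI_BUS_NUM(rid);
 *	u8  slot = PCI_SLOT(rid & 0xff);
 *	u8  func = PCI_FUNC(rid & 0xff);
 */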
60
61 /* pci_slot represents a physical slot */
62 struct pci_slot {
63 struct pci_bus *bus; /* Bus this slot is on */
64 struct list_head list; /* Node in list of slots */
65 struct hotplug_slot *hotplug; /* Hotplug info (move here) */
66 unsigned char number; /* PCI_SLOT(pci_dev->devfn) */
67 struct kobject kobj;
68 };
69
70 static inline const char *pci_slot_name(const struct pci_slot *slot)
71 {
72 return kobject_name(&slot->kobj);
73 }
74
75 /* File state for mmap()s on /proc/bus/pci/X/Y */
76 enum pci_mmap_state {
77 pci_mmap_io,
78 pci_mmap_mem
79 };
80
81 /* For PCI devices, the region numbers are assigned this way: */
82 enum {
83 /* #0-5: standard PCI resources */
84 PCI_STD_RESOURCES,
85 PCI_STD_RESOURCE_END = 5,
86
87 /* #6: expansion ROM resource */
88 PCI_ROM_RESOURCE,
89
90 /* Device-specific resources */
91 #ifdef CONFIG_PCI_IOV
92 PCI_IOV_RESOURCES,
93 PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
94 #endif
95
96 /* Resources assigned to buses behind the bridge */
97 #define PCI_BRIDGE_RESOURCE_NUM 4
98
99 PCI_BRIDGE_RESOURCES,
100 PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
101 PCI_BRIDGE_RESOURCE_NUM - 1,
102
103 /* Total resources associated with a PCI device */
104 PCI_NUM_RESOURCES,
105
106 /* Preserve this for compatibility */
107 DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
108 };
109
110 /**
111 * enum pci_interrupt_pin - PCI INTx interrupt values
112 * @PCI_INTERRUPT_UNKNOWN: Unknown or unassigned interrupt
113 * @PCI_INTERRUPT_INTA: PCI INTA pin
114 * @PCI_INTERRUPT_INTB: PCI INTB pin
115 * @PCI_INTERRUPT_INTC: PCI INTC pin
116 * @PCI_INTERRUPT_INTD: PCI INTD pin
117 *
118 * Corresponds to values for legacy PCI INTx interrupts, as can be found in the
119 * PCI_INTERRUPT_PIN register.
120 */
121 enum pci_interrupt_pin {
122 PCI_INTERRUPT_UNKNOWN,
123 PCI_INTERRUPT_INTA,
124 PCI_INTERRUPT_INTB,
125 PCI_INTERRUPT_INTC,
126 PCI_INTERRUPT_INTD,
127 };
128
129 /* The number of legacy PCI INTx interrupts */
130 #define PCI_NUM_INTX 4
131
132 /*
133 * pci_power_t values must match the bits in the Capabilities PME_Support
134 * and Control/Status PowerState fields in the Power Management capability.
135 */
136 typedef int __bitwise pci_power_t;
137
138 #define PCI_D0 ((pci_power_t __force) 0)
139 #define PCI_D1 ((pci_power_t __force) 1)
140 #define PCI_D2 ((pci_power_t __force) 2)
141 #define PCI_D3hot ((pci_power_t __force) 3)
142 #define PCI_D3cold ((pci_power_t __force) 4)
143 #define PCI_UNKNOWN ((pci_power_t __force) 5)
144 #define PCI_POWER_ERROR ((pci_power_t __force) -1)
145
146 /* Remember to update this when the list above changes! */
147 extern const char *pci_power_names[];
148
149 static inline const char *pci_power_name(pci_power_t state)
150 {
151 return pci_power_names[1 + (__force int) state];
152 }
153
154 /**
155 * typedef pci_channel_state_t
156 *
157 * The pci_channel state describes connectivity between the CPU and
158 * the PCI device. If some PCI bus between here and the PCI device
159 * has crashed or locked up, this info is reflected here.
160 */
161 typedef unsigned int __bitwise pci_channel_state_t;
162
163 enum pci_channel_state {
164 /* I/O channel is in normal state */
165 pci_channel_io_normal = (__force pci_channel_state_t) 1,
166
167 /* I/O to channel is blocked */
168 pci_channel_io_frozen = (__force pci_channel_state_t) 2,
169
170 /* PCI card is dead */
171 pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
172 };
173
174 typedef unsigned int __bitwise pcie_reset_state_t;
175
176 enum pcie_reset_state {
177 /* Reset is NOT asserted (Use to deassert reset) */
178 pcie_deassert_reset = (__force pcie_reset_state_t) 1,
179
180 /* Use #PERST to reset PCIe device */
181 pcie_warm_reset = (__force pcie_reset_state_t) 2,
182
183 /* Use PCIe Hot Reset to reset device */
184 pcie_hot_reset = (__force pcie_reset_state_t) 3
185 };
186
187 typedef unsigned short __bitwise pci_dev_flags_t;
188 enum pci_dev_flags {
189 /* INTX_DISABLE in PCI_COMMAND register disables MSI too */
190 PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
191 /* Device configuration is irrevocably lost if disabled into D3 */
192 PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
193 /* Provide indication device is assigned by a Virtual Machine Manager */
194 PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
195 /* Flag for quirk use to store if quirk-specific ACS is enabled */
196 PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
197 /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
198 PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
199 /* Do not use bus resets for device */
200 PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
201 /* Do not use PM reset even if device advertises NoSoftRst- */
202 PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
203 /* Get VPD from function 0 VPD */
204 PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
205 /* A non-root bridge where translation occurs, stop alias search here */
206 PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
207 /* Do not use FLR even if device advertises PCI_AF_CAP */
208 PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
209 /* Don't use Relaxed Ordering for TLPs directed at this device */
210 PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
211 };
212
213 enum pci_irq_reroute_variant {
214 INTEL_IRQ_REROUTE_VARIANT = 1,
215 MAX_IRQ_REROUTE_VARIANTS = 3
216 };
217
218 typedef unsigned short __bitwise pci_bus_flags_t;
219 enum pci_bus_flags {
220 PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
221 PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
222 PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4,
223 PCI_BUS_FLAGS_NO_EXTCFG = (__force pci_bus_flags_t) 8,
224 };
225
226 /* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
227 enum pcie_link_width {
228 PCIE_LNK_WIDTH_RESRV = 0x00,
229 PCIE_LNK_X1 = 0x01,
230 PCIE_LNK_X2 = 0x02,
231 PCIE_LNK_X4 = 0x04,
232 PCIE_LNK_X8 = 0x08,
233 PCIE_LNK_X12 = 0x0c,
234 PCIE_LNK_X16 = 0x10,
235 PCIE_LNK_X32 = 0x20,
236 PCIE_LNK_WIDTH_UNKNOWN = 0xff,
237 };
238
239 /* Based on the PCI Hotplug Spec, but some values are made up by us */
240 enum pci_bus_speed {
241 PCI_SPEED_33MHz = 0x00,
242 PCI_SPEED_66MHz = 0x01,
243 PCI_SPEED_66MHz_PCIX = 0x02,
244 PCI_SPEED_100MHz_PCIX = 0x03,
245 PCI_SPEED_133MHz_PCIX = 0x04,
246 PCI_SPEED_66MHz_PCIX_ECC = 0x05,
247 PCI_SPEED_100MHz_PCIX_ECC = 0x06,
248 PCI_SPEED_133MHz_PCIX_ECC = 0x07,
249 PCI_SPEED_66MHz_PCIX_266 = 0x09,
250 PCI_SPEED_100MHz_PCIX_266 = 0x0a,
251 PCI_SPEED_133MHz_PCIX_266 = 0x0b,
252 AGP_UNKNOWN = 0x0c,
253 AGP_1X = 0x0d,
254 AGP_2X = 0x0e,
255 AGP_4X = 0x0f,
256 AGP_8X = 0x10,
257 PCI_SPEED_66MHz_PCIX_533 = 0x11,
258 PCI_SPEED_100MHz_PCIX_533 = 0x12,
259 PCI_SPEED_133MHz_PCIX_533 = 0x13,
260 PCIE_SPEED_2_5GT = 0x14,
261 PCIE_SPEED_5_0GT = 0x15,
262 PCIE_SPEED_8_0GT = 0x16,
263 PCIE_SPEED_16_0GT = 0x17,
264 PCIE_SPEED_32_0GT = 0x18,
265 PCI_SPEED_UNKNOWN = 0xff,
266 };
267
268 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
269 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
270
271 struct pci_cap_saved_data {
272 u16 cap_nr;
273 bool cap_extended;
274 unsigned int size;
275 u32 data[0];
276 };
277
278 struct pci_cap_saved_state {
279 struct hlist_node next;
280 struct pci_cap_saved_data cap;
281 };
282
283 struct irq_affinity;
284 struct pcie_link_state;
285 struct pci_vpd;
286 struct pci_sriov;
287 struct pci_ats;
288 struct pci_p2pdma;
289
290 /* The pci_dev structure describes PCI devices */
291 struct pci_dev {
292 struct list_head bus_list; /* Node in per-bus list */
293 struct pci_bus *bus; /* Bus this device is on */
294 struct pci_bus *subordinate; /* Bus this device bridges to */
295
296 void *sysdata; /* Hook for sys-specific extension */
297 struct proc_dir_entry *procent; /* Device entry in /proc/bus/pci */
298 struct pci_slot *slot; /* Physical slot this device is in */
299
300 unsigned int devfn; /* Encoded device & function index */
301 unsigned short vendor;
302 unsigned short device;
303 unsigned short subsystem_vendor;
304 unsigned short subsystem_device;
305 unsigned int class; /* 3 bytes: (base,sub,prog-if) */
306 u8 revision; /* PCI revision, low byte of class word */
307 u8 hdr_type; /* PCI header type (`multi' flag masked out) */
308 #ifdef CONFIG_PCIEAER
309 u16 aer_cap; /* AER capability offset */
310 struct aer_stats *aer_stats; /* AER stats for this device */
311 #endif
312 u8 pcie_cap; /* PCIe capability offset */
313 u8 msi_cap; /* MSI capability offset */
314 u8 msix_cap; /* MSI-X capability offset */
315 u8 pcie_mpss:3; /* PCIe Max Payload Size Supported */
316 u8 rom_base_reg; /* Config register controlling ROM */
317 u8 pin; /* Interrupt pin this device uses */
318 u16 pcie_flags_reg; /* Cached PCIe Capabilities Register */
319 unsigned long *dma_alias_mask;/* Mask of enabled devfn aliases */
320
321 struct pci_driver *driver; /* Driver bound to this device */
322 u64 dma_mask; /* Mask of the bits of bus address this
323 device implements. Normally this is
324 0xffffffff. You only need to change
325 this if your device has broken DMA
326 or supports 64-bit transfers. */
327
328 struct device_dma_parameters dma_parms;
329
330 pci_power_t current_state; /* Current operating state. In ACPI,
331 this is D0-D3, D0 being fully
332 functional, and D3 being off. */
333 unsigned int imm_ready:1; /* Supports Immediate Readiness */
334 u8 pm_cap; /* PM capability offset */
335 unsigned int pme_support:5; /* Bitmask of states from which PME#
336 can be generated */
337 unsigned int pme_poll:1; /* Poll device's PME status bit */
338 unsigned int d1_support:1; /* Low power state D1 is supported */
339 unsigned int d2_support:1; /* Low power state D2 is supported */
340 unsigned int no_d1d2:1; /* D1 and D2 are forbidden */
341 unsigned int no_d3cold:1; /* D3cold is forbidden */
342 unsigned int bridge_d3:1; /* Allow D3 for bridge */
343 unsigned int d3cold_allowed:1; /* D3cold is allowed by user */
344 unsigned int mmio_always_on:1; /* Disallow turning off io/mem
345 decoding during BAR sizing */
346 unsigned int wakeup_prepared:1;
347 unsigned int runtime_d3cold:1; /* Whether to go through runtime
348 D3cold, not set for devices
349 powered on/off by the
350 corresponding bridge */
351 unsigned int skip_bus_pm:1; /* Internal: Skip bus-level PM */
352 unsigned int ignore_hotplug:1; /* Ignore hotplug events */
353 unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
354 controlled exclusively by
355 user sysfs */
356 unsigned int clear_retrain_link:1; /* Need to clear Retrain Link
357 bit manually */
358 unsigned int d3_delay; /* D3->D0 transition time in ms */
359 unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
360
361 #ifdef CONFIG_PCIEASPM
362 struct pcie_link_state *link_state; /* ASPM link state */
363 unsigned int ltr_path:1; /* Latency Tolerance Reporting
364 supported from root to here */
365 #endif
366 unsigned int eetlp_prefix_path:1; /* End-to-End TLP Prefix */
367
368 pci_channel_state_t error_state; /* Current connectivity state */
369 struct device dev; /* Generic device interface */
370
371 int cfg_size; /* Size of config space */
372
373 /*
374 * Instead of touching interrupt line and base address registers
375 * directly, use the values stored here. They might be different!
376 */
377 unsigned int irq;
378 struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
379
380 bool match_driver; /* Skip attaching driver */
381
382 unsigned int transparent:1; /* Subtractive decode bridge */
383 unsigned int io_window:1; /* Bridge has I/O window */
384 unsigned int pref_window:1; /* Bridge has pref mem window */
385 unsigned int pref_64_window:1; /* Pref mem window is 64-bit */
386 unsigned int multifunction:1; /* Multi-function device */
387
388 unsigned int is_busmaster:1; /* Is busmaster */
389 unsigned int no_msi:1; /* May not use MSI */
390 unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */
391 unsigned int block_cfg_access:1; /* Config space access blocked */
392 unsigned int broken_parity_status:1; /* Generates false positive parity */
393 unsigned int irq_reroute_variant:2; /* Needs IRQ rerouting variant */
394 unsigned int msi_enabled:1;
395 unsigned int msix_enabled:1;
396 unsigned int ari_enabled:1; /* ARI forwarding */
397 unsigned int ats_enabled:1; /* Address Translation Svc */
398 unsigned int pasid_enabled:1; /* Process Address Space ID */
399 unsigned int pri_enabled:1; /* Page Request Interface */
400 unsigned int is_managed:1;
401 unsigned int needs_freset:1; /* Requires fundamental reset */
402 unsigned int state_saved:1;
403 unsigned int is_physfn:1;
404 unsigned int is_virtfn:1;
405 unsigned int reset_fn:1;
406 unsigned int is_hotplug_bridge:1;
407 unsigned int shpc_managed:1; /* SHPC owned by shpchp */
408 unsigned int is_thunderbolt:1; /* Thunderbolt controller */
409 /*
410 * Devices marked as untrusted can potentially mount DMA attacks and
411 * similar. They are typically connected through external ports such as
412 * Thunderbolt, but are not limited to that. When an IOMMU is enabled,
413 * they should get full mappings to make sure they cannot access
414 * arbitrary memory.
415 */
416 unsigned int untrusted:1;
417 unsigned int __aer_firmware_first_valid:1;
418 unsigned int __aer_firmware_first:1;
419 unsigned int broken_intx_masking:1; /* INTx masking can't be used */
420 unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */
421 unsigned int irq_managed:1;
422 unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */
423 unsigned int is_probed:1; /* Device probing in progress */
424 unsigned int link_active_reporting:1;/* Device capable of reporting link active */
425 unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */
426 pci_dev_flags_t dev_flags;
427 atomic_t enable_cnt; /* pci_enable_device has been called */
428
429 u32 saved_config_space[16]; /* Config space saved at suspend time */
430 struct hlist_head saved_cap_space;
431 struct bin_attribute *rom_attr; /* Attribute descriptor for sysfs ROM entry */
432 int rom_attr_enabled; /* Display of ROM attribute enabled? */
433 struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
434 struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
435
436 #ifdef CONFIG_HOTPLUG_PCI_PCIE
437 unsigned int broken_cmd_compl:1; /* No compl for some cmds */
438 #endif
439 #ifdef CONFIG_PCIE_PTM
440 unsigned int ptm_root:1;
441 unsigned int ptm_enabled:1;
442 u8 ptm_granularity;
443 #endif
444 #ifdef CONFIG_PCI_MSI
445 const struct attribute_group **msi_irq_groups;
446 #endif
447 struct pci_vpd *vpd;
448 #ifdef CONFIG_PCI_ATS
449 union {
450 struct pci_sriov *sriov; /* PF: SR-IOV info */
451 struct pci_dev *physfn; /* VF: related PF */
452 };
453 u16 ats_cap; /* ATS Capability offset */
454 u8 ats_stu; /* ATS Smallest Translation Unit */
455 atomic_t ats_ref_cnt; /* Number of VFs with ATS enabled */
456 #endif
457 #ifdef CONFIG_PCI_PRI
458 u32 pri_reqs_alloc; /* Number of PRI requests allocated */
459 #endif
460 #ifdef CONFIG_PCI_PASID
461 u16 pasid_features;
462 #endif
463 #ifdef CONFIG_PCI_P2PDMA
464 struct pci_p2pdma *p2pdma;
465 #endif
466 phys_addr_t rom; /* Physical address if not from BAR */
467 size_t romlen; /* Length if not from BAR */
468 char *driver_override; /* Driver name to force a match */
469
470 unsigned long priv_flags; /* Private flags for the PCI driver */
471 };
472
473 static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
474 {
475 #ifdef CONFIG_PCI_IOV
476 if (dev->is_virtfn)
477 dev = dev->physfn;
478 #endif
479 return dev;
480 }
481
482 struct pci_dev *pci_alloc_dev(struct pci_bus *bus);
483
484 #define to_pci_dev(n) container_of(n, struct pci_dev, dev)
485 #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
486
487 static inline int pci_channel_offline(struct pci_dev *pdev)
488 {
489 return (pdev->error_state != pci_channel_io_normal);
490 }
491
492 struct pci_host_bridge {
493 struct device dev;
494 struct pci_bus *bus; /* Root bus */
495 struct pci_ops *ops;
496 void *sysdata;
497 int busnr;
498 struct list_head windows; /* resource_entry */
499 struct list_head dma_ranges; /* dma ranges resource list */
500 u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
501 int (*map_irq)(const struct pci_dev *, u8, u8);
502 void (*release_fn)(struct pci_host_bridge *);
503 void *release_data;
504 struct msi_controller *msi;
505 unsigned int ignore_reset_delay:1; /* For entire hierarchy */
506 unsigned int no_ext_tags:1; /* No Extended Tags */
507 unsigned int native_aer:1; /* OS may use PCIe AER */
508 unsigned int native_pcie_hotplug:1; /* OS may use PCIe hotplug */
509 unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */
510 unsigned int native_pme:1; /* OS may use PCIe PME */
511 unsigned int native_ltr:1; /* OS may use PCIe LTR */
512 unsigned int preserve_config:1; /* Preserve FW resource setup */
513
514 /* Resource alignment requirements */
515 resource_size_t (*align_resource)(struct pci_dev *dev,
516 const struct resource *res,
517 resource_size_t start,
518 resource_size_t size,
519 resource_size_t align);
520 unsigned long private[0] ____cacheline_aligned;
521 };
522
523 #define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
524
525 static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge)
526 {
527 return (void *)bridge->private;
528 }
529
530 static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv)
531 {
532 return container_of(priv, struct pci_host_bridge, private);
533 }
534
535 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
536 struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
537 size_t priv);
538 void pci_free_host_bridge(struct pci_host_bridge *bridge);
539 struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
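/*
 * Typical private-data pattern for the allocators above (a minimal sketch;
 * "struct my_hc" and "pdev" are hypothetical host-controller driver names):
 *
 *	struct pci_host_bridge *bridge;
 *	struct my_hc *hc;
 *
 *	bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*hc));
 *	if (!bridge)
 *		return -ENOMEM;
 *	hc = pci_host_bridge_priv(bridge);
 *
 * pci_host_bridge_priv() simply returns the bridge's trailing private[] area.
 */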
540
541 void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
542 void (*release_fn)(struct pci_host_bridge *),
543 void *release_data);
544
545 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
546
547 /*
548 * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
549 * to P2P or CardBus bridge windows) go in a table. Additional ones (for
550 * buses below host bridges or subtractive decode bridges) go in the list.
551 * Use pci_bus_for_each_resource() to iterate through all the resources.
552 */
553
554 /*
555 * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly
556 * and there's no way to program the bridge with the details of the window.
557 * This does not apply to ACPI _CRS windows, even with the _DEC subtractive-
558 * decode bit set, because they are explicit and can be programmed with _SRS.
559 */
560 #define PCI_SUBTRACTIVE_DECODE 0x1
561
562 struct pci_bus_resource {
563 struct list_head list;
564 struct resource *res;
565 unsigned int flags;
566 };
567
568 #define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */
569
570 struct pci_bus {
571 struct list_head node; /* Node in list of buses */
572 struct pci_bus *parent; /* Parent bus this bridge is on */
573 struct list_head children; /* List of child buses */
574 struct list_head devices; /* List of devices on this bus */
575 struct pci_dev *self; /* Bridge device as seen by parent */
576 struct list_head slots; /* List of slots on this bus;
577 protected by pci_slot_mutex */
578 struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
579 struct list_head resources; /* Address space routed to this bus */
580 struct resource busn_res; /* Bus numbers routed to this bus */
581
582 struct pci_ops *ops; /* Configuration access functions */
583 struct msi_controller *msi; /* MSI controller */
584 void *sysdata; /* Hook for sys-specific extension */
585 struct proc_dir_entry *procdir; /* Directory entry in /proc/bus/pci */
586
587 unsigned char number; /* Bus number */
588 unsigned char primary; /* Number of primary bridge */
589 unsigned char max_bus_speed; /* enum pci_bus_speed */
590 unsigned char cur_bus_speed; /* enum pci_bus_speed */
591 #ifdef CONFIG_PCI_DOMAINS_GENERIC
592 int domain_nr;
593 #endif
594
595 char name[48];
596
597 unsigned short bridge_ctl; /* Manage NO_ISA/FBB/et al behaviors */
598 pci_bus_flags_t bus_flags; /* Inherited by child buses */
599 struct device *bridge;
600 struct device dev;
601 struct bin_attribute *legacy_io; /* Legacy I/O for this bus */
602 struct bin_attribute *legacy_mem; /* Legacy mem */
603 unsigned int is_added:1;
604 };
605
606 #define to_pci_bus(n) container_of(n, struct pci_bus, dev)
607
608 static inline u16 pci_dev_id(struct pci_dev *dev)
609 {
610 return PCI_DEVID(dev->bus->number, dev->devfn);
611 }
612
613 /*
614 * Returns true if the PCI bus is root (behind host-PCI bridge),
615 * false otherwise
616 *
617 * Some code assumes that "bus->self == NULL" means that bus is a root bus.
618 * This is incorrect because "virtual" buses added for SR-IOV (via
619 * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
620 */
621 static inline bool pci_is_root_bus(struct pci_bus *pbus)
622 {
623 return !(pbus->parent);
624 }
625
626 /**
627 * pci_is_bridge - check if the PCI device is a bridge
628 * @dev: PCI device
629 *
630 * Return true if the PCI device is a bridge, whether or not it has a
631 * subordinate bus.
632 */
633 static inline bool pci_is_bridge(struct pci_dev *dev)
634 {
635 return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
636 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
637 }
638
639 #define for_each_pci_bridge(dev, bus) \
640 list_for_each_entry(dev, &bus->devices, bus_list) \
641 if (!pci_is_bridge(dev)) {} else
642
643 static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
644 {
645 dev = pci_physfn(dev);
646 if (pci_is_root_bus(dev->bus))
647 return NULL;
648
649 return dev->bus->self;
650 }
651
652 #ifdef CONFIG_PCI_MSI
653 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
654 {
655 return pci_dev->msi_enabled || pci_dev->msix_enabled;
656 }
657 #else
658 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
659 #endif
660
661 /* Error values that may be returned by PCI functions */
662 #define PCIBIOS_SUCCESSFUL 0x00
663 #define PCIBIOS_FUNC_NOT_SUPPORTED 0x81
664 #define PCIBIOS_BAD_VENDOR_ID 0x83
665 #define PCIBIOS_DEVICE_NOT_FOUND 0x86
666 #define PCIBIOS_BAD_REGISTER_NUMBER 0x87
667 #define PCIBIOS_SET_FAILED 0x88
668 #define PCIBIOS_BUFFER_TOO_SMALL 0x89
669
670 /* Translate above to generic errno for passing back through non-PCI code */
671 static inline int pcibios_err_to_errno(int err)
672 {
673 if (err <= PCIBIOS_SUCCESSFUL)
674 return err; /* Assume already errno */
675
676 switch (err) {
677 case PCIBIOS_FUNC_NOT_SUPPORTED:
678 return -ENOENT;
679 case PCIBIOS_BAD_VENDOR_ID:
680 return -ENOTTY;
681 case PCIBIOS_DEVICE_NOT_FOUND:
682 return -ENODEV;
683 case PCIBIOS_BAD_REGISTER_NUMBER:
684 return -EFAULT;
685 case PCIBIOS_SET_FAILED:
686 return -EIO;
687 case PCIBIOS_BUFFER_TOO_SMALL:
688 return -ENOSPC;
689 }
690
691 return -ERANGE;
692 }
693
694 /* Low-level architecture-dependent routines */
695
696 struct pci_ops {
697 int (*add_bus)(struct pci_bus *bus);
698 void (*remove_bus)(struct pci_bus *bus);
699 void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
700 int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
701 int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
702 };
703
704 /*
705 * ACPI needs to be able to access PCI config space before we've done a
706 * PCI bus scan and created pci_bus structures.
707 */
708 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
709 int reg, int len, u32 *val);
710 int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
711 int reg, int len, u32 val);
712
713 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
714 typedef u64 pci_bus_addr_t;
715 #else
716 typedef u32 pci_bus_addr_t;
717 #endif
718
719 struct pci_bus_region {
720 pci_bus_addr_t start;
721 pci_bus_addr_t end;
722 };
723
724 struct pci_dynids {
725 spinlock_t lock; /* Protects list, index */
726 struct list_head list; /* For IDs added at runtime */
727 };
728
729
730 /*
731 * PCI Error Recovery System (PCI-ERS). If a PCI device driver provides
732 * a set of callbacks in struct pci_error_handlers, that device driver
733 * will be notified of PCI bus errors, and will be driven to recovery
734 * when an error occurs.
735 */
736
737 typedef unsigned int __bitwise pci_ers_result_t;
738
739 enum pci_ers_result {
740 /* No result/none/not supported in device driver */
741 PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
742
743 /* Device driver can recover without slot reset */
744 PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
745
746 /* Device driver wants slot to be reset */
747 PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
748
749 /* Device has completely failed, is unrecoverable */
750 PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,
751
752 /* Device driver is fully recovered and operational */
753 PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
754
755 /* No AER capabilities registered for the driver */
756 PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
757 };
758
759 /* PCI bus error event callbacks */
760 struct pci_error_handlers {
761 /* PCI bus error detected on this device */
762 pci_ers_result_t (*error_detected)(struct pci_dev *dev,
763 enum pci_channel_state error);
764
765 /* MMIO has been re-enabled, but not DMA */
766 pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
767
768 /* PCI slot has been reset */
769 pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
770
771 /* PCI function reset prepare or completed */
772 void (*reset_prepare)(struct pci_dev *dev);
773 void (*reset_done)(struct pci_dev *dev);
774
775 /* Device driver may resume normal operations */
776 void (*resume)(struct pci_dev *dev);
777 };
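/*
 * Sketch of how a driver typically hooks these callbacks up (names starting
 * with "my_" are hypothetical; see Documentation/PCI/pci-error-recovery.rst
 * for the actual recovery contract):
 *
 *	static const struct pci_error_handlers my_err_handler = {
 *		.error_detected	= my_error_detected,
 *		.slot_reset	= my_slot_reset,
 *		.resume		= my_resume,
 *	};
 *
 * and then points struct pci_driver::err_handler (declared below) at it.
 */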
778
779
780 struct module;
781
782 /**
783 * struct pci_driver - PCI driver structure
784 * @node: List of driver structures.
785 * @name: Driver name.
786 * @id_table: Pointer to table of device IDs the driver is
787 * interested in. Most drivers should export this
788 * table using MODULE_DEVICE_TABLE(pci,...).
789 * @probe: This probing function gets called (during execution
790 * of pci_register_driver() for already existing
791 * devices or later if a new device gets inserted) for
792 * all PCI devices which match the ID table and are not
793 * "owned" by the other drivers yet. This function gets
794 * passed a "struct pci_dev \*" for each device whose
795 * entry in the ID table matches the device. The probe
796 * function returns zero when the driver chooses to
797 * take "ownership" of the device or an error code
798 * (negative number) otherwise.
799 * The probe function always gets called from process
800 * context, so it can sleep.
801 * @remove: The remove() function gets called whenever a device
802 * being handled by this driver is removed (either during
803 * deregistration of the driver or when it's manually
804 * pulled out of a hot-pluggable slot).
805 * The remove function always gets called from process
806 * context, so it can sleep.
807 * @suspend: Put device into low power state.
808 * @suspend_late: Put device into low power state.
809 * @resume_early: Wake device from low power state.
810 * @resume: Wake device from low power state.
811 * (Please see Documentation/power/pci.rst for descriptions
812 * of PCI Power Management and the related functions.)
813 * @shutdown: Hook into reboot_notifier_list (kernel/sys.c).
814 * Intended to stop any idling DMA operations.
815 * Useful for enabling wake-on-lan (NIC) or changing
816 * the power state of a device before reboot.
817 * e.g. drivers/net/e100.c.
818 * @sriov_configure: Optional driver callback to allow configuration of
819 * number of VFs to enable via sysfs "sriov_numvfs" file.
820 * @err_handler: See Documentation/PCI/pci-error-recovery.rst
821 * @groups: Sysfs attribute groups.
822 * @driver: Driver model structure.
823 * @dynids: List of dynamically added device IDs.
824 */
825 struct pci_driver {
826 struct list_head node;
827 const char *name;
828 const struct pci_device_id *id_table; /* Must be non-NULL for probe to be called */
829 int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
830 void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
831 int (*suspend)(struct pci_dev *dev, pm_message_t state); /* Device suspended */
832 int (*suspend_late)(struct pci_dev *dev, pm_message_t state);
833 int (*resume_early)(struct pci_dev *dev);
834 int (*resume)(struct pci_dev *dev); /* Device woken up */
835 void (*shutdown)(struct pci_dev *dev);
836 int (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
837 const struct pci_error_handlers *err_handler;
838 const struct attribute_group **groups;
839 struct device_driver driver;
840 struct pci_dynids dynids;
841 };
842
843 #define to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
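/*
 * Minimal driver skeleton using the structure documented above (all "my_*"
 * names are hypothetical placeholders; "my_ids" refers to an ID table built
 * with the match macros below):
 *
 *	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		return pcim_enable_device(pdev);
 *	}
 *
 *	static void my_remove(struct pci_dev *pdev)
 *	{
 *	}
 *
 *	static struct pci_driver my_driver = {
 *		.name		= "my_driver",
 *		.id_table	= my_ids,
 *		.probe		= my_probe,
 *		.remove		= my_remove,
 *	};
 */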
844
845 /**
846 * PCI_DEVICE - macro used to describe a specific PCI device
847 * @vend: the 16 bit PCI Vendor ID
848 * @dev: the 16 bit PCI Device ID
849 *
850 * This macro is used to create a struct pci_device_id that matches a
851 * specific device. The subvendor and subdevice fields will be set to
852 * PCI_ANY_ID.
853 */
854 #define PCI_DEVICE(vend,dev) \
855 .vendor = (vend), .device = (dev), \
856 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
857
858 /**
859 * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
860 * @vend: the 16 bit PCI Vendor ID
861 * @dev: the 16 bit PCI Device ID
862 * @subvend: the 16 bit PCI Subvendor ID
863 * @subdev: the 16 bit PCI Subdevice ID
864 *
865 * This macro is used to create a struct pci_device_id that matches a
866 * specific device with subsystem information.
867 */
868 #define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
869 .vendor = (vend), .device = (dev), \
870 .subvendor = (subvend), .subdevice = (subdev)
871
872 /**
873 * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class
874 * @dev_class: the class, subclass, prog-if triple for this device
875 * @dev_class_mask: the class mask for this device
876 *
877 * This macro is used to create a struct pci_device_id that matches a
878 * specific PCI class. The vendor, device, subvendor, and subdevice
879 * fields will be set to PCI_ANY_ID.
880 */
881 #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
882 .class = (dev_class), .class_mask = (dev_class_mask), \
883 .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
884 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
885
886 /**
887 * PCI_VDEVICE - macro used to describe a specific PCI device in short form
888 * @vend: the vendor name
889 * @dev: the 16 bit PCI Device ID
890 *
891 * This macro is used to create a struct pci_device_id that matches a
892 * specific PCI device. The subvendor, and subdevice fields will be set
893 * to PCI_ANY_ID. The macro allows the next field to follow as the device
894 * private data.
895 */
896 #define PCI_VDEVICE(vend, dev) \
897 .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
898 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
899
900 /**
901 * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
902 * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
903 * @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
904 * @data: the driver data to be filled
905 *
906 * This macro is used to create a struct pci_device_id that matches a
907 * specific PCI device. The subvendor, and subdevice fields will be set
908 * to PCI_ANY_ID.
909 */
910 #define PCI_DEVICE_DATA(vend, dev, data) \
911 .vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
912 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
913 .driver_data = (kernel_ulong_t)(data)
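/*
 * Example ID table using the match macros above (the raw vendor/device
 * numbers are made-up placeholders; real tables use values from pci_ids.h or
 * the hardware datasheet):
 *
 *	static const struct pci_device_id my_ids[] = {
 *		{ PCI_DEVICE(0x1234, 0x5678) },
 *		{ PCI_VDEVICE(INTEL, 0x9abc) },
 *		{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(pci, my_ids);
 */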
914
915 enum {
916 PCI_REASSIGN_ALL_RSRC = 0x00000001, /* Ignore firmware setup */
917 PCI_REASSIGN_ALL_BUS = 0x00000002, /* Reassign all bus numbers */
918 PCI_PROBE_ONLY = 0x00000004, /* Use existing setup */
919 PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* Don't do ISA alignment */
920 PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* Enable domains in /proc */
921 PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... except domain 0 */
922 PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */
923 };
924
925 #define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */
926 #define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */
927 #define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */
928 #define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */
929
930 /* These external functions are only available when PCI support is enabled */
931 #ifdef CONFIG_PCI
932
933 extern unsigned int pci_flags;
934
935 static inline void pci_set_flags(int flags) { pci_flags = flags; }
936 static inline void pci_add_flags(int flags) { pci_flags |= flags; }
937 static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
938 static inline int pci_has_flag(int flag) { return pci_flags & flag; }
939
940 void pcie_bus_configure_settings(struct pci_bus *bus);
941
942 enum pcie_bus_config_types {
943 PCIE_BUS_TUNE_OFF, /* Don't touch MPS at all */
944 PCIE_BUS_DEFAULT, /* Ensure MPS matches upstream bridge */
945 PCIE_BUS_SAFE, /* Use largest MPS that boot-time devices support */
946 PCIE_BUS_PERFORMANCE, /* Use MPS and MRRS for best performance */
947 PCIE_BUS_PEER2PEER, /* Set MPS = 128 for all devices */
948 };
949
950 extern enum pcie_bus_config_types pcie_bus_config;
951
952 extern struct bus_type pci_bus_type;
953
954 /* Do NOT directly access this variable unless you are arch-specific PCI
955 * code or PCI core code. */
956 extern struct list_head pci_root_buses; /* List of all known PCI buses */
957 /* Some device drivers need to know if PCI is initialized */
958 int no_pci_devices(void);
959
960 void pcibios_resource_survey_bus(struct pci_bus *bus);
961 void pcibios_bus_add_device(struct pci_dev *pdev);
962 void pcibios_add_bus(struct pci_bus *bus);
963 void pcibios_remove_bus(struct pci_bus *bus);
964 void pcibios_fixup_bus(struct pci_bus *);
965 int __must_check pcibios_enable_device(struct pci_dev *, int mask);
966 /* Architecture-specific versions may override this (weak) */
967 char *pcibios_setup(char *str);
968
969 /* Used only when drivers/pci/setup.c is used */
970 resource_size_t pcibios_align_resource(void *, const struct resource *,
971 resource_size_t,
972 resource_size_t);
973
974 /* Weak but can be overridden by arch */
975 void pci_fixup_cardbus(struct pci_bus *);
976
977 /* Generic PCI functions used internally */
978
979 void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
980 struct resource *res);
981 void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
982 struct pci_bus_region *region);
983 void pcibios_scan_specific_bus(int busn);
984 struct pci_bus *pci_find_bus(int domain, int busnr);
985 void pci_bus_add_devices(const struct pci_bus *bus);
986 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
987 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
988 struct pci_ops *ops, void *sysdata,
989 struct list_head *resources);
990 int pci_host_probe(struct pci_host_bridge *bridge);
991 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
992 int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
993 void pci_bus_release_busn_res(struct pci_bus *b);
994 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
995 struct pci_ops *ops, void *sysdata,
996 struct list_head *resources);
997 int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
998 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
999 int busnr);
1000 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
1001 const char *name,
1002 struct hotplug_slot *hotplug);
1003 void pci_destroy_slot(struct pci_slot *slot);
1004 #ifdef CONFIG_SYSFS
1005 void pci_dev_assign_slot(struct pci_dev *dev);
1006 #else
1007 static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
1008 #endif
1009 int pci_scan_slot(struct pci_bus *bus, int devfn);
1010 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
1011 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
1012 unsigned int pci_scan_child_bus(struct pci_bus *bus);
1013 void pci_bus_add_device(struct pci_dev *dev);
1014 void pci_read_bridge_bases(struct pci_bus *child);
1015 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
1016 struct resource *res);
1017 struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev);
1018 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
1019 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
1020 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
1021 struct pci_dev *pci_dev_get(struct pci_dev *dev);
1022 void pci_dev_put(struct pci_dev *dev);
1023 void pci_remove_bus(struct pci_bus *b);
1024 void pci_stop_and_remove_bus_device(struct pci_dev *dev);
1025 void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
1026 void pci_stop_root_bus(struct pci_bus *bus);
1027 void pci_remove_root_bus(struct pci_bus *bus);
1028 void pci_setup_cardbus(struct pci_bus *bus);
1029 void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
1030 void pci_sort_breadthfirst(void);
1031 #define dev_is_pci(d) ((d)->bus == &pci_bus_type)
1032 #define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
1033
1034 /* Generic PCI functions exported to card drivers */
1035
1036 enum pci_lost_interrupt_reason {
1037 PCI_LOST_IRQ_NO_INFORMATION = 0,
1038 PCI_LOST_IRQ_DISABLE_MSI,
1039 PCI_LOST_IRQ_DISABLE_MSIX,
1040 PCI_LOST_IRQ_DISABLE_ACPI,
1041 };
1042 enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev);
1043 int pci_find_capability(struct pci_dev *dev, int cap);
1044 int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
1045 int pci_find_ext_capability(struct pci_dev *dev, int cap);
1046 int pci_find_next_ext_capability(struct pci_dev *dev, int pos, int cap);
1047 int pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
1048 int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap);
1049 struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
1050
1051 struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
1052 struct pci_dev *from);
1053 struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
1054 unsigned int ss_vendor, unsigned int ss_device,
1055 struct pci_dev *from);
1056 struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
1057 struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
1058 unsigned int devfn);
1059 struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
1060 int pci_dev_present(const struct pci_device_id *ids);
1061
1062 int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
1063 int where, u8 *val);
1064 int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
1065 int where, u16 *val);
1066 int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
1067 int where, u32 *val);
1068 int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
1069 int where, u8 val);
1070 int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
1071 int where, u16 val);
1072 int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
1073 int where, u32 val);
1074
1075 int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
1076 int where, int size, u32 *val);
1077 int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
1078 int where, int size, u32 val);
1079 int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
1080 int where, int size, u32 *val);
1081 int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
1082 int where, int size, u32 val);
1083
1084 struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
1085
1086 int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
1087 int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val);
1088 int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
1089 int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
1090 int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
1091 int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
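/*
 * Usage sketch for the config accessors above ("pdev" is a bound pci_dev;
 * return values should be checked against PCIBIOS_SUCCESSFUL or converted
 * with pcibios_err_to_errno() in real code):
 *
 *	u16 cmd;
 *
 *	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
 *	cmd |= PCI_COMMAND_MEMORY;
 *	pci_write_config_word(pdev, PCI_COMMAND, cmd);
 */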
1092
1093 int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
1094 int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
1095 int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
1096 int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
1097 int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
1098 u16 clear, u16 set);
1099 int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
1100 u32 clear, u32 set);
1101
1102 static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
1103 u16 set)
1104 {
1105 return pcie_capability_clear_and_set_word(dev, pos, 0, set);
1106 }
1107
1108 static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
1109 u32 set)
1110 {
1111 return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
1112 }
1113
1114 static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
1115 u16 clear)
1116 {
1117 return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
1118 }
1119
1120 static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
1121 u32 clear)
1122 {
1123 return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
1124 }
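/*
 * Example for the read-modify-write helpers above: setting Extended Tags in
 * the PCIe Device Control register (error handling elided),
 *
 *	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
 *					   0, PCI_EXP_DEVCTL_EXT_TAG);
 *
 * which is exactly what pcie_capability_set_word(pdev, PCI_EXP_DEVCTL,
 * PCI_EXP_DEVCTL_EXT_TAG) does.
 */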
1125
1126 /* User-space driven config access */
1127 int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
1128 int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
1129 int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
1130 int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
1131 int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
1132 int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
1133
1134 int __must_check pci_enable_device(struct pci_dev *dev);
1135 int __must_check pci_enable_device_io(struct pci_dev *dev);
1136 int __must_check pci_enable_device_mem(struct pci_dev *dev);
1137 int __must_check pci_reenable_device(struct pci_dev *);
1138 int __must_check pcim_enable_device(struct pci_dev *pdev);
1139 void pcim_pin_device(struct pci_dev *pdev);
1140
1141 static inline bool pci_intx_mask_supported(struct pci_dev *pdev)
1142 {
1143 /*
1144 * INTx masking is supported if PCI_COMMAND_INTX_DISABLE is
1145 * writable and no quirk has marked the feature broken.
1146 */
1147 return !pdev->broken_intx_masking;
1148 }
1149
1150 static inline int pci_is_enabled(struct pci_dev *pdev)
1151 {
1152 return (atomic_read(&pdev->enable_cnt) > 0);
1153 }
1154
1155 static inline int pci_is_managed(struct pci_dev *pdev)
1156 {
1157 return pdev->is_managed;
1158 }
1159
1160 void pci_disable_device(struct pci_dev *dev);
1161
1162 extern unsigned int pcibios_max_latency;
1163 void pci_set_master(struct pci_dev *dev);
1164 void pci_clear_master(struct pci_dev *dev);
1165
1166 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
1167 int pci_set_cacheline_size(struct pci_dev *dev);
1168 #define HAVE_PCI_SET_MWI
1169 int __must_check pci_set_mwi(struct pci_dev *dev);
1170 int __must_check pcim_set_mwi(struct pci_dev *dev);
1171 int pci_try_set_mwi(struct pci_dev *dev);
1172 void pci_clear_mwi(struct pci_dev *dev);
1173 void pci_intx(struct pci_dev *dev, int enable);
1174 bool pci_check_and_mask_intx(struct pci_dev *dev);
1175 bool pci_check_and_unmask_intx(struct pci_dev *dev);
1176 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
1177 int pci_wait_for_pending_transaction(struct pci_dev *dev);
1178 int pcix_get_max_mmrbc(struct pci_dev *dev);
1179 int pcix_get_mmrbc(struct pci_dev *dev);
1180 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
1181 int pcie_get_readrq(struct pci_dev *dev);
1182 int pcie_set_readrq(struct pci_dev *dev, int rq);
1183 int pcie_get_mps(struct pci_dev *dev);
1184 int pcie_set_mps(struct pci_dev *dev, int mps);
1185 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
1186 enum pci_bus_speed *speed,
1187 enum pcie_link_width *width);
1188 void pcie_print_link_status(struct pci_dev *dev);
1189 bool pcie_has_flr(struct pci_dev *dev);
1190 int pcie_flr(struct pci_dev *dev);
1191 int __pci_reset_function_locked(struct pci_dev *dev);
1192 int pci_reset_function(struct pci_dev *dev);
1193 int pci_reset_function_locked(struct pci_dev *dev);
1194 int pci_try_reset_function(struct pci_dev *dev);
1195 int pci_probe_reset_slot(struct pci_slot *slot);
1196 int pci_probe_reset_bus(struct pci_bus *bus);
1197 int pci_reset_bus(struct pci_dev *dev);
1198 void pci_reset_secondary_bus(struct pci_dev *dev);
1199 void pcibios_reset_secondary_bus(struct pci_dev *dev);
1200 void pci_update_resource(struct pci_dev *dev, int resno);
1201 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
1202 int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
1203 void pci_release_resource(struct pci_dev *dev, int resno);
1204 int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
1205 int pci_select_bars(struct pci_dev *dev, unsigned long flags);
1206 bool pci_device_is_present(struct pci_dev *pdev);
1207 void pci_ignore_hotplug(struct pci_dev *dev);
1208
1209 int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
1210 irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
1211 const char *fmt, ...);
1212 void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id);
1213
1214 /* ROM control related routines */
1215 int pci_enable_rom(struct pci_dev *pdev);
1216 void pci_disable_rom(struct pci_dev *pdev);
1217 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
1218 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
1219 void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
1220
1221 /* Power management related routines */
1222 int pci_save_state(struct pci_dev *dev);
1223 void pci_restore_state(struct pci_dev *dev);
1224 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
1225 int pci_load_saved_state(struct pci_dev *dev,
1226 struct pci_saved_state *state);
1227 int pci_load_and_free_saved_state(struct pci_dev *dev,
1228 struct pci_saved_state **state);
1229 struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
1230 struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
1231 u16 cap);
1232 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size);
1233 int pci_add_ext_cap_save_buffer(struct pci_dev *dev,
1234 u16 cap, unsigned int size);
1235 int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state);
1236 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
1237 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
1238 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
1239 void pci_pme_active(struct pci_dev *dev, bool enable);
1240 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
1241 int pci_wake_from_d3(struct pci_dev *dev, bool enable);
1242 int pci_prepare_to_sleep(struct pci_dev *dev);
1243 int pci_back_from_sleep(struct pci_dev *dev);
1244 bool pci_dev_run_wake(struct pci_dev *dev);
1245 void pci_d3cold_enable(struct pci_dev *dev);
1246 void pci_d3cold_disable(struct pci_dev *dev);
1247 bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
1248 void pci_wakeup_bus(struct pci_bus *bus);
1249 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
1250
1251 /* For use by arch with custom probe code */
1252 void set_pcie_port_type(struct pci_dev *pdev);
1253 void set_pcie_hotplug_bridge(struct pci_dev *pdev);
1254
1255 /* Functions for PCI Hotplug drivers to use */
1256 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
1257 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
1258 unsigned int pci_rescan_bus(struct pci_bus *bus);
1259 void pci_lock_rescan_remove(void);
1260 void pci_unlock_rescan_remove(void);
1261
1262 /* Vital Product Data routines */
1263 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1264 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1265 int pci_set_vpd_size(struct pci_dev *dev, size_t len);
1266
1267 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
1268 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
1269 void pci_bus_assign_resources(const struct pci_bus *bus);
1270 void pci_bus_claim_resources(struct pci_bus *bus);
1271 void pci_bus_size_bridges(struct pci_bus *bus);
1272 int pci_claim_resource(struct pci_dev *, int);
1273 int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
1274 void pci_assign_unassigned_resources(void);
1275 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
1276 void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
1277 void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
1278 int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type);
1279 void pdev_enable_device(struct pci_dev *);
1280 int pci_enable_resources(struct pci_dev *, int mask);
1281 void pci_assign_irq(struct pci_dev *dev);
1282 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
1283 #define HAVE_PCI_REQ_REGIONS 2
1284 int __must_check pci_request_regions(struct pci_dev *, const char *);
1285 int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
1286 void pci_release_regions(struct pci_dev *);
1287 int __must_check pci_request_region(struct pci_dev *, int, const char *);
1288 void pci_release_region(struct pci_dev *, int);
1289 int pci_request_selected_regions(struct pci_dev *, int, const char *);
1290 int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
1291 void pci_release_selected_regions(struct pci_dev *, int);
1292
1293 /* drivers/pci/bus.c */
1294 void pci_add_resource(struct list_head *resources, struct resource *res);
1295 void pci_add_resource_offset(struct list_head *resources, struct resource *res,
1296 resource_size_t offset);
1297 void pci_free_resource_list(struct list_head *resources);
1298 void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
1299 unsigned int flags);
1300 struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
1301 void pci_bus_remove_resources(struct pci_bus *bus);
1302 int devm_request_pci_bus_resources(struct device *dev,
1303 struct list_head *resources);
1304
1305 /* Temporary until new and working PCI SBR API in place */
1306 int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
1307
1308 #define pci_bus_for_each_resource(bus, res, i) \
1309 for (i = 0; \
1310 (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \
1311 i++)
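/*
 * Iteration sketch for the macro above (illustrative; note that entries in
 * the fixed bridge-window table may be NULL, hence the check):
 *
 *	struct resource *res;
 *	int i;
 *
 *	pci_bus_for_each_resource(bus, res, i) {
 *		if (res && (res->flags & IORESOURCE_MEM))
 *			dev_info(&bus->dev, "mem window %pR\n", res);
 *	}
 */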
1312
1313 int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
1314 struct resource *res, resource_size_t size,
1315 resource_size_t align, resource_size_t min,
1316 unsigned long type_mask,
1317 resource_size_t (*alignf)(void *,
1318 const struct resource *,
1319 resource_size_t,
1320 resource_size_t),
1321 void *alignf_data);
1322
1323
1324 int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
1325 resource_size_t size);
1326 unsigned long pci_address_to_pio(phys_addr_t addr);
1327 phys_addr_t pci_pio_to_address(unsigned long pio);
1328 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
1329 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
1330 phys_addr_t phys_addr);
1331 void pci_unmap_iospace(struct resource *res);
1332 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
1333 resource_size_t offset,
1334 resource_size_t size);
1335 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
1336 struct resource *res);
1337
1338 static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
1339 {
1340 struct pci_bus_region region;
1341
1342 pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
1343 return region.start;
1344 }
1345
1346 /* Proper probing supporting hot-pluggable devices */
1347 int __must_check __pci_register_driver(struct pci_driver *, struct module *,
1348 const char *mod_name);
1349
1350 /* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */
1351 #define pci_register_driver(driver) \
1352 __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
1353
1354 void pci_unregister_driver(struct pci_driver *dev);
1355
1356 /**
1357 * module_pci_driver() - Helper macro for registering a PCI driver
1358 * @__pci_driver: pci_driver struct
1359 *
1360 * Helper macro for PCI drivers which do not do anything special in module
1361 * init/exit. This eliminates a lot of boilerplate. Each module may only
1362 * use this macro once, and calling it replaces module_init() and module_exit()
1363 */
1364 #define module_pci_driver(__pci_driver) \
1365 module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)
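
/*
 * Typical use of module_pci_driver() (an illustrative sketch; the driver
 * name, ID table and callbacks below are hypothetical placeholders):
 *
 *	static const struct pci_device_id my_pci_ids[] = {
 *		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(pci, my_pci_ids);
 *
 *	static struct pci_driver my_pci_driver = {
 *		.name		= "my_pci",
 *		.id_table	= my_pci_ids,
 *		.probe		= my_pci_probe,
 *		.remove		= my_pci_remove,
 *	};
 *	module_pci_driver(my_pci_driver);
 */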
1366
1367 /**
1368 * builtin_pci_driver() - Helper macro for registering a PCI driver
1369 * @__pci_driver: pci_driver struct
1370 *
1371 * Helper macro for PCI drivers which do not do anything special in their
1372 * init code. This eliminates a lot of boilerplate. Each driver may only
1373 * use this macro once, and calling it replaces device_initcall(...)
1374 */
1375 #define builtin_pci_driver(__pci_driver) \
1376 builtin_driver(__pci_driver, pci_register_driver)
1377
1378 struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
1379 int pci_add_dynid(struct pci_driver *drv,
1380 unsigned int vendor, unsigned int device,
1381 unsigned int subvendor, unsigned int subdevice,
1382 unsigned int class, unsigned int class_mask,
1383 unsigned long driver_data);
1384 const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
1385 struct pci_dev *dev);
1386 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
1387 int pass);
1388
1389 void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
1390 void *userdata);
1391 int pci_cfg_space_size(struct pci_dev *dev);
1392 unsigned char pci_bus_max_busnr(struct pci_bus *bus);
1393 void pci_setup_bridge(struct pci_bus *bus);
1394 resource_size_t pcibios_window_alignment(struct pci_bus *bus,
1395 unsigned long type);
1396
1397 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
1398 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
1399
1400 int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1401 unsigned int command_bits, u32 flags);
1402
1403 /*
1404 * Virtual interrupts allow for more interrupts to be allocated
1405 * than the device has interrupts for. These are not programmed
1406 * into the device's MSI-X table and must be handled by the driver
1407 * through some other means.
1408 */
1409 #define PCI_IRQ_VIRTUAL (1 << 4)
1410
1411 #define PCI_IRQ_ALL_TYPES \
1412 (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
1413
1414 /* kmem_cache style wrapper around pci_alloc_consistent() */
1415
1416 #include <linux/dmapool.h>
1417
1418 #define pci_pool dma_pool
1419 #define pci_pool_create(name, pdev, size, align, allocation) \
1420 dma_pool_create(name, &pdev->dev, size, align, allocation)
1421 #define pci_pool_destroy(pool) dma_pool_destroy(pool)
1422 #define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
1423 #define pci_pool_zalloc(pool, flags, handle) \
1424 dma_pool_zalloc(pool, flags, handle)
1425 #define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
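
/*
 * Minimal sketch of the compatibility wrappers above; "pool", "vaddr" and
 * "dma" are placeholder names, and new code is generally expected to call
 * the dma_pool_*() API directly:
 *
 *	struct pci_pool *pool;
 *	dma_addr_t dma;
 *	void *vaddr;
 *
 *	pool = pci_pool_create("my_pool", pdev, 512, 64, 0);
 *	vaddr = pci_pool_alloc(pool, GFP_KERNEL, &dma);
 *	...
 *	pci_pool_free(pool, vaddr, dma);
 *	pci_pool_destroy(pool);
 */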
1426
1427 struct msix_entry {
1428 u32 vector; /* Written by the kernel with the allocated IRQ vector */
1429 u16 entry; /* MSI-X table entry index, specified by the driver */
1430 };
1431
1432 #ifdef CONFIG_PCI_MSI
1433 int pci_msi_vec_count(struct pci_dev *dev);
1434 void pci_disable_msi(struct pci_dev *dev);
1435 int pci_msix_vec_count(struct pci_dev *dev);
1436 void pci_disable_msix(struct pci_dev *dev);
1437 void pci_restore_msi_state(struct pci_dev *dev);
1438 int pci_msi_enabled(void);
1439 int pci_enable_msi(struct pci_dev *dev);
1440 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1441 int minvec, int maxvec);
1442 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1443 struct msix_entry *entries, int nvec)
1444 {
1445 int rc = pci_enable_msix_range(dev, entries, nvec, nvec);
1446 if (rc < 0)
1447 return rc;
1448 return 0;
1449 }
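
/*
 * Example of requesting a range of MSI-X vectors with an explicit entry
 * table (an illustrative sketch; the vector counts are arbitrary and
 * pci_alloc_irq_vectors() below is usually the simpler choice):
 *
 *	struct msix_entry entries[4];
 *	int i, nvec;
 *
 *	for (i = 0; i < 4; i++)
 *		entries[i].entry = i;
 *	nvec = pci_enable_msix_range(pdev, entries, 1, 4);
 *	if (nvec < 0)
 *		return nvec;
 *
 * On success, entries[i].vector holds the Linux IRQ number for entry i.
 */
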
1450 int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1451 unsigned int max_vecs, unsigned int flags,
1452 struct irq_affinity *affd);
1453
1454 void pci_free_irq_vectors(struct pci_dev *dev);
1455 int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
1456 const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
1457 int pci_irq_get_node(struct pci_dev *pdev, int vec);
1458
1459 #else
1460 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
1461 static inline void pci_disable_msi(struct pci_dev *dev) { }
1462 static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
1463 static inline void pci_disable_msix(struct pci_dev *dev) { }
1464 static inline void pci_restore_msi_state(struct pci_dev *dev) { }
1465 static inline int pci_msi_enabled(void) { return 0; }
1466 static inline int pci_enable_msi(struct pci_dev *dev)
1467 { return -ENOSYS; }
1468 static inline int pci_enable_msix_range(struct pci_dev *dev,
1469 struct msix_entry *entries, int minvec, int maxvec)
1470 { return -ENOSYS; }
1471 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1472 struct msix_entry *entries, int nvec)
1473 { return -ENOSYS; }
1474
1475 static inline int
1476 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1477 unsigned int max_vecs, unsigned int flags,
1478 struct irq_affinity *aff_desc)
1479 {
1480 if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq)
1481 return 1;
1482 return -ENOSPC;
1483 }
1484
1485 static inline void pci_free_irq_vectors(struct pci_dev *dev)
1486 {
1487 }
1488
1489 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
1490 {
1491 if (WARN_ON_ONCE(nr > 0))
1492 return -EINVAL;
1493 return dev->irq;
1494 }
1495 static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
1496 int vec)
1497 {
1498 return cpu_possible_mask;
1499 }
1500
1501 static inline int pci_irq_get_node(struct pci_dev *pdev, int vec)
1502 {
1503 return first_online_node;
1504 }
1505 #endif
1506
1507 /**
1508 * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq
1509 * @d: the INTx IRQ domain
1510 * @node: the DT node for the device whose interrupt we're translating
1511 * @intspec: the interrupt specifier data from the DT
1512 * @intsize: the number of entries in @intspec
1513 * @out_hwirq: pointer at which to write the hwirq number
1514 * @out_type: pointer at which to write the interrupt type
1515 *
1516 * Translate a PCI INTx interrupt number from device tree in the range 1-4, as
1517 * stored in the standard PCI_INTERRUPT_PIN register, to a value in the range
1518 * 0-3 suitable for use in a 4 entry IRQ domain. That is, subtract one from the
1519 * INTx value to obtain the hwirq number.
1520 *
1521 * Returns 0 on success, or -EINVAL if the interrupt specifier is out of range.
1522 */
1523 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
1524 struct device_node *node,
1525 const u32 *intspec,
1526 unsigned int intsize,
1527 unsigned long *out_hwirq,
1528 unsigned int *out_type)
1529 {
1530 const u32 intx = intspec[0];
1531
1532 if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD)
1533 return -EINVAL;
1534
1535 *out_hwirq = intx - PCI_INTERRUPT_INTA;
1536 return 0;
1537 }
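
/*
 * A PCI host bridge driver would typically plug the helper above into the
 * irq_domain_ops of its INTx domain (a sketch; the ops name and the "map"
 * callback are hypothetical):
 *
 *	static const struct irq_domain_ops my_intx_domain_ops = {
 *		.map	= my_intx_map,
 *		.xlate	= pci_irqd_intx_xlate,
 *	};
 */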
1538
1539 #ifdef CONFIG_PCIEPORTBUS
1540 extern bool pcie_ports_disabled;
1541 extern bool pcie_ports_native;
1542 #else
1543 #define pcie_ports_disabled true
1544 #define pcie_ports_native false
1545 #endif
1546
1547 #define PCIE_LINK_STATE_L0S 1
1548 #define PCIE_LINK_STATE_L1 2
1549 #define PCIE_LINK_STATE_CLKPM 4
1550
1551 #ifdef CONFIG_PCIEASPM
1552 int pci_disable_link_state(struct pci_dev *pdev, int state);
1553 int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
1554 void pcie_no_aspm(void);
1555 bool pcie_aspm_support_enabled(void);
1556 bool pcie_aspm_enabled(struct pci_dev *pdev);
1557 #else
1558 static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
1559 { return 0; }
1560 static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
1561 { return 0; }
1562 static inline void pcie_no_aspm(void) { }
1563 static inline bool pcie_aspm_support_enabled(void) { return false; }
1564 static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
1565 #endif
1566
1567 #ifdef CONFIG_PCIEAER
1568 bool pci_aer_available(void);
1569 #else
1570 static inline bool pci_aer_available(void) { return false; }
1571 #endif
1572
1573 bool pci_ats_disabled(void);
1574
1575 void pci_cfg_access_lock(struct pci_dev *dev);
1576 bool pci_cfg_access_trylock(struct pci_dev *dev);
1577 void pci_cfg_access_unlock(struct pci_dev *dev);
1578
1579 /*
1580 * PCI domain support. Sometimes called a PCI segment (e.g. by ACPI),
1581 * a PCI domain is defined to be a set of PCI buses which share
1582 * configuration space.
1583 */
1584 #ifdef CONFIG_PCI_DOMAINS
1585 extern int pci_domains_supported;
1586 #else
1587 enum { pci_domains_supported = 0 };
1588 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1589 static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
1590 #endif /* CONFIG_PCI_DOMAINS */
1591
1592 /*
1593 * Generic implementation for PCI domain support. If your
1594 * architecture does not need custom management of PCI
1595 * domains then this implementation will be used
1596 */
1597 #ifdef CONFIG_PCI_DOMAINS_GENERIC
1598 static inline int pci_domain_nr(struct pci_bus *bus)
1599 {
1600 return bus->domain_nr;
1601 }
1602 #ifdef CONFIG_ACPI
1603 int acpi_pci_bus_find_domain_nr(struct pci_bus *bus);
1604 #else
1605 static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
1606 { return 0; }
1607 #endif
1608 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
1609 #endif
1610
1611 /* Some architectures require additional setup to direct VGA traffic */
1612 typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
1613 unsigned int command_bits, u32 flags);
1614 void pci_register_set_vga_state(arch_set_vga_state_t func);
1615
1616 static inline int
1617 pci_request_io_regions(struct pci_dev *pdev, const char *name)
1618 {
1619 return pci_request_selected_regions(pdev,
1620 pci_select_bars(pdev, IORESOURCE_IO), name);
1621 }
1622
1623 static inline void
1624 pci_release_io_regions(struct pci_dev *pdev)
1625 {
1626 return pci_release_selected_regions(pdev,
1627 pci_select_bars(pdev, IORESOURCE_IO));
1628 }
1629
1630 static inline int
1631 pci_request_mem_regions(struct pci_dev *pdev, const char *name)
1632 {
1633 return pci_request_selected_regions(pdev,
1634 pci_select_bars(pdev, IORESOURCE_MEM), name);
1635 }
1636
1637 static inline void
1638 pci_release_mem_regions(struct pci_dev *pdev)
1639 {
1640 return pci_release_selected_regions(pdev,
1641 pci_select_bars(pdev, IORESOURCE_MEM));
1642 }
1643
1644 #else /* CONFIG_PCI is not enabled */
1645
1646 static inline void pci_set_flags(int flags) { }
1647 static inline void pci_add_flags(int flags) { }
1648 static inline void pci_clear_flags(int flags) { }
1649 static inline int pci_has_flag(int flag) { return 0; }
1650
1651 /*
1652 * If the system does not have PCI, clearly these return errors. Define
1653 * these as simple inline functions to avoid hair in drivers.
1654 */
1655 #define _PCI_NOP(o, s, t) \
1656 static inline int pci_##o##_config_##s(struct pci_dev *dev, \
1657 int where, t val) \
1658 { return PCIBIOS_FUNC_NOT_SUPPORTED; }
1659
1660 #define _PCI_NOP_ALL(o, x) _PCI_NOP(o, byte, u8 x) \
1661 _PCI_NOP(o, word, u16 x) \
1662 _PCI_NOP(o, dword, u32 x)
1663 _PCI_NOP_ALL(read, *)
1664 _PCI_NOP_ALL(write,)
1665
1666 static inline struct pci_dev *pci_get_device(unsigned int vendor,
1667 unsigned int device,
1668 struct pci_dev *from)
1669 { return NULL; }
1670
1671 static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
1672 unsigned int device,
1673 unsigned int ss_vendor,
1674 unsigned int ss_device,
1675 struct pci_dev *from)
1676 { return NULL; }
1677
1678 static inline struct pci_dev *pci_get_class(unsigned int class,
1679 struct pci_dev *from)
1680 { return NULL; }
1681
1682 #define pci_dev_present(ids) (0)
1683 #define no_pci_devices() (1)
1684 #define pci_dev_put(dev) do { } while (0)
1685
1686 static inline void pci_set_master(struct pci_dev *dev) { }
1687 static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
1688 static inline void pci_disable_device(struct pci_dev *dev) { }
1689 static inline int pci_assign_resource(struct pci_dev *dev, int i)
1690 { return -EBUSY; }
1691 static inline int __pci_register_driver(struct pci_driver *drv,
1692 struct module *owner)
1693 { return 0; }
1694 static inline int pci_register_driver(struct pci_driver *drv)
1695 { return 0; }
1696 static inline void pci_unregister_driver(struct pci_driver *drv) { }
1697 static inline int pci_find_capability(struct pci_dev *dev, int cap)
1698 { return 0; }
1699 static inline int pci_find_next_capability(struct pci_dev *dev, u8 post,
1700 int cap)
1701 { return 0; }
1702 static inline int pci_find_ext_capability(struct pci_dev *dev, int cap)
1703 { return 0; }
1704 
1705 /* Power management related routines */
1706 static inline int pci_save_state(struct pci_dev *dev) { return 0; }
1707 static inline void pci_restore_state(struct pci_dev *dev) { }
1708 static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
1709 { return 0; }
1710 static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1711 { return 0; }
1712 static inline pci_power_t pci_choose_state(struct pci_dev *dev,
1713 pm_message_t state)
1714 { return PCI_D0; }
1715 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1716 int enable)
1717 { return 0; }
1718
1719 static inline struct resource *pci_find_resource(struct pci_dev *dev,
1720 struct resource *res)
1721 { return NULL; }
1722 static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
1723 { return -EIO; }
1724 static inline void pci_release_regions(struct pci_dev *dev) { }
1725
1726 static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
1727
1728 static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
1729 { return NULL; }
1730 static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
1731 unsigned int devfn)
1732 { return NULL; }
1733 static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
1734 unsigned int bus, unsigned int devfn)
1735 { return NULL; }
1736
1737 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1738 static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
1739
1740 #define dev_is_pci(d) (false)
1741 #define dev_is_pf(d) (false)
1742 static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
1743 { return false; }
1744 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
1745 struct device_node *node,
1746 const u32 *intspec,
1747 unsigned int intsize,
1748 unsigned long *out_hwirq,
1749 unsigned int *out_type)
1750 { return -EINVAL; }
1751
1752 static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
1753 struct pci_dev *dev)
1754 { return NULL; }
1755 static inline bool pci_ats_disabled(void) { return true; }
1756
1757 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
1758 {
1759 return -EINVAL;
1760 }
1761
1762 static inline int
1763 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1764 unsigned int max_vecs, unsigned int flags,
1765 struct irq_affinity *aff_desc)
1766 {
1767 return -ENOSPC;
1768 }
1769 #endif /* CONFIG_PCI */
1770
1771 static inline int
1772 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1773 unsigned int max_vecs, unsigned int flags)
1774 {
1775 return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags,
1776 NULL);
1777 }
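
/*
 * Typical interrupt setup built on the helpers above (an illustrative
 * sketch; "my_handler", "my_dev" and the vector counts are placeholders):
 *
 *	int nvec, i, ret;
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
 *	if (nvec < 0)
 *		return nvec;
 *	for (i = 0; i < nvec; i++) {
 *		ret = request_irq(pci_irq_vector(pdev, i), my_handler, 0,
 *				  "my_dev", pdev);
 *		...
 *	}
 *
 * The vectors are released with pci_free_irq_vectors() once all of the
 * requested IRQs have been freed again.
 */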
1778
1779 #ifdef CONFIG_PCI_ATS
1780 /* Address Translation Service */
1781 int pci_enable_ats(struct pci_dev *dev, int ps);
1782 void pci_disable_ats(struct pci_dev *dev);
1783 int pci_ats_queue_depth(struct pci_dev *dev);
1784 int pci_ats_page_aligned(struct pci_dev *dev);
1785 #else
1786 static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
1787 static inline void pci_disable_ats(struct pci_dev *d) { }
1788 static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
1789 static inline int pci_ats_page_aligned(struct pci_dev *dev) { return 0; }
1790 #endif
1791
1792 /* Include architecture-dependent settings and functions */
1793
1794 #include <asm/pci.h>
1795
1796 /* These two functions provide almost identical functionality. Depending
1797 * on the architecture, one will be implemented as a wrapper around the
1798 * other (in drivers/pci/mmap.c).
1799 *
1800 * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff
1801 * is expected to be an offset within that region.
1802 *
1803 * pci_mmap_page_range() is the legacy architecture-specific interface,
1804 * which accepts a "user visible" resource address converted by
1805 * pci_resource_to_user(), as used in the legacy mmap() interface in
1806 * /proc/bus/pci/.
1807 */
1808 int pci_mmap_resource_range(struct pci_dev *dev, int bar,
1809 struct vm_area_struct *vma,
1810 enum pci_mmap_state mmap_state, int write_combine);
1811 int pci_mmap_page_range(struct pci_dev *pdev, int bar,
1812 struct vm_area_struct *vma,
1813 enum pci_mmap_state mmap_state, int write_combine);
1814
1815 #ifndef arch_can_pci_mmap_wc
1816 #define arch_can_pci_mmap_wc() 0
1817 #endif
1818
1819 #ifndef arch_can_pci_mmap_io
1820 #define arch_can_pci_mmap_io() 0
1821 #define pci_iobar_pfn(pdev, bar, vma) (-EINVAL)
1822 #else
1823 int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
1824 #endif
1825
1826 #ifndef pci_root_bus_fwnode
1827 #define pci_root_bus_fwnode(bus) NULL
1828 #endif
1829
1830 /*
1831 * These helpers provide future and backwards compatibility
1832 * for accessing popular PCI BAR info
1833 */
1834 #define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
1835 #define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end)
1836 #define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags)
1837 #define pci_resource_len(dev,bar) \
1838 ((pci_resource_start((dev), (bar)) == 0 && \
1839 pci_resource_end((dev), (bar)) == \
1840 pci_resource_start((dev), (bar))) ? 0 : \
1841 \
1842 (pci_resource_end((dev), (bar)) - \
1843 pci_resource_start((dev), (bar)) + 1))
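
/*
 * Example use of the BAR helpers above together with pci_ioremap_bar()
 * (an illustrative sketch; BAR 0 and the minimum length are arbitrary):
 *
 *	void __iomem *regs;
 *
 *	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
 *	    pci_resource_len(pdev, 0) < 0x100)
 *		return -ENODEV;
 *	regs = pci_ioremap_bar(pdev, 0);
 *	if (!regs)
 *		return -ENOMEM;
 */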
1844
1845 /*
1846 * Similar to the helpers above, these manipulate per-pci_dev
1847 * driver-specific data. They are really just a wrapper around
1848 * the generic device structure functions of these calls.
1849 */
1850 static inline void *pci_get_drvdata(struct pci_dev *pdev)
1851 {
1852 return dev_get_drvdata(&pdev->dev);
1853 }
1854
1855 static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
1856 {
1857 dev_set_drvdata(&pdev->dev, data);
1858 }
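
/*
 * Example (a sketch; "struct my_priv" is a hypothetical driver-private
 * structure allocated in probe and retrieved again in remove):
 *
 *	In probe():
 *		struct my_priv *priv = devm_kzalloc(&pdev->dev, sizeof(*priv),
 *						    GFP_KERNEL);
 *		pci_set_drvdata(pdev, priv);
 *
 *	In remove():
 *		struct my_priv *priv = pci_get_drvdata(pdev);
 */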
1859
1860 static inline const char *pci_name(const struct pci_dev *pdev)
1861 {
1862 return dev_name(&pdev->dev);
1863 }
1864
1865 void pci_resource_to_user(const struct pci_dev *dev, int bar,
1866 const struct resource *rsrc,
1867 resource_size_t *start, resource_size_t *end);
1868
1869 /*
1870 * The world is not perfect and supplies us with broken PCI devices.
1871 * For at least a part of these bugs we need a work-around, so both
1872 * generic (drivers/pci/quirks.c) and per-architecture code can define
1873 * fixup hooks to be called for particular buggy devices.
1874 */
1875
1876 struct pci_fixup {
1877 u16 vendor; /* Or PCI_ANY_ID */
1878 u16 device; /* Or PCI_ANY_ID */
1879 u32 class; /* Or PCI_ANY_ID */
1880 unsigned int class_shift; /* should be 0, 8, 16 */
1881 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
1882 int hook_offset;
1883 #else
1884 void (*hook)(struct pci_dev *dev);
1885 #endif
1886 };
1887
1888 enum pci_fixup_pass {
1889 pci_fixup_early, /* Before probing BARs */
1890 pci_fixup_header, /* After reading configuration header */
1891 pci_fixup_final, /* Final phase of device fixups */
1892 pci_fixup_enable, /* pci_enable_device() time */
1893 pci_fixup_resume, /* pci_device_resume() */
1894 pci_fixup_suspend, /* pci_device_suspend() */
1895 pci_fixup_resume_early, /* pci_device_resume_early() */
1896 pci_fixup_suspend_late, /* pci_device_suspend_late() */
1897 };
1898
1899 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
1900 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
1901 class_shift, hook) \
1902 __ADDRESSABLE(hook) \
1903 asm(".section " #sec ", \"a\" \n" \
1904 ".balign 16 \n" \
1905 ".short " #vendor ", " #device " \n" \
1906 ".long " #class ", " #class_shift " \n" \
1907 ".long " #hook " - . \n" \
1908 ".previous \n");
1909 #define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
1910 class_shift, hook) \
1911 __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
1912 class_shift, hook)
1913 #else
1914 /* Anonymous variables would be nice... */
1915 #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
1916 class_shift, hook) \
1917 static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \
1918 __attribute__((__section__(#section), aligned((sizeof(void *))))) \
1919 = { vendor, device, class, class_shift, hook };
1920 #endif
1921
1922 #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \
1923 class_shift, hook) \
1924 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
1925 hook, vendor, device, class, class_shift, hook)
1926 #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \
1927 class_shift, hook) \
1928 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
1929 hook, vendor, device, class, class_shift, hook)
1930 #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \
1931 class_shift, hook) \
1932 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
1933 hook, vendor, device, class, class_shift, hook)
1934 #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \
1935 class_shift, hook) \
1936 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
1937 hook, vendor, device, class, class_shift, hook)
1938 #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \
1939 class_shift, hook) \
1940 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
1941 resume##hook, vendor, device, class, class_shift, hook)
1942 #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \
1943 class_shift, hook) \
1944 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
1945 resume_early##hook, vendor, device, class, class_shift, hook)
1946 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \
1947 class_shift, hook) \
1948 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1949 suspend##hook, vendor, device, class, class_shift, hook)
1950 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \
1951 class_shift, hook) \
1952 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
1953 suspend_late##hook, vendor, device, class, class_shift, hook)
1954
1955 #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
1956 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
1957 hook, vendor, device, PCI_ANY_ID, 0, hook)
1958 #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \
1959 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
1960 hook, vendor, device, PCI_ANY_ID, 0, hook)
1961 #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \
1962 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
1963 hook, vendor, device, PCI_ANY_ID, 0, hook)
1964 #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \
1965 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
1966 hook, vendor, device, PCI_ANY_ID, 0, hook)
1967 #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
1968 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
1969 resume##hook, vendor, device, PCI_ANY_ID, 0, hook)
1970 #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
1971 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
1972 resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook)
1973 #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
1974 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1975 suspend##hook, vendor, device, PCI_ANY_ID, 0, hook)
1976 #define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \
1977 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
1978 suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook)
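
/*
 * Example quirk registration (an illustrative sketch; the device ID and
 * the quirk itself are hypothetical):
 *
 *	static void quirk_my_broken_msi(struct pci_dev *dev)
 *	{
 *		dev->no_msi = 1;
 *		pci_info(dev, "disabling MSI\n");
 *	}
 *	DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1234,
 *				quirk_my_broken_msi);
 */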
1979
1980 #ifdef CONFIG_PCI_QUIRKS
1981 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
1982 #else
1983 static inline void pci_fixup_device(enum pci_fixup_pass pass,
1984 struct pci_dev *dev) { }
1985 #endif
1986
1987 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
1988 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
1989 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
1990 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
1991 int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
1992 const char *name);
1993 void pcim_iounmap_regions(struct pci_dev *pdev, int mask);
1994
1995 extern int pci_pci_problems;
1996 #define PCIPCI_FAIL 1 /* No PCI-to-PCI DMA */
1997 #define PCIPCI_TRITON 2
1998 #define PCIPCI_NATOMA 4
1999 #define PCIPCI_VIAETBF 8
2000 #define PCIPCI_VSFX 16
2001 #define PCIPCI_ALIMAGIK 32 /* Need low latency setting */
2002 #define PCIAGP_FAIL 64 /* No PCI to AGP DMA */
2003
2004 extern unsigned long pci_cardbus_io_size;
2005 extern unsigned long pci_cardbus_mem_size;
2006 extern u8 pci_dfl_cache_line_size;
2007 extern u8 pci_cache_line_size;
2008
2009 /* Architecture-specific versions may override these (weak) */
2010 void pcibios_disable_device(struct pci_dev *dev);
2011 void pcibios_set_master(struct pci_dev *dev);
2012 int pcibios_set_pcie_reset_state(struct pci_dev *dev,
2013 enum pcie_reset_state state);
2014 int pcibios_add_device(struct pci_dev *dev);
2015 void pcibios_release_device(struct pci_dev *dev);
2016 #ifdef CONFIG_PCI
2017 void pcibios_penalize_isa_irq(int irq, int active);
2018 #else
2019 static inline void pcibios_penalize_isa_irq(int irq, int active) {}
2020 #endif
2021 int pcibios_alloc_irq(struct pci_dev *dev);
2022 void pcibios_free_irq(struct pci_dev *dev);
2023 resource_size_t pcibios_default_alignment(void);
2024
2025 #ifdef CONFIG_HIBERNATE_CALLBACKS
2026 extern struct dev_pm_ops pcibios_pm_ops;
2027 #endif
2028
2029 #if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
2030 void __init pci_mmcfg_early_init(void);
2031 void __init pci_mmcfg_late_init(void);
2032 #else
2033 static inline void pci_mmcfg_early_init(void) { }
2034 static inline void pci_mmcfg_late_init(void) { }
2035 #endif
2036
2037 int pci_ext_cfg_avail(void);
2038
2039 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
2040 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
2041
2042 #ifdef CONFIG_PCI_IOV
2043 int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
2044 int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
2045
2046 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
2047 void pci_disable_sriov(struct pci_dev *dev);
2048 int pci_iov_add_virtfn(struct pci_dev *dev, int id);
2049 void pci_iov_remove_virtfn(struct pci_dev *dev, int id);
2050 int pci_num_vf(struct pci_dev *dev);
2051 int pci_vfs_assigned(struct pci_dev *dev);
2052 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
2053 int pci_sriov_get_totalvfs(struct pci_dev *dev);
2054 int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn);
2055 resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
2056 void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
2057
2058 /* Arch may override these (weak) */
2059 int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs);
2060 int pcibios_sriov_disable(struct pci_dev *pdev);
2061 resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
2062 #else
2063 static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
2064 {
2065 return -ENOSYS;
2066 }
2067 static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
2068 {
2069 return -ENOSYS;
2070 }
2071 static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
2072 { return -ENODEV; }
2073 static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id)
2074 {
2075 return -ENOSYS;
2076 }
2077 static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
2078 int id) { }
2079 static inline void pci_disable_sriov(struct pci_dev *dev) { }
2080 static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
2081 static inline int pci_vfs_assigned(struct pci_dev *dev)
2082 { return 0; }
2083 static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
2084 { return 0; }
2085 static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
2086 { return 0; }
2087 #define pci_sriov_configure_simple NULL
2088 static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
2089 { return 0; }
2090 static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
2091 #endif
2092
2093 #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
2094 void pci_hp_create_module_link(struct pci_slot *pci_slot);
2095 void pci_hp_remove_module_link(struct pci_slot *pci_slot);
2096 #endif
2097
2098 /**
2099 * pci_pcie_cap - get the saved PCIe capability offset
2100 * @dev: PCI device
2101 *
2102 * PCIe capability offset is calculated at PCI device initialization
2103 * time and saved in the data structure. This function returns the saved
2104 * PCIe capability offset. Using it instead of pci_find_capability()
2105 * avoids an unnecessary search of the PCI configuration space. If you
2106 * need to calculate the PCIe capability offset from the raw device for
2107 * some reason, use pci_find_capability() instead.
2108 */
2109 static inline int pci_pcie_cap(struct pci_dev *dev)
2110 {
2111 return dev->pcie_cap;
2112 }
2113
2114 /**
2115 * pci_is_pcie - check if the PCI device is PCI Express capable
2116 * @dev: PCI device
2117 *
2118 * Returns: true if the PCI device is PCI Express capable, false otherwise.
2119 */
2120 static inline bool pci_is_pcie(struct pci_dev *dev)
2121 {
2122 return pci_pcie_cap(dev);
2123 }
2124
2125 /**
2126 * pcie_caps_reg - get the PCIe Capabilities Register
2127 * @dev: PCI device
2128 */
2129 static inline u16 pcie_caps_reg(const struct pci_dev *dev)
2130 {
2131 return dev->pcie_flags_reg;
2132 }
2133
2134 /**
2135 * pci_pcie_type - get the PCIe device/port type
2136 * @dev: PCI device
2137 */
2138 static inline int pci_pcie_type(const struct pci_dev *dev)
2139 {
2140 return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
2141 }
2142
2143 static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
2144 {
2145 while (1) {
2146 if (!pci_is_pcie(dev))
2147 break;
2148 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2149 return dev;
2150 if (!dev->bus->self)
2151 break;
2152 dev = dev->bus->self;
2153 }
2154 return NULL;
2155 }
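
/*
 * Example use of the PCIe helpers above (an illustrative sketch):
 *
 *	struct pci_dev *root;
 *
 *	if (pci_is_pcie(pdev) &&
 *	    pci_pcie_type(pdev) == PCI_EXP_TYPE_ENDPOINT) {
 *		root = pcie_find_root_port(pdev);
 *		if (root)
 *			pci_info(pdev, "root port: %s\n", pci_name(root));
 *	}
 */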
2156
2157 void pci_request_acs(void);
2158 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
2159 bool pci_acs_path_enabled(struct pci_dev *start,
2160 struct pci_dev *end, u16 acs_flags);
2161 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
2162
2163 #define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
2164 #define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT)
2165
2166 /* Large Resource Data Type Tag Item Names */
2167 #define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */
2168 #define PCI_VPD_LTIN_RO_DATA 0x10 /* Read-Only Data */
2169 #define PCI_VPD_LTIN_RW_DATA 0x11 /* Read-Write Data */
2170
2171 #define PCI_VPD_LRDT_ID_STRING PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
2172 #define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
2173 #define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
2174
2175 /* Small Resource Data Type Tag Item Names */
2176 #define PCI_VPD_STIN_END 0x0f /* End */
2177
2178 #define PCI_VPD_SRDT_END (PCI_VPD_STIN_END << 3)
2179
2180 #define PCI_VPD_SRDT_TIN_MASK 0x78
2181 #define PCI_VPD_SRDT_LEN_MASK 0x07
2182 #define PCI_VPD_LRDT_TIN_MASK 0x7f
2183
2184 #define PCI_VPD_LRDT_TAG_SIZE 3
2185 #define PCI_VPD_SRDT_TAG_SIZE 1
2186
2187 #define PCI_VPD_INFO_FLD_HDR_SIZE 3
2188
2189 #define PCI_VPD_RO_KEYWORD_PARTNO "PN"
2190 #define PCI_VPD_RO_KEYWORD_MFR_ID "MN"
2191 #define PCI_VPD_RO_KEYWORD_VENDOR0 "V0"
2192 #define PCI_VPD_RO_KEYWORD_CHKSUM "RV"
2193
2194 /**
2195 * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length
2196 * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
2197 *
2198 * Returns the extracted Large Resource Data Type length.
2199 */
2200 static inline u16 pci_vpd_lrdt_size(const u8 *lrdt)
2201 {
2202 return (u16)lrdt[1] + ((u16)lrdt[2] << 8);
2203 }
2204
2205 /**
2206 * pci_vpd_lrdt_tag - Extracts the Large Resource Data Type Tag Item
2207 * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
2208 *
2209 * Returns the extracted Large Resource Data Type Tag item.
2210 */
2211 static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt)
2212 {
2213 return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK);
2214 }
2215
2216 /**
2217 * pci_vpd_srdt_size - Extracts the Small Resource Data Type length
2218 * @srdt: Pointer to the beginning of the Small Resource Data Type tag
2219 *
2220 * Returns the extracted Small Resource Data Type length.
2221 */
2222 static inline u8 pci_vpd_srdt_size(const u8 *srdt)
2223 {
2224 return (*srdt) & PCI_VPD_SRDT_LEN_MASK;
2225 }
2226
2227 /**
2228 * pci_vpd_srdt_tag - Extracts the Small Resource Data Type Tag Item
2229 * @srdt: Pointer to the beginning of the Small Resource Data Type tag
2230 *
2231 * Returns the extracted Small Resource Data Type Tag Item.
2232 */
2233 static inline u8 pci_vpd_srdt_tag(const u8 *srdt)
2234 {
2235 return ((*srdt) & PCI_VPD_SRDT_TIN_MASK) >> 3;
2236 }
2237
2238 /**
2239 * pci_vpd_info_field_size - Extracts the information field length
2240 * @info_field: Pointer to the beginning of an information field header
2241 *
2242 * Returns the extracted information field length.
2243 */
2244 static inline u8 pci_vpd_info_field_size(const u8 *info_field)
2245 {
2246 return info_field[2];
2247 }
2248
2249 /**
2250 * pci_vpd_find_tag - Locates the Resource Data Type tag provided
2251 * @buf: Pointer to buffered vpd data
2252 * @off: The offset into the buffer at which to begin the search
2253 * @len: The length of the vpd buffer
2254 * @rdt: The Resource Data Type to search for
2255 *
2256 * Returns the index where the Resource Data Type was found or
2257 * -ENOENT otherwise.
2258 */
2259 int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt);
2260
2261 /**
2262 * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD
2263 * @buf: Pointer to buffered vpd data
2264 * @off: The offset into the buffer at which to begin the search
2265 * @len: The length of the buffer area, relative to off, in which to search
2266 * @kw: The keyword to search for
2267 *
2268 * Returns the index where the information field keyword was found or
2269 * -ENOENT otherwise.
2270 */
2271 int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
2272 unsigned int len, const char *kw);
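
/*
 * Example of locating the Part Number keyword in buffered VPD data (an
 * illustrative sketch; "buf" and "len" are assumed to describe data
 * previously read from the device, and error handling is abbreviated):
 *
 *	int ro_start, kw_off;
 *	unsigned int ro_len;
 *
 *	ro_start = pci_vpd_find_tag(buf, 0, len, PCI_VPD_LRDT_RO_DATA);
 *	if (ro_start < 0)
 *		return ro_start;
 *	ro_len = pci_vpd_lrdt_size(&buf[ro_start]);
 *	kw_off = pci_vpd_find_info_keyword(buf,
 *					   ro_start + PCI_VPD_LRDT_TAG_SIZE,
 *					   ro_len, PCI_VPD_RO_KEYWORD_PARTNO);
 *
 * The field length at kw_off can then be read with pci_vpd_info_field_size().
 */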
2273
2274 /* PCI <-> OF binding helpers */
2275 #ifdef CONFIG_OF
2276 struct device_node;
2277 struct irq_domain;
2278 struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
2279 int pci_parse_request_of_pci_ranges(struct device *dev,
2280 struct list_head *resources,
2281 struct resource **bus_range);
2282
2283 /* Arch may override this (weak) */
2284 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
2285
2286 #else /* CONFIG_OF */
2287 static inline struct irq_domain *
2288 pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
2289 static inline int pci_parse_request_of_pci_ranges(struct device *dev,
2290 struct list_head *resources,
2291 struct resource **bus_range)
2292 {
2293 return -EINVAL;
2294 }
2295 #endif /* CONFIG_OF */
2296
2297 static inline struct device_node *
2298 pci_device_to_OF_node(const struct pci_dev *pdev)
2299 {
2300 return pdev ? pdev->dev.of_node : NULL;
2301 }
2302
2303 static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
2304 {
2305 return bus ? bus->dev.of_node : NULL;
2306 }
2307
2308 #ifdef CONFIG_ACPI
2309 struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);
2310
2311 void
2312 pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
2313 #else
2314 static inline struct irq_domain *
2315 pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
2316 #endif
2317
2318 #ifdef CONFIG_EEH
2319 static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
2320 {
2321 return pdev->dev.archdata.edev;
2322 }
2323 #endif
2324
2325 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn);
2326 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
2327 int pci_for_each_dma_alias(struct pci_dev *pdev,
2328 int (*fn)(struct pci_dev *pdev,
2329 u16 alias, void *data), void *data);
2330
2331 /* Helper functions for operation of device flag */
2332 static inline void pci_set_dev_assigned(struct pci_dev *pdev)
2333 {
2334 pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
2335 }
2336 static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
2337 {
2338 pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
2339 }
2340 static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
2341 {
2342 return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
2343 }
2344
2345 /**
2346 * pci_ari_enabled - query ARI forwarding status
2347 * @bus: the PCI bus
2348 *
2349 * Returns true if ARI forwarding is enabled.
2350 */
2351 static inline bool pci_ari_enabled(struct pci_bus *bus)
2352 {
2353 return bus->self && bus->self->ari_enabled;
2354 }
2355
2356 /**
2357 * pci_is_thunderbolt_attached - whether device is on a Thunderbolt daisy chain
2358 * @pdev: PCI device to check
2359 *
2360 * Walk upwards from @pdev and check for each encountered bridge if it's part
2361 * of a Thunderbolt controller. Reaching the host bridge means @pdev is not
2362 * Thunderbolt-attached (but rather, typically, soldered to the mainboard).
2363 */
2364 static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
2365 {
2366 struct pci_dev *parent = pdev;
2367
2368 if (pdev->is_thunderbolt)
2369 return true;
2370
2371 while ((parent = pci_upstream_bridge(parent)))
2372 if (parent->is_thunderbolt)
2373 return true;
2374
2375 return false;
2376 }
2377
2378 #if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
2379 void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
2380 #endif
2381
2382 /* Provide the legacy pci_dma_* API */
2383 #include <linux/pci-dma-compat.h>
2384
2385 #define pci_printk(level, pdev, fmt, arg...) \
2386 dev_printk(level, &(pdev)->dev, fmt, ##arg)
2387
2388 #define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg)
2389 #define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg)
2390 #define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg)
2391 #define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg)
2392 #define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg)
2393 #define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg)
2394 #define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg)
2395 #define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg)
2396
2397 #define pci_notice_ratelimited(pdev, fmt, arg...) \
2398 dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg)
2399
2400 #define pci_info_ratelimited(pdev, fmt, arg...) \
2401 dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)
2402
2403 #endif /* LINUX_PCI_H */
2404