/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MSI_H
#define LINUX_MSI_H

/*
 * This header file contains MSI data structures and functions which are
 * only relevant for:
 *	- Interrupt core code
 *	- PCI/MSI core code
 *	- MSI interrupt domain implementations
 *	- IOMMU, low level VFIO, NTB and other justified exceptions
 *	  dealing with low level MSI details.
 *
 * Regular device drivers have no business with any of these functions and
 * especially storing MSI descriptor pointers in random code is considered
 * abuse.
 *
 * Device driver relevant functions are available in <linux/msi_api.h>
 */

#include <linux/irqdomain_defs.h>
#include <linux/cpumask.h>
#include <linux/msi_api.h>
#include <linux/xarray.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/bits.h>

#include <asm/msi.h>

/* Dummy shadow structures if an architecture does not define them */
#ifndef arch_msi_msg_addr_lo
typedef struct arch_msi_msg_addr_lo {
	u32	address_lo;
} __attribute__ ((packed)) arch_msi_msg_addr_lo_t;
#endif

#ifndef arch_msi_msg_addr_hi
typedef struct arch_msi_msg_addr_hi {
	u32	address_hi;
} __attribute__ ((packed)) arch_msi_msg_addr_hi_t;
#endif

#ifndef arch_msi_msg_data
typedef struct arch_msi_msg_data {
	u32	data;
} __attribute__ ((packed)) arch_msi_msg_data_t;
#endif

#ifndef arch_is_isolated_msi
#define arch_is_isolated_msi()	false
#endif

/**
 * struct msi_msg - Representation of an MSI message
 * @address_lo:		Low 32 bits of msi message address
 * @arch_addr_lo:	Architecture specific shadow of @address_lo
 * @address_hi:		High 32 bits of msi message address
 *			(only used when device supports it)
 * @arch_addr_hi:	Architecture specific shadow of @address_hi
 * @data:		MSI message data (usually 16 bits)
 * @arch_data:		Architecture specific shadow of @data
 */
struct msi_msg {
	union {
		u32			address_lo;
		arch_msi_msg_addr_lo_t	arch_addr_lo;
	};
	union {
		u32			address_hi;
		arch_msi_msg_addr_hi_t	arch_addr_hi;
	};
	union {
		u32			data;
		arch_msi_msg_data_t	arch_data;
	};
};
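/*
 * The message itself is composed by the irq_chip of the underlying parent
 * (controller) domain via its irq_compose_msi_msg() callback. A minimal
 * sketch, assuming a hypothetical controller with a fixed doorbell address
 * which encodes the hardware interrupt number in the data word (the FOO_*
 * name is illustrative only):
 *
 *	static void foo_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
 *	{
 *		msg->address_hi = upper_32_bits(FOO_DOORBELL_PHYS);
 *		msg->address_lo = lower_32_bits(FOO_DOORBELL_PHYS);
 *		msg->data	= d->hwirq;
 *	}
 */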

extern int pci_msi_ignore_mask;
/* Helper functions */
struct msi_desc;
struct pci_dev;
struct platform_msi_priv_data;
struct device_attribute;
struct irq_domain;
struct irq_affinity_desc;

void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
#ifdef CONFIG_GENERIC_MSI_IRQ
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
#else
static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) { }
#endif

typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
				    struct msi_msg *msg);
/**
 * struct pci_msi_desc - PCI/MSI specific MSI descriptor data
 *
 * @msi_mask:	[PCI MSI]   MSI cached mask bits
 * @msix_ctrl:	[PCI MSI-X] MSI-X cached per vector control bits
 * @is_msix:	[PCI MSI/X] True if MSI-X
 * @multiple:	[PCI MSI/X] log2 num of messages allocated
 * @multi_cap:	[PCI MSI/X] log2 num of messages supported
 * @can_mask:	[PCI MSI/X] Masking supported?
 * @is_64:	[PCI MSI/X] Address size: 0=32bit 1=64bit
 * @is_virtual:	[PCI MSI/X] True if the vector was allocated past the
 *		hardware supported table size (not backed by a hardware entry)
 * @default_irq: [PCI MSI/X] The default pre-assigned non-MSI irq
 * @mask_pos:	[PCI MSI]   Mask register position
 * @mask_base:	[PCI MSI-X] Mask register base address
 */
struct pci_msi_desc {
	union {
		u32 msi_mask;
		u32 msix_ctrl;
	};
	struct {
		u8	is_msix		: 1;
		u8	multiple	: 3;
		u8	multi_cap	: 3;
		u8	can_mask	: 1;
		u8	is_64		: 1;
		u8	is_virtual	: 1;
		unsigned int default_irq;
	} msi_attrib;
	union {
		u8	mask_pos;
		void __iomem *mask_base;
	};
};

/**
 * union msi_domain_cookie - Opaque MSI domain specific data
 * @value:	u64 value store
 * @ptr:	Pointer to domain specific data
 * @iobase:	Domain specific IOmem pointer
 *
 * The content of this data is implementation defined and used by the MSI
 * domain to store domain specific information which is required for
 * interrupt chip callbacks.
 */
union msi_domain_cookie {
	u64	value;
	void	*ptr;
	void	__iomem *iobase;
};

/**
 * struct msi_desc_data - Generic MSI descriptor data
 * @dcookie:	Cookie for MSI domain specific data which is required
 *		for irq_chip callbacks
 * @icookie:	Cookie for the MSI interrupt instance provided by
 *		the usage site to the allocation function
 *
 * The content of this data is implementation defined, e.g. PCI/IMS
 * implementations define the meaning of the data. The MSI core ignores
 * this data completely.
 */
struct msi_desc_data {
	union msi_domain_cookie		dcookie;
	union msi_instance_cookie	icookie;
};

#define MSI_MAX_INDEX		((unsigned int)USHRT_MAX)

/**
 * struct msi_desc - Descriptor structure for MSI based interrupts
 * @irq:	The base interrupt number
 * @nvec_used:	The number of vectors used
 * @dev:	Pointer to the device which uses this descriptor
 * @msg:	The last set MSI message cached for reuse
 * @affinity:	Optional pointer to a cpu affinity mask for this descriptor
 * @sysfs_attrs: Pointer to sysfs device attribute
 *
 * @write_msi_msg:	Callback that may be called when the MSI message
 *			address or data changes
 * @write_msi_msg_data:	Data parameter for the callback.
 *
 * @msi_index:	Index of the msi descriptor
 * @pci:	PCI specific msi descriptor data
 * @data:	Generic MSI descriptor data
 */
struct msi_desc {
	/* Shared device/bus type independent data */
	unsigned int			irq;
	unsigned int			nvec_used;
	struct device			*dev;
	struct msi_msg			msg;
	struct irq_affinity_desc	*affinity;
#ifdef CONFIG_IRQ_MSI_IOMMU
	const void			*iommu_cookie;
#endif
#ifdef CONFIG_SYSFS
	struct device_attribute		*sysfs_attrs;
#endif

	void (*write_msi_msg)(struct msi_desc *entry, void *data);
	void *write_msi_msg_data;

	u16				msi_index;
	union {
		struct pci_msi_desc	pci;
		struct msi_desc_data	data;
	};
};

/*
 * Filter values for the MSI descriptor iterators and accessor functions.
 */
enum msi_desc_filter {
	/* All descriptors */
	MSI_DESC_ALL,
	/* Descriptors which have no interrupt associated */
	MSI_DESC_NOTASSOCIATED,
	/* Descriptors which have an interrupt associated */
	MSI_DESC_ASSOCIATED,
};

/**
 * struct msi_dev_domain - The internals of MSI domain info per device
 * @store:	Xarray for storing MSI descriptor pointers
 * @domain:	Pointer to a per device interrupt domain
 */
struct msi_dev_domain {
	struct xarray		store;
	struct irq_domain	*domain;
};

/**
 * struct msi_device_data - MSI per device data
 * @properties:		MSI properties which are interesting to drivers
 * @platform_data:	Platform-MSI specific data
 * @mutex:		Mutex protecting the MSI descriptor store
 * @__domains:		Internal data for per device MSI domains
 * @__iter_idx:		Index to search the next entry for iterators
 */
struct msi_device_data {
	unsigned long			properties;
	struct platform_msi_priv_data	*platform_data;
	struct mutex			mutex;
	struct msi_dev_domain		__domains[MSI_MAX_DEVICE_IRQDOMAINS];
	unsigned long			__iter_idx;
};

int msi_setup_device_data(struct device *dev);

void msi_lock_descs(struct device *dev);
void msi_unlock_descs(struct device *dev);

struct msi_desc *msi_domain_first_desc(struct device *dev, unsigned int domid,
				       enum msi_desc_filter filter);

/**
 * msi_first_desc - Get the first MSI descriptor of the default irqdomain
 * @dev:	Device to operate on
 * @filter:	Descriptor state filter
 *
 * Must be called with the MSI descriptor mutex held, i.e. msi_lock_descs()
 * must be invoked before the call.
 *
 * Return: Pointer to the first MSI descriptor matching the search
 *	   criteria, NULL if none found.
 */
static inline struct msi_desc *msi_first_desc(struct device *dev,
					      enum msi_desc_filter filter)
{
	return msi_domain_first_desc(dev, MSI_DEFAULT_DOMAIN, filter);
}

struct msi_desc *msi_next_desc(struct device *dev, unsigned int domid,
			       enum msi_desc_filter filter);

/**
 * msi_domain_for_each_desc - Iterate the MSI descriptors in a specific domain
 *
 * @desc:	struct msi_desc pointer used as iterator
 * @dev:	struct device pointer - device to iterate
 * @domid:	The id of the interrupt domain which should be walked.
 * @filter:	Filter for descriptor selection
 *
 * Notes:
 *  - The loop must be protected with a msi_lock_descs()/msi_unlock_descs()
 *    pair.
 *  - It is safe to remove a retrieved MSI descriptor in the loop.
 */
#define msi_domain_for_each_desc(desc, dev, domid, filter)			\
	for ((desc) = msi_domain_first_desc((dev), (domid), (filter)); (desc);	\
	     (desc) = msi_next_desc((dev), (domid), (filter)))

/**
 * msi_for_each_desc - Iterate the MSI descriptors in the default irqdomain
 *
 * @desc:	struct msi_desc pointer used as iterator
 * @dev:	struct device pointer - device to iterate
 * @filter:	Filter for descriptor selection
 *
 * Notes:
 *  - The loop must be protected with a msi_lock_descs()/msi_unlock_descs()
 *    pair.
 *  - It is safe to remove a retrieved MSI descriptor in the loop.
 */
#define msi_for_each_desc(desc, dev, filter)					\
	msi_domain_for_each_desc((desc), (dev), MSI_DEFAULT_DOMAIN, (filter))
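/*
 * A minimal usage sketch, assuming the caller holds a valid device pointer
 * and is only interested in descriptors which already have an interrupt
 * assigned (do_something() is illustrative only):
 *
 *	struct msi_desc *desc;
 *
 *	msi_lock_descs(dev);
 *	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED)
 *		do_something(desc->irq);
 *	msi_unlock_descs(dev);
 */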

#define msi_desc_to_dev(desc)		((desc)->dev)

#ifdef CONFIG_IRQ_MSI_IOMMU
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
	return desc->iommu_cookie;
}

static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
					     const void *iommu_cookie)
{
	desc->iommu_cookie = iommu_cookie;
}
#else
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
	return NULL;
}

static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
					     const void *iommu_cookie)
{
}
#endif

int msi_domain_insert_msi_desc(struct device *dev, unsigned int domid,
			       struct msi_desc *init_desc);
/**
 * msi_insert_msi_desc - Allocate and initialize an MSI descriptor in the
 *			 default irqdomain and insert it at @init_desc->msi_index
 * @dev:	Pointer to the device for which the descriptor is allocated
 * @init_desc:	Pointer to an MSI descriptor to initialize the new descriptor
 *
 * Return: 0 on success or an appropriate failure code.
 */
static inline int msi_insert_msi_desc(struct device *dev, struct msi_desc *init_desc)
{
	return msi_domain_insert_msi_desc(dev, MSI_DEFAULT_DOMAIN, init_desc);
}
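/*
 * A minimal sketch, assuming a hypothetical bus/driver which seeds one
 * descriptor at a fixed index before allocating the interrupt; the core
 * allocates a new descriptor and copies the relevant data from @init_desc:
 *
 *	struct msi_desc desc = {
 *		.msi_index	= 0,
 *		.nvec_used	= 1,
 *	};
 *
 *	ret = msi_insert_msi_desc(dev, &desc);
 *	if (ret)
 *		return ret;
 */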

void msi_domain_free_msi_descs_range(struct device *dev, unsigned int domid,
				     unsigned int first, unsigned int last);

/**
 * msi_free_msi_descs_range - Free a range of MSI descriptors of a device
 *			      in the default irqdomain
 *
 * @dev:	Device for which to free the descriptors
 * @first:	Index to start freeing from (inclusive)
 * @last:	Last index to be freed (inclusive)
 */
static inline void msi_free_msi_descs_range(struct device *dev, unsigned int first,
					    unsigned int last)
{
	msi_domain_free_msi_descs_range(dev, MSI_DEFAULT_DOMAIN, first, last);
}

/**
 * msi_free_msi_descs - Free all MSI descriptors of a device in the default irqdomain
 * @dev:	Device to free the descriptors
 */
static inline void msi_free_msi_descs(struct device *dev)
{
	msi_free_msi_descs_range(dev, 0, MSI_MAX_INDEX);
}

/*
 * The arch hooks to set up MSI irqs. Default functions are implemented
 * as weak symbols so that they /can/ be overridden by architecture specific
 * code if needed. These hooks can only be enabled by the architecture.
 *
 * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected they are replaced by
 * stubs with warnings.
 */
#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */

/*
 * Xen uses non-default msi_domain_ops and hence needs a way to populate sysfs
 * entries of MSI IRQs.
 */
#if defined(CONFIG_PCI_XEN) || defined(CONFIG_PCI_MSI_ARCH_FALLBACKS)
#ifdef CONFIG_SYSFS
int msi_device_populate_sysfs(struct device *dev);
void msi_device_destroy_sysfs(struct device *dev);
#else /* CONFIG_SYSFS */
static inline int msi_device_populate_sysfs(struct device *dev) { return 0; }
static inline void msi_device_destroy_sysfs(struct device *dev) { }
#endif /* !CONFIG_SYSFS */
#endif /* CONFIG_PCI_XEN || CONFIG_PCI_MSI_ARCH_FALLBACKS */

/*
 * The restore hook is still available even for fully irq domain based
 * setups. Courtesy to XEN/X86.
 */
bool arch_restore_msi_irqs(struct pci_dev *dev);

#ifdef CONFIG_GENERIC_MSI_IRQ

#include <linux/irqhandler.h>

struct irq_domain;
struct irq_domain_ops;
struct irq_chip;
struct device_node;
struct fwnode_handle;
struct msi_domain_info;

/**
 * struct msi_domain_ops - MSI interrupt domain callbacks
 * @get_hwirq:		Retrieve the resulting hw irq number
 * @msi_init:		Domain specific init function for MSI interrupts
 * @msi_free:		Domain specific function to free an MSI interrupt
 * @msi_prepare:	Prepare the allocation of the interrupts in the domain
 * @prepare_desc:	Optional function to prepare the allocated MSI descriptor
 *			in the domain
 * @set_desc:		Set the msi descriptor for an interrupt
 * @domain_alloc_irqs:	Optional function to override the default allocation
 *			function.
 * @domain_free_irqs:	Optional function to override the default free
 *			function.
 * @msi_post_free:	Optional function which is invoked after freeing
 *			all interrupts.
 *
 * @get_hwirq, @msi_init and @msi_free are callbacks used by the underlying
 * irqdomain.
 *
 * @msi_prepare, @prepare_desc and @set_desc are callbacks used by the
 * msi_domain_alloc/free_irqs*() variants.
 *
 * @domain_alloc_irqs, @domain_free_irqs can be used to override the
 * default allocation/free functions (__msi_domain_alloc/free_irqs). This
 * is initially for a wrapper around XEN's separate MSI universe which can't
 * be wrapped into the regular irq domain concepts by mere mortals. This
 * allows msi_domain_alloc/free_irqs to be used universally without having
 * to special case XEN all over the place.
 */
struct msi_domain_ops {
	irq_hw_number_t	(*get_hwirq)(struct msi_domain_info *info,
				     msi_alloc_info_t *arg);
	int		(*msi_init)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq, irq_hw_number_t hwirq,
				    msi_alloc_info_t *arg);
	void		(*msi_free)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq);
	int		(*msi_prepare)(struct irq_domain *domain,
				       struct device *dev, int nvec,
				       msi_alloc_info_t *arg);
	void		(*prepare_desc)(struct irq_domain *domain, msi_alloc_info_t *arg,
					struct msi_desc *desc);
	void		(*set_desc)(msi_alloc_info_t *arg,
				    struct msi_desc *desc);
	int		(*domain_alloc_irqs)(struct irq_domain *domain,
					     struct device *dev, int nvec);
	void		(*domain_free_irqs)(struct irq_domain *domain,
					     struct device *dev);
	void		(*msi_post_free)(struct irq_domain *domain,
					 struct device *dev);
};
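/*
 * A minimal sketch, assuming the generic msi_alloc_info layout from
 * <asm-generic/msi.h> (i.e. an architecture providing the @desc and @hwirq
 * fields) and a hypothetical bridge which derives the hardware interrupt
 * number directly from the descriptor index:
 *
 *	static void foo_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
 *	{
 *		arg->desc = desc;
 *		arg->hwirq = desc->msi_index;
 *	}
 *
 *	static struct msi_domain_ops foo_msi_domain_ops = {
 *		.set_desc	= foo_msi_set_desc,
 *	};
 */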

/**
 * struct msi_domain_info - MSI interrupt domain data
 * @flags:		Flags to describe features and capabilities
 * @bus_token:		The domain bus token
 * @hwsize:		The hardware table size or the software index limit.
 *			If 0 then the size is considered unlimited and
 *			gets initialized to the maximum software index limit
 *			by the domain creation code.
 * @ops:		The callback data structure
 * @chip:		Optional: associated interrupt chip
 * @chip_data:		Optional: associated interrupt chip data
 * @handler:		Optional: associated interrupt flow handler
 * @handler_data:	Optional: associated interrupt flow handler data
 * @handler_name:	Optional: associated interrupt flow handler name
 * @data:		Optional: domain specific data
 */
struct msi_domain_info {
	u32				flags;
	enum irq_domain_bus_token	bus_token;
	unsigned int			hwsize;
	struct msi_domain_ops		*ops;
	struct irq_chip			*chip;
	void				*chip_data;
	irq_flow_handler_t		handler;
	void				*handler_data;
	const char			*handler_name;
	void				*data;
};
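/*
 * A minimal sketch, assuming a hypothetical PCI/MSI controller driver which
 * provides its own irq_chip, lets the core fill in the remaining defaults
 * and stacks the MSI domain on top of its parent interrupt domain (the foo_*
 * names, fwnode and parent_domain are illustrative only):
 *
 *	static struct msi_domain_info foo_pci_msi_domain_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
 *			  MSI_FLAG_PCI_MSIX,
 *		.chip	= &foo_msi_irq_chip,
 *	};
 *
 *	msi_domain = pci_msi_create_irq_domain(fwnode, &foo_pci_msi_domain_info,
 *					       parent_domain);
 */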

/**
 * struct msi_domain_template - Template for MSI device domains
 * @name:	Storage for the resulting name. Filled in by the core.
 * @chip:	Interrupt chip for this domain
 * @ops:	MSI domain ops
 * @info:	MSI domain info data
 */
struct msi_domain_template {
	char			name[48];
	struct irq_chip		chip;
	struct msi_domain_ops	ops;
	struct msi_domain_info	info;
};
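/*
 * A minimal sketch, assuming a hypothetical device driver which instantiates
 * a per device MSI domain (e.g. for IMS style interrupts) from a template
 * with space for up to 256 vectors; the foo_ims_*() callbacks are
 * illustrative only:
 *
 *	static const struct msi_domain_template foo_ims_template = {
 *		.chip = {
 *			.name			= "FOO-IMS",
 *			.irq_mask		= foo_ims_mask_irq,
 *			.irq_unmask		= foo_ims_unmask_irq,
 *			.irq_write_msi_msg	= foo_ims_write_msi_msg,
 *		},
 *		.info = {
 *			.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
 *		},
 *	};
 *
 *	if (!msi_create_device_irq_domain(dev, MSI_SECONDARY_DOMAIN,
 *					  &foo_ims_template, 256, NULL, NULL))
 *		return -ENODEV;
 */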

/*
 * Flags for msi_domain_info
 *
 * Bit 0-15:	Generic MSI functionality which is not subject to restriction
 *		by parent domains
 *
 * Bit 16-31:	Functionality which depends on the underlying parent domain and
 *		can be masked out by msi_parent_ops::init_dev_msi_info() when
 *		a device MSI domain is initialized.
 */
enum {
	/*
	 * Init non implemented ops callbacks with default MSI domain
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_DOM_OPS	= (1 << 0),
	/*
	 * Init non implemented chip callbacks with default MSI chip
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_CHIP_OPS	= (1 << 1),
	/* Needs early activate, required for PCI */
	MSI_FLAG_ACTIVATE_EARLY		= (1 << 2),
	/*
	 * Must reactivate when irq is started even when
	 * MSI_FLAG_ACTIVATE_EARLY has been set.
	 */
	MSI_FLAG_MUST_REACTIVATE	= (1 << 3),
	/* Populate sysfs on alloc() and destroy it on free() */
	MSI_FLAG_DEV_SYSFS		= (1 << 4),
	/* Allocate simple MSI descriptors */
	MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS	= (1 << 5),
	/* Free MSI descriptors */
	MSI_FLAG_FREE_MSI_DESCS		= (1 << 6),
	/*
	 * Quirk to handle MSI implementations which do not provide
	 * masking. Currently known to affect x86, but has to be partially
	 * handled in the core MSI code.
	 */
	MSI_FLAG_NOMASK_QUIRK		= (1 << 7),

	/* Mask for the generic functionality */
	MSI_GENERIC_FLAGS_MASK		= GENMASK(15, 0),

	/* Mask for the domain specific functionality */
	MSI_DOMAIN_FLAGS_MASK		= GENMASK(31, 16),

	/* Support multiple PCI MSI interrupts */
	MSI_FLAG_MULTI_PCI_MSI		= (1 << 16),
	/* Support PCI MSI-X interrupts */
	MSI_FLAG_PCI_MSIX		= (1 << 17),
	/* Is level-triggered capable, using two messages */
	MSI_FLAG_LEVEL_CAPABLE		= (1 << 18),
	/* MSI-X entries must be contiguous */
	MSI_FLAG_MSIX_CONTIGUOUS	= (1 << 19),
	/* PCI/MSI-X vectors can be dynamically allocated/freed post MSI-X enable */
	MSI_FLAG_PCI_MSIX_ALLOC_DYN	= (1 << 20),
	/* Support for PCI/IMS */
	MSI_FLAG_PCI_IMS		= (1 << 21),
};

/**
 * struct msi_parent_ops - MSI parent domain callbacks and configuration info
 *
 * @supported_flags:	Required: The supported MSI flags of the parent domain
 * @prefix:		Optional: Prefix for the domain and chip name
 * @init_dev_msi_info:	Required: Callback for MSI parent domains to setup parent
 *			domain specific domain flags, domain ops and interrupt chip
 *			callbacks when a per device domain is created.
 */
struct msi_parent_ops {
	u32		supported_flags;
	const char	*prefix;
	bool		(*init_dev_msi_info)(struct device *dev, struct irq_domain *domain,
					     struct irq_domain *msi_parent_domain,
					     struct msi_domain_info *msi_child_info);
};

bool msi_parent_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
				  struct irq_domain *msi_parent_domain,
				  struct msi_domain_info *msi_child_info);

int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
			    bool force);

struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent);

bool msi_create_device_irq_domain(struct device *dev, unsigned int domid,
				  const struct msi_domain_template *template,
				  unsigned int hwsize, void *domain_data,
				  void *chip_data);
void msi_remove_device_irq_domain(struct device *dev, unsigned int domid);

bool msi_match_device_irq_domain(struct device *dev, unsigned int domid,
				 enum irq_domain_bus_token bus_token);

int msi_domain_alloc_irqs_range_locked(struct device *dev, unsigned int domid,
				       unsigned int first, unsigned int last);
int msi_domain_alloc_irqs_range(struct device *dev, unsigned int domid,
				unsigned int first, unsigned int last);
int msi_domain_alloc_irqs_all_locked(struct device *dev, unsigned int domid, int nirqs);

struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, unsigned int index,
				       const struct irq_affinity_desc *affdesc,
				       union msi_instance_cookie *cookie);
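/*
 * A minimal sketch, assuming a hypothetical driver which allocates one
 * interrupt at a driver chosen index in its per device (secondary) domain
 * and hands a queue identifier to the domain via the instance cookie
 * (queue_id and queue_idx are illustrative only):
 *
 *	union msi_instance_cookie icookie = { .value = queue_id };
 *	struct msi_map map;
 *
 *	map = msi_domain_alloc_irq_at(dev, MSI_SECONDARY_DOMAIN, queue_idx,
 *				      NULL, &icookie);
 *	if (map.index < 0)
 *		return map.index;
 *	(map.virq is the Linux interrupt number to request)
 */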

void msi_domain_free_irqs_range_locked(struct device *dev, unsigned int domid,
				       unsigned int first, unsigned int last);
void msi_domain_free_irqs_range(struct device *dev, unsigned int domid,
				unsigned int first, unsigned int last);
void msi_domain_free_irqs_all_locked(struct device *dev, unsigned int domid);
void msi_domain_free_irqs_all(struct device *dev, unsigned int domid);

struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);

struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
						  struct msi_domain_info *info,
						  struct irq_domain *parent);
int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
				   irq_write_msi_msg_t write_msi_msg);
void platform_msi_domain_free_irqs(struct device *dev);
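/*
 * A minimal sketch, assuming a hypothetical platform device whose interrupt
 * generation logic latches the message programmed by @write_msi_msg (the
 * FOO_* register offsets and the foo_device fields are illustrative only):
 *
 *	static void foo_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(msi_desc_to_dev(desc));
 *
 *		writel(msg->address_lo, foo->base + FOO_MSI_ADDR_LO);
 *		writel(msg->address_hi, foo->base + FOO_MSI_ADDR_HI);
 *		writel(msg->data, foo->base + FOO_MSI_DATA);
 *	}
 *
 *	err = platform_msi_domain_alloc_irqs(&pdev->dev, 4, foo_write_msi_msg);
 */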

/* When an MSI domain is used as an intermediate domain */
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *args);
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq, int nvec, msi_alloc_info_t *args);
void msi_domain_depopulate_descs(struct device *dev, int virq, int nvec);

struct irq_domain *
__platform_msi_create_device_domain(struct device *dev,
				    unsigned int nvec,
				    bool is_tree,
				    irq_write_msi_msg_t write_msi_msg,
				    const struct irq_domain_ops *ops,
				    void *host_data);

#define platform_msi_create_device_domain(dev, nvec, write, ops, data)		\
	__platform_msi_create_device_domain(dev, nvec, false, write, ops, data)
#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data)	\
	__platform_msi_create_device_domain(dev, nvec, true, write, ops, data)

int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int virq,
				     unsigned int nr_irqs);
void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int virq,
				     unsigned int nvec);
void *platform_msi_get_host_data(struct irq_domain *domain);

bool msi_device_has_isolated_msi(struct device *dev);
#else /* CONFIG_GENERIC_MSI_IRQ */
static inline bool msi_device_has_isolated_msi(struct device *dev)
{
	/*
	 * Arguably if the platform does not enable MSI support then it has
	 * "isolated MSI", as an interrupt controller that cannot receive MSIs
	 * is inherently isolated by our definition. The default definition for
	 * arch_is_isolated_msi() is conservative and returns false anyhow.
	 */
	return arch_is_isolated_msi();
}
#endif /* CONFIG_GENERIC_MSI_IRQ */

/* PCI specific interfaces */
#ifdef CONFIG_PCI_MSI
struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);
struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
					     struct msi_domain_info *info,
					     struct irq_domain *parent);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
#else /* CONFIG_PCI_MSI */
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
	return NULL;
}
static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg) { }
#endif /* !CONFIG_PCI_MSI */

#endif /* LINUX_MSI_H */