/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MSI_H
#define LINUX_MSI_H

#include <linux/kobject.h>
#include <linux/list.h>

struct msi_msg {
	u32	address_lo;	/* low 32 bits of msi message address */
	u32	address_hi;	/* high 32 bits of msi message address */
	u32	data;		/* 16 bits of msi message data */
};
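
/*
 * Illustrative sketch (not part of this header): an irq_chip's
 * ->irq_compose_msi_msg() callback typically fills a struct msi_msg from a
 * doorbell address and a per-interrupt payload.  FOO_MSI_DOORBELL and the
 * foo_ prefix below are hypothetical.
 *
 *	static void foo_compose_msi_msg(struct irq_data *data,
 *					struct msi_msg *msg)
 *	{
 *		msg->address_lo = lower_32_bits(FOO_MSI_DOORBELL);
 *		msg->address_hi = upper_32_bits(FOO_MSI_DOORBELL);
 *		msg->data = data->hwirq;
 *	}
 */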

extern int pci_msi_ignore_mask;
/* Helper functions */
struct irq_data;
struct msi_desc;
struct pci_dev;
struct platform_msi_priv_data;
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
#ifdef CONFIG_GENERIC_MSI_IRQ
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
#else
static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
#endif
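
/*
 * Illustrative sketch (not part of this header): retrieving the MSI message
 * that was last written for a Linux interrupt number.  'irq' is assumed to
 * be an already set up MSI interrupt.
 *
 *	struct msi_msg msg;
 *
 *	get_cached_msi_msg(irq, &msg);
 *
 * On return, msg holds the cached address/data pair from the interrupt's
 * msi_desc without touching the hardware.
 */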

typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
				    struct msi_msg *msg);

/**
 * platform_msi_desc - Platform device specific msi descriptor data
 * @msi_priv_data: Pointer to platform private data
 * @msi_index: The index of the MSI descriptor for multi MSI
 */
struct platform_msi_desc {
	struct platform_msi_priv_data	*msi_priv_data;
	u16				msi_index;
};

/**
 * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data
 * @msi_index: The index of the MSI descriptor
 */
struct fsl_mc_msi_desc {
	u16				msi_index;
};

/**
 * struct msi_desc - Descriptor structure for MSI based interrupts
 * @list: List head for management
 * @irq: The base interrupt number
 * @nvec_used: The number of vectors used
 * @dev: Pointer to the device which uses this descriptor
 * @msg: The last set MSI message cached for reuse
 * @affinity: Optional pointer to a cpu affinity mask for this descriptor
 *
 * @masked: [PCI MSI/X] Mask bits
 * @is_msix: [PCI MSI/X] True if MSI-X
 * @multiple: [PCI MSI/X] log2 num of messages allocated
 * @multi_cap: [PCI MSI/X] log2 num of messages supported
 * @maskbit: [PCI MSI/X] Mask-Pending bit supported?
 * @is_64: [PCI MSI/X] Address size: 0=32bit 1=64bit
 * @entry_nr: [PCI MSI/X] Entry which is described by this descriptor
 * @default_irq: [PCI MSI/X] The default pre-assigned non-MSI irq
 * @mask_pos: [PCI MSI] Mask register position
 * @mask_base: [PCI MSI-X] Mask register base address
 * @platform: [platform] Platform device specific msi descriptor data
 * @fsl_mc: [fsl-mc] FSL MC device specific msi descriptor data
 */
struct msi_desc {
	/* Shared device/bus type independent data */
	struct list_head		list;
	unsigned int			irq;
	unsigned int			nvec_used;
	struct device			*dev;
	struct msi_msg			msg;
	struct cpumask			*affinity;

	union {
		/* PCI MSI/X specific data */
		struct {
			u32 masked;
			struct {
				__u8	is_msix		: 1;
				__u8	multiple	: 3;
				__u8	multi_cap	: 3;
				__u8	maskbit		: 1;
				__u8	is_64		: 1;
				__u16	entry_nr;
				unsigned default_irq;
			} msi_attrib;
			union {
				u8	mask_pos;
				void __iomem *mask_base;
			};
		};

		/*
		 * Non PCI variants add their data structure here. New
		 * entries need to use a named structure. We want
		 * proper name spaces for this. The PCI part is
		 * anonymous for now as it would require an immediate
		 * tree wide cleanup.
		 */
		struct platform_msi_desc platform;
		struct fsl_mc_msi_desc fsl_mc;
	};
};

/* Helpers to hide struct msi_desc implementation details */
#define msi_desc_to_dev(desc)		((desc)->dev)
#define dev_to_msi_list(dev)		(&(dev)->msi_list)
#define first_msi_entry(dev)		\
	list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
#define for_each_msi_entry(desc, dev)	\
	list_for_each_entry((desc), dev_to_msi_list((dev)), list)
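
/*
 * Illustrative sketch (not part of this header): walking the MSI descriptors
 * of a device after its MSI vectors have been allocated.
 *
 *	struct msi_desc *desc;
 *
 *	for_each_msi_entry(desc, dev)
 *		pr_debug("irq %u uses %u vector(s)\n",
 *			 desc->irq, desc->nvec_used);
 */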

#ifdef CONFIG_PCI_MSI
#define first_pci_msi_entry(pdev)	first_msi_entry(&(pdev)->dev)
#define for_each_pci_msi_entry(desc, pdev)	\
	for_each_msi_entry((desc), &(pdev)->dev)

struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
#else /* CONFIG_PCI_MSI */
static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
{
	return NULL;
}
static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
#endif /* CONFIG_PCI_MSI */

struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
				 const struct cpumask *affinity);
void free_msi_entry(struct msi_desc *entry);
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);

u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);
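
/*
 * Illustrative sketch (not part of this header): how an MSI provider layer
 * (the platform MSI code does something similar) might allocate a descriptor
 * and attach it to a device's MSI list before allocating interrupts from an
 * MSI domain.  'index' is assumed to be provided by the caller.
 *
 *	struct msi_desc *desc;
 *
 *	desc = alloc_msi_entry(dev, 1, NULL);
 *	if (!desc)
 *		return -ENOMEM;
 *
 *	desc->platform.msi_index = index;
 *	list_add_tail(&desc->list, dev_to_msi_list(dev));
 */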

/* Conversion helpers. Should be removed after merging */
static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	__pci_write_msi_msg(entry, msg);
}
static inline void write_msi_msg(int irq, struct msi_msg *msg)
{
	pci_write_msi_msg(irq, msg);
}
static inline void mask_msi_irq(struct irq_data *data)
{
	pci_msi_mask_irq(data);
}
static inline void unmask_msi_irq(struct irq_data *data)
{
	pci_msi_unmask_irq(data);
}

/*
 * The arch hooks to set up msi irqs. Those functions are
 * implemented as weak symbols so that they /can/ be overridden by
 * architecture specific code if needed.
 */
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
void arch_restore_msi_irqs(struct pci_dev *dev);

void default_teardown_msi_irqs(struct pci_dev *dev);
void default_restore_msi_irqs(struct pci_dev *dev);
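
/*
 * Illustrative sketch (not part of this header): a simplified version of
 * what an architecture specific arch_setup_msi_irqs() override might look
 * like, allocating one interrupt per descriptor of the device.  Error
 * handling is reduced to the bare minimum.
 *
 *	int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 *	{
 *		struct msi_desc *desc;
 *		int ret;
 *
 *		for_each_pci_msi_entry(desc, dev) {
 *			ret = arch_setup_msi_irq(dev, desc);
 *			if (ret < 0)
 *				return ret;
 *			if (ret > 0)
 *				return -ENOSPC;
 *		}
 *		return 0;
 *	}
 */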

struct msi_controller {
	struct module *owner;
	struct device *dev;
	struct device_node *of_node;
	struct list_head list;

	int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
			 struct msi_desc *desc);
	int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev,
			  int nvec, int type);
	void (*teardown_irq)(struct msi_controller *chip, unsigned int irq);
};
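
/*
 * Illustrative sketch (not part of this header): a legacy PCI host controller
 * driver providing an msi_controller.  foo_alloc_msi_hwirq(),
 * foo_msi_teardown_irq() and FOO_MSI_DOORBELL are hypothetical.
 *
 *	static int foo_msi_setup_irq(struct msi_controller *chip,
 *				     struct pci_dev *pdev,
 *				     struct msi_desc *desc)
 *	{
 *		struct msi_msg msg;
 *		int irq = foo_alloc_msi_hwirq(chip);
 *
 *		if (irq < 0)
 *			return irq;
 *
 *		irq_set_msi_desc(irq, desc);
 *
 *		msg.address_lo = lower_32_bits(FOO_MSI_DOORBELL);
 *		msg.address_hi = upper_32_bits(FOO_MSI_DOORBELL);
 *		msg.data = irq;
 *		pci_write_msi_msg(irq, &msg);
 *
 *		return 0;
 *	}
 *
 *	static struct msi_controller foo_msi_chip = {
 *		.setup_irq	= foo_msi_setup_irq,
 *		.teardown_irq	= foo_msi_teardown_irq,
 *	};
 */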

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN

#include <linux/irqhandler.h>
#include <asm/msi.h>

struct irq_domain;
struct irq_domain_ops;
struct irq_chip;
struct device_node;
struct fwnode_handle;
struct msi_domain_info;

/**
 * struct msi_domain_ops - MSI interrupt domain callbacks
 * @get_hwirq: Retrieve the resulting hw irq number
 * @msi_init: Domain specific init function for MSI interrupts
 * @msi_free: Domain specific function to free an MSI interrupt
 * @msi_check: Callback for verification of the domain/info/dev data
 * @msi_prepare: Prepare the allocation of the interrupts in the domain
 * @msi_finish: Optional callback to finalize the allocation
 * @set_desc: Set the msi descriptor for an interrupt
 * @handle_error: Optional error handler if the allocation fails
 *
 * @get_hwirq, @msi_init and @msi_free are callbacks used by
 * msi_create_irq_domain() and related interfaces
 *
 * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
 * are callbacks used by msi_domain_alloc_irqs() and related
 * interfaces which are based on msi_desc.
 */
struct msi_domain_ops {
	irq_hw_number_t	(*get_hwirq)(struct msi_domain_info *info,
				     msi_alloc_info_t *arg);
	int		(*msi_init)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq, irq_hw_number_t hwirq,
				    msi_alloc_info_t *arg);
	void		(*msi_free)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq);
	int		(*msi_check)(struct irq_domain *domain,
				     struct msi_domain_info *info,
				     struct device *dev);
	int		(*msi_prepare)(struct irq_domain *domain,
				       struct device *dev, int nvec,
				       msi_alloc_info_t *arg);
	void		(*msi_finish)(msi_alloc_info_t *arg, int retval);
	void		(*set_desc)(msi_alloc_info_t *arg,
				    struct msi_desc *desc);
	int		(*handle_error)(struct irq_domain *domain,
					struct msi_desc *desc, int error);
};
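
/*
 * Illustrative sketch (not part of this header): an MSI provider that only
 * needs a custom ->msi_prepare() and relies on MSI_FLAG_USE_DEF_DOM_OPS to
 * fill in the remaining callbacks.  The foo_ prefix is hypothetical.
 *
 *	static int foo_msi_prepare(struct irq_domain *domain, struct device *dev,
 *				   int nvec, msi_alloc_info_t *arg)
 *	{
 *		memset(arg, 0, sizeof(*arg));
 *		return 0;
 *	}
 *
 *	static struct msi_domain_ops foo_msi_domain_ops = {
 *		.msi_prepare	= foo_msi_prepare,
 *	};
 */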

/**
 * struct msi_domain_info - MSI interrupt domain data
 * @flags: Flags to describe features and capabilities
 * @ops: The callback data structure
 * @chip: Optional: associated interrupt chip
 * @chip_data: Optional: associated interrupt chip data
 * @handler: Optional: associated interrupt flow handler
 * @handler_data: Optional: associated interrupt flow handler data
 * @handler_name: Optional: associated interrupt flow handler name
 * @data: Optional: domain specific data
 */
struct msi_domain_info {
	u32			flags;
	struct msi_domain_ops	*ops;
	struct irq_chip		*chip;
	void			*chip_data;
	irq_flow_handler_t	handler;
	void			*handler_data;
	const char		*handler_name;
	void			*data;
};

/* Flags for msi_domain_info */
enum {
	/*
	 * Init non implemented ops callbacks with default MSI domain
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_DOM_OPS	= (1 << 0),
	/*
	 * Init non implemented chip callbacks with default MSI chip
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_CHIP_OPS	= (1 << 1),
	/* Support multiple PCI MSI interrupts */
	MSI_FLAG_MULTI_PCI_MSI		= (1 << 2),
	/* Support PCI MSIX interrupts */
	MSI_FLAG_PCI_MSIX		= (1 << 3),
	/* Needs early activate, required for PCI */
	MSI_FLAG_ACTIVATE_EARLY		= (1 << 4),
	/*
	 * Must reactivate when irq is started even when
	 * MSI_FLAG_ACTIVATE_EARLY has been set.
	 */
	MSI_FLAG_MUST_REACTIVATE	= (1 << 5),
	/* Is level-triggered capable, using two messages */
	MSI_FLAG_LEVEL_CAPABLE		= (1 << 6),
};
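
/*
 * Illustrative sketch (not part of this header): an irq chip and domain info
 * for a PCI/MSI provider.  Masking is delegated to the PCI MSI helpers
 * declared above; unimplemented domain and chip callbacks are filled in by
 * the default ops.  The foo_ prefix is hypothetical.
 *
 *	static struct irq_chip foo_msi_irq_chip = {
 *		.name		= "FOO-MSI",
 *		.irq_mask	= pci_msi_mask_irq,
 *		.irq_unmask	= pci_msi_unmask_irq,
 *	};
 *
 *	static struct msi_domain_info foo_msi_domain_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
 *			  MSI_FLAG_PCI_MSIX,
 *		.chip	= &foo_msi_irq_chip,
 *	};
 */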

int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
			    bool force);

struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent);
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			  int nvec);
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
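
/*
 * Illustrative sketch (not part of this header): creating an MSI domain on
 * top of a parent interrupt domain and allocating vectors for a client
 * device.  'fwnode', 'parent', 'dev' and 'nvec' are assumed to be provided
 * by the calling driver; foo_msi_domain_info is hypothetical.
 *
 *	struct irq_domain *msi_domain;
 *	int ret;
 *
 *	msi_domain = msi_create_irq_domain(fwnode, &foo_msi_domain_info,
 *					   parent);
 *	if (!msi_domain)
 *		return -ENOMEM;
 *
 *	ret = msi_domain_alloc_irqs(msi_domain, dev, nvec);
 *	if (ret)
 *		return ret;
 *
 * The allocated Linux interrupt numbers can then be found in the device's
 * MSI descriptors via for_each_msi_entry(); msi_domain_free_irqs() undoes
 * the allocation.
 */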

struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
						  struct msi_domain_info *info,
						  struct irq_domain *parent);
int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
				   irq_write_msi_msg_t write_msi_msg);
void platform_msi_domain_free_irqs(struct device *dev);
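
/*
 * Illustrative sketch (not part of this header): a platform device driver
 * allocating platform MSIs.  The write_msi_msg callback programs the message
 * into device registers; struct foo_device, FOO_MSI_ADDR_LO/HI and
 * FOO_MSI_DATA are hypothetical.
 *
 *	static void foo_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(msi_desc_to_dev(desc));
 *
 *		writel_relaxed(msg->address_lo, foo->base + FOO_MSI_ADDR_LO);
 *		writel_relaxed(msg->address_hi, foo->base + FOO_MSI_ADDR_HI);
 *		writel_relaxed(msg->data, foo->base + FOO_MSI_DATA);
 *	}
 *
 *	ret = platform_msi_domain_alloc_irqs(&pdev->dev, 4, foo_write_msi_msg);
 *	if (ret)
 *		return ret;
 *
 * The driver can then request each desc->irq found via for_each_msi_entry()
 * and must call platform_msi_domain_free_irqs() on teardown.
 */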

/* When an MSI domain is used as an intermediate domain */
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *args);
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq, int nvec, msi_alloc_info_t *args);
struct irq_domain *
platform_msi_create_device_domain(struct device *dev,
				  unsigned int nvec,
				  irq_write_msi_msg_t write_msi_msg,
				  const struct irq_domain_ops *ops,
				  void *host_data);
int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs);
void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
			      unsigned int nvec);
void *platform_msi_get_host_data(struct irq_domain *domain);
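
/*
 * Illustrative sketch (not part of this header): an irqchip driver for a
 * wired-interrupt-to-MSI bridge creating a device specific MSI domain.  The
 * driver supplies the irq_domain_ops and the write_msi_msg callback, and
 * typically calls platform_msi_domain_alloc()/free() from the ops' alloc and
 * free paths.  FOO_NR_IRQS, foo_write_msi_msg, foo_domain_ops and priv are
 * hypothetical.
 *
 *	domain = platform_msi_create_device_domain(&pdev->dev, FOO_NR_IRQS,
 *						   foo_write_msi_msg,
 *						   &foo_domain_ops,
 *						   priv);
 *	if (!domain)
 *		return -ENOMEM;
 */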
#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
					     struct msi_domain_info *info,
					     struct irq_domain *parent);
irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
					  struct msi_desc *desc);
int pci_msi_domain_check_cap(struct irq_domain *domain,
			     struct msi_domain_info *info, struct device *dev);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
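
/*
 * Illustrative sketch (not part of this header): a PCI host bridge or MSI
 * controller driver creating the PCI/MSI domain on top of its parent
 * interrupt domain.  'node', 'parent' and foo_msi_domain_info are assumed to
 * exist in the calling driver.
 *
 *	struct irq_domain *pci_domain;
 *
 *	pci_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
 *					       &foo_msi_domain_info,
 *					       parent);
 *	if (!pci_domain)
 *		return -ENOMEM;
 */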
#else
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
	return NULL;
}
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */

#endif /* LINUX_MSI_H */