// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Endpoint *Controller* (EPC) library
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci-ep-cfs.h>

static struct class *pci_epc_class;

static void devm_pci_epc_release(struct device *dev, void *res)
{
	struct pci_epc *epc = *(struct pci_epc **)res;

	pci_epc_destroy(epc);
}

static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
{
	struct pci_epc **epc = res;

	return *epc == match_data;
}

/**
 * pci_epc_put() - release the PCI endpoint controller
 * @epc: epc returned by pci_epc_get()
 *
 * release the refcount the caller obtained by invoking pci_epc_get()
 */
void pci_epc_put(struct pci_epc *epc)
{
	if (!epc || IS_ERR(epc))
		return;

	module_put(epc->ops->owner);
	put_device(&epc->dev);
}
EXPORT_SYMBOL_GPL(pci_epc_put);

/**
 * pci_epc_get() - get the PCI endpoint controller
 * @epc_name: device name of the endpoint controller
 *
 * Invoke to get struct pci_epc * corresponding to the device name of the
 * endpoint controller
 */
struct pci_epc *pci_epc_get(const char *epc_name)
{
	int ret = -EINVAL;
	struct pci_epc *epc;
	struct device *dev;
	struct class_dev_iter iter;

	class_dev_iter_init(&iter, pci_epc_class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter))) {
		if (strcmp(epc_name, dev_name(dev)))
			continue;

		epc = to_pci_epc(dev);
		if (!try_module_get(epc->ops->owner)) {
			ret = -EINVAL;
			goto err;
		}

		class_dev_iter_exit(&iter);
		get_device(&epc->dev);
		return epc;
	}

err:
	class_dev_iter_exit(&iter);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pci_epc_get);
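
/*
 * Usage sketch (illustrative only, not part of this library): a caller that
 * knows the controller's device name looks it up with pci_epc_get() and
 * balances it with pci_epc_put() when done. The controller name below is a
 * hypothetical example.
 *
 *	struct pci_epc *epc;
 *
 *	epc = pci_epc_get("4840.pcie-ep");
 *	if (IS_ERR(epc))
 *		return PTR_ERR(epc);
 *
 *	... use the controller ...
 *
 *	pci_epc_put(epc);
 */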

/**
 * pci_epc_get_first_free_bar() - helper to get first unreserved BAR
 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
 *
 * Invoke to get the first unreserved BAR that can be used by the endpoint
 * function. Returns NO_BAR if no unreserved BAR is available, or BAR_0 if
 * @epc_features is invalid.
 */
enum pci_barno
pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
{
	return pci_epc_get_next_free_bar(epc_features, BAR_0);
}
EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);

/**
 * pci_epc_get_next_free_bar() - helper to get unreserved BAR starting from @bar
 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
 * @bar: the starting BAR number from where unreserved BAR should be searched
 *
 * Invoke to get the next unreserved BAR, starting from @bar, that can be used
 * by the endpoint function. Returns NO_BAR if no unreserved BAR is available,
 * or BAR_0 if @epc_features is invalid.
 */
enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
					 *epc_features, enum pci_barno bar)
{
	unsigned long free_bar;

	if (!epc_features)
		return BAR_0;

	/* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
	if ((epc_features->bar_fixed_64bit << 1) & 1 << bar)
		bar++;

	/* Find if the reserved BAR is also a 64-bit BAR */
	free_bar = epc_features->reserved_bar & epc_features->bar_fixed_64bit;

	/* Set the adjacent bit if the reserved BAR is also a 64-bit BAR */
	free_bar <<= 1;
	free_bar |= epc_features->reserved_bar;

	free_bar = find_next_zero_bit(&free_bar, 6, bar);
	if (free_bar > 5)
		return NO_BAR;

	return free_bar;
}
EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
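
/*
 * Worked example for the masking above (values are illustrative): with
 * reserved_bar = BIT(2) (BAR_2 reserved) and bar_fixed_64bit = BIT(0) | BIT(2)
 * (BAR_0 and BAR_2 are 64-bit), the intersection is BIT(2); shifting it left
 * once and OR-ing reserved_bar back in gives free_bar = BIT(2) | BIT(3), so
 * BAR_3, the upper half of the reserved 64-bit BAR_2, is treated as busy too.
 * Starting from BAR_0, find_next_zero_bit() then returns BAR_0.
 */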

/**
 * pci_epc_get_features() - get the features supported by EPC
 * @epc: the features supported by *this* EPC device will be returned
 * @func_no: the features supported by the EPC device specific to the
 *	     endpoint function with func_no will be returned
 * @vfunc_no: the features supported by the EPC device specific to the
 *	     virtual endpoint function with vfunc_no will be returned
 *
 * Invoke to get the features provided by the EPC which may be
 * specific to an endpoint function. Returns pci_epc_features on success
 * and NULL for any failures.
 */
const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
						    u8 func_no, u8 vfunc_no)
{
	const struct pci_epc_features *epc_features;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return NULL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return NULL;

	if (!epc->ops->get_features)
		return NULL;

	mutex_lock(&epc->lock);
	epc_features = epc->ops->get_features(epc, func_no, vfunc_no);
	mutex_unlock(&epc->lock);

	return epc_features;
}
EXPORT_SYMBOL_GPL(pci_epc_get_features);
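
/*
 * Usage sketch (illustrative only): an endpoint function driver usually
 * queries the controller capabilities once at bind time and derives the
 * first usable BAR from them. The error codes chosen below are assumptions.
 *
 *	const struct pci_epc_features *features;
 *	enum pci_barno bar;
 *
 *	features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
 *	if (!features)
 *		return -EOPNOTSUPP;
 *
 *	bar = pci_epc_get_first_free_bar(features);
 *	if (bar == NO_BAR)
 *		return -EINVAL;
 */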

/**
 * pci_epc_stop() - stop the PCI link
 * @epc: the link of the EPC device that has to be stopped
 *
 * Invoke to stop the PCI link
 */
void pci_epc_stop(struct pci_epc *epc)
{
	if (IS_ERR(epc) || !epc->ops->stop)
		return;

	mutex_lock(&epc->lock);
	epc->ops->stop(epc);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_stop);

/**
 * pci_epc_start() - start the PCI link
 * @epc: the link of *this* EPC device has to be started
 *
 * Invoke to start the PCI link
 */
int pci_epc_start(struct pci_epc *epc)
{
	int ret;

	if (IS_ERR(epc))
		return -EINVAL;

	if (!epc->ops->start)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->start(epc);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_start);

/**
 * pci_epc_raise_irq() - interrupt the host system
 * @epc: the EPC device which has to interrupt the host
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @type: specify the type of interrupt; legacy, MSI or MSI-X
 * @interrupt_num: the MSI or MSI-X interrupt number with range (1-N)
 *
 * Invoke to raise a legacy, MSI or MSI-X interrupt
 */
int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		      enum pci_epc_irq_type type, u16 interrupt_num)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->raise_irq)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->raise_irq(epc, func_no, vfunc_no, type, interrupt_num);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
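
/*
 * Usage sketch (illustrative only): once the host has enabled MSI, an
 * endpoint function can signal it through the controller. The interrupt
 * number 1 below is an assumption; PCI_EPC_IRQ_MSI is the MSI value of the
 * enum pci_epc_irq_type used above.
 *
 *	int count;
 *
 *	count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
 *	if (count > 0)
 *		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
 *				  PCI_EPC_IRQ_MSI, 1);
 */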

/**
 * pci_epc_map_msi_irq() - Map physical address to MSI address and return
 *                         MSI data
 * @epc: the EPC device which has the MSI capability
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @phys_addr: the physical address of the outbound region
 * @interrupt_num: the MSI interrupt number with range (1-N)
 * @entry_size: Size of Outbound address region for each interrupt
 * @msi_data: the data that should be written in order to raise MSI interrupt
 *            with interrupt number as 'interrupt_num'
 * @msi_addr_offset: Offset of MSI address from the aligned outbound address
 *                   to which the MSI address is mapped
 *
 * Invoke to map physical address to MSI address and return MSI data. The
 * physical address should be an address in the outbound region. This is
 * required to implement doorbell functionality of NTB wherein EPC on either
 * side of the interface (primary and secondary) can directly write to the
 * physical address (in outbound region) of the other interface to ring
 * doorbell.
 */
int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size,
			u32 *msi_data, u32 *msi_addr_offset)
{
	int ret;

	if (IS_ERR_OR_NULL(epc))
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->map_msi_irq)
		return -EINVAL;

	mutex_lock(&epc->lock);
	ret = epc->ops->map_msi_irq(epc, func_no, vfunc_no, phys_addr,
				    interrupt_num, entry_size, msi_data,
				    msi_addr_offset);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq);

/**
 * pci_epc_get_msi() - get the number of MSI interrupts allocated
 * @epc: the EPC device to which MSI interrupts were requested
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 *
 * Invoke to get the number of MSI interrupts allocated by the RC
 */
int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	int interrupt;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return 0;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return 0;

	if (!epc->ops->get_msi)
		return 0;

	mutex_lock(&epc->lock);
	interrupt = epc->ops->get_msi(epc, func_no, vfunc_no);
	mutex_unlock(&epc->lock);

	if (interrupt < 0)
		return 0;

	interrupt = 1 << interrupt;

	return interrupt;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msi);

/**
 * pci_epc_set_msi() - set the number of MSI interrupts required
 * @epc: the EPC device on which MSI has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @interrupts: number of MSI interrupts required by the EPF
 *
 * Invoke to set the required number of MSI interrupts.
 */
int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
{
	int ret;
	u8 encode_int;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    interrupts < 1 || interrupts > 32)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->set_msi)
		return 0;

	encode_int = order_base_2(interrupts);

	mutex_lock(&epc->lock);
	ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_msi);
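
/*
 * Encoding example for the call above: the MSI capability stores the
 * requested vector count as a power-of-two exponent (Multiple Message
 * Capable), so a driver asking for 6 vectors has order_base_2(6) = 3
 * written to the hardware, i.e. the host sees support for 8 vectors.
 * pci_epc_get_msi() performs the inverse (1 << value) when reporting how
 * many vectors the host actually enabled.
 */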

/**
 * pci_epc_get_msix() - get the number of MSI-X interrupts allocated
 * @epc: the EPC device to which MSI-X interrupts were requested
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 *
 * Invoke to get the number of MSI-X interrupts allocated by the RC
 */
int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	int interrupt;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return 0;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return 0;

	if (!epc->ops->get_msix)
		return 0;

	mutex_lock(&epc->lock);
	interrupt = epc->ops->get_msix(epc, func_no, vfunc_no);
	mutex_unlock(&epc->lock);

	if (interrupt < 0)
		return 0;

	return interrupt + 1;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msix);

/**
 * pci_epc_set_msix() - set the number of MSI-X interrupts required
 * @epc: the EPC device on which MSI-X has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @interrupts: number of MSI-X interrupts required by the EPF
 * @bir: BAR where the MSI-X table resides
 * @offset: Offset pointing to the start of MSI-X table
 *
 * Invoke to set the required number of MSI-X interrupts.
 */
int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		     u16 interrupts, enum pci_barno bir, u32 offset)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    interrupts < 1 || interrupts > 2048)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->set_msix)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir,
				 offset);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_msix);
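
/*
 * Usage sketch (illustrative only): the MSI-X Table Size field is encoded as
 * N - 1 in the capability, which is why the core passes 'interrupts - 1'
 * above. An endpoint function driver typically places the table in one of
 * its BARs; the BAR and offset below are assumptions.
 *
 *	ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no, 32,
 *			       BAR_1, 0x1000);
 *	if (ret)
 *		return ret;
 */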

/**
 * pci_epc_unmap_addr() - unmap CPU address from PCI address
 * @epc: the EPC device on which address is allocated
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @phys_addr: physical address of the local system
 *
 * Invoke to unmap the CPU address from PCI address.
 */
void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			phys_addr_t phys_addr)
{
	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return;

	if (!epc->ops->unmap_addr)
		return;

	mutex_lock(&epc->lock);
	epc->ops->unmap_addr(epc, func_no, vfunc_no, phys_addr);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);

/**
 * pci_epc_map_addr() - map CPU address to PCI address
 * @epc: the EPC device on which address is allocated
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @phys_addr: physical address of the local system
 * @pci_addr: PCI address to which the physical address should be mapped
 * @size: the size of the allocation
 *
 * Invoke to map the CPU address to a PCI address.
 */
int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		     phys_addr_t phys_addr, u64 pci_addr, size_t size)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->map_addr)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->map_addr(epc, func_no, vfunc_no, phys_addr, pci_addr,
				 size);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_map_addr);
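
/*
 * Usage sketch (illustrative only): a window in the controller's outbound
 * address space is usually obtained with pci_epc_mem_alloc_addr() and then
 * mapped to a host-side PCI address before it is accessed. The window size,
 * 'host_pci_addr' and 'buf' below are assumptions.
 *
 *	void __iomem *virt;
 *	phys_addr_t phys;
 *
 *	virt = pci_epc_mem_alloc_addr(epc, &phys, SZ_4K);
 *	if (!virt)
 *		return -ENOMEM;
 *
 *	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys,
 *			       host_pci_addr, SZ_4K);
 *	if (ret) {
 *		pci_epc_mem_free_addr(epc, phys, virt, SZ_4K);
 *		return ret;
 *	}
 *
 *	memcpy_toio(virt, buf, SZ_4K);
 *
 *	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys);
 *	pci_epc_mem_free_addr(epc, phys, virt, SZ_4K);
 */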

/**
 * pci_epc_clear_bar() - reset the BAR
 * @epc: the EPC device for which the BAR has to be cleared
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @epf_bar: the struct epf_bar that contains the BAR information
 *
 * Invoke to reset the BAR of the endpoint device.
 */
void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		       struct pci_epf_bar *epf_bar)
{
	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    (epf_bar->barno == BAR_5 &&
	     epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
		return;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return;

	if (!epc->ops->clear_bar)
		return;

	mutex_lock(&epc->lock);
	epc->ops->clear_bar(epc, func_no, vfunc_no, epf_bar);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_clear_bar);

/**
 * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
 * @epc: the EPC device on which BAR has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @epf_bar: the struct epf_bar that contains the BAR information
 *
 * Invoke to configure the BAR of the endpoint device.
 */
int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		    struct pci_epf_bar *epf_bar)
{
	int ret;
	int flags = epf_bar->flags;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    (epf_bar->barno == BAR_5 &&
	     flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
	    (flags & PCI_BASE_ADDRESS_SPACE_IO &&
	     flags & PCI_BASE_ADDRESS_IO_MASK) ||
	    (upper_32_bits(epf_bar->size) &&
	     !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->set_bar)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->set_bar(epc, func_no, vfunc_no, epf_bar);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_bar);
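
/*
 * Usage sketch (illustrative only): the caller fills a struct pci_epf_bar
 * with a previously allocated local buffer and its attributes before handing
 * it to the controller. 'phys', 'virt', the size and the flags below are
 * assumptions.
 *
 *	struct pci_epf_bar *epf_bar = &epf->bar[BAR_0];
 *
 *	epf_bar->phys_addr = phys;
 *	epf_bar->addr = virt;
 *	epf_bar->size = SZ_1M;
 *	epf_bar->barno = BAR_0;
 *	epf_bar->flags = PCI_BASE_ADDRESS_MEM_TYPE_64 |
 *			 PCI_BASE_ADDRESS_MEM_PREFETCH;
 *
 *	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
 *	if (ret)
 *		dev_err(&epf->dev, "failed to set BAR0\n");
 */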

/**
 * pci_epc_write_header() - write standard configuration header
 * @epc: the EPC device to which the configuration header should be written
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @header: standard configuration header fields
 *
 * Invoke to write the configuration header to the endpoint controller. Every
 * endpoint controller will have a dedicated location to which the standard
 * configuration header would be written. The callback function should write
 * the header fields to this dedicated location.
 */
int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			 struct pci_epf_header *header)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	/* Only Virtual Function #1 has deviceID */
	if (vfunc_no > 1)
		return -EINVAL;

	if (!epc->ops->write_header)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->write_header(epc, func_no, vfunc_no, header);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_write_header);
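
/*
 * Usage sketch (illustrative only): an endpoint function driver typically
 * keeps its configuration header constant and writes it once the controller
 * core is initialized; the IDs below are assumptions.
 *
 *	static struct pci_epf_header hdr = {
 *		.vendorid	= PCI_ANY_ID,
 *		.deviceid	= PCI_ANY_ID,
 *		.baseclass_code	= PCI_CLASS_OTHERS,
 *		.interrupt_pin	= PCI_INTERRUPT_INTA,
 *	};
 *
 *	ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, &hdr);
 *	if (ret)
 *		dev_err(&epf->dev, "configuration header write failed\n");
 */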

/**
 * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
 * @epc: the EPC device to which the endpoint function should be added
 * @epf: the endpoint function to be added
 * @type: Identifies if the EPC is connected to the primary or secondary
 *        interface of EPF
 *
 * A PCI endpoint device can have one or more functions. In the case of PCIe,
 * the specification allows up to 8 PCIe endpoint functions. Invoke
 * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
 */
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
		    enum pci_epc_interface_type type)
{
	struct list_head *list;
	u32 func_no;
	int ret = 0;

	if (IS_ERR_OR_NULL(epc) || epf->is_vf)
		return -EINVAL;

	if (type == PRIMARY_INTERFACE && epf->epc)
		return -EBUSY;

	if (type == SECONDARY_INTERFACE && epf->sec_epc)
		return -EBUSY;

	mutex_lock(&epc->list_lock);
	func_no = find_first_zero_bit(&epc->function_num_map,
				      BITS_PER_LONG);
	if (func_no >= BITS_PER_LONG) {
		ret = -EINVAL;
		goto ret;
	}

	if (func_no > epc->max_functions - 1) {
		dev_err(&epc->dev, "Exceeding max supported Function Number\n");
		ret = -EINVAL;
		goto ret;
	}

	set_bit(func_no, &epc->function_num_map);
	if (type == PRIMARY_INTERFACE) {
		epf->func_no = func_no;
		epf->epc = epc;
		list = &epf->list;
	} else {
		epf->sec_epc_func_no = func_no;
		epf->sec_epc = epc;
		list = &epf->sec_epc_list;
	}

	list_add_tail(list, &epc->pci_epf);
ret:
	mutex_unlock(&epc->list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_add_epf);

/**
 * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller
 * @epc: the EPC device from which the endpoint function should be removed
 * @epf: the endpoint function to be removed
 * @type: identifies if the EPC is connected to the primary or secondary
 *        interface of EPF
 *
 * Invoke to remove PCI endpoint function from the endpoint controller.
 */
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
			enum pci_epc_interface_type type)
{
	struct list_head *list;
	u32 func_no = 0;

	if (!epc || IS_ERR(epc) || !epf)
		return;

	if (type == PRIMARY_INTERFACE) {
		func_no = epf->func_no;
		list = &epf->list;
	} else {
		func_no = epf->sec_epc_func_no;
		list = &epf->sec_epc_list;
	}

	mutex_lock(&epc->list_lock);
	clear_bit(func_no, &epc->function_num_map);
	list_del(list);
	epf->epc = NULL;
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_remove_epf);

/**
 * pci_epc_linkup() - Notify the EPF device that EPC device has established a
 *		      connection with the Root Complex.
 * @epc: the EPC device which has established link with the host
 *
 * Invoke to notify the EPF device that the EPC device has established a
 * connection with the Root Complex.
 */
void pci_epc_linkup(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (!epc || IS_ERR(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->link_up)
			epf->event_ops->link_up(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkup);

/**
 * pci_epc_linkdown() - Notify the EPF device that EPC device has dropped the
 *			connection with the Root Complex.
 * @epc: the EPC device which has dropped the link with the host
 *
 * Invoke to notify the EPF device that the EPC device has dropped the
 * connection with the Root Complex.
 */
void pci_epc_linkdown(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (!epc || IS_ERR(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->link_down)
			epf->event_ops->link_down(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkdown);

/**
 * pci_epc_init_notify() - Notify the EPF device that EPC device's core
 *			   initialization is completed.
 * @epc: the EPC device whose core initialization is completed
 *
 * Invoke to notify the EPF device that the EPC device's initialization
 * is completed.
 */
void pci_epc_init_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (!epc || IS_ERR(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->core_init)
			epf->event_ops->core_init(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_init_notify);
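
/*
 * Usage sketch (illustrative only): an endpoint function driver receives the
 * notifications sent above by filling a struct pci_epc_event_ops and pointing
 * epf->event_ops at it, usually in its probe path. The callback names below
 * are assumptions.
 *
 *	static const struct pci_epc_event_ops my_epf_event_ops = {
 *		.core_init	= my_epf_core_init,
 *		.link_up	= my_epf_link_up,
 *	};
 *
 *	... in the driver's probe path ...
 *	epf->event_ops = &my_epf_event_ops;
 */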

/**
 * pci_epc_bme_notify() - Notify the EPF device that the EPC device has received
 *			  the BME event from the Root Complex
 * @epc: the EPC device that received the BME event
 *
 * Invoke to notify the EPF device that the EPC device has received the Bus
 * Master Enable (BME) event from the Root Complex
 */
void pci_epc_bme_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (!epc || IS_ERR(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->bme)
			epf->event_ops->bme(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_bme_notify);

/**
 * pci_epc_destroy() - destroy the EPC device
 * @epc: the EPC device that has to be destroyed
 *
 * Invoke to destroy the PCI EPC device
 */
void pci_epc_destroy(struct pci_epc *epc)
{
	pci_ep_cfs_remove_epc_group(epc->group);
	device_unregister(&epc->dev);
}
EXPORT_SYMBOL_GPL(pci_epc_destroy);

/**
 * devm_pci_epc_destroy() - destroy the EPC device
 * @dev: device that wants to destroy the EPC
 * @epc: the EPC device that has to be destroyed
 *
 * Invoke to destroy the devres associated with this
 * pci_epc and destroy the EPC device.
 */
void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
{
	int r;

	r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
			   epc);
	dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
}
EXPORT_SYMBOL_GPL(devm_pci_epc_destroy);

static void pci_epc_release(struct device *dev)
{
	kfree(to_pci_epc(dev));
}

/**
 * __pci_epc_create() - create a new endpoint controller (EPC) device
 * @dev: device that is creating the new EPC
 * @ops: function pointers for performing EPC operations
 * @owner: the owner of the module that creates the EPC device
 *
 * Invoke to create a new EPC device and add it to pci_epc class.
 */
struct pci_epc *
__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
		 struct module *owner)
{
	int ret;
	struct pci_epc *epc;

	if (WARN_ON(!dev)) {
		ret = -EINVAL;
		goto err_ret;
	}

	epc = kzalloc(sizeof(*epc), GFP_KERNEL);
	if (!epc) {
		ret = -ENOMEM;
		goto err_ret;
	}

	mutex_init(&epc->lock);
	mutex_init(&epc->list_lock);
	INIT_LIST_HEAD(&epc->pci_epf);

	device_initialize(&epc->dev);
	epc->dev.class = pci_epc_class;
	epc->dev.parent = dev;
	epc->dev.release = pci_epc_release;
	epc->ops = ops;

	ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
	if (ret)
		goto put_dev;

	ret = device_add(&epc->dev);
	if (ret)
		goto put_dev;

	epc->group = pci_ep_cfs_add_epc_group(dev_name(dev));

	return epc;

put_dev:
	/*
	 * Dropping the last reference ends up in pci_epc_release(), which
	 * frees @epc; an explicit kfree() here would be a double free.
	 */
	put_device(&epc->dev);

err_ret:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__pci_epc_create);

/**
 * __devm_pci_epc_create() - create a new endpoint controller (EPC) device
 * @dev: device that is creating the new EPC
 * @ops: function pointers for performing EPC operations
 * @owner: the owner of the module that creates the EPC device
 *
 * Invoke to create a new EPC device and add it to pci_epc class.
 * While at that, it also associates the device with the pci_epc using devres.
 * On driver detach, release function is invoked on the devres data,
 * then, devres data is freed.
 */
struct pci_epc *
__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
		      struct module *owner)
{
	struct pci_epc **ptr, *epc;

	ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	epc = __pci_epc_create(dev, ops, owner);
	if (!IS_ERR(epc)) {
		*ptr = epc;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return epc;
}
EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
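
/*
 * Usage sketch (illustrative only): a controller driver normally calls the
 * devm_pci_epc_create() wrapper from include/linux/pci-epc.h in its probe
 * routine, so the EPC is torn down automatically on driver detach. The ops
 * structure name and the function count below are assumptions.
 *
 *	struct pci_epc *epc;
 *
 *	epc = devm_pci_epc_create(dev, &my_epc_ops);
 *	if (IS_ERR(epc))
 *		return PTR_ERR(epc);
 *
 *	epc->max_functions = 1;
 */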

static int __init pci_epc_init(void)
{
	pci_epc_class = class_create("pci_epc");
	if (IS_ERR(pci_epc_class)) {
		pr_err("failed to create pci epc class --> %ld\n",
		       PTR_ERR(pci_epc_class));
		return PTR_ERR(pci_epc_class);
	}

	return 0;
}
module_init(pci_epc_init);

static void __exit pci_epc_exit(void)
{
	class_destroy(pci_epc_class);
}
module_exit(pci_epc_exit);

MODULE_DESCRIPTION("PCI EPC Library");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");