// SPDX-License-Identifier: GPL-2.0
/*
 * PCI EPF driver for MHI Endpoint devices
 *
 * Copyright (C) 2023 Linaro Ltd.
 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 */

#include <linux/dmaengine.h>
#include <linux/mhi_ep.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

#define MHI_VERSION_1_0 0x01000000

#define to_epf_mhi(cntrl) container_of(cntrl, struct pci_epf_mhi, cntrl)

/* Platform specific flags */
#define MHI_EPF_USE_DMA BIT(0)

struct pci_epf_mhi_ep_info {
	const struct mhi_ep_cntrl_config *config;
	struct pci_epf_header *epf_header;
	enum pci_barno bar_num;
	u32 epf_flags;
	u32 msi_count;
	u32 mru;
	u32 flags;
};

#define MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, direction)	\
	{							\
		.num = ch_num,					\
		.name = ch_name,				\
		.dir = direction,				\
	}

#define MHI_EP_CHANNEL_CONFIG_UL(ch_num, ch_name)		\
	MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, DMA_TO_DEVICE)

#define MHI_EP_CHANNEL_CONFIG_DL(ch_num, ch_name)		\
	MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, DMA_FROM_DEVICE)

static const struct mhi_ep_channel_config mhi_v1_channels[] = {
	MHI_EP_CHANNEL_CONFIG_UL(0, "LOOPBACK"),
	MHI_EP_CHANNEL_CONFIG_DL(1, "LOOPBACK"),
	MHI_EP_CHANNEL_CONFIG_UL(2, "SAHARA"),
	MHI_EP_CHANNEL_CONFIG_DL(3, "SAHARA"),
	MHI_EP_CHANNEL_CONFIG_UL(4, "DIAG"),
	MHI_EP_CHANNEL_CONFIG_DL(5, "DIAG"),
	MHI_EP_CHANNEL_CONFIG_UL(6, "SSR"),
	MHI_EP_CHANNEL_CONFIG_DL(7, "SSR"),
	MHI_EP_CHANNEL_CONFIG_UL(8, "QDSS"),
	MHI_EP_CHANNEL_CONFIG_DL(9, "QDSS"),
	MHI_EP_CHANNEL_CONFIG_UL(10, "EFS"),
	MHI_EP_CHANNEL_CONFIG_DL(11, "EFS"),
	MHI_EP_CHANNEL_CONFIG_UL(12, "MBIM"),
	MHI_EP_CHANNEL_CONFIG_DL(13, "MBIM"),
	MHI_EP_CHANNEL_CONFIG_UL(14, "QMI"),
	MHI_EP_CHANNEL_CONFIG_DL(15, "QMI"),
	MHI_EP_CHANNEL_CONFIG_UL(16, "QMI"),
	MHI_EP_CHANNEL_CONFIG_DL(17, "QMI"),
	MHI_EP_CHANNEL_CONFIG_UL(18, "IP-CTRL-1"),
	MHI_EP_CHANNEL_CONFIG_DL(19, "IP-CTRL-1"),
	MHI_EP_CHANNEL_CONFIG_UL(20, "IPCR"),
	MHI_EP_CHANNEL_CONFIG_DL(21, "IPCR"),
	MHI_EP_CHANNEL_CONFIG_UL(32, "DUN"),
	MHI_EP_CHANNEL_CONFIG_DL(33, "DUN"),
	MHI_EP_CHANNEL_CONFIG_UL(46, "IP_SW0"),
	MHI_EP_CHANNEL_CONFIG_DL(47, "IP_SW0"),
};

static const struct mhi_ep_cntrl_config mhi_v1_config = {
	.max_channels = 128,
	.num_channels = ARRAY_SIZE(mhi_v1_channels),
	.ch_cfg = mhi_v1_channels,
	.mhi_version = MHI_VERSION_1_0,
};

static struct pci_epf_header sdx55_header = {
	.vendorid = PCI_VENDOR_ID_QCOM,
	.deviceid = 0x0306,
	.baseclass_code = PCI_BASE_CLASS_COMMUNICATION,
	.subclass_code = PCI_CLASS_COMMUNICATION_MODEM & 0xff,
	.interrupt_pin = PCI_INTERRUPT_INTA,
};

static const struct pci_epf_mhi_ep_info sdx55_info = {
	.config = &mhi_v1_config,
	.epf_header = &sdx55_header,
	.bar_num = BAR_0,
	.epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
	.msi_count = 32,
	.mru = 0x8000,
};

static struct pci_epf_header sm8450_header = {
	.vendorid = PCI_VENDOR_ID_QCOM,
	.deviceid = 0x0306,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin = PCI_INTERRUPT_INTA,
};

static const struct pci_epf_mhi_ep_info sm8450_info = {
	.config = &mhi_v1_config,
	.epf_header = &sm8450_header,
	.bar_num = BAR_0,
	.epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
	.msi_count = 32,
	.mru = 0x8000,
	.flags = MHI_EPF_USE_DMA,
};

struct pci_epf_mhi {
	const struct pci_epc_features *epc_features;
	const struct pci_epf_mhi_ep_info *info;
	struct mhi_ep_cntrl mhi_cntrl;
	struct pci_epf *epf;
	struct mutex lock;
	void __iomem *mmio;
	resource_size_t mmio_phys;
	struct dma_chan *dma_chan_tx;
	struct dma_chan *dma_chan_rx;
	u32 mmio_size;
	int irq;
};

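/*
 * Offset of @addr from the outbound window alignment boundary required by the
 * endpoint controller (epc_features->align).
 */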
static size_t get_align_offset(struct pci_epf_mhi *epf_mhi, u64 addr)
{
	return addr & (epf_mhi->epc_features->align - 1);
}

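/*
 * Allocate EPC outbound window memory and map it to the host PCI address.
 * The mapping is created at the alignment boundary below @pci_addr and
 * @offset is folded back into the returned physical and virtual addresses.
 */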
static int __pci_epf_mhi_alloc_map(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
				   phys_addr_t *paddr, void __iomem **vaddr,
				   size_t offset, size_t size)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	struct pci_epf *epf = epf_mhi->epf;
	struct pci_epc *epc = epf->epc;
	int ret;

	*vaddr = pci_epc_mem_alloc_addr(epc, paddr, size + offset);
	if (!*vaddr)
		return -ENOMEM;

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, *paddr,
			       pci_addr - offset, size + offset);
	if (ret) {
		pci_epc_mem_free_addr(epc, *paddr, *vaddr, size + offset);
		return ret;
	}

	*paddr = *paddr + offset;
	*vaddr = *vaddr + offset;

	return 0;
}

static int pci_epf_mhi_alloc_map(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
				 phys_addr_t *paddr, void __iomem **vaddr,
				 size_t size)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	size_t offset = get_align_offset(epf_mhi, pci_addr);

	return __pci_epf_mhi_alloc_map(mhi_cntrl, pci_addr, paddr, vaddr,
				       offset, size);
}

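/*
 * Tear down the mapping created by __pci_epf_mhi_alloc_map() and free the
 * outbound window memory.
 */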
static void __pci_epf_mhi_unmap_free(struct mhi_ep_cntrl *mhi_cntrl,
				     u64 pci_addr, phys_addr_t paddr,
				     void __iomem *vaddr, size_t offset,
				     size_t size)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	struct pci_epf *epf = epf_mhi->epf;
	struct pci_epc *epc = epf->epc;

	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, paddr - offset);
	pci_epc_mem_free_addr(epc, paddr - offset, vaddr - offset,
			      size + offset);
}

static void pci_epf_mhi_unmap_free(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
				   phys_addr_t paddr, void __iomem *vaddr,
				   size_t size)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	size_t offset = get_align_offset(epf_mhi, pci_addr);

	__pci_epf_mhi_unmap_free(mhi_cntrl, pci_addr, paddr, vaddr, offset,
				 size);
}

static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl, u32 vector)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	struct pci_epf *epf = epf_mhi->epf;
	struct pci_epc *epc = epf->epc;

	/*
	 * MHI supplies 0 based MSI vectors but the API expects the vector
	 * number to start from 1, so we need to increment the vector by 1.
	 */
	pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_EPC_IRQ_MSI,
			  vector + 1);
}

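/*
 * Read from host memory through the iATU by temporarily mapping the source
 * PCI address into the EPC outbound window. Serialized with epf_mhi->lock
 * since the mapping resources are shared between callers.
 */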
static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
				 void *to, size_t size)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	size_t offset = get_align_offset(epf_mhi, from);
	void __iomem *tre_buf;
	phys_addr_t tre_phys;
	int ret;

	mutex_lock(&epf_mhi->lock);

	ret = __pci_epf_mhi_alloc_map(mhi_cntrl, from, &tre_phys, &tre_buf,
				      offset, size);
	if (ret) {
		mutex_unlock(&epf_mhi->lock);
		return ret;
	}

	memcpy_fromio(to, tre_buf, size);

	__pci_epf_mhi_unmap_free(mhi_cntrl, from, tre_phys, tre_buf, offset,
				 size);

	mutex_unlock(&epf_mhi->lock);

	return 0;
}

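/* Write to host memory through the iATU, mirroring pci_epf_mhi_iatu_read() */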
static int pci_epf_mhi_iatu_write(struct mhi_ep_cntrl *mhi_cntrl,
				  void *from, u64 to, size_t size)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	size_t offset = get_align_offset(epf_mhi, to);
	void __iomem *tre_buf;
	phys_addr_t tre_phys;
	int ret;

	mutex_lock(&epf_mhi->lock);

	ret = __pci_epf_mhi_alloc_map(mhi_cntrl, to, &tre_phys, &tre_buf,
				      offset, size);
	if (ret) {
		mutex_unlock(&epf_mhi->lock);
		return ret;
	}

	memcpy_toio(tre_buf, from, size);

	__pci_epf_mhi_unmap_free(mhi_cntrl, to, tre_phys, tre_buf, offset,
				 size);

	mutex_unlock(&epf_mhi->lock);

	return 0;
}

static void pci_epf_mhi_dma_callback(void *param)
{
	complete(param);
}

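/*
 * Read from host memory using the eDMA engine of the endpoint controller.
 * Transfers smaller than 4K are handed to the iATU path, where the DMA setup
 * overhead is likely to outweigh any benefit.
 */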
static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
				 void *to, size_t size)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
	struct dma_chan *chan = epf_mhi->dma_chan_rx;
	struct device *dev = &epf_mhi->epf->dev;
	DECLARE_COMPLETION_ONSTACK(complete);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config config = {};
	dma_cookie_t cookie;
	dma_addr_t dst_addr;
	int ret;

	if (size < SZ_4K)
		return pci_epf_mhi_iatu_read(mhi_cntrl, from, to, size);

	mutex_lock(&epf_mhi->lock);

	config.direction = DMA_DEV_TO_MEM;
	config.src_addr = from;

	ret = dmaengine_slave_config(chan, &config);
	if (ret) {
		dev_err(dev, "Failed to configure DMA channel\n");
		goto err_unlock;
	}

	dst_addr = dma_map_single(dma_dev, to, size, DMA_FROM_DEVICE);
	ret = dma_mapping_error(dma_dev, dst_addr);
	if (ret) {
		dev_err(dev, "Failed to map remote memory\n");
		goto err_unlock;
	}

	desc = dmaengine_prep_slave_single(chan, dst_addr, size, DMA_DEV_TO_MEM,
					   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "Failed to prepare DMA\n");
		ret = -EIO;
		goto err_unmap;
	}

	desc->callback = pci_epf_mhi_dma_callback;
	desc->callback_param = &complete;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA submit\n");
		goto err_unmap;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(dev, "DMA transfer timeout\n");
		dmaengine_terminate_sync(chan);
		ret = -ETIMEDOUT;
	} else {
		/* wait_for_completion_timeout() returns remaining jiffies on success */
		ret = 0;
	}

err_unmap:
	dma_unmap_single(dma_dev, dst_addr, size, DMA_FROM_DEVICE);
err_unlock:
	mutex_unlock(&epf_mhi->lock);

	return ret;
}

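/*
 * Write to host memory using the eDMA engine of the endpoint controller.
 * As with reads, transfers smaller than 4K fall back to the iATU path.
 */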
static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from,
				  u64 to, size_t size)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
	struct dma_chan *chan = epf_mhi->dma_chan_tx;
	struct device *dev = &epf_mhi->epf->dev;
	DECLARE_COMPLETION_ONSTACK(complete);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config config = {};
	dma_cookie_t cookie;
	dma_addr_t src_addr;
	int ret;

	if (size < SZ_4K)
		return pci_epf_mhi_iatu_write(mhi_cntrl, from, to, size);

	mutex_lock(&epf_mhi->lock);

	config.direction = DMA_MEM_TO_DEV;
	config.dst_addr = to;

	ret = dmaengine_slave_config(chan, &config);
	if (ret) {
		dev_err(dev, "Failed to configure DMA channel\n");
		goto err_unlock;
	}

	src_addr = dma_map_single(dma_dev, from, size, DMA_TO_DEVICE);
	ret = dma_mapping_error(dma_dev, src_addr);
	if (ret) {
		dev_err(dev, "Failed to map remote memory\n");
		goto err_unlock;
	}

	desc = dmaengine_prep_slave_single(chan, src_addr, size, DMA_MEM_TO_DEV,
					   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "Failed to prepare DMA\n");
		ret = -EIO;
		goto err_unmap;
	}

	desc->callback = pci_epf_mhi_dma_callback;
	desc->callback_param = &complete;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA submit\n");
		goto err_unmap;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(dev, "DMA transfer timeout\n");
		dmaengine_terminate_sync(chan);
		ret = -ETIMEDOUT;
	} else {
		/* wait_for_completion_timeout() returns remaining jiffies on success */
		ret = 0;
	}

err_unmap:
	dma_unmap_single(dma_dev, src_addr, size, DMA_TO_DEVICE);
err_unlock:
	mutex_unlock(&epf_mhi->lock);

	return ret;
}

struct epf_dma_filter {
	struct device *dev;
	u32 dma_mask;
};

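/*
 * dma_request_channel() filter: pick a channel that belongs to the EPC DMA
 * device and supports the requested transfer direction.
 */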
static bool pci_epf_mhi_filter(struct dma_chan *chan, void *node)
{
	struct epf_dma_filter *filter = node;
	struct dma_slave_caps caps;

	memset(&caps, 0, sizeof(caps));
	dma_get_slave_caps(chan, &caps);

	return chan->device->dev == filter->dev &&
	       (filter->dma_mask & caps.directions);
}

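/* Request one TX (MEM_TO_DEV) and one RX (DEV_TO_MEM) channel from the EPC DMA engine */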
static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi)
{
	struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
	struct device *dev = &epf_mhi->epf->dev;
	struct epf_dma_filter filter;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	filter.dev = dma_dev;
	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
	epf_mhi->dma_chan_tx = dma_request_channel(mask, pci_epf_mhi_filter,
						   &filter);
	if (IS_ERR_OR_NULL(epf_mhi->dma_chan_tx)) {
		dev_err(dev, "Failed to request tx channel\n");
		return -ENODEV;
	}

	filter.dma_mask = BIT(DMA_DEV_TO_MEM);
	epf_mhi->dma_chan_rx = dma_request_channel(mask, pci_epf_mhi_filter,
						   &filter);
	if (IS_ERR_OR_NULL(epf_mhi->dma_chan_rx)) {
		dev_err(dev, "Failed to request rx channel\n");
		dma_release_channel(epf_mhi->dma_chan_tx);
		epf_mhi->dma_chan_tx = NULL;
		return -ENODEV;
	}

	return 0;
}

static void pci_epf_mhi_dma_deinit(struct pci_epf_mhi *epf_mhi)
{
	dma_release_channel(epf_mhi->dma_chan_tx);
	dma_release_channel(epf_mhi->dma_chan_rx);
	epf_mhi->dma_chan_tx = NULL;
	epf_mhi->dma_chan_rx = NULL;
}

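/*
 * EPC core init callback: program the MMIO BAR, MSI capability and
 * configuration space header, and cache the EPC features for later
 * alignment handling.
 */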
static int pci_epf_mhi_core_init(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
	struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	int ret;

	epf_bar->phys_addr = epf_mhi->mmio_phys;
	epf_bar->size = epf_mhi->mmio_size;
	epf_bar->barno = info->bar_num;
	epf_bar->flags = info->epf_flags;
	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
	if (ret) {
		dev_err(dev, "Failed to set BAR: %d\n", ret);
		return ret;
	}

	ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
			      order_base_2(info->msi_count));
	if (ret) {
		dev_err(dev, "Failed to set MSI configuration: %d\n", ret);
		return ret;
	}

	ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no,
				   epf->header);
	if (ret) {
		dev_err(dev, "Failed to set Configuration header: %d\n", ret);
		return ret;
	}

	epf_mhi->epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (!epf_mhi->epc_features)
		return -ENODATA;

	return 0;
}

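/*
 * Link-up callback: the host is now reachable, so set up DMA (if used) and
 * register the MHI endpoint controller with the MHI EP stack.
 */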
static int pci_epf_mhi_link_up(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
	struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	int ret;

	if (info->flags & MHI_EPF_USE_DMA) {
		ret = pci_epf_mhi_dma_init(epf_mhi);
		if (ret) {
			dev_err(dev, "Failed to initialize DMA: %d\n", ret);
			return ret;
		}
	}

	mhi_cntrl->mmio = epf_mhi->mmio;
	mhi_cntrl->irq = epf_mhi->irq;
	mhi_cntrl->mru = info->mru;

	/* Assign the struct dev of PCI EP as MHI controller device */
	mhi_cntrl->cntrl_dev = epc->dev.parent;
	mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
	mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
	mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
	if (info->flags & MHI_EPF_USE_DMA) {
		mhi_cntrl->read_from_host = pci_epf_mhi_edma_read;
		mhi_cntrl->write_to_host = pci_epf_mhi_edma_write;
	} else {
		mhi_cntrl->read_from_host = pci_epf_mhi_iatu_read;
		mhi_cntrl->write_to_host = pci_epf_mhi_iatu_write;
	}

	/* Register the MHI EP controller */
	ret = mhi_ep_register_controller(mhi_cntrl, info->config);
	if (ret) {
		dev_err(dev, "Failed to register MHI EP controller: %d\n", ret);
		if (info->flags & MHI_EPF_USE_DMA)
			pci_epf_mhi_dma_deinit(epf_mhi);
		return ret;
	}

	return 0;
}

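/* Link-down callback: power down the MHI EP stack and unregister the controller */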
static int pci_epf_mhi_link_down(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
	struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;

	if (mhi_cntrl->mhi_dev) {
		mhi_ep_power_down(mhi_cntrl);
		if (info->flags & MHI_EPF_USE_DMA)
			pci_epf_mhi_dma_deinit(epf_mhi);
		mhi_ep_unregister_controller(mhi_cntrl);
	}

	return 0;
}

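/*
 * Bus Master Enable (BME) callback: once the host enables bus mastering, the
 * endpoint can access host memory, so power up the MHI EP stack.
 */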
static int pci_epf_mhi_bme(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
	struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
	struct device *dev = &epf->dev;
	int ret;

	/*
	 * Power up the MHI EP stack if link is up and stack is in power down
	 * state.
	 */
	if (!mhi_cntrl->enabled && mhi_cntrl->mhi_dev) {
		ret = mhi_ep_power_up(mhi_cntrl);
		if (ret) {
			dev_err(dev, "Failed to power up MHI EP: %d\n", ret);
			if (info->flags & MHI_EPF_USE_DMA)
				pci_epf_mhi_dma_deinit(epf_mhi);
			mhi_ep_unregister_controller(mhi_cntrl);
		}
	}

	return 0;
}

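/* Grab the MMIO region and doorbell interrupt from the EP controller platform device */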
static int pci_epf_mhi_bind(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;
	struct platform_device *pdev = to_platform_device(epc->dev.parent);
	struct resource *res;
	int ret;

	/* Get MMIO base address from Endpoint controller */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio");
	if (!res)
		return -ENODEV;

	epf_mhi->mmio_phys = res->start;
	epf_mhi->mmio_size = resource_size(res);

	epf_mhi->mmio = ioremap(epf_mhi->mmio_phys, epf_mhi->mmio_size);
	if (!epf_mhi->mmio)
		return -ENOMEM;

	ret = platform_get_irq_byname(pdev, "doorbell");
	if (ret < 0) {
		iounmap(epf_mhi->mmio);
		return ret;
	}

	epf_mhi->irq = ret;

	return 0;
}

static void pci_epf_mhi_unbind(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
	struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
	struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
	struct pci_epc *epc = epf->epc;

	/*
	 * Forcefully power down the MHI EP stack. The only way to bring it
	 * back to a working state after a subsequent bind is for the host to
	 * reassert BME.
	 */
	if (mhi_cntrl->mhi_dev) {
		mhi_ep_power_down(mhi_cntrl);
		if (info->flags & MHI_EPF_USE_DMA)
			pci_epf_mhi_dma_deinit(epf_mhi);
		mhi_ep_unregister_controller(mhi_cntrl);
	}

	iounmap(epf_mhi->mmio);
	pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
}

static struct pci_epc_event_ops pci_epf_mhi_event_ops = {
	.core_init = pci_epf_mhi_core_init,
	.link_up = pci_epf_mhi_link_up,
	.link_down = pci_epf_mhi_link_down,
	.bme = pci_epf_mhi_bme,
};

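/* Allocate per-function state and hook up the device-specific info from the ID table */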
static int pci_epf_mhi_probe(struct pci_epf *epf,
			     const struct pci_epf_device_id *id)
{
	struct pci_epf_mhi_ep_info *info =
		(struct pci_epf_mhi_ep_info *)id->driver_data;
	struct pci_epf_mhi *epf_mhi;
	struct device *dev = &epf->dev;

	epf_mhi = devm_kzalloc(dev, sizeof(*epf_mhi), GFP_KERNEL);
	if (!epf_mhi)
		return -ENOMEM;

	epf->header = info->epf_header;
	epf_mhi->info = info;
	epf_mhi->epf = epf;

	epf->event_ops = &pci_epf_mhi_event_ops;

	mutex_init(&epf_mhi->lock);

	epf_set_drvdata(epf, epf_mhi);

	return 0;
}

static const struct pci_epf_device_id pci_epf_mhi_ids[] = {
	{ .name = "sdx55", .driver_data = (kernel_ulong_t)&sdx55_info },
	{ .name = "sm8450", .driver_data = (kernel_ulong_t)&sm8450_info },
	{},
};

static struct pci_epf_ops pci_epf_mhi_ops = {
	.unbind = pci_epf_mhi_unbind,
	.bind = pci_epf_mhi_bind,
};

static struct pci_epf_driver pci_epf_mhi_driver = {
	.driver.name = "pci_epf_mhi",
	.probe = pci_epf_mhi_probe,
	.id_table = pci_epf_mhi_ids,
	.ops = &pci_epf_mhi_ops,
	.owner = THIS_MODULE,
};

static int __init pci_epf_mhi_init(void)
{
	return pci_epf_register_driver(&pci_epf_mhi_driver);
}
module_init(pci_epf_mhi_init);

static void __exit pci_epf_mhi_exit(void)
{
	pci_epf_unregister_driver(&pci_epf_mhi_driver);
}
module_exit(pci_epf_mhi_exit);

MODULE_DESCRIPTION("PCI EPF driver for MHI Endpoint devices");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
MODULE_LICENSE("GPL");