// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA core driver
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/dma/edma.h>
#include <linux/dma-mapping.h>

#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

static inline
struct device *dchan2dev(struct dma_chan *dchan)
{
	return &dchan->dev->device;
}

static inline
struct device *chan2dev(struct dw_edma_chan *chan)
{
	return &chan->vc.chan.dev->device;
}

static inline
struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct dw_edma_desc, vd);
}

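/*
 * Descriptor memory model: a dw_edma_desc owns a list of dw_edma_chunk
 * elements (each one mapping onto a hardware linked-list region) and
 * every chunk owns a list of dw_edma_burst elements (each one describing
 * a single source/destination/size triplet). The allocators below build
 * this hierarchy with GFP_NOWAIT because they can be called from the
 * dmaengine prep path, which must not sleep.
 */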
static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *burst;

	burst = kzalloc(sizeof(*burst), GFP_NOWAIT);
	if (unlikely(!burst))
		return NULL;

	INIT_LIST_HEAD(&burst->list);
	if (chunk->burst) {
		/* Create and add new element into the linked list */
		chunk->bursts_alloc++;
		list_add_tail(&burst->list, &chunk->burst->list);
	} else {
		/* List head */
		chunk->bursts_alloc = 0;
		chunk->burst = burst;
	}

	return burst;
}

static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chip *chip = desc->chan->dw->chip;
	struct dw_edma_chan *chan = desc->chan;
	struct dw_edma_chunk *chunk;

	chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
	if (unlikely(!chunk))
		return NULL;

	INIT_LIST_HEAD(&chunk->list);
	chunk->chan = chan;
	/*
	 * Toggling the change bit (CB) in each chunk is the mechanism that
	 * informs the eDMA HW block that a new linked list is ready to be
	 * consumed.
	 *  - Odd chunks originate CB equal to 0
	 *  - Even chunks originate CB equal to 1
	 */
	chunk->cb = !(desc->chunks_alloc % 2);
	if (chan->dir == EDMA_DIR_WRITE) {
		chunk->ll_region.paddr = chip->ll_region_wr[chan->id].paddr;
		chunk->ll_region.vaddr = chip->ll_region_wr[chan->id].vaddr;
	} else {
		chunk->ll_region.paddr = chip->ll_region_rd[chan->id].paddr;
		chunk->ll_region.vaddr = chip->ll_region_rd[chan->id].vaddr;
	}

	if (desc->chunk) {
		/* Create and add new element into the linked list */
		if (!dw_edma_alloc_burst(chunk)) {
			kfree(chunk);
			return NULL;
		}
		desc->chunks_alloc++;
		list_add_tail(&chunk->list, &desc->chunk->list);
	} else {
		/* List head */
		chunk->burst = NULL;
		desc->chunks_alloc = 0;
		desc->chunk = chunk;
	}

	return chunk;
}

static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (unlikely(!desc))
		return NULL;

	desc->chan = chan;
	if (!dw_edma_alloc_chunk(desc)) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static void dw_edma_free_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *child, *_next;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &chunk->burst->list, list) {
		list_del(&child->list);
		kfree(child);
		chunk->bursts_alloc--;
	}

	/*
	 * Remove the list head: once the loop above terminates, 'child'
	 * rests on the container of the list head, i.e. chunk->burst itself.
	 */
	kfree(child);
	chunk->burst = NULL;
}

static void dw_edma_free_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chunk *child, *_next;

	if (!desc->chunk)
		return;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &desc->chunk->list, list) {
		dw_edma_free_burst(child);
		list_del(&child->list);
		kfree(child);
		desc->chunks_alloc--;
	}

	/*
	 * Remove the list head: as above, 'child' now points at the list
	 * head container, i.e. desc->chunk itself.
	 */
	kfree(child);
	desc->chunk = NULL;
}

static void dw_edma_free_desc(struct dw_edma_desc *desc)
{
	dw_edma_free_chunk(desc);
	kfree(desc);
}

static void vchan_free_desc(struct virt_dma_desc *vdesc)
{
	dw_edma_free_desc(vd2dw_edma_desc(vdesc));
}

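/*
 * Feed the hardware with one chunk: program and doorbell the first queued
 * chunk of the current descriptor (the '!desc->xfer_sz' flag marks the
 * very first chunk of a transfer), account for its size in xfer_sz and
 * release it. The remaining chunks are kicked one by one from the done
 * interrupt handler.
 */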
static void dw_edma_start_transfer(struct dw_edma_chan *chan)
{
	struct dw_edma_chunk *child;
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return;

	desc = vd2dw_edma_desc(vd);
	if (!desc)
		return;

	child = list_first_entry_or_null(&desc->chunk->list,
					 struct dw_edma_chunk, list);
	if (!child)
		return;

	dw_edma_v0_core_start(child, !desc->xfer_sz);
	desc->xfer_sz += child->ll_region.sz;
	dw_edma_free_burst(child);
	list_del(&child->list);
	kfree(child);
	desc->chunks_alloc--;
}

static int dw_edma_device_config(struct dma_chan *dchan,
				 struct dma_slave_config *config)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));
	chan->configured = true;

	return 0;
}

static int dw_edma_device_pause(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured)
		err = -EPERM;
	else if (chan->status != EDMA_ST_BUSY)
		err = -EPERM;
	else if (chan->request != EDMA_REQ_NONE)
		err = -EPERM;
	else
		chan->request = EDMA_REQ_PAUSE;

	return err;
}

static int dw_edma_device_resume(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured) {
		err = -EPERM;
	} else if (chan->status != EDMA_ST_PAUSE) {
		err = -EPERM;
	} else if (chan->request != EDMA_REQ_NONE) {
		err = -EPERM;
	} else {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}

	return err;
}

static int dw_edma_device_terminate_all(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured) {
		/* Do nothing */
	} else if (chan->status == EDMA_ST_PAUSE) {
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->status == EDMA_ST_IDLE) {
		chan->configured = false;
	} else if (dw_edma_v0_core_ch_status(chan) == DMA_COMPLETE) {
		/*
		 * The channel is in a false BUSY state, probably because it
		 * missed or lost an interrupt
		 */
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->request > EDMA_REQ_PAUSE) {
		err = -EPERM;
	} else {
		chan->request = EDMA_REQ_STOP;
	}

	return err;
}

static void dw_edma_device_issue_pending(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (chan->configured && chan->request == EDMA_REQ_NONE &&
	    chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

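/*
 * Residue is reported at descriptor granularity (matching the
 * DMA_RESIDUE_GRANULARITY_DESCRIPTOR capability advertised in
 * dw_edma_channel_setup()): it is the total number of bytes queued on
 * the descriptor minus the bytes already handed to the hardware.
 */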
static enum dma_status
dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
			 struct dma_tx_state *txstate)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE)
		ret = DMA_PAUSED;

	if (!txstate)
		goto ret_residue;

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_find_desc(&chan->vc, cookie);
	if (vd) {
		desc = vd2dw_edma_desc(vd);
		if (desc)
			residue = desc->alloc_sz - desc->xfer_sz;
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);

ret_residue:
	dma_set_residue(txstate, residue);

	return ret;
}

static struct dma_async_tx_descriptor *
dw_edma_device_transfer(struct dw_edma_transfer *xfer)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
	enum dma_transfer_direction dir = xfer->direction;
	phys_addr_t src_addr, dst_addr;
	struct scatterlist *sg = NULL;
	struct dw_edma_chunk *chunk;
	struct dw_edma_burst *burst;
	struct dw_edma_desc *desc;
	u32 cnt = 0;
	int i;

	if (!chan->configured)
		return NULL;

	/*
	 * Local Root Port/End-point              Remote End-point
	 * +-----------------------+ PCIe bus +----------------------+
	 * |                       |    +-+   |                      |
	 * |    DEV_TO_MEM   Rx Ch <----+ +---+ Tx Ch  DEV_TO_MEM    |
	 * |                       |    | |   |                      |
	 * |    MEM_TO_DEV   Tx Ch +----+ +---> Rx Ch  MEM_TO_DEV    |
	 * |                       |    +-+   |                      |
	 * +-----------------------+          +----------------------+
	 *
	 * 1. Normal logic:
	 * If eDMA is embedded into the DW PCIe RP/EP and controlled from the
	 * CPU/Application side, the Rx channel (EDMA_DIR_READ) will be used
	 * for the device read operations (DEV_TO_MEM) and the Tx channel
	 * (EDMA_DIR_WRITE) - for the write operations (MEM_TO_DEV).
	 *
	 * 2. Inverted logic:
	 * If eDMA is embedded into a Remote PCIe EP and is controlled by the
	 * MWr/MRd TLPs sent from the CPU's PCIe host controller, the Tx
	 * channel (EDMA_DIR_WRITE) will be used for the device read operations
	 * (DEV_TO_MEM) and the Rx channel (EDMA_DIR_READ) - for the write
	 * operations (MEM_TO_DEV).
	 *
	 * It is the client driver's responsibility to choose a proper channel
	 * for the DMA transfers.
	 */
	if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
		if ((chan->dir == EDMA_DIR_READ && dir != DMA_DEV_TO_MEM) ||
		    (chan->dir == EDMA_DIR_WRITE && dir != DMA_MEM_TO_DEV))
			return NULL;
	} else {
		if ((chan->dir == EDMA_DIR_WRITE && dir != DMA_DEV_TO_MEM) ||
		    (chan->dir == EDMA_DIR_READ && dir != DMA_MEM_TO_DEV))
			return NULL;
	}

	if (xfer->type == EDMA_XFER_CYCLIC) {
		if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
			return NULL;
	} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
		if (xfer->xfer.sg.len < 1)
			return NULL;
	} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
		if (!xfer->xfer.il->numf)
			return NULL;
		if (xfer->xfer.il->numf > 0 && xfer->xfer.il->frame_size > 0)
			return NULL;
	} else {
		return NULL;
	}

	desc = dw_edma_alloc_desc(chan);
	if (unlikely(!desc))
		goto err_alloc;

	chunk = dw_edma_alloc_chunk(desc);
	if (unlikely(!chunk))
		goto err_alloc;

	if (xfer->type == EDMA_XFER_INTERLEAVED) {
		src_addr = xfer->xfer.il->src_start;
		dst_addr = xfer->xfer.il->dst_start;
	} else {
		src_addr = chan->config.src_addr;
		dst_addr = chan->config.dst_addr;
	}

	if (xfer->type == EDMA_XFER_CYCLIC) {
		cnt = xfer->xfer.cyclic.cnt;
	} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
		cnt = xfer->xfer.sg.len;
		sg = xfer->xfer.sg.sgl;
	} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
		if (xfer->xfer.il->numf > 0)
			cnt = xfer->xfer.il->numf;
		else
			cnt = xfer->xfer.il->frame_size;
	}

	for (i = 0; i < cnt; i++) {
		if (xfer->type == EDMA_XFER_SCATTER_GATHER && !sg)
			break;

		if (chunk->bursts_alloc == chan->ll_max) {
			chunk = dw_edma_alloc_chunk(desc);
			if (unlikely(!chunk))
				goto err_alloc;
		}

		burst = dw_edma_alloc_burst(chunk);
		if (unlikely(!burst))
			goto err_alloc;

		if (xfer->type == EDMA_XFER_CYCLIC)
			burst->sz = xfer->xfer.cyclic.len;
		else if (xfer->type == EDMA_XFER_SCATTER_GATHER)
			burst->sz = sg_dma_len(sg);
		else if (xfer->type == EDMA_XFER_INTERLEAVED)
			burst->sz = xfer->xfer.il->sgl[i].size;

		chunk->ll_region.sz += burst->sz;
		desc->alloc_sz += burst->sz;

		if (dir == DMA_DEV_TO_MEM) {
			burst->sar = src_addr;
			if (xfer->type == EDMA_XFER_CYCLIC) {
				burst->dar = xfer->xfer.cyclic.paddr;
			} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
				src_addr += sg_dma_len(sg);
				burst->dar = sg_dma_address(sg);
				/*
				 * Unlike the typical assumption made by other
				 * drivers/IPs, the peripheral memory here is
				 * not a FIFO but linear memory, which is why
				 * the source and destination addresses are
				 * advanced by the same amount (the data
				 * length).
				 */
			}
		} else {
			burst->dar = dst_addr;
			if (xfer->type == EDMA_XFER_CYCLIC) {
				burst->sar = xfer->xfer.cyclic.paddr;
			} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
				dst_addr += sg_dma_len(sg);
				burst->sar = sg_dma_address(sg);
				/*
				 * Same linear-memory reasoning as in the
				 * DEV_TO_MEM branch above.
				 */
			}
		}

		if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
			sg = sg_next(sg);
		} else if (xfer->type == EDMA_XFER_INTERLEAVED &&
			   xfer->xfer.il->frame_size > 0) {
			struct dma_interleaved_template *il = xfer->xfer.il;
			struct data_chunk *dc = &il->sgl[i];

			if (il->src_sgl) {
				src_addr += burst->sz;
				src_addr += dmaengine_get_src_icg(il, dc);
			}

			if (il->dst_sgl) {
				dst_addr += burst->sz;
				dst_addr += dmaengine_get_dst_icg(il, dc);
			}
		}
	}

	return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);

err_alloc:
	if (desc)
		dw_edma_free_desc(desc);

	return NULL;
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			     unsigned int len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.sg.sgl = sgl;
	xfer.xfer.sg.len = len;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_SCATTER_GATHER;

	return dw_edma_device_transfer(&xfer);
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr,
			       size_t len, size_t count,
			       enum dma_transfer_direction direction,
			       unsigned long flags)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.cyclic.paddr = paddr;
	xfer.xfer.cyclic.len = len;
	xfer.xfer.cyclic.cnt = count;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_CYCLIC;

	return dw_edma_device_transfer(&xfer);
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan,
				    struct dma_interleaved_template *ilt,
				    unsigned long flags)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = ilt->dir;
	xfer.xfer.il = ilt;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_INTERLEAVED;

	return dw_edma_device_transfer(&xfer);
}

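/*
 * A minimal client-side sketch (all names and addresses below are
 * hypothetical, shown only to illustrate how the prep callbacks above are
 * reached through the generic dmaengine API):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = remote_bus_addr,
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *	struct dma_chan *dchan;
 *	dma_cookie_t cookie;
 *
 *	dchan = dma_request_chan(dev, "tx");
 *	if (IS_ERR(dchan))
 *		return PTR_ERR(dchan);
 *
 *	dmaengine_slave_config(dchan, &cfg);
 *	txd = dmaengine_prep_slave_sg(dchan, sgl, nents, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT);
 *	if (!txd)
 *		return -EIO;
 *
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(dchan);
 */
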
static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;

	dw_edma_v0_core_clear_done_int(chan);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		switch (chan->request) {
		case EDMA_REQ_NONE:
			desc = vd2dw_edma_desc(vd);
			if (desc->chunks_alloc) {
				/* More chunks queued: kick the next one */
				chan->status = EDMA_ST_BUSY;
				dw_edma_start_transfer(chan);
			} else {
				list_del(&vd->node);
				vchan_cookie_complete(vd);
				chan->status = EDMA_ST_IDLE;
			}
			break;

		case EDMA_REQ_STOP:
			list_del(&vd->node);
			vchan_cookie_complete(vd);
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_IDLE;
			break;

		case EDMA_REQ_PAUSE:
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_PAUSE;
			break;

		default:
			break;
		}
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	dw_edma_v0_core_clear_abort_int(chan);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		list_del(&vd->node);
		vchan_cookie_complete(vd);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	chan->request = EDMA_REQ_NONE;
	chan->status = EDMA_ST_IDLE;
}

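/*
 * The dw->chan[] array stores all write channels first and all read
 * channels after them, which is why the read path below indexes with an
 * offset of dw->wr_ch_cnt. Each IRQ handler only services the channels
 * enabled in its own wr_mask/rd_mask.
 */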
static irqreturn_t dw_edma_interrupt(int irq, void *data, bool write)
{
	struct dw_edma_irq *dw_irq = data;
	struct dw_edma *dw = dw_irq->dw;
	unsigned long total, pos, val;
	unsigned long off;
	u32 mask;

	if (write) {
		total = dw->wr_ch_cnt;
		off = 0;
		mask = dw_irq->wr_mask;
	} else {
		total = dw->rd_ch_cnt;
		off = dw->wr_ch_cnt;
		mask = dw_irq->rd_mask;
	}

	val = dw_edma_v0_core_status_done_int(dw, write ?
						  EDMA_DIR_WRITE :
						  EDMA_DIR_READ);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		struct dw_edma_chan *chan = &dw->chan[pos + off];

		dw_edma_done_interrupt(chan);
	}

	val = dw_edma_v0_core_status_abort_int(dw, write ?
						   EDMA_DIR_WRITE :
						   EDMA_DIR_READ);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		struct dw_edma_chan *chan = &dw->chan[pos + off];

		dw_edma_abort_interrupt(chan);
	}

	return IRQ_HANDLED;
}

static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
{
	return dw_edma_interrupt(irq, data, true);
}

static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
{
	return dw_edma_interrupt(irq, data, false);
}

static irqreturn_t dw_edma_interrupt_common(int irq, void *data)
{
	dw_edma_interrupt(irq, data, true);
	dw_edma_interrupt(irq, data, false);

	return IRQ_HANDLED;
}

static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	if (chan->status != EDMA_ST_IDLE)
		return -EBUSY;

	return 0;
}

static void dw_edma_free_chan_resources(struct dma_chan *dchan)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
	int ret;

	/* Keep trying to terminate the channel for up to 5 seconds */
	while (time_before(jiffies, timeout)) {
		ret = dw_edma_device_terminate_all(dchan);
		if (!ret)
			break;

		if (time_after_eq(jiffies, timeout))
			return;

		cpu_relax();
	}
}

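/*
 * One dma_device is registered per direction: dw->wr_edma groups the
 * write channels and dw->rd_edma the read channels. Each channel is tied
 * to the IRQ vector derived from the wr_alloc/rd_alloc split computed by
 * dw_edma_irq_request().
 */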
static int dw_edma_channel_setup(struct dw_edma *dw, bool write,
				 u32 wr_alloc, u32 rd_alloc)
{
	struct dw_edma_chip *chip = dw->chip;
	struct dw_edma_region *dt_region;
	struct device *dev = chip->dev;
	struct dw_edma_chan *chan;
	struct dw_edma_irq *irq;
	struct dma_device *dma;
	u32 alloc, off_alloc;
	u32 i, j, cnt;
	int err = 0;
	u32 pos;

	if (write) {
		i = 0;
		cnt = dw->wr_ch_cnt;
		dma = &dw->wr_edma;
		alloc = wr_alloc;
		off_alloc = 0;
	} else {
		i = dw->wr_ch_cnt;
		cnt = dw->rd_ch_cnt;
		dma = &dw->rd_edma;
		alloc = rd_alloc;
		off_alloc = wr_alloc;
	}

	INIT_LIST_HEAD(&dma->channels);
	for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) {
		chan = &dw->chan[i];

		dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL);
		if (!dt_region)
			return -ENOMEM;

		chan->vc.chan.private = dt_region;

		chan->dw = dw;
		chan->id = j;
		chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
		chan->configured = false;
		chan->request = EDMA_REQ_NONE;
		chan->status = EDMA_ST_IDLE;

		if (write)
			chan->ll_max = (chip->ll_region_wr[j].sz / EDMA_LL_SZ);
		else
			chan->ll_max = (chip->ll_region_rd[j].sz / EDMA_LL_SZ);
		chan->ll_max -= 1;

		dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
			 write ? "write" : "read", j, chan->ll_max);

		if (dw->nr_irqs == 1)
			pos = 0;
		else
			pos = off_alloc + (j % alloc);

		irq = &dw->irq[pos];

		if (write)
			irq->wr_mask |= BIT(j);
		else
			irq->rd_mask |= BIT(j);

		irq->dw = dw;
		memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));

		dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
			 write ? "write" : "read", j,
			 chan->msi.address_hi, chan->msi.address_lo,
			 chan->msi.data);

		chan->vc.desc_free = vchan_free_desc;
		vchan_init(&chan->vc, dma);

		if (write) {
			dt_region->paddr = chip->dt_region_wr[j].paddr;
			dt_region->vaddr = chip->dt_region_wr[j].vaddr;
			dt_region->sz = chip->dt_region_wr[j].sz;
		} else {
			dt_region->paddr = chip->dt_region_rd[j].paddr;
			dt_region->vaddr = chip->dt_region_rd[j].vaddr;
			dt_region->sz = chip->dt_region_rd[j].sz;
		}

		dw_edma_v0_core_device_config(chan);
	}

	/* Set DMA channel capabilities */
	dma_cap_zero(dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma->chancnt = cnt;

	/* Set DMA channel callbacks */
	dma->dev = chip->dev;
	dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources;
	dma->device_free_chan_resources = dw_edma_free_chan_resources;
	dma->device_config = dw_edma_device_config;
	dma->device_pause = dw_edma_device_pause;
	dma->device_resume = dw_edma_device_resume;
	dma->device_terminate_all = dw_edma_device_terminate_all;
	dma->device_issue_pending = dw_edma_device_issue_pending;
	dma->device_tx_status = dw_edma_device_tx_status;
	dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg;
	dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic;
	dma->device_prep_interleaved_dma = dw_edma_device_prep_interleaved_dma;

	dma_set_max_seg_size(dma->dev, U32_MAX);

	/* Register DMA device */
	err = dma_async_device_register(dma);

	return err;
}

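/*
 * IRQ distribution helpers. As an illustrative example (numbers are
 * hypothetical): with 4 vectors available for 6 write and 2 read
 * channels, the loop in dw_edma_irq_request() alternates between the
 * two directions until the vectors run out, yielding wr_alloc = 2 and
 * rd_alloc = 2; write channel j is then served by vector (j % wr_alloc)
 * and read channel j by vector (wr_alloc + (j % rd_alloc)), matching
 * the 'pos' computation in dw_edma_channel_setup().
 */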
static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt)
{
	/* Hand one more vector to this direction while any remain */
	if (*nr_irqs && *alloc < cnt) {
		(*alloc)++;
		(*nr_irqs)--;
	}
}

static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt)
{
	/* Raise *mask to the smallest value with *mask * alloc >= cnt */
	while (*mask * alloc < cnt)
		(*mask)++;
}

static int dw_edma_irq_request(struct dw_edma *dw,
			       u32 *wr_alloc, u32 *rd_alloc)
{
	struct dw_edma_chip *chip = dw->chip;
	struct device *dev = dw->chip->dev;
	u32 wr_mask = 1;
	u32 rd_mask = 1;
	int i, err = 0;
	u32 ch_cnt;
	int irq;

	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;

	if (chip->nr_irqs < 1 || !chip->ops->irq_vector)
		return -EINVAL;

	dw->irq = devm_kcalloc(dev, chip->nr_irqs, sizeof(*dw->irq), GFP_KERNEL);
	if (!dw->irq)
		return -ENOMEM;

	if (chip->nr_irqs == 1) {
		/* Common IRQ shared among all channels */
		irq = chip->ops->irq_vector(dev, 0);
		err = request_irq(irq, dw_edma_interrupt_common,
				  IRQF_SHARED, dw->name, &dw->irq[0]);
		if (err) {
			dw->nr_irqs = 0;
			return err;
		}

		if (irq_get_msi_desc(irq))
			get_cached_msi_msg(irq, &dw->irq[0].msi);

		dw->nr_irqs = 1;
	} else {
		/* Distribute IRQs equally among all channels */
		int tmp = chip->nr_irqs;

		while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {
			dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt);
			dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt);
		}

		dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt);
		dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);

		for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
			irq = chip->ops->irq_vector(dev, i);
			err = request_irq(irq,
					  i < *wr_alloc ?
						dw_edma_interrupt_write :
						dw_edma_interrupt_read,
					  IRQF_SHARED, dw->name,
					  &dw->irq[i]);
			if (err) {
				dw->nr_irqs = i;
				return err;
			}

			if (irq_get_msi_desc(irq))
				get_cached_msi_msg(irq, &dw->irq[i].msi);
		}

		dw->nr_irqs = i;
	}

	return err;
}

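/**
 * dw_edma_probe() - probe a DesignWare eDMA controller
 * @chip: chip descriptor filled in by the glue driver
 *
 * Counts the available write/read channels, requests the IRQ vectors and
 * registers one dma_device per direction.
 *
 * Return: 0 on success, a negative errno otherwise.
 */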
int dw_edma_probe(struct dw_edma_chip *chip)
{
	struct device *dev;
	struct dw_edma *dw;
	u32 wr_alloc = 0;
	u32 rd_alloc = 0;
	int i, err;

	if (!chip)
		return -EINVAL;

	dev = chip->dev;
	if (!dev || !chip->ops)
		return -EINVAL;

	dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	dw->chip = chip;

	raw_spin_lock_init(&dw->lock);

	dw->wr_ch_cnt = min_t(u16, chip->ll_wr_cnt,
			      dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE));
	dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH);

	dw->rd_ch_cnt = min_t(u16, chip->ll_rd_cnt,
			      dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ));
	dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH);

	if (!dw->wr_ch_cnt && !dw->rd_ch_cnt)
		return -EINVAL;

	dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n",
		 dw->wr_ch_cnt, dw->rd_ch_cnt);

	/* Allocate channels */
	dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id);

	/* Disable eDMA, only to establish the ideal initial conditions */
	dw_edma_v0_core_off(dw);

	/* Request IRQs */
	err = dw_edma_irq_request(dw, &wr_alloc, &rd_alloc);
	if (err)
		return err;

	/* Setup write channels */
	err = dw_edma_channel_setup(dw, true, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/* Setup read channels */
	err = dw_edma_channel_setup(dw, false, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/* Turn debugfs on */
	dw_edma_v0_core_debugfs_on(dw);

	chip->dw = dw;

	return 0;

err_irq_free:
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);

	return err;
}
EXPORT_SYMBOL_GPL(dw_edma_probe);

int dw_edma_remove(struct dw_edma_chip *chip)
{
	struct dw_edma_chan *chan, *_chan;
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	int i;

	/* Disable eDMA */
	dw_edma_v0_core_off(dw);

	/* Free irqs */
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);

	/* Deregister eDMA device */
	dma_async_device_unregister(&dw->wr_edma);
	list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
				 vc.chan.device_node) {
		tasklet_kill(&chan->vc.task);
		list_del(&chan->vc.chan.device_node);
	}

	dma_async_device_unregister(&dw->rd_edma);
	list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
				 vc.chan.device_node) {
		tasklet_kill(&chan->vc.task);
		list_del(&chan->vc.chan.device_node);
	}

	/* Turn debugfs off */
	dw_edma_v0_core_debugfs_off(dw);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_edma_remove);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver");
MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");