Lines Matching +full:dma +full:- +full:channel +full:- +full:mask

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
11 #include <dt-bindings/dma/at91.h>
53 #define AT_XDMAC_GIM 0x14 /* Global Interrupt Mask Register */
55 #define AT_XDMAC_GE 0x1C /* Global Channel Enable Register */
56 #define AT_XDMAC_GD 0x20 /* Global Channel Disable Register */
57 #define AT_XDMAC_GS 0x24 /* Global Channel Status Register */
60 /* Channel-relative register offsets */
61 #define AT_XDMAC_CIE 0x00 /* Channel Interrupt Enable Register */
69 #define AT_XDMAC_CID 0x04 /* Channel Interrupt Disable Register */
77 #define AT_XDMAC_CIM 0x08 /* Channel Interrupt Mask Register */
78 #define AT_XDMAC_CIM_BIM BIT(0) /* End of Block Interrupt Mask Bit */
79 #define AT_XDMAC_CIM_LIM BIT(1) /* End of Linked List Interrupt Mask Bit */
80 #define AT_XDMAC_CIM_DIM BIT(2) /* End of Disable Interrupt Mask Bit */
81 #define AT_XDMAC_CIM_FIM BIT(3) /* End of Flush Interrupt Mask Bit */
82 #define AT_XDMAC_CIM_RBEIM BIT(4) /* Read Bus Error Interrupt Mask Bit */
83 #define AT_XDMAC_CIM_WBEIM BIT(5) /* Write Bus Error Interrupt Mask Bit */
84 #define AT_XDMAC_CIM_ROIM BIT(6) /* Request Overflow Interrupt Mask Bit */
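
Taken together, these CIM bits define the per-channel interrupt word; the same bit layout is shared by the CIE, CID and CIS registers above. As a purely illustrative host-side sketch (the particular combination armed here is an assumption, not lifted from the driver), composing a typical slave-transfer mask looks like this:

```c
/* Illustrative only: compose a per-channel interrupt word using the
 * AT_XDMAC_CIM_* bit positions (end of linked list plus the three
 * error conditions). The chosen combination is an assumption. */
#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int irq_mask = BIT(1)	/* LIM: end of linked list */
			      | BIT(4)	/* RBEIM: read bus error */
			      | BIT(5)	/* WBEIM: write bus error */
			      | BIT(6);	/* ROIM: request overflow */

	printf("interrupt word: 0x%08x\n", irq_mask);	/* 0x00000072 */
	return 0;
}
```
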
85 #define AT_XDMAC_CIS 0x0C /* Channel Interrupt Status Register */
93 #define AT_XDMAC_CSA 0x10 /* Channel Source Address Register */
94 #define AT_XDMAC_CDA 0x14 /* Channel Destination Address Register */
95 #define AT_XDMAC_CNDA 0x18 /* Channel Next Descriptor Address Register */
96 #define AT_XDMAC_CNDA_NDAIF(i) ((i) & 0x1) /* Channel x Next Descriptor Interface */
97 #define AT_XDMAC_CNDA_NDA(i) ((i) & 0xfffffffc) /* Channel x Next Descriptor Address */
98 #define AT_XDMAC_CNDC 0x1C /* Channel Next Descriptor Control Register */
99 #define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */
100 #define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */
101 #define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */
102 #define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */
103 #define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */
104 #define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */
105 #define AT_XDMAC_CNDC_NDVIEW_NDV3 (0x3 << 3) /* Channel x Next Descriptor View 3 */
106 #define AT_XDMAC_CUBC 0x20 /* Channel Microblock Control Register */
107 #define AT_XDMAC_CBC 0x24 /* Channel Block Control Register */
108 #define AT_XDMAC_CC 0x28 /* Channel Configuration Register */
109 #define AT_XDMAC_CC_TYPE (0x1 << 0) /* Channel Transfer Type */
117 #define AT_XDMAC_CC_DSYNC (0x1 << 4) /* Channel Synchronization */
120 #define AT_XDMAC_CC_PROT (0x1 << 5) /* Channel Protection */
123 #define AT_XDMAC_CC_SWREQ (0x1 << 6) /* Channel Software Request Trigger */
126 #define AT_XDMAC_CC_MEMSET (0x1 << 7) /* Channel Fill Block of Memory */
129 #define AT_XDMAC_CC_CSIZE(i) ((0x7 & (i)) << 8) /* Channel Chunk Size */
132 #define AT_XDMAC_CC_DWIDTH(i) ((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET) /* Channel Data Width */
137 #define AT_XDMAC_CC_SIF(i) ((0x1 & (i)) << 13) /* Channel Source Interface Identifier */
138 #define AT_XDMAC_CC_DIF(i) ((0x1 & (i)) << 14) /* Channel Destination Interface Identifier */
139 #define AT_XDMAC_CC_SAM_MASK (0x3 << 16) /* Channel Source Addressing Mode */
144 #define AT_XDMAC_CC_DAM_MASK (0x3 << 18) /* Channel Destination Addressing Mode */
149 #define AT_XDMAC_CC_INITD (0x1 << 21) /* Channel Initialization Terminated (read only) */
158 #define AT_XDMAC_CC_PERID(i) ((0x7f & (i)) << 24) /* Channel Peripheral Identifier */
159 #define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */
160 #define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */
161 #define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */
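
The parentheses in AT_XDMAC_CC_PERID() above are load-bearing: in C, `<<` binds tighter than `&`, so the unparenthesized form `0x7f & (i) << 24` masks after shifting and always evaluates to 0. A minimal host-side check of the two forms:

```c
/* Host-side check of the AT_XDMAC_CC_PERID() operator precedence:
 * the broken form masks *after* shifting and always yields 0. */
#include <stdio.h>

#define PERID_BROKEN(i)	(0x7f & (i) << 24)	/* = 0x7f & ((i) << 24) */
#define PERID_FIXED(i)	((0x7f & (i)) << 24)

int main(void)
{
	unsigned int id = 0x25;	/* an arbitrary peripheral identifier */

	printf("broken: 0x%08x\n", PERID_BROKEN(id));	/* 0x00000000 */
	printf("fixed:  0x%08x\n", PERID_FIXED(id));	/* 0x25000000 */
	return 0;
}
```
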
191 /* Global Channel Read Suspend Register */
195 /* Global Channel Read Write Suspend Register */
197 /* Global Channel Read Write Resume Register */
199 /* Global Channel Software Request Register */
201 /* Global Channel Software Request Status Register */
203 /* Global Channel Software Flush Request Register */
205 /* Channel reg base */
213 /* ----- Channels ----- */
217 u32 mask; /* Channel Mask */ member
218 u32 cfg; /* Channel Configuration Register */
238 /* ----- Controller ----- */
240 struct dma_device dma; member
251 /* ----- Descriptors ----- */
266 /* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
307 return atxdmac->regs + (atxdmac->layout->chan_cc_reg_base + chan_nb * 0x40); in at_xdmac_chan_reg_base()
310 #define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
312 writel_relaxed((value), (atxdmac)->regs + (reg))
314 #define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
315 #define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))
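
These helpers resolve to plain relaxed MMIO on `regs + offset`, with each channel's register window laid out every 0x40 bytes from the layout's `chan_cc_reg_base` (see at_xdmac_chan_reg_base() above). A host-side sketch of that offset arithmetic; the 0x50 base is an assumed layout value for illustration, not taken from this listing:

```c
/* Illustrative offset arithmetic behind at_xdmac_chan_reg_base():
 * each channel occupies a 0x40-byte window starting at the layout's
 * chan_cc_reg_base (0x50 here is an assumption for the example). */
#include <stdio.h>

int main(void)
{
	unsigned int chan_cc_reg_base = 0x50;	/* assumed layout value */
	unsigned int ch;

	for (ch = 0; ch < 4; ch++)
		printf("channel %u regs at controller offset 0x%03x\n",
		       ch, chan_cc_reg_base + ch * 0x40);
	return 0;
}
```
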
324 return &chan->dev->device; in chan2dev()
329 return container_of(ddev, struct at_xdmac, dma); in to_at_xdmac()
339 return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); in at_xdmac_chan_is_cyclic()
344 return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); in at_xdmac_chan_is_paused()
360 "initial descriptors per channel (default: 64)");
365 return at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask; in at_xdmac_chan_is_enabled()
370 at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L); in at_xdmac_off()
376 at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L); in at_xdmac_off()
383 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_start_xfer()
386 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first); in at_xdmac_start_xfer()
392 first->active_xfer = true; in at_xdmac_start_xfer()
395 reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys); in at_xdmac_start_xfer()
396 if (atxdmac->layout->sdif) in at_xdmac_start_xfer()
397 reg |= AT_XDMAC_CNDA_NDAIF(atchan->memif); in at_xdmac_start_xfer()
408 else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) in at_xdmac_start_xfer()
415 * properly. This bit can be modified only by using the channel in at_xdmac_start_xfer()
418 at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg); in at_xdmac_start_xfer()
425 dev_vdbg(chan2dev(&atchan->chan), in at_xdmac_start_xfer()
439 if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg)) in at_xdmac_start_xfer()
443 * There is no end of list when doing cyclic dma, we need to get in at_xdmac_start_xfer()
452 at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask); in at_xdmac_start_xfer()
453 dev_vdbg(chan2dev(&atchan->chan), in at_xdmac_start_xfer()
454 "%s: enable channel (0x%08x)\n", __func__, atchan->mask); in at_xdmac_start_xfer()
456 at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask); in at_xdmac_start_xfer()
458 dev_vdbg(chan2dev(&atchan->chan), in at_xdmac_start_xfer()
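
Condensing the at_xdmac_start_xfer() fragments above, the start sequence is: point CNDA at the first descriptor, program the descriptor-fetch control word (CNDC) and the channel configuration (CC), then unmask and enable the channel through the global GIE/GE registers. A host-side sketch with a stub in place of the MMIO helpers; the descriptor address, the zero mbr_cfg and the NDV3 view choice are assumptions for illustration:

```c
/* Condensed sketch of the channel start sequence; reg_write() is a
 * stub standing in for the driver's MMIO helpers. */
#include <stdio.h>

static void reg_write(const char *name, unsigned int val)
{
	printf("%-4s <= 0x%08x\n", name, val);
}

int main(void)
{
	unsigned int first_phys = 0x20001000;	/* assumed descriptor address */
	unsigned int chan_mask = 1u << 2;	/* channel 2 */
	unsigned int mbr_cfg = 0;	/* stands in for first->lld.mbr_cfg */

	reg_write("CNDA", first_phys & 0xfffffffc);	/* AT_XDMAC_CNDA_NDA() */
	reg_write("CNDC", (1u << 0) | (0x3u << 3));	/* NDE | NDVIEW_NDV3 */
	reg_write("CC", mbr_cfg);
	reg_write("GIE", chan_mask);	/* unmask the channel's interrupt */
	reg_write("GE", chan_mask);	/* enable the channel */
	return 0;
}
```
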
472 struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan); in at_xdmac_tx_submit()
476 spin_lock_irqsave(&atchan->lock, irqflags); in at_xdmac_tx_submit()
479 dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n", in at_xdmac_tx_submit()
481 list_add_tail(&desc->xfer_node, &atchan->xfers_list); in at_xdmac_tx_submit()
482 if (list_is_singular(&atchan->xfers_list)) in at_xdmac_tx_submit()
485 spin_unlock_irqrestore(&atchan->lock, irqflags); in at_xdmac_tx_submit()
493 struct at_xdmac *atxdmac = to_at_xdmac(chan->device); in at_xdmac_alloc_desc()
496 desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys); in at_xdmac_alloc_desc()
498 INIT_LIST_HEAD(&desc->descs_list); in at_xdmac_alloc_desc()
499 dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan); in at_xdmac_alloc_desc()
500 desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit; in at_xdmac_alloc_desc()
501 desc->tx_dma_desc.phys = phys; in at_xdmac_alloc_desc()
509 memset(&desc->lld, 0, sizeof(desc->lld)); in at_xdmac_init_used_desc()
510 INIT_LIST_HEAD(&desc->descs_list); in at_xdmac_init_used_desc()
511 desc->direction = DMA_TRANS_NONE; in at_xdmac_init_used_desc()
512 desc->xfer_size = 0; in at_xdmac_init_used_desc()
513 desc->active_xfer = false; in at_xdmac_init_used_desc()
521 if (list_empty(&atchan->free_descs_list)) { in at_xdmac_get_desc()
522 desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT); in at_xdmac_get_desc()
524 desc = list_first_entry(&atchan->free_descs_list, in at_xdmac_get_desc()
526 list_del(&desc->desc_node); in at_xdmac_get_desc()
540 prev->lld.mbr_nda = desc->tx_dma_desc.phys; in at_xdmac_queue_desc()
541 prev->lld.mbr_ubc |= AT_XDMAC_MBR_UBC_NDE; in at_xdmac_queue_desc()
544 __func__, prev, &prev->lld.mbr_nda); in at_xdmac_queue_desc()
553 desc->lld.mbr_bc++; in at_xdmac_increment_block_count()
563 struct at_xdmac *atxdmac = of_dma->of_dma_data; in at_xdmac_xlate()
566 struct device *dev = atxdmac->dma.dev; in at_xdmac_xlate()
568 if (dma_spec->args_count != 1) { in at_xdmac_xlate()
569 dev_err(dev, "dma phandle args: bad number of args\n"); in at_xdmac_xlate()
573 chan = dma_get_any_slave_channel(&atxdmac->dma); in at_xdmac_xlate()
575 dev_err(dev, "can't get a dma channel\n"); in at_xdmac_xlate()
580 atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]); in at_xdmac_xlate()
581 atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]); in at_xdmac_xlate()
582 atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]); in at_xdmac_xlate()
584 atchan->memif, atchan->perif, atchan->perid); in at_xdmac_xlate()
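
at_xdmac_xlate() consumes a single DT cell and unpacks three fields with the AT91_XDMAC_DT_GET_* helpers. A host-side sketch of that unpacking; the bit positions (MEM_IF at bit 13, PER_IF at bit 14, PERID at bits 24-30) follow the usual dt-bindings/dma/at91.h layout and should be treated as assumptions here:

```c
/* Illustrative unpacking of the single DT cell: MEM_IF, PER_IF and
 * PERID fields at assumed bit positions from dt-bindings/dma/at91.h. */
#include <stdio.h>

int main(void)
{
	unsigned int cfg = (0x25u << 24)	/* PERID */
			 | (0u << 14)		/* PER_IF */
			 | (1u << 13);		/* MEM_IF */

	printf("memif=%u perif=%u perid=0x%02x\n",
	       (cfg >> 13) & 0x1, (cfg >> 14) & 0x1, (cfg >> 24) & 0x7f);
	return 0;
}
```
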
593 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_compute_chan_conf()
597 atchan->cfg = in at_xdmac_compute_chan_conf()
598 AT91_XDMAC_DT_PERID(atchan->perid) in at_xdmac_compute_chan_conf()
605 if (atxdmac->layout->sdif) in at_xdmac_compute_chan_conf()
606 atchan->cfg |= AT_XDMAC_CC_DIF(atchan->memif) | in at_xdmac_compute_chan_conf()
607 AT_XDMAC_CC_SIF(atchan->perif); in at_xdmac_compute_chan_conf()
609 csize = ffs(atchan->sconfig.src_maxburst) - 1; in at_xdmac_compute_chan_conf()
612 return -EINVAL; in at_xdmac_compute_chan_conf()
614 atchan->cfg |= AT_XDMAC_CC_CSIZE(csize); in at_xdmac_compute_chan_conf()
615 dwidth = ffs(atchan->sconfig.src_addr_width) - 1; in at_xdmac_compute_chan_conf()
618 return -EINVAL; in at_xdmac_compute_chan_conf()
620 atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth); in at_xdmac_compute_chan_conf()
622 atchan->cfg = in at_xdmac_compute_chan_conf()
623 AT91_XDMAC_DT_PERID(atchan->perid) in at_xdmac_compute_chan_conf()
630 if (atxdmac->layout->sdif) in at_xdmac_compute_chan_conf()
631 atchan->cfg |= AT_XDMAC_CC_DIF(atchan->perif) | in at_xdmac_compute_chan_conf()
632 AT_XDMAC_CC_SIF(atchan->memif); in at_xdmac_compute_chan_conf()
634 csize = ffs(atchan->sconfig.dst_maxburst) - 1; in at_xdmac_compute_chan_conf()
637 return -EINVAL; in at_xdmac_compute_chan_conf()
639 atchan->cfg |= AT_XDMAC_CC_CSIZE(csize); in at_xdmac_compute_chan_conf()
640 dwidth = ffs(atchan->sconfig.dst_addr_width) - 1; in at_xdmac_compute_chan_conf()
643 return -EINVAL; in at_xdmac_compute_chan_conf()
645 atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth); in at_xdmac_compute_chan_conf()
648 dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg); in at_xdmac_compute_chan_conf()
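
The CSIZE/DWIDTH encodings above rely on maxburst and address width being powers of two, so `ffs(x) - 1` yields the log2 value the register field expects. A quick host-side check using __builtin_ffs() in place of the kernel's ffs():

```c
/* Worked example of the ffs()-based field encoding in
 * at_xdmac_compute_chan_conf(): power-of-two -> log2 via ffs(x) - 1. */
#include <stdio.h>

int main(void)
{
	int maxburst = 16;	/* beats per chunk, must be a power of two */
	int addr_width = 4;	/* bytes per beat */

	int csize = __builtin_ffs(maxburst) - 1;	/* 16 -> 4 */
	int dwidth = __builtin_ffs(addr_width) - 1;	/* 4 -> 2 */

	printf("CSIZE field = %d, DWIDTH field = %d\n", csize, dwidth);
	return 0;
}
```
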
660 if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE) in at_xdmac_check_slave_config()
661 || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE)) in at_xdmac_check_slave_config()
662 return -EINVAL; in at_xdmac_check_slave_config()
664 if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH) in at_xdmac_check_slave_config()
665 || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH)) in at_xdmac_check_slave_config()
666 return -EINVAL; in at_xdmac_check_slave_config()
678 return -EINVAL; in at_xdmac_set_slave_config()
681 memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig)); in at_xdmac_set_slave_config()
703 dev_err(chan2dev(chan), "invalid DMA direction\n"); in at_xdmac_prep_slave_sg()
713 spin_lock_irqsave(&atchan->lock, irqflags); in at_xdmac_prep_slave_sg()
736 list_splice_init(&first->descs_list, &atchan->free_descs_list); in at_xdmac_prep_slave_sg()
742 desc->lld.mbr_sa = atchan->sconfig.src_addr; in at_xdmac_prep_slave_sg()
743 desc->lld.mbr_da = mem; in at_xdmac_prep_slave_sg()
745 desc->lld.mbr_sa = mem; in at_xdmac_prep_slave_sg()
746 desc->lld.mbr_da = atchan->sconfig.dst_addr; in at_xdmac_prep_slave_sg()
748 dwidth = at_xdmac_get_dwidth(atchan->cfg); in at_xdmac_prep_slave_sg()
752 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */ in at_xdmac_prep_slave_sg()
756 desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) | in at_xdmac_prep_slave_sg()
760 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); in at_xdmac_prep_slave_sg()
772 list_add_tail(&desc->desc_node, &first->descs_list); in at_xdmac_prep_slave_sg()
777 first->tx_dma_desc.flags = flags; in at_xdmac_prep_slave_sg()
778 first->xfer_size = xfer_size; in at_xdmac_prep_slave_sg()
779 first->direction = direction; in at_xdmac_prep_slave_sg()
780 ret = &first->tx_dma_desc; in at_xdmac_prep_slave_sg()
783 spin_unlock_irqrestore(&atchan->lock, irqflags); in at_xdmac_prep_slave_sg()
804 dev_err(chan2dev(chan), "invalid DMA direction\n"); in at_xdmac_prep_dma_cyclic()
808 if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) { in at_xdmac_prep_dma_cyclic()
809 dev_err(chan2dev(chan), "channel currently used\n"); in at_xdmac_prep_dma_cyclic()
819 spin_lock_irqsave(&atchan->lock, irqflags); in at_xdmac_prep_dma_cyclic()
824 list_splice_init(&first->descs_list, &atchan->free_descs_list); in at_xdmac_prep_dma_cyclic()
825 spin_unlock_irqrestore(&atchan->lock, irqflags); in at_xdmac_prep_dma_cyclic()
828 spin_unlock_irqrestore(&atchan->lock, irqflags); in at_xdmac_prep_dma_cyclic()
831 __func__, desc, &desc->tx_dma_desc.phys); in at_xdmac_prep_dma_cyclic()
834 desc->lld.mbr_sa = atchan->sconfig.src_addr; in at_xdmac_prep_dma_cyclic()
835 desc->lld.mbr_da = buf_addr + i * period_len; in at_xdmac_prep_dma_cyclic()
837 desc->lld.mbr_sa = buf_addr + i * period_len; in at_xdmac_prep_dma_cyclic()
838 desc->lld.mbr_da = atchan->sconfig.dst_addr; in at_xdmac_prep_dma_cyclic()
840 desc->lld.mbr_cfg = atchan->cfg; in at_xdmac_prep_dma_cyclic()
841 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 in at_xdmac_prep_dma_cyclic()
844 | period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg); in at_xdmac_prep_dma_cyclic()
848 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); in at_xdmac_prep_dma_cyclic()
860 list_add_tail(&desc->desc_node, &first->descs_list); in at_xdmac_prep_dma_cyclic()
864 first->tx_dma_desc.flags = flags; in at_xdmac_prep_dma_cyclic()
865 first->xfer_size = buf_len; in at_xdmac_prep_dma_cyclic()
866 first->direction = direction; in at_xdmac_prep_dma_cyclic()
868 return &first->tx_dma_desc; in at_xdmac_prep_dma_cyclic()
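
Note the `period_len >> at_xdmac_get_dwidth(...)` term in the cyclic descriptor setup above: the microblock length field of mbr_ubc counts data-width units, not bytes. A one-line host-side check of that conversion:

```c
/* The microblock length is expressed in data-width units, hence the
 * bytes-to-units shift by dwidth. */
#include <stdio.h>

int main(void)
{
	unsigned int period_len = 4096;	/* bytes per period */
	unsigned int dwidth = 2;	/* log2 of a 4-byte data width */

	printf("ublen = %u transfer units\n", period_len >> dwidth); /* 1024 */
	return 0;
}
```
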
913 * WARNING: The channel configuration is set here since there is no in at_xdmac_interleaved_queue_desc()
921 * match the one of another channel. If not, it could lead to spurious in at_xdmac_interleaved_queue_desc()
932 dwidth = at_xdmac_align_width(chan, src | dst | chunk->size); in at_xdmac_interleaved_queue_desc()
933 if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) { in at_xdmac_interleaved_queue_desc()
936 __func__, chunk->size, in at_xdmac_interleaved_queue_desc()
945 if (xt->src_inc) { in at_xdmac_interleaved_queue_desc()
946 if (xt->src_sgl) in at_xdmac_interleaved_queue_desc()
952 if (xt->dst_inc) { in at_xdmac_interleaved_queue_desc()
953 if (xt->dst_sgl) in at_xdmac_interleaved_queue_desc()
959 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_interleaved_queue_desc()
961 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_interleaved_queue_desc()
969 ublen = chunk->size >> dwidth; in at_xdmac_interleaved_queue_desc()
971 desc->lld.mbr_sa = src; in at_xdmac_interleaved_queue_desc()
972 desc->lld.mbr_da = dst; in at_xdmac_interleaved_queue_desc()
973 desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk); in at_xdmac_interleaved_queue_desc()
974 desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk); in at_xdmac_interleaved_queue_desc()
976 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3 in at_xdmac_interleaved_queue_desc()
980 desc->lld.mbr_cfg = chan_cc; in at_xdmac_interleaved_queue_desc()
984 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, in at_xdmac_interleaved_queue_desc()
985 desc->lld.mbr_ubc, desc->lld.mbr_cfg); in at_xdmac_interleaved_queue_desc()
1006 if (!xt || !xt->numf || (xt->dir != DMA_MEM_TO_MEM)) in at_xdmac_prep_interleaved()
1013 if ((xt->numf > 1) && (xt->frame_size > 1)) in at_xdmac_prep_interleaved()
1017 __func__, &xt->src_start, &xt->dst_start, xt->numf, in at_xdmac_prep_interleaved()
1018 xt->frame_size, flags); in at_xdmac_prep_interleaved()
1020 src_addr = xt->src_start; in at_xdmac_prep_interleaved()
1021 dst_addr = xt->dst_start; in at_xdmac_prep_interleaved()
1023 if (xt->numf > 1) { in at_xdmac_prep_interleaved()
1027 xt, xt->sgl); in at_xdmac_prep_interleaved()
1030 for (i = 0; i < xt->numf - 1; i++) in at_xdmac_prep_interleaved()
1035 list_add_tail(&first->desc_node, &first->descs_list); in at_xdmac_prep_interleaved()
1037 for (i = 0; i < xt->frame_size; i++) { in at_xdmac_prep_interleaved()
1041 chunk = xt->sgl + i; in at_xdmac_prep_interleaved()
1046 src_skip = chunk->size + src_icg; in at_xdmac_prep_interleaved()
1047 dst_skip = chunk->size + dst_icg; in at_xdmac_prep_interleaved()
1051 __func__, chunk->size, src_icg, dst_icg); in at_xdmac_prep_interleaved()
1058 list_splice_init(&first->descs_list, in at_xdmac_prep_interleaved()
1059 &atchan->free_descs_list); in at_xdmac_prep_interleaved()
1068 list_add_tail(&desc->desc_node, &first->descs_list); in at_xdmac_prep_interleaved()
1070 if (xt->src_sgl) in at_xdmac_prep_interleaved()
1073 if (xt->dst_sgl) in at_xdmac_prep_interleaved()
1076 len += chunk->size; in at_xdmac_prep_interleaved()
1081 first->tx_dma_desc.cookie = -EBUSY; in at_xdmac_prep_interleaved()
1082 first->tx_dma_desc.flags = flags; in at_xdmac_prep_interleaved()
1083 first->xfer_size = len; in at_xdmac_prep_interleaved()
1085 return &first->tx_dma_desc; in at_xdmac_prep_interleaved()
1105 * match the one of another channel. If not, it could lead to spurious in at_xdmac_prep_dma_memcpy()
1133 spin_lock_irqsave(&atchan->lock, irqflags); in at_xdmac_prep_dma_memcpy()
1135 spin_unlock_irqrestore(&atchan->lock, irqflags); in at_xdmac_prep_dma_memcpy()
1139 list_splice_init(&first->descs_list, &atchan->free_descs_list); in at_xdmac_prep_dma_memcpy()
1161 remaining_size -= xfer_size; in at_xdmac_prep_dma_memcpy()
1163 desc->lld.mbr_sa = src_addr; in at_xdmac_prep_dma_memcpy()
1164 desc->lld.mbr_da = dst_addr; in at_xdmac_prep_dma_memcpy()
1165 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 in at_xdmac_prep_dma_memcpy()
1169 desc->lld.mbr_cfg = chan_cc; in at_xdmac_prep_dma_memcpy()
1173 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg); in at_xdmac_prep_dma_memcpy()
1185 list_add_tail(&desc->desc_node, &first->descs_list); in at_xdmac_prep_dma_memcpy()
1188 first->tx_dma_desc.flags = flags; in at_xdmac_prep_dma_memcpy()
1189 first->xfer_size = len; in at_xdmac_prep_dma_memcpy()
1191 return &first->tx_dma_desc; in at_xdmac_prep_dma_memcpy()
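
The memcpy path splits a long copy into linked-list descriptors, each limited to AT_XDMAC_MBR_UBC_UBLEN_MAX data-width units (a 24-bit count, consistent with the `& 0xffffff` masking in at_xdmac_tx_status() below). A host-side model of that chunking loop, assuming the 24-bit field width:

```c
/* Model of the memcpy chunking: each descriptor carries at most
 * UBLEN_MAX << dwidth bytes, so a long copy is split into maximal
 * chunks until nothing remains. */
#include <stdio.h>

#define UBLEN_MAX 0xffffffu	/* assumed 24-bit microblock length field */

int main(void)
{
	unsigned long long remaining = 100ull << 20;	/* 100 MiB */
	unsigned int dwidth = 2;			/* 4-byte units */
	unsigned int ndescs = 0;

	while (remaining) {
		unsigned long long max = (unsigned long long)UBLEN_MAX << dwidth;
		unsigned long long xfer = remaining < max ? remaining : max;

		remaining -= xfer;
		ndescs++;
	}
	printf("descriptors needed: %u\n", ndescs);	/* 2 */
	return 0;
}
```
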
1205 * WARNING: The channel configuration is set here since there is no in at_xdmac_memset_create_desc()
1213 * match the one of another channel. If not, it could lead to spurious in at_xdmac_memset_create_desc()
1236 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_memset_create_desc()
1238 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_memset_create_desc()
1248 desc->lld.mbr_da = dst_addr; in at_xdmac_memset_create_desc()
1249 desc->lld.mbr_ds = value; in at_xdmac_memset_create_desc()
1250 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3 in at_xdmac_memset_create_desc()
1254 desc->lld.mbr_cfg = chan_cc; in at_xdmac_memset_create_desc()
1258 __func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc, in at_xdmac_memset_create_desc()
1259 desc->lld.mbr_cfg); in at_xdmac_memset_create_desc()
1278 list_add_tail(&desc->desc_node, &desc->descs_list); in at_xdmac_prep_dma_memset()
1280 desc->tx_dma_desc.cookie = -EBUSY; in at_xdmac_prep_dma_memset()
1281 desc->tx_dma_desc.flags = flags; in at_xdmac_prep_dma_memset()
1282 desc->xfer_size = len; in at_xdmac_prep_dma_memset()
1284 return &desc->tx_dma_desc; in at_xdmac_prep_dma_memset()
1315 list_splice_init(&first->descs_list, in at_xdmac_prep_dma_memset_sg()
1316 &atchan->free_descs_list); in at_xdmac_prep_dma_memset_sg()
1324 stride = sg_dma_address(sg) - in at_xdmac_prep_dma_memset_sg()
1336 * +-------+ +-------+ +-------+ in at_xdmac_prep_dma_memset_sg()
1337 * | N-2 | | N-1 | | N | in at_xdmac_prep_dma_memset_sg()
1338 * +-------+ +-------+ +-------+ in at_xdmac_prep_dma_memset_sg()
1340 * We need all these three elements (N-2, N-1 and N) in at_xdmac_prep_dma_memset_sg()
1342 * queue N-1 or reuse N-2. in at_xdmac_prep_dma_memset_sg()
1355 * N-2 descriptor in at_xdmac_prep_dma_memset_sg()
1358 ppdesc->lld.mbr_dus = stride; in at_xdmac_prep_dma_memset_sg()
1361 * Put back the N-1 descriptor in the in at_xdmac_prep_dma_memset_sg()
1364 list_add_tail(&pdesc->desc_node, in at_xdmac_prep_dma_memset_sg()
1365 &atchan->free_descs_list); in at_xdmac_prep_dma_memset_sg()
1368 * Make our N-1 descriptor pointer in at_xdmac_prep_dma_memset_sg()
1369 * point to the N-2 since they were in at_xdmac_prep_dma_memset_sg()
1385 * Queue the N-1 descriptor after the in at_xdmac_prep_dma_memset_sg()
1386 * N-2 in at_xdmac_prep_dma_memset_sg()
1391 * Add the N-1 descriptor to the list in at_xdmac_prep_dma_memset_sg()
1395 list_add_tail(&desc->desc_node, in at_xdmac_prep_dma_memset_sg()
1396 &first->descs_list); in at_xdmac_prep_dma_memset_sg()
1410 if ((i == (sg_len - 1)) && in at_xdmac_prep_dma_memset_sg()
1417 * Increment the block count of the N-1 in at_xdmac_prep_dma_memset_sg()
1421 pdesc->lld.mbr_dus = stride; in at_xdmac_prep_dma_memset_sg()
1427 list_add_tail(&desc->desc_node, in at_xdmac_prep_dma_memset_sg()
1428 &atchan->free_descs_list); in at_xdmac_prep_dma_memset_sg()
1442 first->tx_dma_desc.cookie = -EBUSY; in at_xdmac_prep_dma_memset_sg()
1443 first->tx_dma_desc.flags = flags; in at_xdmac_prep_dma_memset_sg()
1444 first->xfer_size = len; in at_xdmac_prep_dma_memset_sg()
1446 return &first->tx_dma_desc; in at_xdmac_prep_dma_memset_sg()
1454 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_tx_status()
1459 u32 cur_nda, check_nda, cur_ubc, mask, value; in at_xdmac_tx_status() local
1471 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_tx_status()
1473 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node); in at_xdmac_tx_status()
1479 if (!desc->active_xfer) { in at_xdmac_tx_status()
1480 dma_set_residue(txstate, desc->xfer_size); in at_xdmac_tx_status()
1484 residue = desc->xfer_size; in at_xdmac_tx_status()
1492 * timeout, it requests the residue. If the data are in the DMA FIFO, in at_xdmac_tx_status()
1498 mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC; in at_xdmac_tx_status()
1500 if ((desc->lld.mbr_cfg & mask) == value) { in at_xdmac_tx_status()
1501 at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask); in at_xdmac_tx_status()
1507 * The easiest way to compute the residue should be to pause the DMA in at_xdmac_tx_status()
1511 * - DMA is running therefore a descriptor change is possible while in at_xdmac_tx_status()
1513 * - When the block transfer is done, the value of the CUBC register in at_xdmac_tx_status()
1518 * INITD -------- ------------ in at_xdmac_tx_status()
1558 if ((desc->lld.mbr_cfg & mask) == value) { in at_xdmac_tx_status()
1559 at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask); in at_xdmac_tx_status()
1569 descs_list = &desc->descs_list; in at_xdmac_tx_status()
1571 dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg); in at_xdmac_tx_status()
1572 residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth; in at_xdmac_tx_status()
1573 if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda) in at_xdmac_tx_status()
1582 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue); in at_xdmac_tx_status()
1585 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_tx_status()
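
Putting the tx_status fragments together: the residue starts at the descriptor's full xfer_size, each completed microblock's length (low 24 bits of mbr_ubc, scaled by the data width) is subtracted while walking descs_list until the descriptor whose mbr_nda matches the channel's current CNDA, and then the units still pending in the current microblock (CUBC) are added back. A host-side model of that arithmetic with made-up values:

```c
/* Model of the residue walk: subtract completed microblocks up to and
 * including the current descriptor, then add back what CUBC reports
 * as still pending. All values are invented for illustration. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ubcs[] = { 0x1000, 0x1000, 0x1000 };	/* units/descriptor */
	uint32_t dwidth = 2;		/* 4-byte units */
	unsigned int cur_desc = 1;	/* CNDA points past descriptor 1 */
	uint32_t cur_ubc = 0x400;	/* units left in that microblock */
	uint64_t residue = (0x1000ull * 3) << dwidth;	/* full xfer_size */
	unsigned int i;

	for (i = 0; i <= cur_desc; i++)
		residue -= (uint64_t)(ubcs[i] & 0xffffff) << dwidth;
	residue += (uint64_t)cur_ubc << dwidth;

	printf("residue = %llu bytes\n",
	       (unsigned long long)residue);	/* 20480 */
	return 0;
}
```
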
1593 dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); in at_xdmac_remove_xfer()
1599 list_del(&desc->xfer_node); in at_xdmac_remove_xfer()
1600 list_splice_init(&desc->descs_list, &atchan->free_descs_list); in at_xdmac_remove_xfer()
1608 * If channel is enabled, do nothing, advance_work will be triggered in at_xdmac_advance_work()
1611 if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) { in at_xdmac_advance_work()
1612 desc = list_first_entry(&atchan->xfers_list, in at_xdmac_advance_work()
1615 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); in at_xdmac_advance_work()
1616 if (!desc->active_xfer) in at_xdmac_advance_work()
1626 if (!list_empty(&atchan->xfers_list)) { in at_xdmac_handle_cyclic()
1627 desc = list_first_entry(&atchan->xfers_list, in at_xdmac_handle_cyclic()
1629 txd = &desc->tx_dma_desc; in at_xdmac_handle_cyclic()
1631 if (txd->flags & DMA_PREP_INTERRUPT) in at_xdmac_handle_cyclic()
1638 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_handle_error()
1647 if (atchan->irq_status & AT_XDMAC_CIS_RBEIS) in at_xdmac_handle_error()
1648 dev_err(chan2dev(&atchan->chan), "read bus error!!!"); in at_xdmac_handle_error()
1649 if (atchan->irq_status & AT_XDMAC_CIS_WBEIS) in at_xdmac_handle_error()
1650 dev_err(chan2dev(&atchan->chan), "write bus error!!!"); in at_xdmac_handle_error()
1651 if (atchan->irq_status & AT_XDMAC_CIS_ROIS) in at_xdmac_handle_error()
1652 dev_err(chan2dev(&atchan->chan), "request overflow error!!!"); in at_xdmac_handle_error()
1654 spin_lock_irq(&atchan->lock); in at_xdmac_handle_error()
1656 /* Channel must be disabled first as it's not done automatically */ in at_xdmac_handle_error()
1657 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); in at_xdmac_handle_error()
1658 while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask) in at_xdmac_handle_error()
1661 bad_desc = list_first_entry(&atchan->xfers_list, in at_xdmac_handle_error()
1665 spin_unlock_irq(&atchan->lock); in at_xdmac_handle_error()
1668 dev_dbg(chan2dev(&atchan->chan), in at_xdmac_handle_error()
1670 __func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da, in at_xdmac_handle_error()
1671 bad_desc->lld.mbr_ubc); in at_xdmac_handle_error()
1682 dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n", in at_xdmac_tasklet()
1683 __func__, atchan->irq_status); in at_xdmac_tasklet()
1691 } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS) in at_xdmac_tasklet()
1692 || (atchan->irq_status & error_mask)) { in at_xdmac_tasklet()
1695 if (atchan->irq_status & error_mask) in at_xdmac_tasklet()
1698 spin_lock_irq(&atchan->lock); in at_xdmac_tasklet()
1699 desc = list_first_entry(&atchan->xfers_list, in at_xdmac_tasklet()
1702 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); in at_xdmac_tasklet()
1703 if (!desc->active_xfer) { in at_xdmac_tasklet()
1704 dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting"); in at_xdmac_tasklet()
1705 spin_unlock_irq(&atchan->lock); in at_xdmac_tasklet()
1709 txd = &desc->tx_dma_desc; in at_xdmac_tasklet()
1712 spin_unlock_irq(&atchan->lock); in at_xdmac_tasklet()
1715 if (txd->flags & DMA_PREP_INTERRUPT) in at_xdmac_tasklet()
1720 spin_lock_irq(&atchan->lock); in at_xdmac_tasklet()
1722 spin_unlock_irq(&atchan->lock); in at_xdmac_tasklet()
1739 dev_vdbg(atxdmac->dma.dev, in at_xdmac_interrupt()
1746 /* We have to find which channel has generated the interrupt. */ in at_xdmac_interrupt()
1747 for (i = 0; i < atxdmac->dma.chancnt; i++) { in at_xdmac_interrupt()
1751 atchan = &atxdmac->chan[i]; in at_xdmac_interrupt()
1754 atchan->irq_status = chan_status & chan_imr; in at_xdmac_interrupt()
1755 dev_vdbg(atxdmac->dma.dev, in at_xdmac_interrupt()
1758 dev_vdbg(chan2dev(&atchan->chan), in at_xdmac_interrupt()
1768 if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) in at_xdmac_interrupt()
1769 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); in at_xdmac_interrupt()
1771 tasklet_schedule(&atchan->tasklet); in at_xdmac_interrupt()
1785 dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__); in at_xdmac_issue_pending()
1788 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_issue_pending()
1790 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_issue_pending()
1805 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_device_config()
1807 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_device_config()
1815 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_device_pause()
1820 if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status)) in at_xdmac_device_pause()
1823 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_device_pause()
1824 at_xdmac_write(atxdmac, atxdmac->layout->grws, atchan->mask); in at_xdmac_device_pause()
1828 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_device_pause()
1836 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_device_resume()
1841 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_device_resume()
1843 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_device_resume()
1847 at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask); in at_xdmac_device_resume()
1848 clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); in at_xdmac_device_resume()
1849 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_device_resume()
1858 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_device_terminate_all()
1863 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_device_terminate_all()
1864 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); in at_xdmac_device_terminate_all()
1865 while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask) in at_xdmac_device_terminate_all()
1869 list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) in at_xdmac_device_terminate_all()
1872 clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); in at_xdmac_device_terminate_all()
1873 clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); in at_xdmac_device_terminate_all()
1874 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_device_terminate_all()
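
Both the error handler and terminate_all above use the same disable idiom: write the channel's bit to GD, then poll GS until the hardware confirms the channel has really stopped before touching its descriptors. A minimal host-side model with a fake status register standing in for MMIO:

```c
/* Model of the disable-then-poll idiom: GD write, then spin on GS.
 * The fake register stops the channel immediately; real hardware may
 * take a few reads. */
#include <stdio.h>

static unsigned int fake_gs = 0x5;	/* channels 0 and 2 running */

static void fake_gd_write(unsigned int mask)
{
	fake_gs &= ~mask;	/* model: hardware stops the channel at once */
}

int main(void)
{
	unsigned int chan_mask = 1u << 2;

	fake_gd_write(chan_mask);
	while (fake_gs & chan_mask)
		;	/* the driver calls cpu_relax() here */

	printf("channel stopped, GS=0x%08x\n", fake_gs);
	return 0;
}
```
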
1887 "can't allocate channel resources (channel enabled)\n"); in at_xdmac_alloc_chan_resources()
1888 return -EIO; in at_xdmac_alloc_chan_resources()
1891 if (!list_empty(&atchan->free_descs_list)) { in at_xdmac_alloc_chan_resources()
1893 "can't allocate channel resources (channel not free from a previous use)\n"); in at_xdmac_alloc_chan_resources()
1894 return -EIO; in at_xdmac_alloc_chan_resources()
1904 list_add_tail(&desc->desc_node, &atchan->free_descs_list); in at_xdmac_alloc_chan_resources()
1917 struct at_xdmac *atxdmac = to_at_xdmac(chan->device); in at_xdmac_free_chan_resources()
1920 list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) { in at_xdmac_free_chan_resources()
1922 list_del(&desc->desc_node); in at_xdmac_free_chan_resources()
1923 dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys); in at_xdmac_free_chan_resources()
1935 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { in atmel_xdmac_prepare()
1940 return -EAGAIN; in atmel_xdmac_prepare()
1954 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { in atmel_xdmac_suspend()
1957 atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC); in atmel_xdmac_suspend()
1961 atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); in atmel_xdmac_suspend()
1962 atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA); in atmel_xdmac_suspend()
1963 atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC); in atmel_xdmac_suspend()
1966 atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM); in atmel_xdmac_suspend()
1969 clk_disable_unprepare(atxdmac->clk); in atmel_xdmac_suspend()
1981 ret = clk_prepare_enable(atxdmac->clk); in atmel_xdmac_resume()
1986 for (i = 0; i < atxdmac->dma.chancnt; i++) { in atmel_xdmac_resume()
1987 atchan = &atxdmac->chan[i]; in atmel_xdmac_resume()
1992 at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim); in atmel_xdmac_resume()
1993 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { in atmel_xdmac_resume()
1995 at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc); in atmel_xdmac_resume()
1999 at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda); in atmel_xdmac_resume()
2000 at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc); in atmel_xdmac_resume()
2001 at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim); in atmel_xdmac_resume()
2003 at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask); in atmel_xdmac_resume()
2016 if (!atxdmac->layout->axi_config) in at_xdmac_axi_config()
2019 if (!of_property_read_u32(pdev->dev.of_node, "dma-requests", in at_xdmac_axi_config()
2021 dev_info(&pdev->dev, "controller in mem2mem mode.\n"); in at_xdmac_axi_config()
2057 dev_err(&pdev->dev, "invalid number of channels (%u)\n", in at_xdmac_probe()
2059 return -EINVAL; in at_xdmac_probe()
2064 atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); in at_xdmac_probe()
2066 dev_err(&pdev->dev, "can't allocate at_xdmac structure\n"); in at_xdmac_probe()
2067 return -ENOMEM; in at_xdmac_probe()
2070 atxdmac->regs = base; in at_xdmac_probe()
2071 atxdmac->irq = irq; in at_xdmac_probe()
2073 atxdmac->layout = of_device_get_match_data(&pdev->dev); in at_xdmac_probe()
2074 if (!atxdmac->layout) in at_xdmac_probe()
2075 return -ENODEV; in at_xdmac_probe()
2077 atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk"); in at_xdmac_probe()
2078 if (IS_ERR(atxdmac->clk)) { in at_xdmac_probe()
2079 dev_err(&pdev->dev, "can't get dma_clk\n"); in at_xdmac_probe()
2080 return PTR_ERR(atxdmac->clk); in at_xdmac_probe()
2084 ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac); in at_xdmac_probe()
2086 dev_err(&pdev->dev, "can't request irq\n"); in at_xdmac_probe()
2090 ret = clk_prepare_enable(atxdmac->clk); in at_xdmac_probe()
2092 dev_err(&pdev->dev, "can't prepare or enable clock\n"); in at_xdmac_probe()
2096 atxdmac->at_xdmac_desc_pool = in at_xdmac_probe()
2097 dmam_pool_create(dev_name(&pdev->dev), &pdev->dev, in at_xdmac_probe()
2099 if (!atxdmac->at_xdmac_desc_pool) { in at_xdmac_probe()
2100 dev_err(&pdev->dev, "no memory for descriptors dma pool\n"); in at_xdmac_probe()
2101 ret = -ENOMEM; in at_xdmac_probe()
2105 dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask); in at_xdmac_probe()
2106 dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask); in at_xdmac_probe()
2107 dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask); in at_xdmac_probe()
2108 dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask); in at_xdmac_probe()
2109 dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask); in at_xdmac_probe()
2110 dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask); in at_xdmac_probe()
2113 * one channel, second allocation fails in private_candidate. in at_xdmac_probe()
2115 dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask); in at_xdmac_probe()
2116 atxdmac->dma.dev = &pdev->dev; in at_xdmac_probe()
2117 atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources; in at_xdmac_probe()
2118 atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources; in at_xdmac_probe()
2119 atxdmac->dma.device_tx_status = at_xdmac_tx_status; in at_xdmac_probe()
2120 atxdmac->dma.device_issue_pending = at_xdmac_issue_pending; in at_xdmac_probe()
2121 atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic; in at_xdmac_probe()
2122 atxdmac->dma.device_prep_interleaved_dma = at_xdmac_prep_interleaved; in at_xdmac_probe()
2123 atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy; in at_xdmac_probe()
2124 atxdmac->dma.device_prep_dma_memset = at_xdmac_prep_dma_memset; in at_xdmac_probe()
2125 atxdmac->dma.device_prep_dma_memset_sg = at_xdmac_prep_dma_memset_sg; in at_xdmac_probe()
2126 atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg; in at_xdmac_probe()
2127 atxdmac->dma.device_config = at_xdmac_device_config; in at_xdmac_probe()
2128 atxdmac->dma.device_pause = at_xdmac_device_pause; in at_xdmac_probe()
2129 atxdmac->dma.device_resume = at_xdmac_device_resume; in at_xdmac_probe()
2130 atxdmac->dma.device_terminate_all = at_xdmac_device_terminate_all; in at_xdmac_probe()
2131 atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS; in at_xdmac_probe()
2132 atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS; in at_xdmac_probe()
2133 atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); in at_xdmac_probe()
2134 atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; in at_xdmac_probe()
2140 INIT_LIST_HEAD(&atxdmac->dma.channels); in at_xdmac_probe()
2142 struct at_xdmac_chan *atchan = &atxdmac->chan[i]; in at_xdmac_probe()
2144 atchan->chan.device = &atxdmac->dma; in at_xdmac_probe()
2145 list_add_tail(&atchan->chan.device_node, in at_xdmac_probe()
2146 &atxdmac->dma.channels); in at_xdmac_probe()
2148 atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i); in at_xdmac_probe()
2149 atchan->mask = 1 << i; in at_xdmac_probe()
2151 spin_lock_init(&atchan->lock); in at_xdmac_probe()
2152 INIT_LIST_HEAD(&atchan->xfers_list); in at_xdmac_probe()
2153 INIT_LIST_HEAD(&atchan->free_descs_list); in at_xdmac_probe()
2154 tasklet_setup(&atchan->tasklet, at_xdmac_tasklet); in at_xdmac_probe()
2162 ret = dma_async_device_register(&atxdmac->dma); in at_xdmac_probe()
2164 dev_err(&pdev->dev, "fail to register DMA engine device\n"); in at_xdmac_probe()
2168 ret = of_dma_controller_register(pdev->dev.of_node, in at_xdmac_probe()
2171 dev_err(&pdev->dev, "could not register of dma controller\n"); in at_xdmac_probe()
2175 dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n", in at_xdmac_probe()
2176 nr_channels, atxdmac->regs); in at_xdmac_probe()
2183 dma_async_device_unregister(&atxdmac->dma); in at_xdmac_probe()
2185 clk_disable_unprepare(atxdmac->clk); in at_xdmac_probe()
2187 free_irq(atxdmac->irq, atxdmac); in at_xdmac_probe()
2197 of_dma_controller_free(pdev->dev.of_node); in at_xdmac_remove()
2198 dma_async_device_unregister(&atxdmac->dma); in at_xdmac_remove()
2199 clk_disable_unprepare(atxdmac->clk); in at_xdmac_remove()
2201 free_irq(atxdmac->irq, atxdmac); in at_xdmac_remove()
2203 for (i = 0; i < atxdmac->dma.chancnt; i++) { in at_xdmac_remove()
2204 struct at_xdmac_chan *atchan = &atxdmac->chan[i]; in at_xdmac_remove()
2206 tasklet_kill(&atchan->tasklet); in at_xdmac_remove()
2207 at_xdmac_free_chan_resources(&atchan->chan); in at_xdmac_remove()
2220 .compatible = "atmel,sama5d4-dma",
2223 .compatible = "microchip,sama7g5-dma",
2253 MODULE_DESCRIPTION("Atmel Extensible DMA Controller driver");