Lines matching "dma-channel-mask" (search query: +full:dma +full:-channel +full:-mask)

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
11 #include <dt-bindings/dma/at91.h>
36 #define AT_XDMAC_GIM 0x14 /* Global Interrupt Mask Register */
38 #define AT_XDMAC_GE 0x1C /* Global Channel Enable Register */
39 #define AT_XDMAC_GD 0x20 /* Global Channel Disable Register */
40 #define AT_XDMAC_GS 0x24 /* Global Channel Status Register */
41 #define AT_XDMAC_GRS 0x28 /* Global Channel Read Suspend Register */
43 #define AT_XDMAC_GRWS 0x30 /* Global Channel Read Write Suspend Register */
44 #define AT_XDMAC_GRWR 0x34 /* Global Channel Read Write Resume Register */
45 #define AT_XDMAC_GSWR 0x38 /* Global Channel Software Request Register */
46 #define AT_XDMAC_GSWS 0x3C /* Global Channel Software Request Status Register */
47 #define AT_XDMAC_GSWF 0x40 /* Global Channel Software Flush Request Register */
50 /* Channel relative registers offsets */
51 #define AT_XDMAC_CIE 0x00 /* Channel Interrupt Enable Register */
59 #define AT_XDMAC_CID 0x04 /* Channel Interrupt Disable Register */
67 #define AT_XDMAC_CIM 0x08 /* Channel Interrupt Mask Register */
68 #define AT_XDMAC_CIM_BIM BIT(0) /* End of Block Interrupt Mask Bit */
69 #define AT_XDMAC_CIM_LIM BIT(1) /* End of Linked List Interrupt Mask Bit */
70 #define AT_XDMAC_CIM_DIM BIT(2) /* End of Disable Interrupt Mask Bit */
71 #define AT_XDMAC_CIM_FIM BIT(3) /* End of Flush Interrupt Mask Bit */
72 #define AT_XDMAC_CIM_RBEIM BIT(4) /* Read Bus Error Interrupt Mask Bit */
73 #define AT_XDMAC_CIM_WBEIM BIT(5) /* Write Bus Error Interrupt Mask Bit */
74 #define AT_XDMAC_CIM_ROIM BIT(6) /* Request Overflow Interrupt Mask Bit */
75 #define AT_XDMAC_CIS 0x0C /* Channel Interrupt Status Register */
83 #define AT_XDMAC_CSA 0x10 /* Channel Source Address Register */
84 #define AT_XDMAC_CDA 0x14 /* Channel Destination Address Register */
85 #define AT_XDMAC_CNDA 0x18 /* Channel Next Descriptor Address Register */
86 #define AT_XDMAC_CNDA_NDAIF(i) ((i) & 0x1) /* Channel x Next Descriptor Interface */
87 #define AT_XDMAC_CNDA_NDA(i) ((i) & 0xfffffffc) /* Channel x Next Descriptor Address */
88 #define AT_XDMAC_CNDC 0x1C /* Channel Next Descriptor Control Register */
89 #define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */
90 #define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */
91 #define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */
92 #define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */
93 #define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */
94 #define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */
95 #define AT_XDMAC_CNDC_NDVIEW_NDV3 (0x3 << 3) /* Channel x Next Descriptor View 3 */
96 #define AT_XDMAC_CUBC 0x20 /* Channel Microblock Control Register */
97 #define AT_XDMAC_CBC 0x24 /* Channel Block Control Register */
98 #define AT_XDMAC_CC 0x28 /* Channel Configuration Register */
99 #define AT_XDMAC_CC_TYPE (0x1 << 0) /* Channel Transfer Type */
107 #define AT_XDMAC_CC_DSYNC (0x1 << 4) /* Channel Synchronization */
110 #define AT_XDMAC_CC_PROT (0x1 << 5) /* Channel Protection */
113 #define AT_XDMAC_CC_SWREQ (0x1 << 6) /* Channel Software Request Trigger */
116 #define AT_XDMAC_CC_MEMSET (0x1 << 7) /* Channel Fill Block of memory */
119 #define AT_XDMAC_CC_CSIZE(i) ((0x7 & (i)) << 8) /* Channel Chunk Size */
122 #define AT_XDMAC_CC_DWIDTH(i) ((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET) /* Channel Data Width */
127 #define AT_XDMAC_CC_SIF(i) ((0x1 & (i)) << 13) /* Channel Source Interface Identifier */
128 #define AT_XDMAC_CC_DIF(i) ((0x1 & (i)) << 14) /* Channel Destination Interface Identifier */
129 #define AT_XDMAC_CC_SAM_MASK (0x3 << 16) /* Channel Source Addressing Mode */
134 #define AT_XDMAC_CC_DAM_MASK (0x3 << 18) /* Channel Destination Addressing Mode */
139 #define AT_XDMAC_CC_INITD (0x1 << 21) /* Channel Initialization Terminated (read only) */
148 #define AT_XDMAC_CC_PERID(i) ((0x7f & (i)) << 24) /* Channel Peripheral Identifier */
149 #define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */
150 #define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */
151 #define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */
153 #define AT_XDMAC_CHAN_REG_BASE 0x50 /* Channel registers base address */
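The NDVIEW values above (NDV0-NDV3) select how many members of the in-memory linked-list descriptor the controller fetches at each next-descriptor address. A minimal sketch of that layout, reconstructed from the mbr_* members referenced throughout this listing (the view-to-member mapping is inferred, treat it as illustrative):

struct at_xdmac_lld_sketch {
	u32 mbr_nda;	/* next descriptor address, loaded into CNDA */
	u32 mbr_ubc;	/* microblock control: NDE, NDV, microblock length */
	u32 mbr_sa;	/* source address (fetched from view 1 up) */
	u32 mbr_da;	/* destination address (fetched from view 1 up) */
	u32 mbr_cfg;	/* channel configuration (view 2 up) */
	u32 mbr_bc;	/* block control (view 3) */
	u32 mbr_ds;	/* data stride (view 3) */
	u32 mbr_sus;	/* source microblock stride (view 3) */
	u32 mbr_dus;	/* destination microblock stride (view 3) */
};

This is consistent with the prep routines below: slave_sg and memcpy use NDV2 (they rewrite mbr_cfg per descriptor), cyclic uses NDV1, and the interleaved/memset paths use NDV3 to reach the stride members.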
182 /* ----- Channels ----- */
186 u32 mask; /* Channel Mask */ member
187 u32 cfg; /* Channel Configuration Register */
207 /* ----- Controller ----- */
209 struct dma_device dma; member
219 /* ----- Descriptors ----- */
234 /* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
249 return atxdmac->regs + (AT_XDMAC_CHAN_REG_BASE + chan_nb * 0x40); in at_xdmac_chan_reg_base()
252 #define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
254 writel_relaxed((value), (atxdmac)->regs + (reg))
256 #define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
257 #define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))
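A usage sketch for these accessors (hypothetical call site; the CIE/CIS bit positions mirror the CIM bits above). Per at_xdmac_chan_reg_base(), channel n's window sits at 0x50 + n * 0x40, so e.g. channel 2's CIM is read from 0x50 + 2 * 0x40 + 0x08 = 0xD8 off the controller base:

/* unmask end-of-block and bus-error interrupts on one channel */
at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
		    AT_XDMAC_CIM_BIM | AT_XDMAC_CIM_RBEIM | AT_XDMAC_CIM_WBEIM);
cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);	/* effective mask */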
266 return &chan->dev->device; in chan2dev()
271 return container_of(ddev, struct at_xdmac, dma); in to_at_xdmac()
281 return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); in at_xdmac_chan_is_cyclic()
286 return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); in at_xdmac_chan_is_paused()
293 csize = ffs(maxburst) - 1; in at_xdmac_csize()
295 csize = -EINVAL; in at_xdmac_csize()
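A worked example of the ffs() encoding used by at_xdmac_csize() above:

/* ffs() returns the 1-based index of the least-significant set bit, so
 * for the power-of-two bursts the hardware supports:
 *   maxburst = 1  -> ffs(1)  - 1 = 0   (chunk of 1 data item)
 *   maxburst = 8  -> ffs(8)  - 1 = 3   (chunk of 8)
 *   maxburst = 16 -> ffs(16) - 1 = 4   (chunk of 16)
 * The result feeds AT_XDMAC_CC_CSIZE(); out-of-range values take the
 * -EINVAL path shown above. */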
313 "initial descriptors per channel (default: 64)");
318 return at_xdmac_read(to_at_xdmac(atchan->chan.device), AT_XDMAC_GS) & atchan->mask; in at_xdmac_chan_is_enabled()
323 at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L); in at_xdmac_off()
329 at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L); in at_xdmac_off()
336 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_start_xfer()
339 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first); in at_xdmac_start_xfer()
345 first->active_xfer = true; in at_xdmac_start_xfer()
348 reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys) in at_xdmac_start_xfer()
349 | AT_XDMAC_CNDA_NDAIF(atchan->memif); in at_xdmac_start_xfer()
359 else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) in at_xdmac_start_xfer()
366 * properly. This bit can be modified only by using the channel in at_xdmac_start_xfer()
369 at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg); in at_xdmac_start_xfer()
376 dev_vdbg(chan2dev(&atchan->chan), in at_xdmac_start_xfer()
390 if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg)) in at_xdmac_start_xfer()
394 * There is no end of list when doing cyclic dma, we need to get in at_xdmac_start_xfer()
403 at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask); in at_xdmac_start_xfer()
404 dev_vdbg(chan2dev(&atchan->chan), in at_xdmac_start_xfer()
405 "%s: enable channel (0x%08x)\n", __func__, atchan->mask); in at_xdmac_start_xfer()
407 at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask); in at_xdmac_start_xfer()
409 dev_vdbg(chan2dev(&atchan->chan), in at_xdmac_start_xfer()
423 struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan); in at_xdmac_tx_submit()
427 spin_lock_irqsave(&atchan->lock, irqflags); in at_xdmac_tx_submit()
430 dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n", in at_xdmac_tx_submit()
432 list_add_tail(&desc->xfer_node, &atchan->xfers_list); in at_xdmac_tx_submit()
433 if (list_is_singular(&atchan->xfers_list)) in at_xdmac_tx_submit()
436 spin_unlock_irqrestore(&atchan->lock, irqflags); in at_xdmac_tx_submit()
444 struct at_xdmac *atxdmac = to_at_xdmac(chan->device); in at_xdmac_alloc_desc()
447 desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys); in at_xdmac_alloc_desc()
449 INIT_LIST_HEAD(&desc->descs_list); in at_xdmac_alloc_desc()
450 dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan); in at_xdmac_alloc_desc()
451 desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit; in at_xdmac_alloc_desc()
452 desc->tx_dma_desc.phys = phys; in at_xdmac_alloc_desc()
460 memset(&desc->lld, 0, sizeof(desc->lld)); in at_xdmac_init_used_desc()
461 INIT_LIST_HEAD(&desc->descs_list); in at_xdmac_init_used_desc()
462 desc->direction = DMA_TRANS_NONE; in at_xdmac_init_used_desc()
463 desc->xfer_size = 0; in at_xdmac_init_used_desc()
464 desc->active_xfer = false; in at_xdmac_init_used_desc()
472 if (list_empty(&atchan->free_descs_list)) { in at_xdmac_get_desc()
473 desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT); in at_xdmac_get_desc()
475 desc = list_first_entry(&atchan->free_descs_list, in at_xdmac_get_desc()
477 list_del(&desc->desc_node); in at_xdmac_get_desc()
491 prev->lld.mbr_nda = desc->tx_dma_desc.phys; in at_xdmac_queue_desc()
492 prev->lld.mbr_ubc |= AT_XDMAC_MBR_UBC_NDE; in at_xdmac_queue_desc()
495 __func__, prev, &prev->lld.mbr_nda); in at_xdmac_queue_desc()
504 desc->lld.mbr_bc++; in at_xdmac_increment_block_count()
514 struct at_xdmac *atxdmac = of_dma->of_dma_data; in at_xdmac_xlate()
517 struct device *dev = atxdmac->dma.dev; in at_xdmac_xlate()
519 if (dma_spec->args_count != 1) { in at_xdmac_xlate()
520 dev_err(dev, "dma phandler args: bad number of args\n"); in at_xdmac_xlate()
524 chan = dma_get_any_slave_channel(&atxdmac->dma); in at_xdmac_xlate()
526 dev_err(dev, "can't get a dma channel\n"); in at_xdmac_xlate()
531 atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]); in at_xdmac_xlate()
532 atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]); in at_xdmac_xlate()
533 atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]); in at_xdmac_xlate()
535 atchan->memif, atchan->perif, atchan->perid); in at_xdmac_xlate()
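The single dma_spec cell decoded above packs three fields, using the AT91_XDMAC_DT_* macros from the dt-bindings header included at the top of this file. A client-side sketch of the encoding (interface and request-line numbers are illustrative):

u32 cell = AT91_XDMAC_DT_MEM_IF(0)	/* memory-side AHB interface */
	 | AT91_XDMAC_DT_PER_IF(1)	/* peripheral-side AHB interface */
	 | AT91_XDMAC_DT_PERID(11);	/* hardware request line */
/* AT91_XDMAC_DT_GET_MEM_IF()/_PER_IF()/_PERID() recover the fields above. */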
547 atchan->cfg = in at_xdmac_compute_chan_conf()
548 AT91_XDMAC_DT_PERID(atchan->perid) in at_xdmac_compute_chan_conf()
551 | AT_XDMAC_CC_DIF(atchan->memif) in at_xdmac_compute_chan_conf()
552 | AT_XDMAC_CC_SIF(atchan->perif) in at_xdmac_compute_chan_conf()
557 csize = ffs(atchan->sconfig.src_maxburst) - 1; in at_xdmac_compute_chan_conf()
560 return -EINVAL; in at_xdmac_compute_chan_conf()
562 atchan->cfg |= AT_XDMAC_CC_CSIZE(csize); in at_xdmac_compute_chan_conf()
563 dwidth = ffs(atchan->sconfig.src_addr_width) - 1; in at_xdmac_compute_chan_conf()
566 return -EINVAL; in at_xdmac_compute_chan_conf()
568 atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth); in at_xdmac_compute_chan_conf()
570 atchan->cfg = in at_xdmac_compute_chan_conf()
571 AT91_XDMAC_DT_PERID(atchan->perid) in at_xdmac_compute_chan_conf()
574 | AT_XDMAC_CC_DIF(atchan->perif) in at_xdmac_compute_chan_conf()
575 | AT_XDMAC_CC_SIF(atchan->memif) in at_xdmac_compute_chan_conf()
580 csize = ffs(atchan->sconfig.dst_maxburst) - 1; in at_xdmac_compute_chan_conf()
583 return -EINVAL; in at_xdmac_compute_chan_conf()
585 atchan->cfg |= AT_XDMAC_CC_CSIZE(csize); in at_xdmac_compute_chan_conf()
586 dwidth = ffs(atchan->sconfig.dst_addr_width) - 1; in at_xdmac_compute_chan_conf()
589 return -EINVAL; in at_xdmac_compute_chan_conf()
591 atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth); in at_xdmac_compute_chan_conf()
594 dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg); in at_xdmac_compute_chan_conf()
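A worked example of the csize/dwidth arithmetic in at_xdmac_compute_chan_conf() for DMA_MEM_TO_DEV with dst_maxburst = 8 and dst_addr_width = 4 bytes (assuming AT_XDMAC_CC_DWIDTH_OFFSET is 11, between CSIZE at bit 8 and SIF at bit 13):

/* csize  = ffs(8) - 1 = 3 -> AT_XDMAC_CC_CSIZE(3)  = 0x3 << 8
 * dwidth = ffs(4) - 1 = 2 -> AT_XDMAC_CC_DWIDTH(2) = 0x2 << 11 (32-bit word) */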
606 if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE) in at_xdmac_check_slave_config()
607 || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE)) in at_xdmac_check_slave_config()
608 return -EINVAL; in at_xdmac_check_slave_config()
610 if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH) in at_xdmac_check_slave_config()
611 || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH)) in at_xdmac_check_slave_config()
612 return -EINVAL; in at_xdmac_check_slave_config()
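A client-side sketch (hypothetical peripheral driver) of a dma_slave_config that passes these checks, i.e. maxburst within AT_XDMAC_MAX_CSIZE and address widths within AT_XDMAC_MAX_DWIDTH:

struct dma_slave_config cfg = {
	.direction	= DMA_MEM_TO_DEV,
	.dst_addr	= fifo_phys,	/* hypothetical device FIFO address */
	.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	.dst_maxburst	= 8,
};
ret = dmaengine_slave_config(chan, &cfg);	/* reaches at_xdmac_device_config() */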
624 return -EINVAL; in at_xdmac_set_slave_config()
627 memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig)); in at_xdmac_set_slave_config()
649 dev_err(chan2dev(chan), "invalid DMA direction\n"); in at_xdmac_prep_slave_sg()
659 spin_lock_irqsave(&atchan->lock, irqflags); in at_xdmac_prep_slave_sg()
682 list_splice_init(&first->descs_list, &atchan->free_descs_list); in at_xdmac_prep_slave_sg()
688 desc->lld.mbr_sa = atchan->sconfig.src_addr; in at_xdmac_prep_slave_sg()
689 desc->lld.mbr_da = mem; in at_xdmac_prep_slave_sg()
691 desc->lld.mbr_sa = mem; in at_xdmac_prep_slave_sg()
692 desc->lld.mbr_da = atchan->sconfig.dst_addr; in at_xdmac_prep_slave_sg()
694 dwidth = at_xdmac_get_dwidth(atchan->cfg); in at_xdmac_prep_slave_sg()
698 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */ in at_xdmac_prep_slave_sg()
702 desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) | in at_xdmac_prep_slave_sg()
706 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); in at_xdmac_prep_slave_sg()
718 list_add_tail(&desc->desc_node, &first->descs_list); in at_xdmac_prep_slave_sg()
723 first->tx_dma_desc.flags = flags; in at_xdmac_prep_slave_sg()
724 first->xfer_size = xfer_size; in at_xdmac_prep_slave_sg()
725 first->direction = direction; in at_xdmac_prep_slave_sg()
726 ret = &first->tx_dma_desc; in at_xdmac_prep_slave_sg()
729 spin_unlock_irqrestore(&atchan->lock, irqflags); in at_xdmac_prep_slave_sg()
750 dev_err(chan2dev(chan), "invalid DMA direction\n"); in at_xdmac_prep_dma_cyclic()
754 if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) { in at_xdmac_prep_dma_cyclic()
755 dev_err(chan2dev(chan), "channel currently used\n"); in at_xdmac_prep_dma_cyclic()
765 spin_lock_irqsave(&atchan->lock, irqflags); in at_xdmac_prep_dma_cyclic()
770 list_splice_init(&first->descs_list, &atchan->free_descs_list); in at_xdmac_prep_dma_cyclic()
771 spin_unlock_irqrestore(&atchan->lock, irqflags); in at_xdmac_prep_dma_cyclic()
774 spin_unlock_irqrestore(&atchan->lock, irqflags); in at_xdmac_prep_dma_cyclic()
777 __func__, desc, &desc->tx_dma_desc.phys); in at_xdmac_prep_dma_cyclic()
780 desc->lld.mbr_sa = atchan->sconfig.src_addr; in at_xdmac_prep_dma_cyclic()
781 desc->lld.mbr_da = buf_addr + i * period_len; in at_xdmac_prep_dma_cyclic()
783 desc->lld.mbr_sa = buf_addr + i * period_len; in at_xdmac_prep_dma_cyclic()
784 desc->lld.mbr_da = atchan->sconfig.dst_addr; in at_xdmac_prep_dma_cyclic()
786 desc->lld.mbr_cfg = atchan->cfg; in at_xdmac_prep_dma_cyclic()
787 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 in at_xdmac_prep_dma_cyclic()
790 | period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg); in at_xdmac_prep_dma_cyclic()
794 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); in at_xdmac_prep_dma_cyclic()
806 list_add_tail(&desc->desc_node, &first->descs_list); in at_xdmac_prep_dma_cyclic()
810 first->tx_dma_desc.flags = flags; in at_xdmac_prep_dma_cyclic()
811 first->xfer_size = buf_len; in at_xdmac_prep_dma_cyclic()
812 first->direction = direction; in at_xdmac_prep_dma_cyclic()
814 return &first->tx_dma_desc; in at_xdmac_prep_dma_cyclic()
859 * WARNING: The channel configuration is set here since there is no in at_xdmac_interleaved_queue_desc()
867 * match the one of another channel. If not, it could lead to spurious in at_xdmac_interleaved_queue_desc()
876 dwidth = at_xdmac_align_width(chan, src | dst | chunk->size); in at_xdmac_interleaved_queue_desc()
877 if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) { in at_xdmac_interleaved_queue_desc()
880 __func__, chunk->size, in at_xdmac_interleaved_queue_desc()
889 if (xt->src_inc) { in at_xdmac_interleaved_queue_desc()
890 if (xt->src_sgl) in at_xdmac_interleaved_queue_desc()
896 if (xt->dst_inc) { in at_xdmac_interleaved_queue_desc()
897 if (xt->dst_sgl) in at_xdmac_interleaved_queue_desc()
903 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_interleaved_queue_desc()
905 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_interleaved_queue_desc()
913 ublen = chunk->size >> dwidth; in at_xdmac_interleaved_queue_desc()
915 desc->lld.mbr_sa = src; in at_xdmac_interleaved_queue_desc()
916 desc->lld.mbr_da = dst; in at_xdmac_interleaved_queue_desc()
917 desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk); in at_xdmac_interleaved_queue_desc()
918 desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk); in at_xdmac_interleaved_queue_desc()
920 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3 in at_xdmac_interleaved_queue_desc()
924 desc->lld.mbr_cfg = chan_cc; in at_xdmac_interleaved_queue_desc()
928 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, in at_xdmac_interleaved_queue_desc()
929 desc->lld.mbr_ubc, desc->lld.mbr_cfg); in at_xdmac_interleaved_queue_desc()
950 if (!xt || !xt->numf || (xt->dir != DMA_MEM_TO_MEM)) in at_xdmac_prep_interleaved()
957 if ((xt->numf > 1) && (xt->frame_size > 1)) in at_xdmac_prep_interleaved()
961 __func__, &xt->src_start, &xt->dst_start, xt->numf, in at_xdmac_prep_interleaved()
962 xt->frame_size, flags); in at_xdmac_prep_interleaved()
964 src_addr = xt->src_start; in at_xdmac_prep_interleaved()
965 dst_addr = xt->dst_start; in at_xdmac_prep_interleaved()
967 if (xt->numf > 1) { in at_xdmac_prep_interleaved()
971 xt, xt->sgl); in at_xdmac_prep_interleaved()
974 for (i = 0; i < xt->numf - 1; i++) in at_xdmac_prep_interleaved()
979 list_add_tail(&first->desc_node, &first->descs_list); in at_xdmac_prep_interleaved()
981 for (i = 0; i < xt->frame_size; i++) { in at_xdmac_prep_interleaved()
985 chunk = xt->sgl + i; in at_xdmac_prep_interleaved()
990 src_skip = chunk->size + src_icg; in at_xdmac_prep_interleaved()
991 dst_skip = chunk->size + dst_icg; in at_xdmac_prep_interleaved()
995 __func__, chunk->size, src_icg, dst_icg); in at_xdmac_prep_interleaved()
1002 list_splice_init(&first->descs_list, in at_xdmac_prep_interleaved()
1003 &atchan->free_descs_list); in at_xdmac_prep_interleaved()
1012 list_add_tail(&desc->desc_node, &first->descs_list); in at_xdmac_prep_interleaved()
1014 if (xt->src_sgl) in at_xdmac_prep_interleaved()
1017 if (xt->dst_sgl) in at_xdmac_prep_interleaved()
1020 len += chunk->size; in at_xdmac_prep_interleaved()
1025 first->tx_dma_desc.cookie = -EBUSY; in at_xdmac_prep_interleaved()
1026 first->tx_dma_desc.flags = flags; in at_xdmac_prep_interleaved()
1027 first->xfer_size = len; in at_xdmac_prep_interleaved()
1029 return &first->tx_dma_desc; in at_xdmac_prep_interleaved()
1049 * match the one of another channel. If not, it could lead to spurious in at_xdmac_prep_dma_memcpy()
1075 spin_lock_irqsave(&atchan->lock, irqflags); in at_xdmac_prep_dma_memcpy()
1077 spin_unlock_irqrestore(&atchan->lock, irqflags); in at_xdmac_prep_dma_memcpy()
1081 list_splice_init(&first->descs_list, &atchan->free_descs_list); in at_xdmac_prep_dma_memcpy()
1103 remaining_size -= xfer_size; in at_xdmac_prep_dma_memcpy()
1105 desc->lld.mbr_sa = src_addr; in at_xdmac_prep_dma_memcpy()
1106 desc->lld.mbr_da = dst_addr; in at_xdmac_prep_dma_memcpy()
1107 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 in at_xdmac_prep_dma_memcpy()
1111 desc->lld.mbr_cfg = chan_cc; in at_xdmac_prep_dma_memcpy()
1115 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg); in at_xdmac_prep_dma_memcpy()
1127 list_add_tail(&desc->desc_node, &first->descs_list); in at_xdmac_prep_dma_memcpy()
1130 first->tx_dma_desc.flags = flags; in at_xdmac_prep_dma_memcpy()
1131 first->xfer_size = len; in at_xdmac_prep_dma_memcpy()
1133 return &first->tx_dma_desc; in at_xdmac_prep_dma_memcpy()
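A hypothetical client sketch driving this prep routine through the standard dmaengine API (the DMA_MEMCPY capability is advertised in probe below):

dma_cap_mask_t mask;

dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
chan = dma_request_chan_by_mask(&mask);	/* may return ERR_PTR() */
tx = dmaengine_prep_dma_memcpy(chan, dst_phys, src_phys, len,
			       DMA_PREP_INTERRUPT);
cookie = dmaengine_submit(tx);		/* lands in at_xdmac_tx_submit() */
dma_async_issue_pending(chan);		/* lands in at_xdmac_issue_pending() */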
1147 * WARNING: The channel configuration is set here since there is no in at_xdmac_memset_create_desc()
1155 * match the one of another channel. If not, it could lead to spurious in at_xdmac_memset_create_desc()
1176 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_memset_create_desc()
1178 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_memset_create_desc()
1188 desc->lld.mbr_da = dst_addr; in at_xdmac_memset_create_desc()
1189 desc->lld.mbr_ds = value; in at_xdmac_memset_create_desc()
1190 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3 in at_xdmac_memset_create_desc()
1194 desc->lld.mbr_cfg = chan_cc; in at_xdmac_memset_create_desc()
1198 __func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc, in at_xdmac_memset_create_desc()
1199 desc->lld.mbr_cfg); in at_xdmac_memset_create_desc()
1218 list_add_tail(&desc->desc_node, &desc->descs_list); in at_xdmac_prep_dma_memset()
1220 desc->tx_dma_desc.cookie = -EBUSY; in at_xdmac_prep_dma_memset()
1221 desc->tx_dma_desc.flags = flags; in at_xdmac_prep_dma_memset()
1222 desc->xfer_size = len; in at_xdmac_prep_dma_memset()
1224 return &desc->tx_dma_desc; in at_xdmac_prep_dma_memset()
1255 list_splice_init(&first->descs_list, in at_xdmac_prep_dma_memset_sg()
1256 &atchan->free_descs_list); in at_xdmac_prep_dma_memset_sg()
1264 stride = sg_dma_address(sg) - in at_xdmac_prep_dma_memset_sg()
1276 * +-------+ +-------+ +-------+ in at_xdmac_prep_dma_memset_sg()
1277 * | N-2 | | N-1 | | N | in at_xdmac_prep_dma_memset_sg()
1278 * +-------+ +-------+ +-------+ in at_xdmac_prep_dma_memset_sg()
1280 * We need all these three elements (N-2, N-1 and N) in at_xdmac_prep_dma_memset_sg()
1282 * queue N-1 or reuse N-2. in at_xdmac_prep_dma_memset_sg()
1295 * N-2 descriptor in at_xdmac_prep_dma_memset_sg()
1298 ppdesc->lld.mbr_dus = stride; in at_xdmac_prep_dma_memset_sg()
1301 * Put back the N-1 descriptor in the in at_xdmac_prep_dma_memset_sg()
1304 list_add_tail(&pdesc->desc_node, in at_xdmac_prep_dma_memset_sg()
1305 &atchan->free_descs_list); in at_xdmac_prep_dma_memset_sg()
1308 * Make our N-1 descriptor pointer in at_xdmac_prep_dma_memset_sg()
1309 * point to the N-2 since they were in at_xdmac_prep_dma_memset_sg()
1325 * Queue the N-1 descriptor after the in at_xdmac_prep_dma_memset_sg()
1326 * N-2 in at_xdmac_prep_dma_memset_sg()
1331 * Add the N-1 descriptor to the list in at_xdmac_prep_dma_memset_sg()
1335 list_add_tail(&desc->desc_node, in at_xdmac_prep_dma_memset_sg()
1336 &first->descs_list); in at_xdmac_prep_dma_memset_sg()
1350 if ((i == (sg_len - 1)) && in at_xdmac_prep_dma_memset_sg()
1357 * Increment the block count of the N-1 in at_xdmac_prep_dma_memset_sg()
1361 pdesc->lld.mbr_dus = stride; in at_xdmac_prep_dma_memset_sg()
1367 list_add_tail(&desc->desc_node, in at_xdmac_prep_dma_memset_sg()
1368 &atchan->free_descs_list); in at_xdmac_prep_dma_memset_sg()
1382 first->tx_dma_desc.cookie = -EBUSY; in at_xdmac_prep_dma_memset_sg()
1383 first->tx_dma_desc.flags = flags; in at_xdmac_prep_dma_memset_sg()
1384 first->xfer_size = len; in at_xdmac_prep_dma_memset_sg()
1386 return &first->tx_dma_desc; in at_xdmac_prep_dma_memset_sg()
1394 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_tx_status()
1399 u32 cur_nda, check_nda, cur_ubc, mask, value; in at_xdmac_tx_status() local
1411 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_tx_status()
1413 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node); in at_xdmac_tx_status()
1419 if (!desc->active_xfer) { in at_xdmac_tx_status()
1420 dma_set_residue(txstate, desc->xfer_size); in at_xdmac_tx_status()
1424 residue = desc->xfer_size; in at_xdmac_tx_status()
1432 * timeout, it requests the residue. If the data are in the DMA FIFO, in at_xdmac_tx_status()
1438 mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC; in at_xdmac_tx_status()
1440 if ((desc->lld.mbr_cfg & mask) == value) { in at_xdmac_tx_status()
1441 at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask); in at_xdmac_tx_status()
1447 * The easiest way to compute the residue should be to pause the DMA in at_xdmac_tx_status()
1451 * - DMA is running therefore a descriptor change is possible while in at_xdmac_tx_status()
1453 * - When the block transfer is done, the value of the CUBC register in at_xdmac_tx_status()
1458 * INITD -------- ------------ in at_xdmac_tx_status()
1498 if ((desc->lld.mbr_cfg & mask) == value) { in at_xdmac_tx_status()
1499 at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask); in at_xdmac_tx_status()
1509 descs_list = &desc->descs_list; in at_xdmac_tx_status()
1511 dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg); in at_xdmac_tx_status()
1512 residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth; in at_xdmac_tx_status()
1513 if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda) in at_xdmac_tx_status()
1522 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue); in at_xdmac_tx_status()
1525 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_tx_status()
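A worked example of the residue walk above (illustrative numbers): a 4096-byte transfer split into two descriptors of 512 microblocks each at dwidth = 2 (4-byte words, 2048 bytes per descriptor):

/* If CNDA matches the first descriptor's mbr_nda and CUBC reads 100:
 *   residue = 4096 - (512 << 2) = 2048	(loop above)
 *   residue += 100 << 2 -> 2448		(CUBC add-back, elided from this listing)
 * i.e. 400 bytes still pending in the in-flight descriptor plus the
 * untouched 2048-byte second descriptor. */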
1533 dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); in at_xdmac_remove_xfer()
1539 list_del(&desc->xfer_node); in at_xdmac_remove_xfer()
1540 list_splice_init(&desc->descs_list, &atchan->free_descs_list); in at_xdmac_remove_xfer()
1548 * If channel is enabled, do nothing, advance_work will be triggered in at_xdmac_advance_work()
1551 if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) { in at_xdmac_advance_work()
1552 desc = list_first_entry(&atchan->xfers_list, in at_xdmac_advance_work()
1555 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); in at_xdmac_advance_work()
1556 if (!desc->active_xfer) in at_xdmac_advance_work()
1566 if (!list_empty(&atchan->xfers_list)) { in at_xdmac_handle_cyclic()
1567 desc = list_first_entry(&atchan->xfers_list, in at_xdmac_handle_cyclic()
1569 txd = &desc->tx_dma_desc; in at_xdmac_handle_cyclic()
1571 if (txd->flags & DMA_PREP_INTERRUPT) in at_xdmac_handle_cyclic()
1578 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_handle_error()
1587 if (atchan->irq_status & AT_XDMAC_CIS_RBEIS) in at_xdmac_handle_error()
1588 dev_err(chan2dev(&atchan->chan), "read bus error!!!"); in at_xdmac_handle_error()
1589 if (atchan->irq_status & AT_XDMAC_CIS_WBEIS) in at_xdmac_handle_error()
1590 dev_err(chan2dev(&atchan->chan), "write bus error!!!"); in at_xdmac_handle_error()
1591 if (atchan->irq_status & AT_XDMAC_CIS_ROIS) in at_xdmac_handle_error()
1592 dev_err(chan2dev(&atchan->chan), "request overflow error!!!"); in at_xdmac_handle_error()
1594 spin_lock_irq(&atchan->lock); in at_xdmac_handle_error()
1596 /* Channel must be disabled first as it's not done automatically */ in at_xdmac_handle_error()
1597 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); in at_xdmac_handle_error()
1598 while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask) in at_xdmac_handle_error()
1601 bad_desc = list_first_entry(&atchan->xfers_list, in at_xdmac_handle_error()
1605 spin_unlock_irq(&atchan->lock); in at_xdmac_handle_error()
1608 dev_dbg(chan2dev(&atchan->chan), in at_xdmac_handle_error()
1610 __func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da, in at_xdmac_handle_error()
1611 bad_desc->lld.mbr_ubc); in at_xdmac_handle_error()
1622 dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n", in at_xdmac_tasklet()
1623 __func__, atchan->irq_status); in at_xdmac_tasklet()
1631 } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS) in at_xdmac_tasklet()
1632 || (atchan->irq_status & error_mask)) { in at_xdmac_tasklet()
1635 if (atchan->irq_status & error_mask) in at_xdmac_tasklet()
1638 spin_lock_irq(&atchan->lock); in at_xdmac_tasklet()
1639 desc = list_first_entry(&atchan->xfers_list, in at_xdmac_tasklet()
1642 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); in at_xdmac_tasklet()
1643 if (!desc->active_xfer) { in at_xdmac_tasklet()
1644 dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting"); in at_xdmac_tasklet()
1645 spin_unlock_irq(&atchan->lock); in at_xdmac_tasklet()
1649 txd = &desc->tx_dma_desc; in at_xdmac_tasklet()
1652 spin_unlock_irq(&atchan->lock); in at_xdmac_tasklet()
1655 if (txd->flags & DMA_PREP_INTERRUPT) in at_xdmac_tasklet()
1660 spin_lock_irq(&atchan->lock); in at_xdmac_tasklet()
1662 spin_unlock_irq(&atchan->lock); in at_xdmac_tasklet()
1679 dev_vdbg(atxdmac->dma.dev, in at_xdmac_interrupt()
1686 /* We have to find which channel has generated the interrupt. */ in at_xdmac_interrupt()
1687 for (i = 0; i < atxdmac->dma.chancnt; i++) { in at_xdmac_interrupt()
1691 atchan = &atxdmac->chan[i]; in at_xdmac_interrupt()
1694 atchan->irq_status = chan_status & chan_imr; in at_xdmac_interrupt()
1695 dev_vdbg(atxdmac->dma.dev, in at_xdmac_interrupt()
1698 dev_vdbg(chan2dev(&atchan->chan), in at_xdmac_interrupt()
1708 if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) in at_xdmac_interrupt()
1709 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); in at_xdmac_interrupt()
1711 tasklet_schedule(&atchan->tasklet); in at_xdmac_interrupt()
1725 dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__); in at_xdmac_issue_pending()
1728 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_issue_pending()
1730 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_issue_pending()
1745 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_device_config()
1747 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_device_config()
1755 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_device_pause()
1760 if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status)) in at_xdmac_device_pause()
1763 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_device_pause()
1764 at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask); in at_xdmac_device_pause()
1768 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_device_pause()
1776 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_device_resume()
1781 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_device_resume()
1783 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_device_resume()
1787 at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask); in at_xdmac_device_resume()
1788 clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); in at_xdmac_device_resume()
1789 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_device_resume()
1798 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_device_terminate_all()
1803 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_device_terminate_all()
1804 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); in at_xdmac_device_terminate_all()
1805 while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask) in at_xdmac_device_terminate_all()
1809 list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) in at_xdmac_device_terminate_all()
1812 clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); in at_xdmac_device_terminate_all()
1813 clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); in at_xdmac_device_terminate_all()
1814 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_device_terminate_all()
1827 "can't allocate channel resources (channel enabled)\n"); in at_xdmac_alloc_chan_resources()
1828 return -EIO; in at_xdmac_alloc_chan_resources()
1831 if (!list_empty(&atchan->free_descs_list)) { in at_xdmac_alloc_chan_resources()
1833 "can't allocate channel resources (channel not free from a previous use)\n"); in at_xdmac_alloc_chan_resources()
1834 return -EIO; in at_xdmac_alloc_chan_resources()
1844 list_add_tail(&desc->desc_node, &atchan->free_descs_list); in at_xdmac_alloc_chan_resources()
1857 struct at_xdmac *atxdmac = to_at_xdmac(chan->device); in at_xdmac_free_chan_resources()
1860 list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) { in at_xdmac_free_chan_resources()
1862 list_del(&desc->desc_node); in at_xdmac_free_chan_resources()
1863 dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys); in at_xdmac_free_chan_resources()
1875 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { in atmel_xdmac_prepare()
1880 return -EAGAIN; in atmel_xdmac_prepare()
1894 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { in atmel_xdmac_suspend()
1897 atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC); in atmel_xdmac_suspend()
1901 atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); in atmel_xdmac_suspend()
1902 atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA); in atmel_xdmac_suspend()
1903 atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC); in atmel_xdmac_suspend()
1906 atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM); in atmel_xdmac_suspend()
1909 clk_disable_unprepare(atxdmac->clk); in atmel_xdmac_suspend()
1921 ret = clk_prepare_enable(atxdmac->clk); in atmel_xdmac_resume()
1926 for (i = 0; i < atxdmac->dma.chancnt; i++) { in atmel_xdmac_resume()
1927 atchan = &atxdmac->chan[i]; in atmel_xdmac_resume()
1932 at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim); in atmel_xdmac_resume()
1933 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { in atmel_xdmac_resume()
1935 at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc); in atmel_xdmac_resume()
1939 at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda); in atmel_xdmac_resume()
1940 at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc); in atmel_xdmac_resume()
1941 at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim); in atmel_xdmac_resume()
1943 at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask); in atmel_xdmac_resume()
1973 dev_err(&pdev->dev, "invalid number of channels (%u)\n", in at_xdmac_probe()
1975 return -EINVAL; in at_xdmac_probe()
1980 atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); in at_xdmac_probe()
1982 dev_err(&pdev->dev, "can't allocate at_xdmac structure\n"); in at_xdmac_probe()
1983 return -ENOMEM; in at_xdmac_probe()
1986 atxdmac->regs = base; in at_xdmac_probe()
1987 atxdmac->irq = irq; in at_xdmac_probe()
1989 atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk"); in at_xdmac_probe()
1990 if (IS_ERR(atxdmac->clk)) { in at_xdmac_probe()
1991 dev_err(&pdev->dev, "can't get dma_clk\n"); in at_xdmac_probe()
1992 return PTR_ERR(atxdmac->clk); in at_xdmac_probe()
1996 ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac); in at_xdmac_probe()
1998 dev_err(&pdev->dev, "can't request irq\n"); in at_xdmac_probe()
2002 ret = clk_prepare_enable(atxdmac->clk); in at_xdmac_probe()
2004 dev_err(&pdev->dev, "can't prepare or enable clock\n"); in at_xdmac_probe()
2008 atxdmac->at_xdmac_desc_pool = in at_xdmac_probe()
2009 dmam_pool_create(dev_name(&pdev->dev), &pdev->dev, in at_xdmac_probe()
2011 if (!atxdmac->at_xdmac_desc_pool) { in at_xdmac_probe()
2012 dev_err(&pdev->dev, "no memory for descriptors dma pool\n"); in at_xdmac_probe()
2013 ret = -ENOMEM; in at_xdmac_probe()
2017 dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask); in at_xdmac_probe()
2018 dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask); in at_xdmac_probe()
2019 dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask); in at_xdmac_probe()
2020 dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask); in at_xdmac_probe()
2021 dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask); in at_xdmac_probe()
2022 dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask); in at_xdmac_probe()
2025 * one channel, second allocation fails in private_candidate. in at_xdmac_probe()
2027 dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask); in at_xdmac_probe()
2028 atxdmac->dma.dev = &pdev->dev; in at_xdmac_probe()
2029 atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources; in at_xdmac_probe()
2030 atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources; in at_xdmac_probe()
2031 atxdmac->dma.device_tx_status = at_xdmac_tx_status; in at_xdmac_probe()
2032 atxdmac->dma.device_issue_pending = at_xdmac_issue_pending; in at_xdmac_probe()
2033 atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic; in at_xdmac_probe()
2034 atxdmac->dma.device_prep_interleaved_dma = at_xdmac_prep_interleaved; in at_xdmac_probe()
2035 atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy; in at_xdmac_probe()
2036 atxdmac->dma.device_prep_dma_memset = at_xdmac_prep_dma_memset; in at_xdmac_probe()
2037 atxdmac->dma.device_prep_dma_memset_sg = at_xdmac_prep_dma_memset_sg; in at_xdmac_probe()
2038 atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg; in at_xdmac_probe()
2039 atxdmac->dma.device_config = at_xdmac_device_config; in at_xdmac_probe()
2040 atxdmac->dma.device_pause = at_xdmac_device_pause; in at_xdmac_probe()
2041 atxdmac->dma.device_resume = at_xdmac_device_resume; in at_xdmac_probe()
2042 atxdmac->dma.device_terminate_all = at_xdmac_device_terminate_all; in at_xdmac_probe()
2043 atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS; in at_xdmac_probe()
2044 atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS; in at_xdmac_probe()
2045 atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); in at_xdmac_probe()
2046 atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; in at_xdmac_probe()
2052 INIT_LIST_HEAD(&atxdmac->dma.channels); in at_xdmac_probe()
2054 struct at_xdmac_chan *atchan = &atxdmac->chan[i]; in at_xdmac_probe()
2056 atchan->chan.device = &atxdmac->dma; in at_xdmac_probe()
2057 list_add_tail(&atchan->chan.device_node, in at_xdmac_probe()
2058 &atxdmac->dma.channels); in at_xdmac_probe()
2060 atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i); in at_xdmac_probe()
2061 atchan->mask = 1 << i; in at_xdmac_probe()
2063 spin_lock_init(&atchan->lock); in at_xdmac_probe()
2064 INIT_LIST_HEAD(&atchan->xfers_list); in at_xdmac_probe()
2065 INIT_LIST_HEAD(&atchan->free_descs_list); in at_xdmac_probe()
2066 tasklet_setup(&atchan->tasklet, at_xdmac_tasklet); in at_xdmac_probe()
2074 ret = dma_async_device_register(&atxdmac->dma); in at_xdmac_probe()
2076 dev_err(&pdev->dev, "fail to register DMA engine device\n"); in at_xdmac_probe()
2080 ret = of_dma_controller_register(pdev->dev.of_node, in at_xdmac_probe()
2083 dev_err(&pdev->dev, "could not register of dma controller\n"); in at_xdmac_probe()
2087 dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n", in at_xdmac_probe()
2088 nr_channels, atxdmac->regs); in at_xdmac_probe()
2093 dma_async_device_unregister(&atxdmac->dma); in at_xdmac_probe()
2095 clk_disable_unprepare(atxdmac->clk); in at_xdmac_probe()
2097 free_irq(atxdmac->irq, atxdmac); in at_xdmac_probe()
2107 of_dma_controller_free(pdev->dev.of_node); in at_xdmac_remove()
2108 dma_async_device_unregister(&atxdmac->dma); in at_xdmac_remove()
2109 clk_disable_unprepare(atxdmac->clk); in at_xdmac_remove()
2111 free_irq(atxdmac->irq, atxdmac); in at_xdmac_remove()
2113 for (i = 0; i < atxdmac->dma.chancnt; i++) { in at_xdmac_remove()
2114 struct at_xdmac_chan *atchan = &atxdmac->chan[i]; in at_xdmac_remove()
2116 tasklet_kill(&atchan->tasklet); in at_xdmac_remove()
2117 at_xdmac_free_chan_resources(&atchan->chan); in at_xdmac_remove()
2130 .compatible = "atmel,sama5d4-dma",
2153 MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");