1 // SPDX-License-Identifier: GPL-2.0
3 * Renesas R-Car Gen2/Gen3 DMA Controller Driver
5 * Copyright (C) 2014-2019 Renesas Electronics Inc.
11 #include <linux/dma-mapping.h>
28 * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
43 * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
56 * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
57 * @async_tx: base DMA asynchronous transaction descriptor
58 * @direction: direction of the DMA transfer
60 * @chcr: value of the channel configuration register for this transfer
61 * @node: entry in the channel's descriptors lists
67 * @hwdescs.dma: device address of the hardware descriptors memory
70 * @cyclic: when set indicates that the DMA transfer is cyclic
86 dma_addr_t dma; member
97 * struct rcar_dmac_desc_page - One page worth of descriptors
98 * @node: entry in the channel's pages list
99 * @descs: array of DMA descriptors
112 ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) / \
115 ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \
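
/*
 * Editor's illustrative sketch (not part of the driver): how the two
 * *_PER_PAGE macros above carve a page into descriptors.  The structure
 * sizes are hypothetical stand-ins; the real values depend on the kernel
 * configuration and architecture.
 */
#include <stdio.h>
#include <stddef.h>

#define DEMO_PAGE_SIZE 4096u

struct demo_desc { char payload[160]; };         /* stand-in for rcar_dmac_desc */

struct demo_page {                               /* stand-in for rcar_dmac_desc_page */
        void *node_prev, *node_next;             /* list_head placeholder */
        struct demo_desc descs[];                /* flexible array, as in the driver */
};

#define DEMO_DESCS_PER_PAGE \
        ((DEMO_PAGE_SIZE - offsetof(struct demo_page, descs)) / sizeof(struct demo_desc))

int main(void)
{
        /* (4096 - 16) / 160 = 25 descriptors per page on a typical LP64 build. */
        printf("descriptors per page: %zu\n", (size_t)DEMO_DESCS_PER_PAGE);
        return 0;
}
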
119 * struct rcar_dmac_chan_slave - Slave configuration
129 * struct rcar_dmac_chan_map - Map of slave device phys to dma address
130 * @addr: slave dma address
141 * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
142 * @chan: base DMA channel object
143 * @iomem: channel I/O memory base
144 * @index: index of this channel in the controller
145 * @irq: channel IRQ
148 * @mid_rid: hardware MID/RID for the DMA client using this channel
149 * @lock: protects the channel CHCR register and the desc members
167 struct rcar_dmac_chan_map map; member
189 * struct rcar_dmac - R-Car Gen2 DMA Controller
190 * @engine: base DMA engine object
195 * @channels_mask: bitfield of which DMA channels are managed by this driver
213 * struct rcar_dmac_of_data - This driver's OF data
222 /* -----------------------------------------------------------------------------
296 /* Hardcode the MEMCPY transfer size to 4 bytes. */
299 /* -----------------------------------------------------------------------------
306 writew(data, dmac->iomem + reg); in rcar_dmac_write()
308 writel(data, dmac->iomem + reg); in rcar_dmac_write()
314 return readw(dmac->iomem + reg); in rcar_dmac_read()
316 return readl(dmac->iomem + reg); in rcar_dmac_read()
322 return readw(chan->iomem + reg); in rcar_dmac_chan_read()
324 return readl(chan->iomem + reg); in rcar_dmac_chan_read()
330 writew(data, chan->iomem + reg); in rcar_dmac_chan_write()
332 writel(data, chan->iomem + reg); in rcar_dmac_chan_write()
335 /* -----------------------------------------------------------------------------
348 struct rcar_dmac_desc *desc = chan->desc.running; in rcar_dmac_chan_start_xfer()
349 u32 chcr = desc->chcr; in rcar_dmac_chan_start_xfer()
353 if (chan->mid_rid >= 0) in rcar_dmac_chan_start_xfer()
354 rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid); in rcar_dmac_chan_start_xfer()
356 if (desc->hwdescs.use) { in rcar_dmac_chan_start_xfer()
358 list_first_entry(&desc->chunks, in rcar_dmac_chan_start_xfer()
361 dev_dbg(chan->chan.device->dev, in rcar_dmac_chan_start_xfer()
363 chan->index, desc, desc->nchunks, &desc->hwdescs.dma); in rcar_dmac_chan_start_xfer()
367 chunk->src_addr >> 32); in rcar_dmac_chan_start_xfer()
369 chunk->dst_addr >> 32); in rcar_dmac_chan_start_xfer()
371 desc->hwdescs.dma >> 32); in rcar_dmac_chan_start_xfer()
374 (desc->hwdescs.dma & 0xfffffff0) | in rcar_dmac_chan_start_xfer()
377 RCAR_DMACHCRB_DCNT(desc->nchunks - 1) | in rcar_dmac_chan_start_xfer()
388 chunk->dst_addr & 0xffffffff); in rcar_dmac_chan_start_xfer()
391 * Program the descriptor stage interrupt to occur after the end in rcar_dmac_chan_start_xfer()
403 if (!desc->cyclic) in rcar_dmac_chan_start_xfer()
409 else if (desc->async_tx.callback) in rcar_dmac_chan_start_xfer()
418 struct rcar_dmac_xfer_chunk *chunk = desc->running; in rcar_dmac_chan_start_xfer()
420 dev_dbg(chan->chan.device->dev, in rcar_dmac_chan_start_xfer()
421 "chan%u: queue chunk %p: %u@%pad -> %pad\n", in rcar_dmac_chan_start_xfer()
422 chan->index, chunk, chunk->size, &chunk->src_addr, in rcar_dmac_chan_start_xfer()
423 &chunk->dst_addr); in rcar_dmac_chan_start_xfer()
427 chunk->src_addr >> 32); in rcar_dmac_chan_start_xfer()
429 chunk->dst_addr >> 32); in rcar_dmac_chan_start_xfer()
432 chunk->src_addr & 0xffffffff); in rcar_dmac_chan_start_xfer()
434 chunk->dst_addr & 0xffffffff); in rcar_dmac_chan_start_xfer()
436 chunk->size >> desc->xfer_shift); in rcar_dmac_chan_start_xfer()
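
/*
 * Editor's illustrative sketch (not part of the driver): the TCR value
 * written above counts transfer units (1 << xfer_shift bytes each), not
 * bytes.  The numbers are made up.
 */
#include <stdio.h>

int main(void)
{
        unsigned int xfer_shift = 2;    /* ilog2(4): 4-byte transfer units */
        unsigned int chunk_bytes = 64;  /* size of one transfer chunk */

        /* A 64-byte chunk moved in 4-byte units programs a count of 16. */
        printf("TCR = %u\n", chunk_bytes >> xfer_shift);
        return 0;
}
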
450 rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask); in rcar_dmac_init()
456 dev_warn(dmac->dev, "DMAOR initialization failed.\n"); in rcar_dmac_init()
457 return -EIO; in rcar_dmac_init()
463 /* -----------------------------------------------------------------------------
469 struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan); in rcar_dmac_tx_submit()
474 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_tx_submit()
478 dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n", in rcar_dmac_tx_submit()
479 chan->index, tx->cookie, desc); in rcar_dmac_tx_submit()
481 list_add_tail(&desc->node, &chan->desc.pending); in rcar_dmac_tx_submit()
482 desc->running = list_first_entry(&desc->chunks, in rcar_dmac_tx_submit()
485 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_tx_submit()
490 /* -----------------------------------------------------------------------------
495 * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
496 * @chan: the DMA channel
508 return -ENOMEM; in rcar_dmac_desc_alloc()
511 struct rcar_dmac_desc *desc = &page->descs[i]; in rcar_dmac_desc_alloc()
513 dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); in rcar_dmac_desc_alloc()
514 desc->async_tx.tx_submit = rcar_dmac_tx_submit; in rcar_dmac_desc_alloc()
515 INIT_LIST_HEAD(&desc->chunks); in rcar_dmac_desc_alloc()
517 list_add_tail(&desc->node, &list); in rcar_dmac_desc_alloc()
520 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_desc_alloc()
521 list_splice_tail(&list, &chan->desc.free); in rcar_dmac_desc_alloc()
522 list_add_tail(&page->node, &chan->desc.pages); in rcar_dmac_desc_alloc()
523 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_desc_alloc()
529 * rcar_dmac_desc_put - Release a DMA transfer descriptor
530 * @chan: the DMA channel
533 * Put the descriptor and its transfer chunk descriptors back in the channel's
534 * free descriptors lists. The descriptor's chunks list will be reinitialized to
537 * The descriptor must have been removed from the channel's lists before calling
545 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_desc_put()
546 list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free); in rcar_dmac_desc_put()
547 list_add(&desc->node, &chan->desc.free); in rcar_dmac_desc_put()
548 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_desc_put()
558 * We have to temporarily move all descriptors from the wait list to a in rcar_dmac_desc_recycle_acked()
560 * list_for_each_entry_safe, isn't safe if we release the channel lock in rcar_dmac_desc_recycle_acked()
563 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_desc_recycle_acked()
564 list_splice_init(&chan->desc.wait, &list); in rcar_dmac_desc_recycle_acked()
565 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_desc_recycle_acked()
568 if (async_tx_test_ack(&desc->async_tx)) { in rcar_dmac_desc_recycle_acked()
569 list_del(&desc->node); in rcar_dmac_desc_recycle_acked()
578 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_desc_recycle_acked()
579 list_splice(&list, &chan->desc.wait); in rcar_dmac_desc_recycle_acked()
580 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_desc_recycle_acked()
584 * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
585 * @chan: the DMA channel
587 * Locking: This function must be called in a non-atomic context.
589 * Return: A pointer to the allocated descriptor or NULL if no descriptor can
601 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_desc_get()
603 while (list_empty(&chan->desc.free)) { in rcar_dmac_desc_get()
606 * again, as someone else could race us to get the newly in rcar_dmac_desc_get()
610 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_desc_get()
614 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_desc_get()
617 desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node); in rcar_dmac_desc_get()
618 list_del(&desc->node); in rcar_dmac_desc_get()
620 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_desc_get()
626 * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
627 * @chan: the DMA channel
639 return -ENOMEM; in rcar_dmac_xfer_chunk_alloc()
642 struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i]; in rcar_dmac_xfer_chunk_alloc()
644 list_add_tail(&chunk->node, &list); in rcar_dmac_xfer_chunk_alloc()
647 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_xfer_chunk_alloc()
648 list_splice_tail(&list, &chan->desc.chunks_free); in rcar_dmac_xfer_chunk_alloc()
649 list_add_tail(&page->node, &chan->desc.pages); in rcar_dmac_xfer_chunk_alloc()
650 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_xfer_chunk_alloc()
656 * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
657 * @chan: the DMA channel
659 * Locking: This function must be called in a non-atomic context.
661 * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
671 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_xfer_chunk_get()
673 while (list_empty(&chan->desc.chunks_free)) { in rcar_dmac_xfer_chunk_get()
676 * again, as someone else could race us to get the newly in rcar_dmac_xfer_chunk_get()
680 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_xfer_chunk_get()
684 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_xfer_chunk_get()
687 chunk = list_first_entry(&chan->desc.chunks_free, in rcar_dmac_xfer_chunk_get()
689 list_del(&chunk->node); in rcar_dmac_xfer_chunk_get()
691 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_xfer_chunk_get()
700 * dma_alloc_coherent() allocates memory in page size increments. To in rcar_dmac_realloc_hwdesc()
702  * wouldn't change, align the requested size to a multiple of the page in rcar_dmac_realloc_hwdesc()
707 if (desc->hwdescs.size == size) in rcar_dmac_realloc_hwdesc()
710 if (desc->hwdescs.mem) { in rcar_dmac_realloc_hwdesc()
711 dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size, in rcar_dmac_realloc_hwdesc()
712 desc->hwdescs.mem, desc->hwdescs.dma); in rcar_dmac_realloc_hwdesc()
713 desc->hwdescs.mem = NULL; in rcar_dmac_realloc_hwdesc()
714 desc->hwdescs.size = 0; in rcar_dmac_realloc_hwdesc()
720 desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size, in rcar_dmac_realloc_hwdesc()
721 &desc->hwdescs.dma, GFP_NOWAIT); in rcar_dmac_realloc_hwdesc()
722 if (!desc->hwdescs.mem) in rcar_dmac_realloc_hwdesc()
725 desc->hwdescs.size = size; in rcar_dmac_realloc_hwdesc()
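
/*
 * Editor's illustrative sketch (not part of the driver): the page-size
 * rounding described in the comment above.  Descriptor counts and the
 * 16-byte hardware descriptor size are hypothetical.
 */
#include <stdio.h>

#define DEMO_PAGE_SIZE  4096u
#define DEMO_PAGE_ALIGN(x) (((x) + DEMO_PAGE_SIZE - 1) & ~(DEMO_PAGE_SIZE - 1))

int main(void)
{
        unsigned int old_size = DEMO_PAGE_ALIGN(3 * 16);  /* 3 hardware descriptors */
        unsigned int new_size = DEMO_PAGE_ALIGN(7 * 16);  /* 7 hardware descriptors */

        /* Both round up to 4096, so the coherent buffer can be reused. */
        printf("reallocation needed: %s\n", old_size == new_size ? "no" : "yes");
        return 0;
}
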
734 rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc)); in rcar_dmac_fill_hwdesc()
736 hwdesc = desc->hwdescs.mem; in rcar_dmac_fill_hwdesc()
738 return -ENOMEM; in rcar_dmac_fill_hwdesc()
740 list_for_each_entry(chunk, &desc->chunks, node) { in rcar_dmac_fill_hwdesc()
741 hwdesc->sar = chunk->src_addr; in rcar_dmac_fill_hwdesc()
742 hwdesc->dar = chunk->dst_addr; in rcar_dmac_fill_hwdesc()
743 hwdesc->tcr = chunk->size >> desc->xfer_shift; in rcar_dmac_fill_hwdesc()
750 /* -----------------------------------------------------------------------------
769 dev_err(chan->chan.device->dev, "CHCR DE check error\n"); in rcar_dmac_chcr_de_barrier()
800 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_chan_reinit()
802 /* Move all non-free descriptors to the local lists. */ in rcar_dmac_chan_reinit()
803 list_splice_init(&chan->desc.pending, &descs); in rcar_dmac_chan_reinit()
804 list_splice_init(&chan->desc.active, &descs); in rcar_dmac_chan_reinit()
805 list_splice_init(&chan->desc.done, &descs); in rcar_dmac_chan_reinit()
806 list_splice_init(&chan->desc.wait, &descs); in rcar_dmac_chan_reinit()
808 chan->desc.running = NULL; in rcar_dmac_chan_reinit()
810 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_chan_reinit()
813 list_del(&desc->node); in rcar_dmac_chan_reinit()
823 for (i = 0; i < dmac->n_channels; ++i) { in rcar_dmac_stop_all_chan()
824 struct rcar_dmac_chan *chan = &dmac->channels[i]; in rcar_dmac_stop_all_chan()
826 if (!(dmac->channels_mask & BIT(i))) in rcar_dmac_stop_all_chan()
829 /* Stop and reinitialize the channel. */ in rcar_dmac_stop_all_chan()
830 spin_lock_irq(&chan->lock); in rcar_dmac_stop_all_chan()
832 spin_unlock_irq(&chan->lock); in rcar_dmac_stop_all_chan()
841 spin_lock_irqsave(&rchan->lock, flags); in rcar_dmac_chan_pause()
843 spin_unlock_irqrestore(&rchan->lock, flags); in rcar_dmac_chan_pause()
848 /* -----------------------------------------------------------------------------
865 switch (desc->direction) { in rcar_dmac_chan_configure_desc()
869 xfer_size = chan->src.xfer_size; in rcar_dmac_chan_configure_desc()
875 xfer_size = chan->dst.xfer_size; in rcar_dmac_chan_configure_desc()
886 desc->xfer_shift = ilog2(xfer_size); in rcar_dmac_chan_configure_desc()
887 desc->chcr = chcr | chcr_ts[desc->xfer_shift]; in rcar_dmac_chan_configure_desc()
891 * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
893 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
894 * converted to scatter-gather to guarantee consistent locking and a correct
895  * list manipulation. For slave DMA, direction carries its usual meaning, and, in rcar_dmac_chan_prep_sg()
923 desc->async_tx.flags = dma_flags; in rcar_dmac_chan_prep_sg()
924 desc->async_tx.cookie = -EBUSY; in rcar_dmac_chan_prep_sg()
926 desc->cyclic = cyclic; in rcar_dmac_chan_prep_sg()
927 desc->direction = dir; in rcar_dmac_chan_prep_sg()
931 max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift; in rcar_dmac_chan_prep_sg()
935  * reference to the DMA descriptor, so there's no need for locking. in rcar_dmac_chan_prep_sg()
961 if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) { in rcar_dmac_chan_prep_sg()
962 size = ALIGN(dev_addr, 1ULL << 32) - dev_addr; in rcar_dmac_chan_prep_sg()
965 if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) { in rcar_dmac_chan_prep_sg()
966 size = ALIGN(mem_addr, 1ULL << 32) - mem_addr; in rcar_dmac_chan_prep_sg()
978 chunk->src_addr = dev_addr; in rcar_dmac_chan_prep_sg()
979 chunk->dst_addr = mem_addr; in rcar_dmac_chan_prep_sg()
981 chunk->src_addr = mem_addr; in rcar_dmac_chan_prep_sg()
982 chunk->dst_addr = dev_addr; in rcar_dmac_chan_prep_sg()
985 chunk->size = size; in rcar_dmac_chan_prep_sg()
987 dev_dbg(chan->chan.device->dev, in rcar_dmac_chan_prep_sg()
988 "chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n", in rcar_dmac_chan_prep_sg()
989 chan->index, chunk, desc, i, sg, size, len, in rcar_dmac_chan_prep_sg()
990 &chunk->src_addr, &chunk->dst_addr); in rcar_dmac_chan_prep_sg()
996 len -= size; in rcar_dmac_chan_prep_sg()
998 list_add_tail(&chunk->node, &desc->chunks); in rcar_dmac_chan_prep_sg()
1003 desc->nchunks = nchunks; in rcar_dmac_chan_prep_sg()
1004 desc->size = full_size; in rcar_dmac_chan_prep_sg()
1008 * needs to be transferred (otherwise they don't make much sense). in rcar_dmac_chan_prep_sg()
1014 desc->hwdescs.use = !cross_boundary && nchunks > 1; in rcar_dmac_chan_prep_sg()
1015 if (desc->hwdescs.use) { in rcar_dmac_chan_prep_sg()
1017 desc->hwdescs.use = false; in rcar_dmac_chan_prep_sg()
1020 return &desc->async_tx; in rcar_dmac_chan_prep_sg()
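
/*
 * Editor's illustrative sketch (not part of the driver): the chunk
 * splitting rule used above, simplified to a single address.  A chunk may
 * not exceed the maximum transfer count and may not cross a 4 GiB
 * boundary, because only the low 32 bits of the addresses are
 * reprogrammed per chunk.  Addresses and sizes are made up.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

static uint32_t demo_chunk_size(uint64_t addr, uint32_t len, uint32_t max_chunk)
{
        uint32_t size = len < max_chunk ? len : max_chunk;

        /* Shrink the chunk so it does not cross a 4 GiB boundary. */
        if (addr >> 32 != (addr + size - 1) >> 32)
                size = DEMO_ALIGN_UP(addr, 1ULL << 32) - addr;

        return size;
}

int main(void)
{
        uint64_t addr = 0xfffff000;     /* 4 KiB below the 4 GiB mark */
        uint32_t len = 0x4000;          /* 16 KiB left in this segment */

        while (len) {
                uint32_t size = demo_chunk_size(addr, len, 0x1000000);

                printf("chunk: 0x%llx + 0x%x\n", (unsigned long long)addr, size);
                addr += size;
                len -= size;
        }
        return 0;
}
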
1023 /* -----------------------------------------------------------------------------
1024 * DMA engine operations
1032 INIT_LIST_HEAD(&rchan->desc.chunks_free); in rcar_dmac_alloc_chan_resources()
1033 INIT_LIST_HEAD(&rchan->desc.pages); in rcar_dmac_alloc_chan_resources()
1038 return -ENOMEM; in rcar_dmac_alloc_chan_resources()
1042 return -ENOMEM; in rcar_dmac_alloc_chan_resources()
1044 return pm_runtime_get_sync(chan->device->dev); in rcar_dmac_alloc_chan_resources()
1050 struct rcar_dmac *dmac = to_rcar_dmac(chan->device); in rcar_dmac_free_chan_resources()
1051 struct rcar_dmac_chan_map *map = &rchan->map; in rcar_dmac_free_chan_resources() local
1057 spin_lock_irq(&rchan->lock); in rcar_dmac_free_chan_resources()
1059 spin_unlock_irq(&rchan->lock); in rcar_dmac_free_chan_resources()
1063 * running. Wait for it to finish before freeing resources. in rcar_dmac_free_chan_resources()
1065 synchronize_irq(rchan->irq); in rcar_dmac_free_chan_resources()
1067 if (rchan->mid_rid >= 0) { in rcar_dmac_free_chan_resources()
1069 clear_bit(rchan->mid_rid, dmac->modules); in rcar_dmac_free_chan_resources()
1070 rchan->mid_rid = -EINVAL; in rcar_dmac_free_chan_resources()
1073 list_splice_init(&rchan->desc.free, &list); in rcar_dmac_free_chan_resources()
1074 list_splice_init(&rchan->desc.pending, &list); in rcar_dmac_free_chan_resources()
1075 list_splice_init(&rchan->desc.active, &list); in rcar_dmac_free_chan_resources()
1076 list_splice_init(&rchan->desc.done, &list); in rcar_dmac_free_chan_resources()
1077 list_splice_init(&rchan->desc.wait, &list); in rcar_dmac_free_chan_resources()
1079 rchan->desc.running = NULL; in rcar_dmac_free_chan_resources()
1084 list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) { in rcar_dmac_free_chan_resources()
1085 list_del(&page->node); in rcar_dmac_free_chan_resources()
1090 if (map->slave.xfer_size) { in rcar_dmac_free_chan_resources()
1091 dma_unmap_resource(chan->device->dev, map->addr, in rcar_dmac_free_chan_resources()
1092 map->slave.xfer_size, map->dir, 0); in rcar_dmac_free_chan_resources()
1093 map->slave.xfer_size = 0; in rcar_dmac_free_chan_resources()
1096 pm_runtime_put(chan->device->dev); in rcar_dmac_free_chan_resources()
1123 struct rcar_dmac_chan_map *map = &rchan->map; in rcar_dmac_map_slave_addr() local
1129 dev_addr = rchan->src.slave_addr; in rcar_dmac_map_slave_addr()
1130 dev_size = rchan->src.xfer_size; in rcar_dmac_map_slave_addr()
1133 dev_addr = rchan->dst.slave_addr; in rcar_dmac_map_slave_addr()
1134 dev_size = rchan->dst.xfer_size; in rcar_dmac_map_slave_addr()
1138 /* Reuse current map if possible. */ in rcar_dmac_map_slave_addr()
1139 if (dev_addr == map->slave.slave_addr && in rcar_dmac_map_slave_addr()
1140 dev_size == map->slave.xfer_size && in rcar_dmac_map_slave_addr()
1141 dev_dir == map->dir) in rcar_dmac_map_slave_addr()
1145 if (map->slave.xfer_size) in rcar_dmac_map_slave_addr()
1146 dma_unmap_resource(chan->device->dev, map->addr, in rcar_dmac_map_slave_addr()
1147 map->slave.xfer_size, map->dir, 0); in rcar_dmac_map_slave_addr()
1148 map->slave.xfer_size = 0; in rcar_dmac_map_slave_addr()
1150 /* Create new slave address map. */ in rcar_dmac_map_slave_addr()
1151 map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size, in rcar_dmac_map_slave_addr()
1154 if (dma_mapping_error(chan->device->dev, map->addr)) { in rcar_dmac_map_slave_addr()
1155 dev_err(chan->device->dev, in rcar_dmac_map_slave_addr()
1156 "chan%u: failed to map %zx@%pap", rchan->index, in rcar_dmac_map_slave_addr()
1158 return -EIO; in rcar_dmac_map_slave_addr()
1161 dev_dbg(chan->device->dev, "chan%u: map %zx@%pap to %pad dir: %s\n", in rcar_dmac_map_slave_addr()
1162 rchan->index, dev_size, &dev_addr, &map->addr, in rcar_dmac_map_slave_addr()
1165 map->slave.slave_addr = dev_addr; in rcar_dmac_map_slave_addr()
1166 map->slave.xfer_size = dev_size; in rcar_dmac_map_slave_addr()
1167 map->dir = dev_dir; in rcar_dmac_map_slave_addr()
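
/*
 * Editor's illustrative sketch (not part of the driver): mapping a slave
 * device FIFO with dma_map_resource(), which is what the function above
 * caches per channel.  The physical address and transfer width are
 * hypothetical.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>

static dma_addr_t demo_map_fifo(struct device *dev)
{
        phys_addr_t fifo_phys = 0xfeed0000;     /* hypothetical slave register */
        dma_addr_t addr;

        addr = dma_map_resource(dev, fifo_phys, 4, DMA_TO_DEVICE, 0);
        if (dma_mapping_error(dev, addr))
                return DMA_MAPPING_ERROR;

        /* Released later with dma_unmap_resource(dev, addr, 4, DMA_TO_DEVICE, 0). */
        return addr;
}
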
1179 /* Someone calling slave DMA on a generic channel? */ in rcar_dmac_prep_slave_sg()
1180 if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) { in rcar_dmac_prep_slave_sg()
1181 dev_warn(chan->device->dev, in rcar_dmac_prep_slave_sg()
1183 __func__, sg_len, rchan->mid_rid); in rcar_dmac_prep_slave_sg()
1190 return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr, in rcar_dmac_prep_slave_sg()
1207 /* Someone calling slave DMA on a generic channel? */ in rcar_dmac_prep_dma_cyclic()
1208 if (rchan->mid_rid < 0 || buf_len < period_len) { in rcar_dmac_prep_dma_cyclic()
1209 dev_warn(chan->device->dev, in rcar_dmac_prep_dma_cyclic()
1211 __func__, buf_len, period_len, rchan->mid_rid); in rcar_dmac_prep_dma_cyclic()
1220 dev_err(chan->device->dev, in rcar_dmac_prep_dma_cyclic()
1222 rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN); in rcar_dmac_prep_dma_cyclic()
1245 desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr, in rcar_dmac_prep_dma_cyclic()
1259 * channel, while using it... in rcar_dmac_device_config()
1261 rchan->src.slave_addr = cfg->src_addr; in rcar_dmac_device_config()
1262 rchan->dst.slave_addr = cfg->dst_addr; in rcar_dmac_device_config()
1263 rchan->src.xfer_size = cfg->src_addr_width; in rcar_dmac_device_config()
1264 rchan->dst.xfer_size = cfg->dst_addr_width; in rcar_dmac_device_config()
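
/*
 * Editor's illustrative sketch (not part of the driver): the client-side
 * calls that end up in the callbacks around here.  "demo_start_tx", the
 * FIFO address and the scatterlist come from a hypothetical client; only
 * the dmaengine calls themselves are real kernel API.
 */
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int demo_start_tx(struct device *dev, struct scatterlist *sgl,
                         unsigned int sg_len)
{
        struct dma_slave_config cfg = {
                .dst_addr       = 0xfeed0000,   /* hypothetical device FIFO */
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        };
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *chan;

        chan = dma_request_chan(dev, "tx");     /* resolved via rcar_dmac_of_xlate() */
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        dmaengine_slave_config(chan, &cfg);     /* rcar_dmac_device_config() */

        tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
                                     DMA_PREP_INTERRUPT);
        if (!tx) {
                dma_release_channel(chan);
                return -EIO;
        }

        dmaengine_submit(tx);                   /* rcar_dmac_tx_submit() */
        dma_async_issue_pending(chan);          /* rcar_dmac_issue_pending() */
        return 0;
}
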
1274 spin_lock_irqsave(&rchan->lock, flags); in rcar_dmac_chan_terminate_all()
1276 spin_unlock_irqrestore(&rchan->lock, flags); in rcar_dmac_chan_terminate_all()
1291 struct rcar_dmac_desc *desc = chan->desc.running; in rcar_dmac_chan_get_residue()
1305 * If the cookie corresponds to a descriptor that has been completed in rcar_dmac_chan_get_residue()
1307 * caller but without holding the channel lock, so the descriptor could in rcar_dmac_chan_get_residue()
1310 status = dma_cookie_status(&chan->chan, cookie, NULL); in rcar_dmac_chan_get_residue()
1315 * If the cookie doesn't correspond to the currently running transfer in rcar_dmac_chan_get_residue()
1317 * equal to the full descriptor size. in rcar_dmac_chan_get_residue()
1318  * Also, a client driver may call this function before in rcar_dmac_chan_get_residue()
1324 if (cookie != desc->async_tx.cookie) { in rcar_dmac_chan_get_residue()
1325 list_for_each_entry(desc, &chan->desc.done, node) { in rcar_dmac_chan_get_residue()
1326 if (cookie == desc->async_tx.cookie) in rcar_dmac_chan_get_residue()
1329 list_for_each_entry(desc, &chan->desc.pending, node) { in rcar_dmac_chan_get_residue()
1330 if (cookie == desc->async_tx.cookie) in rcar_dmac_chan_get_residue()
1331 return desc->size; in rcar_dmac_chan_get_residue()
1333 list_for_each_entry(desc, &chan->desc.active, node) { in rcar_dmac_chan_get_residue()
1334 if (cookie == desc->async_tx.cookie) in rcar_dmac_chan_get_residue()
1335 return desc->size; in rcar_dmac_chan_get_residue()
1348 * We need to read two registers. in rcar_dmac_chan_get_residue()
1349  * Make sure the control register does not skip to the next chunk in rcar_dmac_chan_get_residue()
1368 * descriptor pointer field in the CHCRB register. In non-descriptor in rcar_dmac_chan_get_residue()
1371 if (desc->hwdescs.use) { in rcar_dmac_chan_get_residue()
1374 dptr = desc->nchunks; in rcar_dmac_chan_get_residue()
1375 dptr--; in rcar_dmac_chan_get_residue()
1376 WARN_ON(dptr >= desc->nchunks); in rcar_dmac_chan_get_residue()
1378 running = desc->running; in rcar_dmac_chan_get_residue()
1381 /* Compute the size of all chunks still to be transferred. */ in rcar_dmac_chan_get_residue()
1382 list_for_each_entry_reverse(chunk, &desc->chunks, node) { in rcar_dmac_chan_get_residue()
1383 if (chunk == running || ++dptr == desc->nchunks) in rcar_dmac_chan_get_residue()
1386 residue += chunk->size; in rcar_dmac_chan_get_residue()
1390 residue += tcrb << desc->xfer_shift; in rcar_dmac_chan_get_residue()
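
/*
 * Editor's illustrative sketch (not part of the driver): how the residue
 * above is assembled from the not-yet-transferred chunks plus the TCRB
 * count of the running chunk.  All values are hypothetical.
 */
#include <stdio.h>

int main(void)
{
        unsigned int chunk_size[] = { 4096, 4096, 2048 };       /* chunk sizes in bytes */
        unsigned int running = 1;       /* index of the chunk currently in flight */
        unsigned int xfer_shift = 2;    /* 4-byte transfer units */
        unsigned int tcrb = 256;        /* transfer units left in the running chunk */
        unsigned int residue = 0;
        unsigned int i;

        /* Chunks after the running one are still fully pending. */
        for (i = running + 1; i < sizeof(chunk_size) / sizeof(chunk_size[0]); i++)
                residue += chunk_size[i];

        /* Plus whatever the hardware has not yet moved from the running chunk. */
        residue += tcrb << xfer_shift;

        printf("residue: %u bytes\n", residue); /* 2048 + 256 * 4 = 3072 */
        return 0;
}
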
1409 spin_lock_irqsave(&rchan->lock, flags); in rcar_dmac_tx_status()
1411 cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false; in rcar_dmac_tx_status()
1412 spin_unlock_irqrestore(&rchan->lock, flags); in rcar_dmac_tx_status()
1428 spin_lock_irqsave(&rchan->lock, flags); in rcar_dmac_issue_pending()
1430 if (list_empty(&rchan->desc.pending)) in rcar_dmac_issue_pending()
1433 /* Append the pending list to the active list. */ in rcar_dmac_issue_pending()
1434 list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active); in rcar_dmac_issue_pending()
1440 if (!rchan->desc.running) { in rcar_dmac_issue_pending()
1443 desc = list_first_entry(&rchan->desc.active, in rcar_dmac_issue_pending()
1445 rchan->desc.running = desc; in rcar_dmac_issue_pending()
1451 spin_unlock_irqrestore(&rchan->lock, flags); in rcar_dmac_issue_pending()
1458 synchronize_irq(rchan->irq); in rcar_dmac_device_synchronize()
1461 /* -----------------------------------------------------------------------------
1467 struct rcar_dmac_desc *desc = chan->desc.running; in rcar_dmac_isr_desc_stage_end()
1470 if (WARN_ON(!desc || !desc->cyclic)) { in rcar_dmac_isr_desc_stage_end()
1479 /* Program the interrupt pointer to the next stage. */ in rcar_dmac_isr_desc_stage_end()
1489 struct rcar_dmac_desc *desc = chan->desc.running; in rcar_dmac_isr_transfer_end()
1504 * non-descriptor mode. in rcar_dmac_isr_transfer_end()
1506 if (!desc->hwdescs.use) { in rcar_dmac_isr_transfer_end()
1509 * to the next one. Only wake the IRQ thread if the transfer is in rcar_dmac_isr_transfer_end()
1512 if (!list_is_last(&desc->running->node, &desc->chunks)) { in rcar_dmac_isr_transfer_end()
1513 desc->running = list_next_entry(desc->running, node); in rcar_dmac_isr_transfer_end()
1514 if (!desc->cyclic) in rcar_dmac_isr_transfer_end()
1521 * cyclic, move back to the first one. in rcar_dmac_isr_transfer_end()
1523 if (desc->cyclic) { in rcar_dmac_isr_transfer_end()
1524 desc->running = in rcar_dmac_isr_transfer_end()
1525 list_first_entry(&desc->chunks, in rcar_dmac_isr_transfer_end()
1532 /* The descriptor is complete, move it to the done list. */ in rcar_dmac_isr_transfer_end()
1533 list_move_tail(&desc->node, &chan->desc.done); in rcar_dmac_isr_transfer_end()
1536 if (!list_empty(&chan->desc.active)) in rcar_dmac_isr_transfer_end()
1537 chan->desc.running = list_first_entry(&chan->desc.active, in rcar_dmac_isr_transfer_end()
1541 chan->desc.running = NULL; in rcar_dmac_isr_transfer_end()
1544 if (chan->desc.running) in rcar_dmac_isr_transfer_end()
1558 spin_lock(&chan->lock); in rcar_dmac_isr_channel()
1562 struct rcar_dmac *dmac = to_rcar_dmac(chan->chan.device); in rcar_dmac_isr_channel()
1565 * We don't need to call rcar_dmac_chan_halt() in rcar_dmac_isr_channel()
1566  * because the channel is already stopped in the error case. in rcar_dmac_isr_channel()
1567  * We need to clear the register and check the DE bit as recovery. in rcar_dmac_isr_channel()
1569 rcar_dmac_write(dmac, RCAR_DMACHCLR, 1 << chan->index); in rcar_dmac_isr_channel()
1588 spin_unlock(&chan->lock); in rcar_dmac_isr_channel()
1591 dev_err(chan->chan.device->dev, "Channel Address Error\n"); in rcar_dmac_isr_channel()
1606 spin_lock_irq(&chan->lock); in rcar_dmac_isr_channel_thread()
1609 if (chan->desc.running && chan->desc.running->cyclic) { in rcar_dmac_isr_channel_thread()
1610 desc = chan->desc.running; in rcar_dmac_isr_channel_thread()
1611 dmaengine_desc_get_callback(&desc->async_tx, &cb); in rcar_dmac_isr_channel_thread()
1614 spin_unlock_irq(&chan->lock); in rcar_dmac_isr_channel_thread()
1616 spin_lock_irq(&chan->lock); in rcar_dmac_isr_channel_thread()
1622 * move them to the ack wait list. in rcar_dmac_isr_channel_thread()
1624 while (!list_empty(&chan->desc.done)) { in rcar_dmac_isr_channel_thread()
1625 desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc, in rcar_dmac_isr_channel_thread()
1627 dma_cookie_complete(&desc->async_tx); in rcar_dmac_isr_channel_thread()
1628 list_del(&desc->node); in rcar_dmac_isr_channel_thread()
1630 dmaengine_desc_get_callback(&desc->async_tx, &cb); in rcar_dmac_isr_channel_thread()
1632 spin_unlock_irq(&chan->lock); in rcar_dmac_isr_channel_thread()
1634  * We own the only reference to this descriptor, so we can in rcar_dmac_isr_channel_thread()
1635 * safely dereference it without holding the channel in rcar_dmac_isr_channel_thread()
1639 spin_lock_irq(&chan->lock); in rcar_dmac_isr_channel_thread()
1642 list_add_tail(&desc->node, &chan->desc.wait); in rcar_dmac_isr_channel_thread()
1645 spin_unlock_irq(&chan->lock); in rcar_dmac_isr_channel_thread()
1653 /* -----------------------------------------------------------------------------
1654 * OF xlate and channel filter
1659 struct rcar_dmac *dmac = to_rcar_dmac(chan->device); in rcar_dmac_chan_filter()
1664  * function knows which device it wants to allocate a channel from, in rcar_dmac_chan_filter()
1665 * and would be perfectly capable of selecting the channel it wants. in rcar_dmac_chan_filter()
1666 * Forcing it to call dma_request_channel() and iterate through all in rcar_dmac_chan_filter()
1669 if (chan->device->device_config != rcar_dmac_device_config) in rcar_dmac_chan_filter()
1672 return !test_and_set_bit(dma_spec->args[0], dmac->modules); in rcar_dmac_chan_filter()
1682 if (dma_spec->args_count != 1) in rcar_dmac_of_xlate()
1685 /* Only slave DMA channels can be allocated via DT */ in rcar_dmac_of_xlate()
1690 ofdma->of_node); in rcar_dmac_of_xlate()
1695 rchan->mid_rid = dma_spec->args[0]; in rcar_dmac_of_xlate()
1700 /* -----------------------------------------------------------------------------
1721 * - Wait for the current transfer to complete and stop the device,
1722 * - Resume transfers, if any.
1730 /* -----------------------------------------------------------------------------
1739 struct platform_device *pdev = to_platform_device(dmac->dev); in rcar_dmac_chan_probe()
1740 struct dma_chan *chan = &rchan->chan; in rcar_dmac_chan_probe()
1745 rchan->index = index; in rcar_dmac_chan_probe()
1746 rchan->iomem = dmac->iomem + data->chan_offset_base + in rcar_dmac_chan_probe()
1747 data->chan_offset_stride * index; in rcar_dmac_chan_probe()
1748 rchan->mid_rid = -EINVAL; in rcar_dmac_chan_probe()
1750 spin_lock_init(&rchan->lock); in rcar_dmac_chan_probe()
1752 INIT_LIST_HEAD(&rchan->desc.free); in rcar_dmac_chan_probe()
1753 INIT_LIST_HEAD(&rchan->desc.pending); in rcar_dmac_chan_probe()
1754 INIT_LIST_HEAD(&rchan->desc.active); in rcar_dmac_chan_probe()
1755 INIT_LIST_HEAD(&rchan->desc.done); in rcar_dmac_chan_probe()
1756 INIT_LIST_HEAD(&rchan->desc.wait); in rcar_dmac_chan_probe()
1758 /* Request the channel interrupt. */ in rcar_dmac_chan_probe()
1760 rchan->irq = platform_get_irq_byname(pdev, pdev_irqname); in rcar_dmac_chan_probe()
1761 if (rchan->irq < 0) in rcar_dmac_chan_probe()
1762 return -ENODEV; in rcar_dmac_chan_probe()
1764 irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u", in rcar_dmac_chan_probe()
1765 dev_name(dmac->dev), index); in rcar_dmac_chan_probe()
1767 return -ENOMEM; in rcar_dmac_chan_probe()
1770 * Initialize the DMA engine channel and add it to the DMA engine in rcar_dmac_chan_probe()
1773 chan->device = &dmac->engine; in rcar_dmac_chan_probe()
1776 list_add_tail(&chan->device_node, &dmac->engine.channels); in rcar_dmac_chan_probe()
1778 ret = devm_request_threaded_irq(dmac->dev, rchan->irq, in rcar_dmac_chan_probe()
1783 dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", in rcar_dmac_chan_probe()
1784 rchan->irq, ret); in rcar_dmac_chan_probe()
1795 struct device_node *np = dev->of_node; in rcar_dmac_parse_of()
1798 ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels); in rcar_dmac_parse_of()
1800 dev_err(dev, "unable to read dma-channels property\n"); in rcar_dmac_parse_of()
1805 if (dmac->n_channels <= 0 || in rcar_dmac_parse_of()
1806 dmac->n_channels >= RCAR_DMAC_MAX_CHANNELS) { in rcar_dmac_parse_of()
1808 dmac->n_channels); in rcar_dmac_parse_of()
1809 return -EINVAL; in rcar_dmac_parse_of()
1813  * If the driver is unable to read the dma-channel-mask property, in rcar_dmac_parse_of()
1816 dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0); in rcar_dmac_parse_of()
1817 of_property_read_u32(np, "dma-channel-mask", &dmac->channels_mask); in rcar_dmac_parse_of()
1819  /* Clear any mask bits for channels that do not exist */ in rcar_dmac_parse_of()
1820 dmac->channels_mask &= GENMASK(dmac->n_channels - 1, 0); in rcar_dmac_parse_of()
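
/*
 * Editor's illustrative sketch (not part of the driver): how the channel
 * mask above is derived from the two DT properties.  The DT values are
 * made up; the macro mirrors the kernel's GENMASK() for 32-bit values.
 */
#include <stdio.h>

#define DEMO_GENMASK(h, l) (((~0u) >> (31 - (h))) & (~0u << (l)))

int main(void)
{
        unsigned int n_channels = 8;    /* from "dma-channels" */
        unsigned int dt_mask = 0x3f7;   /* from "dma-channel-mask" */
        unsigned int channels_mask;

        /* Default to all existing channels, then apply the DT mask. */
        channels_mask = DEMO_GENMASK(n_channels - 1, 0) & dt_mask;
        printf("channels_mask: %#x\n", channels_mask);  /* 0xf7 */
        return 0;
}
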
1837 data = of_device_get_match_data(&pdev->dev); in rcar_dmac_probe()
1839 return -EINVAL; in rcar_dmac_probe()
1841 dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL); in rcar_dmac_probe()
1843 return -ENOMEM; in rcar_dmac_probe()
1845 dmac->dev = &pdev->dev; in rcar_dmac_probe()
1847 dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK); in rcar_dmac_probe()
1848 dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); in rcar_dmac_probe()
1850 ret = rcar_dmac_parse_of(&pdev->dev, dmac); in rcar_dmac_probe()
1855  * A still unconfirmed hardware bug prevents the IPMMU microTLB 0 from being in rcar_dmac_probe()
1856 * flushed correctly, resulting in memory corruption. DMAC 0 channel 0 in rcar_dmac_probe()
1857 * is connected to microTLB 0 on currently supported platforms, so we in rcar_dmac_probe()
1859 * level we can't disable it selectively, so ignore channel 0 for now if in rcar_dmac_probe()
1862 if (device_iommu_mapped(&pdev->dev)) in rcar_dmac_probe()
1863 dmac->channels_mask &= ~BIT(0); in rcar_dmac_probe()
1865 dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels, in rcar_dmac_probe()
1866 sizeof(*dmac->channels), GFP_KERNEL); in rcar_dmac_probe()
1867 if (!dmac->channels) in rcar_dmac_probe()
1868 return -ENOMEM; in rcar_dmac_probe()
1871 dmac->iomem = devm_platform_ioremap_resource(pdev, 0); in rcar_dmac_probe()
1872 if (IS_ERR(dmac->iomem)) in rcar_dmac_probe()
1873 return PTR_ERR(dmac->iomem); in rcar_dmac_probe()
1876 pm_runtime_enable(&pdev->dev); in rcar_dmac_probe()
1877 ret = pm_runtime_get_sync(&pdev->dev); in rcar_dmac_probe()
1879 dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret); in rcar_dmac_probe()
1884 pm_runtime_put(&pdev->dev); in rcar_dmac_probe()
1887 dev_err(&pdev->dev, "failed to reset device\n"); in rcar_dmac_probe()
1892 engine = &dmac->engine; in rcar_dmac_probe()
1894 dma_cap_set(DMA_MEMCPY, engine->cap_mask); in rcar_dmac_probe()
1895 dma_cap_set(DMA_SLAVE, engine->cap_mask); in rcar_dmac_probe()
1897 engine->dev = &pdev->dev; in rcar_dmac_probe()
1898 engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE); in rcar_dmac_probe()
1900 engine->src_addr_widths = widths; in rcar_dmac_probe()
1901 engine->dst_addr_widths = widths; in rcar_dmac_probe()
1902 engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); in rcar_dmac_probe()
1903 engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; in rcar_dmac_probe()
1905 engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources; in rcar_dmac_probe()
1906 engine->device_free_chan_resources = rcar_dmac_free_chan_resources; in rcar_dmac_probe()
1907 engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy; in rcar_dmac_probe()
1908 engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg; in rcar_dmac_probe()
1909 engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic; in rcar_dmac_probe()
1910 engine->device_config = rcar_dmac_device_config; in rcar_dmac_probe()
1911 engine->device_pause = rcar_dmac_chan_pause; in rcar_dmac_probe()
1912 engine->device_terminate_all = rcar_dmac_chan_terminate_all; in rcar_dmac_probe()
1913 engine->device_tx_status = rcar_dmac_tx_status; in rcar_dmac_probe()
1914 engine->device_issue_pending = rcar_dmac_issue_pending; in rcar_dmac_probe()
1915 engine->device_synchronize = rcar_dmac_device_synchronize; in rcar_dmac_probe()
1917 INIT_LIST_HEAD(&engine->channels); in rcar_dmac_probe()
1919 for (i = 0; i < dmac->n_channels; ++i) { in rcar_dmac_probe()
1920 if (!(dmac->channels_mask & BIT(i))) in rcar_dmac_probe()
1923 ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], data, i); in rcar_dmac_probe()
1928 /* Register the DMAC as a DMA provider for DT. */ in rcar_dmac_probe()
1929 ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate, in rcar_dmac_probe()
1935 * Register the DMA engine device. in rcar_dmac_probe()
1937 * Default transfer size of 32 bytes requires 32-byte alignment. in rcar_dmac_probe()
1946 of_dma_controller_free(pdev->dev.of_node); in rcar_dmac_probe()
1947 pm_runtime_disable(&pdev->dev); in rcar_dmac_probe()
1955 of_dma_controller_free(pdev->dev.of_node); in rcar_dmac_remove()
1956 dma_async_device_unregister(&dmac->engine); in rcar_dmac_remove()
1958 pm_runtime_disable(&pdev->dev); in rcar_dmac_remove()
1977 .compatible = "renesas,rcar-dmac",
1987 .name = "rcar-dmac",
1997 MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");