Lines matching "control" and "channel" in the EP93xx DMA engine driver (drivers/dma/ep93xx_dma.c)
// SPDX-License-Identifier: GPL-2.0-or-later

 * arch/arm/mach-ep93xx/dma-m2p.c which has the following copyrights:

 * This driver is based on dw_dmac and amba-pl08x drivers.

#include <linux/platform_data/dma-ep93xx.h>
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @node: link used for putting this into a channel queue

 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @flags: flags for the channel
 * @runtime_ctrl: M2M runtime values for the control register.
 * necessary channel configuration information. For memcpy channels this must
 * be %NULL.

/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC            0
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @hw_setup: method which sets the channel up for operation
 * @hw_synchronize: synchronizes DMA channel termination to current context
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * different on M2M and M2P channels. These methods are called with channel
 * lock held and interrupts disabled so they should not sleep.
/* chan2dev() */
        return &edmac->chan.dev->device;
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * Called with @edmac->lock held and interrupts disabled.

        BUG_ON(!list_empty(&edmac->active));

        list_add_tail(&desc->node, &edmac->active);

        /* Flatten the @desc->tx_list chain into @edmac->active list */
        while (!list_empty(&desc->tx_list)) {
                struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
                        struct ep93xx_dma_desc, node);

                /*
                 * Copy the callback information to every chained descriptor
                 * so that the callback can be invoked without walking back
                 * to the head of the chain (useful for cyclic transfers).
                 */
                d->txd.callback = desc->txd.callback;
                d->txd.callback_param = desc->txd.callback_param;

                list_move_tail(&d->node, &edmac->active);
        }
/* ep93xx_dma_get_active(): called with @edmac->lock held and interrupts disabled */
        return list_first_entry_or_null(&edmac->active,
                                        struct ep93xx_dma_desc, node);
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 * Function advances active descriptor to the next in the @edmac->active and
 * returns %true if we still have descriptors in the chain to process.
 * When the channel is in cyclic mode always returns %true.
 * Called with @edmac->lock held and interrupts disabled.

        list_rotate_left(&edmac->active);

        if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
                return true;

        /*
         * If txd.cookie is set it means that we are back in the first
         * descriptor in the chain and hence done with it.
         */
        return !desc->txd.cookie;
static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
        writel(control, edmac->regs + M2P_CONTROL);

        /*
         * EP93xx User's Guide states that we must perform a dummy read after
         * write to the control register.
         */
        readl(edmac->regs + M2P_CONTROL);
}
/* m2p_hw_setup() */
        struct ep93xx_dma_data *data = edmac->chan.private;
        u32 control;

        writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

        control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
                | M2P_CONTROL_ENABLE;
        m2p_set_control(edmac, control);

        edmac->buffer = 0;
/* m2p_channel_state() */
        return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
/* m2p_hw_synchronize() */
        u32 control;

        spin_lock_irqsave(&edmac->lock, flags);
        control = readl(edmac->regs + M2P_CONTROL);
        control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
        m2p_set_control(edmac, control);
        spin_unlock_irqrestore(&edmac->lock, flags);
/* m2p_fill_desc() */
        if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
                bus_addr = desc->src_addr;
        else
                bus_addr = desc->dst_addr;

        if (edmac->buffer == 0) {
                writel(desc->size, edmac->regs + M2P_MAXCNT0);
                writel(bus_addr, edmac->regs + M2P_BASE0);
        } else {
                writel(desc->size, edmac->regs + M2P_MAXCNT1);
                writel(bus_addr, edmac->regs + M2P_BASE1);
        }

        edmac->buffer ^= 1;
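/*
 * Note (inferred from the code above, hedged): the M2P engine has two
 * descriptor register banks (BASE0/MAXCNT0 and BASE1/MAXCNT1). m2p_fill_desc()
 * ping-pongs between them with the single toggle bit @edmac->buffer, so one
 * bank can be programmed while the other drains; the NFB ("next frame buffer")
 * interrupt requested in m2p_hw_submit() below signals when the inactive bank
 * should be refilled.
 */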
/* m2p_hw_submit() */
        u32 control = readl(edmac->regs + M2P_CONTROL);

        control |= M2P_CONTROL_STALLINT;

        control |= M2P_CONTROL_NFBINT;

        m2p_set_control(edmac, control);
/* m2p_hw_interrupt() */
        u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
        u32 control;

        /* Clear the error interrupt */
        writel(1, edmac->regs + M2P_INTERRUPT);

        /* ...tail of the dev_err() transfer-failure report: */
                desc->txd.cookie, desc->src_addr, desc->dst_addr,
                desc->size);

        control = readl(edmac->regs + M2P_CONTROL);
        control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
        m2p_set_control(edmac, control);
/* m2m_hw_setup() */
        const struct ep93xx_dma_data *data = edmac->chan.private;
        u32 control = 0;

        if (!data) {
                /* This is a memcpy channel, nothing to configure */
                writel(control, edmac->regs + M2M_CONTROL);
                return 0;
        }

        switch (data->port) {
        case EP93XX_DMA_SSP:
                /*
                 * This was found by experimenting - anything less than 5
                 * causes the channel to perform only a partial transfer which
                 * leads to problems since we don't get DONE interrupt then.
                 */
                control = (5 << M2M_CONTROL_PWSC_SHIFT);
                control |= M2M_CONTROL_NO_HDSK;

                if (data->direction == DMA_MEM_TO_DEV) {
                        control |= M2M_CONTROL_DAH;
                        control |= M2M_CONTROL_TM_TX;
                        control |= M2M_CONTROL_RSS_SSPTX;
                } else {
                        control |= M2M_CONTROL_SAH;
                        control |= M2M_CONTROL_TM_RX;
                        control |= M2M_CONTROL_RSS_SSPRX;
                }
                break;

        case EP93XX_DMA_IDE:
                if (data->direction == DMA_MEM_TO_DEV) {
                        control = (3 << M2M_CONTROL_PWSC_SHIFT);
                        control |= M2M_CONTROL_DAH;
                        control |= M2M_CONTROL_TM_TX;
                } else {
                        control = (2 << M2M_CONTROL_PWSC_SHIFT);
                        control |= M2M_CONTROL_SAH;
                        control |= M2M_CONTROL_TM_RX;
                }

                control |= M2M_CONTROL_NO_HDSK;
                control |= M2M_CONTROL_RSS_IDE;
                control |= M2M_CONTROL_PW_16;
                break;

        default:
                return -EINVAL;
        }

        writel(control, edmac->regs + M2M_CONTROL);
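/*
 * A hedged client-side sketch (not part of this file): hardware-backed M2M
 * channels are selected by handing a &struct ep93xx_dma_data to the channel
 * via chan->private, which m2m_hw_setup() above consumes. The filter and
 * helper names below are illustrative only.
 */
static bool ep93xx_dma_example_filter(struct dma_chan *chan, void *filter_param)
{
        /* Attach the channel configuration; the driver reads chan->private */
        chan->private = filter_param;
        return true;
}

static struct dma_chan *example_request_ide_tx_channel(void)
{
        static struct ep93xx_dma_data data = {
                .port           = EP93XX_DMA_IDE,
                .direction      = DMA_MEM_TO_DEV,
                .name           = "ide-tx",
        };
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        return dma_request_channel(mask, ep93xx_dma_example_filter, &data);
}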
/* m2m_hw_shutdown() */
        /* Just disable the channel */
        writel(0, edmac->regs + M2M_CONTROL);
/* m2m_fill_desc() */
        if (edmac->buffer == 0) {
                writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
                writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
                writel(desc->size, edmac->regs + M2M_BCR0);
        } else {
                writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
                writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
                writel(desc->size, edmac->regs + M2M_BCR1);
        }

        edmac->buffer ^= 1;
/* m2m_hw_submit() */
        struct ep93xx_dma_data *data = edmac->chan.private;
        u32 control = readl(edmac->regs + M2M_CONTROL);

        /* Apply the runtime peripheral-width configuration */
        control &= ~M2M_CONTROL_PW_MASK;
        control |= edmac->runtime_ctrl;

        control |= M2M_CONTROL_DONEINT;

        control |= M2M_CONTROL_NFBINT;

        /*
         * Now we can finally enable the channel. For M2M channel this must be
         * done _after_ the transfer registers have been programmed.
         */
        control |= M2M_CONTROL_ENABLE;
        writel(control, edmac->regs + M2M_CONTROL);

        control |= M2M_CONTROL_START;
        writel(control, edmac->regs + M2M_CONTROL);
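/*
 * Note (hedged): in the full source the software START above applies only to
 * memcpy channels, i.e. when @data (chan->private) is NULL; slave M2M channels
 * are started by the peripheral's request/handshake instead. The same software
 * trigger reappears in m2m_hw_interrupt() below to kick the next buffer of a
 * multi-descriptor memcpy.
 */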
 * According to the EP93xx User's Guide, we should receive DONE interrupt when
 * all M2M DMA controller transactions complete normally. This is not always
 * the case - sometimes EP93xx M2M DMA asserts DONE interrupt when the DMA
 * channel is still running (channel Buffer FSM in DMA_BUF_ON state, and
 * channel Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA
 * operation). In effect, disabling the channel when only DONE bit is set
 * could stop currently running DMA transfer. To avoid this, we use Buffer
 * FSM and Control FSM to check current state of DMA channel.
/* m2m_hw_interrupt() */
        u32 status = readl(edmac->regs + M2M_STATUS);
        u32 control;

        if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
                return INTERRUPT_UNKNOWN;

        writel(0, edmac->regs + M2M_INTERRUPT);

        /*
         * Check whether we are done with descriptors or not. This, together
         * with DMA channel state, determines action to take in interrupt.
         */
        last_done = !desc || desc->txd.cookie;

        /*
         * Use M2M DMA Buffer FSM and Control FSM to check current state of
         * DMA channel. Using DONE and NFB bits from channel status register
         * or bits from channel interrupt register is not reliable.
         */

        /* ...disabling the channel or polling the DONE bit. */

        if (done && !edmac->chan.private) {
                /* Software trigger for memcpy channel */
                control = readl(edmac->regs + M2M_CONTROL);
                control |= M2M_CONTROL_START;
                writel(control, edmac->regs + M2M_CONTROL);
        }

        /*
         * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
         * and Control FSM is in DMA_STALL state.
         */

        /* Disable interrupts and the channel */
        control = readl(edmac->regs + M2M_CONTROL);
        control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
                    | M2M_CONTROL_ENABLE);
        writel(control, edmac->regs + M2M_CONTROL);
/* ep93xx_dma_desc_get() */
        spin_lock_irqsave(&edmac->lock, flags);
        list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
                if (async_tx_test_ack(&desc->txd)) {
                        list_del_init(&desc->node);

                        /* Re-initialize the descriptor */
                        desc->src_addr = 0;
                        desc->dst_addr = 0;
                        desc->size = 0;
                        desc->complete = false;
                        desc->txd.cookie = 0;
                        desc->txd.callback = NULL;
                        desc->txd.callback_param = NULL;

        spin_unlock_irqrestore(&edmac->lock, flags);
/* ep93xx_dma_desc_put() */
        spin_lock_irqsave(&edmac->lock, flags);
        list_splice_init(&desc->tx_list, &edmac->free_list);
        list_add(&desc->node, &edmac->free_list);
        spin_unlock_irqrestore(&edmac->lock, flags);
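/*
 * Descriptor lifecycle (summarized from the code above): descriptors move
 * free_list -> active/queue -> free_list. ep93xx_dma_desc_get() recycles a
 * free descriptor only once the client has acked it (async_tx_test_ack()),
 * so a completed-but-unacked descriptor is never handed out again too early;
 * ep93xx_dma_desc_put() returns a descriptor and its whole tx_list chain in
 * one splice.
 */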
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 * If we are idling and have transactions pending, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.

        spin_lock_irqsave(&edmac->lock, flags);
        if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
                spin_unlock_irqrestore(&edmac->lock, flags);
                return;
        }

        new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
        list_del_init(&new->node);

        edmac->edma->hw_submit(edmac);
        spin_unlock_irqrestore(&edmac->lock, flags);
/* ep93xx_dma_tasklet() */
        spin_lock_irq(&edmac->lock);

        if (desc->complete) {
                /* mark descriptor complete for non cyclic case only */
                if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
                        dma_cookie_complete(&desc->txd);
                list_splice_init(&edmac->active, &list);
        }

        dmaengine_desc_get_callback(&desc->txd, &cb);

        spin_unlock_irq(&edmac->lock);

        dma_descriptor_unmap(&desc->txd);
/* ep93xx_dma_interrupt() */
        spin_lock(&edmac->lock);

        /* (early-exit path when no descriptor is active:) */
                spin_unlock(&edmac->lock);

        switch (edmac->edma->hw_interrupt(edmac)) {
        case INTERRUPT_DONE:
                desc->complete = true;
                tasklet_schedule(&edmac->tasklet);
                break;

        case INTERRUPT_NEXT_BUFFER:
                if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
                        tasklet_schedule(&edmac->tasklet);
                break;

        spin_unlock(&edmac->lock);
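/*
 * Interrupt flow (summarized): the hard IRQ handler above only marks the
 * active descriptor complete and schedules the channel tasklet; callbacks,
 * cookie completion and advancing to the next queued transaction happen in
 * ep93xx_dma_tasklet(), which drops the channel lock around the client
 * callback.
 */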
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed

        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);

        spin_lock_irqsave(&edmac->lock, flags);

        if (list_empty(&edmac->active)) {
                edmac->edma->hw_submit(edmac);
        } else {
                list_add_tail(&desc->node, &edmac->queue);
        }

        spin_unlock_irqrestore(&edmac->lock, flags);
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 * Function allocates necessary resources for the given DMA channel and
 * returns number of allocated descriptors for the channel. Negative errno
 * is returned in case of failure.

        struct ep93xx_dma_data *data = chan->private;

        /* Sanity check the channel parameters */
        if (!edmac->edma->m2m) {
                if (!data)
                        return -EINVAL;
                if (data->port < EP93XX_DMA_I2S1 ||
                    data->port > EP93XX_DMA_IRDA)
                        return -EINVAL;
                if (data->direction != ep93xx_dma_chan_direction(chan))
                        return -EINVAL;
        } else if (data) {
                switch (data->port) {
                case EP93XX_DMA_SSP:
                case EP93XX_DMA_IDE:
                        if (!is_slave_direction(data->direction))
                                return -EINVAL;
                        break;
                default:
                        return -EINVAL;
                }
        }

        if (data && data->name)
                name = data->name;

        ret = clk_prepare_enable(edmac->clk);

        ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);

        spin_lock_irq(&edmac->lock);
        dma_cookie_init(&edmac->chan);
        ret = edmac->edma->hw_setup(edmac);
        spin_unlock_irq(&edmac->lock);

        /* (descriptor preallocation loop:) */
                INIT_LIST_HEAD(&desc->tx_list);

                dma_async_tx_descriptor_init(&desc->txd, chan);
                desc->txd.flags = DMA_CTRL_ACK;
                desc->txd.tx_submit = ep93xx_dma_tx_submit;

        /* (error unwinding:) */
        free_irq(edmac->irq, edmac);

        clk_disable_unprepare(edmac->clk);
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.

        BUG_ON(!list_empty(&edmac->active));
        BUG_ON(!list_empty(&edmac->queue));

        spin_lock_irqsave(&edmac->lock, flags);
        edmac->edma->hw_shutdown(edmac);
        edmac->runtime_addr = 0;
        edmac->runtime_ctrl = 0;
        edmac->buffer = 0;
        list_splice_init(&edmac->free_list, &list);
        spin_unlock_irqrestore(&edmac->lock, flags);

        clk_disable_unprepare(edmac->clk);
        free_irq(edmac->irq, edmac);
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel

                bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

                desc->src_addr = src + offset;
                desc->dst_addr = dest + offset;
                desc->size = bytes;

                        list_add_tail(&desc->node, &first->tx_list);

        first->txd.cookie = -EBUSY;
        first->txd.flags = flags;

        return &first->txd;
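/*
 * A hedged client-side sketch: issuing a copy through the generic dmaengine
 * API, which lands in ep93xx_dma_prep_dma_memcpy() above on a channel with
 * DMA_MEMCPY capability. Error handling is trimmed and names are illustrative.
 */
static dma_cookie_t example_issue_memcpy(struct dma_chan *chan,
                                         dma_addr_t dst, dma_addr_t src,
                                         size_t len)
{
        struct dma_async_tx_descriptor *txd;
        dma_cookie_t cookie;

        txd = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_CTRL_ACK);
        if (!txd)
                return -ENOMEM;

        cookie = dmaengine_submit(txd);
        dma_async_issue_pending(chan);
        return cookie;
}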
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel

        if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
                dev_warn(chan2dev(edmac),
                         "channel was configured with different direction\n");
                return NULL;
        }

        if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
                dev_warn(chan2dev(edmac),
                         "channel is already used for cyclic transfers\n");
                return NULL;
        }

        ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);

                if (dir == DMA_MEM_TO_DEV) {
                        desc->src_addr = sg_dma_address(sg);
                        desc->dst_addr = edmac->runtime_addr;
                } else {
                        desc->src_addr = edmac->runtime_addr;
                        desc->dst_addr = sg_dma_address(sg);
                }

                desc->size = len;

                        list_add_tail(&desc->node, &first->tx_list);

        first->txd.cookie = -EBUSY;
        first->txd.flags = flags;

        return &first->txd;
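/*
 * A hedged client-side sketch: a slave scatter-gather transmit through the
 * generic dmaengine API. The dmaengine_slave_config() values are cached by
 * ep93xx_dma_slave_config() and applied by ep93xx_dma_slave_config_write()
 * when the descriptor is prepared above. Names are illustrative.
 */
static int example_submit_tx_sg(struct dma_chan *chan, struct scatterlist *sgl,
                                unsigned int sg_len, dma_addr_t dev_fifo)
{
        struct dma_slave_config config = {
                .dst_addr       = dev_fifo,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
        };
        struct dma_async_tx_descriptor *txd;
        int ret;

        ret = dmaengine_slave_config(chan, &config);
        if (ret)
                return ret;

        txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
                                      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!txd)
                return -ENOMEM;

        dmaengine_submit(txd);
        dma_async_issue_pending(chan);
        return 0;
}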
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * channel.

        if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
                dev_warn(chan2dev(edmac),
                         "channel was configured with different direction\n");
                return NULL;
        }

        if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
                dev_warn(chan2dev(edmac),
                         "channel is already used for cyclic transfers\n");
                return NULL;
        }

        ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);

                if (dir == DMA_MEM_TO_DEV) {
                        desc->src_addr = dma_addr + offset;
                        desc->dst_addr = edmac->runtime_addr;
                } else {
                        desc->src_addr = edmac->runtime_addr;
                        desc->dst_addr = dma_addr + offset;
                }

                desc->size = period_len;

                        list_add_tail(&desc->node, &first->tx_list);

        first->txd.cookie = -EBUSY;

        return &first->txd;
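/*
 * A hedged client-side sketch: a cyclic receive, the typical audio/ring-buffer
 * use of ep93xx_dma_prep_dma_cyclic() above. The callback runs once per
 * elapsed period until the client terminates the channel. Names are
 * illustrative.
 */
static void example_period_elapsed(void *arg)
{
        /* advance the ring-buffer pointer / wake the consumer here */
}

static int example_start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
                                   size_t buf_len, size_t period_len)
{
        struct dma_async_tx_descriptor *txd;

        txd = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
                                        DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
        if (!txd)
                return -ENOMEM;

        txd->callback = example_period_elapsed;
        txd->callback_param = NULL;
        dmaengine_submit(txd);
        dma_async_issue_pending(chan);
        return 0;
}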
 * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
 *                          current context.
 * @chan: channel
 * Synchronizes the DMA channel termination to the current context. When this
 * function returns it is guaranteed that all transfers for previously issued
 * descriptors have stopped and it is safe to free the memory associated with
 * them.

        if (edmac->edma->hw_synchronize)
                edmac->edma->hw_synchronize(edmac);
 * ep93xx_dma_terminate_all - terminate all transactions
 * @chan: channel
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.

        spin_lock_irqsave(&edmac->lock, flags);
        /* First we disable and flush the DMA channel */
        edmac->edma->hw_shutdown(edmac);
        clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
        list_splice_init(&edmac->active, &list);
        list_splice_init(&edmac->queue, &list);
        /*
         * We then re-enable the channel. This way we can continue submitting
         * the descriptors by just calling ->hw_submit() again.
         */
        edmac->edma->hw_setup(edmac);
        spin_unlock_irqrestore(&edmac->lock, flags);
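/*
 * Client-side pairing (hedged): dmaengine_terminate_async() maps to
 * ep93xx_dma_terminate_all() above, and dmaengine_synchronize() maps to
 * ep93xx_dma_synchronize(); dmaengine_terminate_sync() combines the two, so
 * buffers and callback state may be freed safely once it returns.
 */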
/* ep93xx_dma_slave_config() */
        memcpy(&edmac->slave_config, config, sizeof(*config));
/* ep93xx_dma_slave_config_write() */
        if (!edmac->edma->m2m)
                return -EINVAL;

        switch (dir) {
        case DMA_DEV_TO_MEM:
                width = config->src_addr_width;
                addr = config->src_addr;
                break;

        case DMA_MEM_TO_DEV:
                width = config->dst_addr_width;
                addr = config->dst_addr;
                break;

        default:
                return -EINVAL;
        }

        /* (bus width is mapped to M2M_CONTROL_PW_* bits; anything else:) */
                return -EINVAL;

        spin_lock_irqsave(&edmac->lock, flags);
        edmac->runtime_addr = addr;
        edmac->runtime_ctrl = ctrl;
        spin_unlock_irqrestore(&edmac->lock, flags);
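/*
 * Note (summarized from the code above): only the peripheral width reaches
 * the hardware here. @edmac->runtime_ctrl carries the M2M_CONTROL_PW_* bits
 * that m2m_hw_submit() merges into the control word, and @edmac->runtime_addr
 * is the device-side address that m2m_fill_desc() programs into SAR or DAR.
 */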
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel

 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
/* ep93xx_dma_probe() */
        struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);

        edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);

        edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
        if (!edma)
                return -ENOMEM;

        dma_dev = &edma->dma_dev;
        edma->m2m = platform_get_device_id(pdev)->driver_data;
        edma->num_channels = pdata->num_channels;

        INIT_LIST_HEAD(&dma_dev->channels);
        for (i = 0; i < pdata->num_channels; i++) {
                const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
                struct ep93xx_dma_chan *edmac = &edma->channels[i];

                edmac->chan.device = dma_dev;
                edmac->regs = cdata->base;
                edmac->irq = cdata->irq;
                edmac->edma = edma;

                edmac->clk = clk_get(NULL, cdata->name);
                if (IS_ERR(edmac->clk)) {
                        dev_warn(&pdev->dev, "failed to get clock for %s\n",
                                 cdata->name);
                        continue;
                }

                spin_lock_init(&edmac->lock);
                INIT_LIST_HEAD(&edmac->active);
                INIT_LIST_HEAD(&edmac->queue);
                INIT_LIST_HEAD(&edmac->free_list);
                tasklet_setup(&edmac->tasklet, ep93xx_dma_tasklet);

                list_add_tail(&edmac->chan.device_node,
                              &dma_dev->channels);
        }

        dma_cap_zero(dma_dev->cap_mask);
        dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
        dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

        dma_dev->dev = &pdev->dev;
        dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
        dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
        dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
        dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
        dma_dev->device_config = ep93xx_dma_slave_config;
        dma_dev->device_synchronize = ep93xx_dma_synchronize;
        dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
        dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
        dma_dev->device_tx_status = ep93xx_dma_tx_status;

        dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

        if (edma->m2m) {
                dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
                dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

                edma->hw_setup = m2m_hw_setup;
                edma->hw_shutdown = m2m_hw_shutdown;
                edma->hw_submit = m2m_hw_submit;
                edma->hw_interrupt = m2m_hw_interrupt;
        } else {
                dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

                edma->hw_synchronize = m2p_hw_synchronize;
                edma->hw_setup = m2p_hw_setup;
                edma->hw_shutdown = m2p_hw_shutdown;
                edma->hw_submit = m2p_hw_submit;
                edma->hw_interrupt = m2p_hw_interrupt;
        }

        /* (on registration failure, release the channel clocks:) */
        for (i = 0; i < edma->num_channels; i++) {
                struct ep93xx_dma_chan *edmac = &edma->channels[i];

                if (!IS_ERR_OR_NULL(edmac->clk))
                        clk_put(edmac->clk);
        }

        dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
                 edma->m2m ? "M" : "P");
1414 { "ep93xx-dma-m2p", 0 },
1415 { "ep93xx-dma-m2m", 1 },
1421 .name = "ep93xx-dma",