Lines matching refs: schan

52 static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)  in shdma_chan_xfer_ld_queue()  argument
54 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); in shdma_chan_xfer_ld_queue()
59 if (ops->channel_busy(schan)) in shdma_chan_xfer_ld_queue()
63 list_for_each_entry(sdesc, &schan->ld_queue, node) in shdma_chan_xfer_ld_queue()
65 ops->start_xfer(schan, sdesc); in shdma_chan_xfer_ld_queue()
74 struct shdma_chan *schan = to_shdma_chan(tx->chan); in shdma_tx_submit() local
79 spin_lock_irq(&schan->chan_lock); in shdma_tx_submit()
81 power_up = list_empty(&schan->ld_queue); in shdma_tx_submit()
94 &chunk->node == &schan->ld_free)) in shdma_tx_submit()
105 list_move_tail(&chunk->node, &schan->ld_queue); in shdma_tx_submit()
107 dev_dbg(schan->dev, "submit #%d@%p on %d\n", in shdma_tx_submit()
108 tx->cookie, &chunk->async_tx, schan->id); in shdma_tx_submit()
113 schan->pm_state = SHDMA_PM_BUSY; in shdma_tx_submit()
115 ret = pm_runtime_get(schan->dev); in shdma_tx_submit()
117 spin_unlock_irq(&schan->chan_lock); in shdma_tx_submit()
119 dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret); in shdma_tx_submit()
121 pm_runtime_barrier(schan->dev); in shdma_tx_submit()
123 spin_lock_irq(&schan->chan_lock); in shdma_tx_submit()
126 if (schan->pm_state != SHDMA_PM_ESTABLISHED) { in shdma_tx_submit()
128 to_shdma_dev(schan->dma_chan.device); in shdma_tx_submit()
130 dev_dbg(schan->dev, "Bring up channel %d\n", in shdma_tx_submit()
131 schan->id); in shdma_tx_submit()
137 ops->setup_xfer(schan, schan->slave_id); in shdma_tx_submit()
139 if (schan->pm_state == SHDMA_PM_PENDING) in shdma_tx_submit()
140 shdma_chan_xfer_ld_queue(schan); in shdma_tx_submit()
141 schan->pm_state = SHDMA_PM_ESTABLISHED; in shdma_tx_submit()
148 schan->pm_state = SHDMA_PM_PENDING; in shdma_tx_submit()
151 spin_unlock_irq(&schan->chan_lock); in shdma_tx_submit()
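
A descriptor submitted through this .tx_submit hook is only queued, and the channel may be left in SHDMA_PM_PENDING; per the dmaengine contract the transfer does not start until the client calls dma_async_issue_pending(). A minimal client-side sketch with standard dmaengine calls (the prep step and the 'chan'/'desc' variables are assumed to exist already):

    /* 'desc' was returned by a dmaengine_prep_*() call on 'chan' */
    dma_cookie_t cookie = dmaengine_submit(desc);   /* reaches shdma_tx_submit() */

    if (dma_submit_error(cookie))
        return -EIO;

    dma_async_issue_pending(chan);                  /* reaches shdma_issue_pending() */
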
157 static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan) in shdma_get_desc() argument
161 list_for_each_entry(sdesc, &schan->ld_free, node) in shdma_get_desc()
171 static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr) in shdma_setup_slave() argument
173 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); in shdma_setup_slave()
177 if (schan->dev->of_node) { in shdma_setup_slave()
178 match = schan->hw_req; in shdma_setup_slave()
179 ret = ops->set_slave(schan, match, slave_addr, true); in shdma_setup_slave()
183 match = schan->real_slave_id; in shdma_setup_slave()
186 if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num) in shdma_setup_slave()
189 if (test_and_set_bit(schan->real_slave_id, shdma_slave_used)) in shdma_setup_slave()
192 ret = ops->set_slave(schan, match, slave_addr, false); in shdma_setup_slave()
194 clear_bit(schan->real_slave_id, shdma_slave_used); in shdma_setup_slave()
198 schan->slave_id = schan->real_slave_id; in shdma_setup_slave()
205 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_alloc_chan_resources() local
206 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); in shdma_alloc_chan_resources()
218 schan->real_slave_id = slave->slave_id; in shdma_alloc_chan_resources()
219 ret = shdma_setup_slave(schan, 0); in shdma_alloc_chan_resources()
224 schan->slave_id = -EINVAL; in shdma_alloc_chan_resources()
227 schan->desc = kcalloc(NR_DESCS_PER_CHANNEL, in shdma_alloc_chan_resources()
229 if (!schan->desc) { in shdma_alloc_chan_resources()
233 schan->desc_num = NR_DESCS_PER_CHANNEL; in shdma_alloc_chan_resources()
236 desc = ops->embedded_desc(schan->desc, i); in shdma_alloc_chan_resources()
238 &schan->dma_chan); in shdma_alloc_chan_resources()
242 list_add(&desc->node, &schan->ld_free); in shdma_alloc_chan_resources()
276 struct shdma_chan *schan; in shdma_chan_filter() local
286 schan = to_shdma_chan(chan); in shdma_chan_filter()
295 if (schan->dev->of_node) { in shdma_chan_filter()
296 ret = sdev->ops->set_slave(schan, slave_id, 0, true); in shdma_chan_filter()
300 schan->real_slave_id = schan->slave_id; in shdma_chan_filter()
313 ret = sdev->ops->set_slave(schan, slave_id, 0, true); in shdma_chan_filter()
317 schan->real_slave_id = slave_id; in shdma_chan_filter()
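
shdma_chan_filter() is the filter exported for legacy, non-DT clients. A hedged sketch of requesting a channel with it, assuming the filter argument is the peripheral's slave ID cast to a pointer, which is what the slave_id handling above suggests; the channel is released later with dma_release_channel():

    #include <linux/dmaengine.h>
    #include <linux/shdma-base.h>

    /* hypothetical helper in a client driver */
    static struct dma_chan *my_request_chan(int slave_id)
    {
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        return dma_request_channel(mask, shdma_chan_filter,
                                   (void *)(unsigned long)slave_id);
    }
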
323 static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) in __ld_cleanup() argument
335 spin_lock_irqsave(&schan->chan_lock, flags); in __ld_cleanup()
336 list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) { in __ld_cleanup()
357 if (schan->dma_chan.completed_cookie != desc->cookie - 1) in __ld_cleanup()
358 dev_dbg(schan->dev, in __ld_cleanup()
361 schan->dma_chan.completed_cookie + 1); in __ld_cleanup()
362 schan->dma_chan.completed_cookie = desc->cookie; in __ld_cleanup()
370 dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n", in __ld_cleanup()
371 tx->cookie, tx, schan->id); in __ld_cleanup()
393 dev_dbg(schan->dev, "descriptor %p #%d completed.\n", in __ld_cleanup()
403 list_move(&desc->node, &schan->ld_free); in __ld_cleanup()
410 if (list_empty(&schan->ld_queue)) { in __ld_cleanup()
411 dev_dbg(schan->dev, "Bring down channel %d\n", schan->id); in __ld_cleanup()
412 pm_runtime_put(schan->dev); in __ld_cleanup()
413 schan->pm_state = SHDMA_PM_ESTABLISHED; in __ld_cleanup()
414 } else if (schan->pm_state == SHDMA_PM_PENDING) { in __ld_cleanup()
415 shdma_chan_xfer_ld_queue(schan); in __ld_cleanup()
425 schan->dma_chan.completed_cookie = schan->dma_chan.cookie; in __ld_cleanup()
427 list_splice_tail(&cyclic_list, &schan->ld_queue); in __ld_cleanup()
429 spin_unlock_irqrestore(&schan->chan_lock, flags); in __ld_cleanup()
441 static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all) in shdma_chan_ld_cleanup() argument
443 while (__ld_cleanup(schan, all)) in shdma_chan_ld_cleanup()
452 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_free_chan_resources() local
458 spin_lock_irq(&schan->chan_lock); in shdma_free_chan_resources()
459 ops->halt_channel(schan); in shdma_free_chan_resources()
460 spin_unlock_irq(&schan->chan_lock); in shdma_free_chan_resources()
465 if (!list_empty(&schan->ld_queue)) in shdma_free_chan_resources()
466 shdma_chan_ld_cleanup(schan, true); in shdma_free_chan_resources()
468 if (schan->slave_id >= 0) { in shdma_free_chan_resources()
470 clear_bit(schan->slave_id, shdma_slave_used); in shdma_free_chan_resources()
474 schan->real_slave_id = 0; in shdma_free_chan_resources()
476 spin_lock_irq(&schan->chan_lock); in shdma_free_chan_resources()
478 list_splice_init(&schan->ld_free, &list); in shdma_free_chan_resources()
479 schan->desc_num = 0; in shdma_free_chan_resources()
481 spin_unlock_irq(&schan->chan_lock); in shdma_free_chan_resources()
483 kfree(schan->desc); in shdma_free_chan_resources()
501 static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan, in shdma_add_desc() argument
505 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); in shdma_add_desc()
514 new = shdma_get_desc(schan); in shdma_add_desc()
516 dev_err(schan->dev, "No free link descriptor available\n"); in shdma_add_desc()
520 ops->desc_setup(schan, new, *src, *dst, &copy_size); in shdma_add_desc()
531 dev_dbg(schan->dev, in shdma_add_desc()
560 static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan, in shdma_prep_sg() argument
572 chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len); in shdma_prep_sg()
575 spin_lock_irqsave(&schan->chan_lock, irq_flags); in shdma_prep_sg()
596 dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n", in shdma_prep_sg()
600 new = shdma_add_desc(schan, flags, in shdma_prep_sg()
604 new = shdma_add_desc(schan, flags, in shdma_prep_sg()
623 list_splice_tail(&tx_list, &schan->ld_free); in shdma_prep_sg()
625 spin_unlock_irqrestore(&schan->chan_lock, irq_flags); in shdma_prep_sg()
632 list_splice(&tx_list, &schan->ld_free); in shdma_prep_sg()
634 spin_unlock_irqrestore(&schan->chan_lock, irq_flags); in shdma_prep_sg()
643 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_prep_memcpy() local
649 BUG_ON(!schan->desc_num); in shdma_prep_memcpy()
657 return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, in shdma_prep_memcpy()
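
On the client side this is reached through the generic memcpy prep helper; a minimal sketch, assuming both buffers are already DMA-mapped and that my_done()/my_ctx are hypothetical completion hooks:

    struct dma_async_tx_descriptor *tx;

    tx = dmaengine_prep_dma_memcpy(chan, dma_dest, dma_src, len,
                                   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    if (!tx)
        return -ENOMEM;
    tx->callback = my_done;
    tx->callback_param = my_ctx;
    dmaengine_submit(tx);
    dma_async_issue_pending(chan);
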
665 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_prep_slave_sg() local
666 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); in shdma_prep_slave_sg()
668 int slave_id = schan->slave_id; in shdma_prep_slave_sg()
674 BUG_ON(!schan->desc_num); in shdma_prep_slave_sg()
678 dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n", in shdma_prep_slave_sg()
683 slave_addr = ops->slave_addr(schan); in shdma_prep_slave_sg()
685 return shdma_prep_sg(schan, sgl, sg_len, &slave_addr, in shdma_prep_slave_sg()
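
A hedged client-side sketch for the slave path: configure the channel with dmaengine_slave_config() (which lands in shdma_config() further down) and then prepare the scatterlist transfer; the FIFO address and bus width are illustrative only:

    static int my_start_rx(struct dma_chan *chan, struct scatterlist *sgl,
                           unsigned int sg_len, dma_addr_t fifo_addr)
    {
        struct dma_slave_config cfg = {
            .direction      = DMA_DEV_TO_MEM,
            .src_addr       = fifo_addr,
            .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        };
        struct dma_async_tx_descriptor *tx;
        int ret;

        ret = dmaengine_slave_config(chan, &cfg);
        if (ret)
            return ret;

        tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
                                     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx)
            return -ENOMEM;

        dmaengine_submit(tx);
        dma_async_issue_pending(chan);
        return 0;
    }
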
696 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_prep_dma_cyclic() local
697 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); in shdma_prep_dma_cyclic()
701 int slave_id = schan->slave_id; in shdma_prep_dma_cyclic()
709 BUG_ON(!schan->desc_num); in shdma_prep_dma_cyclic()
712 dev_err(schan->dev, "sg length %d exceeds limit %d", in shdma_prep_dma_cyclic()
719 dev_warn(schan->dev, in shdma_prep_dma_cyclic()
725 slave_addr = ops->slave_addr(schan); in shdma_prep_dma_cyclic()
746 desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr, in shdma_prep_dma_cyclic()
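
The cyclic variant serves ring-buffer users such as audio; a minimal sketch, assuming buf_dma is an already-mapped buffer and my_period_done()/my_ctx are hypothetical:

    struct dma_async_tx_descriptor *tx;

    tx = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
                                   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
    if (!tx)
        return -ENOMEM;
    tx->callback = my_period_done;    /* invoked as each period completes */
    tx->callback_param = my_ctx;
    dmaengine_submit(tx);
    dma_async_issue_pending(chan);
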
755 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_terminate_all() local
760 spin_lock_irqsave(&schan->chan_lock, flags); in shdma_terminate_all()
761 ops->halt_channel(schan); in shdma_terminate_all()
763 if (ops->get_partial && !list_empty(&schan->ld_queue)) { in shdma_terminate_all()
765 struct shdma_desc *desc = list_first_entry(&schan->ld_queue, in shdma_terminate_all()
767 desc->partial = ops->get_partial(schan, desc); in shdma_terminate_all()
770 spin_unlock_irqrestore(&schan->chan_lock, flags); in shdma_terminate_all()
772 shdma_chan_ld_cleanup(schan, true); in shdma_terminate_all()
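
Clients stop a channel through the standard terminate helper, which lands here; the get_partial() hook lets the driver record how much of the interrupted descriptor was left untransferred. A one-call sketch ('dev' being the client's own device):

    if (dmaengine_terminate_all(chan))    /* reaches shdma_terminate_all() */
        dev_warn(dev, "failed to terminate DMA\n");
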
780 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_config() local
793 return shdma_setup_slave(schan, in shdma_config()
800 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_issue_pending() local
802 spin_lock_irq(&schan->chan_lock); in shdma_issue_pending()
803 if (schan->pm_state == SHDMA_PM_ESTABLISHED) in shdma_issue_pending()
804 shdma_chan_xfer_ld_queue(schan); in shdma_issue_pending()
806 schan->pm_state = SHDMA_PM_PENDING; in shdma_issue_pending()
807 spin_unlock_irq(&schan->chan_lock); in shdma_issue_pending()
814 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_tx_status() local
818 shdma_chan_ld_cleanup(schan, false); in shdma_tx_status()
820 spin_lock_irqsave(&schan->chan_lock, flags); in shdma_tx_status()
831 list_for_each_entry(sdesc, &schan->ld_queue, node) in shdma_tx_status()
838 spin_unlock_irqrestore(&schan->chan_lock, flags); in shdma_tx_status()
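
Completion is queried with the standard status helper; where the driver reports it, the residue of the still-queued data comes back through the dma_tx_state argument. Sketch:

    struct dma_tx_state state;
    enum dma_status status;

    status = dmaengine_tx_status(chan, cookie, &state);
    if (status == DMA_COMPLETE)
        dev_dbg(dev, "transfer done\n");
    else
        dev_dbg(dev, "in flight, residue %u bytes\n", state.residue);
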
847 struct shdma_chan *schan; in shdma_reset() local
852 shdma_for_each_chan(schan, sdev, i) { in shdma_reset()
856 if (!schan) in shdma_reset()
859 spin_lock(&schan->chan_lock); in shdma_reset()
862 ops->halt_channel(schan); in shdma_reset()
864 list_splice_init(&schan->ld_queue, &dl); in shdma_reset()
867 dev_dbg(schan->dev, "Bring down channel %d\n", schan->id); in shdma_reset()
868 pm_runtime_put(schan->dev); in shdma_reset()
870 schan->pm_state = SHDMA_PM_ESTABLISHED; in shdma_reset()
872 spin_unlock(&schan->chan_lock); in shdma_reset()
882 spin_lock(&schan->chan_lock); in shdma_reset()
883 list_splice(&dl, &schan->ld_free); in shdma_reset()
884 spin_unlock(&schan->chan_lock); in shdma_reset()
895 struct shdma_chan *schan = dev; in chan_irq() local
897 to_shdma_dev(schan->dma_chan.device)->ops; in chan_irq()
900 spin_lock(&schan->chan_lock); in chan_irq()
902 ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE; in chan_irq()
904 spin_unlock(&schan->chan_lock); in chan_irq()
911 struct shdma_chan *schan = dev; in chan_irqt() local
913 to_shdma_dev(schan->dma_chan.device)->ops; in chan_irqt()
916 spin_lock_irq(&schan->chan_lock); in chan_irqt()
917 list_for_each_entry(sdesc, &schan->ld_queue, node) { in chan_irqt()
919 ops->desc_completed(schan, sdesc)) { in chan_irqt()
920 dev_dbg(schan->dev, "done #%d@%p\n", in chan_irqt()
927 shdma_chan_xfer_ld_queue(schan); in chan_irqt()
928 spin_unlock_irq(&schan->chan_lock); in chan_irqt()
930 shdma_chan_ld_cleanup(schan, false); in chan_irqt()
935 int shdma_request_irq(struct shdma_chan *schan, int irq, in shdma_request_irq() argument
938 int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq, in shdma_request_irq()
939 chan_irqt, flags, name, schan); in shdma_request_irq()
941 schan->irq = ret < 0 ? ret : irq; in shdma_request_irq()
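
shdma_request_irq() is meant for controller ("glue") drivers and, per the code above, wires chan_irq()/chan_irqt() to the channel. A hedged per-channel setup sketch, with the flag and name chosen only as examples:

    shdma_chan_probe(sdev, schan, id);    /* register the channel with the base layer */

    err = shdma_request_irq(schan, irq, IRQF_SHARED, "my-dmac-chan");
    if (err < 0)
        return err;
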
948 struct shdma_chan *schan, int id) in shdma_chan_probe() argument
950 schan->pm_state = SHDMA_PM_ESTABLISHED; in shdma_chan_probe()
953 schan->dma_chan.device = &sdev->dma_dev; in shdma_chan_probe()
954 dma_cookie_init(&schan->dma_chan); in shdma_chan_probe()
956 schan->dev = sdev->dma_dev.dev; in shdma_chan_probe()
957 schan->id = id; in shdma_chan_probe()
959 if (!schan->max_xfer_len) in shdma_chan_probe()
960 schan->max_xfer_len = PAGE_SIZE; in shdma_chan_probe()
962 spin_lock_init(&schan->chan_lock); in shdma_chan_probe()
965 INIT_LIST_HEAD(&schan->ld_queue); in shdma_chan_probe()
966 INIT_LIST_HEAD(&schan->ld_free); in shdma_chan_probe()
969 list_add_tail(&schan->dma_chan.device_node, in shdma_chan_probe()
971 sdev->schan[id] = schan; in shdma_chan_probe()
975 void shdma_chan_remove(struct shdma_chan *schan) in shdma_chan_remove() argument
977 list_del(&schan->dma_chan.device_node); in shdma_chan_remove()
1003 sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL); in shdma_init()
1004 if (!sdev->schan) in shdma_init()
1031 kfree(sdev->schan); in shdma_cleanup()
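
shdma_init() and shdma_cleanup() frame a glue driver's probe and remove paths; a hedged outline of the expected call order (capability setup, channel probing and IRQ wiring omitted, 'my_dmac' and 'num_channels' hypothetical):

    /* probe */
    err = shdma_init(&pdev->dev, &my_dmac->shdma_dev, num_channels);
    if (err < 0)
        return err;
    /* ... set dma_dev capabilities, probe channels, request IRQs ... */
    err = dma_async_device_register(&my_dmac->shdma_dev.dma_dev);
    if (err < 0) {
        shdma_cleanup(&my_dmac->shdma_dev);
        return err;
    }

    /* remove */
    dma_async_device_unregister(&my_dmac->shdma_dev.dma_dev);
    shdma_cleanup(&my_dmac->shdma_dev);
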