Lines matching refs:mdev — each entry gives the file line number, the matching source line, and the enclosing function, noting whether mdev is an argument or a local there.
212 static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev) in msgdma_get_descriptor() argument
217 spin_lock_irqsave(&mdev->lock, flags); in msgdma_get_descriptor()
218 desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node); in msgdma_get_descriptor()
220 spin_unlock_irqrestore(&mdev->lock, flags); in msgdma_get_descriptor()
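Lines 212-220 show the free-list pop under the channel spinlock. A minimal sketch of how the function plausibly reads in full (kernel context assumed: linux/dmaengine.h, linux/spinlock.h, linux/list.h, linux/io.h, linux/iopoll.h); the struct layout below is inferred from fragments elsewhere in this listing, and hw_desc and tx_list in particular are assumptions not visible here:

	/* Sketch, not the verbatim driver: fields inferred from this listing */
	struct msgdma_sw_desc {
		struct dma_async_tx_descriptor async_tx;
		struct msgdma_extended_desc hw_desc;	/* assumed hw descriptor image */
		struct list_head node;		/* linkage on free/pending/active/done lists */
		struct list_head tx_list;	/* assumed: children of a multi-desc transfer */
	};

	static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
	{
		struct msgdma_sw_desc *desc;
		unsigned long flags;

		spin_lock_irqsave(&mdev->lock, flags);
		desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
		list_del(&desc->node);		/* assumed: unlink before handing out */
		spin_unlock_irqrestore(&mdev->lock, flags);

		INIT_LIST_HEAD(&desc->tx_list);

		return desc;
	}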
232 static void msgdma_free_descriptor(struct msgdma_device *mdev, in msgdma_free_descriptor() argument
237 mdev->desc_free_cnt++; in msgdma_free_descriptor()
238 list_add_tail(&desc->node, &mdev->free_list); in msgdma_free_descriptor()
240 mdev->desc_free_cnt++; in msgdma_free_descriptor()
241 list_move_tail(&child->node, &mdev->free_list); in msgdma_free_descriptor()
250 static void msgdma_free_desc_list(struct msgdma_device *mdev, in msgdma_free_desc_list() argument
256 msgdma_free_descriptor(mdev, desc); in msgdma_free_desc_list()
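Lines 232-256 are the inverse path: a descriptor and any children on its tx_list go back onto free_list, with desc_free_cnt bumped once per descriptor. A sketch consistent with those fragments; judging by lines 597-603 and 644-646, both functions run with mdev->lock already held by the caller:

	static void msgdma_free_descriptor(struct msgdma_device *mdev,
					   struct msgdma_sw_desc *desc)
	{
		struct msgdma_sw_desc *child, *next;

		mdev->desc_free_cnt++;
		list_add_tail(&desc->node, &mdev->free_list);
		list_for_each_entry_safe(child, next, &desc->tx_list, node) {
			mdev->desc_free_cnt++;
			list_move_tail(&child->node, &mdev->free_list);
		}
	}

	static void msgdma_free_desc_list(struct msgdma_device *mdev,
					  struct list_head *list)
	{
		struct msgdma_sw_desc *desc, *next;

		list_for_each_entry_safe(desc, next, list, node)
			msgdma_free_descriptor(mdev, desc);
	}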
307 struct msgdma_device *mdev = to_mdev(tx->chan); in msgdma_tx_submit() local
313 spin_lock_irqsave(&mdev->lock, flags); in msgdma_tx_submit()
316 list_add_tail(&new->node, &mdev->pending_list); in msgdma_tx_submit()
317 spin_unlock_irqrestore(&mdev->lock, flags); in msgdma_tx_submit()
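Lines 307-317 append the submitted descriptor to pending_list under the lock; a cookie assignment presumably sits in the gap between lines 313 and 316, per the usual dmaengine pattern. A sketch, with tx_to_desc() as an assumed container_of() wrapper:

	static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
	{
		struct msgdma_device *mdev = to_mdev(tx->chan);
		struct msgdma_sw_desc *new = tx_to_desc(tx);	/* assumed container_of() helper */
		dma_cookie_t cookie;
		unsigned long flags;

		spin_lock_irqsave(&mdev->lock, flags);
		cookie = dma_cookie_assign(tx);	/* standard dmaengine cookie helper */

		list_add_tail(&new->node, &mdev->pending_list);
		spin_unlock_irqrestore(&mdev->lock, flags);

		return cookie;
	}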
336 struct msgdma_device *mdev = to_mdev(dchan); in msgdma_prep_memcpy() local
345 spin_lock_irqsave(&mdev->lock, irqflags); in msgdma_prep_memcpy()
346 if (desc_cnt > mdev->desc_free_cnt) { in msgdma_prep_memcpy()
347 spin_unlock_irqrestore(&mdev->lock, irqflags); in msgdma_prep_memcpy()
348 dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); in msgdma_prep_memcpy()
351 mdev->desc_free_cnt -= desc_cnt; in msgdma_prep_memcpy()
352 spin_unlock_irqrestore(&mdev->lock, irqflags); in msgdma_prep_memcpy()
356 new = msgdma_get_descriptor(mdev); in msgdma_prep_memcpy()
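Lines 336-356 show the reservation pattern the prep routines share: the needed descriptor count is checked and debited from desc_free_cnt in one locked section, after which msgdma_get_descriptor() can be called once per chunk with no risk of running dry. A sketch of msgdma_prep_memcpy() built around the visible fragments; MSGDMA_MAX_TRANS_LEN, msgdma_desc_config(), msgdma_desc_config_eod() and the stride constant are assumptions not shown in this listing:

	static struct dma_async_tx_descriptor *
	msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
			   dma_addr_t dma_src, size_t len, unsigned long flags)
	{
		struct msgdma_device *mdev = to_mdev(dchan);
		struct msgdma_sw_desc *new, *first = NULL;
		size_t copy;
		u32 desc_cnt;
		unsigned long irqflags;

		desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);	/* assumed bound */

		spin_lock_irqsave(&mdev->lock, irqflags);
		if (desc_cnt > mdev->desc_free_cnt) {
			spin_unlock_irqrestore(&mdev->lock, irqflags);
			dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
			return NULL;
		}
		mdev->desc_free_cnt -= desc_cnt;	/* reserve every chunk up front */
		spin_unlock_irqrestore(&mdev->lock, irqflags);

		do {
			new = msgdma_get_descriptor(mdev);

			copy = min_t(size_t, len, MSGDMA_MAX_TRANS_LEN);
			msgdma_desc_config(&new->hw_desc, dma_dst, dma_src, copy,
					   MSGDMA_DESC_STRIDE_RW);	/* assumed helper */
			len -= copy;
			dma_dst += copy;
			dma_src += copy;
			if (!first)
				first = new;
			else
				list_add_tail(&new->node, &first->tx_list);
		} while (len);

		msgdma_desc_config_eod(&new->hw_desc);	/* assumed: mark end of chain */
		async_tx_ack(&first->async_tx);
		first->async_tx.flags = flags;

		return &first->async_tx;
	}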
394 struct msgdma_device *mdev = to_mdev(dchan); in msgdma_prep_slave_sg() local
395 struct dma_slave_config *cfg = &mdev->slave_cfg; in msgdma_prep_slave_sg()
408 spin_lock_irqsave(&mdev->lock, irqflags); in msgdma_prep_slave_sg()
409 if (desc_cnt > mdev->desc_free_cnt) { in msgdma_prep_slave_sg()
410 spin_unlock_irqrestore(&mdev->lock, irqflags); in msgdma_prep_slave_sg()
411 dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); in msgdma_prep_slave_sg()
414 mdev->desc_free_cnt -= desc_cnt; in msgdma_prep_slave_sg()
415 spin_unlock_irqrestore(&mdev->lock, irqflags); in msgdma_prep_slave_sg()
422 new = msgdma_get_descriptor(mdev); in msgdma_prep_slave_sg()
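Lines 394-422 repeat the same reservation dance for slave transfers, with one difference: the memory side comes from the scatterlist while the device side comes from the dma_slave_config captured at line 467. A hypothetical helper sketching how that direction split plausibly looks inside the per-chunk loop; msgdma_desc_config() and the stride constants are again assumptions:

	static void msgdma_sg_chunk_config(struct msgdma_extended_desc *hw,
					   struct dma_slave_config *cfg,
					   dma_addr_t mem_addr, size_t copy,
					   enum dma_transfer_direction dir)
	{
		if (dir == DMA_MEM_TO_DEV)
			/* memory -> device: read the scatterlist, write cfg->dst_addr */
			msgdma_desc_config(hw, cfg->dst_addr, mem_addr, copy,
					   MSGDMA_DESC_STRIDE_RD);
		else
			/* device -> memory: read cfg->src_addr, write the scatterlist */
			msgdma_desc_config(hw, mem_addr, cfg->src_addr, copy,
					   MSGDMA_DESC_STRIDE_WR);
	}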
465 struct msgdma_device *mdev = to_mdev(dchan); in msgdma_dma_config() local
467 memcpy(&mdev->slave_cfg, config, sizeof(*config)); in msgdma_dma_config()
472 static void msgdma_reset(struct msgdma_device *mdev) in msgdma_reset() argument
478 iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS); in msgdma_reset()
479 iowrite32(MSGDMA_CSR_CTL_RESET, mdev->csr + MSGDMA_CSR_CONTROL); in msgdma_reset()
481 ret = readl_poll_timeout(mdev->csr + MSGDMA_CSR_STATUS, val, in msgdma_reset()
485 dev_err(mdev->dev, "DMA channel did not reset\n"); in msgdma_reset()
488 iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS); in msgdma_reset()
492 MSGDMA_CSR_CTL_GLOBAL_INTR, mdev->csr + MSGDMA_CSR_CONTROL); in msgdma_reset()
494 mdev->idle = true; in msgdma_reset()
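Lines 472-494 give the reset sequence almost completely: ack all status bits, pulse the reset control bit, poll until the core reports the reset done, ack again, then enable the controller with global interrupts on. A sketch; the RESETTING status bit name, the two stop-on-error control flags and the poll interval/timeout values are assumptions:

	static void msgdma_reset(struct msgdma_device *mdev)
	{
		u32 val;
		int ret;

		/* Clear any stale status, then trigger the reset */
		iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);
		iowrite32(MSGDMA_CSR_CTL_RESET, mdev->csr + MSGDMA_CSR_CONTROL);

		/* Wait for the core to deassert its resetting flag */
		ret = readl_poll_timeout(mdev->csr + MSGDMA_CSR_STATUS, val,
					 (val & MSGDMA_CSR_STAT_RESETTING) == 0,
					 1, 10000);
		if (ret)
			dev_err(mdev->dev, "DMA channel did not reset\n");

		/* Clear all status bits */
		iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);

		/* Enable the DMA controller, including interrupts */
		iowrite32(MSGDMA_CSR_CTL_STOP_ON_ERR | MSGDMA_CSR_CTL_STOP_ON_EARLY |
			  MSGDMA_CSR_CTL_GLOBAL_INTR, mdev->csr + MSGDMA_CSR_CONTROL);

		mdev->idle = true;
	}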
497 static void msgdma_copy_one(struct msgdma_device *mdev, in msgdma_copy_one() argument
500 void __iomem *hw_desc = mdev->desc; in msgdma_copy_one()
506 while (ioread32(mdev->csr + MSGDMA_CSR_STATUS) & in msgdma_copy_one()
523 mdev->idle = false; in msgdma_copy_one()
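Lines 497-523 push one hardware descriptor into the core's descriptor FIFO at mdev->desc; the visible while loop spins while a status bit (a FIFO-full flag, by the look of it) stays set. A sketch; the split write, where the control word goes last because writing it is what commits the descriptor into the FIFO, is an assumption about the hardware interface:

	static void msgdma_copy_one(struct msgdma_device *mdev,
				    struct msgdma_sw_desc *desc)
	{
		void __iomem *hw_desc = mdev->desc;

		/* Wait until the descriptor FIFO has room for one more entry */
		while (ioread32(mdev->csr + MSGDMA_CSR_STATUS) &
		       MSGDMA_CSR_STAT_DESC_BUF_FULL)
			mdelay(1);

		/* Copy everything except the final control word ... */
		memcpy((void __force *)hw_desc, &desc->hw_desc,
		       sizeof(desc->hw_desc) - sizeof(u32));

		/* ... then write the control word last to commit the descriptor */
		mdev->idle = false;
		wmb();
		iowrite32(desc->hw_desc.control,	/* assumed field of the hw layout */
			  hw_desc + sizeof(desc->hw_desc) - sizeof(u32));
		wmb();
	}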
535 static void msgdma_copy_desc_to_fifo(struct msgdma_device *mdev, in msgdma_copy_desc_to_fifo() argument
540 msgdma_copy_one(mdev, desc); in msgdma_copy_desc_to_fifo()
543 msgdma_copy_one(mdev, sdesc); in msgdma_copy_desc_to_fifo()
550 static void msgdma_start_transfer(struct msgdma_device *mdev) in msgdma_start_transfer() argument
554 if (!mdev->idle) in msgdma_start_transfer()
557 desc = list_first_entry_or_null(&mdev->pending_list, in msgdma_start_transfer()
562 list_splice_tail_init(&mdev->pending_list, &mdev->active_list); in msgdma_start_transfer()
563 msgdma_copy_desc_to_fifo(mdev, desc); in msgdma_start_transfer()
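Lines 535-563 connect the software queue to the FIFO: if the engine is idle and something is pending, the whole pending list is spliced onto active_list and the head descriptor, plus its tx_list children, is copied out. A sketch of both functions:

	static void msgdma_copy_desc_to_fifo(struct msgdma_device *mdev,
					     struct msgdma_sw_desc *desc)
	{
		struct msgdma_sw_desc *sdesc, *next;

		msgdma_copy_one(mdev, desc);

		list_for_each_entry_safe(sdesc, next, &desc->tx_list, node)
			msgdma_copy_one(mdev, sdesc);
	}

	static void msgdma_start_transfer(struct msgdma_device *mdev)
	{
		struct msgdma_sw_desc *desc;

		if (!mdev->idle)
			return;

		desc = list_first_entry_or_null(&mdev->pending_list,
						struct msgdma_sw_desc, node);
		if (!desc)
			return;

		list_splice_tail_init(&mdev->pending_list, &mdev->active_list);
		msgdma_copy_desc_to_fifo(mdev, desc);
	}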
572 struct msgdma_device *mdev = to_mdev(chan); in msgdma_issue_pending() local
575 spin_lock_irqsave(&mdev->lock, flags); in msgdma_issue_pending()
576 msgdma_start_transfer(mdev); in msgdma_issue_pending()
577 spin_unlock_irqrestore(&mdev->lock, flags); in msgdma_issue_pending()
584 static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev) in msgdma_chan_desc_cleanup() argument
588 list_for_each_entry_safe(desc, next, &mdev->done_list, node) { in msgdma_chan_desc_cleanup()
597 spin_unlock(&mdev->lock); in msgdma_chan_desc_cleanup()
599 spin_lock(&mdev->lock); in msgdma_chan_desc_cleanup()
603 msgdma_free_descriptor(mdev, desc); in msgdma_chan_desc_cleanup()
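Lines 584-603 show the classic dmaengine cleanup shape: walk done_list and, crucially, drop mdev->lock around the client callback (lines 597-599) so the callback may itself submit new work without deadlocking. A sketch; the callback bookkeeping names are assumptions:

	static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev)
	{
		struct msgdma_sw_desc *desc, *next;

		list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
			dma_async_tx_callback callback;
			void *callback_param;

			list_del(&desc->node);

			callback = desc->async_tx.callback;
			callback_param = desc->async_tx.callback_param;
			if (callback) {
				spin_unlock(&mdev->lock);	/* callback may resubmit */
				callback(callback_param);
				spin_lock(&mdev->lock);
			}

			msgdma_free_descriptor(mdev, desc);
		}
	}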
611 static void msgdma_complete_descriptor(struct msgdma_device *mdev) in msgdma_complete_descriptor() argument
615 desc = list_first_entry_or_null(&mdev->active_list, in msgdma_complete_descriptor()
621 list_add_tail(&desc->node, &mdev->done_list); in msgdma_complete_descriptor()
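Lines 611-621 retire the oldest active descriptor onto done_list; completing its dmaengine cookie in between is the usual pattern and assumed here:

	static void msgdma_complete_descriptor(struct msgdma_device *mdev)
	{
		struct msgdma_sw_desc *desc;

		desc = list_first_entry_or_null(&mdev->active_list,
						struct msgdma_sw_desc, node);
		if (!desc)
			return;
		list_del(&desc->node);
		dma_cookie_complete(&desc->async_tx);	/* assumed, per dmaengine convention */
		list_add_tail(&desc->node, &mdev->done_list);
	}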
628 static void msgdma_free_descriptors(struct msgdma_device *mdev) in msgdma_free_descriptors() argument
630 msgdma_free_desc_list(mdev, &mdev->active_list); in msgdma_free_descriptors()
631 msgdma_free_desc_list(mdev, &mdev->pending_list); in msgdma_free_descriptors()
632 msgdma_free_desc_list(mdev, &mdev->done_list); in msgdma_free_descriptors()
641 struct msgdma_device *mdev = to_mdev(dchan); in msgdma_free_chan_resources() local
644 spin_lock_irqsave(&mdev->lock, flags); in msgdma_free_chan_resources()
645 msgdma_free_descriptors(mdev); in msgdma_free_chan_resources()
646 spin_unlock_irqrestore(&mdev->lock, flags); in msgdma_free_chan_resources()
647 kfree(mdev->sw_desq); in msgdma_free_chan_resources()
658 struct msgdma_device *mdev = to_mdev(dchan); in msgdma_alloc_chan_resources() local
662 mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT); in msgdma_alloc_chan_resources()
663 if (!mdev->sw_desq) in msgdma_alloc_chan_resources()
666 mdev->idle = true; in msgdma_alloc_chan_resources()
667 mdev->desc_free_cnt = MSGDMA_DESC_NUM; in msgdma_alloc_chan_resources()
669 INIT_LIST_HEAD(&mdev->free_list); in msgdma_alloc_chan_resources()
672 desc = mdev->sw_desq + i; in msgdma_alloc_chan_resources()
673 dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan); in msgdma_alloc_chan_resources()
675 list_add_tail(&desc->node, &mdev->free_list); in msgdma_alloc_chan_resources()
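Lines 658-675 preallocate the whole software descriptor pool at channel open: one kcalloc'd array of MSGDMA_DESC_NUM entries, each initialised and threaded onto free_list. A sketch; wiring tx_submit into each descriptor is an assumption, though something must do it for msgdma_tx_submit() above to run:

	static int msgdma_alloc_chan_resources(struct dma_chan *dchan)
	{
		struct msgdma_device *mdev = to_mdev(dchan);
		struct msgdma_sw_desc *desc;
		int i;

		mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT);
		if (!mdev->sw_desq)
			return -ENOMEM;

		mdev->idle = true;
		mdev->desc_free_cnt = MSGDMA_DESC_NUM;

		INIT_LIST_HEAD(&mdev->free_list);

		for (i = 0; i < MSGDMA_DESC_NUM; i++) {
			desc = mdev->sw_desq + i;
			dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan);
			desc->async_tx.tx_submit = msgdma_tx_submit;	/* assumed hookup */
			list_add_tail(&desc->node, &mdev->free_list);
		}

		return MSGDMA_DESC_NUM;
	}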
687 struct msgdma_device *mdev = (struct msgdma_device *)data; in msgdma_tasklet() local
693 spin_lock_irqsave(&mdev->lock, flags); in msgdma_tasklet()
696 count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL); in msgdma_tasklet()
697 dev_dbg(mdev->dev, "%s (%d): response count=%d\n", in msgdma_tasklet()
707 size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED); in msgdma_tasklet()
708 status = ioread32(mdev->resp + MSGDMA_RESP_STATUS); in msgdma_tasklet()
710 msgdma_complete_descriptor(mdev); in msgdma_tasklet()
711 msgdma_chan_desc_cleanup(mdev); in msgdma_tasklet()
714 spin_unlock_irqrestore(&mdev->lock, flags); in msgdma_tasklet()
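Lines 687-714 drain the response FIFO in the tasklet: RESP_FILL_LEVEL says how many responses are queued, each one is popped by reading its two longwords, and then one active descriptor is retired. A sketch; whether size/status are actually consumed, or read only to pop the FIFO entry, cannot be told from these fragments:

	static void msgdma_tasklet(unsigned long data)
	{
		struct msgdma_device *mdev = (struct msgdma_device *)data;
		u32 count;
		u32 __maybe_unused size;
		u32 __maybe_unused status;
		unsigned long flags;

		spin_lock_irqsave(&mdev->lock, flags);

		/* How many responses are waiting in the FIFO? */
		count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
		dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
			__func__, __LINE__, count);

		while (count--) {
			/* Reading both longwords pops this response off the FIFO */
			size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
			status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);

			msgdma_complete_descriptor(mdev);
			msgdma_chan_desc_cleanup(mdev);
		}

		spin_unlock_irqrestore(&mdev->lock, flags);
	}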
726 struct msgdma_device *mdev = data; in msgdma_irq_handler() local
729 status = ioread32(mdev->csr + MSGDMA_CSR_STATUS); in msgdma_irq_handler()
732 spin_lock(&mdev->lock); in msgdma_irq_handler()
733 mdev->idle = true; in msgdma_irq_handler()
734 msgdma_start_transfer(mdev); in msgdma_irq_handler()
735 spin_unlock(&mdev->lock); in msgdma_irq_handler()
738 tasklet_schedule(&mdev->irq_tasklet); in msgdma_irq_handler()
741 iowrite32(MSGDMA_CSR_STAT_IRQ, mdev->csr + MSGDMA_CSR_STATUS); in msgdma_irq_handler()
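Lines 726-741 keep the hard IRQ minimal: if the engine looks idle, mark it so and kick the next pending transfer immediately, then defer response processing to the tasklet and ack the interrupt. A sketch; the busy-bit test is inferred from the idle handling at lines 733-734:

	static irqreturn_t msgdma_irq_handler(int irq, void *data)
	{
		struct msgdma_device *mdev = data;
		u32 status;

		status = ioread32(mdev->csr + MSGDMA_CSR_STATUS);
		if ((status & MSGDMA_CSR_STAT_BUSY) == 0) {	/* assumed bit name */
			/* Engine went idle: start the next queued transfer now */
			spin_lock(&mdev->lock);
			mdev->idle = true;
			msgdma_start_transfer(mdev);
			spin_unlock(&mdev->lock);
		}

		/* Heavier response processing happens in the tasklet */
		tasklet_schedule(&mdev->irq_tasklet);

		/* Ack the interrupt in the controller */
		iowrite32(MSGDMA_CSR_STAT_IRQ, mdev->csr + MSGDMA_CSR_STATUS);

		return IRQ_HANDLED;
	}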
750 static void msgdma_dev_remove(struct msgdma_device *mdev) in msgdma_dev_remove() argument
752 if (!mdev) in msgdma_dev_remove()
755 devm_free_irq(mdev->dev, mdev->irq, mdev); in msgdma_dev_remove()
756 tasklet_kill(&mdev->irq_tasklet); in msgdma_dev_remove()
757 list_del(&mdev->dmachan.device_node); in msgdma_dev_remove()
797 struct msgdma_device *mdev; in msgdma_probe() local
802 mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_NOWAIT); in msgdma_probe()
803 if (!mdev) in msgdma_probe()
806 mdev->dev = &pdev->dev; in msgdma_probe()
809 ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr); in msgdma_probe()
814 ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc); in msgdma_probe()
819 ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp); in msgdma_probe()
823 platform_set_drvdata(pdev, mdev); in msgdma_probe()
826 mdev->irq = platform_get_irq(pdev, 0); in msgdma_probe()
827 if (mdev->irq < 0) in msgdma_probe()
830 ret = devm_request_irq(&pdev->dev, mdev->irq, msgdma_irq_handler, in msgdma_probe()
831 0, dev_name(&pdev->dev), mdev); in msgdma_probe()
835 tasklet_init(&mdev->irq_tasklet, msgdma_tasklet, (unsigned long)mdev); in msgdma_probe()
837 dma_cookie_init(&mdev->dmachan); in msgdma_probe()
839 spin_lock_init(&mdev->lock); in msgdma_probe()
841 INIT_LIST_HEAD(&mdev->active_list); in msgdma_probe()
842 INIT_LIST_HEAD(&mdev->pending_list); in msgdma_probe()
843 INIT_LIST_HEAD(&mdev->done_list); in msgdma_probe()
844 INIT_LIST_HEAD(&mdev->free_list); in msgdma_probe()
846 dma_dev = &mdev->dmadev; in msgdma_probe()
875 mdev->dmachan.device = dma_dev; in msgdma_probe()
876 list_add_tail(&mdev->dmachan.device_node, &dma_dev->channels); in msgdma_probe()
887 msgdma_reset(mdev); in msgdma_probe()
898 msgdma_dev_remove(mdev); in msgdma_probe()
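Lines 797-898 trace the probe order: allocate the per-device state, map the three register windows (csr, desc, resp), hook up the interrupt and tasklet, initialise the lock, cookie and the four descriptor lists, register the single channel, then reset the hardware. Condensed to its skeleton; the dmaengine capability setup between lines 846 and 875, device registration and error unwinding are elided, and request_and_map() is taken to be a driver-local mapping helper as the fragments suggest:

	static int msgdma_probe(struct platform_device *pdev)
	{
		struct msgdma_device *mdev;
		struct dma_device *dma_dev;
		struct resource *dma_res;
		int ret;

		mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_NOWAIT);
		if (!mdev)
			return -ENOMEM;
		mdev->dev = &pdev->dev;

		/* Three register windows: control/status, descriptor FIFO, responses */
		ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr);
		if (ret)
			return ret;
		ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc);
		if (ret)
			return ret;
		ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp);
		if (ret)
			return ret;

		platform_set_drvdata(pdev, mdev);

		mdev->irq = platform_get_irq(pdev, 0);
		if (mdev->irq < 0)
			return -ENXIO;
		ret = devm_request_irq(&pdev->dev, mdev->irq, msgdma_irq_handler,
				       0, dev_name(&pdev->dev), mdev);
		if (ret)
			return ret;

		tasklet_init(&mdev->irq_tasklet, msgdma_tasklet, (unsigned long)mdev);
		dma_cookie_init(&mdev->dmachan);
		spin_lock_init(&mdev->lock);

		INIT_LIST_HEAD(&mdev->active_list);
		INIT_LIST_HEAD(&mdev->pending_list);
		INIT_LIST_HEAD(&mdev->done_list);
		INIT_LIST_HEAD(&mdev->free_list);

		dma_dev = &mdev->dmadev;
		/* ... capability flags and dmaengine callbacks elided (lines 846-875) ... */
		mdev->dmachan.device = dma_dev;
		list_add_tail(&mdev->dmachan.device_node, &dma_dev->channels);

		msgdma_reset(mdev);
		/* ... dma_async_device_register() and failure path elided ... */
		return 0;
	}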
911 struct msgdma_device *mdev = platform_get_drvdata(pdev); in msgdma_remove() local
913 dma_async_device_unregister(&mdev->dmadev); in msgdma_remove()
914 msgdma_dev_remove(mdev); in msgdma_remove()