Lines Matching full:pc

92  *                         channel (PC)
93 * @queue: Queue for the PDs issued to this PC
94 * @base: The mapped register I/O base of this PC
96 * @irq: The IRQ that this PC is using
96 * @refcnt: Track how many VCs are using this PC
97 * @tasklet: Tasklet for this PC
98 * @lock: Lock to protect against multiple VCs accessing the PC
109 /* lock to protect PC */
117 * @pc: The pointer to the underlying PC
123 struct mtk_cqdma_pchan *pc; member
136 * @pc: The pointer to all the underlying PCs
145 struct mtk_cqdma_pchan **pc; member
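
Taken together, the kernel-doc lines above (92-98) describe one physical channel, line 123 shows each virtual channel keeping a pointer to the PC it is bound to, and line 145 shows the device holding an array of PC pointers. A minimal sketch of the per-PC structure they imply follows; the field types are assumptions inferred from the accessors later in this listing (readl() on base, refcount_read() on refcnt, spin_lock_irqsave() on lock), not a verbatim copy of the driver.

	struct mtk_cqdma_pchan {
		struct list_head queue;		/* PDs issued to this PC */
		void __iomem *base;		/* mapped register I/O base */
		u32 irq;			/* IRQ used by this PC */
		refcount_t refcnt;		/* how many VCs use this PC */
		struct tasklet_struct tasklet;	/* completion bottom half */
		spinlock_t lock;		/* protects the PC against concurrent VCs */
	};
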
168 static u32 mtk_dma_read(struct mtk_cqdma_pchan *pc, u32 reg) in mtk_dma_read() argument
170 return readl(pc->base + reg); in mtk_dma_read()
173 static void mtk_dma_write(struct mtk_cqdma_pchan *pc, u32 reg, u32 val) in mtk_dma_write() argument
175 writel_relaxed(val, pc->base + reg); in mtk_dma_write()
178 static void mtk_dma_rmw(struct mtk_cqdma_pchan *pc, u32 reg, in mtk_dma_rmw() argument
183 val = mtk_dma_read(pc, reg); in mtk_dma_rmw()
186 mtk_dma_write(pc, reg, val); in mtk_dma_rmw()
189 static void mtk_dma_set(struct mtk_cqdma_pchan *pc, u32 reg, u32 val) in mtk_dma_set() argument
191 mtk_dma_rmw(pc, reg, 0, val); in mtk_dma_set()
194 static void mtk_dma_clr(struct mtk_cqdma_pchan *pc, u32 reg, u32 val) in mtk_dma_clr() argument
196 mtk_dma_rmw(pc, reg, val, 0); in mtk_dma_clr()
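
mtk_dma_rmw() above appears with its update lines (184-185) elided because they do not mention pc. A plausible reconstruction of the whole helper, assuming the conventional clear-then-set form implied by mtk_dma_set() and mtk_dma_clr() passing 0 for the unused argument; the mask/set parameter names are assumptions:

	static void mtk_dma_rmw(struct mtk_cqdma_pchan *pc, u32 reg,
				u32 mask, u32 set)
	{
		u32 val;

		val = mtk_dma_read(pc, reg);	/* line 183 */
		val &= ~mask;			/* drop the bits to be cleared */
		val |= set;			/* add the bits to be set */
		mtk_dma_write(pc, reg, val);	/* line 186 */
	}
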
204 static int mtk_cqdma_poll_engine_done(struct mtk_cqdma_pchan *pc, bool atomic) in mtk_cqdma_poll_engine_done() argument
209 return readl_poll_timeout(pc->base + MTK_CQDMA_EN, in mtk_cqdma_poll_engine_done()
215 return readl_poll_timeout_atomic(pc->base + MTK_CQDMA_EN, in mtk_cqdma_poll_engine_done()
222 static int mtk_cqdma_hard_reset(struct mtk_cqdma_pchan *pc) in mtk_cqdma_hard_reset() argument
224 mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT); in mtk_cqdma_hard_reset()
225 mtk_dma_clr(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT); in mtk_cqdma_hard_reset()
227 return mtk_cqdma_poll_engine_done(pc, true); in mtk_cqdma_hard_reset()
230 static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc, in mtk_cqdma_start() argument
234 if (mtk_cqdma_poll_engine_done(pc, true) < 0) in mtk_cqdma_start()
238 mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_WARM_RST_BIT); in mtk_cqdma_start()
239 if (mtk_cqdma_poll_engine_done(pc, true) < 0) in mtk_cqdma_start()
243 mtk_dma_set(pc, MTK_CQDMA_SRC, cvd->src & MTK_CQDMA_ADDR_LIMIT); in mtk_cqdma_start()
245 mtk_dma_set(pc, MTK_CQDMA_SRC2, cvd->src >> MTK_CQDMA_ADDR2_SHFIT); in mtk_cqdma_start()
247 mtk_dma_set(pc, MTK_CQDMA_SRC2, 0); in mtk_cqdma_start()
251 mtk_dma_set(pc, MTK_CQDMA_DST, cvd->dest & MTK_CQDMA_ADDR_LIMIT); in mtk_cqdma_start()
253 mtk_dma_set(pc, MTK_CQDMA_DST2, cvd->dest >> MTK_CQDMA_ADDR2_SHFIT); in mtk_cqdma_start()
255 mtk_dma_set(pc, MTK_CQDMA_DST2, 0); in mtk_cqdma_start()
259 mtk_dma_set(pc, MTK_CQDMA_LEN1, cvd->len); in mtk_cqdma_start()
262 mtk_dma_set(pc, MTK_CQDMA_EN, MTK_CQDMA_EN_BIT); in mtk_cqdma_start()
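
Lines 243-255 program each DMA address in two registers: the low word masked by MTK_CQDMA_ADDR_LIMIT, and either the high word or zero in SRC2/DST2. A sketch of that selection for the source side, assuming it is guarded by the usual CONFIG_ARCH_DMA_ADDR_T_64BIT preprocessor switch (the destination at lines 251-255 follows the same pattern with MTK_CQDMA_DST/DST2):

	/* setup the source address */
	mtk_dma_set(pc, MTK_CQDMA_SRC, cvd->src & MTK_CQDMA_ADDR_LIMIT);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	/* high word of a 64-bit DMA address */
	mtk_dma_set(pc, MTK_CQDMA_SRC2, cvd->src >> MTK_CQDMA_ADDR2_SHFIT);
#else
	/* 32-bit configuration: the high word is always zero */
	mtk_dma_set(pc, MTK_CQDMA_SRC2, 0);
#endif
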
268 struct mtk_cqdma_pchan *pc = cvc->pc; in mtk_cqdma_issue_vchan_pending() local
273 lockdep_assert_held(&pc->lock); in mtk_cqdma_issue_vchan_pending()
276 /* need to trigger dma engine if PC's queue is empty */ in mtk_cqdma_issue_vchan_pending()
277 if (list_empty(&pc->queue)) in mtk_cqdma_issue_vchan_pending()
282 /* add VD into PC's queue */ in mtk_cqdma_issue_vchan_pending()
283 list_add_tail(&cvd->node, &pc->queue); in mtk_cqdma_issue_vchan_pending()
287 mtk_cqdma_start(pc, cvd); in mtk_cqdma_issue_vchan_pending()
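
mtk_cqdma_issue_vchan_pending() only kicks the engine when the PC queue was empty before the new descriptor was added; if the queue already holds work, the completion path (mtk_cqdma_consume_work_queue() below) starts the next CVD itself. A sketch of the loop body around lines 276-287; the trigger_engine flag, the to_cqdma_vdesc() helper, and the virt-dma desc_issued list are assumptions:

	list_for_each_entry_safe(vd, vd2, &cvc->vc.desc_issued, node) {
		/* need to trigger dma engine if PC's queue is empty */
		if (list_empty(&pc->queue))
			trigger_engine = true;

		cvd = to_cqdma_vdesc(vd);

		/* add VD into PC's queue */
		list_add_tail(&cvd->node, &pc->queue);

		/* start the dma engine for the first CVD queued */
		if (trigger_engine)
			mtk_cqdma_start(pc, cvd);

		/* remove VD from the issued list */
		list_del(&vd->node);
	}
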
296  * meaning that there are VDs being processed by the PC
302 list_for_each_entry(cvd, &cvc->pc->queue, node) in mtk_cqdma_is_vchan_active()
310  * return the pointer to the CVD that was just consumed by the PC
313 *mtk_cqdma_consume_work_queue(struct mtk_cqdma_pchan *pc) in mtk_cqdma_consume_work_queue() argument
318 /* consume a CVD from PC's queue */ in mtk_cqdma_consume_work_queue()
319 cvd = list_first_entry_or_null(&pc->queue, in mtk_cqdma_consume_work_queue()
330 /* delete CVD from PC's queue */ in mtk_cqdma_consume_work_queue()
350 cvd = list_first_entry_or_null(&pc->queue, in mtk_cqdma_consume_work_queue()
353 mtk_cqdma_start(pc, cvd); in mtk_cqdma_consume_work_queue()
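
The consume path pops the head CVD off the PC queue (line 319), removes it (line 330), and then peeks at the new head to chain the next transfer without waiting for another issue_pending call (lines 350-353). A condensed sketch, with the residue and cookie-completion bookkeeping elided:

	/* consume a CVD from PC's queue */
	cvd = list_first_entry_or_null(&pc->queue,
				       struct mtk_cqdma_vdesc, node);
	if (!cvd)
		return NULL;
	ret = cvd;

	/* delete CVD from PC's queue */
	list_del(&cvd->node);

	/* ... residue update and vchan cookie completion elided ... */

	/* start the transaction for the next CVD in the queue, if any */
	cvd = list_first_entry_or_null(&pc->queue,
				       struct mtk_cqdma_vdesc, node);
	if (cvd)
		mtk_cqdma_start(pc, cvd);

	return ret;
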
360 struct mtk_cqdma_pchan *pc = from_tasklet(pc, t, tasklet); in mtk_cqdma_tasklet_cb() local
364 spin_lock_irqsave(&pc->lock, flags); in mtk_cqdma_tasklet_cb()
366 cvd = mtk_cqdma_consume_work_queue(pc); in mtk_cqdma_tasklet_cb()
367 spin_unlock_irqrestore(&pc->lock, flags); in mtk_cqdma_tasklet_cb()
382 enable_irq(pc->irq); in mtk_cqdma_tasklet_cb()
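
The tasklet consumes one completed CVD under the PC lock, runs the follow-up work outside the lock, and finally re-enables the IRQ line that the ISR masked. A rough sketch of the body, assuming the descriptor embeds a virt_dma_desc named vd so dma_run_dependencies() can be called on its tx descriptor:

	spin_lock_irqsave(&pc->lock, flags);
	cvd = mtk_cqdma_consume_work_queue(pc);
	spin_unlock_irqrestore(&pc->lock, flags);

	if (cvd) {
		/* kick any transactions that depend on the finished one */
		dma_run_dependencies(&cvd->vd.tx);
		/* ... per-descriptor cleanup elided in this listing ... */
	}

	/* re-enable the interrupt line masked in mtk_cqdma_irq() */
	enable_irq(pc->irq);
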
392 /* clear interrupt flags for each PC */ in mtk_cqdma_irq()
394 spin_lock(&cqdma->pc[i]->lock); in mtk_cqdma_irq()
395 if (mtk_dma_read(cqdma->pc[i], in mtk_cqdma_irq()
398 mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_FLAG, in mtk_cqdma_irq()
404 spin_unlock(&cqdma->pc[i]->lock); in mtk_cqdma_irq()
408 disable_irq_nosync(cqdma->pc[i]->irq); in mtk_cqdma_irq()
411 tasklet_schedule(&cqdma->pc[i]->tasklet); in mtk_cqdma_irq()
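
For every PC the ISR checks and clears the interrupt flag under the PC lock, then defers the real work to the tasklet while keeping the line masked. A sketch of one loop iteration; MTK_CQDMA_INT_FLAG_BIT and the schedule_tasklet flag are assumptions, since neither appears in this listing:

	spin_lock(&cqdma->pc[i]->lock);
	if (mtk_dma_read(cqdma->pc[i], MTK_CQDMA_INT_FLAG) &
	    MTK_CQDMA_INT_FLAG_BIT) {
		/* acknowledge the interrupt */
		mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_FLAG,
			    MTK_CQDMA_INT_FLAG_BIT);
		schedule_tasklet = true;
		ret = IRQ_HANDLED;
	}
	spin_unlock(&cqdma->pc[i]->lock);

	if (schedule_tasklet) {
		/* keep the line masked until the tasklet re-enables it (line 382) */
		disable_irq_nosync(cqdma->pc[i]->irq);
		tasklet_schedule(&cqdma->pc[i]->tasklet);
	}
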
425 spin_lock_irqsave(&cvc->pc->lock, flags); in mtk_cqdma_find_active_desc()
426 list_for_each_entry(vd, &cvc->pc->queue, node) in mtk_cqdma_find_active_desc()
428 spin_unlock_irqrestore(&cvc->pc->lock, flags); in mtk_cqdma_find_active_desc()
431 spin_unlock_irqrestore(&cvc->pc->lock, flags); in mtk_cqdma_find_active_desc()
475  /* acquire PC's lock before VC's lock for lock dependency in tasklet */ in mtk_cqdma_issue_pending()
476 spin_lock_irqsave(&cvc->pc->lock, pc_flags); in mtk_cqdma_issue_pending()
483 spin_unlock_irqrestore(&cvc->pc->lock, pc_flags); in mtk_cqdma_issue_pending()
577 /* acquire PC's lock first due to lock dependency in dma ISR */ in mtk_cqdma_free_active_desc()
578 spin_lock_irqsave(&cvc->pc->lock, pc_flags); in mtk_cqdma_free_active_desc()
588 spin_unlock_irqrestore(&cvc->pc->lock, pc_flags); in mtk_cqdma_free_active_desc()
616 struct mtk_cqdma_pchan *pc = NULL; in mtk_cqdma_alloc_chan_resources() local
620  /* allocate PC with the minimum refcount */ in mtk_cqdma_alloc_chan_resources()
622 refcnt = refcount_read(&cqdma->pc[i]->refcnt); in mtk_cqdma_alloc_chan_resources()
624 pc = cqdma->pc[i]; in mtk_cqdma_alloc_chan_resources()
629 if (!pc) in mtk_cqdma_alloc_chan_resources()
632 spin_lock_irqsave(&pc->lock, flags); in mtk_cqdma_alloc_chan_resources()
634 if (!refcount_read(&pc->refcnt)) { in mtk_cqdma_alloc_chan_resources()
635 /* allocate PC when the refcount is zero */ in mtk_cqdma_alloc_chan_resources()
636 mtk_cqdma_hard_reset(pc); in mtk_cqdma_alloc_chan_resources()
638 /* enable interrupt for this PC */ in mtk_cqdma_alloc_chan_resources()
639 mtk_dma_set(pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT); in mtk_cqdma_alloc_chan_resources()
645 refcount_set(&pc->refcnt, 1); in mtk_cqdma_alloc_chan_resources()
647 refcount_inc(&pc->refcnt); in mtk_cqdma_alloc_chan_resources()
650 spin_unlock_irqrestore(&pc->lock, flags); in mtk_cqdma_alloc_chan_resources()
652 vc->pc = pc; in mtk_cqdma_alloc_chan_resources()
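
Channel allocation spreads VCs over the available PCs by picking the PC with the fewest current users, then doing the one-time hard reset and interrupt enable only when that PC's refcount was zero. A minimal sketch of the selection loop at lines 620-624; min_refcnt and the -ENOSPC return are assumptions:

	struct mtk_cqdma_pchan *pc = NULL;
	u32 i, min_refcnt = U32_MAX, refcnt;

	/* allocate PC with the minimum refcount */
	for (i = 0; i < cqdma->dma_channels; ++i) {
		refcnt = refcount_read(&cqdma->pc[i]->refcnt);
		if (refcnt < min_refcnt) {
			pc = cqdma->pc[i];
			min_refcnt = refcnt;
		}
	}

	if (!pc)
		return -ENOSPC;	/* no PC available; exact errno is an assumption */
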
665 spin_lock_irqsave(&cvc->pc->lock, flags); in mtk_cqdma_free_chan_resources()
667 /* PC is not freed until there is no VC mapped to it */ in mtk_cqdma_free_chan_resources()
668 if (refcount_dec_and_test(&cvc->pc->refcnt)) { in mtk_cqdma_free_chan_resources()
670 mtk_dma_set(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT); in mtk_cqdma_free_chan_resources()
673 if (mtk_cqdma_poll_engine_done(cvc->pc, true) < 0) in mtk_cqdma_free_chan_resources()
677 mtk_dma_clr(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT); in mtk_cqdma_free_chan_resources()
678 mtk_dma_clr(cvc->pc, MTK_CQDMA_INT_FLAG, in mtk_cqdma_free_chan_resources()
681 /* disable interrupt for this PC */ in mtk_cqdma_free_chan_resources()
682 mtk_dma_clr(cvc->pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT); in mtk_cqdma_free_chan_resources()
685 spin_unlock_irqrestore(&cvc->pc->lock, flags); in mtk_cqdma_free_chan_resources()
707 spin_lock_irqsave(&cqdma->pc[i]->lock, flags); in mtk_cqdma_hw_init()
708 if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0) { in mtk_cqdma_hw_init()
710 spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags); in mtk_cqdma_hw_init()
717 spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags); in mtk_cqdma_hw_init()
730 spin_lock_irqsave(&cqdma->pc[i]->lock, flags); in mtk_cqdma_hw_deinit()
731 if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0) in mtk_cqdma_hw_deinit()
733 spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags); in mtk_cqdma_hw_deinit()
805 cqdma->pc = devm_kcalloc(&pdev->dev, cqdma->dma_channels, in mtk_cqdma_probe()
806 sizeof(*cqdma->pc), GFP_KERNEL); in mtk_cqdma_probe()
807 if (!cqdma->pc) in mtk_cqdma_probe()
812 cqdma->pc[i] = devm_kcalloc(&pdev->dev, 1, in mtk_cqdma_probe()
813 sizeof(**cqdma->pc), GFP_KERNEL); in mtk_cqdma_probe()
814 if (!cqdma->pc[i]) in mtk_cqdma_probe()
817 INIT_LIST_HEAD(&cqdma->pc[i]->queue); in mtk_cqdma_probe()
818 spin_lock_init(&cqdma->pc[i]->lock); in mtk_cqdma_probe()
819 refcount_set(&cqdma->pc[i]->refcnt, 0); in mtk_cqdma_probe()
820 cqdma->pc[i]->base = devm_platform_ioremap_resource(pdev, i); in mtk_cqdma_probe()
821 if (IS_ERR(cqdma->pc[i]->base)) in mtk_cqdma_probe()
822 return PTR_ERR(cqdma->pc[i]->base); in mtk_cqdma_probe()
828 cqdma->pc[i]->irq = err; in mtk_cqdma_probe()
830 err = devm_request_irq(&pdev->dev, cqdma->pc[i]->irq, in mtk_cqdma_probe()
874 /* initialize tasklet for each PC */ in mtk_cqdma_probe()
876 tasklet_setup(&cqdma->pc[i]->tasklet, mtk_cqdma_tasklet_cb); in mtk_cqdma_probe()
905 spin_lock_irqsave(&cqdma->pc[i]->lock, flags); in mtk_cqdma_remove()
906 mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_EN, in mtk_cqdma_remove()
908 spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags); in mtk_cqdma_remove()
911 synchronize_irq(cqdma->pc[i]->irq); in mtk_cqdma_remove()
913 tasklet_kill(&cqdma->pc[i]->tasklet); in mtk_cqdma_remove()