Lines matching "c" and "phy" in drivers/dma/k3-dma.c (HiSilicon K3 DMA engine driver)

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2013 - 2015 Linaro Ltd.
4 * Copyright (c) 2013 HiSilicon Limited.
8 #include <linux/dma-mapping.h>
23 #include "virt-dma.h"
25 #define DRIVER_NAME "k3-dma"
83 struct k3_dma_phy *phy; member
105 struct k3_dma_phy *phy; member
134 static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on) in k3_dma_pause_dma() argument
139 val = readl_relaxed(phy->base + CX_CFG); in k3_dma_pause_dma()
141 writel_relaxed(val, phy->base + CX_CFG); in k3_dma_pause_dma()
143 val = readl_relaxed(phy->base + CX_CFG); in k3_dma_pause_dma()
145 writel_relaxed(val, phy->base + CX_CFG); in k3_dma_pause_dma()
149 static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d) in k3_dma_terminate_chan() argument
153 k3_dma_pause_dma(phy, false); in k3_dma_terminate_chan()
155 val = 0x1 << phy->idx; in k3_dma_terminate_chan()
156 writel_relaxed(val, d->base + INT_TC1_RAW); in k3_dma_terminate_chan()
157 writel_relaxed(val, d->base + INT_TC2_RAW); in k3_dma_terminate_chan()
158 writel_relaxed(val, d->base + INT_ERR1_RAW); in k3_dma_terminate_chan()
159 writel_relaxed(val, d->base + INT_ERR2_RAW); in k3_dma_terminate_chan()
162 static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw) in k3_dma_set_desc() argument
164 writel_relaxed(hw->lli, phy->base + CX_LLI); in k3_dma_set_desc()
165 writel_relaxed(hw->count, phy->base + CX_CNT0); in k3_dma_set_desc()
166 writel_relaxed(hw->saddr, phy->base + CX_SRC); in k3_dma_set_desc()
167 writel_relaxed(hw->daddr, phy->base + CX_DST); in k3_dma_set_desc()
168 writel_relaxed(hw->config, phy->base + CX_CFG); in k3_dma_set_desc()
171 static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy) in k3_dma_get_curr_cnt() argument
175 cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10); in k3_dma_get_curr_cnt()
180 static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy) in k3_dma_get_curr_lli() argument
182 return readl_relaxed(phy->base + CX_LLI); in k3_dma_get_curr_lli()
187 return readl_relaxed(d->base + CH_STAT); in k3_dma_get_chan_stat()
194 writel_relaxed(0x0, d->base + CH_PRI); in k3_dma_enable_dma()
197 writel_relaxed(0xffff, d->base + INT_TC1_MASK); in k3_dma_enable_dma()
198 writel_relaxed(0xffff, d->base + INT_TC2_MASK); in k3_dma_enable_dma()
199 writel_relaxed(0xffff, d->base + INT_ERR1_MASK); in k3_dma_enable_dma()
200 writel_relaxed(0xffff, d->base + INT_ERR2_MASK); in k3_dma_enable_dma()
203 writel_relaxed(0x0, d->base + INT_TC1_MASK); in k3_dma_enable_dma()
204 writel_relaxed(0x0, d->base + INT_TC2_MASK); in k3_dma_enable_dma()
205 writel_relaxed(0x0, d->base + INT_ERR1_MASK); in k3_dma_enable_dma()
206 writel_relaxed(0x0, d->base + INT_ERR2_MASK); in k3_dma_enable_dma()
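
Read together, the two groups of writes above are the controller-wide interrupt unmask and mask paths of k3_dma_enable_dma(). A minimal sketch of how they likely fit together, assuming a bool on parameter and the if/else split that this per-line match listing does not show:

static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
{
	if (on) {
		/* presumably: give all physical channels equal priority */
		writel_relaxed(0x0, d->base + CH_PRI);

		/* unmask transfer-complete (TC1/TC2) and error interrupts */
		writel_relaxed(0xffff, d->base + INT_TC1_MASK);
		writel_relaxed(0xffff, d->base + INT_TC2_MASK);
		writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
		writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
	} else {
		/* mask all interrupt sources again */
		writel_relaxed(0x0, d->base + INT_TC1_MASK);
		writel_relaxed(0x0, d->base + INT_TC2_MASK);
		writel_relaxed(0x0, d->base + INT_ERR1_MASK);
		writel_relaxed(0x0, d->base + INT_ERR2_MASK);
	}
}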
214 struct k3_dma_chan *c; in k3_dma_int_handler() local
215 u32 stat = readl_relaxed(d->base + INT_STAT); in k3_dma_int_handler()
216 u32 tc1 = readl_relaxed(d->base + INT_TC1); in k3_dma_int_handler()
217 u32 tc2 = readl_relaxed(d->base + INT_TC2); in k3_dma_int_handler()
218 u32 err1 = readl_relaxed(d->base + INT_ERR1); in k3_dma_int_handler()
219 u32 err2 = readl_relaxed(d->base + INT_ERR2); in k3_dma_int_handler()
227 p = &d->phy[i]; in k3_dma_int_handler()
228 c = p->vchan; in k3_dma_int_handler()
229 if (c && (tc1 & BIT(i))) { in k3_dma_int_handler()
230 spin_lock(&c->vc.lock); in k3_dma_int_handler()
231 if (p->ds_run != NULL) { in k3_dma_int_handler()
232 vchan_cookie_complete(&p->ds_run->vd); in k3_dma_int_handler()
233 p->ds_done = p->ds_run; in k3_dma_int_handler()
234 p->ds_run = NULL; in k3_dma_int_handler()
236 spin_unlock(&c->vc.lock); in k3_dma_int_handler()
238 if (c && (tc2 & BIT(i))) { in k3_dma_int_handler()
239 spin_lock(&c->vc.lock); in k3_dma_int_handler()
240 if (p->ds_run != NULL) in k3_dma_int_handler()
241 vchan_cyclic_callback(&p->ds_run->vd); in k3_dma_int_handler()
242 spin_unlock(&c->vc.lock); in k3_dma_int_handler()
247 dev_warn(d->slave.dev, "DMA ERR\n"); in k3_dma_int_handler()
250 writel_relaxed(irq_chan, d->base + INT_TC1_RAW); in k3_dma_int_handler()
251 writel_relaxed(irq_chan, d->base + INT_TC2_RAW); in k3_dma_int_handler()
252 writel_relaxed(err1, d->base + INT_ERR1_RAW); in k3_dma_int_handler()
253 writel_relaxed(err2, d->base + INT_ERR2_RAW); in k3_dma_int_handler()
256 tasklet_schedule(&d->task); in k3_dma_int_handler()
264 static int k3_dma_start_txd(struct k3_dma_chan *c) in k3_dma_start_txd() argument
266 struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device); in k3_dma_start_txd()
267 struct virt_dma_desc *vd = vchan_next_desc(&c->vc); in k3_dma_start_txd()
269 if (!c->phy) in k3_dma_start_txd()
270 return -EAGAIN; in k3_dma_start_txd()
272 if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d)) in k3_dma_start_txd()
273 return -EAGAIN; in k3_dma_start_txd()
276 if (c->phy->ds_run) in k3_dma_start_txd()
277 return -EAGAIN; in k3_dma_start_txd()
283 * fetch and remove request from vc->desc_issued in k3_dma_start_txd()
284 * so vc->desc_issued only contains desc pending in k3_dma_start_txd()
286 list_del(&ds->vd.node); in k3_dma_start_txd()
288 c->phy->ds_run = ds; in k3_dma_start_txd()
289 c->phy->ds_done = NULL; in k3_dma_start_txd()
291 k3_dma_set_desc(c->phy, &ds->desc_hw[0]); in k3_dma_start_txd()
294 c->phy->ds_run = NULL; in k3_dma_start_txd()
295 c->phy->ds_done = NULL; in k3_dma_start_txd()
296 return -EAGAIN; in k3_dma_start_txd()
303 struct k3_dma_chan *c, *cn; in k3_dma_tasklet() local
306 /* check new dma request of running channel in vc->desc_issued */ in k3_dma_tasklet()
307 list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) { in k3_dma_tasklet()
308 spin_lock_irq(&c->vc.lock); in k3_dma_tasklet()
309 p = c->phy; in k3_dma_tasklet()
310 if (p && p->ds_done) { in k3_dma_tasklet()
311 if (k3_dma_start_txd(c)) { in k3_dma_tasklet()
313 dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx); in k3_dma_tasklet()
315 c->phy = NULL; in k3_dma_tasklet()
316 p->vchan = NULL; in k3_dma_tasklet()
319 spin_unlock_irq(&c->vc.lock); in k3_dma_tasklet()
322 /* check new channel request in d->chan_pending */ in k3_dma_tasklet()
323 spin_lock_irq(&d->lock); in k3_dma_tasklet()
324 for (pch = 0; pch < d->dma_channels; pch++) { in k3_dma_tasklet()
325 if (!(d->dma_channel_mask & (1 << pch))) in k3_dma_tasklet()
328 p = &d->phy[pch]; in k3_dma_tasklet()
330 if (p->vchan == NULL && !list_empty(&d->chan_pending)) { in k3_dma_tasklet()
331 c = list_first_entry(&d->chan_pending, in k3_dma_tasklet()
333 /* remove from d->chan_pending */ in k3_dma_tasklet()
334 list_del_init(&c->node); in k3_dma_tasklet()
337 p->vchan = c; in k3_dma_tasklet()
338 c->phy = p; in k3_dma_tasklet()
339 dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc); in k3_dma_tasklet()
342 spin_unlock_irq(&d->lock); in k3_dma_tasklet()
344 for (pch = 0; pch < d->dma_channels; pch++) { in k3_dma_tasklet()
345 if (!(d->dma_channel_mask & (1 << pch))) in k3_dma_tasklet()
349 p = &d->phy[pch]; in k3_dma_tasklet()
350 c = p->vchan; in k3_dma_tasklet()
351 if (c) { in k3_dma_tasklet()
352 spin_lock_irq(&c->vc.lock); in k3_dma_tasklet()
353 k3_dma_start_txd(c); in k3_dma_tasklet()
354 spin_unlock_irq(&c->vc.lock); in k3_dma_tasklet()
362 struct k3_dma_chan *c = to_k3_chan(chan); in k3_dma_free_chan_resources() local
363 struct k3_dma_dev *d = to_k3_dma(chan->device); in k3_dma_free_chan_resources()
366 spin_lock_irqsave(&d->lock, flags); in k3_dma_free_chan_resources()
367 list_del_init(&c->node); in k3_dma_free_chan_resources()
368 spin_unlock_irqrestore(&d->lock, flags); in k3_dma_free_chan_resources()
370 vchan_free_chan_resources(&c->vc); in k3_dma_free_chan_resources()
371 c->ccfg = 0; in k3_dma_free_chan_resources()
377 struct k3_dma_chan *c = to_k3_chan(chan); in k3_dma_tx_status() local
378 struct k3_dma_dev *d = to_k3_dma(chan->device); in k3_dma_tx_status()
385 ret = dma_cookie_status(&c->vc.chan, cookie, state); in k3_dma_tx_status()
389 spin_lock_irqsave(&c->vc.lock, flags); in k3_dma_tx_status()
390 p = c->phy; in k3_dma_tx_status()
391 ret = c->status; in k3_dma_tx_status()
397 vd = vchan_find_desc(&c->vc, cookie); in k3_dma_tx_status()
398 if (vd && !c->cyclic) { in k3_dma_tx_status()
399 bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size; in k3_dma_tx_status()
400 } else if ((!p) || (!p->ds_run)) { in k3_dma_tx_status()
403 struct k3_dma_desc_sw *ds = p->ds_run; in k3_dma_tx_status()
408 index = ((clli - ds->desc_hw_lli) / in k3_dma_tx_status()
410 for (; index < ds->desc_num; index++) { in k3_dma_tx_status()
411 bytes += ds->desc_hw[index].count; in k3_dma_tx_status()
413 if (!ds->desc_hw[index].lli) in k3_dma_tx_status()
417 spin_unlock_irqrestore(&c->vc.lock, flags); in k3_dma_tx_status()
424 struct k3_dma_chan *c = to_k3_chan(chan); in k3_dma_issue_pending() local
425 struct k3_dma_dev *d = to_k3_dma(chan->device); in k3_dma_issue_pending()
428 spin_lock_irqsave(&c->vc.lock, flags); in k3_dma_issue_pending()
429 /* add request to vc->desc_issued */ in k3_dma_issue_pending()
430 if (vchan_issue_pending(&c->vc)) { in k3_dma_issue_pending()
431 spin_lock(&d->lock); in k3_dma_issue_pending()
432 if (!c->phy) { in k3_dma_issue_pending()
433 if (list_empty(&c->node)) { in k3_dma_issue_pending()
435 list_add_tail(&c->node, &d->chan_pending); in k3_dma_issue_pending()
437 tasklet_schedule(&d->task); in k3_dma_issue_pending()
438 dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc); in k3_dma_issue_pending()
441 spin_unlock(&d->lock); in k3_dma_issue_pending()
443 dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc); in k3_dma_issue_pending()
444 spin_unlock_irqrestore(&c->vc.lock, flags); in k3_dma_issue_pending()
450 if (num != ds->desc_num - 1) in k3_dma_fill_desc()
451 ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) * in k3_dma_fill_desc()
454 ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN; in k3_dma_fill_desc()
455 ds->desc_hw[num].count = len; in k3_dma_fill_desc()
456 ds->desc_hw[num].saddr = src; in k3_dma_fill_desc()
457 ds->desc_hw[num].daddr = dst; in k3_dma_fill_desc()
458 ds->desc_hw[num].config = ccfg; in k3_dma_fill_desc()
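
A descriptive note on the link-list chaining set up above (a reading of the listing, not quoted code; the multiplier truncated at line 451 is presumably the size of one hardware descriptor):

	/*
	 * Chaining (lines 450-458): desc_hw_lli is the bus address of
	 * desc_hw[0], returned by dma_pool_zalloc() in
	 * k3_dma_alloc_desc_resource(), so entry "num" is pointed at
	 * entry "num + 1" in the same allocation:
	 *
	 *     ds->desc_hw[num].lli = ds->desc_hw_lli +
	 *                            (num + 1) * <per-descriptor size>;
	 *     ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
	 *
	 * The callers terminate the chain themselves: the memcpy and
	 * slave_sg paths write lli = 0 on the last entry (lines 527 and
	 * 582), while the cyclic path ORs desc_hw_lli back in (line 648)
	 * so the list loops around to the first descriptor.
	 */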
464 struct k3_dma_chan *c = to_k3_chan(chan); in k3_dma_alloc_desc_resource() local
466 struct k3_dma_dev *d = to_k3_dma(chan->device); in k3_dma_alloc_desc_resource()
470 dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n", in k3_dma_alloc_desc_resource()
471 &c->vc, num, lli_limit); in k3_dma_alloc_desc_resource()
479 ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli); in k3_dma_alloc_desc_resource()
480 if (!ds->desc_hw) { in k3_dma_alloc_desc_resource()
481 dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc); in k3_dma_alloc_desc_resource()
485 ds->desc_num = num; in k3_dma_alloc_desc_resource()
493 struct k3_dma_chan *c = to_k3_chan(chan); in k3_dma_prep_memcpy() local
507 c->cyclic = 0; in k3_dma_prep_memcpy()
508 ds->size = len; in k3_dma_prep_memcpy()
511 if (!c->ccfg) { in k3_dma_prep_memcpy()
513 c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN; in k3_dma_prep_memcpy()
514 c->ccfg |= (0xf << 20) | (0xf << 24); /* burst = 16 */ in k3_dma_prep_memcpy()
515 c->ccfg |= (0x3 << 12) | (0x3 << 16); /* width = 64 bit */ in k3_dma_prep_memcpy()
520 k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg); in k3_dma_prep_memcpy()
524 len -= copy; in k3_dma_prep_memcpy()
527 ds->desc_hw[num-1].lli = 0; /* end of link */ in k3_dma_prep_memcpy()
528 return vchan_tx_prep(&c->vc, &ds->vd, flags); in k3_dma_prep_memcpy()
535 struct k3_dma_chan *c = to_k3_chan(chan); in k3_dma_prep_slave_sg() local
545 c->cyclic = 0; in k3_dma_prep_slave_sg()
550 num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1; in k3_dma_prep_slave_sg()
557 k3_dma_config_write(chan, dir, &c->slave_config); in k3_dma_prep_slave_sg()
569 dst = c->dev_addr; in k3_dma_prep_slave_sg()
571 src = c->dev_addr; in k3_dma_prep_slave_sg()
575 k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg); in k3_dma_prep_slave_sg()
578 avail -= len; in k3_dma_prep_slave_sg()
582 ds->desc_hw[num-1].lli = 0; /* end of link */ in k3_dma_prep_slave_sg()
583 ds->size = total; in k3_dma_prep_slave_sg()
584 return vchan_tx_prep(&c->vc, &ds->vd, flags); in k3_dma_prep_slave_sg()
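
For context, these prep callbacks are never called directly; a peripheral driver reaches them through the generic dmaengine client API. A minimal, hypothetical consumer sketch follows; the function name, the "tx" request name, the FIFO address, width and burst values are placeholders, not taken from this driver:

#include <linux/err.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int example_start_tx(struct device *dev, struct scatterlist *sgl,
			    unsigned int sg_len, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.dst_addr	= fifo_addr,	/* peripheral FIFO address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	int ret;

	/* "tx" must match a dma-names entry in the consumer's DT node */
	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* lands in k3_dma_config(); the config is cached until the prep call */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto release;

	/* lands in k3_dma_prep_slave_sg() */
	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -ENOMEM;
		goto release;
	}

	dmaengine_submit(desc);
	/* k3_dma_issue_pending() queues the vchan and schedules the tasklet */
	dma_async_issue_pending(chan);
	return 0;

release:
	dma_release_channel(chan);
	return ret;
}

The single cell in the consumer's dmas phandle is what k3_of_dma_simple_xlate() (lines 829-835) turns back into d->chans[request]; a real driver would keep the channel and release it in its teardown path rather than per transfer.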
593 struct k3_dma_chan *c = to_k3_chan(chan); in k3_dma_prep_dma_cyclic() local
601 dev_dbg(chan->device->dev, "%s: buf %pad, dst %pad, buf len %zu, period_len = %zu, dir %d\n", in k3_dma_prep_dma_cyclic()
602 __func__, &buf_addr, &to_k3_chan(chan)->dev_addr, in k3_dma_prep_dma_cyclic()
607 num += DIV_ROUND_UP(avail, modulo) - 1; in k3_dma_prep_dma_cyclic()
613 c->cyclic = 1; in k3_dma_prep_dma_cyclic()
618 k3_dma_config_write(chan, dir, &c->slave_config); in k3_dma_prep_dma_cyclic()
628 dst = c->dev_addr; in k3_dma_prep_dma_cyclic()
630 src = c->dev_addr; in k3_dma_prep_dma_cyclic()
637 since -= period_len; in k3_dma_prep_dma_cyclic()
641 k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg | en_tc2); in k3_dma_prep_dma_cyclic()
644 avail -= len; in k3_dma_prep_dma_cyclic()
648 ds->desc_hw[num - 1].lli |= ds->desc_hw_lli; in k3_dma_prep_dma_cyclic()
650 ds->size = total; in k3_dma_prep_dma_cyclic()
652 return vchan_tx_prep(&c->vc, &ds->vd, flags); in k3_dma_prep_dma_cyclic()
658 struct k3_dma_chan *c = to_k3_chan(chan); in k3_dma_config() local
660 memcpy(&c->slave_config, cfg, sizeof(*cfg)); in k3_dma_config()
669 struct k3_dma_chan *c = to_k3_chan(chan); in k3_dma_config_write() local
674 c->ccfg = CX_CFG_DSTINCR; in k3_dma_config_write()
675 c->dev_addr = cfg->src_addr; in k3_dma_config_write()
676 maxburst = cfg->src_maxburst; in k3_dma_config_write()
677 width = cfg->src_addr_width; in k3_dma_config_write()
679 c->ccfg = CX_CFG_SRCINCR; in k3_dma_config_write()
680 c->dev_addr = cfg->dst_addr; in k3_dma_config_write()
681 maxburst = cfg->dst_maxburst; in k3_dma_config_write()
682 width = cfg->dst_addr_width; in k3_dma_config_write()
695 c->ccfg |= (val << 12) | (val << 16); in k3_dma_config_write()
700 val = maxburst - 1; in k3_dma_config_write()
701 c->ccfg |= (val << 20) | (val << 24); in k3_dma_config_write()
702 c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN; in k3_dma_config_write()
705 c->ccfg |= c->vc.chan.chan_id << 4; in k3_dma_config_write()
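
The CX_CFG layout can be pieced together from the lines above and the memcpy defaults at lines 513-515; the summary and worked example below are inferred from this listing, not quoted from a datasheet:

	/*
	 * CX_CFG fields as used by this driver (inferred):
	 *   bits  4..7    request/channel index (chan_id << 4, line 705)
	 *   bits 12..19   bus width code for the two sides, apparently
	 *                 log2 of the width in bytes (0x3 = 64-bit, line 515);
	 *                 both nibbles are written identically here
	 *   bits 20..27   burst length minus 1 for the two sides
	 *                 (0xf = 16 beats, lines 514 and 700)
	 *   plus CX_CFG_SRCINCR / CX_CFG_DSTINCR, CX_CFG_MEM2PER, CX_CFG_EN
	 *
	 * Worked example: dst_addr_width = 4 bytes, dst_maxburst = 8
	 *   width code = 2, burst code = 7
	 *   ccfg |= (2 << 12) | (2 << 16) | (7 << 20) | (7 << 24);
	 */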
714 struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device); in k3_dma_free_desc()
716 dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli); in k3_dma_free_desc()
722 struct k3_dma_chan *c = to_k3_chan(chan); in k3_dma_terminate_all() local
723 struct k3_dma_dev *d = to_k3_dma(chan->device); in k3_dma_terminate_all()
724 struct k3_dma_phy *p = c->phy; in k3_dma_terminate_all()
728 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); in k3_dma_terminate_all()
731 spin_lock(&d->lock); in k3_dma_terminate_all()
732 list_del_init(&c->node); in k3_dma_terminate_all()
733 spin_unlock(&d->lock); in k3_dma_terminate_all()
736 spin_lock_irqsave(&c->vc.lock, flags); in k3_dma_terminate_all()
737 vchan_get_all_descriptors(&c->vc, &head); in k3_dma_terminate_all()
739 /* vchan is assigned to a pchan - stop the channel */ in k3_dma_terminate_all()
741 c->phy = NULL; in k3_dma_terminate_all()
742 p->vchan = NULL; in k3_dma_terminate_all()
743 if (p->ds_run) { in k3_dma_terminate_all()
744 vchan_terminate_vdesc(&p->ds_run->vd); in k3_dma_terminate_all()
745 p->ds_run = NULL; in k3_dma_terminate_all()
747 p->ds_done = NULL; in k3_dma_terminate_all()
749 spin_unlock_irqrestore(&c->vc.lock, flags); in k3_dma_terminate_all()
750 vchan_dma_desc_free_list(&c->vc, &head); in k3_dma_terminate_all()
757 struct k3_dma_chan *c = to_k3_chan(chan); in k3_dma_synchronize() local
759 vchan_synchronize(&c->vc); in k3_dma_synchronize()
764 struct k3_dma_chan *c = to_k3_chan(chan); in k3_dma_transfer_pause() local
765 struct k3_dma_dev *d = to_k3_dma(chan->device); in k3_dma_transfer_pause()
766 struct k3_dma_phy *p = c->phy; in k3_dma_transfer_pause()
768 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); in k3_dma_transfer_pause()
769 if (c->status == DMA_IN_PROGRESS) { in k3_dma_transfer_pause()
770 c->status = DMA_PAUSED; in k3_dma_transfer_pause()
774 spin_lock(&d->lock); in k3_dma_transfer_pause()
775 list_del_init(&c->node); in k3_dma_transfer_pause()
776 spin_unlock(&d->lock); in k3_dma_transfer_pause()
785 struct k3_dma_chan *c = to_k3_chan(chan); in k3_dma_transfer_resume() local
786 struct k3_dma_dev *d = to_k3_dma(chan->device); in k3_dma_transfer_resume()
787 struct k3_dma_phy *p = c->phy; in k3_dma_transfer_resume()
790 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); in k3_dma_transfer_resume()
791 spin_lock_irqsave(&c->vc.lock, flags); in k3_dma_transfer_resume()
792 if (c->status == DMA_PAUSED) { in k3_dma_transfer_resume()
793 c->status = DMA_IN_PROGRESS; in k3_dma_transfer_resume()
796 } else if (!list_empty(&c->vc.desc_issued)) { in k3_dma_transfer_resume()
797 spin_lock(&d->lock); in k3_dma_transfer_resume()
798 list_add_tail(&c->node, &d->chan_pending); in k3_dma_transfer_resume()
799 spin_unlock(&d->lock); in k3_dma_transfer_resume()
802 spin_unlock_irqrestore(&c->vc.lock, flags); in k3_dma_transfer_resume()
816 { .compatible = "hisilicon,k3-dma-1.0",
819 { .compatible = "hisilicon,hisi-pcm-asp-dma-1.0",
829 struct k3_dma_dev *d = ofdma->of_dma_data; in k3_of_dma_simple_xlate()
830 unsigned int request = dma_spec->args[0]; in k3_of_dma_simple_xlate()
832 if (request >= d->dma_requests) in k3_of_dma_simple_xlate()
835 return dma_get_slave_channel(&(d->chans[request].vc.chan)); in k3_of_dma_simple_xlate()
845 d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL); in k3_dma_probe()
847 return -ENOMEM; in k3_dma_probe()
849 soc_data = device_get_match_data(&op->dev); in k3_dma_probe()
851 return -EINVAL; in k3_dma_probe()
853 d->base = devm_platform_ioremap_resource(op, 0); in k3_dma_probe()
854 if (IS_ERR(d->base)) in k3_dma_probe()
855 return PTR_ERR(d->base); in k3_dma_probe()
857 of_id = of_match_device(k3_pdma_dt_ids, &op->dev); in k3_dma_probe()
859 of_property_read_u32((&op->dev)->of_node, in k3_dma_probe()
860 "dma-channels", &d->dma_channels); in k3_dma_probe()
861 of_property_read_u32((&op->dev)->of_node, in k3_dma_probe()
862 "dma-requests", &d->dma_requests); in k3_dma_probe()
863 ret = of_property_read_u32((&op->dev)->of_node, in k3_dma_probe()
864 "dma-channel-mask", &d->dma_channel_mask); in k3_dma_probe()
866 dev_warn(&op->dev, in k3_dma_probe()
867 "dma-channel-mask doesn't exist, considering all as available.\n"); in k3_dma_probe()
868 d->dma_channel_mask = (u32)~0UL; in k3_dma_probe()
872 if (!(soc_data->flags & K3_FLAG_NOCLK)) { in k3_dma_probe()
873 d->clk = devm_clk_get(&op->dev, NULL); in k3_dma_probe()
874 if (IS_ERR(d->clk)) { in k3_dma_probe()
875 dev_err(&op->dev, "no dma clk\n"); in k3_dma_probe()
876 return PTR_ERR(d->clk); in k3_dma_probe()
881 ret = devm_request_irq(&op->dev, irq, in k3_dma_probe()
886 d->irq = irq; in k3_dma_probe()
888 /* A DMA memory pool for LLIs, align on 32-byte boundary */ in k3_dma_probe()
889 d->pool = dmam_pool_create(DRIVER_NAME, &op->dev, in k3_dma_probe()
891 if (!d->pool) in k3_dma_probe()
892 return -ENOMEM; in k3_dma_probe()
894 /* init phy channel */ in k3_dma_probe()
895 d->phy = devm_kcalloc(&op->dev, in k3_dma_probe()
896 d->dma_channels, sizeof(struct k3_dma_phy), GFP_KERNEL); in k3_dma_probe()
897 if (d->phy == NULL) in k3_dma_probe()
898 return -ENOMEM; in k3_dma_probe()
900 for (i = 0; i < d->dma_channels; i++) { in k3_dma_probe()
903 if (!(d->dma_channel_mask & BIT(i))) in k3_dma_probe()
906 p = &d->phy[i]; in k3_dma_probe()
907 p->idx = i; in k3_dma_probe()
908 p->base = d->base + i * 0x40; in k3_dma_probe()
911 INIT_LIST_HEAD(&d->slave.channels); in k3_dma_probe()
912 dma_cap_set(DMA_SLAVE, d->slave.cap_mask); in k3_dma_probe()
913 dma_cap_set(DMA_MEMCPY, d->slave.cap_mask); in k3_dma_probe()
914 dma_cap_set(DMA_CYCLIC, d->slave.cap_mask); in k3_dma_probe()
915 d->slave.dev = &op->dev; in k3_dma_probe()
916 d->slave.device_free_chan_resources = k3_dma_free_chan_resources; in k3_dma_probe()
917 d->slave.device_tx_status = k3_dma_tx_status; in k3_dma_probe()
918 d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy; in k3_dma_probe()
919 d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg; in k3_dma_probe()
920 d->slave.device_prep_dma_cyclic = k3_dma_prep_dma_cyclic; in k3_dma_probe()
921 d->slave.device_issue_pending = k3_dma_issue_pending; in k3_dma_probe()
922 d->slave.device_config = k3_dma_config; in k3_dma_probe()
923 d->slave.device_pause = k3_dma_transfer_pause; in k3_dma_probe()
924 d->slave.device_resume = k3_dma_transfer_resume; in k3_dma_probe()
925 d->slave.device_terminate_all = k3_dma_terminate_all; in k3_dma_probe()
926 d->slave.device_synchronize = k3_dma_synchronize; in k3_dma_probe()
927 d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES; in k3_dma_probe()
930 d->chans = devm_kcalloc(&op->dev, in k3_dma_probe()
931 d->dma_requests, sizeof(struct k3_dma_chan), GFP_KERNEL); in k3_dma_probe()
932 if (d->chans == NULL) in k3_dma_probe()
933 return -ENOMEM; in k3_dma_probe()
935 for (i = 0; i < d->dma_requests; i++) { in k3_dma_probe()
936 struct k3_dma_chan *c = &d->chans[i]; in k3_dma_probe() local
938 c->status = DMA_IN_PROGRESS; in k3_dma_probe()
939 INIT_LIST_HEAD(&c->node); in k3_dma_probe()
940 c->vc.desc_free = k3_dma_free_desc; in k3_dma_probe()
941 vchan_init(&c->vc, &d->slave); in k3_dma_probe()
945 ret = clk_prepare_enable(d->clk); in k3_dma_probe()
947 dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret); in k3_dma_probe()
953 ret = dma_async_device_register(&d->slave); in k3_dma_probe()
957 ret = of_dma_controller_register((&op->dev)->of_node, in k3_dma_probe()
962 spin_lock_init(&d->lock); in k3_dma_probe()
963 INIT_LIST_HEAD(&d->chan_pending); in k3_dma_probe()
964 tasklet_setup(&d->task, k3_dma_tasklet); in k3_dma_probe()
966 dev_info(&op->dev, "initialized\n"); in k3_dma_probe()
971 dma_async_device_unregister(&d->slave); in k3_dma_probe()
973 clk_disable_unprepare(d->clk); in k3_dma_probe()
979 struct k3_dma_chan *c, *cn; in k3_dma_remove() local
982 dma_async_device_unregister(&d->slave); in k3_dma_remove()
983 of_dma_controller_free((&op->dev)->of_node); in k3_dma_remove()
985 devm_free_irq(&op->dev, d->irq, d); in k3_dma_remove()
987 list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) { in k3_dma_remove()
988 list_del(&c->vc.chan.device_node); in k3_dma_remove()
989 tasklet_kill(&c->vc.task); in k3_dma_remove()
991 tasklet_kill(&d->task); in k3_dma_remove()
992 clk_disable_unprepare(d->clk); in k3_dma_remove()
1004 dev_warn(d->slave.dev, in k3_dma_suspend_dev()
1006 return -1; in k3_dma_suspend_dev()
1009 clk_disable_unprepare(d->clk); in k3_dma_suspend_dev()
1018 ret = clk_prepare_enable(d->clk); in k3_dma_resume_dev()
1020 dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret); in k3_dma_resume_dev()