Lines Matching +full:d +full:- +full:phy

// SPDX-License-Identifier: GPL-2.0-only

#include <linux/dma-mapping.h>
#include "virt-dma.h"

#define DRIVER_NAME	"zx-dma"
#define DMA_MAX_SIZE	(0x10000 - 512)

/* struct zx_dma_chan */
        int id; /* Request phy chan id */
        struct zx_dma_phy *phy;

/* struct zx_dma_dev */
        spinlock_t lock; /* lock for ch and phy */
        struct zx_dma_phy *phy;
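
/*
 * Stop a physical channel: rewrite its REG_ZX_CTRL control word and
 * acknowledge any pending transfer-complete and error status by writing
 * the channel's bit into the four raw IRQ registers.
 */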
static void zx_dma_terminate_chan(struct zx_dma_phy *phy, struct zx_dma_dev *d)
        val = readl_relaxed(phy->base + REG_ZX_CTRL);
        writel_relaxed(val, phy->base + REG_ZX_CTRL);

        val = 0x1 << phy->idx;
        writel_relaxed(val, d->base + REG_ZX_TC_IRQ_RAW);
        writel_relaxed(val, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
        writel_relaxed(val, d->base + REG_ZX_DST_ERR_IRQ_RAW);
        writel_relaxed(val, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
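
/*
 * Program one hardware descriptor into a physical channel: source and
 * destination addresses, the X transfer count (the 2D count and step
 * registers are cleared), the next linked-list item and the control word.
 */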
static void zx_dma_set_desc(struct zx_dma_phy *phy, struct zx_desc_hw *hw)
        writel_relaxed(hw->saddr, phy->base + REG_ZX_SRC_ADDR);
        writel_relaxed(hw->daddr, phy->base + REG_ZX_DST_ADDR);
        writel_relaxed(hw->src_x, phy->base + REG_ZX_TX_X_COUNT);
        writel_relaxed(0, phy->base + REG_ZX_TX_ZY_COUNT);
        writel_relaxed(0, phy->base + REG_ZX_SRC_ZY_STEP);
        writel_relaxed(0, phy->base + REG_ZX_DST_ZY_STEP);
        writel_relaxed(hw->lli, phy->base + REG_ZX_LLI_ADDR);
        writel_relaxed(hw->ctr, phy->base + REG_ZX_CTRL);

static u32 zx_dma_get_curr_lli(struct zx_dma_phy *phy)
        return readl_relaxed(phy->base + REG_ZX_LLI_ADDR);

static u32 zx_dma_get_chan_stat(struct zx_dma_dev *d)
        return readl_relaxed(d->base + REG_ZX_STATUS);

static void zx_dma_init_state(struct zx_dma_dev *d)
        writel_relaxed(0x0, d->base + REG_ZX_DMA_ARB);
        writel_relaxed(0xffffffff, d->base + REG_ZX_TC_IRQ_RAW);
        writel_relaxed(0xffffffff, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
        writel_relaxed(0xffffffff, d->base + REG_ZX_DST_ERR_IRQ_RAW);
        writel_relaxed(0xffffffff, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
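
/*
 * zx_dma_start_txd() takes the next descriptor issued on a virtual
 * channel and loads it onto the bound physical channel.  It backs off
 * with -EAGAIN when no physical channel is bound or the hardware still
 * reports the channel busy in REG_ZX_STATUS.
 */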
/* zx_dma_start_txd() */
        struct zx_dma_dev *d = to_zx_dma(c->vc.chan.device);
        struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

        if (!c->phy)
                return -EAGAIN;

        if (BIT(c->phy->idx) & zx_dma_get_chan_stat(d))
                return -EAGAIN;

        /*
         * fetch and remove request from vc->desc_issued
         * so vc->desc_issued only contains desc pending
         */
        list_del(&ds->vd.node);
        c->phy->ds_run = ds;
        c->phy->ds_done = NULL;
        zx_dma_set_desc(c->phy, ds->desc_hw);

        c->phy->ds_done = NULL;
        c->phy->ds_run = NULL;
        return -EAGAIN;
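
/*
 * Channel scheduler: running virtual channels whose descriptor has
 * completed try to start their next one and otherwise release their
 * physical channel; then virtual channels waiting on d->chan_pending
 * are bound to free physical channels, and every newly bound channel
 * gets its first transfer started.
 */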
static void zx_dma_task(struct zx_dma_dev *d)
        /* check new dma request of running channel in vc->desc_issued */
        list_for_each_entry_safe(c, cn, &d->slave.channels,
                                 vc.chan.device_node) {
                spin_lock_irqsave(&c->vc.lock, flags);
                p = c->phy;
                if (p && p->ds_done && zx_dma_start_txd(c)) {
                        dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
                        c->phy = NULL;
                        p->vchan = NULL;
                }
                spin_unlock_irqrestore(&c->vc.lock, flags);
        }

        /* check new channel request in d->chan_pending */
        spin_lock_irqsave(&d->lock, flags);
        while (!list_empty(&d->chan_pending)) {
                c = list_first_entry(&d->chan_pending,
                                     struct zx_dma_chan, node);
                p = &d->phy[c->id];
                if (!p->vchan) {
                        /* remove from d->chan_pending */
                        list_del_init(&c->node);
                        pch_alloc |= 1 << c->id;
                        p->vchan = c;
                        c->phy = p;
                } else {
                        dev_dbg(d->slave.dev, "pchan %u: busy!\n", c->id);
                }
        }
        spin_unlock_irqrestore(&d->lock, flags);

        for (pch = 0; pch < d->dma_channels; pch++) {
                if (pch_alloc & (1 << pch)) {
                        p = &d->phy[pch];
                        c = p->vchan;
                        spin_lock_irqsave(&c->vc.lock, flags);
                        zx_dma_start_txd(c);
                        spin_unlock_irqrestore(&c->vc.lock, flags);
                }
        }
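
/*
 * Interrupt handler: read the transfer-complete and error status, run
 * the cyclic callback or complete the cookie for each signalled
 * physical channel, clear the raw status registers and re-run the
 * scheduler.
 */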
/* zx_dma_int_handler() */
        struct zx_dma_dev *d = (struct zx_dma_dev *)dev_id;
        u32 tc = readl_relaxed(d->base + REG_ZX_TC_IRQ);
        u32 serr = readl_relaxed(d->base + REG_ZX_SRC_ERR_IRQ);
        u32 derr = readl_relaxed(d->base + REG_ZX_DST_ERR_IRQ);
        u32 cfg = readl_relaxed(d->base + REG_ZX_CFG_ERR_IRQ);

        p = &d->phy[i];
        c = p->vchan;
        spin_lock(&c->vc.lock);
        if (c->cyclic) {
                vchan_cyclic_callback(&p->ds_run->vd);
        } else {
                vchan_cookie_complete(&p->ds_run->vd);
                p->ds_done = p->ds_run;
        }
        spin_unlock(&c->vc.lock);

        dev_warn(d->slave.dev, "DMA ERR src 0x%x, dst 0x%x, cfg 0x%x\n",
                 serr, derr, cfg);

        writel_relaxed(irq_chan, d->base + REG_ZX_TC_IRQ_RAW);
        writel_relaxed(serr, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
        writel_relaxed(derr, d->base + REG_ZX_DST_ERR_IRQ_RAW);
        writel_relaxed(cfg, d->base + REG_ZX_CFG_ERR_IRQ_RAW);

        zx_dma_task(d);

/* zx_dma_free_chan_resources() */
        struct zx_dma_dev *d = to_zx_dma(chan->device);

        spin_lock_irqsave(&d->lock, flags);
        list_del_init(&c->node);
        spin_unlock_irqrestore(&d->lock, flags);

        vchan_free_chan_resources(&c->vc);
        c->ccfg = 0;
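
/*
 * Residue calculation: a descriptor still sitting on the issued list
 * counts in full; for the descriptor that is running, the remaining
 * bytes are summed from the hardware LLI entry the channel is currently
 * on (zx_dma_get_curr_lli()) to the end of the chain.
 */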
/* zx_dma_tx_status() */
        ret = dma_cookie_status(&c->vc.chan, cookie, state);

        spin_lock_irqsave(&c->vc.lock, flags);
        p = c->phy;
        ret = c->status;

        vd = vchan_find_desc(&c->vc, cookie);
        if (vd) {
                bytes = container_of(vd, struct zx_dma_desc_sw, vd)->size;
        } else if ((!p) || (!p->ds_run)) {
                bytes = 0;
        } else {
                struct zx_dma_desc_sw *ds = p->ds_run;

                index = (clli - ds->desc_hw_lli) /
                        sizeof(struct zx_desc_hw) + 1;
                for (; index < ds->desc_num; index++) {
                        bytes += ds->desc_hw[index].src_x;
                        if (!ds->desc_hw[index].lli)
                                break;
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
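
/*
 * Move newly submitted descriptors to vc->desc_issued and, if the
 * channel has no physical channel yet, queue it on d->chan_pending
 * before kicking the scheduler.
 */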
/* zx_dma_issue_pending() */
        struct zx_dma_dev *d = to_zx_dma(chan->device);

        spin_lock_irqsave(&c->vc.lock, flags);
        /* add request to vc->desc_issued */
        if (vchan_issue_pending(&c->vc)) {
                spin_lock(&d->lock);
                if (!c->phy && list_empty(&c->node)) {
                        list_add_tail(&c->node, &d->chan_pending);
                        dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
                }
                spin_unlock(&d->lock);
        } else {
                dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);

        zx_dma_task(d);
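
/*
 * Fill one hardware LLI entry and, unless it is the last entry of the
 * descriptor, point its lli field at the next entry in the pool block.
 */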
/* zx_dma_fill_desc() */
        if ((num + 1) < ds->desc_num)
                ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
                        sizeof(struct zx_desc_hw);

        ds->desc_hw[num].saddr = src;
        ds->desc_hw[num].daddr = dst;
        ds->desc_hw[num].src_x = len;
        ds->desc_hw[num].ctr = ccfg;

/* zx_alloc_desc_resource() */
        struct zx_dma_dev *d = to_zx_dma(chan->device);

        dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
                &c->vc, num, lli_limit);

        ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
        if (!ds->desc_hw) {
                dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
                return NULL;
        }
        ds->desc_num = num;

/* zx_dma_burst_width() */
        return ffs(width) - 1;
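
/*
 * Build the channel control word (c->ccfg) for the requested direction:
 * memory-to-memory uses a software request with the maximum burst
 * length, while the slave directions take burst width and maximum burst
 * from the dma_slave_config and select the matching FIFO mode.
 * Unsupported directions are rejected with -EINVAL.
 */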
/* zx_pre_config() */
        struct dma_slave_config *cfg = &c->slave_cfg;

        /* DMA_MEM_TO_MEM */
        c->ccfg = ZX_CH_ENABLE | ZX_SOFT_REQ
                | ZX_SRC_BURST_LEN(ZX_MAX_BURST_LEN - 1)

        /* DMA_MEM_TO_DEV */
        c->dev_addr = cfg->dst_addr;
        dst_width = zx_dma_burst_width(cfg->dst_addr_width);
        maxburst = cfg->dst_maxburst;
        c->ccfg = ZX_DST_FIFO_MODE | ZX_CH_ENABLE
                | ZX_SRC_BURST_LEN(maxburst - 1)

        /* DMA_DEV_TO_MEM */
        c->dev_addr = cfg->src_addr;
        src_width = zx_dma_burst_width(cfg->src_addr_width);
        maxburst = cfg->src_maxburst;
        c->ccfg = ZX_SRC_FIFO_MODE | ZX_CH_ENABLE
                | ZX_SRC_BURST_LEN(maxburst - 1)

        return -EINVAL;
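
/*
 * memcpy preparation: the copy is split into chunks of at most
 * DMA_MAX_SIZE bytes, one LLI entry per chunk; only the last entry
 * terminates the link and enables the completion interrupt.
 */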
/* zx_dma_prep_memcpy() */
        ds->size = len;

        zx_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
        len -= copy;

        c->cyclic = 0;
        ds->desc_hw[num - 1].lli = 0; /* end of link */
        ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
        return vchan_tx_prep(&c->vc, &ds->vd, flags);
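
/*
 * Slave scatter-gather preparation: each sg entry may again be split on
 * the DMA_MAX_SIZE boundary; the device-side address always comes from
 * c->dev_addr as set up by zx_dma_config()/zx_pre_config().
 */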
/* zx_dma_prep_slave_sg() */
        num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;

        c->cyclic = 0;

        dst = c->dev_addr;      /* DMA_MEM_TO_DEV */
        src = c->dev_addr;      /* DMA_DEV_TO_MEM */

        zx_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);
        avail -= len;

        ds->desc_hw[num - 1].lli = 0; /* end of link */
        ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
        ds->size = total;
        return vchan_tx_prep(&c->vc, &ds->vd, flags);
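
/*
 * Cyclic preparation: every period gets an LLI entry armed with
 * ZX_IRQ_ENABLE_ALL so that each completed period raises an interrupt,
 * and the last entry links back to the first (ds->desc_hw_lli) to form
 * the ring.
 */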
/* zx_dma_prep_dma_cyclic() */
        dev_err(chan->device->dev, "maximum period size exceeded\n");

        c->cyclic = 1;

        dst = c->dev_addr;      /* DMA_MEM_TO_DEV */
        src = c->dev_addr;      /* DMA_DEV_TO_MEM */

                         c->ccfg | ZX_IRQ_ENABLE_ALL);

        ds->desc_hw[num - 1].lli = ds->desc_hw_lli;
        ds->size = buf_len;
        return vchan_tx_prep(&c->vc, &ds->vd, flags);

/* zx_dma_config() */
        if (!cfg)
                return -EINVAL;

        memcpy(&c->slave_cfg, cfg, sizeof(*cfg));
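
/*
 * Terminate: take the channel off d->chan_pending, collect all queued
 * descriptors, stop and release the physical channel if one is bound,
 * then free the collected descriptors outside the channel lock.
 */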
/* zx_dma_terminate_all() */
        struct zx_dma_dev *d = to_zx_dma(chan->device);
        struct zx_dma_phy *p = c->phy;

        dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);

        spin_lock(&d->lock);
        list_del_init(&c->node);
        spin_unlock(&d->lock);

        spin_lock_irqsave(&c->vc.lock, flags);
        vchan_get_all_descriptors(&c->vc, &head);
        if (p) {
                /* vchan is assigned to a pchan - stop the channel */
                zx_dma_terminate_chan(p, d);
                c->phy = NULL;
                p->vchan = NULL;
                p->ds_run = NULL;
                p->ds_done = NULL;
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
        vchan_dma_desc_free_list(&c->vc, &head);
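
/*
 * Pause and resume are read-modify-write updates of the channel's
 * REG_ZX_CTRL word, presumably clearing and setting the channel enable
 * bit respectively.
 */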
/* zx_dma_transfer_pause() */
        val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
        writel_relaxed(val, c->phy->base + REG_ZX_CTRL);

/* zx_dma_transfer_resume() */
        val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
        writel_relaxed(val, c->phy->base + REG_ZX_CTRL);

/* zx_dma_free_desc() */
        struct zx_dma_dev *d = to_zx_dma(vd->tx.chan->device);
        dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);

/* of_device_id match table */
        { .compatible = "zte,zx296702-dma", },

/* zx_of_dma_simple_xlate() */
        struct zx_dma_dev *d = ofdma->of_dma_data;
        unsigned int request = dma_spec->args[0];

        if (request >= d->dma_requests)
                return NULL;

        chan = dma_get_any_slave_channel(&d->slave);
        if (!chan) {
                dev_err(d->slave.dev, "get channel fail in %s.\n", __func__);
                return NULL;
        }
        c->id = request;
        dev_info(d->slave.dev, "zx_dma: pchan %u: alloc vchan %p\n",
                 c->id, &c->vc);
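
/*
 * Probe: map the registers, read the "dma-channels" and "dma-requests"
 * properties, grab the clock and IRQ, create the LLI dma_pool, set up
 * one zx_dma_phy per hardware channel (register stride 0x40) and one
 * virtual channel per request line, then register the dmaengine device
 * and the OF translation hook.
 */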
/* zx_dma_probe() */
        struct zx_dma_dev *d;

        d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        d->base = devm_platform_ioremap_resource(op, 0);
        if (IS_ERR(d->base))
                return PTR_ERR(d->base);

        of_property_read_u32((&op->dev)->of_node,
                             "dma-channels", &d->dma_channels);
        of_property_read_u32((&op->dev)->of_node,
                             "dma-requests", &d->dma_requests);
        if (!d->dma_requests || !d->dma_channels)
                return -EINVAL;

        d->clk = devm_clk_get(&op->dev, NULL);
        if (IS_ERR(d->clk)) {
                dev_err(&op->dev, "no dma clk\n");
                return PTR_ERR(d->clk);
        }

        d->irq = platform_get_irq(op, 0);
        ret = devm_request_irq(&op->dev, d->irq, zx_dma_int_handler,
                               0, DRIVER_NAME, d);

        /* A DMA memory pool for LLIs, align on 32-byte boundary */
        d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
        if (!d->pool)
                return -ENOMEM;

        /* init phy channel */
        d->phy = devm_kcalloc(&op->dev,
                              d->dma_channels, sizeof(struct zx_dma_phy), GFP_KERNEL);
        if (!d->phy)
                return -ENOMEM;

        for (i = 0; i < d->dma_channels; i++) {
                struct zx_dma_phy *p = &d->phy[i];

                p->idx = i;
                p->base = d->base + i * 0x40;
        }

        INIT_LIST_HEAD(&d->slave.channels);
        dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
        dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
        dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
        dma_cap_set(DMA_PRIVATE, d->slave.cap_mask);
        d->slave.dev = &op->dev;
        d->slave.device_free_chan_resources = zx_dma_free_chan_resources;
        d->slave.device_tx_status = zx_dma_tx_status;
        d->slave.device_prep_dma_memcpy = zx_dma_prep_memcpy;
        d->slave.device_prep_slave_sg = zx_dma_prep_slave_sg;
        d->slave.device_prep_dma_cyclic = zx_dma_prep_dma_cyclic;
        d->slave.device_issue_pending = zx_dma_issue_pending;
        d->slave.device_config = zx_dma_config;
        d->slave.device_terminate_all = zx_dma_terminate_all;
        d->slave.device_pause = zx_dma_transfer_pause;
        d->slave.device_resume = zx_dma_transfer_resume;
        d->slave.copy_align = DMA_ALIGN;
        d->slave.src_addr_widths = ZX_DMA_BUSWIDTHS;
        d->slave.dst_addr_widths = ZX_DMA_BUSWIDTHS;
        d->slave.directions = BIT(DMA_MEM_TO_MEM) | BIT(DMA_MEM_TO_DEV)
                            | BIT(DMA_DEV_TO_MEM);
        d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

        /* init virtual (request) channels */
        d->chans = devm_kcalloc(&op->dev,
                                d->dma_requests, sizeof(struct zx_dma_chan), GFP_KERNEL);
        if (!d->chans)
                return -ENOMEM;

        for (i = 0; i < d->dma_requests; i++) {
                struct zx_dma_chan *c = &d->chans[i];

                c->status = DMA_IN_PROGRESS;
                INIT_LIST_HEAD(&c->node);
                c->vc.desc_free = zx_dma_free_desc;
                vchan_init(&c->vc, &d->slave);
        }

        ret = clk_prepare_enable(d->clk);
        if (ret < 0)
                dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);

        zx_dma_init_state(d);

        spin_lock_init(&d->lock);
        INIT_LIST_HEAD(&d->chan_pending);
        platform_set_drvdata(op, d);

        ret = dma_async_device_register(&d->slave);

        ret = of_dma_controller_register((&op->dev)->of_node,
                                         zx_of_dma_simple_xlate, d);

        dev_info(&op->dev, "initialized\n");

        /* error unwind */
        dma_async_device_unregister(&d->slave);
        clk_disable_unprepare(d->clk);

/* zx_dma_remove() */
        struct zx_dma_dev *d = platform_get_drvdata(op);

        devm_free_irq(&op->dev, d->irq, d);
        dma_async_device_unregister(&d->slave);
        of_dma_controller_free((&op->dev)->of_node);

        list_for_each_entry_safe(c, cn, &d->slave.channels,
                                 vc.chan.device_node) {
                list_del(&c->vc.chan.device_node);
        }
        clk_disable_unprepare(d->clk);

/* zx_dma_suspend_dev() */
        struct zx_dma_dev *d = dev_get_drvdata(dev);

        stat = zx_dma_get_chan_stat(d);
        if (stat) {
                dev_warn(d->slave.dev,
                         "chan %d is running fail to suspend\n", stat);
                return -1;
        }
        clk_disable_unprepare(d->clk);

/* zx_dma_resume_dev() */
        struct zx_dma_dev *d = dev_get_drvdata(dev);

        ret = clk_prepare_enable(d->clk);
        if (ret < 0) {
                dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
                return ret;
        }
        zx_dma_init_state(d);