Lines Matching +full:pd +full:- +full:disable
4 * SPDX-License-Identifier: Apache-2.0
33 return ring->pkt_id = 0x0; in reset_pkt_id()
41 ring->pkt_id = (ring->pkt_id + 1) % 32; in alloc_pkt_id()
42 return ring->pkt_id; in alloc_pkt_id()
47 return ring->pkt_id; in curr_pkt_id()
52 return ring->curr.toggle; in curr_toggle_val()
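
The helpers above keep a 5-bit opaque packet ID per ring, cycling 0..31 through the % 32 in alloc_pkt_id(). A minimal standalone model of that behavior; the struct, function names, and main() harness are illustrative, not the driver's:

/* Standalone model of the packet-ID helpers: a 5-bit opaque tag that
 * cycles 0..31, one per submitted packet. Names here are invented.
 */
#include <stdint.h>
#include <stdio.h>

struct ring_model {
        uint32_t pkt_id;
};

static uint32_t reset_id(struct ring_model *r) { return r->pkt_id = 0; }
static uint32_t alloc_id(struct ring_model *r) { return r->pkt_id = (r->pkt_id + 1) % 32; }
static uint32_t curr_id(const struct ring_model *r) { return r->pkt_id; }

int main(void)
{
        struct ring_model r;

        reset_id(&r);
        for (int i = 0; i < 34; i++) {
                printf("%u ", alloc_id(&r)); /* prints 1..31, then 0, 1, 2 */
        }
        printf("\ncurrent: %u\n", curr_id(&r));
        return 0;
}
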
63 r->opq = opq; in rm_write_header_desc()
65 r->bdcount = bdcount; in rm_write_header_desc()
66 r->prot = 0x0; in rm_write_header_desc()
68 r->start = 1; in rm_write_header_desc()
69 r->end = 1; in rm_write_header_desc()
70 r->toggle = toggle; in rm_write_header_desc()
72 r->type = PAX_DMA_TYPE_RM_HEADER; in rm_write_header_desc()
84 rm_write_header_desc(desc, (r->curr.toggle == 0) ? 1 : 0, opq, bdcount); in rm_write_header_next_desc()
92 r->bdcount = bdcount; in rm_header_set_bd_count()
99 r->toggle = toggle; in rm_header_set_toggle()
110 hdr->length = pl->xfer_sz; in rm_write_dma_header_desc()
111 hdr->opcode = pl->direction; in rm_write_dma_header_desc()
113 hdr->type = PAX_DMA_TYPE_DMA_DESC; in rm_write_dma_header_desc()
124 axi->axi_addr = pl->axi_addr; in rm_write_axi_addr_desc()
125 axi->type = PAX_DMA_TYPE_DMA_DESC; in rm_write_axi_addr_desc()
137 pci->pcie_addr = pl->pci_addr >> PAX_DMA_PCI_ADDR_ALIGNMT_SHIFT; in rm_write_pci_addr_desc()
138 pci->type = PAX_DMA_TYPE_DMA_DESC; in rm_write_pci_addr_desc()
150 curr = (uintptr_t)ring->curr.write_ptr + PAX_DMA_RM_DESC_BDWIDTH; in next_desc_addr()
153 if (nxt->type == PAX_DMA_TYPE_NEXT_PTR) { in next_desc_addr()
155 curr, nxt->toggle, (uintptr_t)nxt->addr); in next_desc_addr()
156 uintptr_t last = (uintptr_t)ring->bd + in next_desc_addr()
158 ring->curr.toggle = (ring->curr.toggle == 0) ? 1 : 0; in next_desc_addr()
162 curr = (uintptr_t)ring->bd; in next_desc_addr()
167 ring->curr.write_ptr = (void *)curr; in next_desc_addr()
179 nxt->addr = (uintptr_t)next_ptr; in rm_write_next_table_desc()
180 nxt->type = PAX_DMA_TYPE_NEXT_PTR; in rm_write_next_table_desc()
181 nxt->toggle = toggle; in rm_write_next_table_desc()
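
next_desc_addr() advances the write pointer one descriptor at a time; when it lands on a PAX_DMA_TYPE_NEXT_PTR descriptor it follows the link (the last buffer points back to ring->bd) and flips ring->curr.toggle so the hardware can tell freshly written descriptors from stale ones. A rough standalone model of the wrap-and-flip; the slot count is invented:

/* Illustrative model of the toggle-based write-pointer advance:
 * fixed-width descriptor slots, with the last slot reserved for the
 * NEXT_PTR descriptor that wraps the walk and flips the toggle.
 */
#include <stdio.h>

#define SLOTS_PER_BUFF 8 /* hypothetical descriptors per BD buffer */

struct wr_state {
        unsigned slot;   /* current descriptor slot */
        unsigned toggle; /* flips on every wrap */
};

static void advance(struct wr_state *s)
{
        s->slot++;
        if (s->slot == SLOTS_PER_BUFF - 1) {
                /* last slot holds the NEXT_PTR descriptor: wrap and flip */
                s->slot = 0;
                s->toggle ^= 1;
        }
}

int main(void)
{
        struct wr_state s = { .slot = 0, .toggle = 1 }; /* toggle starts at 1, as in prepare_ring() */

        for (int i = 0; i < 16; i++) {
                printf("slot=%u toggle=%u\n", s.slot, s.toggle);
                advance(&s);
        }
        return 0;
}
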
191 memset(ring->bd, 0x0, PAX_DMA_RM_DESC_RING_SIZE * PAX_DMA_NUM_BD_BUFFS); in prepare_ring()
192 memset(ring->cmpl, 0x0, PAX_DMA_RM_CMPL_RING_SIZE); in prepare_ring()
195 rm_write_header_desc(ring->bd, 0x0, reset_pkt_id(ring), in prepare_ring()
199 curr = (uintptr_t)ring->bd; in prepare_ring()
213 next = (uintptr_t)ring->bd; in prepare_ring()
216 } while (--buff_count); in prepare_ring()
221 ring->curr.write_ptr = ring->bd; in prepare_ring()
223 ring->curr.toggle = 1; in prepare_ring()
225 ring->curr.cmpl_rd_offs = 0; in prepare_ring()
228 ring->curr.sync_data.signature = PAX_DMA_WRITE_SYNC_SIGNATURE; in prepare_ring()
229 ring->curr.sync_data.ring = ring->idx; in prepare_ring()
231 ring->curr.sync_data.opaque = 0x0; in prepare_ring()
233 ring->curr.sync_data.total_pkts = 0x0; in prepare_ring()
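
prepare_ring() zeroes the BD and completion areas, writes an initial header descriptor with a fresh packet ID, then chains PAX_DMA_NUM_BD_BUFFS descriptor buffers into a circle, the last buffer's next-table descriptor pointing back at the first, before resetting the write pointer, toggle, completion read offset, and write-sync signature. A standalone sketch of the circular chaining only; buffer count and size are made up:

/* Sketch of the do { ... } while (--buff_count) chaining loop:
 * every buffer links to the next, and the final one wraps to base.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_BUFFS 4u     /* hypothetical PAX_DMA_NUM_BD_BUFFS */
#define BUFF_SIZE 0x100u /* hypothetical per-buffer size */

static uint8_t bd[NUM_BUFFS * BUFF_SIZE];

int main(void)
{
        uintptr_t base = (uintptr_t)bd;
        uintptr_t curr = base;
        unsigned remaining = NUM_BUFFS;

        memset(bd, 0, sizeof(bd));
        do {
                /* last buffer wraps back to the first */
                uintptr_t next = (remaining == 1) ? base : curr + BUFF_SIZE;

                printf("buff @ 0x%lx -> next 0x%lx\n",
                       (unsigned long)curr, (unsigned long)next);
                curr += BUFF_SIZE;
        } while (--remaining);
        return 0;
}
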
236 static int init_rm(struct dma_iproc_pax_data *pd) in init_rm() argument
238 int ret = -ETIMEDOUT, timeout = 1000; in init_rm()
240 k_mutex_lock(&pd->dma_lock, K_FOREVER); in init_rm()
244 if ((sys_read32(RM_COMM_REG(pd, RM_COMM_MAIN_HW_INIT_DONE)) & in init_rm()
250 } while (--timeout); in init_rm()
251 k_mutex_unlock(&pd->dma_lock); in init_rm()
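
init_rm(), and init_ring()'s flush wait below, share one pattern: default ret to -ETIMEDOUT, poll a status register up to a fixed attempt count, and clear ret on success. A generic standalone sketch of that pattern; poll_until() and the demo predicate are hypothetical helpers, not driver code:

/* Generic poll-with-timeout: ret stays -ETIMEDOUT unless the
 * predicate reports done within the attempt budget.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int poll_until(bool (*done)(void *), void *arg, int attempts)
{
        int ret = -ETIMEDOUT;

        do {
                if (done(arg)) {
                        ret = 0;
                        break;
                }
                /* the driver busy-waits or sleeps here */
        } while (--attempts);

        return ret;
}

static bool counter_done(void *arg)
{
        int *cnt = arg;

        return --(*cnt) <= 0; /* pretend "hardware ready" after a few polls */
}

int main(void)
{
        int fake_hw = 5;

        printf("poll result: %d\n", poll_until(counter_done, &fake_hw, 1000));
        return 0;
}
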
262 static void rm_cfg_start(struct dma_iproc_pax_data *pd) in rm_cfg_start() argument
266 k_mutex_lock(&pd->dma_lock, K_FOREVER); in rm_cfg_start()
269 val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL)); in rm_cfg_start()
271 sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL)); in rm_cfg_start()
275 sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL)); in rm_cfg_start()
277 /* Disable MSI */ in rm_cfg_start()
279 RM_COMM_REG(pd, RM_COMM_MSI_DISABLE)); in rm_cfg_start()
281 val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL)); in rm_cfg_start()
283 sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL)); in rm_cfg_start()
286 sys_write32(RM_COMM_AE_TIMEOUT_VAL, RM_COMM_REG(pd, in rm_cfg_start()
288 val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL)); in rm_cfg_start()
290 sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL)); in rm_cfg_start()
293 val = sys_read32(RM_COMM_REG(pd, RM_AE0_AE_CONTROL)); in rm_cfg_start()
295 sys_write32(val, RM_COMM_REG(pd, RM_AE0_AE_CONTROL)); in rm_cfg_start()
297 sys_write32(val, RM_COMM_REG(pd, RM_AE0_AE_CONTROL)); in rm_cfg_start()
300 val = sys_read32(RM_COMM_REG(pd, RM_COMM_AXI_CONTROL)); in rm_cfg_start()
302 sys_write32(val, RM_COMM_REG(pd, RM_COMM_AXI_CONTROL)); in rm_cfg_start()
306 RM_COMM_REG(pd, RM_COMM_TIMER_CONTROL_0)); in rm_cfg_start()
308 RM_COMM_REG(pd, RM_COMM_TIMER_CONTROL_1)); in rm_cfg_start()
310 RM_COMM_REG(pd, RM_COMM_RM_BURST_LENGTH)); in rm_cfg_start()
313 val = sys_read32(RM_COMM_REG(pd, RM_COMM_MASK_SEQUENCE_MAX_COUNT)); in rm_cfg_start()
315 sys_write32(val, RM_COMM_REG(pd, RM_COMM_MASK_SEQUENCE_MAX_COUNT)); in rm_cfg_start()
317 k_mutex_unlock(&pd->dma_lock); in rm_cfg_start()
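
rm_cfg_start() is essentially a sequence of read-modify-write cycles on the RM_COMM registers, all done under pd->dma_lock. A tiny standalone illustration of the RMW idiom against a fake register; the bit names are invented:

/* Read-modify-write against a stand-in register variable, mirroring
 * the sys_read32()/sys_write32() pairs above.
 */
#include <stdint.h>
#include <stdio.h>

#define CTRL_CONFIG_DONE (1u << 0) /* invented bit */
#define CTRL_MSI_DISABLE (1u << 3) /* invented bit */

static uint32_t fake_ctrl_reg; /* stands in for a memory-mapped register */

static void reg_set_bits(volatile uint32_t *reg, uint32_t bits)
{
        uint32_t val = *reg; /* read   */
        val |= bits;         /* modify */
        *reg = val;          /* write  */
}

int main(void)
{
        reg_set_bits(&fake_ctrl_reg, CTRL_MSI_DISABLE);
        reg_set_bits(&fake_ctrl_reg, CTRL_CONFIG_DONE);
        printf("ctrl = 0x%x\n", (unsigned)fake_ctrl_reg);
        return 0;
}
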
320 static void rm_ring_clear_stats(struct dma_iproc_pax_data *pd, in rm_ring_clear_stats() argument
324 sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_RECV_LS)); in rm_ring_clear_stats()
325 sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_RECV_MS)); in rm_ring_clear_stats()
326 sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_TRANS_LS)); in rm_ring_clear_stats()
327 sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_TRANS_MS)); in rm_ring_clear_stats()
328 sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_OUTSTAND)); in rm_ring_clear_stats()
331 static void rm_cfg_finish(struct dma_iproc_pax_data *pd) in rm_cfg_finish() argument
335 k_mutex_lock(&pd->dma_lock, K_FOREVER); in rm_cfg_finish()
338 val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL)); in rm_cfg_finish()
340 sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL)); in rm_cfg_finish()
342 k_mutex_unlock(&pd->dma_lock); in rm_cfg_finish()
346 static inline void set_ring_active(struct dma_iproc_pax_data *pd, in set_ring_active() argument
352 val = sys_read32(RM_RING_REG(pd, idx, RING_CONTROL)); in set_ring_active()
358 sys_write32(val, RM_RING_REG(pd, idx, RING_CONTROL)); in set_ring_active()
361 static int init_ring(struct dma_iproc_pax_data *pd, enum ring_idx idx) in init_ring() argument
364 uintptr_t desc = (uintptr_t)pd->ring[idx].bd; in init_ring()
365 uintptr_t cmpl = (uintptr_t)pd->ring[idx].cmpl; in init_ring()
368 k_mutex_lock(&pd->dma_lock, K_FOREVER); in init_ring()
371 sys_read32(RM_RING_REG(pd, idx, RING_CMPL_WRITE_PTR)); in init_ring()
374 sys_write32(0x0, RM_RING_REG(pd, idx, RING_CONTROL)); in init_ring()
377 sys_write32(RING_CONTROL_FLUSH, RM_RING_REG(pd, idx, RING_CONTROL)); in init_ring()
379 if (sys_read32(RM_RING_REG(pd, idx, RING_FLUSH_DONE)) & in init_ring()
384 } while (--timeout); in init_ring()
388 ret = -ETIMEDOUT; in init_ring()
393 sys_write32(0x0, RM_RING_REG(pd, idx, RING_CONTROL)); in init_ring()
396 val = sys_read32(RM_COMM_REG(pd, RM_COMM_CTRL_REG(idx))); in init_ring()
398 sys_write32(val, RM_COMM_REG(pd, RM_COMM_CTRL_REG(idx))); in init_ring()
405 sys_write32(val, RM_RING_REG(pd, idx, RING_CMPL_WR_PTR_DDR_CONTROL)); in init_ring()
408 sys_write32(val, RM_RING_REG(pd, idx, RING_BD_START_ADDR)); in init_ring()
410 sys_write32(val, RM_RING_REG(pd, idx, RING_CMPL_START_ADDR)); in init_ring()
411 val = sys_read32(RM_RING_REG(pd, idx, RING_BD_READ_PTR)); in init_ring()
414 set_ring_active(pd, idx, false); in init_ring()
415 rm_ring_clear_stats(pd, idx); in init_ring()
417 k_mutex_unlock(&pd->dma_lock); in init_ring()
425 const struct dma_iproc_pax_cfg *cfg = dev->config; in poll_on_write_sync()
433 sent = &(ring->curr.sync_data); in poll_on_write_sync()
436 pci32[0] = ring->sync_pci.addr_lo; in poll_on_write_sync()
437 pci32[1] = ring->sync_pci.addr_hi; in poll_on_write_sync()
441 ret = pcie_ep_xfer_data_memcpy(cfg->pcie_dev, pci_addr, in poll_on_write_sync()
447 ret = pcie_ep_xfer_data_memcpy(cfg->pcie_dev, pci_addr, in poll_on_write_sync()
456 } while (--timeout); in poll_on_write_sync()
459 LOG_DBG("[ring %d]: not recvd write sync!\n", ring->idx); in poll_on_write_sync()
460 ret = -ETIMEDOUT; in poll_on_write_sync()
469 struct dma_iproc_pax_data *pd = dev->data; in process_cmpl_event() local
471 struct dma_iproc_pax_ring_data *ring = &(pd->ring[idx]); in process_cmpl_event()
476 rd_offs = ring->curr.cmpl_rd_offs; in process_cmpl_event()
478 wr_offs = sys_read32(RM_RING_REG(pd, idx, in process_cmpl_event()
482 ring->curr.cmpl_rd_offs = wr_offs; in process_cmpl_event()
495 c = (struct cmpl_pkt *)((uintptr_t)ring->cmpl + in process_cmpl_event()
499 idx, wr_offs, c->opq, c->rm_status, c->dma_status); in process_cmpl_event()
501 is_outstanding = sys_read32(RM_RING_REG(pd, idx, in process_cmpl_event()
503 if ((ring->curr.opq != c->opq) && (is_outstanding != 0)) { in process_cmpl_event()
505 idx, ring->curr.opq, c->opq, is_outstanding); in process_cmpl_event()
506 ret = -EIO; in process_cmpl_event()
509 if (c->rm_status == RM_COMPLETION_AE_TIMEOUT) { in process_cmpl_event()
511 idx, wr_offs, c->rm_status); in process_cmpl_event()
514 ret = -ETIMEDOUT; in process_cmpl_event()
517 if (ring->dma_callback) { in process_cmpl_event()
518 ring->dma_callback(dev, ring->callback_arg, idx, ret); in process_cmpl_event()
528 struct dma_iproc_pax_data *pd = dev->data; in peek_ring_cmpl() local
530 struct dma_iproc_pax_ring_data *ring = &(pd->ring[idx]); in peek_ring_cmpl()
533 rd_offs = ring->curr.cmpl_rd_offs; in peek_ring_cmpl()
537 wr_offs = sys_read32(RM_RING_REG(pd, idx, in peek_ring_cmpl()
543 } while (--timeout); in peek_ring_cmpl()
550 return -ETIMEDOUT; in peek_ring_cmpl()
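
process_cmpl_event() and peek_ring_cmpl() track completions with a software read offset (ring->curr.cmpl_rd_offs) and a hardware-owned write offset read from the ring's completion write pointer, consuming the delta between them. A standalone sketch of the wrap-safe pending-entry math; the ring size is a made-up power of two:

/* Pending entries in a circular completion ring: the write offset may
 * be numerically behind the read offset after a wrap.
 */
#include <stdint.h>
#include <stdio.h>

#define CMPL_RING_ENTRIES 64u /* hypothetical */

static uint32_t pending(uint32_t rd, uint32_t wr)
{
        return (wr + CMPL_RING_ENTRIES - rd) % CMPL_RING_ENTRIES;
}

int main(void)
{
        printf("%u\n", pending(10, 14)); /* 4 entries to consume  */
        printf("%u\n", pending(60, 2));  /* 6 entries across wrap */
        return 0;
}
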
559 struct dma_iproc_pax_data *pd = dev->data; in rm_isr() local
562 status = sys_read32(RM_COMM_REG(pd, RM_COMM_MSI_INTR_INTERRUPT_STATUS)); in rm_isr()
563 sys_write32(status, RM_COMM_REG(pd, in rm_isr()
567 err_stat = sys_read32(RM_COMM_REG(pd, in rm_isr()
570 RM_COMM_REG(pd, RM_COMM_DME_INTERRUPT_STATUS_CLEAR)); in rm_isr()
572 sys_read32(RM_COMM_REG(pd, in rm_isr()
575 RM_COMM_REG(pd, in rm_isr()
581 k_sem_give(&pd->ring[idx].alert); in rm_isr()
589 const struct dma_iproc_pax_cfg *cfg = dev->config; in dma_iproc_pax_init()
590 struct dma_iproc_pax_data *pd = dev->data; in dma_iproc_pax_init() local
594 if (!device_is_ready(cfg->pcie_dev)) { in dma_iproc_pax_init()
596 return -ENODEV; in dma_iproc_pax_init()
599 pd->dma_base = cfg->dma_base; in dma_iproc_pax_init()
600 pd->rm_comm_base = cfg->rm_comm_base; in dma_iproc_pax_init()
601 pd->used_rings = (cfg->use_rings < PAX_DMA_RINGS_MAX) ? in dma_iproc_pax_init()
602 cfg->use_rings : PAX_DMA_RINGS_MAX; in dma_iproc_pax_init()
605 pd->dma_base, pd->rm_comm_base, pd->used_rings); in dma_iproc_pax_init()
608 k_mutex_init(&pd->dma_lock); in dma_iproc_pax_init()
611 if (init_rm(pd)) { in dma_iproc_pax_init()
612 return -ETIMEDOUT; in dma_iproc_pax_init()
616 rm_cfg_start(pd); in dma_iproc_pax_init()
619 for (r = 0; r < pd->used_rings; r++) { in dma_iproc_pax_init()
620 /* per-ring mutex lock */ in dma_iproc_pax_init()
621 k_mutex_init(&pd->ring[r].lock); in dma_iproc_pax_init()
623 k_sem_init(&pd->ring[r].alert, 0, 1); in dma_iproc_pax_init()
625 pd->ring[r].idx = r; in dma_iproc_pax_init()
626 pd->ring[r].ring_base = cfg->rm_base + in dma_iproc_pax_init()
628 LOG_DBG("RING%d,VERSION:0x%x\n", pd->ring[r].idx, in dma_iproc_pax_init()
629 sys_read32(RM_RING_REG(pd, r, RING_VER))); in dma_iproc_pax_init()
632 pd->ring[r].ring_mem = (void *)((uintptr_t)cfg->bd_memory_base + in dma_iproc_pax_init()
635 if (!pd->ring[r].ring_mem) { in dma_iproc_pax_init()
637 return -ENOMEM; in dma_iproc_pax_init()
640 mem_aligned = ((uintptr_t)pd->ring[r].ring_mem + in dma_iproc_pax_init()
641 PAX_DMA_RING_ALIGN - 1) & in dma_iproc_pax_init()
642 ~(PAX_DMA_RING_ALIGN - 1); in dma_iproc_pax_init()
644 pd->ring[r].cmpl = (void *)mem_aligned; in dma_iproc_pax_init()
645 pd->ring[r].bd = (void *)(mem_aligned + in dma_iproc_pax_init()
647 pd->ring[r].payload = (void *)((uintptr_t)pd->ring[r].bd + in dma_iproc_pax_init()
652 pd->ring[r].idx, in dma_iproc_pax_init()
653 pd->ring[r].ring_mem, in dma_iproc_pax_init()
656 pd->ring[r].idx, in dma_iproc_pax_init()
657 pd->ring[r].bd, in dma_iproc_pax_init()
658 pd->ring[r].cmpl, in dma_iproc_pax_init()
659 pd->ring[r].payload); in dma_iproc_pax_init()
662 prepare_ring(&(pd->ring[r])); in dma_iproc_pax_init()
665 init_ring(pd, r); in dma_iproc_pax_init()
669 rm_cfg_finish(pd); in dma_iproc_pax_init()
680 LOG_INF("%s PAX DMA rings in poll mode!\n", dev->name); in dma_iproc_pax_init()
682 LOG_INF("%s RM setup %d rings\n", dev->name, pd->used_rings); in dma_iproc_pax_init()
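
dma_iproc_pax_init() carves each ring's backing memory into an aligned completion area, the BD descriptor area, and a payload scratch area, using the align-up idiom visible at lines 640-642. A standalone sketch of that carving; the alignment and region sizes are stand-ins for the real PAX_DMA_* constants:

/* Align the base up, then lay out completion ring, BD ring, and
 * payload scratch back to back.
 */
#include <stdint.h>
#include <stdio.h>

#define RING_ALIGN 0x2000u /* assumed alignment */
#define CMPL_SIZE  0x1000u /* assumed completion ring size */
#define BD_SIZE    0x4000u /* assumed BD ring size */

struct ring_mem {
        uintptr_t cmpl;
        uintptr_t bd;
        uintptr_t payload;
};

static struct ring_mem carve(uintptr_t base)
{
        struct ring_mem m;
        uintptr_t aligned = (base + RING_ALIGN - 1) & ~(uintptr_t)(RING_ALIGN - 1);

        m.cmpl = aligned;           /* completion ring first */
        m.bd = aligned + CMPL_SIZE; /* then BD descriptors   */
        m.payload = m.bd + BD_SIZE; /* then payload scratch  */
        return m;
}

int main(void)
{
        struct ring_mem m = carve(0x80000123u);

        printf("cmpl=0x%lx bd=0x%lx payload=0x%lx\n",
               (unsigned long)m.cmpl, (unsigned long)m.bd,
               (unsigned long)m.payload);
        return 0;
}
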
707 struct dma_iproc_pax_data *pd = dev->data; in set_pkt_count() local
711 val = sys_read32(RM_RING_REG(pd, idx, in set_pkt_count()
715 sys_write32(val, RM_RING_REG(pd, idx, in set_pkt_count()
723 struct dma_iproc_pax_data *pd = dev->data; in wait_for_pkt_completion() local
726 ring = &(pd->ring[idx]); in wait_for_pkt_completion()
729 if (k_sem_take(&ring->alert, K_MSEC(PAX_DMA_TIMEOUT)) != 0) { in wait_for_pkt_completion()
731 return -ETIMEDOUT; in wait_for_pkt_completion()
743 struct dma_iproc_pax_data *pd = dev->data; in dma_iproc_pax_do_xfer() local
744 const struct dma_iproc_pax_cfg *cfg = dev->config; in dma_iproc_pax_do_xfer()
752 ring = &(pd->ring[idx]); in dma_iproc_pax_do_xfer()
753 pl = ring->payload; in dma_iproc_pax_do_xfer()
756 * Host sync buffer isn't ready at zephyr/driver init-time in dma_iproc_pax_do_xfer()
760 if ((ring->sync_pci.addr_lo == 0x0) && in dma_iproc_pax_do_xfer()
761 (ring->sync_pci.addr_hi == 0x0)) { in dma_iproc_pax_do_xfer()
763 LOG_DBG("sync addr loc 0x%x\n", cfg->scr_addr_loc); in dma_iproc_pax_do_xfer()
765 sync.addr_lo = sys_read32(cfg->scr_addr_loc in dma_iproc_pax_do_xfer()
767 sync.addr_hi = sys_read32(cfg->scr_addr_loc); in dma_iproc_pax_do_xfer()
768 ring->sync_pci.addr_lo = sync.addr_lo + idx * 4; in dma_iproc_pax_do_xfer()
769 ring->sync_pci.addr_hi = sync.addr_hi; in dma_iproc_pax_do_xfer()
771 ring->sync_pci.addr_hi, in dma_iproc_pax_do_xfer()
772 ring->sync_pci.addr_lo); in dma_iproc_pax_do_xfer()
776 ring->curr.sync_data.opaque = ring->curr.opq; in dma_iproc_pax_do_xfer()
777 ring->curr.sync_data.total_pkts = pl_len; in dma_iproc_pax_do_xfer()
778 memcpy((void *)&ring->sync_loc, in dma_iproc_pax_do_xfer()
779 (void *)&(ring->curr.sync_data), 4); in dma_iproc_pax_do_xfer()
780 sync_pl.pci_addr = ring->sync_pci.addr_lo | in dma_iproc_pax_do_xfer()
781 (uint64_t)ring->sync_pci.addr_hi << 32; in dma_iproc_pax_do_xfer()
782 sync_pl.axi_addr = (uintptr_t)&ring->sync_loc; in dma_iproc_pax_do_xfer()
784 sync_pl.xfer_sz = 4; /* 4-bytes */ in dma_iproc_pax_do_xfer()
788 hdr = (void *)ring->curr.write_ptr; in dma_iproc_pax_do_xfer()
790 toggle_bit = ring->curr.toggle; in dma_iproc_pax_do_xfer()
792 ring->curr.opq = curr_pkt_id(ring); in dma_iproc_pax_do_xfer()
826 set_ring_active(pd, idx, true); in dma_iproc_pax_do_xfer()
834 k_mutex_lock(&ring->lock, K_FOREVER); in dma_iproc_pax_do_xfer()
835 ring->ring_active = 0; in dma_iproc_pax_do_xfer()
836 k_mutex_unlock(&ring->lock); in dma_iproc_pax_do_xfer()
839 ring->ring_active = 0; in dma_iproc_pax_do_xfer()
841 set_ring_active(pd, idx, false); in dma_iproc_pax_do_xfer()
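
dma_iproc_pax_do_xfer() lazily latches the host write-sync location from two 32-bit scratch words, giving each ring its own 4-byte slot (addr_lo + idx * 4, line 768) and composing the 64-bit PCI address as lo | hi << 32 (lines 780-781). A standalone sketch of that address assembly; the scratch values in main() are invented:

/* Per-ring host sync address: offset the low word by one 4-byte slot
 * per ring, then splice in the high word.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t ring_sync_addr(uint32_t lo, uint32_t hi, unsigned ring_idx)
{
        uint32_t slot_lo = lo + ring_idx * 4u; /* one 4-byte slot per ring */

        return (uint64_t)slot_lo | ((uint64_t)hi << 32);
}

int main(void)
{
        printf("ring2 sync @ 0x%llx\n",
               (unsigned long long)ring_sync_addr(0x1000u, 0x2u, 2));
        return 0;
}
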
849 struct dma_iproc_pax_data *pd = dev->data; in dma_iproc_pax_configure() local
860 return -EINVAL; in dma_iproc_pax_configure()
863 ring = &(pd->ring[channel]); in dma_iproc_pax_configure()
864 k_mutex_lock(&ring->lock, K_FOREVER); in dma_iproc_pax_configure()
866 if (cfg->block_count > 1) { in dma_iproc_pax_configure()
868 ret = -ENOTSUP; in dma_iproc_pax_configure()
872 if (ring->ring_active) { in dma_iproc_pax_configure()
873 ret = -EBUSY; in dma_iproc_pax_configure()
877 ring->ring_active = 1; in dma_iproc_pax_configure()
879 if (cfg->channel_direction == MEMORY_TO_PERIPHERAL) { in dma_iproc_pax_configure()
881 axi_addr32 = (uint32_t *)&cfg->head_block->source_address; in dma_iproc_pax_configure()
882 pci_addr32 = (uint32_t *)&cfg->head_block->dest_address; in dma_iproc_pax_configure()
884 ring->payload->direction = CARD_TO_HOST; in dma_iproc_pax_configure()
885 ring->payload->pci_addr = cfg->head_block->dest_address; in dma_iproc_pax_configure()
886 ring->payload->axi_addr = cfg->head_block->source_address; in dma_iproc_pax_configure()
887 } else if (cfg->channel_direction == PERIPHERAL_TO_MEMORY) { in dma_iproc_pax_configure()
889 axi_addr32 = (uint32_t *)&cfg->head_block->dest_address; in dma_iproc_pax_configure()
890 pci_addr32 = (uint32_t *)&cfg->head_block->source_address; in dma_iproc_pax_configure()
892 ring->payload->direction = HOST_TO_CARD; in dma_iproc_pax_configure()
893 ring->payload->pci_addr = cfg->head_block->source_address; in dma_iproc_pax_configure()
894 ring->payload->axi_addr = cfg->head_block->dest_address; in dma_iproc_pax_configure()
896 ring->ring_active = 0; in dma_iproc_pax_configure()
897 ret = -ENOTSUP; in dma_iproc_pax_configure()
901 xfer_sz = cfg->head_block->block_size; in dma_iproc_pax_configure()
906 ring->ring_active = 0; in dma_iproc_pax_configure()
907 ret = -EINVAL; in dma_iproc_pax_configure()
913 ring->ring_active = 0; in dma_iproc_pax_configure()
914 ret = -EINVAL; in dma_iproc_pax_configure()
921 ring->ring_active = 0; in dma_iproc_pax_configure()
922 ret = -EINVAL; in dma_iproc_pax_configure()
929 ring->ring_active = 0; in dma_iproc_pax_configure()
930 ret = -EINVAL; in dma_iproc_pax_configure()
934 ring->payload->xfer_sz = xfer_sz; in dma_iproc_pax_configure()
936 ring->dma_callback = cfg->dma_callback; in dma_iproc_pax_configure()
937 ring->callback_arg = cfg->user_data; in dma_iproc_pax_configure()
939 k_mutex_unlock(&ring->lock); in dma_iproc_pax_configure()
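
dma_iproc_pax_configure() maps the generic Zephyr channel direction onto the PAX host/card view: MEMORY_TO_PERIPHERAL becomes CARD_TO_HOST (AXI source, PCI destination), PERIPHERAL_TO_MEMORY becomes HOST_TO_CARD (PCI source, AXI destination), and anything else fails with -ENOTSUP. A standalone sketch of that mapping using local stand-in enums rather than the Zephyr or driver definitions:

/* Direction mapping sketch: both enums here are invented stand-ins. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

enum dir { MEM_TO_PERIPH, PERIPH_TO_MEM };       /* stand-in for Zephyr's enum */
enum pax_dir { CARD_TO_HOST_X, HOST_TO_CARD_X }; /* stand-in for the driver's */

struct payload {
        enum pax_dir direction;
        uint64_t pci_addr;
        uint64_t axi_addr;
};

static int map_dir(enum dir d, uint64_t src, uint64_t dst, struct payload *pl)
{
        switch (d) {
        case MEM_TO_PERIPH: /* local AXI -> host PCI */
                pl->direction = CARD_TO_HOST_X;
                pl->axi_addr = src;
                pl->pci_addr = dst;
                return 0;
        case PERIPH_TO_MEM: /* host PCI -> local AXI */
                pl->direction = HOST_TO_CARD_X;
                pl->pci_addr = src;
                pl->axi_addr = dst;
                return 0;
        default:
                return -ENOTSUP;
        }
}

int main(void)
{
        struct payload pl;

        printf("%d\n", map_dir(MEM_TO_PERIPH, 0x1000, 0x80000000u, &pl));
        return 0;
}
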
948 struct dma_iproc_pax_data *pd = dev->data; in dma_iproc_pax_transfer_start() local
953 return -EINVAL; in dma_iproc_pax_transfer_start()
955 ring = &(pd->ring[channel]); in dma_iproc_pax_transfer_start()
957 ret = dma_iproc_pax_do_xfer(dev, channel, ring->payload, 1); in dma_iproc_pax_transfer_start()