Lines Matching +full:chain +full:- +full:transfer
4 * SPDX-License-Identifier: Apache-2.0
36 return ring->pkt_id = 0x0; in reset_pkt_id()
41 ring->pkt_id = (ring->pkt_id + 1) % 32; in alloc_pkt_id()
42 return ring->pkt_id; in alloc_pkt_id()
47 return ring->pkt_id; in curr_pkt_id()
52 return ring->curr.toggle; in curr_toggle_val()
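
These matches appear to come from Zephyr's Broadcom iProc PAX (v2) DMA driver. The pkt_id helpers above keep a small rolling tag per ring: each submitted packet gets an opaque ID in the range 0..31 that is later matched against the ID echoed back in the completion packet. A minimal sketch of that pattern, with illustrative names rather than the driver's own:

    #include <stdint.h>

    #define PKT_ID_COUNT 32U

    struct ring_tag {
        uint32_t pkt_id;   /* last allocated opaque tag, 0..31 */
    };

    /* Reset the tag when the ring is (re)initialized. */
    static uint32_t tag_reset(struct ring_tag *t)
    {
        return t->pkt_id = 0U;
    }

    /* Allocate the next tag; wraps modulo 32 so it fits the descriptor field. */
    static uint32_t tag_alloc(struct ring_tag *t)
    {
        t->pkt_id = (t->pkt_id + 1U) % PKT_ID_COUNT;
        return t->pkt_id;
    }
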
64 r->opq = opq; in rm_write_header_desc()
65 r->bdf = 0x0; in rm_write_header_desc()
66 r->res1 = 0x0; in rm_write_header_desc()
68 r->bdcount = bdcount; in rm_write_header_desc()
69 r->prot = 0x0; in rm_write_header_desc()
70 r->res2 = 0x0; in rm_write_header_desc()
72 r->start = 1; in rm_write_header_desc()
73 r->end = 1; in rm_write_header_desc()
75 r->type = PAX_DMA_TYPE_RM_HEADER; in rm_write_header_desc()
76 r->pcie_addr_msb = PAX_DMA_PCI_ADDR_HI_MSB8(pci_addr); in rm_write_header_desc()
77 r->res3 = 0x0; in rm_write_header_desc()
78 r->res4 = 0x0; in rm_write_header_desc()
80 r->toggle = toggle; in rm_write_header_desc()
82 r->toggle = 0; in rm_write_header_desc()
95 pcie->pcie_addr_lsb = pci_addr; in rm_write_pcie_desc()
96 pcie->res1 = 0x0; in rm_write_pcie_desc()
98 pcie->type = PAX_DMA_TYPE_PCIE_DESC; in rm_write_pcie_desc()
100 pcie->toggle = toggle; in rm_write_pcie_desc()
102 pcie->toggle = 0; in rm_write_pcie_desc()
119 desc->axi_addr = axi_addr; in rm_write_src_dst_desc()
120 desc->length = size; in rm_write_src_dst_desc()
122 desc->toggle = toggle; in rm_write_src_dst_desc()
124 desc->toggle = 0; in rm_write_src_dst_desc()
128 desc->type = is_mega ? in rm_write_src_dst_desc()
131 desc->type = is_mega ? in rm_write_src_dst_desc()
141 r->toggle = toggle; in init_toggle()
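
In the descriptor writers above, the payload fields are filled first and the toggle field is written last; while a packet is still being assembled the toggle is left at 0, and init_toggle() flips it once the packet is complete, which is presumably what makes the descriptor visible to the ring manager. A hedged sketch of that "payload first, toggle last" idea with a made-up descriptor layout (the real PAX descriptors are packed bitfields):

    #include <stdint.h>

    struct fake_desc {
        uint64_t payload;
        uint32_t type;
        uint32_t toggle;   /* ownership/validity marker */
    };

    static void desc_write(struct fake_desc *d, uint64_t payload,
                           uint32_t type, uint32_t toggle)
    {
        d->payload = payload;
        d->type = type;
        __sync_synchronize();  /* payload visible before toggle changes hands */
        d->toggle = toggle;    /* pass 0 to keep the descriptor invalid for now */
    }
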
154 curr = (uintptr_t)ring->curr.write_ptr; in get_curr_desc_addr()
157 if (nxt->type == PAX_DMA_TYPE_NEXT_PTR) { in get_curr_desc_addr()
159 curr, nxt->toggle, (uintptr_t)nxt->addr); in get_curr_desc_addr()
160 uintptr_t last = (uintptr_t)ring->bd + in get_curr_desc_addr()
162 nxt->toggle = ring->curr.toggle; in get_curr_desc_addr()
163 ring->curr.toggle = (ring->curr.toggle == 0) ? 1 : 0; in get_curr_desc_addr()
167 curr = (uintptr_t)ring->bd; in get_curr_desc_addr()
171 ring->descs_inflight++; in get_curr_desc_addr()
174 ring->curr.write_ptr = (void *)(curr + PAX_DMA_RM_DESC_BDWIDTH); in get_curr_desc_addr()
175 ring->descs_inflight++; in get_curr_desc_addr()
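
get_curr_desc_addr() advances a software write pointer through the buffer-descriptor (BD) buffers; when it lands on a PAX_DMA_TYPE_NEXT_PTR entry it follows the link to the next buffer and flips the ring's toggle value, so descriptors written after a wrap-around carry a different toggle than stale ones. A simplified sketch of that advance logic (structure names and the descriptor width are illustrative):

    struct next_desc { unsigned int type; unsigned int toggle; void *addr; };

    struct sw_ring {
        void *write_ptr;        /* next descriptor slot to fill */
        unsigned int toggle;    /* current ownership value, 0 or 1 */
        unsigned int inflight;  /* descriptors queued since last doorbell */
    };

    #define TYPE_NEXT_PTR 1U
    #define DESC_WIDTH    32U

    static void *ring_next_slot(struct sw_ring *r)
    {
        struct next_desc *d = r->write_ptr;

        if (d->type == TYPE_NEXT_PTR) {
            /* Publish the link with the old toggle, flip it for the next
             * buffer's descriptors, then follow the link.
             */
            d->toggle = r->toggle;
            r->toggle ^= 1U;
            r->inflight++;          /* the link descriptor is counted too */
            d = r->write_ptr = d->addr;
        }

        r->write_ptr = (char *)d + DESC_WIDTH;
        r->inflight++;
        return d;
    }
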
188 nxt->addr = (uintptr_t)next_ptr; in rm_write_next_table_desc()
189 nxt->type = PAX_DMA_TYPE_NEXT_PTR; in rm_write_next_table_desc()
190 nxt->toggle = toggle; in rm_write_next_table_desc()
202 memset(ring->bd, 0x0, PAX_DMA_RM_DESC_RING_SIZE * PAX_DMA_NUM_BD_BUFFS); in prepare_ring()
203 memset(ring->cmpl, 0x0, PAX_DMA_RM_CMPL_RING_SIZE); in prepare_ring()
209 curr = (uintptr_t)ring->bd; in prepare_ring()
230 /* last entry, chain back to first buffer */ in prepare_ring()
232 next = (uintptr_t)ring->bd; in prepare_ring()
235 } while (--buff_count); in prepare_ring()
240 ring->curr.write_ptr = ring->bd; in prepare_ring()
242 ring->curr.toggle = 1; in prepare_ring()
244 ring->curr.cmpl_rd_offs = 0; in prepare_ring()
246 ring->descs_inflight = 0; in prepare_ring()
249 ring->curr.sync_data.signature = PAX_DMA_WRITE_SYNC_SIGNATURE; in prepare_ring()
250 ring->curr.sync_data.ring = ring->idx; in prepare_ring()
252 ring->curr.sync_data.opaque = 0x0; in prepare_ring()
254 ring->curr.sync_data.total_pkts = 0x0; in prepare_ring()
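
prepare_ring() zeroes the BD and completion areas, writes a next-pointer descriptor at the end of each BD buffer (the last one chains back to the first, as the matched comment says), and resets the software state: write pointer, toggle, completion read offset, in-flight count, and the write-sync payload. A compact sketch of the buffer-chaining loop, with placeholder sizes and a simplified link-descriptor layout:

    #include <stdint.h>
    #include <string.h>

    #define NUM_BD_BUFFS  2U
    #define BD_BUFF_SIZE  8192U   /* placeholder per-buffer size */
    #define DESC_WIDTH    32U     /* placeholder descriptor width */
    #define TYPE_NEXT_PTR 1U

    struct next_desc { uint64_t addr; uint32_t type; uint32_t toggle; };

    static void chain_bd_buffers(uint8_t *bd, uint32_t toggle)
    {
        uint8_t *curr = bd;
        uint32_t remaining = NUM_BD_BUFFS;

        memset(bd, 0, (size_t)BD_BUFF_SIZE * NUM_BD_BUFFS);

        do {
            /* The last slot of each buffer holds the link descriptor. */
            struct next_desc *nxt =
                (struct next_desc *)(curr + BD_BUFF_SIZE - DESC_WIDTH);
            uint8_t *next = curr + BD_BUFF_SIZE;

            if (remaining == 1U) {
                next = bd;   /* last entry, chain back to first buffer */
            }
            nxt->addr = (uintptr_t)next;
            nxt->type = TYPE_NEXT_PTR;
            nxt->toggle = toggle;
            curr += BD_BUFF_SIZE;
        } while (--remaining);
    }
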
259 int ret = -ETIMEDOUT, timeout = 1000; in init_rm()
261 k_mutex_lock(&pd->dma_lock, K_FOREVER); in init_rm()
271 } while (--timeout); in init_rm()
272 k_mutex_unlock(&pd->dma_lock); in init_rm()
287 k_mutex_lock(&pd->dma_lock, K_FOREVER); in rm_cfg_start()
372 k_mutex_unlock(&pd->dma_lock); in rm_cfg_start()
390 k_mutex_lock(&pd->dma_lock, K_FOREVER); in rm_cfg_finish()
397 k_mutex_unlock(&pd->dma_lock); in rm_cfg_finish()
403 struct dma_iproc_pax_ring_data *ring = &(pd->ring[idx]); in write_doorbell()
405 sys_write32(ring->descs_inflight, in write_doorbell()
407 ring->descs_inflight = 0; in write_doorbell()
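
write_doorbell() batches submissions: every descriptor generated since the previous doorbell is counted in descs_inflight, and a single MMIO write tells the ring manager how many new descriptors to fetch before the counter is cleared. A sketch using Zephyr's sys_write32(), with a placeholder register offset:

    #include <zephyr/sys/sys_io.h>

    #define RING_DOORBELL_OFFSET 0x0   /* placeholder register offset */

    struct sw_ring_db {
        mem_addr_t ring_base;      /* MMIO base of this ring */
        uint32_t descs_inflight;   /* descriptors queued since last doorbell */
    };

    static void ring_doorbell(struct sw_ring_db *r)
    {
        /* One register write publishes the whole batch to the ring manager. */
        sys_write32(r->descs_inflight, r->ring_base + RING_DOORBELL_OFFSET);
        r->descs_inflight = 0;
    }
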
428 uintptr_t desc = (uintptr_t)pd->ring[idx].bd; in init_ring()
429 uintptr_t cmpl = (uintptr_t)pd->ring[idx].cmpl; in init_ring()
432 k_mutex_lock(&pd->dma_lock, K_FOREVER); in init_ring()
452 } while (--timeout); in init_ring()
456 ret = -ETIMEDOUT; in init_ring()
501 k_mutex_unlock(&pd->dma_lock); in init_ring()
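
init_rm() and init_ring() share the same bounded-poll idiom: under the per-device mutex, loop on a hardware ready condition while decrementing a timeout counter, and report -ETIMEDOUT if the counter runs out. A generic sketch of that idiom (the readiness check stands in for reading a ring-manager status register):

    #include <zephyr/kernel.h>
    #include <errno.h>
    #include <stdbool.h>

    static bool hw_is_ready(void)
    {
        return true;   /* placeholder for a status-register read */
    }

    static int wait_hw_ready(struct k_mutex *lock)
    {
        int ret = -ETIMEDOUT;
        int timeout = 1000;

        k_mutex_lock(lock, K_FOREVER);
        do {
            if (hw_is_ready()) {
                ret = 0;
                break;
            }
            k_busy_wait(1000);   /* 1 ms between polls */
        } while (--timeout);
        k_mutex_unlock(lock);

        return ret;
    }
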
509 const struct dma_iproc_pax_cfg *cfg = dev->config; in poll_on_write_sync()
517 sent = &(ring->curr.sync_data); in poll_on_write_sync()
520 pci32[0] = ring->sync_pci.addr_lo; in poll_on_write_sync()
521 pci32[1] = ring->sync_pci.addr_hi; in poll_on_write_sync()
525 ret = pcie_ep_xfer_data_memcpy(cfg->pcie_dev, pci_addr, in poll_on_write_sync()
531 ret = pcie_ep_xfer_data_memcpy(cfg->pcie_dev, pci_addr, in poll_on_write_sync()
540 } while (--timeout); in poll_on_write_sync()
543 LOG_ERR("[ring %d]: not recvd write sync!\n", ring->idx); in poll_on_write_sync()
544 ret = -ETIMEDOUT; in poll_on_write_sync()
553 struct dma_iproc_pax_data *pd = dev->data; in process_cmpl_event()
555 struct dma_iproc_pax_ring_data *ring = &(pd->ring[idx]); in process_cmpl_event()
560 rd_offs = ring->curr.cmpl_rd_offs; in process_cmpl_event()
566 ring->curr.cmpl_rd_offs = wr_offs; in process_cmpl_event()
579 c = (struct cmpl_pkt *)((uintptr_t)ring->cmpl + in process_cmpl_event()
583 idx, wr_offs, c->opq, c->rm_status, c->dma_status); in process_cmpl_event()
587 if ((ring->curr.opq != c->opq) && (is_outstanding != 0)) { in process_cmpl_event()
589 idx, ring->curr.opq, c->opq, is_outstanding); in process_cmpl_event()
590 ret = -EIO; in process_cmpl_event()
593 if (c->rm_status == RM_COMPLETION_AE_TIMEOUT) { in process_cmpl_event()
595 idx, wr_offs, c->rm_status); in process_cmpl_event()
598 ret = -ETIMEDOUT; in process_cmpl_event()
601 if (ring->dma_callback) { in process_cmpl_event()
602 ring->dma_callback(dev, ring->callback_arg, idx, ret); in process_cmpl_event()
606 ring->total_pkt_count = 0; in process_cmpl_event()
615 struct dma_iproc_pax_data *pd = dev->data; in peek_ring_cmpl()
617 struct dma_iproc_pax_ring_data *ring = &(pd->ring[idx]); in peek_ring_cmpl()
620 rd_offs = ring->curr.cmpl_rd_offs; in peek_ring_cmpl()
630 } while (--timeout); in peek_ring_cmpl()
637 return -ETIMEDOUT; in peek_ring_cmpl()
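
process_cmpl_event() and peek_ring_cmpl() consume the completion ring between the saved read offset and the hardware's write offset: each completion packet carries the opaque tag plus RM/DMA status, the tag is compared against the one stamped into the header descriptor, and the per-ring callback receives 0, -EIO (tag mismatch) or -ETIMEDOUT (engine timeout). A simplified consumer loop with an illustrative packet layout and placeholder constants:

    #include <stdint.h>
    #include <errno.h>

    #define CMPL_DESC_SIZE    32U    /* placeholder completion packet size */
    #define CMPL_RING_DEPTH   64U    /* placeholder ring depth, in packets */
    #define RM_STATUS_TIMEOUT 0xFFU  /* placeholder "engine timeout" code */

    struct cmpl_pkt { uint32_t opq; uint32_t rm_status; uint32_t dma_status; };

    static int drain_completions(const uint8_t *cmpl_base, uint32_t rd_offs,
                                 uint32_t wr_offs, uint32_t expected_opq)
    {
        int ret = 0;

        while (rd_offs != wr_offs) {
            const struct cmpl_pkt *c = (const struct cmpl_pkt *)
                (cmpl_base + rd_offs * CMPL_DESC_SIZE);

            if (c->opq != expected_opq) {
                ret = -EIO;         /* stale or mismatched packet */
            }
            if (c->rm_status == RM_STATUS_TIMEOUT) {
                ret = -ETIMEDOUT;   /* engine gave up on the packet */
            }
            rd_offs = (rd_offs + 1U) % CMPL_RING_DEPTH;
        }

        return ret;
    }
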
646 struct dma_iproc_pax_data *pd = dev->data; in rm_isr()
664 k_sem_give(&pd->ring[idx].alert); in rm_isr()
672 const struct dma_iproc_pax_cfg *cfg = dev->config; in dma_iproc_pax_init()
673 struct dma_iproc_pax_data *pd = dev->data; in dma_iproc_pax_init()
677 if (!device_is_ready(cfg->pcie_dev)) { in dma_iproc_pax_init()
679 return -ENODEV; in dma_iproc_pax_init()
682 pd->dma_base = cfg->dma_base; in dma_iproc_pax_init()
683 pd->rm_comm_base = cfg->rm_comm_base; in dma_iproc_pax_init()
684 pd->used_rings = (cfg->use_rings < PAX_DMA_RINGS_MAX) ? in dma_iproc_pax_init()
685 cfg->use_rings : PAX_DMA_RINGS_MAX; in dma_iproc_pax_init()
688 k_mutex_init(&pd->dma_lock); in dma_iproc_pax_init()
692 return -ETIMEDOUT; in dma_iproc_pax_init()
699 for (r = 0; r < pd->used_rings; r++) { in dma_iproc_pax_init()
700 /* per-ring mutex lock */ in dma_iproc_pax_init()
701 k_mutex_init(&pd->ring[r].lock); in dma_iproc_pax_init()
703 k_sem_init(&pd->ring[r].alert, 0, 1); in dma_iproc_pax_init()
705 pd->ring[r].idx = r; in dma_iproc_pax_init()
706 pd->ring[r].ring_base = cfg->rm_base + in dma_iproc_pax_init()
708 LOG_DBG("RING%d,VERSION:0x%x\n", pd->ring[r].idx, in dma_iproc_pax_init()
712 pd->ring[r].ring_mem = (void *)((uintptr_t)cfg->bd_memory_base + in dma_iproc_pax_init()
714 if (!pd->ring[r].ring_mem) { in dma_iproc_pax_init()
716 return -ENOMEM; in dma_iproc_pax_init()
719 mem_aligned = ((uintptr_t)pd->ring[r].ring_mem + in dma_iproc_pax_init()
720 PAX_DMA_RING_ALIGN - 1) & in dma_iproc_pax_init()
721 ~(PAX_DMA_RING_ALIGN - 1); in dma_iproc_pax_init()
723 pd->ring[r].cmpl = (void *)mem_aligned; in dma_iproc_pax_init()
724 pd->ring[r].bd = (void *)(mem_aligned + in dma_iproc_pax_init()
726 pd->ring[r].sync_loc = (void *)((uintptr_t)pd->ring[r].bd + in dma_iproc_pax_init()
731 pd->ring[r].idx, in dma_iproc_pax_init()
732 pd->ring[r].ring_mem, in dma_iproc_pax_init()
735 pd->ring[r].idx, in dma_iproc_pax_init()
736 pd->ring[r].bd, in dma_iproc_pax_init()
737 pd->ring[r].cmpl, in dma_iproc_pax_init()
738 pd->ring[r].sync_loc); in dma_iproc_pax_init()
741 prepare_ring(&(pd->ring[r])); in dma_iproc_pax_init()
759 LOG_INF("%s PAX DMA rings in poll mode!\n", dev->name); in dma_iproc_pax_init()
761 LOG_INF("%s RM setup %d rings\n", dev->name, pd->used_rings); in dma_iproc_pax_init()
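
dma_iproc_pax_init() carves each ring's memory out of one buffer: the start is rounded up to PAX_DMA_RING_ALIGN with the usual (addr + align - 1) & ~(align - 1) mask, the completion ring comes first, the BD buffers follow, and the write-sync word sits after the BDs. A sketch of that carve-up with placeholder sizes:

    #include <stdint.h>

    #define RING_ALIGN     8192U   /* placeholder alignment */
    #define CMPL_RING_SIZE 2048U   /* placeholder completion-ring size */
    #define BD_RING_SIZE   8192U   /* placeholder BD area size (all buffers) */

    struct ring_mem {
        void *cmpl;       /* completion ring */
        void *bd;         /* buffer-descriptor ring */
        void *sync_loc;   /* write-sync scratch word */
    };

    static void carve_ring_mem(struct ring_mem *m, uintptr_t raw)
    {
        /* Round up to the alignment the ring manager expects. */
        uintptr_t aligned =
            (raw + RING_ALIGN - 1U) & ~((uintptr_t)RING_ALIGN - 1U);

        m->cmpl = (void *)aligned;
        m->bd = (void *)(aligned + CMPL_RING_SIZE);
        m->sync_loc = (void *)((uintptr_t)m->bd + BD_RING_SIZE);
    }
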
778 ring->current_hdr = (uintptr_t)get_curr_desc_addr(ring); in dma_iproc_pax_gen_desc()
779 rm_write_header_desc((void *)ring->current_hdr, in dma_iproc_pax_gen_desc()
784 ring->total_pkt_count++; in dma_iproc_pax_gen_desc()
796 hdr = (struct rm_header *)ring->current_hdr; in dma_iproc_pax_gen_desc()
797 hdr->bdcount = *non_hdr_bd_count; in dma_iproc_pax_gen_desc()
819 pci_addr = config->dest_address; in dma_iproc_pax_gen_packets()
820 axi_addr = config->source_address; in dma_iproc_pax_gen_packets()
824 axi_addr = config->dest_address; in dma_iproc_pax_gen_packets()
825 pci_addr = config->source_address; in dma_iproc_pax_gen_packets()
829 LOG_ERR("not supported transfer direction"); in dma_iproc_pax_gen_packets()
830 return -EINVAL; in dma_iproc_pax_gen_packets()
833 outstanding = config->block_size; in dma_iproc_pax_gen_packets()
859 outstanding = outstanding - curr; in dma_iproc_pax_gen_packets()
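
dma_iproc_pax_gen_packets() maps the transfer direction onto PCIe and AXI addresses (MEMORY_TO_HOST writes toward dest_address on the host, HOST_TO_MEMORY reads from source_address), then slices block_size into chunks small enough for one source/destination descriptor pair, shrinking the outstanding byte count by the amount consumed on each pass. A sketch of that slicing loop with a placeholder per-descriptor limit:

    #include <stdint.h>

    #define MAX_XFER_PER_DESC 16384U   /* placeholder per-descriptor limit */

    /* Returns how many src/dst descriptor pairs a block of 'block_size'
     * bytes needs.
     */
    static unsigned int count_descs(uint32_t block_size)
    {
        uint32_t outstanding = block_size;
        unsigned int descs = 0;

        while (outstanding != 0U) {
            uint32_t curr = (outstanding > MAX_XFER_PER_DESC) ?
                            MAX_XFER_PER_DESC : outstanding;

            /* a src/dst descriptor pair would be emitted here */
            outstanding = outstanding - curr;
            descs++;
        }

        return descs;
    }
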
885 struct dma_iproc_pax_data *pd = dev->data; in set_pkt_count()
901 struct dma_iproc_pax_data *pd = dev->data; in wait_for_pkt_completion()
904 ring = &(pd->ring[idx]); in wait_for_pkt_completion()
906 if (k_sem_take(&ring->alert, K_MSEC(PAX_DMA_TIMEOUT)) != 0) { in wait_for_pkt_completion()
908 return -ETIMEDOUT; in wait_for_pkt_completion()
919 struct dma_iproc_pax_data *pd = dev->data; in dma_iproc_pax_process_dma_blocks()
920 const struct dma_iproc_pax_cfg *cfg = dev->config; in dma_iproc_pax_process_dma_blocks()
926 struct dma_block_config *block_config = config->head_block; in dma_iproc_pax_process_dma_blocks()
930 return -EINVAL; in dma_iproc_pax_process_dma_blocks()
933 ring = &(pd->ring[idx]); in dma_iproc_pax_process_dma_blocks()
936 * Host sync buffer isn't ready at zephyr/driver init-time in dma_iproc_pax_process_dma_blocks()
940 if ((ring->sync_pci.addr_lo == 0x0) && in dma_iproc_pax_process_dma_blocks()
941 (ring->sync_pci.addr_hi == 0x0)) { in dma_iproc_pax_process_dma_blocks()
943 LOG_DBG("sync addr loc 0x%x\n", cfg->scr_addr_loc); in dma_iproc_pax_process_dma_blocks()
944 sync.addr_lo = sys_read32(cfg->scr_addr_loc + 4); in dma_iproc_pax_process_dma_blocks()
945 sync.addr_hi = sys_read32(cfg->scr_addr_loc); in dma_iproc_pax_process_dma_blocks()
946 ring->sync_pci.addr_lo = sync.addr_lo + idx * 4; in dma_iproc_pax_process_dma_blocks()
947 ring->sync_pci.addr_hi = sync.addr_hi; in dma_iproc_pax_process_dma_blocks()
949 ring->sync_pci.addr_hi, in dma_iproc_pax_process_dma_blocks()
950 ring->sync_pci.addr_lo); in dma_iproc_pax_process_dma_blocks()
954 ring->curr.sync_data.opaque = ring->curr.opq; in dma_iproc_pax_process_dma_blocks()
955 ring->curr.sync_data.total_pkts = config->block_count; in dma_iproc_pax_process_dma_blocks()
956 memcpy((void *)ring->sync_loc, in dma_iproc_pax_process_dma_blocks()
957 (void *)&(ring->curr.sync_data), 4); in dma_iproc_pax_process_dma_blocks()
958 sync_pl.dest_address = ring->sync_pci.addr_lo | in dma_iproc_pax_process_dma_blocks()
959 (uint64_t)ring->sync_pci.addr_hi << 32; in dma_iproc_pax_process_dma_blocks()
960 sync_pl.source_address = (uintptr_t)ring->sync_loc; in dma_iproc_pax_process_dma_blocks()
961 sync_pl.block_size = 4; /* 4-bytes */ in dma_iproc_pax_process_dma_blocks()
964 toggle_bit = ring->curr.toggle; in dma_iproc_pax_process_dma_blocks()
966 ring->curr.opq = curr_pkt_id(ring); in dma_iproc_pax_process_dma_blocks()
971 config->channel_direction, in dma_iproc_pax_process_dma_blocks()
977 block_config = block_config->next_block; in dma_iproc_pax_process_dma_blocks()
983 * have same data transfer direction. Setting non_hdr_bd_count to 0, in dma_iproc_pax_process_dma_blocks()
986 ring->non_hdr_bd_count = 0; in dma_iproc_pax_process_dma_blocks()
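
dma_iproc_pax_process_dma_blocks() first publishes a small write-sync payload (signature, ring index, opaque tag, total packet count) to a host address read from a scratch location, then walks the caller's dma_block_config chain and generates packets for each block; all blocks of one request share the channel_direction from the dma_config. A minimal sketch of the block walk using the generic Zephyr DMA structures, with the per-block work left as a callback:

    #include <zephyr/drivers/dma.h>

    /* Visit each block in a dma_config's linked block list; 'emit_block'
     * stands in for the per-block packet generation.
     */
    static int walk_blocks(struct dma_config *config,
                           int (*emit_block)(struct dma_block_config *blk,
                                             enum dma_channel_direction dir))
    {
        struct dma_block_config *blk = config->head_block;
        uint32_t i;
        int ret;

        for (i = 0; i < config->block_count && blk != NULL; i++) {
            ret = emit_block(blk, config->channel_direction);
            if (ret) {
                return ret;
            }
            blk = blk->next_block;
        }

        return 0;
    }
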
998 struct dma_iproc_pax_data *pd = dev->data; in dma_iproc_pax_configure()
1004 return -EINVAL; in dma_iproc_pax_configure()
1007 ring = &(pd->ring[channel]); in dma_iproc_pax_configure()
1008 k_mutex_lock(&ring->lock, K_FOREVER); in dma_iproc_pax_configure()
1010 if (ring->ring_active) { in dma_iproc_pax_configure()
1011 ret = -EBUSY; in dma_iproc_pax_configure()
1015 if (cfg->block_count >= RM_V2_MAX_BLOCK_COUNT) { in dma_iproc_pax_configure()
1017 cfg->block_count, RM_V2_MAX_BLOCK_COUNT); in dma_iproc_pax_configure()
1018 ret = -ENOTSUP; in dma_iproc_pax_configure()
1022 ring->ring_active = 1; in dma_iproc_pax_configure()
1026 ring->ring_active = 0; in dma_iproc_pax_configure()
1030 ring->dma_callback = cfg->dma_callback; in dma_iproc_pax_configure()
1031 ring->callback_arg = cfg->user_data; in dma_iproc_pax_configure()
1033 k_mutex_unlock(&ring->lock); in dma_iproc_pax_configure()
1041 struct dma_iproc_pax_data *pd = dev->data; in dma_iproc_pax_transfer_start()
1046 return -EINVAL; in dma_iproc_pax_transfer_start()
1049 ring = &(pd->ring[channel]); in dma_iproc_pax_transfer_start()
1050 set_pkt_count(dev, channel, ring->total_pkt_count); in dma_iproc_pax_transfer_start()
1059 ret = wait_for_pkt_completion(dev, channel, ring->total_pkt_count); in dma_iproc_pax_transfer_start()
1067 k_mutex_lock(&ring->lock, K_FOREVER); in dma_iproc_pax_transfer_start()
1068 ring->ring_active = 0; in dma_iproc_pax_transfer_start()
1069 k_mutex_unlock(&ring->lock); in dma_iproc_pax_transfer_start()
1072 /* deactivate the ring until next active transfer */ in dma_iproc_pax_transfer_start()
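
From a caller's point of view the ring bookkeeping above sits behind the generic Zephyr DMA API: dma_iproc_pax_configure() validates the channel, block count, and ring state, and dma_iproc_pax_transfer_start() programs the packet count, rings the doorbell, and waits for completion. A hedged usage sketch (device pointer, channel number, addresses, and sizes are placeholders; the 64-bit addresses assume CONFIG_DMA_64BIT):

    #include <zephyr/kernel.h>
    #include <zephyr/device.h>
    #include <zephyr/drivers/dma.h>

    static void xfer_done(const struct device *dev, void *user_data,
                          uint32_t channel, int status)
    {
        /* status is 0, -EIO or -ETIMEDOUT as reported by the ring */
        ARG_UNUSED(dev);
        ARG_UNUSED(user_data);
        ARG_UNUSED(channel);
        ARG_UNUSED(status);
    }

    int start_host_write(const struct device *dma_dev, uint32_t channel,
                         uintptr_t local_buf, uint64_t host_pci_addr,
                         uint32_t len)
    {
        struct dma_block_config blk = {
            .source_address = local_buf,      /* local AXI memory */
            .dest_address = host_pci_addr,    /* host PCIe address */
            .block_size = len,
        };
        struct dma_config cfg = {
            .channel_direction = MEMORY_TO_HOST,
            .block_count = 1,
            .head_block = &blk,
            .dma_callback = xfer_done,
        };
        int ret;

        ret = dma_config(dma_dev, channel, &cfg);
        if (ret) {
            return ret;
        }

        return dma_start(dma_dev, channel);
    }
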