Lines Matching +full:header +full:- +full:payload
4 * SPDX-License-Identifier: Apache-2.0
33 return ring->pkt_id = 0x0; in reset_pkt_id()
41 ring->pkt_id = (ring->pkt_id + 1) % 32; in alloc_pkt_id()
42 return ring->pkt_id; in alloc_pkt_id()
47 return ring->pkt_id; in curr_pkt_id()
52 return ring->curr.toggle; in curr_toggle_val()
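The helpers matched above manage the ring's opaque packet id and current toggle value: the id resets to zero, advances modulo 32 on each allocation, and is read back when building a header. A minimal standalone sketch of that counter, using a hypothetical struct that carries only the fields these helpers touch:

#include <stdint.h>

struct pkt_id_state {
        uint32_t pkt_id;   /* opaque id placed in the RM header's opq field */
        uint32_t toggle;   /* current ring toggle (validity) value */
};

/* Reset the id space, e.g. when the ring is (re)initialized */
static uint32_t reset_pkt_id(struct pkt_id_state *ring)
{
        return ring->pkt_id = 0x0;
}

/* Allocate the id for the next packet; the id space wraps after 32 */
static uint32_t alloc_pkt_id(struct pkt_id_state *ring)
{
        ring->pkt_id = (ring->pkt_id + 1) % 32;
        return ring->pkt_id;
}

/* Id of the packet currently being built */
static uint32_t curr_pkt_id(struct pkt_id_state *ring)
{
        return ring->pkt_id;
}

/* Toggle value the next header must carry to be seen as valid */
static uint32_t curr_toggle_val(struct pkt_id_state *ring)
{
        return ring->toggle;
}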
56 * @brief Populate header descriptor
63 r->opq = opq; in rm_write_header_desc()
65 r->bdcount = bdcount; in rm_write_header_desc()
66 r->prot = 0x0; in rm_write_header_desc()
68 r->start = 1; in rm_write_header_desc()
69 r->end = 1; in rm_write_header_desc()
70 r->toggle = toggle; in rm_write_header_desc()
71 /* RM header type */ in rm_write_header_desc()
72 r->type = PAX_DMA_TYPE_RM_HEADER; in rm_write_header_desc()
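From the assignments matched above, the RM header descriptor carries the opaque id, the buffer-descriptor count, a protection field cleared to zero, start/end markers, the toggle bit, and the PAX_DMA_TYPE_RM_HEADER type code. A hedged reconstruction; the bit-field widths and the type macro's value are assumptions, as the real layout lives in the driver's private header:

#define PAX_DMA_TYPE_RM_HEADER 0x1   /* placeholder value */

/* Hypothetical packed layout; field widths are assumptions */
struct rm_header {
        uint32_t opq     : 16;  /* opaque id echoed back in the completion */
        uint32_t bdcount : 5;   /* buffer descriptors in this packet */
        uint32_t prot    : 2;
        uint32_t start   : 1;   /* first descriptor of the packet */
        uint32_t end     : 1;   /* last descriptor of the packet */
        uint32_t toggle  : 1;   /* validity bit the ring manager polls */
        uint32_t type    : 4;
};

static void rm_write_header_desc(void *desc, uint32_t toggle,
                                 uint32_t opq, uint32_t bdcount)
{
        struct rm_header *r = desc;

        r->opq = opq;
        r->bdcount = bdcount;
        r->prot = 0x0;
        r->start = 1;
        r->end = 1;
        r->toggle = toggle;
        r->type = PAX_DMA_TYPE_RM_HEADER;   /* RM header type */
}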
76 * @brief Fill RM header descriptor for next transfer
83 /* Toggle bit is invalid until next payload configured */ in rm_write_header_next_desc()
84 rm_write_header_desc(desc, (r->curr.toggle == 0) ? 1 : 0, opq, bdcount); in rm_write_header_next_desc()
92 r->bdcount = bdcount; in rm_header_set_bd_count()
99 r->toggle = toggle; in rm_header_set_toggle()
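The header for the next transfer is pre-written with the opposite of the current toggle, so the ring manager ignores it until the following payload is queued; its bdcount and toggle are then patched in just before submission. A short sketch of that sequencing, reusing the rm_header layout and rm_write_header_desc() from the sketch above:

/* Pre-write the next packet's header with an (invalid) inverted toggle */
static void rm_write_header_next_desc(void *desc, uint32_t curr_toggle,
                                      uint32_t opq, uint32_t bdcount)
{
        rm_write_header_desc(desc, (curr_toggle == 0) ? 1 : 0, opq, bdcount);
}

/* Patch helpers used once the payload descriptors are in place */
static void rm_header_set_bd_count(void *desc, uint32_t bdcount)
{
        ((struct rm_header *)desc)->bdcount = bdcount;
}

static void rm_header_set_toggle(void *desc, uint32_t toggle)
{
        ((struct rm_header *)desc)->toggle = toggle;
}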
103 * @brief Populate dma header descriptor
110 hdr->length = pl->xfer_sz; in rm_write_dma_header_desc()
111 hdr->opcode = pl->direction; in rm_write_dma_header_desc()
112 /* DMA header type */ in rm_write_dma_header_desc()
113 hdr->type = PAX_DMA_TYPE_DMA_DESC; in rm_write_dma_header_desc()
124 axi->axi_addr = pl->axi_addr; in rm_write_axi_addr_desc()
125 axi->type = PAX_DMA_TYPE_DMA_DESC; in rm_write_axi_addr_desc()
137 pci->pcie_addr = pl->pci_addr >> PAX_DMA_PCI_ADDR_ALIGNMT_SHIFT; in rm_write_pci_addr_desc()
138 pci->type = PAX_DMA_TYPE_DMA_DESC; in rm_write_pci_addr_desc()
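Each scatter element expands into three descriptors: a DMA header with the transfer length and a direction opcode, an AXI (local address) descriptor, and a PCIe address descriptor whose address is stored pre-shifted by PAX_DMA_PCI_ADDR_ALIGNMT_SHIFT. A hedged sketch; the descriptor layouts are shown as plain fields rather than the packed hardware format, and the macro values are placeholders:

#define PAX_DMA_TYPE_DMA_DESC          0x2   /* placeholder value */
#define PAX_DMA_PCI_ADDR_ALIGNMT_SHIFT 2     /* placeholder value */

struct dma_hdr_desc  { uint64_t length, opcode, type; };
struct axi_addr_desc { uint64_t axi_addr, type; };
struct pci_addr_desc { uint64_t pcie_addr, type; };

/* Assumed minimal payload description; the driver's struct has more fields */
struct dma_payload {
        uint64_t pci_addr;   /* host (PCIe) address */
        uint64_t axi_addr;   /* local (AXI) address */
        uint32_t xfer_sz;    /* bytes to move */
        uint32_t direction;  /* opcode: card-to-host or host-to-card */
};

static void write_payload_descs(struct dma_hdr_desc *hdr,
                                struct axi_addr_desc *axi,
                                struct pci_addr_desc *pci,
                                const struct dma_payload *pl)
{
        hdr->length = pl->xfer_sz;         /* DMA header: size and opcode */
        hdr->opcode = pl->direction;
        hdr->type = PAX_DMA_TYPE_DMA_DESC;

        axi->axi_addr = pl->axi_addr;      /* local side of the transfer */
        axi->type = PAX_DMA_TYPE_DMA_DESC;

        /* PCIe address is kept right-shifted by the alignment requirement */
        pci->pcie_addr = pl->pci_addr >> PAX_DMA_PCI_ADDR_ALIGNMT_SHIFT;
        pci->type = PAX_DMA_TYPE_DMA_DESC;
}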
150 curr = (uintptr_t)ring->curr.write_ptr + PAX_DMA_RM_DESC_BDWIDTH; in next_desc_addr()
153 if (nxt->type == PAX_DMA_TYPE_NEXT_PTR) { in next_desc_addr()
155 curr, nxt->toggle, (uintptr_t)nxt->addr); in next_desc_addr()
156 uintptr_t last = (uintptr_t)ring->bd + in next_desc_addr()
158 ring->curr.toggle = (ring->curr.toggle == 0) ? 1 : 0; in next_desc_addr()
162 curr = (uintptr_t)ring->bd; in next_desc_addr()
167 ring->curr.write_ptr = (void *)curr; in next_desc_addr()
179 nxt->addr = (uintptr_t)next_ptr; in rm_write_next_table_desc()
180 nxt->type = PAX_DMA_TYPE_NEXT_PTR; in rm_write_next_table_desc()
181 nxt->toggle = toggle; in rm_write_next_table_desc()
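next_desc_addr() advances the write pointer one PAX_DMA_RM_DESC_BDWIDTH step at a time; when the slot it lands on holds a PAX_DMA_TYPE_NEXT_PTR descriptor, written at setup by rm_write_next_table_desc(), the pointer follows the chained address and the ring-level toggle flips once a full lap has been made, so stale entries from the previous lap are ignored. A simplified single-buffer sketch of that advance, with placeholder constants and a hypothetical next-pointer layout:

#include <stdint.h>

#define PAX_DMA_TYPE_NEXT_PTR    0x3   /* placeholder value */
#define PAX_DMA_RM_DESC_BDWIDTH  8     /* placeholder descriptor width, bytes */

struct next_ptr_desc { uint64_t addr; uint32_t type; uint32_t toggle; };

struct bd_ring {
        void *bd;          /* base of the BD buffer */
        void *write_ptr;   /* next descriptor slot to fill */
        uint32_t toggle;   /* ring-level validity toggle */
};

/* Written once at setup: marks the wrap point of a BD buffer */
static void rm_write_next_table_desc(void *desc, void *next_ptr,
                                     uint32_t toggle)
{
        struct next_ptr_desc *nxt = desc;

        nxt->addr = (uintptr_t)next_ptr;
        nxt->type = PAX_DMA_TYPE_NEXT_PTR;
        nxt->toggle = toggle;
}

/* Advance the write pointer by one descriptor, wrapping at the next pointer */
static void *next_desc_addr(struct bd_ring *ring)
{
        uintptr_t curr = (uintptr_t)ring->write_ptr + PAX_DMA_RM_DESC_BDWIDTH;
        struct next_ptr_desc *nxt = (struct next_ptr_desc *)curr;

        if (nxt->type == PAX_DMA_TYPE_NEXT_PTR) {
                /*
                 * Wrap: follow the chained address. With a single BD buffer
                 * the chain points back at the base, so every wrap also flips
                 * the toggle (the real driver flips it only after the last of
                 * its BD buffers).
                 */
                ring->toggle = (ring->toggle == 0) ? 1 : 0;
                curr = (uintptr_t)nxt->addr;
        }

        ring->write_ptr = (void *)curr;
        return ring->write_ptr;
}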
191 memset(ring->bd, 0x0, PAX_DMA_RM_DESC_RING_SIZE * PAX_DMA_NUM_BD_BUFFS); in prepare_ring()
192 memset(ring->cmpl, 0x0, PAX_DMA_RM_CMPL_RING_SIZE); in prepare_ring()
195 rm_write_header_desc(ring->bd, 0x0, reset_pkt_id(ring), in prepare_ring()
199 curr = (uintptr_t)ring->bd; in prepare_ring()
213 next = (uintptr_t)ring->bd; in prepare_ring()
216 } while (--buff_count); in prepare_ring()
220 /* start programming from first RM header */ in prepare_ring()
221 ring->curr.write_ptr = ring->bd; in prepare_ring()
223 ring->curr.toggle = 1; in prepare_ring()
225 ring->curr.cmpl_rd_offs = 0; in prepare_ring()
228 ring->curr.sync_data.signature = PAX_DMA_WRITE_SYNC_SIGNATURE; in prepare_ring()
229 ring->curr.sync_data.ring = ring->idx; in prepare_ring()
231 ring->curr.sync_data.opaque = 0x0; in prepare_ring()
233 ring->curr.sync_data.total_pkts = 0x0; in prepare_ring()
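prepare_ring() zeroes the BD buffers and the completion ring, writes the first RM header with a freshly reset packet id, and seeds the software bookkeeping: write pointer at the ring base, toggle 1, completion read offset 0, and a write-sync record stamped with the sync signature and the ring index. A condensed sketch reusing rm_write_header_desc() from above, with a cut-down ring struct; the bdcount passed to the first header and the sync field widths are placeholders since the real values are truncated in the matches:

#include <string.h>

#define PAX_DMA_WRITE_SYNC_SIGNATURE 0xA5   /* placeholder value */

/* Packed into one 32-bit word; field widths are assumptions */
struct write_sync_data {
        uint32_t signature  : 8;
        uint32_t ring       : 4;    /* ring index */
        uint32_t opaque     : 8;    /* opaque id of the last submission */
        uint32_t total_pkts : 12;   /* packets submitted so far */
};

struct ring_sw_state {
        void *bd;              /* BD buffer base */
        void *cmpl;            /* completion ring base */
        void *write_ptr;
        uint32_t toggle;
        uint32_t cmpl_rd_offs;
        uint32_t idx;
        struct write_sync_data sync_data;
};

static void prepare_ring(struct ring_sw_state *ring, uint32_t bd_bytes,
                         uint32_t cmpl_bytes, uint32_t first_bdcount)
{
        /* Clear every BD buffer and the completion ring */
        memset(ring->bd, 0x0, bd_bytes);
        memset(ring->cmpl, 0x0, cmpl_bytes);

        /* First RM header: toggle 0 (not yet valid), fresh packet id */
        rm_write_header_desc(ring->bd, 0x0, 0x0 /* reset id */, first_bdcount);

        /* Start programming from the first RM header */
        ring->write_ptr = ring->bd;
        ring->toggle = 1;
        ring->cmpl_rd_offs = 0;

        /* Seed the write-sync record that will be mirrored to the host */
        ring->sync_data.signature = PAX_DMA_WRITE_SYNC_SIGNATURE;
        ring->sync_data.ring = ring->idx;
        ring->sync_data.opaque = 0x0;
        ring->sync_data.total_pkts = 0x0;
}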
238 int ret = -ETIMEDOUT, timeout = 1000; in init_rm()
240 k_mutex_lock(&pd->dma_lock, K_FOREVER); in init_rm()
250 } while (--timeout); in init_rm()
251 k_mutex_unlock(&pd->dma_lock); in init_rm()
266 k_mutex_lock(&pd->dma_lock, K_FOREVER); in rm_cfg_start()
317 k_mutex_unlock(&pd->dma_lock); in rm_cfg_start()
335 k_mutex_lock(&pd->dma_lock, K_FOREVER); in rm_cfg_finish()
342 k_mutex_unlock(&pd->dma_lock); in rm_cfg_finish()
364 uintptr_t desc = (uintptr_t)pd->ring[idx].bd; in init_ring()
365 uintptr_t cmpl = (uintptr_t)pd->ring[idx].cmpl; in init_ring()
368 k_mutex_lock(&pd->dma_lock, K_FOREVER); in init_ring()
384 } while (--timeout); in init_ring()
388 ret = -ETIMEDOUT; in init_ring()
417 k_mutex_unlock(&pd->dma_lock); in init_ring()
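init_rm(), rm_cfg_start()/rm_cfg_finish() and init_ring() all follow the same shape: take pd->dma_lock, program the ring-manager registers, then poll a status condition with a decrementing retry counter and fall back to -ETIMEDOUT if it never comes true. A generic sketch of that pattern; the ready condition and the per-iteration delay are placeholders, not the driver's real register reads:

#include <errno.h>
#include <stdbool.h>
#include <zephyr/kernel.h>

/* Placeholder for whatever status bit the real code polls */
static bool hw_is_ready(void);

static int poll_hw_ready(struct k_mutex *dma_lock)
{
        int ret = -ETIMEDOUT, timeout = 1000;

        k_mutex_lock(dma_lock, K_FOREVER);
        do {
                if (hw_is_ready()) {
                        ret = 0;
                        break;
                }
                k_busy_wait(1);   /* placeholder inter-poll delay */
        } while (--timeout);
        k_mutex_unlock(dma_lock);

        return ret;
}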
425 const struct dma_iproc_pax_cfg *cfg = dev->config; in poll_on_write_sync()
433 sent = &(ring->curr.sync_data); in poll_on_write_sync()
436 pci32[0] = ring->sync_pci.addr_lo; in poll_on_write_sync()
437 pci32[1] = ring->sync_pci.addr_hi; in poll_on_write_sync()
441 ret = pcie_ep_xfer_data_memcpy(cfg->pcie_dev, pci_addr, in poll_on_write_sync()
447 ret = pcie_ep_xfer_data_memcpy(cfg->pcie_dev, pci_addr, in poll_on_write_sync()
456 } while (--timeout); in poll_on_write_sync()
459 LOG_DBG("[ring %d]: not recvd write sync!\n", ring->idx); in poll_on_write_sync()
460 ret = -ETIMEDOUT; in poll_on_write_sync()
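poll_on_write_sync() reads the 4-byte sync record back from host memory through the PCIe endpoint API (pcie_ep_xfer_data_memcpy() on cfg->pcie_dev) and compares it with the record the driver queued, retrying until they match or the retry budget runs out. A hedged sketch of that loop, reusing the ring_sw_state sketch above; the host read is wrapped in a hypothetical helper so the endpoint call's full argument list, which is truncated in the matches, is not guessed at:

/* Hypothetical stand-in for the pcie_ep_xfer_data_memcpy() read shown above */
static int read_sync_from_host(uint64_t pci_addr, uint32_t *out);

static int poll_on_write_sync(struct ring_sw_state *ring, uint64_t pci_addr)
{
        uint32_t sent, recvd = 0;
        int timeout = 1000;   /* placeholder retry budget */

        /* The 4-byte record the last submission asked the engine to mirror */
        memcpy(&sent, &ring->sync_data, sizeof(sent));

        do {
                if (read_sync_from_host(pci_addr, &recvd) == 0 &&
                    recvd == sent) {
                        return 0;
                }
        } while (--timeout);

        return -ETIMEDOUT;    /* write sync never observed on the host side */
}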
469 struct dma_iproc_pax_data *pd = dev->data; in process_cmpl_event()
471 struct dma_iproc_pax_ring_data *ring = &(pd->ring[idx]); in process_cmpl_event()
476 rd_offs = ring->curr.cmpl_rd_offs; in process_cmpl_event()
482 ring->curr.cmpl_rd_offs = wr_offs; in process_cmpl_event()
495 c = (struct cmpl_pkt *)((uintptr_t)ring->cmpl + in process_cmpl_event()
499 idx, wr_offs, c->opq, c->rm_status, c->dma_status); in process_cmpl_event()
503 if ((ring->curr.opq != c->opq) && (is_outstanding != 0)) { in process_cmpl_event()
505 idx, ring->curr.opq, c->opq, is_outstanding); in process_cmpl_event()
506 ret = -EIO; in process_cmpl_event()
509 if (c->rm_status == RM_COMPLETION_AE_TIMEOUT) { in process_cmpl_event()
511 idx, wr_offs, c->rm_status); in process_cmpl_event()
514 ret = -ETIMEDOUT; in process_cmpl_event()
517 if (ring->dma_callback) { in process_cmpl_event()
518 ring->dma_callback(dev, ring->callback_arg, idx, ret); in process_cmpl_event()
528 struct dma_iproc_pax_data *pd = dev->data; in peek_ring_cmpl()
530 struct dma_iproc_pax_ring_data *ring = &(pd->ring[idx]); in peek_ring_cmpl()
533 rd_offs = ring->curr.cmpl_rd_offs; in peek_ring_cmpl()
543 } while (--timeout); in peek_ring_cmpl()
550 return -ETIMEDOUT; in peek_ring_cmpl()
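Completion handling reads packets from the completion ring between the saved read offset and the hardware write offset, checks each packet's opaque id against the one the driver issued, maps an RM_COMPLETION_AE_TIMEOUT ring-manager status to -ETIMEDOUT, and reports the result through the registered dma_callback; peek_ring_cmpl() is the polling front end that waits for the write offset to move. A condensed sketch of the per-packet checks, with a hypothetical completion-packet layout and callback signature:

#define RM_COMPLETION_AE_TIMEOUT 0x3   /* placeholder status code */

/* Hypothetical completion packet; the real layout is a packed descriptor */
struct cmpl_pkt {
        uint32_t opq;         /* opaque id echoed back by the engine */
        uint32_t rm_status;   /* ring-manager status */
        uint32_t dma_status;  /* DMA engine status */
};

typedef void (*dma_done_cb)(void *arg, uint32_t channel, int status);

static int check_cmpl_pkt(uint32_t expected_opq, const struct cmpl_pkt *c,
                          int outstanding, dma_done_cb cb, void *cb_arg,
                          uint32_t channel)
{
        int ret = 0;

        /* Stale or out-of-order completion: id does not match what we sent */
        if ((expected_opq != c->opq) && (outstanding != 0)) {
                ret = -EIO;
        }

        /* The ring manager flagged an application-engine timeout */
        if (c->rm_status == RM_COMPLETION_AE_TIMEOUT) {
                ret = -ETIMEDOUT;
        }

        if (cb) {
                cb(cb_arg, channel, ret);
        }

        return ret;
}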
559 struct dma_iproc_pax_data *pd = dev->data; in rm_isr()
581 k_sem_give(&pd->ring[idx].alert); in rm_isr()
589 const struct dma_iproc_pax_cfg *cfg = dev->config; in dma_iproc_pax_init()
590 struct dma_iproc_pax_data *pd = dev->data; in dma_iproc_pax_init()
594 if (!device_is_ready(cfg->pcie_dev)) { in dma_iproc_pax_init()
596 return -ENODEV; in dma_iproc_pax_init()
599 pd->dma_base = cfg->dma_base; in dma_iproc_pax_init()
600 pd->rm_comm_base = cfg->rm_comm_base; in dma_iproc_pax_init()
601 pd->used_rings = (cfg->use_rings < PAX_DMA_RINGS_MAX) ? in dma_iproc_pax_init()
602 cfg->use_rings : PAX_DMA_RINGS_MAX; in dma_iproc_pax_init()
605 pd->dma_base, pd->rm_comm_base, pd->used_rings); in dma_iproc_pax_init()
608 k_mutex_init(&pd->dma_lock); in dma_iproc_pax_init()
612 return -ETIMEDOUT; in dma_iproc_pax_init()
619 for (r = 0; r < pd->used_rings; r++) { in dma_iproc_pax_init()
620 /* per-ring mutex lock */ in dma_iproc_pax_init()
621 k_mutex_init(&pd->ring[r].lock); in dma_iproc_pax_init()
623 k_sem_init(&pd->ring[r].alert, 0, 1); in dma_iproc_pax_init()
625 pd->ring[r].idx = r; in dma_iproc_pax_init()
626 pd->ring[r].ring_base = cfg->rm_base + in dma_iproc_pax_init()
628 LOG_DBG("RING%d,VERSION:0x%x\n", pd->ring[r].idx, in dma_iproc_pax_init()
631 /* Allocate for 2 BD buffers + cmpl buffer + payload struct */ in dma_iproc_pax_init()
632 pd->ring[r].ring_mem = (void *)((uintptr_t)cfg->bd_memory_base + in dma_iproc_pax_init()
635 if (!pd->ring[r].ring_mem) { in dma_iproc_pax_init()
637 return -ENOMEM; in dma_iproc_pax_init()
640 mem_aligned = ((uintptr_t)pd->ring[r].ring_mem + in dma_iproc_pax_init()
641 PAX_DMA_RING_ALIGN - 1) & in dma_iproc_pax_init()
642 ~(PAX_DMA_RING_ALIGN - 1); in dma_iproc_pax_init()
644 pd->ring[r].cmpl = (void *)mem_aligned; in dma_iproc_pax_init()
645 pd->ring[r].bd = (void *)(mem_aligned + in dma_iproc_pax_init()
647 pd->ring[r].payload = (void *)((uintptr_t)pd->ring[r].bd + in dma_iproc_pax_init()
652 pd->ring[r].idx, in dma_iproc_pax_init()
653 pd->ring[r].ring_mem, in dma_iproc_pax_init()
656 pd->ring[r].idx, in dma_iproc_pax_init()
657 pd->ring[r].bd, in dma_iproc_pax_init()
658 pd->ring[r].cmpl, in dma_iproc_pax_init()
659 pd->ring[r].payload); in dma_iproc_pax_init()
662 prepare_ring(&(pd->ring[r])); in dma_iproc_pax_init()
680 LOG_INF("%s PAX DMA rings in poll mode!\n", dev->name); in dma_iproc_pax_init()
682 LOG_INF("%s RM setup %d rings\n", dev->name, pd->used_rings); in dma_iproc_pax_init()
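Per ring, dma_iproc_pax_init() carves one contiguous slab out of cfg->bd_memory_base: the slab base is rounded up to PAX_DMA_RING_ALIGN, the completion ring sits at the aligned address, the two BD buffers follow it, and the per-ring payload scratch struct follows the BDs. A sketch of that carving; the region sizes and the exact offsets between cmpl, bd and payload are truncated in the matches above, so the macros below are placeholders:

#define PAX_DMA_RING_ALIGN         0x100    /* placeholder alignment */
#define PAX_DMA_RM_CMPL_RING_SIZE  0x400    /* placeholder size */
#define PAX_DMA_RM_DESC_RING_SIZE  0x800    /* placeholder size */
#define PAX_DMA_NUM_BD_BUFFS       2        /* two BD buffers per ring */

struct ring_mem_layout {
        void *ring_mem;   /* unaligned slab handed to this ring */
        void *cmpl;       /* completion ring (at the aligned base) */
        void *bd;         /* BD buffers (after the completion ring) */
        void *payload;    /* payload scratch struct (after the BDs) */
};

static void carve_ring_mem(struct ring_mem_layout *ring, uintptr_t slab_base)
{
        uintptr_t mem_aligned = (slab_base + PAX_DMA_RING_ALIGN - 1) &
                                ~(uintptr_t)(PAX_DMA_RING_ALIGN - 1);

        ring->ring_mem = (void *)slab_base;
        ring->cmpl = (void *)mem_aligned;
        ring->bd = (void *)(mem_aligned + PAX_DMA_RM_CMPL_RING_SIZE);
        ring->payload = (void *)((uintptr_t)ring->bd +
                                 (PAX_DMA_RM_DESC_RING_SIZE *
                                  PAX_DMA_NUM_BD_BUFFS));
}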
707 struct dma_iproc_pax_data *pd = dev->data; in set_pkt_count()
723 struct dma_iproc_pax_data *pd = dev->data; in wait_for_pkt_completion()
726 ring = &(pd->ring[idx]); in wait_for_pkt_completion()
729 if (k_sem_take(&ring->alert, K_MSEC(PAX_DMA_TIMEOUT)) != 0) { in wait_for_pkt_completion()
731 return -ETIMEDOUT; in wait_for_pkt_completion()
743 struct dma_iproc_pax_data *pd = dev->data; in dma_iproc_pax_do_xfer()
744 const struct dma_iproc_pax_cfg *cfg = dev->config; in dma_iproc_pax_do_xfer()
752 ring = &(pd->ring[idx]); in dma_iproc_pax_do_xfer()
753 pl = ring->payload; in dma_iproc_pax_do_xfer()
756 * Host sync buffer isn't ready at zephyr/driver init-time in dma_iproc_pax_do_xfer()
760 if ((ring->sync_pci.addr_lo == 0x0) && in dma_iproc_pax_do_xfer()
761 (ring->sync_pci.addr_hi == 0x0)) { in dma_iproc_pax_do_xfer()
763 LOG_DBG("sync addr loc 0x%x\n", cfg->scr_addr_loc); in dma_iproc_pax_do_xfer()
765 sync.addr_lo = sys_read32(cfg->scr_addr_loc in dma_iproc_pax_do_xfer()
767 sync.addr_hi = sys_read32(cfg->scr_addr_loc); in dma_iproc_pax_do_xfer()
768 ring->sync_pci.addr_lo = sync.addr_lo + idx * 4; in dma_iproc_pax_do_xfer()
769 ring->sync_pci.addr_hi = sync.addr_hi; in dma_iproc_pax_do_xfer()
771 ring->sync_pci.addr_hi, in dma_iproc_pax_do_xfer()
772 ring->sync_pci.addr_lo); in dma_iproc_pax_do_xfer()
776 ring->curr.sync_data.opaque = ring->curr.opq; in dma_iproc_pax_do_xfer()
777 ring->curr.sync_data.total_pkts = pl_len; in dma_iproc_pax_do_xfer()
778 memcpy((void *)&ring->sync_loc, in dma_iproc_pax_do_xfer()
779 (void *)&(ring->curr.sync_data), 4); in dma_iproc_pax_do_xfer()
780 sync_pl.pci_addr = ring->sync_pci.addr_lo | in dma_iproc_pax_do_xfer()
781 (uint64_t)ring->sync_pci.addr_hi << 32; in dma_iproc_pax_do_xfer()
782 sync_pl.axi_addr = (uintptr_t)&ring->sync_loc; in dma_iproc_pax_do_xfer()
784 sync_pl.xfer_sz = 4; /* 4-bytes */ in dma_iproc_pax_do_xfer()
787 /* Get descriptor write pointer for first header */ in dma_iproc_pax_do_xfer()
788 hdr = (void *)ring->curr.write_ptr; in dma_iproc_pax_do_xfer()
790 toggle_bit = ring->curr.toggle; in dma_iproc_pax_do_xfer()
792 ring->curr.opq = curr_pkt_id(ring); in dma_iproc_pax_do_xfer()
794 /* DMA desc count for first payload */ in dma_iproc_pax_do_xfer()
797 /* Form dma descriptors for total sg payload */ in dma_iproc_pax_do_xfer()
809 /* Append write sync payload descriptors */ in dma_iproc_pax_do_xfer()
814 /* RM header for next transfer, RM wait on (invalid) toggle bit */ in dma_iproc_pax_do_xfer()
823 /* set toggle to valid in first header */ in dma_iproc_pax_do_xfer()
834 k_mutex_lock(&ring->lock, K_FOREVER); in dma_iproc_pax_do_xfer()
835 ring->ring_active = 0; in dma_iproc_pax_do_xfer()
836 k_mutex_unlock(&ring->lock); in dma_iproc_pax_do_xfer()
839 ring->ring_active = 0; in dma_iproc_pax_do_xfer()
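Every submission ends with a 4-byte "write sync" payload: the current opaque id and packet count are packed into ring->sync_loc, and one extra descriptor DMAs those 4 bytes to a per-ring host address (the scratch-register value plus idx * 4) so poll_on_write_sync() can later confirm the whole batch landed on the host. A sketch of just that sync-payload assembly, reusing the write_sync_data and dma_payload sketches above; the direction opcode is an assumption:

struct pci_sync_addr { uint32_t addr_lo, addr_hi; };

static void build_sync_payload(struct ring_sw_state *ring,
                               const struct pci_sync_addr *sync_pci,
                               volatile uint32_t *sync_loc,
                               uint32_t opq, uint32_t total_pkts,
                               struct dma_payload *sync_pl)
{
        /* Stamp what this batch will report back to the host */
        ring->sync_data.opaque = opq;
        ring->sync_data.total_pkts = total_pkts;

        /* Local 4-byte copy that the DMA engine will push to the host */
        memcpy((void *)sync_loc, (void *)&ring->sync_data, 4);

        /*
         * sync_pci was derived once from the scratch register:
         * addr_lo = scratch value + (ring index * 4), addr_hi = upper word.
         */
        sync_pl->pci_addr = sync_pci->addr_lo |
                            ((uint64_t)sync_pci->addr_hi << 32);
        sync_pl->axi_addr = (uintptr_t)sync_loc;
        sync_pl->direction = 0;   /* assumption: card-to-host opcode */
        sync_pl->xfer_sz = 4;     /* 4 bytes */
}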
849 struct dma_iproc_pax_data *pd = dev->data; in dma_iproc_pax_configure()
860 return -EINVAL; in dma_iproc_pax_configure()
863 ring = &(pd->ring[channel]); in dma_iproc_pax_configure()
864 k_mutex_lock(&ring->lock, K_FOREVER); in dma_iproc_pax_configure()
866 if (cfg->block_count > 1) { in dma_iproc_pax_configure()
868 ret = -ENOTSUP; in dma_iproc_pax_configure()
872 if (ring->ring_active) { in dma_iproc_pax_configure()
873 ret = -EBUSY; in dma_iproc_pax_configure()
877 ring->ring_active = 1; in dma_iproc_pax_configure()
879 if (cfg->channel_direction == MEMORY_TO_PERIPHERAL) { in dma_iproc_pax_configure()
881 axi_addr32 = (uint32_t *)&cfg->head_block->source_address; in dma_iproc_pax_configure()
882 pci_addr32 = (uint32_t *)&cfg->head_block->dest_address; in dma_iproc_pax_configure()
884 ring->payload->direction = CARD_TO_HOST; in dma_iproc_pax_configure()
885 ring->payload->pci_addr = cfg->head_block->dest_address; in dma_iproc_pax_configure()
886 ring->payload->axi_addr = cfg->head_block->source_address; in dma_iproc_pax_configure()
887 } else if (cfg->channel_direction == PERIPHERAL_TO_MEMORY) { in dma_iproc_pax_configure()
889 axi_addr32 = (uint32_t *)&cfg->head_block->dest_address; in dma_iproc_pax_configure()
890 pci_addr32 = (uint32_t *)&cfg->head_block->source_address; in dma_iproc_pax_configure()
892 ring->payload->direction = HOST_TO_CARD; in dma_iproc_pax_configure()
893 ring->payload->pci_addr = cfg->head_block->source_address; in dma_iproc_pax_configure()
894 ring->payload->axi_addr = cfg->head_block->dest_address; in dma_iproc_pax_configure()
896 ring->ring_active = 0; in dma_iproc_pax_configure()
897 ret = -ENOTSUP; in dma_iproc_pax_configure()
901 xfer_sz = cfg->head_block->block_size; in dma_iproc_pax_configure()
906 ring->ring_active = 0; in dma_iproc_pax_configure()
907 ret = -EINVAL; in dma_iproc_pax_configure()
913 ring->ring_active = 0; in dma_iproc_pax_configure()
914 ret = -EINVAL; in dma_iproc_pax_configure()
921 ring->ring_active = 0; in dma_iproc_pax_configure()
922 ret = -EINVAL; in dma_iproc_pax_configure()
929 ring->ring_active = 0; in dma_iproc_pax_configure()
930 ret = -EINVAL; in dma_iproc_pax_configure()
934 ring->payload->xfer_sz = xfer_sz; in dma_iproc_pax_configure()
936 ring->dma_callback = cfg->dma_callback; in dma_iproc_pax_configure()
937 ring->callback_arg = cfg->user_data; in dma_iproc_pax_configure()
939 k_mutex_unlock(&ring->lock); in dma_iproc_pax_configure()
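dma_iproc_pax_configure() maps Zephyr's generic channel direction onto the engine's view of the PCIe link: MEMORY_TO_PERIPHERAL becomes CARD_TO_HOST (AXI source, PCIe destination) and PERIPHERAL_TO_MEMORY becomes HOST_TO_CARD with the roles swapped; anything else is rejected with -ENOTSUP. A condensed sketch of just that mapping, reusing the dma_payload sketch above; the CARD_TO_HOST/HOST_TO_CARD values are placeholders for the driver's direction opcodes:

#include <errno.h>
#include <zephyr/drivers/dma.h>

enum pax_dir { CARD_TO_HOST, HOST_TO_CARD };   /* placeholder values */

static int map_direction(struct dma_payload *pl, const struct dma_config *cfg)
{
        const struct dma_block_config *blk = cfg->head_block;

        if (cfg->channel_direction == MEMORY_TO_PERIPHERAL) {
                /* Local memory -> host: AXI source, PCIe destination */
                pl->direction = CARD_TO_HOST;
                pl->axi_addr = blk->source_address;
                pl->pci_addr = blk->dest_address;
        } else if (cfg->channel_direction == PERIPHERAL_TO_MEMORY) {
                /* Host -> local memory: PCIe source, AXI destination */
                pl->direction = HOST_TO_CARD;
                pl->pci_addr = blk->source_address;
                pl->axi_addr = blk->dest_address;
        } else {
                return -ENOTSUP;
        }

        pl->xfer_sz = blk->block_size;
        return 0;
}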
948 struct dma_iproc_pax_data *pd = dev->data; in dma_iproc_pax_transfer_start()
953 return -EINVAL; in dma_iproc_pax_transfer_start()
955 ring = &(pd->ring[channel]); in dma_iproc_pax_transfer_start()
957 ret = dma_iproc_pax_do_xfer(dev, channel, ring->payload, 1); in dma_iproc_pax_transfer_start()