/*
 * Copyright 2020 Broadcom
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT brcm_iproc_pax_dma_v2

#include <zephyr/arch/cpu.h>
#include <zephyr/cache.h>
#include <errno.h>
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/linker/sections.h>
#include <soc.h>
#include <string.h>
#include <zephyr/toolchain.h>
#include <zephyr/types.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/pcie/endpoint/pcie_ep.h>
#include "dma_iproc_pax_v2.h"

#define LOG_LEVEL CONFIG_DMA_LOG_LEVEL
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(dma_iproc_pax_v2);

/* Driver runtime data for PAX DMA and RM */
static struct dma_iproc_pax_data pax_dma_data;

/**
 * @brief Opaque/packet id allocator, range 0 to 31
 */
static inline uint32_t reset_pkt_id(struct dma_iproc_pax_ring_data *ring)
{
	return ring->pkt_id = 0x0;
}

static inline uint32_t alloc_pkt_id(struct dma_iproc_pax_ring_data *ring)
{
	ring->pkt_id = (ring->pkt_id + 1) % 32;
	return ring->pkt_id;
}

static inline uint32_t curr_pkt_id(struct dma_iproc_pax_ring_data *ring)
{
	return ring->pkt_id;
}

static inline uint32_t curr_toggle_val(struct dma_iproc_pax_ring_data *ring)
{
	return ring->curr.toggle;
}

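/*
 * Descriptor chain produced by the rm_write_*_desc() helpers below,
 * for a single RM packet:
 *
 *	[ rm_header ]    - opaque pkt id, BD count, PCIe address MSBs
 *	[ pcie_desc ]    - PCIe (host) address LSBs
 *	[ src_dst_desc ] - AXI (card) address, length, direction
 *	[ pcie_desc ]    \ repeated per block, up to
 *	[ src_dst_desc ] / MAX_BD_COUNT_PER_HEADER non-header BDs
 *
 * In toggle mode each BD also carries the ring's current toggle bit;
 * in doorbell mode the toggle field is written as zero.
 */
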
/**
 * @brief Populate header descriptor
 */
static inline void rm_write_header_desc(void *desc, uint32_t toggle,
					uint32_t opq, uint32_t bdcount,
					uint64_t pci_addr)
{
	struct rm_header *r = (struct rm_header *)desc;

	r->opq = opq;
	r->bdf = 0x0;
	r->res1 = 0x0;
	/* DMA descriptor count init value */
	r->bdcount = bdcount;
	r->prot = 0x0;
	r->res2 = 0x0;
	/* No packet extension, start and end set to '1' */
	r->start = 1;
	r->end = 1;
	/* RM header type */
	r->type = PAX_DMA_TYPE_RM_HEADER;
	r->pcie_addr_msb = PAX_DMA_PCI_ADDR_HI_MSB8(pci_addr);
	r->res3 = 0x0;
	r->res4 = 0x0;
#ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE
	r->toggle = toggle;
#elif CONFIG_DMA_IPROC_PAX_DOORBELL_MODE
	r->toggle = 0;
#endif
}

/**
 * @brief Populate pcie descriptor
 */
static inline void rm_write_pcie_desc(void *desc,
				      uint32_t toggle,
				      uint64_t pci_addr)
{
	struct pcie_desc *pcie = (struct pcie_desc *)desc;

	pcie->pcie_addr_lsb = pci_addr;
	pcie->res1 = 0x0;
	/* PCIE header type */
	pcie->type = PAX_DMA_TYPE_PCIE_DESC;
#ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE
	pcie->toggle = toggle;
#elif CONFIG_DMA_IPROC_PAX_DOORBELL_MODE
	pcie->toggle = 0;
#endif
}

/**
 * @brief Populate src/destination descriptor
 */
static inline void rm_write_src_dst_desc(void *desc_ptr,
					 bool is_mega,
					 uint32_t toggle,
					 uint64_t axi_addr,
					 uint32_t size,
					 enum pax_dma_dir direction)
{
	struct src_dst_desc *desc;

	desc = (struct src_dst_desc *)desc_ptr;
	desc->axi_addr = axi_addr;
	desc->length = size;
#ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE
	desc->toggle = toggle;
#elif CONFIG_DMA_IPROC_PAX_DOORBELL_MODE
	desc->toggle = 0;
#endif

	if (direction == CARD_TO_HOST) {
		desc->type = is_mega ?
			     PAX_DMA_TYPE_MEGA_SRC_DESC : PAX_DMA_TYPE_SRC_DESC;
	} else {
		desc->type = is_mega ?
			     PAX_DMA_TYPE_MEGA_DST_DESC : PAX_DMA_TYPE_DST_DESC;
	}
}

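/*
 * A "mega" src/dst descriptor expresses its length in units of
 * PAX_DMA_MEGA_LENGTH_MULTIPLE bytes, while a regular descriptor uses
 * plain bytes; dma_iproc_pax_gen_packets() below splits each block into
 * one mega descriptor for the multiple-sized portion and one regular
 * descriptor for the remainder.
 */
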
#ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE
static void init_toggle(void *desc, uint32_t toggle)
{
	struct rm_header *r = (struct rm_header *)desc;

	r->toggle = toggle;
}
#endif

/**
 * @brief Return current descriptor memory address and
 *        increment to point to next descriptor memory address.
 */
static inline void *get_curr_desc_addr(struct dma_iproc_pax_ring_data *ring)
{
	struct next_ptr_desc *nxt;
	uintptr_t curr;

	curr = (uintptr_t)ring->curr.write_ptr;
	/* if hit next table ptr, skip to next location, flip toggle */
	nxt = (struct next_ptr_desc *)curr;
	if (nxt->type == PAX_DMA_TYPE_NEXT_PTR) {
		LOG_DBG("hit next_ptr@0x%lx %d, next_table@0x%lx\n",
			curr, nxt->toggle, (uintptr_t)nxt->addr);
		uintptr_t last = (uintptr_t)ring->bd +
				 PAX_DMA_RM_DESC_RING_SIZE *
				 PAX_DMA_NUM_BD_BUFFS;
		nxt->toggle = ring->curr.toggle;
		ring->curr.toggle = (ring->curr.toggle == 0) ? 1 : 0;
		/* move to next addr, wrap around if hits end */
		curr += PAX_DMA_RM_DESC_BDWIDTH;
		if (curr == last) {
			curr = (uintptr_t)ring->bd;
			LOG_DBG("hit end of desc:0x%lx, wrap to 0x%lx\n",
				last, curr);
		}
		ring->descs_inflight++;
	}

	ring->curr.write_ptr = (void *)(curr + PAX_DMA_RM_DESC_BDWIDTH);
	ring->descs_inflight++;

	return (void *)curr;
}

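/*
 * Example walk of get_curr_desc_addr() on a two-buffer ring (per the
 * "2 BD buffers" allocation in dma_iproc_pax_init()): when the write
 * pointer lands on the next_ptr BD placed at the tail of a buffer, the
 * helper stamps the current toggle into that BD, flips
 * ring->curr.toggle for the new buffer, and steps past it, wrapping
 * from the end of the last buffer back to ring->bd. Both the skipped
 * next_ptr BD and the returned BD are counted in descs_inflight, so a
 * later doorbell write covers them.
 */
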
/**
 * @brief Populate next ptr descriptor
 */
static void rm_write_next_table_desc(void *desc, void *next_ptr,
				     uint32_t toggle)
{
	struct next_ptr_desc *nxt = (struct next_ptr_desc *)desc;

	nxt->addr = (uintptr_t)next_ptr;
	nxt->type = PAX_DMA_TYPE_NEXT_PTR;
	nxt->toggle = toggle;
}

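/*
 * Per-ring descriptor memory as laid out by dma_iproc_pax_init() and
 * initialized by prepare_ring() below:
 *
 *	cmpl     - completion ring, PAX_DMA_RM_CMPL_RING_SIZE bytes
 *	bd       - PAX_DMA_NUM_BD_BUFFS BD buffers of
 *	           PAX_DMA_RM_DESC_RING_SIZE bytes each, chained by a
 *	           next_ptr BD at the tail of every buffer, the last
 *	           buffer chaining back to the first
 *	sync_loc - 4-byte write-sync word following the BD buffers
 */
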
static void prepare_ring(struct dma_iproc_pax_ring_data *ring)
{
	uintptr_t curr, next, last;
	int buff_count = PAX_DMA_NUM_BD_BUFFS;
#ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE
	uint32_t toggle;
#endif

	/* zero out descriptor area */
	memset(ring->bd, 0x0, PAX_DMA_RM_DESC_RING_SIZE * PAX_DMA_NUM_BD_BUFFS);
	memset(ring->cmpl, 0x0, PAX_DMA_RM_CMPL_RING_SIZE);

	/* start with first buffer, valid toggle is 0x1 */
#ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE
	toggle = 0x1;
#endif
	curr = (uintptr_t)ring->bd;
	next = curr + PAX_DMA_RM_DESC_RING_SIZE;
	last = curr + PAX_DMA_RM_DESC_RING_SIZE * PAX_DMA_NUM_BD_BUFFS;
	do {
#ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE
		init_toggle((void *)curr, toggle);
		/* Place next_table desc as last BD entry on each buffer */
		rm_write_next_table_desc(PAX_DMA_NEXT_TBL_ADDR((void *)curr),
					 (void *)next, toggle);
#elif CONFIG_DMA_IPROC_PAX_DOORBELL_MODE
		/* Place next_table desc as last BD entry on each buffer */
		rm_write_next_table_desc(PAX_DMA_NEXT_TBL_ADDR((void *)curr),
					 (void *)next, 0);
#endif

#ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE
		/* valid toggle flips for each buffer */
		toggle = toggle ? 0x0 : 0x1;
#endif
		curr += PAX_DMA_RM_DESC_RING_SIZE;
		next += PAX_DMA_RM_DESC_RING_SIZE;
		/* last entry, chain back to first buffer */
		if (next == last) {
			next = (uintptr_t)ring->bd;
		}

	} while (--buff_count);

	dma_mb();

	/* start programming from first RM header */
	ring->curr.write_ptr = ring->bd;
	/* valid toggle starts with 1 after reset */
	ring->curr.toggle = 1;
	/* completion read offset */
	ring->curr.cmpl_rd_offs = 0;
	/* inflight descs */
	ring->descs_inflight = 0;

	/* init sync data for the ring */
	ring->curr.sync_data.signature = PAX_DMA_WRITE_SYNC_SIGNATURE;
	ring->curr.sync_data.ring = ring->idx;
	/* pkt id for active dma xfer */
	ring->curr.sync_data.opaque = 0x0;
	/* pkt count for active dma xfer */
	ring->curr.sync_data.total_pkts = 0x0;
}

static int init_rm(struct dma_iproc_pax_data *pd)
{
	int ret = -ETIMEDOUT, timeout = 1000;

	k_mutex_lock(&pd->dma_lock, K_FOREVER);
	/* Wait for Ring Manager ready */
	do {
		LOG_DBG("Waiting for RM HW init\n");
		if ((sys_read32(RM_COMM_REG(pd, RM_COMM_MAIN_HW_INIT_DONE)) &
		     RM_COMM_MAIN_HW_INIT_DONE_MASK)) {
			ret = 0;
			break;
		}
		k_sleep(K_MSEC(1));
	} while (--timeout);
	k_mutex_unlock(&pd->dma_lock);

	if (!timeout) {
		LOG_WRN("RM HW init timed out!\n");
	} else {
		LOG_INF("PAX DMA RM HW Init Done\n");
	}

	return ret;
}

static void rm_cfg_start(struct dma_iproc_pax_data *pd)
{
	uint32_t val;

	k_mutex_lock(&pd->dma_lock, K_FOREVER);

	/* clear config done, then select the completion mode */
	val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL));
	val &= ~RM_COMM_CONTROL_CONFIG_DONE;
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL));

	val &= ~(RM_COMM_CONTROL_MODE_MASK << RM_COMM_CONTROL_MODE_SHIFT);

#ifdef CONFIG_DMA_IPROC_PAX_DOORBELL_MODE
	val |= (RM_COMM_CONTROL_MODE_DOORBELL <<
		RM_COMM_CONTROL_MODE_SHIFT);
#elif CONFIG_DMA_IPROC_PAX_TOGGLE_MODE
	val |= (RM_COMM_CONTROL_MODE_ALL_BD_TOGGLE <<
		RM_COMM_CONTROL_MODE_SHIFT);
#endif
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL));
	sys_write32(RM_COMM_MSI_DISABLE_MASK,
		    RM_COMM_REG(pd, RM_COMM_MSI_DISABLE));

	val = sys_read32(RM_COMM_REG(pd, RM_COMM_AXI_READ_BURST_THRESHOLD));
	val &= ~(RM_COMM_THRESHOLD_CFG_RD_FIFO_MAX_THRESHOLD_MASK <<
		 RM_COMM_THRESHOLD_CFG_RD_FIFO_MAX_THRESHOLD_SHIFT);
	val |= RM_COMM_THRESHOLD_CFG_RD_FIFO_MAX_THRESHOLD_SHIFT_VAL <<
	       RM_COMM_THRESHOLD_CFG_RD_FIFO_MAX_THRESHOLD_SHIFT;
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_AXI_READ_BURST_THRESHOLD));

	val = sys_read32(RM_COMM_REG(pd, RM_COMM_FIFO_FULL_THRESHOLD));
	val &= ~(RM_COMM_PKT_ALIGNMENT_BD_FIFO_FULL_THRESHOLD_MASK <<
		 RM_COMM_PKT_ALIGNMENT_BD_FIFO_FULL_THRESHOLD_SHIFT);
	val |= RM_COMM_PKT_ALIGNMENT_BD_FIFO_FULL_THRESHOLD_VAL <<
	       RM_COMM_PKT_ALIGNMENT_BD_FIFO_FULL_THRESHOLD_SHIFT;

	val &= ~(RM_COMM_BD_FIFO_FULL_THRESHOLD_MASK <<
		 RM_COMM_BD_FIFO_FULL_THRESHOLD_SHIFT);
	val |= RM_COMM_BD_FIFO_FULL_THRESHOLD_VAL <<
	       RM_COMM_BD_FIFO_FULL_THRESHOLD_SHIFT;
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_FIFO_FULL_THRESHOLD));

	/* Enable Line interrupt */
	val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL));
	val |= RM_COMM_CONTROL_LINE_INTR_EN;
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL));

	/* Enable AE_TIMEOUT */
	sys_write32(RM_COMM_AE_TIMEOUT_VAL,
		    RM_COMM_REG(pd, RM_COMM_AE_TIMEOUT));
	val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL));
	val |= RM_COMM_CONTROL_AE_TIMEOUT_EN;
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL));

	/* AE (Acceleration Engine) grouping to group '0' */
	val = sys_read32(RM_COMM_REG(pd, RM_AE0_AE_CONTROL));
	val &= ~RM_AE_CTRL_AE_GROUP_MASK;
	sys_write32(val, RM_COMM_REG(pd, RM_AE0_AE_CONTROL));
	val |= RM_AE_CONTROL_ACTIVE;
	sys_write32(val, RM_COMM_REG(pd, RM_AE0_AE_CONTROL));

	/* AXI read/write channel enable */
	val = sys_read32(RM_COMM_REG(pd, RM_COMM_AXI_CONTROL));
	val |= (RM_COMM_AXI_CONTROL_RD_CH_EN | RM_COMM_AXI_CONTROL_WR_CH_EN);
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_AXI_CONTROL));

	/* Tune RM control programming for 4 rings */
	sys_write32(RM_COMM_TIMER_CONTROL0_VAL,
		    RM_COMM_REG(pd, RM_COMM_TIMER_CONTROL_0));
	sys_write32(RM_COMM_TIMER_CONTROL1_VAL,
		    RM_COMM_REG(pd, RM_COMM_TIMER_CONTROL_1));
	val = sys_read32(RM_COMM_REG(pd, RM_COMM_BURST_LENGTH));
	val |= RM_COMM_BD_FETCH_CACHE_ALIGNED_DISABLED;
	val |= RM_COMM_VALUE_FOR_DDR_ADDR_GEN_VAL <<
	       RM_COMM_VALUE_FOR_DDR_ADDR_GEN_SHIFT;
	val |= RM_COMM_VALUE_FOR_TOGGLE_VAL << RM_COMM_VALUE_FOR_TOGGLE_SHIFT;
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_BURST_LENGTH));

	val = sys_read32(RM_COMM_REG(pd, RM_COMM_BD_FETCH_MODE_CONTROL));
	val |= RM_COMM_DISABLE_GRP_BD_FIFO_FLOW_CONTROL_FOR_PKT_ALIGNMENT;
	val |= RM_COMM_DISABLE_PKT_ALIGNMENT_BD_FIFO_FLOW_CONTROL;
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_BD_FETCH_MODE_CONTROL));

	/* Set Sequence max count to the max supported value */
	val = sys_read32(RM_COMM_REG(pd, RM_COMM_MASK_SEQUENCE_MAX_COUNT));
	val = (val | RING_MASK_SEQ_MAX_COUNT_MASK);
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_MASK_SEQUENCE_MAX_COUNT));

	k_mutex_unlock(&pd->dma_lock);
}

static void rm_ring_clear_stats(struct dma_iproc_pax_data *pd,
				enum ring_idx idx)
{
	/* Read ring Tx, Rx, and Outstanding counts to clear */
	sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_RECV_LS));
	sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_RECV_MS));
	sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_TRANS_LS));
	sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_TRANS_MS));
	sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_OUTSTAND));
}

static void rm_cfg_finish(struct dma_iproc_pax_data *pd)
{
	uint32_t val;

	k_mutex_lock(&pd->dma_lock, K_FOREVER);

	/* set Ring config done */
	val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL));
	val |= RM_COMM_CONTROL_CONFIG_DONE;
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL));

	k_mutex_unlock(&pd->dma_lock);
}

static inline void write_doorbell(struct dma_iproc_pax_data *pd,
				  enum ring_idx idx)
{
	struct dma_iproc_pax_ring_data *ring = &(pd->ring[idx]);

	sys_write32(ring->descs_inflight,
		    RM_RING_REG(pd, idx, RING_DOORBELL_BD_WRITE_COUNT));
	ring->descs_inflight = 0;
}

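/*
 * In doorbell mode the RM only fetches BDs it has been told about:
 * writing the number of newly queued BDs (including the next_ptr BDs
 * skipped while chaining buffers) to RING_DOORBELL_BD_WRITE_COUNT
 * hands that many descriptors to the hardware, so descs_inflight is
 * reset afterwards.
 */
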
static inline void set_ring_active(struct dma_iproc_pax_data *pd,
				   enum ring_idx idx,
				   bool active)
{
	uint32_t val;

	val = sys_read32(RM_RING_REG(pd, idx, RING_CONTROL));
	if (active) {
		val |= RING_CONTROL_ACTIVE;
	} else {
		val &= ~RING_CONTROL_ACTIVE;
	}
	sys_write32(val, RM_RING_REG(pd, idx, RING_CONTROL));
}

static int init_ring(struct dma_iproc_pax_data *pd, enum ring_idx idx)
{
	uint32_t val;
	uintptr_t desc = (uintptr_t)pd->ring[idx].bd;
	uintptr_t cmpl = (uintptr_t)pd->ring[idx].cmpl;
	int timeout = 5000, ret = 0;

	k_mutex_lock(&pd->dma_lock, K_FOREVER);

	/* Read cmpl write ptr in case previous dma stopped */
	sys_read32(RM_RING_REG(pd, idx, RING_CMPL_WRITE_PTR));

	/* Deactivate ring */
	sys_write32(0x0, RM_RING_REG(pd, idx, RING_CONTROL));

	/* set Ring config done */
	val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL));
	val |= RM_COMM_CONTROL_CONFIG_DONE;
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL));
	/* Flush ring before loading new descriptor */
	sys_write32(RING_CONTROL_FLUSH, RM_RING_REG(pd, idx,
						    RING_CONTROL));
	do {
		if (sys_read32(RM_RING_REG(pd, idx, RING_FLUSH_DONE)) &
		    RING_FLUSH_DONE_MASK) {
			break;
		}
		k_busy_wait(1);
	} while (--timeout);

	if (!timeout) {
		LOG_WRN("Ring %d flush timed out!\n", idx);
		ret = -ETIMEDOUT;
		goto err;
	}

	/* clear ring after flush */
	sys_write32(0x0, RM_RING_REG(pd, idx, RING_CONTROL));

	/* Clear Ring config done */
	val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL));
	val &= ~(RM_COMM_CONTROL_CONFIG_DONE);
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL));
	/* ring group id set to '0' */
	val = sys_read32(RM_COMM_REG(pd, RM_COMM_CTRL_REG(idx)));
	val &= ~RING_COMM_CTRL_AE_GROUP_MASK;
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_CTRL_REG(idx)));

	/* DDR update control, set timeout value */
	val = RING_DDR_CONTROL_COUNT(RING_DDR_CONTROL_COUNT_VAL) |
	      RING_DDR_CONTROL_TIMER(RING_DDR_CONTROL_TIMER_VAL) |
	      RING_DDR_CONTROL_ENABLE;

	sys_write32(val, RM_RING_REG(pd, idx, RING_CMPL_WR_PTR_DDR_CONTROL));
	/* Disable Ring MSI Timeout */
	sys_write32(RING_DISABLE_MSI_TIMEOUT_VALUE,
		    RM_RING_REG(pd, idx, RING_DISABLE_MSI_TIMEOUT));

	/* BD and CMPL desc queue start address */
	sys_write32((uint32_t)desc, RM_RING_REG(pd, idx, RING_BD_START_ADDR));
	sys_write32((uint32_t)cmpl, RM_RING_REG(pd, idx, RING_CMPL_START_ADDR));
	val = sys_read32(RM_RING_REG(pd, idx, RING_BD_READ_PTR));

	/* keep ring inactive after init to avoid BD poll */
#ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE
	set_ring_active(pd, idx, false);
#elif CONFIG_DMA_IPROC_PAX_DOORBELL_MODE
	set_ring_active(pd, idx, true);
#endif

#if !defined(CONFIG_DMA_IPROC_PAX_POLL_MODE)
	/* Enable ring completion interrupt */
	sys_write32(0x0, RM_RING_REG(pd, idx,
				     RING_COMPLETION_INTERRUPT_STAT_MASK));
#endif
	rm_ring_clear_stats(pd, idx);
err:
	k_mutex_unlock(&pd->dma_lock);

	return ret;
}

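/*
 * Write-sync protocol: every transfer is followed by an extra packet
 * (see dma_iproc_pax_process_dma_blocks()) that DMAs the ring's 4-byte
 * sync_data word to a per-ring host address. Since PCIe writes are
 * posted, poll_on_write_sync() reads that host location back over PCIe
 * until it matches the word that was sent, which indicates all prior
 * DMA writes to host memory have landed, then clears the host copy for
 * the next transfer.
 */
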
static int poll_on_write_sync(const struct device *dev,
			      struct dma_iproc_pax_ring_data *ring)
{
	const struct dma_iproc_pax_cfg *cfg = dev->config;
	struct dma_iproc_pax_write_sync_data sync_rd, *recv, *sent;
	uint64_t pci_addr;
	uint32_t *pci32, *axi32;
	uint32_t zero_init = 0, timeout = PAX_DMA_MAX_SYNC_WAIT;
	int ret;

	recv = &sync_rd;
	sent = &(ring->curr.sync_data);
	/* form host pci sync address */
	pci32 = (uint32_t *)&pci_addr;
	pci32[0] = ring->sync_pci.addr_lo;
	pci32[1] = ring->sync_pci.addr_hi;
	axi32 = (uint32_t *)&sync_rd;

	do {
		ret = pcie_ep_xfer_data_memcpy(cfg->pcie_dev, pci_addr,
					       (uintptr_t *)axi32, 4,
					       PCIE_OB_LOWMEM, HOST_TO_DEVICE);

		if (memcmp((void *)recv, (void *)sent, 4) == 0) {
			/* clear the sync word */
			ret = pcie_ep_xfer_data_memcpy(cfg->pcie_dev, pci_addr,
						       (uintptr_t *)&zero_init,
						       4, PCIE_OB_LOWMEM,
						       DEVICE_TO_HOST);
			dma_mb();
			ret = 0;
			break;
		}
		k_busy_wait(1);
	} while (--timeout);

	if (!timeout) {
		LOG_ERR("[ring %d]: write sync not received!\n", ring->idx);
		ret = -ETIMEDOUT;
	}

	return ret;
}

static int process_cmpl_event(const struct device *dev,
			      enum ring_idx idx, uint32_t pl_len)
{
	struct dma_iproc_pax_data *pd = dev->data;
	uint32_t wr_offs, rd_offs;
	int ret = 0;
	struct dma_iproc_pax_ring_data *ring = &(pd->ring[idx]);
	struct cmpl_pkt *c;
	uint32_t is_outstanding;

	/* cmpl read offset, unprocessed cmpl location */
	rd_offs = ring->curr.cmpl_rd_offs;

	wr_offs = sys_read32(RM_RING_REG(pd, idx,
					 RING_CMPL_WRITE_PTR));

	/* Update read ptr to "processed" */
	ring->curr.cmpl_rd_offs = wr_offs;

	/*
	 * Ensure consistency of the completion descriptor.
	 * The completion desc is updated by RM via AXI stream, so the
	 * CPU needs to ensure those memory operations are complete
	 * before reading the cmpl area, hence the "dsb".
	 * If the Dcache is enabled, the cachelines must be invalidated
	 * to read the updated cmpl desc; the cache API also issues a dsb.
	 */
	dma_mb();

	/* Decode cmpl pkt id to verify */
	c = (struct cmpl_pkt *)((uintptr_t)ring->cmpl +
	    PAX_DMA_CMPL_DESC_SIZE * PAX_DMA_CURR_CMPL_IDX(wr_offs));

	LOG_DBG("RING%d WR_PTR:%d opq:%d, rm_status:%x dma_status:%x\n",
		idx, wr_offs, c->opq, c->rm_status, c->dma_status);

	is_outstanding = sys_read32(RM_RING_REG(pd, idx,
						RING_NUM_REQ_OUTSTAND));
	if ((ring->curr.opq != c->opq) && (is_outstanding != 0)) {
		LOG_ERR("RING%d: pkt id should be %d, rcvd %d outst=%d\n",
			idx, ring->curr.opq, c->opq, is_outstanding);
		ret = -EIO;
	}
	/* check for completion AE timeout */
	if (c->rm_status == RM_COMPLETION_AE_TIMEOUT) {
		LOG_ERR("RING%d WR_PTR:%d rm_status:%x AE Timeout!\n",
			idx, wr_offs, c->rm_status);
		/* TBD: Issue full card reset to restore operations */
		LOG_ERR("Needs Card Reset to recover!\n");
		ret = -ETIMEDOUT;
	}

	if (ring->dma_callback) {
		ring->dma_callback(dev, ring->callback_arg, idx, ret);
	}

	/* clear total packet count and non header bd count */
	ring->total_pkt_count = 0;

	return ret;
}

#ifdef CONFIG_DMA_IPROC_PAX_POLL_MODE
static int peek_ring_cmpl(const struct device *dev,
			  enum ring_idx idx, uint32_t pl_len)
{
	struct dma_iproc_pax_data *pd = dev->data;
	uint32_t wr_offs, rd_offs, timeout = PAX_DMA_MAX_POLL_WAIT;
	struct dma_iproc_pax_ring_data *ring = &(pd->ring[idx]);

	/* cmpl read offset, unprocessed cmpl location */
	rd_offs = ring->curr.cmpl_rd_offs;

	/* poll write_ptr until cmpl received for all buffers */
	do {
		wr_offs = sys_read32(RM_RING_REG(pd, idx,
						 RING_CMPL_WRITE_PTR));
		if (PAX_DMA_GET_CMPL_COUNT(wr_offs, rd_offs) >= pl_len) {
			break;
		}
		k_busy_wait(1);
	} while (--timeout);

	if (timeout == 0) {
		LOG_ERR("RING%d timeout, rcvd %d, expected %d!\n",
			idx, PAX_DMA_GET_CMPL_COUNT(wr_offs, rd_offs), pl_len);
		/* More debug info on current dma instance */
		LOG_ERR("WR_PTR:%x RD_PTR%x\n", wr_offs, rd_offs);
		return -ETIMEDOUT;
	}

	return process_cmpl_event(dev, idx, pl_len);
}
#else
static void rm_isr(const struct device *dev)
{
	uint32_t status, err_stat, idx;
	struct dma_iproc_pax_data *pd = dev->data;

	err_stat = sys_read32(RM_COMM_REG(pd,
			RM_COMM_AE_INTERFACE_GROUP_0_INTERRUPT_MASK));
	sys_write32(err_stat,
		    RM_COMM_REG(pd,
				RM_COMM_AE_INTERFACE_GROUP_0_INTERRUPT_CLEAR));

	/* alert waiting thread to process, for each completed ring */
	for (idx = PAX_DMA_RING0; idx < PAX_DMA_RINGS_MAX; idx++) {
		status = sys_read32(RM_RING_REG(pd, idx,
				RING_COMPLETION_INTERRUPT_STAT));
		sys_write32(status,
			    RM_RING_REG(pd, idx,
					RING_COMPLETION_INTERRUPT_STAT_CLEAR));
		if (status & 0x1) {
			k_sem_give(&pd->ring[idx].alert);
		}
	}
}
#endif

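/*
 * Completion handling is compile-time selected: in poll mode the
 * caller spins in peek_ring_cmpl() on RING_CMPL_WRITE_PTR; otherwise
 * rm_isr() clears the per-ring completion interrupt and gives the
 * ring's alert semaphore, on which wait_for_pkt_completion() blocks.
 */
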
static int dma_iproc_pax_init(const struct device *dev)
{
	const struct dma_iproc_pax_cfg *cfg = dev->config;
	struct dma_iproc_pax_data *pd = dev->data;
	int r;
	uintptr_t mem_aligned;

	if (!device_is_ready(cfg->pcie_dev)) {
		LOG_ERR("PCIe device not ready");
		return -ENODEV;
	}

	pd->dma_base = cfg->dma_base;
	pd->rm_comm_base = cfg->rm_comm_base;
	pd->used_rings = (cfg->use_rings < PAX_DMA_RINGS_MAX) ?
			 cfg->use_rings : PAX_DMA_RINGS_MAX;

	/* dma/rm access lock */
	k_mutex_init(&pd->dma_lock);

	/* Ring Manager H/W init */
	if (init_rm(pd)) {
		return -ETIMEDOUT;
	}

	/* common rm config */
	rm_cfg_start(pd);

	/* individual ring config */
	for (r = 0; r < pd->used_rings; r++) {
		/* per-ring mutex lock */
		k_mutex_init(&pd->ring[r].lock);
		/* Init alerts */
		k_sem_init(&pd->ring[r].alert, 0, 1);

		pd->ring[r].idx = r;
		pd->ring[r].ring_base = cfg->rm_base +
					PAX_DMA_RING_ADDR_OFFSET(r);
		LOG_DBG("RING%d,VERSION:0x%x\n", pd->ring[r].idx,
			sys_read32(RM_RING_REG(pd, r, RING_VER)));

		/* Allocate for 2 BD buffers + cmpl buffer + sync location */
		pd->ring[r].ring_mem = (void *)((uintptr_t)cfg->bd_memory_base +
						r * PAX_DMA_PER_RING_ALLOC_SIZE);
		if (!pd->ring[r].ring_mem) {
			LOG_ERR("RING%d failed to alloc desc memory!\n", r);
			return -ENOMEM;
		}
		/* Find 8K aligned address within allocated region */
		mem_aligned = ((uintptr_t)pd->ring[r].ring_mem +
			       PAX_DMA_RING_ALIGN - 1) &
			      ~(PAX_DMA_RING_ALIGN - 1);

		pd->ring[r].cmpl = (void *)mem_aligned;
		pd->ring[r].bd = (void *)(mem_aligned +
					  PAX_DMA_RM_CMPL_RING_SIZE);
		pd->ring[r].sync_loc = (void *)((uintptr_t)pd->ring[r].bd +
						PAX_DMA_RM_DESC_RING_SIZE *
						PAX_DMA_NUM_BD_BUFFS);

		LOG_DBG("Ring%d,allocated Mem:0x%p Size %d\n",
			pd->ring[r].idx,
			pd->ring[r].ring_mem,
			PAX_DMA_PER_RING_ALLOC_SIZE);
		LOG_DBG("Ring%d,BD:0x%p, CMPL:0x%p, SYNC_LOC:0x%p\n",
			pd->ring[r].idx,
			pd->ring[r].bd,
			pd->ring[r].cmpl,
			pd->ring[r].sync_loc);

		/* Prepare ring desc table */
		prepare_ring(&(pd->ring[r]));

		/* initialize ring */
		init_ring(pd, r);
	}

	/* set ring config done */
	rm_cfg_finish(pd);

#ifndef CONFIG_DMA_IPROC_PAX_POLL_MODE
	/* Register and enable RM interrupt */
	IRQ_CONNECT(DT_INST_IRQN(0),
		    DT_INST_IRQ(0, priority),
		    rm_isr,
		    DEVICE_DT_INST_GET(0),
		    0);
	irq_enable(DT_INST_IRQN(0));
#else
	LOG_INF("%s PAX DMA rings in poll mode!\n", dev->name);
#endif
	LOG_INF("%s RM setup %d rings\n", dev->name, pd->used_rings);

	return 0;
}

static int dma_iproc_pax_gen_desc(struct dma_iproc_pax_ring_data *ring,
				  bool is_mega,
				  uint64_t pci_addr,
				  uint64_t axi_addr,
				  uint32_t length,
				  enum pax_dma_dir dir,
				  uint32_t *non_hdr_bd_count)
{
	struct rm_header *hdr;

	if (*non_hdr_bd_count == 0) {
		/* Generate Header BD */
		ring->current_hdr = (uintptr_t)get_curr_desc_addr(ring);
		rm_write_header_desc((void *)ring->current_hdr,
				     curr_toggle_val(ring),
				     curr_pkt_id(ring),
				     PAX_DMA_RM_DESC_BDCOUNT,
				     pci_addr);
		ring->total_pkt_count++;
	}

	rm_write_pcie_desc(get_curr_desc_addr(ring),
			   curr_toggle_val(ring), pci_addr);
	*non_hdr_bd_count = *non_hdr_bd_count + 1;
	rm_write_src_dst_desc(get_curr_desc_addr(ring),
			      is_mega, curr_toggle_val(ring),
			      axi_addr, length, dir);
	*non_hdr_bd_count = *non_hdr_bd_count + 1;

	/* Update Header BD with bd count */
	hdr = (struct rm_header *)ring->current_hdr;
	hdr->bdcount = *non_hdr_bd_count;
	if (*non_hdr_bd_count == MAX_BD_COUNT_PER_HEADER) {
		*non_hdr_bd_count = 0;
	}

	return 0;
}

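/*
 * Non-header BD accounting in dma_iproc_pax_gen_desc(): each call adds
 * a pcie_desc plus a src_dst_desc (2 non-header BDs) and rewrites the
 * open header's bdcount. For example, with *non_hdr_bd_count == 0 a
 * fresh header is emitted first, so three descriptors go out and
 * bdcount becomes 2; once the count reaches MAX_BD_COUNT_PER_HEADER it
 * resets to 0, forcing the next call to start a new header packet.
 */
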
static int dma_iproc_pax_gen_packets(const struct device *dev,
				     struct dma_iproc_pax_ring_data *ring,
				     uint32_t direction,
				     struct dma_block_config *config,
				     uint32_t *non_hdr_bd_count)
{
	uint32_t outstanding, remaining_len;
	uint32_t offset, curr, mega_len;
	uint64_t axi_addr;
	uint64_t pci_addr;
	enum pax_dma_dir dir;

	switch (direction) {
	case MEMORY_TO_PERIPHERAL:
		pci_addr = config->dest_address;
		axi_addr = config->source_address;
		dir = CARD_TO_HOST;
		break;
	case PERIPHERAL_TO_MEMORY:
		axi_addr = config->dest_address;
		pci_addr = config->source_address;
		dir = HOST_TO_CARD;
		break;
	default:
		LOG_ERR("not supported transfer direction");
		return -EINVAL;
	}

	outstanding = config->block_size;
	offset = 0;
	while (outstanding) {
		curr = MIN(outstanding, PAX_DMA_MAX_SZ_PER_BD);
		mega_len = curr / PAX_DMA_MEGA_LENGTH_MULTIPLE;
		remaining_len = curr % PAX_DMA_MEGA_LENGTH_MULTIPLE;
		pci_addr = pci_addr + offset;
		axi_addr = axi_addr + offset;

		if (mega_len) {
			dma_iproc_pax_gen_desc(ring, true, pci_addr,
					       axi_addr, mega_len, dir,
					       non_hdr_bd_count);
			offset = offset + mega_len *
				 PAX_DMA_MEGA_LENGTH_MULTIPLE;
		}

		if (remaining_len) {
			pci_addr = pci_addr + offset;
			axi_addr = axi_addr + offset;
			dma_iproc_pax_gen_desc(ring, false, pci_addr, axi_addr,
					       remaining_len, dir,
					       non_hdr_bd_count);
			offset = offset + remaining_len;
		}

		outstanding = outstanding - curr;
	}

	return 0;
}

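/*
 * Block splitting in dma_iproc_pax_gen_packets(): each chunk of up to
 * PAX_DMA_MAX_SZ_PER_BD bytes is divided into a mega descriptor
 * covering curr / PAX_DMA_MEGA_LENGTH_MULTIPLE units plus a regular
 * descriptor for the curr % PAX_DMA_MEGA_LENGTH_MULTIPLE remainder.
 * E.g. (illustrative values only) with a 4 KB multiple, a 10 KB chunk
 * becomes one mega descriptor of length 2 and one 2 KB regular
 * descriptor.
 */
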
#ifdef CONFIG_DMA_IPROC_PAX_POLL_MODE
static void set_pkt_count(const struct device *dev,
			  enum ring_idx idx,
			  uint32_t pl_len)
{
	/* Nothing needs to be programmed here in poll mode */
}

static int wait_for_pkt_completion(const struct device *dev,
				   enum ring_idx idx,
				   uint32_t pl_len)
{
	/* poll for completion */
	return peek_ring_cmpl(dev, idx, pl_len);
}
#else
static void set_pkt_count(const struct device *dev,
			  enum ring_idx idx,
			  uint32_t pl_len)
{
	struct dma_iproc_pax_data *pd = dev->data;
	uint32_t val;

	/* program packet count for interrupt assertion */
	val = sys_read32(RM_RING_REG(pd, idx,
				     RING_CMPL_WR_PTR_DDR_CONTROL));
	val &= ~RING_DDR_CONTROL_COUNT_MASK;
	val |= RING_DDR_CONTROL_COUNT(pl_len);
	sys_write32(val, RM_RING_REG(pd, idx,
				     RING_CMPL_WR_PTR_DDR_CONTROL));
}

static int wait_for_pkt_completion(const struct device *dev,
				   enum ring_idx idx,
				   uint32_t pl_len)
{
	struct dma_iproc_pax_data *pd = dev->data;
	struct dma_iproc_pax_ring_data *ring;

	ring = &(pd->ring[idx]);
	/* wait for sg dma completion alert */
	if (k_sem_take(&ring->alert, K_MSEC(PAX_DMA_TIMEOUT)) != 0) {
		LOG_ERR("PAX DMA [ring %d] Timeout!\n", idx);
		return -ETIMEDOUT;
	}

	return process_cmpl_event(dev, idx, pl_len);
}
#endif

static int dma_iproc_pax_process_dma_blocks(const struct device *dev,
					    enum ring_idx idx,
					    struct dma_config *config)
{
	struct dma_iproc_pax_data *pd = dev->data;
	const struct dma_iproc_pax_cfg *cfg = dev->config;
	int ret = 0;
	struct dma_iproc_pax_ring_data *ring;
	uint32_t toggle_bit, non_hdr_bd_count = 0;
	struct dma_block_config sync_pl;
	struct dma_iproc_pax_addr64 sync;
	struct dma_block_config *block_config = config->head_block;

	if (block_config == NULL) {
		LOG_ERR("head_block is NULL\n");
		return -EINVAL;
	}

	ring = &(pd->ring[idx]);

	/*
	 * The host sync buffer isn't ready at zephyr/driver init-time,
	 * so read the host address location once, at the first DMA
	 * write on this ring.
	 */
	if ((ring->sync_pci.addr_lo == 0x0) &&
	    (ring->sync_pci.addr_hi == 0x0)) {
		/* populate sync data location */
		LOG_DBG("sync addr loc 0x%x\n", cfg->scr_addr_loc);
		sync.addr_lo = sys_read32(cfg->scr_addr_loc + 4);
		sync.addr_hi = sys_read32(cfg->scr_addr_loc);
		ring->sync_pci.addr_lo = sync.addr_lo + idx * 4;
		ring->sync_pci.addr_hi = sync.addr_hi;
		LOG_DBG("ring:%d,sync addr:0x%x.0x%x\n", idx,
			ring->sync_pci.addr_hi,
			ring->sync_pci.addr_lo);
	}

	/* account extra sync packet */
	ring->curr.sync_data.opaque = ring->curr.opq;
	ring->curr.sync_data.total_pkts = config->block_count;
	memcpy((void *)ring->sync_loc,
	       (void *)&(ring->curr.sync_data), 4);
	sync_pl.dest_address = ring->sync_pci.addr_lo |
			       (uint64_t)ring->sync_pci.addr_hi << 32;
	sync_pl.source_address = (uintptr_t)ring->sync_loc;
	sync_pl.block_size = 4; /* 4-bytes */

	/* current toggle bit */
	toggle_bit = ring->curr.toggle;
	/* current opq value for cmpl check */
	ring->curr.opq = curr_pkt_id(ring);

	/* Form descriptors for total block counts */
	while (block_config != NULL) {
		ret = dma_iproc_pax_gen_packets(dev, ring,
						config->channel_direction,
						block_config,
						&non_hdr_bd_count);
		if (ret) {
			goto err;
		}
		block_config = block_config->next_block;
	}

	/*
	 * The write-sync payload descriptors must go in a separate RM
	 * header packet, since the RM implementation requires all BDs
	 * in a header packet to have the same data transfer direction.
	 * Resetting non_hdr_bd_count to 0 forces a fresh header packet.
	 */
	non_hdr_bd_count = 0;
	dma_iproc_pax_gen_packets(dev, ring, MEMORY_TO_PERIPHERAL, &sync_pl,
				  &non_hdr_bd_count);

	alloc_pkt_id(ring);
err:
	return ret;
}

static int dma_iproc_pax_configure(const struct device *dev, uint32_t channel,
				   struct dma_config *cfg)
{
	struct dma_iproc_pax_data *pd = dev->data;
	struct dma_iproc_pax_ring_data *ring;
	int ret = 0;

	if (channel >= PAX_DMA_RINGS_MAX) {
		LOG_ERR("Invalid ring/channel %d\n", channel);
		return -EINVAL;
	}

	ring = &(pd->ring[channel]);
	k_mutex_lock(&ring->lock, K_FOREVER);

	if (ring->ring_active) {
		ret = -EBUSY;
		goto err;
	}

	if (cfg->block_count >= RM_V2_MAX_BLOCK_COUNT) {
		LOG_ERR("DMA block count [%d] exceeds supported limit [%d]\n",
			cfg->block_count, RM_V2_MAX_BLOCK_COUNT);
		ret = -ENOTSUP;
		goto err;
	}

	ring->ring_active = 1;
	ret = dma_iproc_pax_process_dma_blocks(dev, channel, cfg);

	if (ret) {
		ring->ring_active = 0;
		goto err;
	}

	ring->dma_callback = cfg->dma_callback;
	ring->callback_arg = cfg->user_data;
err:
	k_mutex_unlock(&ring->lock);
	return ret;
}

static int dma_iproc_pax_transfer_start(const struct device *dev,
					uint32_t channel)
{
	int ret = 0;
	struct dma_iproc_pax_data *pd = dev->data;
	struct dma_iproc_pax_ring_data *ring;

	if (channel >= PAX_DMA_RINGS_MAX) {
		LOG_ERR("Invalid ring %d\n", channel);
		return -EINVAL;
	}

	ring = &(pd->ring[channel]);
	set_pkt_count(dev, channel, ring->total_pkt_count);

#ifdef CONFIG_DMA_IPROC_PAX_DOORBELL_MODE
	write_doorbell(pd, channel);
#elif CONFIG_DMA_IPROC_PAX_TOGGLE_MODE
	/* activate the ring */
	set_ring_active(pd, channel, true);
#endif

	ret = wait_for_pkt_completion(dev, channel, ring->total_pkt_count);
	if (ret) {
		goto err_ret;
	}

	ret = poll_on_write_sync(dev, ring);

err_ret:
	k_mutex_lock(&ring->lock, K_FOREVER);
	ring->ring_active = 0;
	k_mutex_unlock(&ring->lock);

#ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE
	/* deactivate the ring until next active transfer */
	set_ring_active(pd, channel, false);
#endif
	return ret;
}

static int dma_iproc_pax_transfer_stop(const struct device *dev,
				       uint32_t channel)
{
	return 0;
}

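/*
 * Typical caller flow through the generic Zephyr DMA API (a sketch;
 * ring/channel choice and block wiring are up to the caller):
 *
 *	struct dma_block_config blk = {
 *		.source_address = ...,	// card AXI or host PCIe address
 *		.dest_address = ...,
 *		.block_size = ...,
 *	};
 *	struct dma_config cfg = {
 *		.channel_direction = MEMORY_TO_PERIPHERAL,
 *		.block_count = 1,
 *		.head_block = &blk,
 *	};
 *	dma_config(dev, ring_idx, &cfg);	// builds BDs on the ring
 *	dma_start(dev, ring_idx);	// kicks RM, blocks until write sync
 */
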
static const struct dma_driver_api pax_dma_driver_api = {
	.config = dma_iproc_pax_configure,
	.start = dma_iproc_pax_transfer_start,
	.stop = dma_iproc_pax_transfer_stop,
};

static const struct dma_iproc_pax_cfg pax_dma_cfg = {
	.dma_base = DT_INST_REG_ADDR_BY_NAME(0, dme_regs),
	.rm_base = DT_INST_REG_ADDR_BY_NAME(0, rm_ring_regs),
	.rm_comm_base = DT_INST_REG_ADDR_BY_NAME(0, rm_comm_regs),
	.use_rings = DT_INST_PROP(0, dma_channels),
	.bd_memory_base = (void *)DT_INST_PROP_BY_IDX(0, bd_memory, 0),
	.scr_addr_loc = DT_INST_PROP(0, scr_addr_loc),
	.pcie_dev = DEVICE_DT_GET(DT_INST_PHANDLE(0, pcie_ep)),
};

DEVICE_DT_INST_DEFINE(0,
		      &dma_iproc_pax_init,
		      NULL,
		      &pax_dma_data,
		      &pax_dma_cfg,
		      POST_KERNEL,
		      CONFIG_DMA_IPROC_PAX_V2_INIT_PRIORITY,
		      &pax_dma_driver_api);