/*
 * Copyright 2020 Broadcom
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT brcm_iproc_pax_dma_v2

#include <arch/cpu.h>
#include <cache.h>
#include <errno.h>
#include <init.h>
#include <kernel.h>
#include <linker/sections.h>
#include <soc.h>
#include <string.h>
#include <toolchain.h>
#include <zephyr/types.h>
#include <drivers/dma.h>
#include <drivers/pcie/endpoint/pcie_ep.h>
#include "dma_iproc_pax_v2.h"

#define LOG_LEVEL CONFIG_DMA_LOG_LEVEL
#include <logging/log.h>
LOG_MODULE_REGISTER(dma_iproc_pax_v2);

#define PAX_DMA_DEV_NAME(dev)	((dev)->name)

#define PAX_DMA_DEV_CFG(dev)	\
			((struct dma_iproc_pax_cfg *)(dev)->config)

#define PAX_DMA_DEV_DATA(dev)	\
			((struct dma_iproc_pax_data *)(dev)->data)

/* Driver runtime data for PAX DMA and RM */
static struct dma_iproc_pax_data pax_dma_data;
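
/*
 * Work submission overview (summary of the code below; the exact RM-side
 * behavior is an assumption based on how this driver programs the hardware):
 * - CONFIG_DMA_IPROC_PAX_TOGGLE_MODE: the ring is activated and the Ring
 *   Manager polls buffer descriptors; a BD is treated as valid when its
 *   toggle bit matches the expected value, which flips per BD buffer.
 * - CONFIG_DMA_IPROC_PAX_DOORBELL_MODE: BDs are written with toggle 0 and
 *   the driver rings the doorbell with the number of BD slots queued since
 *   the last doorbell (see write_doorbell()).
 */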

/**
 * @brief Opaque/packet id allocator, range 0 to 31
 */
static inline uint32_t reset_pkt_id(struct dma_iproc_pax_ring_data *ring)
{
	return ring->pkt_id = 0x0;
}

static inline uint32_t alloc_pkt_id(struct dma_iproc_pax_ring_data *ring)
{
	ring->pkt_id = (ring->pkt_id + 1) % 32;
	return ring->pkt_id;
}
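
/*
 * Note: alloc_pkt_id() hands out ids 1..31 and then wraps back to 0. The id
 * is placed in the RM header opq field and echoed back in the completion
 * descriptor, where process_cmpl_event() uses it to sanity-check that the
 * completion belongs to the packet that was submitted.
 */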

static inline uint32_t curr_pkt_id(struct dma_iproc_pax_ring_data *ring)
{
	return ring->pkt_id;
}

static inline uint32_t curr_toggle_val(struct dma_iproc_pax_ring_data *ring)
{
	return ring->curr.toggle;
}

/**
 * @brief Populate header descriptor
 */
static inline void rm_write_header_desc(void *desc, uint32_t toggle,
				  uint32_t opq, uint32_t bdcount,
				  uint64_t pci_addr)
{
	struct rm_header *r = (struct rm_header *)desc;

	r->opq = opq;
	r->bdf = 0x0;
	r->res1 = 0x0;
	/* DMA descriptor count init value */
	r->bdcount = bdcount;
	r->prot = 0x0;
	r->res2 = 0x0;
	/* No packet extension, start and end set to '1' */
	r->start = 1;
	r->end = 1;
	/* RM header type */
	r->type = PAX_DMA_TYPE_RM_HEADER;
	r->pcie_addr_msb = PAX_DMA_PCI_ADDR_HI_MSB8(pci_addr);
	r->res3 = 0x0;
	r->res4 = 0x0;
#ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE
	r->toggle = toggle;
#elif CONFIG_DMA_IPROC_PAX_DOORBELL_MODE
	r->toggle = 0;
#endif
}

/**
 * @brief Populate pcie descriptor
 */
static inline void rm_write_pcie_desc(void *desc,
			     uint32_t toggle,
			     uint64_t pci_addr)
{
	struct pcie_desc *pcie = (struct pcie_desc *)desc;

	pcie->pcie_addr_lsb = pci_addr;
	pcie->res1 = 0x0;
	/* PCIE header type */
	pcie->type = PAX_DMA_TYPE_PCIE_DESC;
#ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE
	pcie->toggle = toggle;
#elif CONFIG_DMA_IPROC_PAX_DOORBELL_MODE
	pcie->toggle = 0;
#endif
}

/**
 * @brief Populate source/destination descriptor
 */
static inline void rm_write_src_dst_desc(void *desc_ptr,
				bool is_mega,
				uint32_t toggle,
				uint64_t axi_addr,
				uint32_t size,
				enum pax_dma_dir direction)
{
	struct src_dst_desc *desc;

	desc = (struct src_dst_desc *)desc_ptr;
	desc->axi_addr = axi_addr;
	desc->length = size;
#ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE
	desc->toggle = toggle;
#elif CONFIG_DMA_IPROC_PAX_DOORBELL_MODE
	desc->toggle = 0;
#endif

	if (direction == CARD_TO_HOST) {
		desc->type = is_mega ?
			     PAX_DMA_TYPE_MEGA_SRC_DESC : PAX_DMA_TYPE_SRC_DESC;
	} else {
		desc->type = is_mega ?
			     PAX_DMA_TYPE_MEGA_DST_DESC : PAX_DMA_TYPE_DST_DESC;
	}
}

#ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE
static void init_toggle(void *desc, uint32_t toggle)
{
	struct rm_header *r = (struct rm_header *)desc;

	r->toggle = toggle;
}
#endif

/**
 * @brief Return current descriptor memory address and
 *        increment to point to next descriptor memory address.
 */
static inline void *get_curr_desc_addr(struct dma_iproc_pax_ring_data *ring)
{
	struct next_ptr_desc *nxt;
	uintptr_t curr;

	curr = (uintptr_t)ring->curr.write_ptr;
	/* if hit next table ptr, skip to next location, flip toggle */
	nxt = (struct next_ptr_desc *)curr;
	if (nxt->type == PAX_DMA_TYPE_NEXT_PTR) {
		LOG_DBG("hit next_ptr@0x%lx %d, next_table@0x%lx\n",
			curr, nxt->toggle, (uintptr_t)nxt->addr);
		uintptr_t last = (uintptr_t)ring->bd +
			     PAX_DMA_RM_DESC_RING_SIZE * PAX_DMA_NUM_BD_BUFFS;
		nxt->toggle = ring->curr.toggle;
		ring->curr.toggle = (ring->curr.toggle == 0) ? 1 : 0;
		/* move to next addr, wrap around if hits end */
		curr += PAX_DMA_RM_DESC_BDWIDTH;
		if (curr == last) {
			curr = (uintptr_t)ring->bd;
			LOG_DBG("hit end of desc:0x%lx, wrap to 0x%lx\n",
				last, curr);
		}
		ring->descs_inflight++;
	}

	ring->curr.write_ptr = (void *)(curr + PAX_DMA_RM_DESC_BDWIDTH);
	ring->descs_inflight++;

	return (void *)curr;
}
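
/*
 * descs_inflight is incremented once per BD slot handed out and once more
 * whenever a next-table slot is skipped, so in doorbell mode the count
 * written to the doorbell register covers every slot the RM has to consume,
 * including the next-ptr descriptors (see write_doorbell()).
 */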

/**
 * @brief Populate next ptr descriptor
 */
static void rm_write_next_table_desc(void *desc, void *next_ptr,
				     uint32_t toggle)
{
	struct next_ptr_desc *nxt = (struct next_ptr_desc *)desc;

	nxt->addr = (uintptr_t)next_ptr;
	nxt->type = PAX_DMA_TYPE_NEXT_PTR;
	nxt->toggle = toggle;
}

static void prepare_ring(struct dma_iproc_pax_ring_data *ring)
{
	uintptr_t curr, next, last;
	int buff_count = PAX_DMA_NUM_BD_BUFFS;
#ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE
	uint32_t toggle;
#endif

	/* zero out descriptor area */
	memset(ring->bd, 0x0, PAX_DMA_RM_DESC_RING_SIZE * PAX_DMA_NUM_BD_BUFFS);
	memset(ring->cmpl, 0x0, PAX_DMA_RM_CMPL_RING_SIZE);

	/* start with first buffer, valid toggle is 0x1 */
#ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE
	toggle = 0x1;
#endif
	curr = (uintptr_t)ring->bd;
	next = curr + PAX_DMA_RM_DESC_RING_SIZE;
	last = curr + PAX_DMA_RM_DESC_RING_SIZE * PAX_DMA_NUM_BD_BUFFS;
	do {
#ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE
		init_toggle((void *)curr, toggle);
		/* Place next_table desc as last BD entry on each buffer */
		rm_write_next_table_desc(PAX_DMA_NEXT_TBL_ADDR((void *)curr),
					 (void *)next, toggle);
#elif CONFIG_DMA_IPROC_PAX_DOORBELL_MODE
		/* Place next_table desc as last BD entry on each buffer */
		rm_write_next_table_desc(PAX_DMA_NEXT_TBL_ADDR((void *)curr),
					 (void *)next, 0);
#endif

#ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE
		/* valid toggle flips for each buffer */
		toggle = toggle ? 0x0 : 0x1;
#endif
		curr += PAX_DMA_RM_DESC_RING_SIZE;
		next += PAX_DMA_RM_DESC_RING_SIZE;
		/* last entry, chain back to first buffer */
		if (next == last) {
			next = (uintptr_t)ring->bd;
		}

	} while (--buff_count);

	dma_mb();

	/* start programming from first RM header */
	ring->curr.write_ptr = ring->bd;
	/* valid toggle starts with 1 after reset */
	ring->curr.toggle = 1;
	/* completion read offset */
	ring->curr.cmpl_rd_offs = 0;
	/* inflight descs */
	ring->descs_inflight = 0;

	/* init sync data for the ring */
	ring->curr.sync_data.signature = PAX_DMA_WRITE_SYNC_SIGNATURE;
	ring->curr.sync_data.ring = ring->idx;
	/* pkt id for active dma xfer */
	ring->curr.sync_data.opaque = 0x0;
	/* pkt count for active dma xfer */
	ring->curr.sync_data.total_pkts = 0x0;
}
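
/*
 * Resulting BD memory layout per ring, as set up by prepare_ring():
 * ring->bd holds PAX_DMA_NUM_BD_BUFFS contiguous buffers of
 * PAX_DMA_RM_DESC_RING_SIZE bytes each; the last BD slot of every buffer
 * carries a next-table descriptor chaining to the following buffer, and the
 * final buffer chains back to ring->bd, forming a circular descriptor ring.
 */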

static int init_rm(struct dma_iproc_pax_data *pd)
{
	int ret = -ETIMEDOUT, timeout = 1000;

	k_mutex_lock(&pd->dma_lock, K_FOREVER);
	/* Wait for Ring Manager ready */
	do {
		LOG_DBG("Waiting for RM HW init\n");
		if ((sys_read32(RM_COMM_REG(pd, RM_COMM_MAIN_HW_INIT_DONE)) &
		    RM_COMM_MAIN_HW_INIT_DONE_MASK)) {
			ret = 0;
			break;
		}
		k_sleep(K_MSEC(1));
	} while (--timeout);
	k_mutex_unlock(&pd->dma_lock);

	if (!timeout) {
		LOG_WRN("RM HW init timed out!\n");
	} else {
		LOG_INF("PAX DMA RM HW Init Done\n");
	}

	return ret;
}

static void rm_cfg_start(struct dma_iproc_pax_data *pd)
{
	uint32_t val;

	k_mutex_lock(&pd->dma_lock, K_FOREVER);

	/* clear config done before selecting the BD submission mode */
	val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL));
	val &= ~RM_COMM_CONTROL_CONFIG_DONE;
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL));

	val &= ~(RM_COMM_CONTROL_MODE_MASK << RM_COMM_CONTROL_MODE_SHIFT);

#ifdef CONFIG_DMA_IPROC_PAX_DOORBELL_MODE
	val |= (RM_COMM_CONTROL_MODE_DOORBELL <<
		RM_COMM_CONTROL_MODE_SHIFT);
#elif CONFIG_DMA_IPROC_PAX_TOGGLE_MODE
	val |= (RM_COMM_CONTROL_MODE_ALL_BD_TOGGLE <<
		RM_COMM_CONTROL_MODE_SHIFT);
#endif
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL));
	sys_write32(RM_COMM_MSI_DISABLE_MASK,
		    RM_COMM_REG(pd, RM_COMM_MSI_DISABLE));

	val = sys_read32(RM_COMM_REG(pd, RM_COMM_AXI_READ_BURST_THRESHOLD));
	val &= ~(RM_COMM_THRESHOLD_CFG_RD_FIFO_MAX_THRESHOLD_MASK <<
		 RM_COMM_THRESHOLD_CFG_RD_FIFO_MAX_THRESHOLD_SHIFT);
	val |= RM_COMM_THRESHOLD_CFG_RD_FIFO_MAX_THRESHOLD_SHIFT_VAL <<
	       RM_COMM_THRESHOLD_CFG_RD_FIFO_MAX_THRESHOLD_SHIFT;
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_AXI_READ_BURST_THRESHOLD));

	val = sys_read32(RM_COMM_REG(pd, RM_COMM_FIFO_FULL_THRESHOLD));
	val &= ~(RM_COMM_PKT_ALIGNMENT_BD_FIFO_FULL_THRESHOLD_MASK <<
		 RM_COMM_PKT_ALIGNMENT_BD_FIFO_FULL_THRESHOLD_SHIFT);
	val |= RM_COMM_PKT_ALIGNMENT_BD_FIFO_FULL_THRESHOLD_VAL <<
	       RM_COMM_PKT_ALIGNMENT_BD_FIFO_FULL_THRESHOLD_SHIFT;

	val &= ~(RM_COMM_BD_FIFO_FULL_THRESHOLD_MASK <<
		 RM_COMM_BD_FIFO_FULL_THRESHOLD_SHIFT);
	val |= RM_COMM_BD_FIFO_FULL_THRESHOLD_VAL <<
	       RM_COMM_BD_FIFO_FULL_THRESHOLD_SHIFT;
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_FIFO_FULL_THRESHOLD));

	/* Enable Line interrupt */
	val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL));
	val |= RM_COMM_CONTROL_LINE_INTR_EN;
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL));

	/* Enable AE_TIMEOUT */
	sys_write32(RM_COMM_AE_TIMEOUT_VAL,
		    RM_COMM_REG(pd, RM_COMM_AE_TIMEOUT));
	val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL));
	val |= RM_COMM_CONTROL_AE_TIMEOUT_EN;
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL));

	/* AE (Acceleration Engine) grouping to group '0' */
	val = sys_read32(RM_COMM_REG(pd, RM_AE0_AE_CONTROL));
	val &= ~RM_AE_CTRL_AE_GROUP_MASK;
	sys_write32(val, RM_COMM_REG(pd, RM_AE0_AE_CONTROL));
	val |= RM_AE_CONTROL_ACTIVE;
	sys_write32(val, RM_COMM_REG(pd, RM_AE0_AE_CONTROL));

	/* AXI read/write channel enable */
	val = sys_read32(RM_COMM_REG(pd, RM_COMM_AXI_CONTROL));
	val |= (RM_COMM_AXI_CONTROL_RD_CH_EN | RM_COMM_AXI_CONTROL_WR_CH_EN);
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_AXI_CONTROL));

	/* Tune RM control programming for 4 rings */
	sys_write32(RM_COMM_TIMER_CONTROL0_VAL,
		    RM_COMM_REG(pd, RM_COMM_TIMER_CONTROL_0));
	sys_write32(RM_COMM_TIMER_CONTROL1_VAL,
		    RM_COMM_REG(pd, RM_COMM_TIMER_CONTROL_1));
	val = sys_read32(RM_COMM_REG(pd, RM_COMM_BURST_LENGTH));
	val |= RM_COMM_BD_FETCH_CACHE_ALIGNED_DISABLED;
	val |= RM_COMM_VALUE_FOR_DDR_ADDR_GEN_VAL <<
	       RM_COMM_VALUE_FOR_DDR_ADDR_GEN_SHIFT;
	val |= RM_COMM_VALUE_FOR_TOGGLE_VAL << RM_COMM_VALUE_FOR_TOGGLE_SHIFT;
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_BURST_LENGTH));

	val = sys_read32(RM_COMM_REG(pd, RM_COMM_BD_FETCH_MODE_CONTROL));
	val |= RM_COMM_DISABLE_GRP_BD_FIFO_FLOW_CONTROL_FOR_PKT_ALIGNMENT;
	val |= RM_COMM_DISABLE_PKT_ALIGNMENT_BD_FIFO_FLOW_CONTROL;
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_BD_FETCH_MODE_CONTROL));

	/* Set Sequence max count to the max supported value */
	val = sys_read32(RM_COMM_REG(pd, RM_COMM_MASK_SEQUENCE_MAX_COUNT));
	val = (val | RING_MASK_SEQ_MAX_COUNT_MASK);
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_MASK_SEQUENCE_MAX_COUNT));

	k_mutex_unlock(&pd->dma_lock);
}

static void rm_ring_clear_stats(struct dma_iproc_pax_data *pd,
				enum ring_idx idx)
{
	/* Read ring Tx, Rx, and Outstanding counts to clear */
	sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_RECV_LS));
	sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_RECV_MS));
	sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_TRANS_LS));
	sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_TRANS_MS));
	sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_OUTSTAND));
}

static void rm_cfg_finish(struct dma_iproc_pax_data *pd)
{
	uint32_t val;

	k_mutex_lock(&pd->dma_lock, K_FOREVER);

	/* set Ring config done */
	val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL));
	val |= RM_COMM_CONTROL_CONFIG_DONE;
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL));

	k_mutex_unlock(&pd->dma_lock);
}

static inline void write_doorbell(struct dma_iproc_pax_data *pd,
				  enum ring_idx idx)
{
	struct dma_iproc_pax_ring_data *ring = &(pd->ring[idx]);

	sys_write32(ring->descs_inflight,
		    RM_RING_REG(pd, idx, RING_DOORBELL_BD_WRITE_COUNT));
	ring->descs_inflight = 0;
}

static inline void set_ring_active(struct dma_iproc_pax_data *pd,
				   enum ring_idx idx,
				   bool active)
{
	uint32_t val;

	val = sys_read32(RM_RING_REG(pd, idx, RING_CONTROL));
	if (active) {
		val |= RING_CONTROL_ACTIVE;
	} else {
		val &= ~RING_CONTROL_ACTIVE;
	}
	sys_write32(val, RM_RING_REG(pd, idx, RING_CONTROL));
}

static int init_ring(struct dma_iproc_pax_data *pd, enum ring_idx idx)
{
	uint32_t val;
	uintptr_t desc = (uintptr_t)pd->ring[idx].bd;
	uintptr_t cmpl = (uintptr_t)pd->ring[idx].cmpl;
	int timeout = 5000, ret = 0;

	k_mutex_lock(&pd->dma_lock, K_FOREVER);

	/* Read cmpl write ptr in case a previous dma was stopped */
	sys_read32(RM_RING_REG(pd, idx, RING_CMPL_WRITE_PTR));

	/* Inactivate ring */
	sys_write32(0x0, RM_RING_REG(pd, idx, RING_CONTROL));

	/* set Ring config done */
	val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL));
	val |= RM_COMM_CONTROL_CONFIG_DONE;
	sys_write32(val,  RM_COMM_REG(pd, RM_COMM_CONTROL));
	/* Flush ring before loading new descriptor */
	sys_write32(RING_CONTROL_FLUSH, RM_RING_REG(pd, idx,
						    RING_CONTROL));
	do {
		if (sys_read32(RM_RING_REG(pd, idx, RING_FLUSH_DONE)) &
		    RING_FLUSH_DONE_MASK) {
			break;
		}
		k_busy_wait(1);
	} while (--timeout);

	if (!timeout) {
		LOG_WRN("Ring %d flush timed out!\n", idx);
		ret = -ETIMEDOUT;
		goto err;
	}

	/* clear ring after flush */
	sys_write32(0x0, RM_RING_REG(pd, idx, RING_CONTROL));

	/* Clear Ring config done */
	val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL));
	val &= ~(RM_COMM_CONTROL_CONFIG_DONE);
	sys_write32(val,  RM_COMM_REG(pd, RM_COMM_CONTROL));
	/* ring group id set to '0' */
	val = sys_read32(RM_COMM_REG(pd, RM_COMM_CTRL_REG(idx)));
	val &= ~RING_COMM_CTRL_AE_GROUP_MASK;
	sys_write32(val, RM_COMM_REG(pd, RM_COMM_CTRL_REG(idx)));

	/* DDR update control, set timeout value */
	val = RING_DDR_CONTROL_COUNT(RING_DDR_CONTROL_COUNT_VAL) |
	      RING_DDR_CONTROL_TIMER(RING_DDR_CONTROL_TIMER_VAL) |
	      RING_DDR_CONTROL_ENABLE;

	sys_write32(val, RM_RING_REG(pd, idx, RING_CMPL_WR_PTR_DDR_CONTROL));
	/* Disable Ring MSI Timeout */
	sys_write32(RING_DISABLE_MSI_TIMEOUT_VALUE,
		    RM_RING_REG(pd, idx, RING_DISABLE_MSI_TIMEOUT));

	/* BD and CMPL desc queue start address */
	sys_write32((uint32_t)desc, RM_RING_REG(pd, idx, RING_BD_START_ADDR));
	sys_write32((uint32_t)cmpl, RM_RING_REG(pd, idx, RING_CMPL_START_ADDR));
	val = sys_read32(RM_RING_REG(pd, idx, RING_BD_READ_PTR));

	/*
	 * Toggle mode: keep the ring inactive after init to avoid BD polling;
	 * doorbell mode: leave the ring active, work is kicked via doorbell.
	 */
#ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE
	set_ring_active(pd, idx, false);
#elif CONFIG_DMA_IPROC_PAX_DOORBELL_MODE
	set_ring_active(pd, idx, true);
#endif

#if !defined(CONFIG_DMA_IPROC_PAX_POLL_MODE)
	/* Enable ring completion interrupt */
	sys_write32(0x0, RM_RING_REG(pd, idx,
				     RING_COMPLETION_INTERRUPT_STAT_MASK));
#endif
	rm_ring_clear_stats(pd, idx);
err:
	k_mutex_unlock(&pd->dma_lock);

	return ret;
}

static int poll_on_write_sync(const struct device *dev,
			      struct dma_iproc_pax_ring_data *ring)
{
	struct dma_iproc_pax_cfg *cfg = PAX_DMA_DEV_CFG(dev);
	const struct device *pcidev;
	struct dma_iproc_pax_write_sync_data sync_rd, *recv, *sent;
	uint64_t pci_addr;
	uint32_t *pci32, *axi32;
	uint32_t zero_init = 0, timeout = PAX_DMA_MAX_SYNC_WAIT;
	int ret;

	pcidev = device_get_binding(cfg->pcie_dev_name);
	if (!pcidev) {
		LOG_ERR("Cannot get pcie device\n");
		return -EINVAL;
	}

	recv = &sync_rd;
	sent = &(ring->curr.sync_data);
	/* form host pci sync address */
	pci32 = (uint32_t *)&pci_addr;
	pci32[0] = ring->sync_pci.addr_lo;
	pci32[1] = ring->sync_pci.addr_hi;
	axi32 = (uint32_t *)&sync_rd;

	do {
		ret = pcie_ep_xfer_data_memcpy(pcidev, pci_addr,
					       (uintptr_t *)axi32, 4,
					       PCIE_OB_LOWMEM, HOST_TO_DEVICE);

		if (memcmp((void *)recv, (void *)sent, 4) == 0) {
			/* clear the sync word */
			ret = pcie_ep_xfer_data_memcpy(pcidev, pci_addr,
						       (uintptr_t *)&zero_init,
						       4, PCIE_OB_LOWMEM,
						       DEVICE_TO_HOST);
			dma_mb();
			ret = 0;
			break;
		}
		k_busy_wait(1);
	} while (--timeout);

	if (!timeout) {
		LOG_ERR("[ring %d]: did not receive write sync!\n", ring->idx);
		ret = -ETIMEDOUT;
	}

	return ret;
}
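
/*
 * Write-sync handshake: the last packet queued on a ring DMA-writes a 4-byte
 * sync word (built from the ring's sync_data) to a per-ring location in host
 * memory. poll_on_write_sync() then reads that host location back over PCIe
 * until it matches what was sent, and clears it, which is used here as
 * confirmation that the posted writes of the transfer have reached the host
 * before the transfer is reported complete.
 */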

static int process_cmpl_event(const struct device *dev,
			      enum ring_idx idx, uint32_t pl_len)
{
	struct dma_iproc_pax_data *pd = PAX_DMA_DEV_DATA(dev);
	uint32_t wr_offs, rd_offs;
	int ret = 0;
	struct dma_iproc_pax_ring_data *ring = &(pd->ring[idx]);
	struct cmpl_pkt *c;
	uint32_t is_outstanding;

	/* cmpl read offset, unprocessed cmpl location */
	rd_offs = ring->curr.cmpl_rd_offs;

	wr_offs = sys_read32(RM_RING_REG(pd, idx,
					 RING_CMPL_WRITE_PTR));

	/* Update read ptr to "processed" */
	ring->curr.cmpl_rd_offs = wr_offs;

	/*
	 * Ensure consistency of the completion descriptor.
	 * The completion desc is updated by the RM via AXI stream, so the
	 * CPU needs to ensure those memory operations are complete before
	 * reading the cmpl area, by issuing a "dsb".
	 * If the Dcache is enabled, the cachelines must also be invalidated
	 * to read the updated cmpl desc. The cache API issues a dsb as well.
	 */
	dma_mb();

	/* Decode cmpl pkt id to verify */
	c = (struct cmpl_pkt *)((uintptr_t)ring->cmpl +
	    PAX_DMA_CMPL_DESC_SIZE * PAX_DMA_CURR_CMPL_IDX(wr_offs));

	LOG_DBG("RING%d WR_PTR:%d opq:%d, rm_status:%x dma_status:%x\n",
		idx, wr_offs, c->opq, c->rm_status, c->dma_status);

	is_outstanding = sys_read32(RM_RING_REG(pd, idx,
						RING_NUM_REQ_OUTSTAND));
	if ((ring->curr.opq != c->opq) && (is_outstanding != 0)) {
		LOG_ERR("RING%d: pkt id should be %d, rcvd %d outst=%d\n",
			idx, ring->curr.opq, c->opq, is_outstanding);
		ret = -EIO;
	}
	/* check for completion AE timeout */
	if (c->rm_status == RM_COMPLETION_AE_TIMEOUT) {
		LOG_ERR("RING%d WR_PTR:%d rm_status:%x AE Timeout!\n",
			idx, wr_offs, c->rm_status);
		/* TBD: Issue full card reset to restore operations */
		LOG_ERR("Needs Card Reset to recover!\n");
		ret = -ETIMEDOUT;
	}

	if (ring->dma_callback) {
		ring->dma_callback(dev, ring->callback_arg, idx, ret);
	}

	/* clear total packet count for the next transfer */
	ring->total_pkt_count = 0;

	return ret;
}
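
/*
 * Note on the opaque-id check above: a mismatch is treated as an error only
 * while requests are still outstanding on the ring; once the ring has fully
 * drained, the completion slot read here may simply be stale, so presumably
 * that case is intentionally not flagged.
 */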

#ifdef CONFIG_DMA_IPROC_PAX_POLL_MODE
static int peek_ring_cmpl(const struct device *dev,
			  enum ring_idx idx, uint32_t pl_len)
{
	struct dma_iproc_pax_data *pd = PAX_DMA_DEV_DATA(dev);
	uint32_t wr_offs, rd_offs, timeout = PAX_DMA_MAX_POLL_WAIT;
	struct dma_iproc_pax_ring_data *ring = &(pd->ring[idx]);

	/* cmpl read offset, unprocessed cmpl location */
	rd_offs = ring->curr.cmpl_rd_offs;

	/* poll write_ptr until cmpl received for all buffers */
	do {
		wr_offs = sys_read32(RM_RING_REG(pd, idx,
						 RING_CMPL_WRITE_PTR));
		if (PAX_DMA_GET_CMPL_COUNT(wr_offs, rd_offs) >= pl_len) {
			break;
		}
		k_busy_wait(1);
	} while (--timeout);

	if (timeout == 0) {
		LOG_ERR("RING%d timeout, rcvd %d, expected %d!\n",
			idx, PAX_DMA_GET_CMPL_COUNT(wr_offs, rd_offs), pl_len);
		/* More debug info on current dma instance */
		LOG_ERR("WR_PTR:%x RD_PTR:%x\n", wr_offs, rd_offs);
		return -ETIMEDOUT;
	}

	return process_cmpl_event(dev, idx, pl_len);
}
#else
static void rm_isr(void *arg)
{
	uint32_t status, err_stat, idx;
	const struct device *dev = arg;
	struct dma_iproc_pax_data *pd = PAX_DMA_DEV_DATA(dev);

	err_stat =
	sys_read32(RM_COMM_REG(pd,
			       RM_COMM_AE_INTERFACE_GROUP_0_INTERRUPT_MASK));
	sys_write32(err_stat,
		    RM_COMM_REG(pd,
				RM_COMM_AE_INTERFACE_GROUP_0_INTERRUPT_CLEAR));

	/* alert waiting thread to process, for each completed ring */
	for (idx = PAX_DMA_RING0; idx < PAX_DMA_RINGS_MAX; idx++) {
		status =
		sys_read32(RM_RING_REG(pd, idx,
				       RING_COMPLETION_INTERRUPT_STAT));
		sys_write32(status,
			    RM_RING_REG(pd, idx,
					RING_COMPLETION_INTERRUPT_STAT_CLEAR));
		if (status & 0x1) {
			k_sem_give(&pd->ring[idx].alert);
		}
	}
}
#endif

static int dma_iproc_pax_init(const struct device *dev)
{
	struct dma_iproc_pax_cfg *cfg = PAX_DMA_DEV_CFG(dev);
	struct dma_iproc_pax_data *pd = PAX_DMA_DEV_DATA(dev);
	int r;
	uintptr_t mem_aligned;

	pd->dma_base = cfg->dma_base;
	pd->rm_comm_base = cfg->rm_comm_base;
	pd->used_rings = (cfg->use_rings < PAX_DMA_RINGS_MAX) ?
			 cfg->use_rings : PAX_DMA_RINGS_MAX;

	/* dma/rm access lock */
	k_mutex_init(&pd->dma_lock);

	/* Ring Manager H/W init */
	if (init_rm(pd)) {
		return -ETIMEDOUT;
	}

	/* common rm config */
	rm_cfg_start(pd);

	/* individual ring config */
	for (r = 0; r < pd->used_rings; r++) {
		/* per-ring mutex lock */
		k_mutex_init(&pd->ring[r].lock);
		/* Init alerts */
		k_sem_init(&pd->ring[r].alert, 0, 1);

		pd->ring[r].idx = r;
		pd->ring[r].ring_base = cfg->rm_base +
					PAX_DMA_RING_ADDR_OFFSET(r);
		LOG_DBG("RING%d,VERSION:0x%x\n", pd->ring[r].idx,
			sys_read32(RM_RING_REG(pd, r, RING_VER)));

		/* Allocate for 2 BD buffers + cmpl buffer + sync location */
		pd->ring[r].ring_mem = (void *)((uintptr_t)cfg->bd_memory_base +
					r * PAX_DMA_PER_RING_ALLOC_SIZE);
		if (!pd->ring[r].ring_mem) {
			LOG_ERR("RING%d failed to alloc desc memory!\n", r);
			return -ENOMEM;
		}
		/* Find 8K aligned address within allocated region */
		mem_aligned = ((uintptr_t)pd->ring[r].ring_mem +
			       PAX_DMA_RING_ALIGN - 1) &
			       ~(PAX_DMA_RING_ALIGN - 1);

		pd->ring[r].cmpl = (void *)mem_aligned;
		pd->ring[r].bd = (void *)(mem_aligned +
					  PAX_DMA_RM_CMPL_RING_SIZE);
		pd->ring[r].sync_loc = (void *)((uintptr_t)pd->ring[r].bd +
				      PAX_DMA_RM_DESC_RING_SIZE *
				      PAX_DMA_NUM_BD_BUFFS);

		LOG_DBG("Ring%d,allocated Mem:0x%p Size %d\n",
			pd->ring[r].idx,
			pd->ring[r].ring_mem,
			PAX_DMA_PER_RING_ALLOC_SIZE);
		LOG_DBG("Ring%d,BD:0x%p, CMPL:0x%p, SYNC_LOC:0x%p\n",
			pd->ring[r].idx,
			pd->ring[r].bd,
			pd->ring[r].cmpl,
			pd->ring[r].sync_loc);

		/* Prepare ring desc table */
		prepare_ring(&(pd->ring[r]));

		/* initialize ring */
		init_ring(pd, r);
	}

	/* set ring config done */
	rm_cfg_finish(pd);

#ifndef CONFIG_DMA_IPROC_PAX_POLL_MODE
	/* Register and enable RM interrupt */
	IRQ_CONNECT(DT_INST_IRQN(0),
		    DT_INST_IRQ(0, priority),
		    rm_isr,
		    DEVICE_DT_INST_GET(0),
		    0);
	irq_enable(DT_INST_IRQN(0));
#else
	LOG_INF("%s PAX DMA rings in poll mode!\n", PAX_DMA_DEV_NAME(dev));
#endif
	LOG_INF("%s RM setup %d rings\n", PAX_DMA_DEV_NAME(dev),
		pd->used_rings);

	return 0;
}

static int dma_iproc_pax_gen_desc(struct dma_iproc_pax_ring_data *ring,
				  bool is_mega,
				  uint64_t pci_addr,
				  uint64_t axi_addr,
				  uint32_t length,
				  enum pax_dma_dir dir,
				  uint32_t *non_hdr_bd_count)
{
	struct rm_header *hdr;

	if (*non_hdr_bd_count == 0) {
		/* Generate Header BD */
		ring->current_hdr = (uintptr_t)get_curr_desc_addr(ring);
		rm_write_header_desc((void *)ring->current_hdr,
				     curr_toggle_val(ring),
				     curr_pkt_id(ring),
				     PAX_DMA_RM_DESC_BDCOUNT,
				     pci_addr);
		ring->total_pkt_count++;
	}

	rm_write_pcie_desc(get_curr_desc_addr(ring),
			   curr_toggle_val(ring), pci_addr);
	*non_hdr_bd_count = *non_hdr_bd_count + 1;
	rm_write_src_dst_desc(get_curr_desc_addr(ring),
			      is_mega, curr_toggle_val(ring),
			      axi_addr, length, dir);
	*non_hdr_bd_count = *non_hdr_bd_count + 1;

	/* Update Header BD with bd count */
	hdr = (struct rm_header *)ring->current_hdr;
	hdr->bdcount = *non_hdr_bd_count;
	if (*non_hdr_bd_count == MAX_BD_COUNT_PER_HEADER) {
		*non_hdr_bd_count = 0;
	}

	return 0;
}
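
/*
 * Each call above adds one PCIe descriptor plus one SRC/DST descriptor to
 * the current RM packet and refreshes the header's bdcount. A new RM header
 * is emitted whenever the running non-header BD count is zero, i.e. for the
 * first segment and again each time the count reaches
 * MAX_BD_COUNT_PER_HEADER and wraps.
 */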

static int dma_iproc_pax_gen_packets(const struct device *dev,
				     struct dma_iproc_pax_ring_data *ring,
				     uint32_t direction,
				     struct dma_block_config *config,
				     uint32_t *non_hdr_bd_count)
{
	uint32_t outstanding, remaining_len;
	uint32_t offset, curr, mega_len;
	uint64_t axi_addr;
	uint64_t pci_addr;
	enum pax_dma_dir dir;

	switch (direction) {
	case MEMORY_TO_PERIPHERAL:
		pci_addr = config->dest_address;
		axi_addr = config->source_address;
		dir = CARD_TO_HOST;
		break;
	case PERIPHERAL_TO_MEMORY:
		axi_addr = config->dest_address;
		pci_addr = config->source_address;
		dir = HOST_TO_CARD;
		break;
	default:
		LOG_ERR("unsupported transfer direction");
		return -EINVAL;
	}

	outstanding = config->block_size;
	offset = 0;
	while (outstanding) {
		curr = MIN(outstanding, PAX_DMA_MAX_SZ_PER_BD);
		mega_len = curr / PAX_DMA_MEGA_LENGTH_MULTIPLE;
		remaining_len = curr % PAX_DMA_MEGA_LENGTH_MULTIPLE;
		pci_addr = pci_addr + offset;
		axi_addr = axi_addr + offset;

		if (mega_len) {
			dma_iproc_pax_gen_desc(ring, true, pci_addr,
					       axi_addr, mega_len, dir,
					       non_hdr_bd_count);
			offset = offset + mega_len *
				PAX_DMA_MEGA_LENGTH_MULTIPLE;
		}

		if (remaining_len) {
			pci_addr = pci_addr + offset;
			axi_addr = axi_addr + offset;
			dma_iproc_pax_gen_desc(ring, false, pci_addr, axi_addr,
					       remaining_len, dir,
					       non_hdr_bd_count);
			offset = offset + remaining_len;
		}

		outstanding = outstanding - curr;
	}

	return 0;
}
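
/*
 * A block is chopped into chunks of at most PAX_DMA_MAX_SZ_PER_BD bytes.
 * The part of each chunk that is a multiple of PAX_DMA_MEGA_LENGTH_MULTIPLE
 * is carried by a single "mega" SRC/DST descriptor whose length field counts
 * those units; any remainder goes out as a regular descriptor with a byte
 * length.
 */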

#ifdef CONFIG_DMA_IPROC_PAX_POLL_MODE
static void set_pkt_count(const struct device *dev,
			  enum ring_idx idx,
			  uint32_t pl_len)
{
	/* Nothing needs to be programmed here in poll mode */
}

static int wait_for_pkt_completion(const struct device *dev,
				   enum ring_idx idx,
				   uint32_t pl_len)
{
	/* poll for completion */
	return peek_ring_cmpl(dev, idx, pl_len);
}
#else
static void set_pkt_count(const struct device *dev,
			  enum ring_idx idx,
			  uint32_t pl_len)
{
	struct dma_iproc_pax_data *pd = PAX_DMA_DEV_DATA(dev);
	uint32_t val;

	/* program packet count for interrupt assertion */
	val = sys_read32(RM_RING_REG(pd, idx,
				     RING_CMPL_WR_PTR_DDR_CONTROL));
	val &= ~RING_DDR_CONTROL_COUNT_MASK;
	val |= RING_DDR_CONTROL_COUNT(pl_len);
	sys_write32(val, RM_RING_REG(pd, idx,
				     RING_CMPL_WR_PTR_DDR_CONTROL));
}

static int wait_for_pkt_completion(const struct device *dev,
				   enum ring_idx idx,
				   uint32_t pl_len)
{
	struct dma_iproc_pax_data *pd = PAX_DMA_DEV_DATA(dev);
	struct dma_iproc_pax_ring_data *ring;

	ring = &(pd->ring[idx]);
	/* wait for sg dma completion alert */
	if (k_sem_take(&ring->alert, K_MSEC(PAX_DMA_TIMEOUT)) != 0) {
		LOG_ERR("PAX DMA [ring %d] Timeout!\n", idx);
		return -ETIMEDOUT;
	}

	return process_cmpl_event(dev, idx, pl_len);
}
#endif

static int dma_iproc_pax_process_dma_blocks(const struct device *dev,
					    enum ring_idx idx,
					    struct dma_config *config)
{
	struct dma_iproc_pax_data *pd = PAX_DMA_DEV_DATA(dev);
	struct dma_iproc_pax_cfg *cfg = PAX_DMA_DEV_CFG(dev);
	int ret = 0;
	struct dma_iproc_pax_ring_data *ring;
	uint32_t toggle_bit, non_hdr_bd_count = 0;
	struct dma_block_config sync_pl;
	struct dma_iproc_pax_addr64 sync;
	struct dma_block_config *block_config = config->head_block;

	if (block_config == NULL) {
		LOG_ERR("head_block is NULL\n");
		return -EINVAL;
	}

	ring = &(pd->ring[idx]);

	/*
	 * The host sync buffer isn't ready at zephyr/driver init time.
	 * Read the host address location once, at the first DMA write
	 * on that ring.
	 */
	if ((ring->sync_pci.addr_lo == 0x0) &&
	    (ring->sync_pci.addr_hi == 0x0)) {
		/* populate sync data location */
		LOG_DBG("sync addr loc 0x%x\n", cfg->scr_addr_loc);
		sync.addr_lo = sys_read32(cfg->scr_addr_loc + 4);
		sync.addr_hi = sys_read32(cfg->scr_addr_loc);
		ring->sync_pci.addr_lo = sync.addr_lo + idx * 4;
		ring->sync_pci.addr_hi = sync.addr_hi;
		LOG_DBG("ring:%d,sync addr:0x%x.0x%x\n", idx,
			ring->sync_pci.addr_hi,
			ring->sync_pci.addr_lo);
	}

	/* account extra sync packet */
	ring->curr.sync_data.opaque = ring->curr.opq;
	ring->curr.sync_data.total_pkts = config->block_count;
	memcpy((void *)ring->sync_loc,
	       (void *)&(ring->curr.sync_data), 4);
	sync_pl.dest_address = ring->sync_pci.addr_lo |
			   (uint64_t)ring->sync_pci.addr_hi << 32;
	sync_pl.source_address = (uintptr_t)ring->sync_loc;
	sync_pl.block_size = 4; /* 4-bytes */

	/* current toggle bit */
	toggle_bit = ring->curr.toggle;
	/* current opq value for cmpl check */
	ring->curr.opq = curr_pkt_id(ring);

	/* Form descriptors for total block counts */
	while (block_config != NULL) {
		ret = dma_iproc_pax_gen_packets(dev, ring,
						config->channel_direction,
						block_config,
						&non_hdr_bd_count);
		if (ret) {
			goto err;
		}
		block_config = block_config->next_block;
	}

	/*
	 * The write-sync payload descriptors should go in a separate RM
	 * header, as the RM implementation requires all BDs within a header
	 * packet to have the same data transfer direction. Resetting
	 * non_hdr_bd_count to 0 forces a separate packet to be generated.
	 */
	non_hdr_bd_count = 0;
	dma_iproc_pax_gen_packets(dev, ring, MEMORY_TO_PERIPHERAL, &sync_pl,
				  &non_hdr_bd_count);

	alloc_pkt_id(ring);
err:
	return ret;
}

static int dma_iproc_pax_configure(const struct device *dev, uint32_t channel,
				   struct dma_config *cfg)
{
	struct dma_iproc_pax_data *pd = PAX_DMA_DEV_DATA(dev);
	struct dma_iproc_pax_ring_data *ring;
	int ret = 0;

	if (channel >= PAX_DMA_RINGS_MAX) {
		LOG_ERR("Invalid ring/channel %d\n", channel);
		return -EINVAL;
	}

	ring = &(pd->ring[channel]);
	k_mutex_lock(&ring->lock, K_FOREVER);

	if (ring->ring_active) {
		ret = -EBUSY;
		goto err;
	}

	if (cfg->block_count >= RM_V2_MAX_BLOCK_COUNT) {
		LOG_ERR("DMA block count [%d] exceeds supported limit [%d]\n",
			cfg->block_count, RM_V2_MAX_BLOCK_COUNT);
		ret = -ENOTSUP;
		goto err;
	}

	ring->ring_active = 1;
	ret = dma_iproc_pax_process_dma_blocks(dev, channel, cfg);

	if (ret) {
		ring->ring_active = 0;
		goto err;
	}

	ring->dma_callback = cfg->dma_callback;
	ring->callback_arg = cfg->user_data;
err:
	k_mutex_unlock(&ring->lock);
	return ret;
}

static int dma_iproc_pax_transfer_start(const struct device *dev,
					uint32_t channel)
{
	int ret = 0;
	struct dma_iproc_pax_data *pd = PAX_DMA_DEV_DATA(dev);
	struct dma_iproc_pax_ring_data *ring;

	if (channel >= PAX_DMA_RINGS_MAX) {
		LOG_ERR("Invalid ring %d\n", channel);
		return -EINVAL;
	}

	ring = &(pd->ring[channel]);
	set_pkt_count(dev, channel, ring->total_pkt_count);

#ifdef CONFIG_DMA_IPROC_PAX_DOORBELL_MODE
	write_doorbell(pd, channel);
#elif CONFIG_DMA_IPROC_PAX_TOGGLE_MODE
	/* activate the ring */
	set_ring_active(pd, channel, true);
#endif

	ret = wait_for_pkt_completion(dev, channel, ring->total_pkt_count);
	if (ret) {
		goto err_ret;
	}

	ret = poll_on_write_sync(dev, ring);

err_ret:
	k_mutex_lock(&ring->lock, K_FOREVER);
	ring->ring_active = 0;
	k_mutex_unlock(&ring->lock);

#ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE
	/* deactivate the ring until next active transfer */
	set_ring_active(pd, channel, false);
#endif
	return ret;
}

static int dma_iproc_pax_transfer_stop(const struct device *dev,
				       uint32_t channel)
{
	return 0;
}

static const struct dma_driver_api pax_dma_driver_api = {
	.config = dma_iproc_pax_configure,
	.start = dma_iproc_pax_transfer_start,
	.stop = dma_iproc_pax_transfer_stop,
};
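
/*
 * Minimal usage sketch (hypothetical client code, not part of this driver;
 * names such as card_axi_addr, host_pci_addr, xfer_len and ring_idx are
 * placeholders): configure a ring/channel with a single block and kick it
 * off through the generic Zephyr DMA API, which lands in the config/start
 * handlers registered above.
 *
 *	struct dma_block_config blk = {
 *		.source_address = card_axi_addr,
 *		.dest_address = host_pci_addr,
 *		.block_size = xfer_len,
 *	};
 *	struct dma_config cfg = {
 *		.channel_direction = MEMORY_TO_PERIPHERAL,
 *		.block_count = 1,
 *		.head_block = &blk,
 *	};
 *
 *	ret = dma_config(dev, ring_idx, &cfg);
 *	if (ret == 0) {
 *		ret = dma_start(dev, ring_idx);
 *	}
 */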

static const struct dma_iproc_pax_cfg pax_dma_cfg = {
	.dma_base = DT_INST_REG_ADDR_BY_NAME(0, dme_regs),
	.rm_base = DT_INST_REG_ADDR_BY_NAME(0, rm_ring_regs),
	.rm_comm_base = DT_INST_REG_ADDR_BY_NAME(0, rm_comm_regs),
	.use_rings = DT_INST_PROP(0, dma_channels),
	.bd_memory_base = (void *)DT_INST_PROP_BY_IDX(0, bd_memory, 0),
	.scr_addr_loc = DT_INST_PROP(0, scr_addr_loc),
	.pcie_dev_name = DT_INST_PROP_BY_PHANDLE(0, pcie_ep, label),
};

DEVICE_DT_INST_DEFINE(0,
		    &dma_iproc_pax_init,
		    NULL,
		    &pax_dma_data,
		    &pax_dma_cfg,
		    POST_KERNEL,
		    CONFIG_KERNEL_INIT_PRIORITY_DEVICE,
		    &pax_dma_driver_api);