/*
 * Copyright (c) 2023 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT snps_designware_dma_axi

#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/logging/log.h>
#include <zephyr/drivers/reset.h>
#include <zephyr/cache.h>

LOG_MODULE_REGISTER(dma_designware_axi, CONFIG_DMA_LOG_LEVEL);

#define DEV_CFG(_dev)	((const struct dma_dw_axi_dev_cfg *)(_dev)->config)
#define DEV_DATA(_dev)		((struct dma_dw_axi_dev_data *const)(_dev)->data)

/* mask for block transfer size */
#define BLOCK_TS_MASK GENMASK(21, 0)

/* blen: number of data units per burst
 * blen will always be a power of two
 *
 * when blen is 1, set msize to zero; otherwise find the most significant bit set
 * and subtract two (the IP does not support a burst of 2 data items)
 */
#define DMA_DW_AXI_GET_MSIZE(blen) ((blen == 1) ? (0U) : (find_msb_set(blen) - 2U))
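/* e.g. blen = 4 gives msize = 1 and blen = 8 gives msize = 2, which the IP
 * interprets as bursts of 4 and 8 data items respectively (assumption based on
 * the usual msize encoding of 0 = 1, 1 = 4, 2 = 8, ... data items)
 */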

/* Common_Registers_Address_Block */
#define DMA_DW_AXI_IDREG                         0x0
#define DMA_DW_AXI_COMPVERREG                    0x08
#define DMA_DW_AXI_CFGREG                        0x10
#define DMA_DW_AXI_CHENREG                       0x18
#define DMA_DW_AXI_INTSTATUSREG                  0x30
#define DMA_DW_AXI_COMMONREG_INTCLEARREG         0x38
#define DMA_DW_AXI_COMMONREG_INTSTATUS_ENABLEREG 0x40
#define DMA_DW_AXI_COMMONREG_INTSIGNAL_ENABLEREG 0x48
#define DMA_DW_AXI_COMMONREG_INTSTATUSREG        0x50
#define DMA_DW_AXI_RESETREG                      0x58
#define DMA_DW_AXI_LOWPOWER_CFGREG               0x60

/* Channel enable by setting ch_en and ch_en_we */
#define CH_EN(chan)    (BIT64(8 + chan) | BIT64(chan))
/* Channel suspend by setting ch_susp and ch_susp_we */
#define CH_SUSP(chan)  (BIT64(24 + chan) | BIT64(16 + chan))
/* Channel abort by setting ch_abort and ch_abort_we */
#define CH_ABORT(chan) (BIT64(40 + chan) | BIT64(32 + chan))

/* channel susp/resume write enable bit mask */
#define CH_RESUME_WE(chan) (BIT64(24 + chan))
/* channel resume bit mask */
#define CH_RESUME(chan)    (BIT64(16 + chan))

#define DMA_DW_AXI_CHAN_OFFSET(chan)             (0x100 * chan)

/* source address register for a channel */
#define DMA_DW_AXI_CH_SAR(chan)                  (0x100 + DMA_DW_AXI_CHAN_OFFSET(chan))
/* destination address register for a channel */
#define DMA_DW_AXI_CH_DAR(chan)                  (0x108 + DMA_DW_AXI_CHAN_OFFSET(chan))
/* block transfer size register for a channel */
#define DMA_DW_AXI_CH_BLOCK_TS(chan)             (0x110 + DMA_DW_AXI_CHAN_OFFSET(chan))
/* channel control register */
#define DMA_DW_AXI_CH_CTL(chan)                  (0x118 + DMA_DW_AXI_CHAN_OFFSET(chan))
/* channel configuration register */
#define DMA_DW_AXI_CH_CFG(chan)                  (0x120 + DMA_DW_AXI_CHAN_OFFSET(chan))
/* linked list pointer register */
#define DMA_DW_AXI_CH_LLP(chan)                  (0x128 + DMA_DW_AXI_CHAN_OFFSET(chan))
/* channel status register */
#define DMA_DW_AXI_CH_STATUSREG(chan)            (0x130 + DMA_DW_AXI_CHAN_OFFSET(chan))
/* channel software handshake source register */
#define DMA_DW_AXI_CH_SWHSSRCREG(chan)           (0x138 + DMA_DW_AXI_CHAN_OFFSET(chan))
/* channel software handshake destination register */
#define DMA_DW_AXI_CH_SWHSDSTREG(chan)           (0x140 + DMA_DW_AXI_CHAN_OFFSET(chan))
/* channel block transfer resume request register */
#define DMA_DW_AXI_CH_BLK_TFR_RESUMEREQREG(chan) (0x148 + DMA_DW_AXI_CHAN_OFFSET(chan))
/* channel AXI ID register */
#define DMA_DW_AXI_CH_AXI_IDREG(chan)            (0x150 + DMA_DW_AXI_CHAN_OFFSET(chan))
/* channel AXI QOS register */
#define DMA_DW_AXI_CH_AXI_QOSREG(chan)           (0x158 + DMA_DW_AXI_CHAN_OFFSET(chan))
/* channel interrupt status enable register */
#define DMA_DW_AXI_CH_INTSTATUS_ENABLEREG(chan)  (0x180 + DMA_DW_AXI_CHAN_OFFSET(chan))
/* channel interrupt status register */
#define DMA_DW_AXI_CH_INTSTATUS(chan)            (0x188 + DMA_DW_AXI_CHAN_OFFSET(chan))
/* channel interrupt signal enable register */
#define DMA_DW_AXI_CH_INTSIGNAL_ENABLEREG(chan)  (0x190 + DMA_DW_AXI_CHAN_OFFSET(chan))
/* channel interrupt clear register */
#define DMA_DW_AXI_CH_INTCLEARREG(chan)          (0x198 + DMA_DW_AXI_CHAN_OFFSET(chan))

/* bitfield configuration for multi-block transfer */
#define DMA_DW_AXI_CFG_SRC_MULTBLK_TYPE(x)       FIELD_PREP(GENMASK64(1, 0), x)
#define DMA_DW_AXI_CFG_DST_MULTBLK_TYPE(x)       FIELD_PREP(GENMASK64(3, 2), x)

/* bitfield configuration to assign handshaking interface to source and destination */
#define DMA_DW_AXI_CFG_SRC_PER(x)                FIELD_PREP(GENMASK64(9, 4), x)
#define DMA_DW_AXI_CFG_DST_PER(x)                FIELD_PREP(GENMASK64(16, 11), x)

/* bitfield configuration for transfer type and flow controller */
#define DMA_DW_AXI_CFG_TT_FC(x)                  FIELD_PREP(GENMASK64(34, 32), x)

#define DMA_DW_AXI_CFG_HW_HS_SRC_BIT_POS         35
#define DMA_DW_AXI_CFG_HW_HS_DST_BIT_POS         36

#define DMA_DW_AXI_CFG_PRIORITY(x)               FIELD_PREP(GENMASK64(51, 47), x)

/* descriptor valid or not */
#define DMA_DW_AXI_CTL_LLI_VALID                 BIT64(63)
/* descriptor is last or not in a link */
#define DMA_DW_AXI_CTL_LLI_LAST                  BIT64(62)
/* interrupt on completion of block transfer */
#define DMA_DW_AXI_CTL_IOC_BLK_TFR               BIT64(58)
/* source status enable bit */
#define DMA_DW_AXI_CTL_SRC_STAT_EN               BIT64(56)
/* destination status enable bit */
#define DMA_DW_AXI_CTL_DST_STAT_EN               BIT64(57)
/* source burst length enable */
#define DMA_DW_AXI_CTL_ARLEN_EN                  BIT64(38)
/* source burst length (considered when the corresponding enable bit is set) */
#define DMA_DW_AXI_CTL_ARLEN(x)                  FIELD_PREP(GENMASK64(46, 39), x)
/* destination burst length enable */
#define DMA_DW_AXI_CTL_AWLEN_EN                  BIT64(47)
/* destination burst length (considered when the corresponding enable bit is set) */
#define DMA_DW_AXI_CTL_AWLEN(x)                  FIELD_PREP(GENMASK64(55, 48), x)

/* source burst transaction length */
#define DMA_DW_AXI_CTL_SRC_MSIZE(x)              FIELD_PREP(GENMASK64(17, 14), x)
/* destination burst transaction length */
#define DMA_DW_AXI_CTL_DST_MSIZE(x)              FIELD_PREP(GENMASK64(21, 18), x)
/* source transfer width */
#define DMA_DW_AXI_CTL_SRC_WIDTH(x)              FIELD_PREP(GENMASK64(10, 8), x)
/* destination transfer width */
#define DMA_DW_AXI_CTL_DST_WIDTH(x)              FIELD_PREP(GENMASK64(13, 11), x)

/* mask all the interrupts */
#define DMA_DW_AXI_IRQ_NONE                      0
/* enable block transfer completion interrupt */
#define DMA_DW_AXI_IRQ_BLOCK_TFR                 BIT64(0)
/* enable transfer completion interrupt */
#define DMA_DW_AXI_IRQ_DMA_TFR                   BIT64(1)
/* enable interrupts on any dma transfer error */
#define DMA_DW_AXI_IRQ_ALL_ERR                   (GENMASK64(14, 5) | GENMASK64(21, 16))

/* global enable bit for dma controller */
#define DMA_DW_AXI_CFG_EN                        BIT64(0)
/* global enable bit for interrupt */
#define DMA_DW_AXI_CFG_INT_EN                    BIT64(1)

/* descriptor used by dw axi dma controller */
struct dma_lli {
	uint64_t sar;
	uint64_t dar;
	uint32_t block_ts_lo;
	uint32_t reserved;
	uint64_t llp;
	uint64_t ctl;
	uint32_t sstat;
	uint32_t dstat;
	uint64_t llp_status;
	uint64_t reserved1;
} __aligned(64);

/* status of the channel */
enum dma_dw_axi_ch_state {
	DMA_DW_AXI_CH_IDLE,
	DMA_DW_AXI_CH_SUSPENDED,
	DMA_DW_AXI_CH_ACTIVE,
	DMA_DW_AXI_CH_PREPARED,
};

/* source and destination transfer width */
enum dma_dw_axi_ch_width {
	BITS_8,
	BITS_16,
	BITS_32,
	BITS_64,
	BITS_128,
	BITS_256,
	BITS_512,
};

/* transfer direction and flow controller */
enum dma_dw_axi_tt_fc {
	M2M_DMAC,
	M2P_DMAC,
	P2M_DMAC,
	P2P_DMAC,
	P2M_SRC,
	P2P_SRC,
	M2P_DST,
	P2P_DST,
};

/* type of multi-block transfer */
enum dma_dw_axi_multi_blk_type {
	MULTI_BLK_CONTIGUOUS,
	MULTI_BLK_RELOAD,
	MULTI_BLK_SHADOW_REG,
	MULTI_BLK_LLI,
};

/* dma driver channel specific information */
struct dma_dw_axi_ch_data {
	/* lli descriptor base */
	struct dma_lli *lli_desc_base;
	/* lli current descriptor */
	struct dma_lli *lli_desc_current;
	/* dma channel state */
	enum dma_dw_axi_ch_state ch_state;
	/* direction of transfer */
	uint32_t direction;
	/* number of descriptors */
	uint32_t lli_desc_count;
	/* cfg register configuration for dma transfer */
	uint64_t cfg;
	/* mask and unmask interrupts */
	uint64_t irq_unmask;
	/* user callback for dma transfer completion */
	dma_callback_t dma_xfer_callback;
	/* user data for dma callback for dma transfer completion */
	void *priv_data_xfer;
	/* user callback for dma block transfer completion */
	dma_callback_t dma_blk_xfer_callback;
	/* user data for dma callback for dma block transfer completion */
	void *priv_data_blk_tfr;
};

/* dma controller driver data structure */
struct dma_dw_axi_dev_data {
	/* dma context */
	struct dma_context dma_ctx;

	/* mmio address mapping info for dma controller */
	DEVICE_MMIO_NAMED_RAM(dma_mmio);
	/* pointer to store channel specific info */
	struct dma_dw_axi_ch_data *chan;
	/* pointer to hold descriptor base address */
	struct dma_lli *dma_desc_pool;
};

/* Device constant configuration parameters */
struct dma_dw_axi_dev_cfg {
	/* dma address space to map */
	DEVICE_MMIO_NAMED_ROM(dma_mmio);

#if DT_ANY_INST_HAS_PROP_STATUS_OKAY(resets)
	/* Reset controller device configurations */
	const struct reset_dt_spec reset;
#endif
	/* dma controller interrupt configuration function pointer */
	void (*irq_config)(void);
};

/**
 * @brief get current status of the channel
 *
 * @param dev Pointer to the device structure for the driver instance
 * @param ch channel number
 *
 * @retval status of the channel
 */
static enum dma_dw_axi_ch_state dma_dw_axi_get_ch_status(const struct device *dev, uint32_t ch)
{
	uint32_t bit_status;
	uint64_t ch_status;
	uintptr_t reg_base = DEVICE_MMIO_NAMED_GET(dev, dma_mmio);

	ch_status = sys_read64(reg_base + DMA_DW_AXI_CHENREG);

	/* channel is active/busy in the dma transfer */
	bit_status = ((ch_status >> ch) & 1);
	if (bit_status) {
		return DMA_DW_AXI_CH_ACTIVE;
	}

	/* channel is currently suspended */
	bit_status = ((ch_status >> (16 + ch)) & 1);
	if (bit_status) {
		return DMA_DW_AXI_CH_SUSPENDED;
	}

	/* channel is idle */
	return DMA_DW_AXI_CH_IDLE;
}

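/* per-channel interrupt handler: dispatches transfer-complete, block-complete
 * and error interrupts to the callbacks registered in dma_dw_axi_config()
 */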
static void dma_dw_axi_isr(const struct device *dev)
{
	unsigned int channel;
	uint64_t status, ch_status;
	int ret_status = 0;
	struct dma_dw_axi_ch_data *chan_data;
	uintptr_t reg_base = DEVICE_MMIO_NAMED_GET(dev, dma_mmio);
	struct dma_dw_axi_dev_data *const dw_dev_data = DEV_DATA(dev);

	/* read the interrupt status register to find which channel raised the interrupt */
	status = sys_read64(reg_base + DMA_DW_AXI_INTSTATUSREG);
	if (status == 0) {
		LOG_ERR("Spurious interrupt received, no channel is pending\n");
		return;
	}
	channel = find_lsb_set(status) - 1;

	if (channel > (dw_dev_data->dma_ctx.dma_channels - 1)) {
		LOG_ERR("Interrupt received on invalid channel:%u\n", channel);
		return;
	}

	/* retrieve channel specific data pointer for a channel */
	chan_data = &dw_dev_data->chan[channel];

	/* get dma transfer status */
	ch_status = sys_read64(reg_base + DMA_DW_AXI_CH_INTSTATUS(channel));
	if (!ch_status) {
		LOG_ERR("Spurious interrupt received ch_status:0x%llx\n", ch_status);
		return;
	}

	/* handle dma transfer errors if any */
	if (ch_status & DMA_DW_AXI_IRQ_ALL_ERR) {
		sys_write64(DMA_DW_AXI_IRQ_ALL_ERR,
			reg_base + DMA_DW_AXI_CH_INTCLEARREG(channel));
		LOG_ERR("DMA Error: Channel:%d Channel interrupt status:0x%llx\n",
				channel, ch_status);
		ret_status = -(ch_status & DMA_DW_AXI_IRQ_ALL_ERR);
	}

	/* handle block transfer completion */
	if (ch_status & DMA_DW_AXI_IRQ_BLOCK_TFR) {
		sys_write64(DMA_DW_AXI_IRQ_ALL_ERR | DMA_DW_AXI_IRQ_BLOCK_TFR,
				reg_base + DMA_DW_AXI_CH_INTCLEARREG(channel));

		if (chan_data->dma_blk_xfer_callback) {
			chan_data->dma_blk_xfer_callback(dev,
				chan_data->priv_data_blk_tfr, channel, ret_status);
		}
	}

	/* handle dma transfer completion */
	if (ch_status & DMA_DW_AXI_IRQ_DMA_TFR) {
		sys_write64(DMA_DW_AXI_IRQ_ALL_ERR | DMA_DW_AXI_IRQ_DMA_TFR,
				reg_base + DMA_DW_AXI_CH_INTCLEARREG(channel));

		if (chan_data->dma_xfer_callback) {
			chan_data->dma_xfer_callback(dev, chan_data->priv_data_xfer,
						channel, ret_status);
			chan_data->ch_state = dma_dw_axi_get_ch_status(dev, channel);
		}
	}
}

/**
 * @brief set source and destination data width
 *
 * @param lli_desc Pointer to the descriptor
 * @param src_data_width source data width
 * @param dest_data_width destination data width
 *
 * @retval 0 on success, -ENOTSUP if the data width is not supported
 */
static int dma_dw_axi_set_data_width(struct dma_lli *lli_desc,
				uint32_t src_data_width, uint32_t dest_data_width)
{
	if (src_data_width > CONFIG_DMA_DW_AXI_DATA_WIDTH ||
			dest_data_width > CONFIG_DMA_DW_AXI_DATA_WIDTH) {
		LOG_ERR("transfer width more than %u not supported", CONFIG_DMA_DW_AXI_DATA_WIDTH);
		return -ENOTSUP;
	}

	switch (src_data_width) {
	case 1:
		/* one byte transfer */
		lli_desc->ctl |= DMA_DW_AXI_CTL_SRC_WIDTH(BITS_8);
		break;
	case 2:
		/* 2-bytes transfer width */
		lli_desc->ctl |= DMA_DW_AXI_CTL_SRC_WIDTH(BITS_16);
		break;
	case 4:
		/* 4-bytes transfer width */
		lli_desc->ctl |= DMA_DW_AXI_CTL_SRC_WIDTH(BITS_32);
		break;
	case 8:
		/* 8-bytes transfer width */
		lli_desc->ctl |= DMA_DW_AXI_CTL_SRC_WIDTH(BITS_64);
		break;
	case 16:
		/* 16-bytes transfer width */
		lli_desc->ctl |= DMA_DW_AXI_CTL_SRC_WIDTH(BITS_128);
		break;
	case 32:
		/* 32-bytes transfer width */
		lli_desc->ctl |= DMA_DW_AXI_CTL_SRC_WIDTH(BITS_256);
		break;
	case 64:
		/* 64-bytes transfer width */
		lli_desc->ctl |= DMA_DW_AXI_CTL_SRC_WIDTH(BITS_512);
		break;
	default:
		LOG_ERR("Source transfer width not supported");
		return -ENOTSUP;
	}

	switch (dest_data_width) {
	case 1:
		/* one byte transfer */
		lli_desc->ctl |= DMA_DW_AXI_CTL_DST_WIDTH(BITS_8);
		break;
	case 2:
		/* 2-bytes transfer width */
		lli_desc->ctl |= DMA_DW_AXI_CTL_DST_WIDTH(BITS_16);
		break;
	case 4:
		/* 4-bytes transfer width */
		lli_desc->ctl |= DMA_DW_AXI_CTL_DST_WIDTH(BITS_32);
		break;
	case 8:
		/* 8-bytes transfer width */
		lli_desc->ctl |= DMA_DW_AXI_CTL_DST_WIDTH(BITS_64);
		break;
	case 16:
		/* 16-bytes transfer width */
		lli_desc->ctl |= DMA_DW_AXI_CTL_DST_WIDTH(BITS_128);
		break;
	case 32:
		/* 32-bytes transfer width */
		lli_desc->ctl |= DMA_DW_AXI_CTL_DST_WIDTH(BITS_256);
		break;
	case 64:
		/* 64-bytes transfer width */
		lli_desc->ctl |= DMA_DW_AXI_CTL_DST_WIDTH(BITS_512);
		break;
	default:
		LOG_ERR("Destination transfer width not supported");
		return -ENOTSUP;
	}

	return 0;
}

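/* configure a dma channel: validate the request, then build the channel
 * configuration and the lli descriptor chain for the requested blocks
 */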
static int dma_dw_axi_config(const struct device *dev, uint32_t channel,
						 struct dma_config *cfg)
{
	int ret;
	uint32_t msize_src, msize_dst, i, ch_state;
	struct dma_dw_axi_ch_data *chan_data;
	struct dma_block_config *blk_cfg;
	struct dma_lli *lli_desc;
	struct dma_dw_axi_dev_data *const dw_dev_data = DEV_DATA(dev);

	/* check for invalid parameters before dereferencing them */
	if (cfg == NULL) {
		LOG_ERR("invalid dma config :%p", cfg);
		return -ENODATA;
	}

	/* check if the channel is valid */
	if (channel > (dw_dev_data->dma_ctx.dma_channels - 1)) {
		LOG_ERR("invalid dma channel %d", channel);
		return -EINVAL;
	}

	/* return if the channel is not idle */
	ch_state = dma_dw_axi_get_ch_status(dev, channel);
	if (ch_state != DMA_DW_AXI_CH_IDLE) {
		LOG_ERR("DMA channel:%d is not idle(status:%d)", channel, ch_state);
		return -EBUSY;
	}

	if (!cfg->block_count) {
		LOG_ERR("no blocks to transfer");
		return -EINVAL;
	}

	/* block count must not exceed the number of descriptors in the pool */
	if (cfg->block_count > CONFIG_DMA_DW_AXI_MAX_DESC) {
		LOG_ERR("dma:%s channel %d descriptor block count: %d larger than"
			" max descriptors in pool: %d", dev->name, channel,
			cfg->block_count, CONFIG_DMA_DW_AXI_MAX_DESC);
		return -EINVAL;
	}

	if (cfg->source_burst_length > CONFIG_DMA_DW_AXI_MAX_BURST_TXN_LEN ||
			cfg->dest_burst_length > CONFIG_DMA_DW_AXI_MAX_BURST_TXN_LEN ||
			cfg->source_burst_length == 0 || cfg->dest_burst_length == 0) {
		LOG_ERR("dma:%s burst length not supported", dev->name);
		return -ENOTSUP;
	}

	/* get channel specific data pointer */
	chan_data = &dw_dev_data->chan[channel];

	/* check if the channel is currently idle */
	if (chan_data->ch_state != DMA_DW_AXI_CH_IDLE) {
		LOG_ERR("DMA channel:%d is busy", channel);
		return -EBUSY;
	}

	/* burst transaction length for source and destination */
	msize_src = DMA_DW_AXI_GET_MSIZE(cfg->source_burst_length);
	msize_dst = DMA_DW_AXI_GET_MSIZE(cfg->dest_burst_length);

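	/* start from a clean channel configuration and interrupt mask */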
	chan_data->cfg = 0;
	chan_data->irq_unmask = 0;

	chan_data->direction = cfg->channel_direction;

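	/* carve this channel's descriptors out of the shared pool and clear them */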
	chan_data->lli_desc_base =
			&dw_dev_data->dma_desc_pool[channel * CONFIG_DMA_DW_AXI_MAX_DESC];
	chan_data->lli_desc_count = cfg->block_count;
	memset(chan_data->lli_desc_base, 0,
			sizeof(struct dma_lli) * chan_data->lli_desc_count);

	lli_desc = chan_data->lli_desc_base;
	blk_cfg = cfg->head_block;

	/* max channel priority can be MAX_CHANNEL - 1 */
	if (cfg->channel_priority < dw_dev_data->dma_ctx.dma_channels) {
		chan_data->cfg |= DMA_DW_AXI_CFG_PRIORITY(cfg->channel_priority);
	}

	/* configure all the descriptors in a loop */
	for (i = 0; i < cfg->block_count; i++) {

		ret = dma_dw_axi_set_data_width(lli_desc, cfg->source_data_size,
				cfg->dest_data_size);
		if (ret) {
			return ret;
		}

		lli_desc->ctl |= DMA_DW_AXI_CTL_SRC_STAT_EN |
				DMA_DW_AXI_CTL_DST_STAT_EN | DMA_DW_AXI_CTL_IOC_BLK_TFR;

		lli_desc->sar = blk_cfg->source_address;
		lli_desc->dar = blk_cfg->dest_address;

		/* set block transfer size */
		lli_desc->block_ts_lo = (blk_cfg->block_size / cfg->source_data_size) - 1;
		if (lli_desc->block_ts_lo > CONFIG_DMA_DW_AXI_MAX_BLOCK_TS) {
			LOG_ERR("block transfer size more than %u not supported",
				CONFIG_DMA_DW_AXI_MAX_BLOCK_TS);
			return -ENOTSUP;
		}

		/* configuration based on channel direction */
		if (cfg->channel_direction == MEMORY_TO_MEMORY) {
			chan_data->cfg |= DMA_DW_AXI_CFG_TT_FC(M2M_DMAC);

			lli_desc->ctl |= DMA_DW_AXI_CTL_SRC_MSIZE(msize_src) |
					DMA_DW_AXI_CTL_DST_MSIZE(msize_dst);

		} else if (cfg->channel_direction == MEMORY_TO_PERIPHERAL) {

			chan_data->cfg |= DMA_DW_AXI_CFG_TT_FC(M2P_DMAC);
			lli_desc->ctl |= DMA_DW_AXI_CTL_SRC_MSIZE(msize_src) |
					DMA_DW_AXI_CTL_DST_MSIZE(msize_dst);
			WRITE_BIT(chan_data->cfg, DMA_DW_AXI_CFG_HW_HS_DST_BIT_POS, 0);

			/* assign a hardware handshake interface */
			chan_data->cfg |= DMA_DW_AXI_CFG_DST_PER(cfg->dma_slot);

		} else if (cfg->channel_direction == PERIPHERAL_TO_MEMORY) {
			lli_desc->ctl |= DMA_DW_AXI_CTL_SRC_MSIZE(msize_src) |
					DMA_DW_AXI_CTL_DST_MSIZE(msize_dst);
			chan_data->cfg |= DMA_DW_AXI_CFG_TT_FC(P2M_DMAC);
			WRITE_BIT(chan_data->cfg, DMA_DW_AXI_CFG_HW_HS_SRC_BIT_POS, 0);

			/* assign a hardware handshake interface */
			chan_data->cfg |= DMA_DW_AXI_CFG_SRC_PER(cfg->dma_slot);

		} else {
			LOG_ERR("%s: dma %s channel %d invalid direction %d",
				__func__, dev->name, channel, cfg->channel_direction);

			return -EINVAL;
		}

		/* set pointer to the next descriptor */
		lli_desc->llp = ((uint64_t)(lli_desc + 1));

#if defined(CONFIG_DMA_DW_AXI_LLI_SUPPORT)
		/* configure multi-block transfer type as linked list */
		chan_data->cfg |= DMA_DW_AXI_CFG_SRC_MULTBLK_TYPE(MULTI_BLK_LLI) |
				DMA_DW_AXI_CFG_DST_MULTBLK_TYPE(MULTI_BLK_LLI);

		lli_desc->ctl |= DMA_DW_AXI_CTL_LLI_VALID;
		/* last descriptor */
		if ((i + 1) == chan_data->lli_desc_count) {
			lli_desc->ctl |= DMA_DW_AXI_CTL_LLI_LAST | DMA_DW_AXI_CTL_LLI_VALID;
			lli_desc->llp = 0;
		}
#else
		/* configure multi-block transfer as contiguous mode */
		chan_data->cfg |= DMA_DW_AXI_CFG_SRC_MULTBLK_TYPE(MULTI_BLK_CONTIGUOUS) |
				DMA_DW_AXI_CFG_DST_MULTBLK_TYPE(MULTI_BLK_CONTIGUOUS);
#endif

		/* next descriptor to configure */
		lli_desc++;
		blk_cfg = blk_cfg->next_block;
	}

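	/* flush descriptors from cache so the DMA engine reads the updated contents from memory */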
	arch_dcache_flush_range((void *)chan_data->lli_desc_base,
				sizeof(struct dma_lli) * cfg->block_count);

	chan_data->lli_desc_current = chan_data->lli_desc_base;

	/* enable an interrupt depending on whether the callback is requested after dma transfer
	 * completion or dma block transfer completion
	 *
	 * disable an interrupt if callback is not requested
	 */
	if (cfg->dma_callback && cfg->complete_callback_en) {
		chan_data->dma_blk_xfer_callback = cfg->dma_callback;
		chan_data->priv_data_blk_tfr = cfg->user_data;

		chan_data->irq_unmask = DMA_DW_AXI_IRQ_BLOCK_TFR | DMA_DW_AXI_IRQ_DMA_TFR;
	} else if (cfg->dma_callback && !cfg->complete_callback_en) {
		chan_data->dma_xfer_callback = cfg->dma_callback;
		chan_data->priv_data_xfer = cfg->user_data;

		chan_data->irq_unmask = DMA_DW_AXI_IRQ_DMA_TFR;
	} else {
		chan_data->irq_unmask = DMA_DW_AXI_IRQ_NONE;
	}

	/* unmask error interrupts when error_callback_dis is 0 */
	if (!cfg->error_callback_dis) {
		chan_data->irq_unmask |= DMA_DW_AXI_IRQ_ALL_ERR;
	}

	/* dma descriptors are configured, ready to start dma transfer */
	chan_data->ch_state = DMA_DW_AXI_CH_PREPARED;

	return 0;
}

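/* start a dma transfer on a previously configured channel */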
static int dma_dw_axi_start(const struct device *dev, uint32_t channel)
{
	uint32_t ch_state;
	struct dma_dw_axi_ch_data *chan_data;
	struct dma_lli *lli_desc;
	struct dma_dw_axi_dev_data *const dw_dev_data = DEV_DATA(dev);
	uintptr_t reg_base = DEVICE_MMIO_NAMED_GET(dev, dma_mmio);

	/* validate channel number */
	if (channel > (dw_dev_data->dma_ctx.dma_channels - 1)) {
		LOG_ERR("invalid dma channel %d", channel);
		return -EINVAL;
	}

	/* check whether channel is idle before initiating DMA transfer */
	ch_state = dma_dw_axi_get_ch_status(dev, channel);
	if (ch_state != DMA_DW_AXI_CH_IDLE) {
		LOG_ERR("DMA channel:%d is not idle", channel);
		return -EBUSY;
	}

	/* get channel specific data pointer */
	chan_data = &dw_dev_data->chan[channel];

	if (chan_data->ch_state != DMA_DW_AXI_CH_PREPARED) {
		LOG_ERR("DMA descriptors not configured");
		return -EINVAL;
	}

	/* enable dma controller and global interrupt bit */
	sys_write64(DMA_DW_AXI_CFG_INT_EN | DMA_DW_AXI_CFG_EN, reg_base + DMA_DW_AXI_CFGREG);

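	/* program the channel configuration built in dma_dw_axi_config() */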
	sys_write64(chan_data->cfg, reg_base + DMA_DW_AXI_CH_CFG(channel));

	sys_write64(chan_data->irq_unmask,
				reg_base + DMA_DW_AXI_CH_INTSTATUS_ENABLEREG(channel));
	sys_write64(chan_data->irq_unmask,
				reg_base + DMA_DW_AXI_CH_INTSIGNAL_ENABLEREG(channel));

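	/* fetch the first descriptor and program the channel registers from it */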
	lli_desc = chan_data->lli_desc_current;

#if defined(CONFIG_DMA_DW_AXI_LLI_SUPPORT)
	sys_write64(((uint64_t)lli_desc), reg_base + DMA_DW_AXI_CH_LLP(channel));
#else
	/* Program Source and Destination addresses */
	sys_write64(lli_desc->sar, reg_base + DMA_DW_AXI_CH_SAR(channel));
	sys_write64(lli_desc->dar, reg_base + DMA_DW_AXI_CH_DAR(channel));

	sys_write64(lli_desc->block_ts_lo & BLOCK_TS_MASK,
			reg_base + DMA_DW_AXI_CH_BLOCK_TS(channel));

	/* Program CH.CTL register */
	sys_write64(lli_desc->ctl, reg_base + DMA_DW_AXI_CH_CTL(channel));
#endif

	/* Enable the channel which will initiate DMA transfer */
	sys_write64(CH_EN(channel), reg_base + DMA_DW_AXI_CHENREG);

	chan_data->ch_state = dma_dw_axi_get_ch_status(dev, channel);

	return 0;
}

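/* stop an in-progress transfer: suspend, then disable, then abort the channel if needed */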
static int dma_dw_axi_stop(const struct device *dev, uint32_t channel)
{
	bool is_channel_busy;
	uint32_t ch_state;
	struct dma_dw_axi_dev_data *const dw_dev_data = DEV_DATA(dev);
	uintptr_t reg_base = DEVICE_MMIO_NAMED_GET(dev, dma_mmio);

	/* channel should be valid */
	if (channel > (dw_dev_data->dma_ctx.dma_channels - 1)) {
		LOG_ERR("invalid dma channel %d", channel);
		return -EINVAL;
	}

	/* return if the channel is idle as there is nothing to stop */
	ch_state = dma_dw_axi_get_ch_status(dev, channel);
	if (ch_state == DMA_DW_AXI_CH_IDLE) {
		/* channel is already idle */
		return 0;
	}

	/* To stop a transfer or abort the channel in an abnormal state:
	 * 1. Suspend the channel and drain the FIFO
	 * 2. Disable the channel. The channel may hang and fail to disable
	 *    if there is no response from the peripheral
	 * 3. If the channel is not disabled, abort it. Aborting flushes the
	 *    FIFO and data will be lost. The corresponding abort interrupt is
	 *    then raised and the CH_EN bit is cleared in the CHENREG register
	 */
	sys_write64(CH_SUSP(channel), reg_base + DMA_DW_AXI_CHENREG);

	/* Try to disable the channel */
	sys_clear_bit(reg_base + DMA_DW_AXI_CHENREG, channel);

	is_channel_busy = WAIT_FOR((sys_read64(reg_base + DMA_DW_AXI_CHENREG)) & (BIT(channel)),
						CONFIG_DMA_CHANNEL_STATUS_TIMEOUT, k_busy_wait(10));
	if (is_channel_busy) {
		LOG_WRN("No response from handshaking interface... Aborting the channel...");
		sys_write64(CH_ABORT(channel), reg_base + DMA_DW_AXI_CHENREG);

		is_channel_busy = WAIT_FOR((sys_read64(reg_base + DMA_DW_AXI_CHENREG)) &
				(BIT(channel)), CONFIG_DMA_CHANNEL_STATUS_TIMEOUT,
				k_busy_wait(10));
		if (is_channel_busy) {
			LOG_ERR("Channel abort failed");
			return -EBUSY;
		}
	}

	return 0;
}

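/* resume a suspended dma channel */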
static int dma_dw_axi_resume(const struct device *dev, uint32_t channel)
{
	uint64_t reg;
	uintptr_t reg_base = DEVICE_MMIO_NAMED_GET(dev, dma_mmio);
	struct dma_dw_axi_dev_data *const dw_dev_data = DEV_DATA(dev);
	uint32_t ch_state;

	/* channel should be valid */
	if (channel > (dw_dev_data->dma_ctx.dma_channels - 1)) {
		LOG_ERR("invalid dma channel %d", channel);
		return -EINVAL;
	}

	ch_state = dma_dw_axi_get_ch_status(dev, channel);
	if (ch_state != DMA_DW_AXI_CH_SUSPENDED) {
		LOG_INF("channel %u is not in suspended state so cannot resume channel", channel);
		return 0;
	}

	reg = sys_read64(reg_base + DMA_DW_AXI_CHENREG);
	/* channel susp write enable bit has to be asserted */
	reg |= CH_RESUME_WE(channel);
	/* channel susp bit must be cleared to resume a channel */
	reg &= ~CH_RESUME(channel);
	/* resume a channel by writing 0: ch_susp and 1: ch_susp_we */
	sys_write64(reg, reg_base + DMA_DW_AXI_CHENREG);

	return 0;
}

/* suspend a dma channel */
static int dma_dw_axi_suspend(const struct device *dev, uint32_t channel)
{
	int ret;
	uintptr_t reg_base = DEVICE_MMIO_NAMED_GET(dev, dma_mmio);
	struct dma_dw_axi_dev_data *const dw_dev_data = DEV_DATA(dev);
	uint32_t ch_state;

	/* channel should be valid */
	if (channel > (dw_dev_data->dma_ctx.dma_channels - 1)) {
		LOG_ERR("invalid dma channel %u", channel);
		return -EINVAL;
	}

	ch_state = dma_dw_axi_get_ch_status(dev, channel);
	if (ch_state != DMA_DW_AXI_CH_ACTIVE) {
		LOG_INF("nothing to suspend as dma channel %u is not busy", channel);
		return 0;
	}

	/* suspend dma transfer */
	sys_write64(CH_SUSP(channel), reg_base + DMA_DW_AXI_CHENREG);

	ret = WAIT_FOR(dma_dw_axi_get_ch_status(dev, channel) ==
			DMA_DW_AXI_CH_SUSPENDED, CONFIG_DMA_CHANNEL_STATUS_TIMEOUT,
			k_busy_wait(10));
	if (ret == 0) {
		LOG_ERR("channel suspend failed");
		return -EBUSY;
	}

	return 0;
}

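/* initialize the dma controller: map mmio, reset the controller if a reset line
 * is provided, mark all channels idle and hook up interrupt lines
 */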
static int dma_dw_axi_init(const struct device *dev)
{
	DEVICE_MMIO_NAMED_MAP(dev, dma_mmio, K_MEM_CACHE_NONE);
	int i, ret;
	struct dma_dw_axi_ch_data *chan_data;
	const struct dma_dw_axi_dev_cfg *dw_dma_config = DEV_CFG(dev);
	struct dma_dw_axi_dev_data *const dw_dev_data = DEV_DATA(dev);

#if DT_ANY_INST_HAS_PROP_STATUS_OKAY(resets)

	if (dw_dma_config->reset.dev != NULL) {
		/* check if the reset controller is in ready state */
		if (!device_is_ready(dw_dma_config->reset.dev)) {
			LOG_ERR("reset controller device not found");
			return -ENODEV;
		}

		/* assert and de-assert the dma controller reset */
		ret = reset_line_toggle(dw_dma_config->reset.dev, dw_dma_config->reset.id);
		if (ret != 0) {
			LOG_ERR("failed to reset dma controller");
			return ret;
		}
	}
#endif

	/* initialize channel state variable */
	for (i = 0; i < dw_dev_data->dma_ctx.dma_channels; i++) {
		chan_data = &dw_dev_data->chan[i];
		/* initialize channel state */
		chan_data->ch_state = DMA_DW_AXI_CH_IDLE;
	}

	/* configure and enable interrupt lines */
	dw_dma_config->irq_config();

	return 0;
}

static DEVICE_API(dma, dma_dw_axi_driver_api) = {
	.config = dma_dw_axi_config,
	.start = dma_dw_axi_start,
	.stop = dma_dw_axi_stop,
	.suspend = dma_dw_axi_suspend,
	.resume = dma_dw_axi_resume,
};
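
/*
 * Typical use goes through the generic Zephyr DMA API rather than calling this
 * driver directly. A minimal sketch for a single memory-to-memory block
 * transfer is shown below; dma_dev, channel, src_buf, dst_buf and xfer_done_cb
 * are application-provided (hypothetical) names, not part of this driver:
 *
 *	struct dma_block_config blk = {
 *		.source_address = (uintptr_t)src_buf,
 *		.dest_address = (uintptr_t)dst_buf,
 *		.block_size = sizeof(src_buf),
 *	};
 *	struct dma_config cfg = {
 *		.channel_direction = MEMORY_TO_MEMORY,
 *		.source_data_size = 4,
 *		.dest_data_size = 4,
 *		.source_burst_length = 4,
 *		.dest_burst_length = 4,
 *		.block_count = 1,
 *		.head_block = &blk,
 *		.dma_callback = xfer_done_cb,
 *	};
 *
 *	if (dma_config(dma_dev, channel, &cfg) == 0) {
 *		dma_start(dma_dev, channel);
 *	}
 */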

/* enable irq lines */
#define CONFIGURE_DMA_IRQ(idx, inst) \
	IF_ENABLED(DT_INST_IRQ_HAS_IDX(inst, idx), ( \
		IRQ_CONNECT(DT_INST_IRQ_BY_IDX(inst, idx, irq), \
			DT_INST_IRQ_BY_IDX(inst, idx, priority), \
			dma_dw_axi_isr, \
			DEVICE_DT_INST_GET(inst), 0); \
			irq_enable(DT_INST_IRQ_BY_IDX(inst, idx, irq)); \
	))

#define DW_AXI_DMA_RESET_SPEC_INIT(inst) \
	.reset = RESET_DT_SPEC_INST_GET(inst), \

#define DW_AXI_DMAC_INIT(inst)								\
	static struct dma_dw_axi_ch_data chan_##inst[DT_INST_PROP(inst, dma_channels)];	\
	static struct dma_lli								\
		dma_desc_pool_##inst[DT_INST_PROP(inst, dma_channels) *			\
			CONFIG_DMA_DW_AXI_MAX_DESC];					\
	ATOMIC_DEFINE(dma_dw_axi_atomic##inst,						\
		      DT_INST_PROP(inst, dma_channels));				\
	static struct dma_dw_axi_dev_data dma_dw_axi_data_##inst = {			\
		.dma_ctx = {								\
			.magic = DMA_MAGIC,						\
			.atomic = dma_dw_axi_atomic##inst,				\
			.dma_channels = DT_INST_PROP(inst, dma_channels),		\
		},									\
		.chan = chan_##inst,							\
		.dma_desc_pool = dma_desc_pool_##inst,					\
	};										\
	static void dw_dma_irq_config_##inst(void);					\
	static const struct dma_dw_axi_dev_cfg dma_dw_axi_config_##inst = {		\
		DEVICE_MMIO_NAMED_ROM_INIT(dma_mmio, DT_DRV_INST(inst)),		\
		IF_ENABLED(DT_INST_NODE_HAS_PROP(inst, resets),				\
			(DW_AXI_DMA_RESET_SPEC_INIT(inst)))				\
		.irq_config = dw_dma_irq_config_##inst,					\
	};										\
											\
	DEVICE_DT_INST_DEFINE(inst,							\
				&dma_dw_axi_init,					\
				NULL,							\
				&dma_dw_axi_data_##inst,				\
				&dma_dw_axi_config_##inst, POST_KERNEL,			\
				CONFIG_DMA_INIT_PRIORITY,				\
				&dma_dw_axi_driver_api);				\
											\
	static void dw_dma_irq_config_##inst(void)					\
	{										\
		LISTIFY(DT_NUM_IRQS(DT_DRV_INST(inst)), CONFIGURE_DMA_IRQ, (), inst)	\
	}

DT_INST_FOREACH_STATUS_OKAY(DW_AXI_DMAC_INIT)