/*
 * Copyright (c) 2022 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <errno.h>

#include <stdio.h>
#include <string.h>
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/pm/device.h>
#include <zephyr/pm/device_runtime.h>
#include <soc.h>
#include "dma_dw_common.h"

#define LOG_LEVEL CONFIG_DMA_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(dma_dw_common);

/* number of tries to wait for reset */
#define DW_DMA_CFG_TRIES	10000

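/*
 * Interrupt service routine shared by the DesignWare DMA instances.
 * It reads the combined interrupt status, clears any error, block and
 * transfer-complete status, then dispatches the registered per-channel
 * callbacks: block-complete callbacks first, followed by
 * transfer-complete callbacks.
 */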
void dw_dma_isr(const struct device *dev)
{
	const struct dw_dma_dev_cfg *const dev_cfg = dev->config;
	struct dw_dma_dev_data *const dev_data = dev->data;
	struct dw_dma_chan_data *chan_data;

	uint32_t status_tfr = 0U;
	uint32_t status_block = 0U;
	uint32_t status_err = 0U;
	uint32_t status_intr;
	uint32_t channel;

	status_intr = dw_read(dev_cfg->base, DW_INTR_STATUS);
	if (!status_intr) {
		LOG_ERR("%s: status_intr = %d", dev->name, status_intr);
	}

	/* get the source of our IRQ. */
	status_block = dw_read(dev_cfg->base, DW_STATUS_BLOCK);
	status_tfr = dw_read(dev_cfg->base, DW_STATUS_TFR);

	/* TODO: handle errors, for now just clear them */
	status_err = dw_read(dev_cfg->base, DW_STATUS_ERR);
	if (status_err) {
		LOG_ERR("%s: status_err = %d", dev->name, status_err);
		dw_write(dev_cfg->base, DW_CLEAR_ERR, status_err);
	}

	/* clear interrupts */
	dw_write(dev_cfg->base, DW_CLEAR_BLOCK, status_block);
	dw_write(dev_cfg->base, DW_CLEAR_TFR, status_tfr);

	/* Dispatch callbacks for channels depending upon the bit set */
	while (status_block) {
		channel = find_lsb_set(status_block) - 1;
		status_block &= ~(1 << channel);
		chan_data = &dev_data->chan[channel];

		if (chan_data->dma_blkcallback) {
			LOG_DBG("%s: Dispatching block complete callback for channel %d", dev->name,
				channel);

			/* Ensure the linked list (chan_data->lli) is
			 * freed in the user callback function once
			 * all the blocks are transferred.
			 */
			chan_data->dma_blkcallback(dev,
						   chan_data->blkuser_data,
						   channel, DMA_STATUS_BLOCK);
		}
	}

	while (status_tfr) {
		channel = find_lsb_set(status_tfr) - 1;
		status_tfr &= ~(1 << channel);
		chan_data = &dev_data->chan[channel];

		/* Transfer complete, channel now idle, a reload
		 * could safely occur in the callback via dma_config
		 * and dma_start
		 */
		chan_data->state = DW_DMA_IDLE;

		if (chan_data->dma_tfrcallback) {
			LOG_DBG("%s: Dispatching transfer callback for channel %d", dev->name,
				channel);
			chan_data->dma_tfrcallback(dev,
						   chan_data->tfruser_data,
						   channel, DMA_STATUS_COMPLETE);
		}
	}
}

/* mask address for dma to identify memory space. */
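/*
 * Example (hypothetical values): with CONFIG_DMA_DW_HOST_MASK set to
 * 0x20000000, a host buffer at 0x00100000 used as the source of a
 * MEMORY_TO_PERIPHERAL transfer would be programmed into SAR as
 * 0x20100000, tagging the access as a host-memory access.
 */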
static void dw_dma_mask_address(struct dma_block_config *block_cfg,
				struct dw_lli *lli_desc, uint32_t direction)
{
	lli_desc->sar = block_cfg->source_address;
	lli_desc->dar = block_cfg->dest_address;

	switch (direction) {
	case MEMORY_TO_PERIPHERAL:
		lli_desc->sar |= CONFIG_DMA_DW_HOST_MASK;
		break;
	case PERIPHERAL_TO_MEMORY:
		lli_desc->dar |= CONFIG_DMA_DW_HOST_MASK;
		break;
	case MEMORY_TO_MEMORY:
		lli_desc->sar |= CONFIG_DMA_DW_HOST_MASK;
		lli_desc->dar |= CONFIG_DMA_DW_HOST_MASK;
		break;
	default:
		break;
	}
}

int dw_dma_config(const struct device *dev, uint32_t channel,
			 struct dma_config *cfg)
{
	const struct dw_dma_dev_cfg *const dev_cfg = dev->config;
	struct dw_dma_dev_data *const dev_data = dev->data;
	struct dma_block_config *block_cfg;

	struct dw_lli *lli_desc;
	struct dw_lli *lli_desc_head;
	struct dw_lli *lli_desc_tail;
	uint32_t msize = 3; /* default msize, burst size = 2^3 = 8 */
	int ret = 0;

	if (channel >= DW_CHAN_COUNT) {
		LOG_ERR("%s: invalid dma channel %d", dev->name, channel);
		ret = -EINVAL;
		goto out;
	}

	struct dw_dma_chan_data *chan_data = &dev_data->chan[channel];

	if (chan_data->state != DW_DMA_IDLE && chan_data->state != DW_DMA_PREPARED) {
		LOG_ERR("%s: channel %d must be inactive to reconfigure, currently %d", dev->name,
			channel, chan_data->state);
		ret = -EBUSY;
		goto out;
	}

	LOG_DBG("%s: channel %d config", dev->name, channel);

	__ASSERT_NO_MSG(cfg->source_data_size == cfg->dest_data_size);
	__ASSERT_NO_MSG(cfg->source_burst_length == cfg->dest_burst_length);
	__ASSERT_NO_MSG(cfg->block_count > 0);
	__ASSERT_NO_MSG(cfg->head_block != NULL);

	if (cfg->source_data_size != 1 && cfg->source_data_size != 2 &&
	    cfg->source_data_size != 4 && cfg->source_data_size != 8 &&
	    cfg->source_data_size != 16) {
		LOG_ERR("%s: channel %d invalid source_data_size value %d", dev->name, channel,
			cfg->source_data_size);
		ret = -EINVAL;
		goto out;
	}

	if (cfg->block_count > CONFIG_DMA_DW_LLI_POOL_SIZE) {
		LOG_ERR("%s: channel %d scatter gather list larger than"
			" the descriptor pool, consider increasing CONFIG_DMA_DW_LLI_POOL_SIZE",
			dev->name, channel);
		ret = -EINVAL;
		goto out;
	}

	/* burst_size = (2 ^ msize), e.g. source_burst_length of 8 gives msize 3 */
	msize = find_msb_set(cfg->source_burst_length) - 1;
	LOG_DBG("%s: channel %d m_size=%d", dev->name, channel, msize);
	__ASSERT_NO_MSG(msize < 5);

	/* default channel config */
	chan_data->direction = cfg->channel_direction;
	chan_data->cfg_lo = 0;
	chan_data->cfg_hi = 0;

	/* set up the LLI list from the per-channel pool, no allocation needed */
	chan_data->lli = &dev_data->lli_pool[channel][0]; /* TODO allocate here */
	chan_data->lli_count = cfg->block_count;

	/* zero the scatter gather list */
	memset(chan_data->lli, 0, sizeof(struct dw_lli) * chan_data->lli_count);
	lli_desc = chan_data->lli;
	lli_desc_head = &chan_data->lli[0];
	lli_desc_tail = &chan_data->lli[chan_data->lli_count - 1];

	chan_data->ptr_data.buffer_bytes = 0;

	/* copy the scatter gather list from dma_cfg to dw_lli */
	block_cfg = cfg->head_block;
	for (int i = 0; i < cfg->block_count; i++) {
		__ASSERT_NO_MSG(block_cfg != NULL);
		LOG_DBG("%s: copying block_cfg %p to lli_desc %p", dev->name, block_cfg, lli_desc);

		/* write CTL_LO for each lli */
		switch (cfg->source_data_size) {
		case 1:
			/* byte at a time transfer */
			lli_desc->ctrl_lo |= DW_CTLL_SRC_WIDTH(0);
			break;
		case 2:
			/* non peripheral copies are optimal using words */
			switch (cfg->channel_direction) {
			case MEMORY_TO_MEMORY:
				/* config the src tr width for 32 bit words */
				lli_desc->ctrl_lo |= DW_CTLL_SRC_WIDTH(2);
				break;
			default:
				/* config the src width for 16 bit samples */
				lli_desc->ctrl_lo |= DW_CTLL_SRC_WIDTH(1);
				break;
			}
			break;
		case 4:
			/* config the src tr width for 24, 32 bit samples */
			lli_desc->ctrl_lo |= DW_CTLL_SRC_WIDTH(2);
			break;
		default:
			LOG_ERR("%s: channel %d invalid src width %d", dev->name, channel,
				cfg->source_data_size);
			ret = -EINVAL;
			goto out;
		}

		LOG_DBG("%s: source data size: lli_desc %p, ctrl_lo %x", dev->name,
			lli_desc, lli_desc->ctrl_lo);

		switch (cfg->dest_data_size) {
		case 1:
			/* byte at a time transfer */
			lli_desc->ctrl_lo |= DW_CTLL_DST_WIDTH(0);
			break;
		case 2:
			/* non peripheral copies are optimal using words */
			switch (cfg->channel_direction) {
			case MEMORY_TO_MEMORY:
				/* config the dest tr width for 32 bit words */
				lli_desc->ctrl_lo |= DW_CTLL_DST_WIDTH(2);
				break;
			default:
				/* config the dest width for 16 bit samples */
				lli_desc->ctrl_lo |= DW_CTLL_DST_WIDTH(1);
				break;
			}
			break;
		case 4:
			/* config the dest tr width for 24, 32 bit samples */
			lli_desc->ctrl_lo |= DW_CTLL_DST_WIDTH(2);
			break;
		default:
			LOG_ERR("%s: channel %d invalid dest width %d", dev->name, channel,
				cfg->dest_data_size);
			ret = -EINVAL;
			goto out;
		}

		LOG_DBG("%s: dest data size: lli_desc %p, ctrl_lo %x", dev->name,
			lli_desc, lli_desc->ctrl_lo);

		lli_desc->ctrl_lo |= DW_CTLL_SRC_MSIZE(msize) |
			DW_CTLL_DST_MSIZE(msize);

		if (cfg->dma_callback) {
			lli_desc->ctrl_lo |= DW_CTLL_INT_EN; /* enable interrupt */
		}

		LOG_DBG("%s: msize, int_en: lli_desc %p, ctrl_lo %x", dev->name,
			lli_desc, lli_desc->ctrl_lo);

		/* config the SINC and DINC fields of CTL_LO,
		 * SRC/DST_PER fields of CFG_HI
		 */
		switch (cfg->channel_direction) {
		case MEMORY_TO_MEMORY:
			lli_desc->ctrl_lo |= DW_CTLL_FC_M2M | DW_CTLL_SRC_INC |
				DW_CTLL_DST_INC;
#if CONFIG_DMA_DW_HW_LLI
			LOG_DBG("%s: setting LLP_D_EN, LLP_S_EN in lli_desc->ctrl_lo %x", dev->name,
				lli_desc->ctrl_lo);
			lli_desc->ctrl_lo |=
				DW_CTLL_LLP_S_EN | DW_CTLL_LLP_D_EN;
			LOG_DBG("%s: lli_desc->ctrl_lo %x", dev->name, lli_desc->ctrl_lo);
#endif
#if CONFIG_DMA_DW
			chan_data->cfg_lo |= DW_CFGL_SRC_SW_HS;
			chan_data->cfg_lo |= DW_CFGL_DST_SW_HS;
#endif
			break;
		case MEMORY_TO_PERIPHERAL:
			lli_desc->ctrl_lo |= DW_CTLL_FC_M2P | DW_CTLL_SRC_INC |
				DW_CTLL_DST_FIX;
#if CONFIG_DMA_DW_HW_LLI
			lli_desc->ctrl_lo |= DW_CTLL_LLP_S_EN;
			chan_data->cfg_lo |= DW_CFGL_RELOAD_DST;
#endif
			/* Assign a hardware handshake interface (0-15) to the
			 * destination of the channel
			 */
			chan_data->cfg_hi |= DW_CFGH_DST(cfg->dma_slot);
#if CONFIG_DMA_DW
			chan_data->cfg_lo |= DW_CFGL_SRC_SW_HS;
#endif
			break;
		case PERIPHERAL_TO_MEMORY:
			lli_desc->ctrl_lo |= DW_CTLL_FC_P2M | DW_CTLL_SRC_FIX |
				DW_CTLL_DST_INC;
#if CONFIG_DMA_DW_HW_LLI
			if (!block_cfg->dest_scatter_en) {
				lli_desc->ctrl_lo |= DW_CTLL_LLP_D_EN;
			} else {
				/* Use contiguous auto-reload. Line 3 in
				 * table 3-3
				 */
				lli_desc->ctrl_lo |= DW_CTLL_D_SCAT_EN;
			}
			chan_data->cfg_lo |= DW_CFGL_RELOAD_SRC;
#endif
			/* Assign a hardware handshake interface (0-15) to the
			 * source of the channel
			 */
			chan_data->cfg_hi |= DW_CFGH_SRC(cfg->dma_slot);
#if CONFIG_DMA_DW
			chan_data->cfg_lo |= DW_CFGL_DST_SW_HS;
#endif
			break;
		default:
			LOG_ERR("%s: channel %d invalid direction %d", dev->name, channel,
				cfg->channel_direction);
			ret = -EINVAL;
			goto out;
		}

		LOG_DBG("%s: direction: lli_desc %p, ctrl_lo %x, cfg_hi %x, cfg_lo %x", dev->name,
			lli_desc, lli_desc->ctrl_lo, chan_data->cfg_hi, chan_data->cfg_lo);

		dw_dma_mask_address(block_cfg, lli_desc, cfg->channel_direction);

		LOG_DBG("%s: mask address: lli_desc %p, ctrl_lo %x, cfg_hi %x, cfg_lo %x",
			dev->name, lli_desc, lli_desc->ctrl_lo, chan_data->cfg_hi,
			chan_data->cfg_lo);

		if (block_cfg->block_size > DW_CTLH_BLOCK_TS_MASK) {
			LOG_ERR("%s: channel %d block size too big %d", dev->name, channel,
				block_cfg->block_size);
			ret = -EINVAL;
			goto out;
		}

		/* Set class and transfer size */
		lli_desc->ctrl_hi |= DW_CTLH_CLASS(dev_data->channel_data->chan[channel].class) |
			(block_cfg->block_size & DW_CTLH_BLOCK_TS_MASK);

		LOG_DBG("%s: block_size, class: lli_desc %p, ctrl_lo %x, cfg_hi %x, cfg_lo %x",
			dev->name, lli_desc, lli_desc->ctrl_lo, chan_data->cfg_hi,
			chan_data->cfg_lo);

		chan_data->ptr_data.buffer_bytes += block_cfg->block_size;

		/* set next descriptor in list */
		lli_desc->llp = (uintptr_t)(lli_desc + 1);

		LOG_DBG("%s: lli_desc llp %x", dev->name, lli_desc->llp);

		/* next descriptor */
		lli_desc++;

		block_cfg = block_cfg->next_block;
	}

#if CONFIG_DMA_DW_HW_LLI
	chan_data->cfg_lo |= DW_CFGL_CTL_HI_UPD_EN;
#endif

	/* end of list or cyclic buffer */
	if (cfg->cyclic) {
		lli_desc_tail->llp = (uintptr_t)lli_desc_head;
	} else {
		lli_desc_tail->llp = 0;
#if CONFIG_DMA_DW_HW_LLI
		LOG_DBG("%s: Clearing LLP_S_EN, LLP_D_EN from tail LLI %x", dev->name,
			lli_desc_tail->ctrl_lo);
		lli_desc_tail->ctrl_lo &= ~(DW_CTLL_LLP_S_EN | DW_CTLL_LLP_D_EN);
		LOG_DBG("%s: ctrl_lo %x", dev->name, lli_desc_tail->ctrl_lo);
#endif
	}

	/* set the initial lli, mark the channel as prepared (ready to be started) */
	chan_data->state = DW_DMA_PREPARED;
	chan_data->lli_current = chan_data->lli;

	/* initialize pointers */
	chan_data->ptr_data.start_ptr = DW_DMA_LLI_ADDRESS(chan_data->lli,
							 chan_data->direction);
	chan_data->ptr_data.end_ptr = chan_data->ptr_data.start_ptr +
				    chan_data->ptr_data.buffer_bytes;
	chan_data->ptr_data.current_ptr = chan_data->ptr_data.start_ptr;
	chan_data->ptr_data.hw_ptr = chan_data->ptr_data.start_ptr;

	/* Configure a callback appropriately depending on whether the
	 * interrupt is requested at the end of transaction completion or
	 * at the end of each block.
	 */
	if (cfg->complete_callback_en) {
		chan_data->dma_blkcallback = cfg->dma_callback;
		chan_data->blkuser_data = cfg->user_data;
		dw_write(dev_cfg->base, DW_MASK_BLOCK, DW_CHAN_UNMASK(channel));
	} else {
		chan_data->dma_tfrcallback = cfg->dma_callback;
		chan_data->tfruser_data = cfg->user_data;
		dw_write(dev_cfg->base, DW_MASK_TFR, DW_CHAN_UNMASK(channel));
	}

	dw_write(dev_cfg->base, DW_MASK_ERR, DW_CHAN_UNMASK(channel));

	/* write interrupt clear registers for the channel
	 * ClearTfr, ClearBlock, ClearSrcTran, ClearDstTran, ClearErr
	 */
	dw_write(dev_cfg->base, DW_CLEAR_TFR, 0x1 << channel);
	dw_write(dev_cfg->base, DW_CLEAR_BLOCK, 0x1 << channel);
	dw_write(dev_cfg->base, DW_CLEAR_SRC_TRAN, 0x1 << channel);
	dw_write(dev_cfg->base, DW_CLEAR_DST_TRAN, 0x1 << channel);
	dw_write(dev_cfg->base, DW_CLEAR_ERR, 0x1 << channel);

out:
	return ret;
}
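
/*
 * Minimal usage sketch via the generic Zephyr DMA API (a hypothetical
 * memory-to-memory copy; the device handle, channel number and buffers
 * are assumptions, not part of this driver):
 *
 *	static uint8_t src[64], dst[64];
 *
 *	struct dma_block_config block = {
 *		.source_address = (uintptr_t)src,
 *		.dest_address = (uintptr_t)dst,
 *		.block_size = sizeof(src),
 *	};
 *	struct dma_config cfg = {
 *		.channel_direction = MEMORY_TO_MEMORY,
 *		.source_data_size = 4,
 *		.dest_data_size = 4,
 *		.source_burst_length = 8,
 *		.dest_burst_length = 8,
 *		.block_count = 1,
 *		.head_block = &block,
 *	};
 *
 *	if (dma_config(dev, channel, &cfg) == 0) {
 *		dma_start(dev, channel);
 *	}
 *
 * Note the config satisfies the asserts above: matching data sizes and
 * burst lengths, a non-zero block count and a non-NULL head block.
 */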

bool dw_dma_is_enabled(const struct device *dev, uint32_t channel)
{
	const struct dw_dma_dev_cfg *const dev_cfg = dev->config;

	return dw_read(dev_cfg->base, DW_DMA_CHAN_EN) & DW_CHAN(channel);
}

int dw_dma_start(const struct device *dev, uint32_t channel)
{
	const struct dw_dma_dev_cfg *const dev_cfg = dev->config;
	struct dw_dma_dev_data *dev_data = dev->data;
	int ret = 0;

	/* validate channel */
	if (channel >= DW_CHAN_COUNT) {
		ret = -EINVAL;
		goto out;
	}

	if (dw_dma_is_enabled(dev, channel)) {
		goto out;
	}

	struct dw_dma_chan_data *chan_data = &dev_data->chan[channel];

	/* validate channel state */
	if (chan_data->state != DW_DMA_PREPARED) {
		LOG_ERR("%s: channel %d not ready ena 0x%x status 0x%x", dev->name, channel,
			dw_read(dev_cfg->base, DW_DMA_CHAN_EN), chan_data->state);
		ret = -EBUSY;
		goto out;
	}

	/* check the stream is valid */
	if (!chan_data->lli) {
		LOG_ERR("%s: channel %d invalid stream", dev->name, channel);
		ret = -EINVAL;
		goto out;
	}

	LOG_INF("%s: channel %d start", dev->name, channel);

	struct dw_lli *lli = chan_data->lli_current;

#ifdef CONFIG_DMA_DW_HW_LLI
	/* LLP mode - write LLP pointer */

	uint32_t masked_ctrl_lo = lli->ctrl_lo & (DW_CTLL_LLP_D_EN | DW_CTLL_LLP_S_EN);
	uint32_t llp = 0;

	if (masked_ctrl_lo) {
		llp = (uint32_t)lli;
		LOG_DBG("%s: Setting llp", dev->name);
	}
	dw_write(dev_cfg->base, DW_LLP(channel), llp);
	LOG_DBG("%s: ctrl_lo %x, masked ctrl_lo %x, LLP %x", dev->name,
		lli->ctrl_lo, masked_ctrl_lo, dw_read(dev_cfg->base, DW_LLP(channel)));
#endif /* CONFIG_DMA_DW_HW_LLI */

	/* channel needs to start from scratch, so write SAR and DAR */
#ifdef CONFIG_DMA_64BIT
	dw_write(dev_cfg->base, DW_SAR(channel), (uint32_t)(lli->sar & DW_ADDR_MASK_32));
	dw_write(dev_cfg->base, DW_SAR_HI(channel), (uint32_t)(lli->sar >> DW_ADDR_RIGHT_SHIFT));
	dw_write(dev_cfg->base, DW_DAR(channel), (uint32_t)(lli->dar & DW_ADDR_MASK_32));
	dw_write(dev_cfg->base, DW_DAR_HI(channel), (uint32_t)(lli->dar >> DW_ADDR_RIGHT_SHIFT));
#else
	dw_write(dev_cfg->base, DW_SAR(channel), lli->sar);
	dw_write(dev_cfg->base, DW_DAR(channel), lli->dar);
#endif /* CONFIG_DMA_64BIT */

	/* program CTL_LO and CTL_HI */
	dw_write(dev_cfg->base, DW_CTRL_LOW(channel), lli->ctrl_lo);
	dw_write(dev_cfg->base, DW_CTRL_HIGH(channel), lli->ctrl_hi);

	/* program CFG_LO and CFG_HI */
	dw_write(dev_cfg->base, DW_CFG_LOW(channel), chan_data->cfg_lo);
	dw_write(dev_cfg->base, DW_CFG_HIGH(channel), chan_data->cfg_hi);

#ifdef CONFIG_DMA_64BIT
	LOG_DBG("%s: sar %llx, dar %llx, ctrl_lo %x, ctrl_hi %x, cfg_lo %x, cfg_hi %x, llp %x",
		dev->name, lli->sar, lli->dar, lli->ctrl_lo, lli->ctrl_hi, chan_data->cfg_lo,
		chan_data->cfg_hi, dw_read(dev_cfg->base, DW_LLP(channel))
		);
#else
	LOG_DBG("%s: sar %x, dar %x, ctrl_lo %x, ctrl_hi %x, cfg_lo %x, cfg_hi %x, llp %x",
		dev->name, lli->sar, lli->dar, lli->ctrl_lo, lli->ctrl_hi, chan_data->cfg_lo,
		chan_data->cfg_hi, dw_read(dev_cfg->base, DW_LLP(channel))
		);
#endif /* CONFIG_DMA_64BIT */

#ifdef CONFIG_DMA_DW_HW_LLI
	if (lli->ctrl_lo & DW_CTLL_D_SCAT_EN) {
		LOG_DBG("%s: configuring DW_DSR", dev->name);
		uint32_t words_per_tfr = (lli->ctrl_hi & DW_CTLH_BLOCK_TS_MASK) >>
			((lli->ctrl_lo & DW_CTLL_DST_WIDTH_MASK) >> DW_CTLL_DST_WIDTH_SHIFT);
		dw_write(dev_cfg->base, DW_DSR(channel),
			 DW_DSR_DSC(words_per_tfr) | DW_DSR_DSI(words_per_tfr));
	}
#endif /* CONFIG_DMA_DW_HW_LLI */

	chan_data->state = DW_DMA_ACTIVE;

	/* enable the channel */
	dw_write(dev_cfg->base, DW_DMA_CHAN_EN, DW_CHAN_UNMASK(channel));
	ret = pm_device_runtime_get(dev);

out:
	return ret;
}

int dw_dma_stop(const struct device *dev, uint32_t channel)
{
	const struct dw_dma_dev_cfg *const dev_cfg = dev->config;
	struct dw_dma_dev_data *dev_data = dev->data;
	struct dw_dma_chan_data *chan_data = &dev_data->chan[channel];
	enum pm_device_state pm_state;
	int ret = 0;

	if (channel >= DW_CHAN_COUNT) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Skip if the device is not active. If pm_device_state_get returns
	 * an error, do not skip; check the actual hardware state and stop
	 * the channel if needed.
	 */
	ret = pm_device_state_get(dev, &pm_state);
	if (!ret && pm_state != PM_DEVICE_STATE_ACTIVE) {
		goto out;
	}

	if (!dw_dma_is_enabled(dev, channel) && chan_data->state != DW_DMA_SUSPENDED) {
		ret = 0;
		goto out;
	}

#ifdef CONFIG_DMA_DW_HW_LLI
	struct dw_lli *lli = chan_data->lli;
	int i;
#endif

	LOG_INF("%s: channel %d stop", dev->name, channel);

	/* Validate the channel state */
	if (chan_data->state != DW_DMA_ACTIVE &&
	    chan_data->state != DW_DMA_SUSPENDED) {
		ret = -EINVAL;
		goto out;
	}

#ifdef CONFIG_DMA_DW_SUSPEND_DRAIN
	/* the channel cannot be disabled right away, so first we need to
	 * suspend it and drain the FIFO
	 */
	dw_write(dev_cfg->base, DW_CFG_LOW(channel),
		 chan_data->cfg_lo | DW_CFGL_SUSPEND | DW_CFGL_DRAIN);

	/* now we wait for FIFO to be empty */
	bool fifo_empty = WAIT_FOR(dw_read(dev_cfg->base, DW_CFG_LOW(channel)) & DW_CFGL_FIFO_EMPTY,
				DW_DMA_TIMEOUT, k_busy_wait(DW_DMA_TIMEOUT/10));
	if (!fifo_empty) {
		LOG_WRN("%s: channel %d drain timed out", dev->name, channel);

		/* Continue even if draining timed out, to make sure the
		 * channel still gets disabled.
		 * The same channel might be requested again later (for the
		 * same or another purpose), which would fail if the channel
		 * had been left enabled.
		 */
	}
#endif

	dw_write(dev_cfg->base, DW_DMA_CHAN_EN, DW_CHAN_MASK(channel));

	/* now we wait for channel to be disabled */
	bool is_disabled = WAIT_FOR(!(dw_read(dev_cfg->base, DW_DMA_CHAN_EN) & DW_CHAN(channel)),
				    DW_DMA_TIMEOUT, k_busy_wait(DW_DMA_TIMEOUT/10));
	if (!is_disabled) {
		LOG_ERR("%s: channel %d disable timeout", dev->name, channel);
		return -ETIMEDOUT;
	}

#if CONFIG_DMA_DW_HW_LLI
	for (i = 0; i < chan_data->lli_count; i++) {
		lli->ctrl_hi &= ~DW_CTLH_DONE(1);
		lli++;
	}
#endif
	chan_data->state = DW_DMA_IDLE;
	ret = pm_device_runtime_put(dev);
out:
	return ret;
}

int dw_dma_resume(const struct device *dev, uint32_t channel)
{
	const struct dw_dma_dev_cfg *const dev_cfg = dev->config;
	struct dw_dma_dev_data *dev_data = dev->data;
	int ret = 0;

	/* Validate channel index */
	if (channel >= DW_CHAN_COUNT) {
		ret = -EINVAL;
		goto out;
	}

	struct dw_dma_chan_data *chan_data = &dev_data->chan[channel];

	/* Validate channel state */
	if (chan_data->state != DW_DMA_SUSPENDED) {
		ret = -EINVAL;
		goto out;
	}

	LOG_DBG("%s: channel %d resume", dev->name, channel);

	dw_write(dev_cfg->base, DW_CFG_LOW(channel), chan_data->cfg_lo);

	/* Channel is now active */
	chan_data->state = DW_DMA_ACTIVE;

out:
	return ret;
}

int dw_dma_suspend(const struct device *dev, uint32_t channel)
{
	const struct dw_dma_dev_cfg *const dev_cfg = dev->config;
	struct dw_dma_dev_data *dev_data = dev->data;
	int ret = 0;

	/* Validate channel index */
	if (channel >= DW_CHAN_COUNT) {
		ret = -EINVAL;
		goto out;
	}

	struct dw_dma_chan_data *chan_data = &dev_data->chan[channel];

	/* Validate channel state */
	if (chan_data->state != DW_DMA_ACTIVE) {
		ret = -EINVAL;
		goto out;
	}

	LOG_DBG("%s: channel %d suspend", dev->name, channel);

	dw_write(dev_cfg->base, DW_CFG_LOW(channel),
		      chan_data->cfg_lo | DW_CFGL_SUSPEND);

	/* Channel is now suspended */
	chan_data->state = DW_DMA_SUSPENDED;

out:
	return ret;
}

int dw_dma_setup(const struct device *dev)
{
	const struct dw_dma_dev_cfg *const dev_cfg = dev->config;

	int i, ret = 0;

	/* the DMAC cannot be configured while enabled, so disable it first
	 * if the host has already enabled it
	 */
	if (dw_read(dev_cfg->base, DW_DMA_CFG) != 0) {
		dw_write(dev_cfg->base, DW_DMA_CFG, 0x0);
	}

	for (i = DW_DMA_CFG_TRIES; i > 0; i--) {
		if (!dw_read(dev_cfg->base, DW_DMA_CFG)) {
			break;
		}
	}

	if (!i) {
		LOG_ERR("%s: setup failed", dev->name);
		ret = -EIO;
		goto out;
	}

	LOG_DBG("%s: ENTER", dev->name);

	for (i = 0; i < DW_CHAN_COUNT; i++) {
		dw_read(dev_cfg->base, DW_DMA_CHAN_EN);
	}

	/* enable the DMA controller */
	dw_write(dev_cfg->base, DW_DMA_CFG, 1);

	/* mask all interrupts on all channels */
	dw_write(dev_cfg->base, DW_MASK_TFR, DW_CHAN_MASK_ALL);
	dw_write(dev_cfg->base, DW_MASK_BLOCK, DW_CHAN_MASK_ALL);
	dw_write(dev_cfg->base, DW_MASK_SRC_TRAN, DW_CHAN_MASK_ALL);
	dw_write(dev_cfg->base, DW_MASK_DST_TRAN, DW_CHAN_MASK_ALL);
	dw_write(dev_cfg->base, DW_MASK_ERR, DW_CHAN_MASK_ALL);

#ifdef CONFIG_DMA_DW_FIFO_PARTITION
	/* allocate FIFO partitions for each channel */
	dw_write(dev_cfg->base, DW_FIFO_PART1_HI,
		      DW_FIFO_CHx(DW_FIFO_SIZE) | DW_FIFO_CHy(DW_FIFO_SIZE));
	dw_write(dev_cfg->base, DW_FIFO_PART1_LO,
		      DW_FIFO_CHx(DW_FIFO_SIZE) | DW_FIFO_CHy(DW_FIFO_SIZE));
	dw_write(dev_cfg->base, DW_FIFO_PART0_HI,
		      DW_FIFO_CHx(DW_FIFO_SIZE) | DW_FIFO_CHy(DW_FIFO_SIZE));
	dw_write(dev_cfg->base, DW_FIFO_PART0_LO,
		      DW_FIFO_CHx(DW_FIFO_SIZE) | DW_FIFO_CHy(DW_FIFO_SIZE) |
		 DW_FIFO_UPD);
#endif /* CONFIG_DMA_DW_FIFO_PARTITION */

	/* TODO add baytrail/cherrytrail workaround */
out:
	return ret;
}

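/*
 * The two helpers below treat the transfer as a ring buffer of
 * ptr_data.buffer_bytes and derive the fill level from the distance
 * between a software pointer and the hardware SAR/DAR. A worked
 * example (hypothetical numbers): with a 0x1000-byte buffer, a reader
 * at 0x100 and DAR at 0x080, the wrapped distance is
 * 0x080 - 0x100 + 0x1000 = 0xF80 bytes available to read.
 */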
static int dw_dma_avail_data_size(const struct device *dev, uint32_t base,
				  struct dw_dma_chan_data *chan_data,
				  uint32_t channel)
{
	int32_t read_ptr = chan_data->ptr_data.current_ptr;
	int32_t write_ptr = dw_read(base, DW_DAR(channel));
	int32_t delta = write_ptr - chan_data->ptr_data.hw_ptr;
	int size;

	chan_data->ptr_data.hw_ptr = write_ptr;

	size = write_ptr - read_ptr;

	if (size < 0) {
		size += chan_data->ptr_data.buffer_bytes;
	} else if (!size) {
		/*
		 * Buffer is either full or empty. If the DMA pointer has
		 * changed, then the DMA has filled the buffer.
		 */
		if (delta) {
			size = chan_data->ptr_data.buffer_bytes;
		} else {
			LOG_DBG("%s: channel %d: size is 0!", dev->name, channel);
		}
	}

	LOG_DBG("%s: channel %d: DAR %x reader 0x%x free 0x%x avail 0x%x", dev->name, channel,
		write_ptr, read_ptr, chan_data->ptr_data.buffer_bytes - size, size);

	return size;
}

static int dw_dma_free_data_size(const struct device *dev, uint32_t base,
				 struct dw_dma_chan_data *chan_data,
				 uint32_t channel)
{
	int32_t read_ptr = dw_read(base, DW_SAR(channel));
	int32_t write_ptr = chan_data->ptr_data.current_ptr;
	int32_t delta = read_ptr - chan_data->ptr_data.hw_ptr;
	int size;

	chan_data->ptr_data.hw_ptr = read_ptr;

	size = read_ptr - write_ptr;
	if (size < 0) {
		size += chan_data->ptr_data.buffer_bytes;
	} else if (!size) {
		/*
		 * Buffer is either full or empty. If the DMA pointer has
		 * changed, then the DMA has emptied the buffer.
		 */
		if (delta) {
			size = chan_data->ptr_data.buffer_bytes;
		} else {
			LOG_DBG("%s: channel %d: size is 0!", dev->name, channel);
		}
	}

	LOG_DBG("%s: channel %d: SAR %x writer 0x%x free 0x%x avail 0x%x", dev->name, channel,
		read_ptr, write_ptr, size, chan_data->ptr_data.buffer_bytes - size);

	return size;
}

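/*
 * For directions where memory is the destination, report bytes readable
 * (pending) from the DAR progress; otherwise report bytes writable
 * (free) from the SAR progress. In either case pending_length + free
 * equals buffer_bytes.
 */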
int dw_dma_get_status(const struct device *dev, uint32_t channel,
		      struct dma_status *stat)
{
	struct dw_dma_dev_data *const dev_data = dev->data;
	const struct dw_dma_dev_cfg *const dev_cfg = dev->config;
	struct dw_dma_chan_data *chan_data;

	if (channel >= DW_CHAN_COUNT) {
		return -EINVAL;
	}

	chan_data = &dev_data->chan[channel];

	if (chan_data->direction == MEMORY_TO_MEMORY ||
	    chan_data->direction == PERIPHERAL_TO_MEMORY) {
		stat->pending_length = dw_dma_avail_data_size(dev, dev_cfg->base, chan_data,
							      channel);
		stat->free = chan_data->ptr_data.buffer_bytes - stat->pending_length;

	} else {
		stat->free = dw_dma_free_data_size(dev, dev_cfg->base, chan_data, channel);
		stat->pending_length = chan_data->ptr_data.buffer_bytes - stat->free;
	}
#if CONFIG_DMA_DW_HW_LLI
	if (!(dw_read(dev_cfg->base, DW_DMA_CHAN_EN) & DW_CHAN(channel))) {
		LOG_ERR("%s: xrun detected", dev->name);
		return -EPIPE;
	}
#endif
	return 0;
}