/*
 * Copyright (c) 2022 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <errno.h>

#include <stdio.h>
#include <string.h>
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/pm/device.h>
#include <zephyr/pm/device_runtime.h>
#include <soc.h>
#include "dma_dw_common.h"

#define LOG_LEVEL CONFIG_DMA_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(dma_dw_common);

/* number of tries to wait for reset */
#define DW_DMA_CFG_TRIES	10000

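/* Shared ISR for the DesignWare DMA controller. Reads the combined
 * interrupt status, clears the pending block/transfer/error bits, then
 * dispatches the registered per-channel callbacks, one channel per set bit.
 */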
void dw_dma_isr(const struct device *dev)
{
	const struct dw_dma_dev_cfg *const dev_cfg = dev->config;
	struct dw_dma_dev_data *const dev_data = dev->data;
	struct dw_dma_chan_data *chan_data;

	uint32_t status_tfr = 0U;
	uint32_t status_block = 0U;
	uint32_t status_err = 0U;
	uint32_t status_intr;
	uint32_t channel;

	status_intr = dw_read(dev_cfg->base, DW_INTR_STATUS);
	if (!status_intr) {
		LOG_ERR("%s: status_intr = %d", dev->name, status_intr);
	}

	/* get the source of our IRQ. */
	status_block = dw_read(dev_cfg->base, DW_STATUS_BLOCK);
	status_tfr = dw_read(dev_cfg->base, DW_STATUS_TFR);

	/* TODO: handle errors, just clear them atm */
	status_err = dw_read(dev_cfg->base, DW_STATUS_ERR);
	if (status_err) {
		LOG_ERR("%s: status_err = %d", dev->name, status_err);
		dw_write(dev_cfg->base, DW_CLEAR_ERR, status_err);
	}

	/* clear interrupts */
	dw_write(dev_cfg->base, DW_CLEAR_BLOCK, status_block);
	dw_write(dev_cfg->base, DW_CLEAR_TFR, status_tfr);

	/* Dispatch callbacks for channels depending upon the bit set */
	while (status_block) {
		channel = find_lsb_set(status_block) - 1;
		status_block &= ~(1 << channel);
		chan_data = &dev_data->chan[channel];

		if (chan_data->dma_blkcallback) {
			LOG_DBG("%s: Dispatching block complete callback for channel %d", dev->name,
				channel);

			/* Ensure the linked list (chan_data->lli) is
			 * freed in the user callback function once
			 * all the blocks are transferred.
			 */
			chan_data->dma_blkcallback(dev,
						   chan_data->blkuser_data,
						   channel, DMA_STATUS_BLOCK);
		}
	}

	while (status_tfr) {
		channel = find_lsb_set(status_tfr) - 1;
		status_tfr &= ~(1 << channel);
		chan_data = &dev_data->chan[channel];

		/* Transfer complete, channel now idle, a reload
		 * could safely occur in the callback via dma_config
		 * and dma_start
		 */
		chan_data->state = DW_DMA_IDLE;

		if (chan_data->dma_tfrcallback) {
			LOG_DBG("%s: Dispatching transfer callback for channel %d", dev->name,
				channel);
			chan_data->dma_tfrcallback(dev,
						   chan_data->tfruser_data,
						   channel, DMA_STATUS_COMPLETE);
		}
	}
}

/* mask address for dma to identify memory space. */
static void dw_dma_mask_address(struct dma_block_config *block_cfg,
				struct dw_lli *lli_desc, uint32_t direction)
{
	lli_desc->sar = block_cfg->source_address;
	lli_desc->dar = block_cfg->dest_address;

	switch (direction) {
	case MEMORY_TO_PERIPHERAL:
		lli_desc->sar |= CONFIG_DMA_DW_HOST_MASK;
		break;
	case PERIPHERAL_TO_MEMORY:
		lli_desc->dar |= CONFIG_DMA_DW_HOST_MASK;
		break;
	case MEMORY_TO_MEMORY:
		lli_desc->sar |= CONFIG_DMA_DW_HOST_MASK;
		lli_desc->dar |= CONFIG_DMA_DW_HOST_MASK;
		break;
	default:
		break;
	}
}

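/* Configure a channel from a dma_config: validate the request, translate
 * the scatter-gather block list into a chain of hardware LLI descriptors,
 * build the per-direction control/config register values and unmask the
 * channel interrupts. The channel must be idle or merely prepared.
 */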
int dw_dma_config(const struct device *dev, uint32_t channel,
			 struct dma_config *cfg)
{
	const struct dw_dma_dev_cfg *const dev_cfg = dev->config;
	struct dw_dma_dev_data *const dev_data = dev->data;
	struct dma_block_config *block_cfg;

	struct dw_lli *lli_desc;
	struct dw_lli *lli_desc_head;
	struct dw_lli *lli_desc_tail;
	uint32_t msize = 3; /* default msize, 8 bytes */
	int ret = 0;

	if (channel >= DW_CHAN_COUNT) {
		LOG_ERR("%s: invalid dma channel %d", dev->name, channel);
		ret = -EINVAL;
		goto out;
	}

	struct dw_dma_chan_data *chan_data = &dev_data->chan[channel];

	if (chan_data->state != DW_DMA_IDLE && chan_data->state != DW_DMA_PREPARED) {
		LOG_ERR("%s: channel %d must be inactive to reconfigure, currently %d", dev->name,
			channel, chan_data->state);
		ret = -EBUSY;
		goto out;
	}

	LOG_DBG("%s: channel %d config", dev->name, channel);

	__ASSERT_NO_MSG(cfg->source_data_size == cfg->dest_data_size);
	__ASSERT_NO_MSG(cfg->source_burst_length == cfg->dest_burst_length);
	__ASSERT_NO_MSG(cfg->block_count > 0);
	__ASSERT_NO_MSG(cfg->head_block != NULL);

	if (cfg->source_data_size != 1 && cfg->source_data_size != 2 &&
	    cfg->source_data_size != 4 && cfg->source_data_size != 8 &&
	    cfg->source_data_size != 16) {
		LOG_ERR("%s: channel %d invalid source_data_size %d", dev->name, channel,
			cfg->source_data_size);
		ret = -EINVAL;
		goto out;
	}

	if (cfg->block_count > CONFIG_DMA_DW_LLI_POOL_SIZE) {
		LOG_ERR("%s: channel %d scatter gather list larger than"
			" descriptors in pool, consider increasing CONFIG_DMA_DW_LLI_POOL_SIZE",
			dev->name, channel);
		ret = -EINVAL;
		goto out;
	}

	/* burst_size = (2 ^ msize) */
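	/* e.g. a source_burst_length of 8 gives
	 * msize = find_msb_set(8) - 1 = 3
	 */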
	msize = find_msb_set(cfg->source_burst_length) - 1;
	LOG_DBG("%s: channel %d m_size=%d", dev->name, channel, msize);
	__ASSERT_NO_MSG(msize < 5);

	/* default channel config */
	chan_data->direction = cfg->channel_direction;
	chan_data->cfg_lo = 0;
	chan_data->cfg_hi = 0;

	/* setup a list of lli structs. we don't need to allocate */
	chan_data->lli = &dev_data->lli_pool[channel][0]; /* TODO allocate here */
	chan_data->lli_count = cfg->block_count;

	/* zero the scatter gather list */
	memset(chan_data->lli, 0, sizeof(struct dw_lli) * chan_data->lli_count);
	lli_desc = chan_data->lli;
	lli_desc_head = &chan_data->lli[0];
	lli_desc_tail = &chan_data->lli[chan_data->lli_count - 1];

	chan_data->ptr_data.buffer_bytes = 0;

	/* copy the scatter gather list from dma_cfg to dw_lli */
	block_cfg = cfg->head_block;
	for (int i = 0; i < cfg->block_count; i++) {
		__ASSERT_NO_MSG(block_cfg != NULL);
		LOG_DBG("%s: copying block_cfg %p to lli_desc %p", dev->name, block_cfg, lli_desc);

		/* write CTL_LO for each lli */
		switch (cfg->source_data_size) {
		case 1:
			/* byte at a time transfer */
			lli_desc->ctrl_lo |= DW_CTLL_SRC_WIDTH(0);
			break;
		case 2:
			/* non peripheral copies are optimal using words */
			switch (cfg->channel_direction) {
			case MEMORY_TO_MEMORY:
				/* config the src tr width for 32 bit words */
				lli_desc->ctrl_lo |= DW_CTLL_SRC_WIDTH(2);
				break;
			default:
				/* config the src width for 16 bit samples */
				lli_desc->ctrl_lo |= DW_CTLL_SRC_WIDTH(1);
				break;
			}
			break;
		case 4:
			/* config the src tr width for 24, 32 bit samples */
			lli_desc->ctrl_lo |= DW_CTLL_SRC_WIDTH(2);
			break;
		default:
			LOG_ERR("%s: channel %d invalid src width %d", dev->name, channel,
				cfg->source_data_size);
			ret = -EINVAL;
			goto out;
		}

		LOG_DBG("%s: source data size: lli_desc %p, ctrl_lo %x", dev->name,
			lli_desc, lli_desc->ctrl_lo);

		switch (cfg->dest_data_size) {
		case 1:
			/* byte at a time transfer */
			lli_desc->ctrl_lo |= DW_CTLL_DST_WIDTH(0);
			break;
		case 2:
			/* non peripheral copies are optimal using words */
			switch (cfg->channel_direction) {
			case MEMORY_TO_MEMORY:
				/* config the dest tr width for 32 bit words */
				lli_desc->ctrl_lo |= DW_CTLL_DST_WIDTH(2);
				break;
			default:
				/* config the dest width for 16 bit samples */
				lli_desc->ctrl_lo |= DW_CTLL_DST_WIDTH(1);
				break;
			}
			break;
		case 4:
			/* config the dest tr width for 24, 32 bit samples */
			lli_desc->ctrl_lo |= DW_CTLL_DST_WIDTH(2);
			break;
		default:
			LOG_ERR("%s: channel %d invalid dest width %d", dev->name, channel,
				cfg->dest_data_size);
			ret = -EINVAL;
			goto out;
		}

		LOG_DBG("%s: dest data size: lli_desc %p, ctrl_lo %x", dev->name,
			lli_desc, lli_desc->ctrl_lo);

		lli_desc->ctrl_lo |= DW_CTLL_SRC_MSIZE(msize) |
			DW_CTLL_DST_MSIZE(msize);

		if (cfg->dma_callback) {
			lli_desc->ctrl_lo |= DW_CTLL_INT_EN; /* enable interrupt */
		}

		LOG_DBG("%s: msize, int_en: lli_desc %p, ctrl_lo %x", dev->name,
			lli_desc, lli_desc->ctrl_lo);

		/* config the SINC and DINC fields of CTL_LO,
		 * SRC/DST_PER fields of CFG_HI
		 */
		switch (cfg->channel_direction) {
		case MEMORY_TO_MEMORY:
			lli_desc->ctrl_lo |= DW_CTLL_FC_M2M | DW_CTLL_SRC_INC |
				DW_CTLL_DST_INC;
#if CONFIG_DMA_DW_HW_LLI
			LOG_DBG("%s: setting LLP_D_EN, LLP_S_EN in lli_desc->ctrl_lo %x", dev->name,
				lli_desc->ctrl_lo);
			lli_desc->ctrl_lo |=
				DW_CTLL_LLP_S_EN | DW_CTLL_LLP_D_EN;
			LOG_DBG("%s: lli_desc->ctrl_lo %x", dev->name, lli_desc->ctrl_lo);
#endif
#if CONFIG_DMA_DW
			chan_data->cfg_lo |= DW_CFGL_SRC_SW_HS;
			chan_data->cfg_lo |= DW_CFGL_DST_SW_HS;
#endif
			break;
		case MEMORY_TO_PERIPHERAL:
			lli_desc->ctrl_lo |= DW_CTLL_FC_M2P | DW_CTLL_SRC_INC |
				DW_CTLL_DST_FIX;
#if CONFIG_DMA_DW_HW_LLI
			lli_desc->ctrl_lo |= DW_CTLL_LLP_S_EN;
			chan_data->cfg_lo |= DW_CFGL_RELOAD_DST;
#endif
			/* Assign a hardware handshake interface (0-15) to the
			 * destination of the channel
			 */
			chan_data->cfg_hi |= DW_CFGH_DST(cfg->dma_slot);
#if CONFIG_DMA_DW
			chan_data->cfg_lo |= DW_CFGL_SRC_SW_HS;
#endif
			break;
		case PERIPHERAL_TO_MEMORY:
			lli_desc->ctrl_lo |= DW_CTLL_FC_P2M | DW_CTLL_SRC_FIX |
				DW_CTLL_DST_INC;
#if CONFIG_DMA_DW_HW_LLI
			if (!block_cfg->dest_scatter_en) {
				lli_desc->ctrl_lo |= DW_CTLL_LLP_D_EN;
			} else {
				/* Use contiguous auto-reload. Line 3 in
				 * table 3-3
				 */
				lli_desc->ctrl_lo |= DW_CTLL_D_SCAT_EN;
			}
			chan_data->cfg_lo |= DW_CFGL_RELOAD_SRC;
#endif
			/* Assign a hardware handshake interface (0-15) to the
			 * source of the channel
			 */
			chan_data->cfg_hi |= DW_CFGH_SRC(cfg->dma_slot);
#if CONFIG_DMA_DW
			chan_data->cfg_lo |= DW_CFGL_DST_SW_HS;
#endif
			break;
		default:
			LOG_ERR("%s: channel %d invalid direction %d", dev->name, channel,
				cfg->channel_direction);
			ret = -EINVAL;
			goto out;
		}

		LOG_DBG("%s: direction: lli_desc %p, ctrl_lo %x, cfg_hi %x, cfg_lo %x", dev->name,
			lli_desc, lli_desc->ctrl_lo, chan_data->cfg_hi, chan_data->cfg_lo);

		dw_dma_mask_address(block_cfg, lli_desc, cfg->channel_direction);

		LOG_DBG("%s: mask address: lli_desc %p, ctrl_lo %x, cfg_hi %x, cfg_lo %x",
			dev->name, lli_desc, lli_desc->ctrl_lo, chan_data->cfg_hi,
			chan_data->cfg_lo);

		if (block_cfg->block_size > DW_CTLH_BLOCK_TS_MASK) {
			LOG_ERR("%s: channel %d block size too big %d", dev->name, channel,
				block_cfg->block_size);
			ret = -EINVAL;
			goto out;
		}

		/* Set class and transfer size */
		lli_desc->ctrl_hi |= DW_CTLH_CLASS(dev_data->channel_data->chan[channel].class) |
			(block_cfg->block_size & DW_CTLH_BLOCK_TS_MASK);

		LOG_DBG("%s: block_size, class: lli_desc %p, ctrl_lo %x, cfg_hi %x, cfg_lo %x",
			dev->name, lli_desc, lli_desc->ctrl_lo, chan_data->cfg_hi,
			chan_data->cfg_lo);

		chan_data->ptr_data.buffer_bytes += block_cfg->block_size;

		/* set next descriptor in list */
		lli_desc->llp = (uintptr_t)(lli_desc + 1);

		LOG_DBG("%s: lli_desc llp %x", dev->name, lli_desc->llp);

		/* next descriptor */
		lli_desc++;

		block_cfg = block_cfg->next_block;
	}

#if CONFIG_DMA_DW_HW_LLI
	chan_data->cfg_lo |= DW_CFGL_CTL_HI_UPD_EN;
#endif

	/* end of list or cyclic buffer */
	if (cfg->cyclic) {
		lli_desc_tail->llp = (uintptr_t)lli_desc_head;
	} else {
		lli_desc_tail->llp = 0;
#if CONFIG_DMA_DW_HW_LLI
		LOG_DBG("%s: Clearing LLP_S_EN, LLP_D_EN from tail LLI %x", dev->name,
			lli_desc_tail->ctrl_lo);
		lli_desc_tail->ctrl_lo &= ~(DW_CTLL_LLP_S_EN | DW_CTLL_LLP_D_EN);
		LOG_DBG("%s: ctrl_lo %x", dev->name, lli_desc_tail->ctrl_lo);
#endif
	}

	/* set the initial lli, mark the channel as prepared (ready to be started) */
	chan_data->state = DW_DMA_PREPARED;
	chan_data->lli_current = chan_data->lli;

	/* initialize pointers */
	chan_data->ptr_data.start_ptr = DW_DMA_LLI_ADDRESS(chan_data->lli,
							 chan_data->direction);
	chan_data->ptr_data.end_ptr = chan_data->ptr_data.start_ptr +
				    chan_data->ptr_data.buffer_bytes;
	chan_data->ptr_data.current_ptr = chan_data->ptr_data.start_ptr;
	chan_data->ptr_data.hw_ptr = chan_data->ptr_data.start_ptr;

	/* Configure a callback appropriately depending on whether the
	 * interrupt is requested at the end of transaction completion or
	 * at the end of each block.
	 */
	if (cfg->complete_callback_en) {
		chan_data->dma_blkcallback = cfg->dma_callback;
		chan_data->blkuser_data = cfg->user_data;
		dw_write(dev_cfg->base, DW_MASK_BLOCK, DW_CHAN_UNMASK(channel));
	} else {
		chan_data->dma_tfrcallback = cfg->dma_callback;
		chan_data->tfruser_data = cfg->user_data;
		dw_write(dev_cfg->base, DW_MASK_TFR, DW_CHAN_UNMASK(channel));
	}

	dw_write(dev_cfg->base, DW_MASK_ERR, DW_CHAN_UNMASK(channel));

	/* write interrupt clear registers for the channel
	 * ClearTfr, ClearBlock, ClearSrcTran, ClearDstTran, ClearErr
	 */
	dw_write(dev_cfg->base, DW_CLEAR_TFR, 0x1 << channel);
	dw_write(dev_cfg->base, DW_CLEAR_BLOCK, 0x1 << channel);
	dw_write(dev_cfg->base, DW_CLEAR_SRC_TRAN, 0x1 << channel);
	dw_write(dev_cfg->base, DW_CLEAR_DST_TRAN, 0x1 << channel);
	dw_write(dev_cfg->base, DW_CLEAR_ERR, 0x1 << channel);

out:
	return ret;
}

bool dw_dma_is_enabled(const struct device *dev, uint32_t channel)
{
	const struct dw_dma_dev_cfg *const dev_cfg = dev->config;

	return dw_read(dev_cfg->base, DW_DMA_CHAN_EN) & DW_CHAN(channel);
}

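/* Start a previously configured (prepared) channel: program LLP (in
 * hardware LLI mode), SAR/DAR, CTL and CFG from the first descriptor,
 * then set the channel enable bit and take a PM runtime reference.
 */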
int dw_dma_start(const struct device *dev, uint32_t channel)
{
	const struct dw_dma_dev_cfg *const dev_cfg = dev->config;
	struct dw_dma_dev_data *dev_data = dev->data;
	int ret = 0;

	/* validate channel */
	if (channel >= DW_CHAN_COUNT) {
		ret = -EINVAL;
		goto out;
	}

	if (dw_dma_is_enabled(dev, channel)) {
		goto out;
	}

	struct dw_dma_chan_data *chan_data = &dev_data->chan[channel];

	/* validate channel state */
	if (chan_data->state != DW_DMA_PREPARED) {
		LOG_ERR("%s: channel %d not ready ena 0x%x status 0x%x", dev->name, channel,
			dw_read(dev_cfg->base, DW_DMA_CHAN_EN), chan_data->state);
		ret = -EBUSY;
		goto out;
	}

	/* is valid stream */
	if (!chan_data->lli) {
		LOG_ERR("%s: channel %d invalid stream", dev->name, channel);
		ret = -EINVAL;
		goto out;
	}

	LOG_INF("%s: channel %d start", dev->name, channel);

	struct dw_lli *lli = chan_data->lli_current;

#ifdef CONFIG_DMA_DW_HW_LLI
	/* LLP mode - write LLP pointer */

	uint32_t masked_ctrl_lo = lli->ctrl_lo & (DW_CTLL_LLP_D_EN | DW_CTLL_LLP_S_EN);
	uint32_t llp = 0;

	if (masked_ctrl_lo) {
		llp = (uint32_t)lli;
		LOG_DBG("%s: Setting llp", dev->name);
	}
	dw_write(dev_cfg->base, DW_LLP(channel), llp);
	LOG_DBG("%s: ctrl_lo %x, masked ctrl_lo %x, LLP %x", dev->name,
		lli->ctrl_lo, masked_ctrl_lo, dw_read(dev_cfg->base, DW_LLP(channel)));
#endif /* CONFIG_DMA_DW_HW_LLI */

	/* channel needs to start from scratch, so write SAR and DAR */
#ifdef CONFIG_DMA_64BIT
	dw_write(dev_cfg->base, DW_SAR(channel), (uint32_t)(lli->sar & DW_ADDR_MASK_32));
	dw_write(dev_cfg->base, DW_SAR_HI(channel), (uint32_t)(lli->sar >> DW_ADDR_RIGHT_SHIFT));
	dw_write(dev_cfg->base, DW_DAR(channel), (uint32_t)(lli->dar & DW_ADDR_MASK_32));
	dw_write(dev_cfg->base, DW_DAR_HI(channel), (uint32_t)(lli->dar >> DW_ADDR_RIGHT_SHIFT));
#else
	dw_write(dev_cfg->base, DW_SAR(channel), lli->sar);
	dw_write(dev_cfg->base, DW_DAR(channel), lli->dar);
#endif /* CONFIG_DMA_64BIT */

	/* program CTL_LO and CTL_HI */
	dw_write(dev_cfg->base, DW_CTRL_LOW(channel), lli->ctrl_lo);
	dw_write(dev_cfg->base, DW_CTRL_HIGH(channel), lli->ctrl_hi);

	/* program CFG_LO and CFG_HI */
	dw_write(dev_cfg->base, DW_CFG_LOW(channel), chan_data->cfg_lo);
	dw_write(dev_cfg->base, DW_CFG_HIGH(channel), chan_data->cfg_hi);

#ifdef CONFIG_DMA_64BIT
	LOG_DBG("%s: sar %llx, dar %llx, ctrl_lo %x, ctrl_hi %x, cfg_lo %x, cfg_hi %x, llp %x",
		dev->name, lli->sar, lli->dar, lli->ctrl_lo, lli->ctrl_hi, chan_data->cfg_lo,
		chan_data->cfg_hi, dw_read(dev_cfg->base, DW_LLP(channel))
		);
#else
	LOG_DBG("%s: sar %x, dar %x, ctrl_lo %x, ctrl_hi %x, cfg_lo %x, cfg_hi %x, llp %x",
		dev->name, lli->sar, lli->dar, lli->ctrl_lo, lli->ctrl_hi, chan_data->cfg_lo,
		chan_data->cfg_hi, dw_read(dev_cfg->base, DW_LLP(channel))
		);
#endif /* CONFIG_DMA_64BIT */

#ifdef CONFIG_DMA_DW_HW_LLI
	if (lli->ctrl_lo & DW_CTLL_D_SCAT_EN) {
		LOG_DBG("%s: configuring DW_DSR", dev->name);
		uint32_t words_per_tfr = (lli->ctrl_hi & DW_CTLH_BLOCK_TS_MASK) >>
			((lli->ctrl_lo & DW_CTLL_DST_WIDTH_MASK) >> DW_CTLL_DST_WIDTH_SHIFT);
		dw_write(dev_cfg->base, DW_DSR(channel),
			 DW_DSR_DSC(words_per_tfr) | DW_DSR_DSI(words_per_tfr));
	}
#endif /* CONFIG_DMA_DW_HW_LLI */

	chan_data->state = DW_DMA_ACTIVE;

	/* enable the channel */
	dw_write(dev_cfg->base, DW_DMA_CHAN_EN, DW_CHAN_UNMASK(channel));
	ret = pm_device_runtime_get(dev);

out:
	return ret;
}

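/* Stop an active or suspended channel. Optionally suspends the channel and
 * drains its FIFO first, then clears the channel enable bit and waits for
 * the hardware to report the channel disabled before dropping the PM
 * runtime reference.
 */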
int dw_dma_stop(const struct device *dev, uint32_t channel)
{
	const struct dw_dma_dev_cfg *const dev_cfg = dev->config;
	struct dw_dma_dev_data *dev_data = dev->data;
	struct dw_dma_chan_data *chan_data = &dev_data->chan[channel];
	enum pm_device_state pm_state;
	int ret = 0;

	if (channel >= DW_CHAN_COUNT) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * skip if device is not active. if we get an error for state_get,
	 * do not skip but check actual hardware state and stop if
	 * needed
	 */
	ret = pm_device_state_get(dev, &pm_state);
	if (!ret && pm_state != PM_DEVICE_STATE_ACTIVE) {
		goto out;
	}

	if (!dw_dma_is_enabled(dev, channel) && chan_data->state != DW_DMA_SUSPENDED) {
		ret = 0;
		goto out;
	}

#ifdef CONFIG_DMA_DW_HW_LLI
	struct dw_lli *lli = chan_data->lli;
	int i;
#endif

	LOG_INF("%s: channel %d stop", dev->name, channel);

	/* Validate the channel state */
	if (chan_data->state != DW_DMA_ACTIVE &&
	    chan_data->state != DW_DMA_SUSPENDED) {
		ret = -EINVAL;
		goto out;
	}

#ifdef CONFIG_DMA_DW_SUSPEND_DRAIN
	/* channel cannot be disabled right away, so first we need to
	 * suspend it and drain the FIFO
	 */
	dw_write(dev_cfg->base, DW_CFG_LOW(channel),
		 chan_data->cfg_lo | DW_CFGL_SUSPEND | DW_CFGL_DRAIN);

	/* now we wait for FIFO to be empty */
	bool fifo_empty = WAIT_FOR(dw_read(dev_cfg->base, DW_CFG_LOW(channel)) & DW_CFGL_FIFO_EMPTY,
				DW_DMA_TIMEOUT, k_busy_wait(DW_DMA_TIMEOUT/10));
	if (!fifo_empty) {
		LOG_WRN("%s: channel %d drain timeout", dev->name, channel);

		/* Continue even if draining timed out to make sure that the channel is going to be
		 * disabled.
		 * The same channel might be requested for other purpose (or for same) next time
		 * which will fail if the channel has been left enabled.
		 */
	}
#endif

	dw_write(dev_cfg->base, DW_DMA_CHAN_EN, DW_CHAN_MASK(channel));

	/* now we wait for channel to be disabled */
	bool is_disabled = WAIT_FOR(!(dw_read(dev_cfg->base, DW_DMA_CHAN_EN) & DW_CHAN(channel)),
				    DW_DMA_TIMEOUT, k_busy_wait(DW_DMA_TIMEOUT/10));
	if (!is_disabled) {
		LOG_ERR("%s: channel %d disable timeout", dev->name, channel);
		return -ETIMEDOUT;
	}

#if CONFIG_DMA_DW_HW_LLI
	for (i = 0; i < chan_data->lli_count; i++) {
		lli->ctrl_hi &= ~DW_CTLH_DONE(1);
		lli++;
	}
#endif
	chan_data->state = DW_DMA_IDLE;
	ret = pm_device_runtime_put(dev);
out:
	return ret;
}

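/* Resume a suspended channel by rewriting CFG_LO without the suspend bit. */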
int dw_dma_resume(const struct device *dev, uint32_t channel)
{
	const struct dw_dma_dev_cfg *const dev_cfg = dev->config;
	struct dw_dma_dev_data *dev_data = dev->data;
	int ret = 0;

	/* Validate channel index */
	if (channel >= DW_CHAN_COUNT) {
		ret = -EINVAL;
		goto out;
	}

	struct dw_dma_chan_data *chan_data = &dev_data->chan[channel];

	/* Validate channel state */
	if (chan_data->state != DW_DMA_SUSPENDED) {
		ret = -EINVAL;
		goto out;
	}

	LOG_DBG("%s: channel %d resume", dev->name, channel);

	dw_write(dev_cfg->base, DW_CFG_LOW(channel), chan_data->cfg_lo);

	/* Channel is now active */
	chan_data->state = DW_DMA_ACTIVE;

out:
	return ret;
}

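/* Suspend an active channel by setting the suspend bit in CFG_LO. */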
int dw_dma_suspend(const struct device *dev, uint32_t channel)
{
	const struct dw_dma_dev_cfg *const dev_cfg = dev->config;
	struct dw_dma_dev_data *dev_data = dev->data;
	int ret = 0;

	/* Validate channel index */
	if (channel >= DW_CHAN_COUNT) {
		ret = -EINVAL;
		goto out;
	}

	struct dw_dma_chan_data *chan_data = &dev_data->chan[channel];

	/* Validate channel state */
	if (chan_data->state != DW_DMA_ACTIVE) {
		ret = -EINVAL;
		goto out;
	}

	LOG_DBG("%s: channel %d suspend", dev->name, channel);

	dw_write(dev_cfg->base, DW_CFG_LOW(channel),
		      chan_data->cfg_lo | DW_CFGL_SUSPEND);

	/* Channel is now suspended */
	chan_data->state = DW_DMA_SUSPENDED;

out:
	return ret;
}

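/* One-time controller init: disable the DMAC if it was left enabled, wait
 * for the disable to take effect, re-enable it, mask all channel
 * interrupts and, if configured, program the FIFO partitioning registers.
 */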
int dw_dma_setup(const struct device *dev)
{
	const struct dw_dma_dev_cfg *const dev_cfg = dev->config;

	int i, ret = 0;

	/* we cannot config the DMAC if it has already been enabled by the host */
	if (dw_read(dev_cfg->base, DW_DMA_CFG) != 0) {
		dw_write(dev_cfg->base, DW_DMA_CFG, 0x0);
	}

	for (i = DW_DMA_CFG_TRIES; i > 0; i--) {
		if (!dw_read(dev_cfg->base, DW_DMA_CFG)) {
			break;
		}
	}

	if (!i) {
		LOG_ERR("%s: setup failed", dev->name);
		ret = -EIO;
		goto out;
	}

	LOG_DBG("%s: ENTER", dev->name);

	for (i = 0; i < DW_CHAN_COUNT; i++) {
		dw_read(dev_cfg->base, DW_DMA_CHAN_EN);
	}

	/* enable the DMA controller */
	dw_write(dev_cfg->base, DW_DMA_CFG, 1);

	/* mask all interrupts for all 8 channels */
	dw_write(dev_cfg->base, DW_MASK_TFR, DW_CHAN_MASK_ALL);
	dw_write(dev_cfg->base, DW_MASK_BLOCK, DW_CHAN_MASK_ALL);
	dw_write(dev_cfg->base, DW_MASK_SRC_TRAN, DW_CHAN_MASK_ALL);
	dw_write(dev_cfg->base, DW_MASK_DST_TRAN, DW_CHAN_MASK_ALL);
	dw_write(dev_cfg->base, DW_MASK_ERR, DW_CHAN_MASK_ALL);

#ifdef CONFIG_DMA_DW_FIFO_PARTITION
	/* allocate FIFO partitions for each channel */
	dw_write(dev_cfg->base, DW_FIFO_PART1_HI,
		      DW_FIFO_CHx(DW_FIFO_SIZE) | DW_FIFO_CHy(DW_FIFO_SIZE));
	dw_write(dev_cfg->base, DW_FIFO_PART1_LO,
		      DW_FIFO_CHx(DW_FIFO_SIZE) | DW_FIFO_CHy(DW_FIFO_SIZE));
	dw_write(dev_cfg->base, DW_FIFO_PART0_HI,
		      DW_FIFO_CHx(DW_FIFO_SIZE) | DW_FIFO_CHy(DW_FIFO_SIZE));
	dw_write(dev_cfg->base, DW_FIFO_PART0_LO,
		      DW_FIFO_CHx(DW_FIFO_SIZE) | DW_FIFO_CHy(DW_FIFO_SIZE) |
		 DW_FIFO_UPD);
#endif /* CONFIG_DMA_DW_FIFO_PARTITION */

	/* TODO add baytrail/cherrytrail workaround */
out:
	return ret;
}

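/* Circular-buffer accounting used by dw_dma_get_status(). Pointers are
 * offsets into a ring of buffer_bytes, so a negative difference wraps by
 * adding the buffer size; a zero difference is ambiguous (full or empty)
 * and is resolved by checking whether the hardware pointer has moved
 * since the previous query.
 */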
static int dw_dma_avail_data_size(const struct device *dev, uint32_t base,
				  struct dw_dma_chan_data *chan_data,
				  uint32_t channel)
{
	int32_t read_ptr = chan_data->ptr_data.current_ptr;
	int32_t write_ptr = dw_read(base, DW_DAR(channel));
	int32_t delta = write_ptr - chan_data->ptr_data.hw_ptr;
	int size;

	chan_data->ptr_data.hw_ptr = write_ptr;

	size = write_ptr - read_ptr;

	if (size < 0) {
		size += chan_data->ptr_data.buffer_bytes;
	} else if (!size) {
		/*
		 * Buffer is either full or empty. If the DMA pointer has
		 * changed, then the DMA has filled the buffer.
		 */
		if (delta) {
			size = chan_data->ptr_data.buffer_bytes;
		} else {
			LOG_DBG("%s: channel %d: size is 0!", dev->name, channel);
		}
	}

	LOG_DBG("%s: channel %d: DAR %x reader 0x%x free 0x%x avail 0x%x", dev->name, channel,
		write_ptr, read_ptr, chan_data->ptr_data.buffer_bytes - size, size);

	return size;
}

static int dw_dma_free_data_size(const struct device *dev, uint32_t base,
				 struct dw_dma_chan_data *chan_data,
				 uint32_t channel)
{
	int32_t read_ptr = dw_read(base, DW_SAR(channel));
	int32_t write_ptr = chan_data->ptr_data.current_ptr;
	int32_t delta = read_ptr - chan_data->ptr_data.hw_ptr;
	int size;

	chan_data->ptr_data.hw_ptr = read_ptr;

	size = read_ptr - write_ptr;
	if (size < 0) {
		size += chan_data->ptr_data.buffer_bytes;
	} else if (!size) {
		/*
		 * Buffer is either full or empty. If the DMA pointer has
		 * changed, then the DMA has emptied the buffer.
		 */
		if (delta) {
			size = chan_data->ptr_data.buffer_bytes;
		} else {
			LOG_DBG("%s: channel %d: size is 0!", dev->name, channel);
		}
	}

	LOG_DBG("%s: channel %d: SAR %x writer 0x%x free 0x%x avail 0x%x", dev->name, channel,
		read_ptr, write_ptr, size, chan_data->ptr_data.buffer_bytes - size);

	return size;
}

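/* Report free and pending byte counts for a channel, derived from the
 * hardware read/write pointers. In hardware LLI mode a disabled channel
 * is reported as an xrun (-EPIPE).
 */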
int dw_dma_get_status(const struct device *dev, uint32_t channel,
		      struct dma_status *stat)
{
	struct dw_dma_dev_data *const dev_data = dev->data;
	const struct dw_dma_dev_cfg *const dev_cfg = dev->config;
	struct dw_dma_chan_data *chan_data;

	if (channel >= DW_CHAN_COUNT) {
		return -EINVAL;
	}

	chan_data = &dev_data->chan[channel];

	if (chan_data->direction == MEMORY_TO_MEMORY ||
	    chan_data->direction == PERIPHERAL_TO_MEMORY) {
		stat->pending_length = dw_dma_avail_data_size(dev, dev_cfg->base, chan_data,
							      channel);
		stat->free = chan_data->ptr_data.buffer_bytes - stat->pending_length;
	} else {
		stat->free = dw_dma_free_data_size(dev, dev_cfg->base, chan_data, channel);
		stat->pending_length = chan_data->ptr_data.buffer_bytes - stat->free;
	}
#if CONFIG_DMA_DW_HW_LLI
	if (!(dw_read(dev_cfg->base, DW_DMA_CHAN_EN) & DW_CHAN(channel))) {
		LOG_ERR("%s: xrun detected", dev->name);
		return -EPIPE;
	}
#endif
	return 0;
}