1 /*
2  * Copyright (c) 2023 Intel Corporation
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include "sedi_dma_ann_1p0.h"
8 #include "sedi_driver_pm.h"
9 
10 #include "sedi_soc_regs.h"
11 #include <sedi_driver_dma.h>
12 #include <sedi_driver_ipc.h>
13 #include "sedi_driver_core.h"
14 
#define DMA_RS0 0       /* root space for common memory */
#define DMA_RS1 1       /* root space for csme memory */
#define DMA_RS3 3       /* root space for IMR memory */
/* flags for the misc M2M_TYPE field: which end of the transfer is in DRAM */
#define DT_IS_IN_DRAM (1 << 0)
#define SR_IS_IN_DRAM (1 << 1)

/*
 * chan_en/mask registers use a write-enable scheme: bit (DMA_CHANNEL_NUM + i)
 * must be set for the write to affect channel i's bit, so writes never
 * disturb the other channels.
 */
#define DMA_WRITE_ENABLE(i) (BIT(i) | BIT(DMA_CHANNEL_NUM + i))
#define DMA_WRITE_DISABLE(i) BIT(DMA_CHANNEL_NUM + i)
/* split a 64-bit address into its 32-bit halves */
#define GET_MSB(data64) ((uint32_t)(data64 >> 32))
#define GET_LSB(data64) ((uint32_t)(data64))
25 
/* driver version: API version followed by driver implementation version */
static const sedi_driver_version_t driver_version = { SEDI_DMA_API_VERSION,
						      SEDI_DMA_DRIVER_VERSION };
29 
/*
 * Driver-internal control codes, numbered after the public SEDI control IDs
 * so they can share sedi_dma_control_aux() but are rejected by the public
 * sedi_dma_control() entry point.
 */
typedef enum {
	SEDI_CONFIG_DMA_TRANS_TYPE = SEDI_CONFIG_DMA_CONTROL_ID_MAX,
	SEDI_CONFIG_DMA_LL_HEADER
} dma_inner_control_code;
34 
35 /*!
36  * DMA Transfer Type, inner usage
37  */
38 typedef enum {
39 	DMA_TYPE_SINGLE,                /**< Single block mode. */
40 	DMA_TYPE_MULTI_CONT,            /**< Contiguous multiblock mode. */
41 	DMA_TYPE_MULTI_LL,              /**< Link list multiblock mode. */
42 	DMA_TYPE_MULTI_LL_CIRCULAR,     /**< Link list multiblock mode with
43 					  cyclic operation. */
44 	DMA_TYPE_MAX
45 } dma_transfer_type_t;
46 
/* driver capabilities; is_available is refreshed on every
 * sedi_dma_get_capabilities() call from the device-ownership query
 */
static sedi_dma_capabilities_t driver_capabilities[SEDI_DMA_NUM] = { 0 };
49 
/* per-channel configuration, staged by sedi_dma_control() and written to
 * hardware lazily by dma_channel_apply_config()
 */
typedef struct {
	uint8_t config_applied;         /* 1 = already applied to registers */
	uint8_t tf_mode;                /* dma_transfer_type_t */
	uint8_t sr_mem_type;            /* source memory type (DMA_*_MEM) */
	uint8_t dt_mem_type;            /* destination memory type */
	/* NOTE(review): sr_msb/dt_msb receive GET_MSB() of a 64-bit address
	 * (a 32-bit value) via sedi_dma_control_aux(), but are only 8 bits
	 * wide — confirm the platform guarantees the upper address bits fit
	 * in one byte, otherwise the address is silently truncated.
	 */
	uint8_t sr_msb;                 /* upper bits of 64-bit source addr */
	uint8_t dt_msb;                 /* upper bits of 64-bit dest addr */
	uint8_t burst_length;           /* burst size (DMA_BURST_TRANS_*) */
	uint8_t sr_width;               /* source transfer width */
	uint8_t dt_width;               /* destination transfer width */
	uint8_t direction;              /* mem/peripheral transfer direction */
	uint8_t handshake_polarity;     /* hardware handshake polarity */
	uint8_t peripheral_direction;   /* peripheral rx/tx selector */
	uint16_t handshake_device_id;   /* device wired to the dma request */
	dma_linked_list_item_t *linked_list_header; /* first LL descriptor */
} channel_config_t;
67 
/* dma runtime context information (one instance per DMA device) */
typedef struct {
	channel_config_t channel_config[DMA_CHANNEL_NUM];
	sedi_dma_event_cb_t cb_event[DMA_CHANNEL_NUM];  /*event callback*/
	void *cb_param[DMA_CHANNEL_NUM];                /*event callback parameter*/
	sedi_dma_status_t status[DMA_CHANNEL_NUM];      /*status flags*/
	uint32_t done_byte[DMA_CHANNEL_NUM];            /*the transferred byte*/
	dma_linked_list_item_t *next_llp[DMA_CHANNEL_NUM]; /*next descriptor*/
	uint32_t flags[DMA_CHANNEL_NUM];            /*control and state flags*/
	uint8_t vnn_status;     /* bitmap: channels holding a VNN power vote */
	uint8_t power_status;   /* bitmap: channels that requested full power */
	/*other private runtime data*/
} dma_context_t;
81 
/*resources data structure: static hardware description per DMA instance*/
typedef struct {
	dma_ann_1p0_regs_t *regs; /* register interface*/
	/*other private data can go here,  DMA related information etc. */
} dma_resources_t;
87 
/* per-device runtime state, zero-initialized at load */
static dma_context_t dma_context[SEDI_DMA_NUM] = { 0 };

/* register-bank mapping, one entry per DMA instance */
static const dma_resources_t resources[SEDI_DMA_NUM] = {
	{ .regs = (dma_ann_1p0_regs_t *)SEDI_DMA_0_REG_BASE },
};
93 
/* Return the driver's API/implementation version pair. */
sedi_driver_version_t sedi_dma_get_version(void)
{
	return driver_version;
}
98 
/*
 * Report the capabilities of one DMA device.
 * Availability is re-evaluated on every call from device ownership.
 * Returns SEDI_DRIVER_OK, or SEDI_DRIVER_ERROR_PARAMETER on a bad device id.
 */
int sedi_dma_get_capabilities(IN sedi_dma_t dma_device,
			      INOUT sedi_dma_capabilities_t *cap)
{
	DBG_CHECK(dma_device < SEDI_DMA_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	driver_capabilities[dma_device].is_available =
		sedi_dev_is_self_owned(SEDI_DEVID_DMA0 + dma_device) ? 1 : 0;

	*cap = driver_capabilities[dma_device];
	return SEDI_DRIVER_OK;
}
113 
/*
 * Record a VNN power vote for this channel; the first voting channel
 * raises the actual PM request for the whole device.
 */
static inline void dma_vnn_req(sedi_dma_t dma_device, int channel_id)
{
	uint8_t votes = dma_context[dma_device].vnn_status;

	if (votes == 0) {
		/* first voter: raise the device-level VNN request */
		PM_VNN_DRIVER_REQ(VNN_ID_DMA0 + dma_device);
	}
	dma_context[dma_device].vnn_status = votes | BIT(channel_id);
}
121 
/*
 * Drop this channel's VNN power vote; the last channel to drop its vote
 * releases the device-level PM request.
 *
 * Fix: only de-request when this channel actually holds a vote. The old
 * code checked vnn_status == 0 unconditionally, so a channel that never
 * voted (e.g. an SRAM-only transfer, where dma_channel_apply_config()
 * skips dma_vnn_req()) would still trigger PM_VNN_DRIVER_DEREQ from
 * dma_transfer_post(), unbalancing the request/de-request pairing.
 */
static inline void dma_vnn_dereq(sedi_dma_t dma_device, int channel_id)
{
	if ((dma_context[dma_device].vnn_status & BIT(channel_id)) == 0) {
		return;
	}
	dma_context[dma_device].vnn_status &= (~BIT(channel_id));
	if (dma_context[dma_device].vnn_status == 0) {
		/* last voter gone: release the device-level VNN request */
		PM_VNN_DRIVER_DEREQ(VNN_ID_DMA0 + dma_device);
	}
}
131 
/*
 * Reset a channel configuration to "unconfigured" defaults; the _MAX
 * sentinels force dma_channel_apply_config() to reject the channel until
 * the caller has set real values.
 *
 * NOTE(review): handshake_polarity and peripheral_direction are not reset
 * here, so they persist across transfers — confirm this is intentional.
 */
static void dma_set_default_channel_config(OUT channel_config_t *config)
{
	config->tf_mode = DMA_TYPE_MAX;
	config->sr_mem_type = DMA_SRAM_MEM;
	config->dt_mem_type = DMA_SRAM_MEM;
	config->sr_msb = 0;
	config->dt_msb = 0;
	config->burst_length = DMA_BURST_TRANS_LENGTH_MAX;
	config->sr_width = DMA_TRANS_WIDTH_MAX;
	config->dt_width = DMA_TRANS_WIDTH_MAX;
	config->direction = DMA_DIRECTION_MAX;
	config->handshake_device_id = 0;
	config->linked_list_header = NULL;
	config->config_applied = 0;
}
147 
/*  mask  channel interrupt
 *
 * Masks every interrupt source (transfer, block, src/dst transaction,
 * error) for one channel. DMA_WRITE_DISABLE() sets only the write-enable
 * bit, so the mask bit for channel_id is cleared without disturbing the
 * other channels.
 */
static void mask_channel_interrupt(IN sedi_dma_t dma_device, IN int channel_id)
{
	volatile dma_ann_1p0_regs_t *regs = resources[dma_device].regs;

	regs->int_reg.mask_tfr_low = DMA_WRITE_DISABLE(channel_id);
	regs->int_reg.mask_block_low = DMA_WRITE_DISABLE(channel_id);
	regs->int_reg.mask_src_trans_low = DMA_WRITE_DISABLE(channel_id);
	regs->int_reg.mask_dst_trans_low = DMA_WRITE_DISABLE(channel_id);
	regs->int_reg.mask_err_low = DMA_WRITE_DISABLE(channel_id);
}
159 
/* clear channel interrupt
 *
 * Write-1-to-clear: writing BIT(channel_id) to each clear register
 * acknowledges any pending interrupt of that type for the channel.
 */
static void clear_channel_interrupt(IN sedi_dma_t dma_device, IN int channel_id)
{
	volatile dma_ann_1p0_regs_t *regs = resources[dma_device].regs;

	regs->int_reg.clear_tfr_low = BIT(channel_id);
	regs->int_reg.clear_block_low = BIT(channel_id);
	regs->int_reg.clear_src_trans_low = BIT(channel_id);
	regs->int_reg.clear_dst_trans_low = BIT(channel_id);
	regs->int_reg.clear_err_low = BIT(channel_id);
}
171 
/* enable channel interrupt
 *
 * Only the transfer-complete and error interrupts are unmasked; block and
 * src/dst transaction interrupts stay masked — dma_isr() only handles tfr
 * and err status.
 */
static void unmask_channel_interrupt(IN sedi_dma_t dma_device,
				     IN int channel_id)
{
	volatile dma_ann_1p0_regs_t *regs = resources[dma_device].regs;

	regs->int_reg.mask_tfr_low = DMA_WRITE_ENABLE(channel_id);
	regs->int_reg.mask_err_low = DMA_WRITE_ENABLE(channel_id);
}
181 
/*
 * Initialize one DMA channel: reset its staged configuration, register the
 * user callback, and leave the channel quiesced (interrupts masked and
 * cleared, status flags reset).
 * Returns SEDI_DRIVER_OK, or SEDI_DRIVER_ERROR_PARAMETER on bad ids.
 */
int32_t sedi_dma_init(IN sedi_dma_t dma_device, IN int channel_id,
		      IN sedi_dma_event_cb_t cb, INOUT void *param)
{
	DBG_CHECK(dma_device < SEDI_DMA_NUM, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(channel_id < DMA_CHANNEL_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	dma_context_t *ctx = &dma_context[dma_device];

	/* reset the per-channel configuration to its defaults */
	dma_set_default_channel_config(&ctx->channel_config[channel_id]);

	/* register the event callback and its user argument */
	ctx->cb_event[channel_id] = cb;
	ctx->cb_param[channel_id] = param;

	/* quiesce: mask then acknowledge any stale interrupts */
	mask_channel_interrupt(dma_device, channel_id);
	clear_channel_interrupt(dma_device, channel_id);

	ctx->status[channel_id].busy = 0;
	ctx->status[channel_id].bus_error = 0;

	return SEDI_DRIVER_OK;
}
203 
/*
 * Uninitialize one DMA channel: drop the callback, mask its interrupts and
 * clear its status flags.
 * Returns SEDI_DRIVER_OK, or SEDI_DRIVER_ERROR_PARAMETER on bad ids.
 */
int32_t sedi_dma_uninit(IN sedi_dma_t dma_device, IN int channel_id)
{
	DBG_CHECK(dma_device < SEDI_DMA_NUM, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(channel_id < DMA_CHANNEL_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	dma_context_t *ctx = &dma_context[dma_device];

	ctx->cb_event[channel_id] = NULL;

	mask_channel_interrupt(dma_device, channel_id);

	ctx->status[channel_id].busy = 0;
	ctx->status[channel_id].bus_error = 0;
	return SEDI_DRIVER_OK;
}
216 
/*
 * Configure cache-snoop behavior for one channel: data reads/writes are
 * non-snooped, while linked-list pointer and status reads/writes stay
 * snooped (coherent).
 */
static void config_snoop(sedi_dma_t dma_device, int channel_id)
{
	volatile dma_ann_1p0_regs_t *regs = resources[dma_device].regs;
	volatile dma_chan_reg_t *chan_regs = &(regs->chan_reg[channel_id]);
	volatile dma_misc_regs_t *misc_regs = &(regs->dev_misc_reg);

	/* snoop: select the channel for indirect misc register access first */
	misc_regs->dma_regaccess_chid = channel_id;

	/* data path: no snoop on plain reads/writes */
	SET_BITS(chan_regs->cfg_low, WR_SNP_LOC, WR_SNP_LEN, 0);
	SET_BITS(chan_regs->cfg_low, RD_SNP_LOC, RD_SNP_LEN, 0);
	/* control path: keep LLP/status accesses coherent */
	SET_BITS(chan_regs->cfg_low, RD_LLP_SNP_LOC, RD_LLP_SNP_LEN, 1);
	SET_BITS(chan_regs->cfg_low, RD_STAT_SNP_LOC, RD_STAT_SNP_LEN, 1);
	SET_BITS(chan_regs->cfg_low, WR_STAT_SNP_LOC, WR_STAT_SNP_LEN, 1);
	SET_BITS(chan_regs->cfg_low, WR_CTLHI_SNP_LOC, WR_CTLHI_SNP_LEN, 1);
	SET_BITS(misc_regs->dma_ctl_ch[channel_id], RD_NON_SNOOP_LOC,
		 RD_NON_SNOOP_LEN, 1);
	SET_BITS(misc_regs->dma_ctl_ch[channel_id], WR_NON_SNOOP_LOC,
		 WR_NON_SNOOP_LEN, 1);
	SET_BITS(misc_regs->dma_ctl_ch[channel_id], NON_SNOOP_LOC,
		 NON_SNOOP_LEN, 1);
}
239 
/*
 * Set the power state of one DMA channel. power_status is a per-channel
 * bitmap used as a reference count: the device-level power call is issued
 * only on the first channel up (FULL) and the last channel down (SUSPEND).
 * SEDI_POWER_OFF is not supported and returns an error.
 */
int32_t sedi_dma_set_power(IN sedi_dma_t dma_device, IN int channel_id,
			   IN sedi_power_state_t state)
{
	DBG_CHECK(dma_device < SEDI_DMA_NUM, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(channel_id < DMA_CHANNEL_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	sedi_devid_t devid = SEDI_DEVID_DMA0 + dma_device;

	switch (state) {
	case SEDI_POWER_FULL:
		if (dma_context[dma_device].power_status == 0) {
			/* first channel up: power the whole device */
			sedi_pm_set_device_power(devid, SEDI_POWER_FULL);
		}
		dma_context[dma_device].power_status |= BIT(channel_id);
		config_snoop(dma_device, channel_id);
		break;
	case SEDI_POWER_LOW:
	case SEDI_POWER_SUSPEND:
	case SEDI_POWER_FORCE_SUSPEND:
		/* stop any in-flight transfer before powering down */
		sedi_dma_abort_transfer(dma_device, channel_id);
		if (dma_context[dma_device].power_status == 0) {
			break;
		}
		dma_context[dma_device].power_status &= (~BIT(channel_id));
		if (dma_context[dma_device].power_status == 0) {
			/* last channel down: suspend the whole device */
			sedi_pm_set_device_power(devid, SEDI_POWER_SUSPEND);
		}
		break;
	case SEDI_POWER_OFF:
	default:
		return SEDI_DRIVER_ERROR_PARAMETER;
	}
	return SEDI_DRIVER_OK;
}
274 
/*
 * Stage one configuration item into the channel's config structure; the
 * values are written to hardware later by dma_channel_apply_config().
 * Accepts both public SEDI control IDs and the driver-internal codes
 * (SEDI_CONFIG_DMA_TRANS_TYPE / SEDI_CONFIG_DMA_LL_HEADER).
 * Any change clears config_applied so the hardware is reprogrammed.
 *
 * Fix: the SEDI_CONFIG_DMA_HS_POLARITY case validated the argument
 * against DMA_HS_PER_RTX_MAX (the rx/tx-direction bound); it now uses
 * DMA_HS_POLARITY_MAX, matching the check dma_apply_other_regs() performs
 * on handshake_polarity.
 */
static int32_t sedi_dma_control_aux(IN sedi_dma_t dma_device, IN int channel_id,
				    IN uint32_t control_id, IN uint32_t arg)
{
	DBG_CHECK(dma_device < SEDI_DMA_NUM, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(channel_id < DMA_CHANNEL_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	channel_config_t *config =
		&(dma_context[dma_device].channel_config[channel_id]);

	switch (control_id) {
	case SEDI_CONFIG_DMA_TRANS_TYPE:
		DBG_CHECK(arg < DMA_TYPE_MAX, SEDI_DRIVER_ERROR_PARAMETER);
		config->tf_mode = arg;
		break;
	case SEDI_CONFIG_DMA_SR_MEM_TYPE:
		DBG_CHECK(arg < DMA_MEM_TYPE_MAX, SEDI_DRIVER_ERROR_PARAMETER);
		config->sr_mem_type = arg;
		break;
	case SEDI_CONFIG_DMA_DT_MEM_TYPE:
		DBG_CHECK(arg < DMA_MEM_TYPE_MAX, SEDI_DRIVER_ERROR_PARAMETER);
		config->dt_mem_type = arg;
		break;
	case SEDI_CONFIG_DMA_LL_SR_MSB:
		config->sr_msb = arg;
		break;
	case SEDI_CONFIG_DMA_LL_DT_MSB:
		config->dt_msb = arg;
		break;
	case SEDI_CONFIG_DMA_DIRECTION:
		DBG_CHECK(arg < DMA_DIRECTION_MAX, SEDI_DRIVER_ERROR_PARAMETER);
		config->direction = arg;
		break;
	case SEDI_CONFIG_DMA_BURST_LENGTH:
		DBG_CHECK(arg < DMA_BURST_TRANS_LENGTH_MAX,
			  SEDI_DRIVER_ERROR_PARAMETER);
		config->burst_length = arg;
		break;
	case SEDI_CONFIG_DMA_SR_TRANS_WIDTH:
		DBG_CHECK(arg < DMA_TRANS_WIDTH_MAX,
			  SEDI_DRIVER_ERROR_PARAMETER);
		config->sr_width = arg;
		break;
	case SEDI_CONFIG_DMA_DT_TRANS_WIDTH:
		DBG_CHECK(arg < DMA_TRANS_WIDTH_MAX,
			  SEDI_DRIVER_ERROR_PARAMETER);
		config->dt_width = arg;
		break;
	case SEDI_CONFIG_DMA_HS_DEVICE_ID:
		config->handshake_device_id = arg;
		break;
	case SEDI_CONFIG_DMA_HS_DEVICE_ID_PER_DIR:
		config->peripheral_direction = arg;
		break;
	case SEDI_CONFIG_DMA_HS_POLARITY:
		DBG_CHECK(arg < DMA_HS_POLARITY_MAX,
			  SEDI_DRIVER_ERROR_PARAMETER);
		config->handshake_polarity = arg;
		break;
	case SEDI_CONFIG_DMA_LL_HEADER:
		DBG_CHECK((arg), SEDI_DRIVER_ERROR_PARAMETER);
		config->linked_list_header = (dma_linked_list_item_t *)arg;
		break;
	default:
		return SEDI_DRIVER_ERROR_PARAMETER;
	}
	/* force dma_channel_apply_config() to reprogram the hardware */
	config->config_applied = 0;
	return SEDI_DRIVER_OK;
}
343 
/*
 * Public channel-configuration entry point. Rejects the driver-internal
 * control codes (>= SEDI_CONFIG_DMA_CONTROL_ID_MAX) before delegating to
 * sedi_dma_control_aux().
 */
int32_t sedi_dma_control(IN sedi_dma_t dma_device, IN int channel_id,
			 IN uint32_t control_id, IN uint32_t arg)
{
	DBG_CHECK(dma_device < SEDI_DMA_NUM, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(channel_id < DMA_CHANNEL_NUM, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(control_id < SEDI_CONFIG_DMA_CONTROL_ID_MAX,
		  SEDI_DRIVER_ERROR_PARAMETER);

	return sedi_dma_control_aux(dma_device, channel_id, control_id, arg);
}
354 
/* Copy the channel's cached status flags (busy/bus_error) to *status. */
int sedi_dma_get_status(IN sedi_dma_t dma_device, IN int channel_id,
			OUT sedi_dma_status_t *status)
{
	DBG_CHECK(dma_device < SEDI_DMA_NUM, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(channel_id < DMA_CHANNEL_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	*status = dma_context[dma_device].status[channel_id];
	return SEDI_DRIVER_OK;
}
364 
/*config misc and other registers, as an adding for ctrl register
 *
 * Programs handshake routing (for peripheral transfers), memory root
 * space, DRAM/SRAM transfer mode and the upper 32-bit address halves.
 * Caller must already have selected the channel via dma_regaccess_chid.
 */
static int32_t dma_apply_other_regs(volatile dma_misc_regs_t *misc_regs,
				    volatile dma_chan_reg_t *chan_regs,
				    channel_config_t *config, int channel_id)
{
	uint8_t sr_rs = DMA_RS0;
	uint8_t dt_rs = DMA_RS0;
	uint8_t dma_mem_trans_mode = 0;

	/* peripheral related registers*/
	if (config->direction != DMA_MEMORY_TO_MEMORY) {
		/* config check */
		DBG_CHECK(config->peripheral_direction < DMA_HS_PER_RTX_MAX,
			  SEDI_DRIVER_ERROR_PARAMETER);
		DBG_CHECK(config->handshake_polarity < DMA_HS_POLARITY_MAX,
			  SEDI_DRIVER_ERROR_PARAMETER);
		/* hardware handshake only*/
		/* select peripheral rx/tx to link up with dma*/
		SET_BITS(misc_regs->dma_xbar_sel[channel_id], RX_TX_LOC,
			 RX_TX_LEN, config->peripheral_direction);
		/* select peripheral device to connect with dma req wire*/
		SET_BITS(misc_regs->dma_xbar_sel[channel_id], DEVID_LOC,
			 DEVID_LEN, config->handshake_device_id);
		/* set handshaking polarity */
		SET_BITS(chan_regs->cfg_low, DST_HS_POL_LOC, DST_HS_POL_LEN,
			 config->handshake_polarity);
		SET_BITS(chan_regs->cfg_low, SRC_HS_POL_LOC, SRC_HS_POL_LEN,
			 config->handshake_polarity);
		SET_BITS(chan_regs->cfg_low, HSHAKE_NP_WR_LOC, HSHAKE_NP_WR_LEN,
			 1U);
		/* fill channel id to DST/SRC_PER reg*/
		SET_BITS(chan_regs->cfg_high, SRC_PER_LOC, SRC_PER_LEN,
			 channel_id);
		SET_BITS(chan_regs->cfg_high, DST_PER_LOC, DST_PER_LEN,
			 channel_id);
	}
	/* memory type related registers config*/
	/* source is memory*/
	if ((config->direction == DMA_MEMORY_TO_PERIPHERAL) ||
	    (config->direction == DMA_MEMORY_TO_MEMORY)) {
		DBG_CHECK(config->sr_mem_type < DMA_MEM_TYPE_MAX,
			  SEDI_DRIVER_ERROR_PARAMETER);
		if (config->sr_mem_type == DMA_UMA_MEM) {
			sr_rs = DMA_RS3; /* IMR root space for UMA */
		}
		if (config->sr_mem_type != DMA_SRAM_MEM) {
			dma_mem_trans_mode |= SR_IS_IN_DRAM;
		}
	}
	/* destination is memory*/
	if ((config->direction == DMA_PERIPHERAL_TO_MEMORY) ||
	    (config->direction == DMA_MEMORY_TO_MEMORY)) {
		DBG_CHECK(config->dt_mem_type < DMA_MEM_TYPE_MAX,
			  SEDI_DRIVER_ERROR_PARAMETER);
		if (config->dt_mem_type == DMA_UMA_MEM) {
			dt_rs = DMA_RS3; /* IMR root space for UMA */
		}
		if (config->dt_mem_type != DMA_SRAM_MEM) {
			dma_mem_trans_mode |= DT_IS_IN_DRAM;
		}
	}
	/*set root space and memory type*/
	SET_BITS(misc_regs->dma_ctl_ch[channel_id], WR_RS_LOC, WR_RS_LEN,
		 dt_rs);
	SET_BITS(misc_regs->dma_ctl_ch[channel_id], RD_RS_LOC, RD_RS_LEN,
		 sr_rs);
	SET_BITS(misc_regs->dma_ctl_ch[channel_id], M2M_TYPE_LOC, M2M_TYPE_LEN,
		 dma_mem_trans_mode);

	/* fill higher 32bit of 64bit addr */
	/* NOTE(review): sr_msb/dt_msb are uint8_t fields — only the low 8
	 * bits of the original 32-bit MSB reach these registers; confirm
	 * the platform address map makes this safe.
	 */
	misc_regs->iosf_addr_fillin_dma_ch[channel_id] = config->sr_msb;
	misc_regs->iosf_dest_addr_fillin_dma_ch[channel_id] = config->dt_msb;
	return SEDI_DRIVER_OK;
}
439 
/*
 * Write the staged channel configuration to hardware. A no-op when
 * config_applied is already set. For single-block mode the ctrl register
 * (burst size, widths, direction, address increments) is programmed; for
 * linked-list mode only the LLP register is set — per-block control comes
 * from the descriptors. Takes a VNN power vote when either end of the
 * transfer is outside SRAM.
 */
static int32_t dma_channel_apply_config(IN sedi_dma_t dma_device,
					IN int channel_id)
{
	int32_t ret;
	channel_config_t *config =
		&(dma_context[dma_device].channel_config[channel_id]);
	volatile dma_ann_1p0_regs_t *regs = resources[dma_device].regs;
	volatile dma_chan_reg_t *chan_regs = &(regs->chan_reg[channel_id]);
	volatile dma_misc_regs_t *misc_regs = &(regs->dev_misc_reg);

	/*return if no need to config*/
	if (config->config_applied == 1) {
		return SEDI_DRIVER_OK;
	}

	DBG_CHECK(config->tf_mode < DMA_TYPE_MAX, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(config->direction < DMA_DIRECTION_MAX,
		  SEDI_DRIVER_ERROR_PARAMETER);
	/* select the channel for indirect misc register access */
	misc_regs->dma_regaccess_chid = channel_id;
	if (config->tf_mode == DMA_TYPE_SINGLE) {
		/* single block mode config, mainly config ctrl_low reg*/
		DBG_CHECK(config->burst_length < DMA_BURST_TRANS_LENGTH_MAX,
			  SEDI_DRIVER_ERROR_PARAMETER);
		DBG_CHECK(config->sr_width < DMA_TRANS_WIDTH_MAX,
			  SEDI_DRIVER_ERROR_PARAMETER);
		DBG_CHECK(config->dt_width < DMA_TRANS_WIDTH_MAX,
			  SEDI_DRIVER_ERROR_PARAMETER);
		DBG_CHECK(config->peripheral_direction < DMA_HS_PER_RTX_MAX,
			  SEDI_DRIVER_ERROR_PARAMETER);
		/*set dest and src burst size */
		/* NOTE(review): SRC_MSIZE_LEN is used for the DEST_MSIZE
		 * field below; harmless only if DEST_MSIZE and SRC_MSIZE
		 * are the same width — confirm against DEST_MSIZE_LEN.
		 */
		SET_BITS(chan_regs->ctrl_low, DEST_MSIZE_LOC, SRC_MSIZE_LEN,
			 config->burst_length);
		SET_BITS(chan_regs->ctrl_low, SRC_MSIZE_LOC, SRC_MSIZE_LEN,
			 config->burst_length);
		/*source and destination transfer width */
		SET_BITS(chan_regs->ctrl_low, DST_TR_WIDTH_LOC,
			 DST_TR_WIDTH_LEN, config->dt_width);
		SET_BITS(chan_regs->ctrl_low, SRC_TR_WIDTH_LOC,
			 SRC_TR_WIDTH_LEN, config->sr_width);

		/*transfer  direction */
		SET_BITS(chan_regs->ctrl_low, TT_FC_LOC, TT_FC_LEN,
			 config->direction);

		/* Set increment: peripheral ends keep a fixed address,
		 * memory ends auto-increment
		 */
		switch (config->direction) {
		case DMA_PERIPHERAL_TO_MEMORY:
			SET_BITS(chan_regs->ctrl_low, SINC_LOC, SINC_LEN,
				 DMA_INCREMENT_NO_CHANGE);
			SET_BITS(chan_regs->ctrl_low, DINC_LOC, DINC_LEN,
				 DMA_INCREMENT_INC);
			break;
		case DMA_MEMORY_TO_MEMORY:
			SET_BITS(chan_regs->ctrl_low, SINC_LOC, SINC_LEN,
				 DMA_INCREMENT_INC);
			SET_BITS(chan_regs->ctrl_low, DINC_LOC, DINC_LEN,
				 DMA_INCREMENT_INC);
			break;
		case DMA_MEMORY_TO_PERIPHERAL:
			SET_BITS(chan_regs->ctrl_low, DINC_LOC, DINC_LEN,
				 DMA_INCREMENT_NO_CHANGE);
			SET_BITS(chan_regs->ctrl_low, SINC_LOC, SINC_LEN,
				 DMA_INCREMENT_INC);
			break;
		case DMA_PERIPHERAL_TO_PERIPHERAL:
			SET_BITS(chan_regs->ctrl_low, DINC_LOC, DINC_LEN,
				 DMA_INCREMENT_NO_CHANGE);
			SET_BITS(chan_regs->ctrl_low, SINC_LOC, SINC_LEN,
				 DMA_INCREMENT_NO_CHANGE);
			break;
		default:
			return SEDI_DRIVER_ERROR_PARAMETER;
		}
		chan_regs->llp_low = 0; /* no descriptor chain */
	} else if (config->tf_mode == DMA_TYPE_MULTI_LL) {
		DBG_CHECK(config->linked_list_header != NULL,
			  SEDI_DRIVER_ERROR_PARAMETER);
		/* NOTE(review): pointer truncated to 32 bits — assumes
		 * descriptors live in 32-bit addressable memory.
		 */
		chan_regs->llp_low = (uint32_t)config->linked_list_header;
	}

	/*config misc and other registers, as an adding for ctrl register*/
	ret = dma_apply_other_regs(misc_regs, chan_regs, config, channel_id);
	if (ret != SEDI_DRIVER_OK) {
		return ret;
	}

	/* non-SRAM traffic needs the VNN rail powered */
	if ((config->sr_mem_type != DMA_SRAM_MEM) ||
	    (config->dt_mem_type != DMA_SRAM_MEM)) {
		dma_vnn_req(dma_device, channel_id);
	}

	config->config_applied = 1;
	return SEDI_DRIVER_OK;
}
534 
/*
 * Fill one linked-list descriptor: addresses, control word, block size
 * (stored in the BLOCK_TS field of ctrl_high) and next-descriptor pointer
 * (NULL terminates the chain).
 */
int dma_fill_linkedlist(INOUT dma_linked_list_item_t *ll_p,
			IN uint32_t src_addr, IN uint32_t dst_addr,
			IN uint32_t block_size, uint32_t ctrl_low,
			IN dma_linked_list_item_t *ll_p_next)
{
	DBG_CHECK(ll_p != NULL, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(src_addr != 0, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(dst_addr != 0, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(block_size <= DMA_MAX_BLOCK_SIZE,
		  SEDI_DRIVER_ERROR_PARAMETER);

	ll_p->src_addr = src_addr;
	ll_p->dst_addr = dst_addr;
	ll_p->ctrl_low.raw = ctrl_low;
	SET_BITS(ll_p->ctrl_high.raw, BLOCK_TS_LOC, BLOCK_TS_LEN, block_size);
	ll_p->next_ll_p = ll_p_next;
	return SEDI_DRIVER_OK;
}
553 
/*
 * Build a scatter (or gather) descriptor chain of 'count' nodes from the
 * attributes in *attr. In scatter mode the destination advances by
 * attr->interval per node; in gather mode the source does. The other end
 * advances by block_size unless need_reload is set (fixed address).
 * The final node's next pointer is NULL.
 */
int dma_fill_sc_linkedlist(INOUT dma_linked_list_item_t *llp,
			   IN uint8_t count, IN uint32_t ctrl_reg_low,
			   IN sc_attr_t *attr)
{
	DBG_CHECK(llp != NULL, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(attr != NULL, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(count > 0, SEDI_DRIVER_ERROR_PARAMETER);

	uint32_t src_pos = attr->src_addr;
	uint32_t dst_pos = attr->dst_addr;
	int status = SEDI_DRIVER_OK;

	for (int node = 0; node < count; node++) {
		/* last descriptor terminates the chain */
		dma_linked_list_item_t *next =
			(node == count - 1) ? NULL : (llp + node + 1);

		status = dma_fill_linkedlist(llp + node, src_pos, dst_pos,
					     attr->block_size, ctrl_reg_low,
					     next);
		if (status != SEDI_DRIVER_OK) {
			return status;
		}

		/* advance the addresses for the following descriptor */
		if (attr->is_scatter) {
			if (attr->need_reload == 0) {
				src_pos += attr->block_size;
			}
			dst_pos += (attr->interval);
		} else {
			if (attr->need_reload == 0) {
				dst_pos += attr->block_size;
			}
			src_pos += (attr->interval);
		}
	}
	return status;
}
590 
/*
 * Common start path for single-block and linked-list transfers: applies
 * the staged configuration, programs addresses/length (single-block only),
 * unmasks interrupts and enables the channel. For linked-list mode the
 * addresses come from the descriptors, so sr_addr/dest_addr/length are 0.
 */
static int32_t sedi_dma_start_transfer_aux(sedi_dma_t dma_device,
					   int channel_id, uint32_t sr_addr,
					   uint32_t dest_addr, uint32_t length)
{
	int32_t ret;
	volatile dma_ann_1p0_regs_t *regs = resources[dma_device].regs;
	volatile dma_chan_reg_t *chan_regs = &(regs->chan_reg[channel_id]);
	channel_config_t *config =
		&(dma_context[dma_device].channel_config[channel_id]);

	/* hardware still running this channel */
	if (regs->misc_reg.chan_en_low & BIT(channel_id)) {
		return SEDI_DRIVER_ERROR_BUSY;
	}
	/* channel config*/
	ret = dma_channel_apply_config(dma_device, channel_id);
	if (ret != SEDI_DRIVER_OK) {
		dma_context[dma_device].status[channel_id].busy = 0;
		return ret;
	}
	if (config->tf_mode == DMA_TYPE_SINGLE) {
		chan_regs->sar_low = sr_addr;
		chan_regs->dar_low = dest_addr;
		SET_BITS(chan_regs->ctrl_high, BLOCK_TS_LOC, BLOCK_TS_LEN,
			 length);
		SET_BITS(chan_regs->ctrl_low, INT_EN_LOC, INT_EN_LEN, 1);
	} else if (config->tf_mode == DMA_TYPE_MULTI_LL) {
		/* per-block control comes from the descriptor chain */
		SET_BITS(chan_regs->ctrl_low, LLP_DST_EN_LOC, LLP_DST_EN_LEN,
			 1);
		SET_BITS(chan_regs->ctrl_low, LLP_SRC_EN_LOC, LLP_SRC_EN_LEN,
			 1);
	}

	/* reset the staged config so the next transfer reconfigures */
	dma_set_default_channel_config(config);
	/* enable interrupt */
	unmask_channel_interrupt(dma_device, channel_id);

	/* enable channel*/
	regs->misc_reg.cfg_low = 1; /* global controller enable */
	SET_BITS(chan_regs->cfg_low, CH_DRAIN_LOC, CH_DRAIN_LEN, 0);
	SET_BITS(chan_regs->cfg_low, CH_SUSP_LOC, CH_SUSP_LEN, 0);
	regs->misc_reg.chan_en_low = DMA_WRITE_ENABLE(channel_id);
	return SEDI_DRIVER_OK;
}
634 
/*
 * Start an interrupt-driven single-block transfer of 'length' bytes.
 * The upper halves of the 64-bit addresses are staged via the MSB control
 * codes; the lower halves go to the channel registers.
 * Returns SEDI_DRIVER_ERROR_BUSY if the channel is already active.
 */
int32_t sedi_dma_start_transfer(IN sedi_dma_t dma_device, IN int channel_id,
				IN uint64_t sr_addr, IN uint64_t dest_addr,
				IN uint32_t length)
{
	DBG_CHECK(dma_device < SEDI_DMA_NUM, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(channel_id < DMA_CHANNEL_NUM, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(sr_addr > 0, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(dest_addr > 0, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK((length <= DMA_MAX_BLOCK_SIZE) && (length > 0),
		  SEDI_DRIVER_ERROR_PARAMETER);

	sedi_dma_status_t *chan_status =
		&(dma_context[dma_device].status[channel_id]);

	if (chan_status->busy == 1) {
		return SEDI_DRIVER_ERROR_BUSY;
	}
	chan_status->busy = 1;
	chan_status->bus_error = 0;

	/* single-block transfer */
	sedi_dma_control_aux(dma_device, channel_id, SEDI_CONFIG_DMA_TRANS_TYPE,
			     DMA_TYPE_SINGLE);
	/* stage the upper 32 bits of both 64-bit addresses */
	sedi_dma_control_aux(dma_device, channel_id, SEDI_CONFIG_DMA_LL_SR_MSB,
			     GET_MSB(sr_addr));
	sedi_dma_control_aux(dma_device, channel_id, SEDI_CONFIG_DMA_LL_DT_MSB,
			     GET_MSB(dest_addr));

	return sedi_dma_start_transfer_aux(dma_device, channel_id,
					   GET_LSB(sr_addr), GET_LSB(dest_addr),
					   length);
}
664 
/*
 * Start an interrupt-driven linked-list (multi-block) transfer described
 * by the descriptor chain at linkedlist_header. Addresses and per-block
 * control come from the descriptors themselves.
 * Returns SEDI_DRIVER_ERROR_BUSY if the channel is already active.
 */
int32_t sedi_dma_start_ll_transfer(IN sedi_dma_t dma_device, IN int channel_id,
				   IN dma_linked_list_item_t *linkedlist_header)
{
	DBG_CHECK(dma_device < SEDI_DMA_NUM, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(channel_id < DMA_CHANNEL_NUM, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(linkedlist_header != NULL, SEDI_DRIVER_ERROR_PARAMETER);

	sedi_dma_status_t *chan_status =
		&(dma_context[dma_device].status[channel_id]);

	if (chan_status->busy == 1) {
		return SEDI_DRIVER_ERROR_BUSY;
	}
	chan_status->busy = 1;
	chan_status->bus_error = 0;

	/* stage linked-list mode and the head descriptor */
	sedi_dma_control_aux(dma_device, channel_id, SEDI_CONFIG_DMA_TRANS_TYPE,
			     DMA_TYPE_MULTI_LL);
	sedi_dma_control_aux(dma_device, channel_id, SEDI_CONFIG_DMA_LL_HEADER,
			     (uint32_t)linkedlist_header);

	/* addresses/length are carried by the descriptors */
	return sedi_dma_start_transfer_aux(dma_device, channel_id, 0, 0, 0);
}
688 
/*
 * Common teardown after a transfer ends (done, error or abort): snapshot
 * the progress counters, disable the channel, quiesce its interrupts,
 * drop any VNN vote and reset the staged configuration.
 */
static void dma_transfer_post(sedi_dma_t dma_device, int channel_id)
{
	channel_config_t *config =
		&(dma_context[dma_device].channel_config[channel_id]);
	volatile dma_ann_1p0_regs_t *regs = resources[dma_device].regs;
	volatile dma_chan_reg_t *chan_regs = &(regs->chan_reg[channel_id]);

	/* get status: bytes moved (BLOCK_TS) and next descriptor pointer */
	dma_context[dma_device].done_byte[channel_id] =
		GET_BITS(chan_regs->ctrl_high, BLOCK_TS_LOC, BLOCK_TS_LEN);
	dma_context[dma_device].next_llp[channel_id] =
		(dma_linked_list_item_t *)chan_regs->llp_low;

	/* disable dma channel and busy-wait until hardware confirms
	 * NOTE(review): no timeout on this poll — confirm the controller
	 * always deasserts the enable bit.
	 */
	regs->misc_reg.chan_en_low = DMA_WRITE_DISABLE(channel_id);
	while (regs->misc_reg.chan_en_low & BIT(channel_id)) {
	}

	/* mask and clear interrupt*/
	clear_channel_interrupt(dma_device, channel_id);
	mask_channel_interrupt(dma_device, channel_id);

	dma_vnn_dereq(dma_device, channel_id);
	dma_set_default_channel_config(config);
	dma_context[dma_device].status[channel_id].busy = 0;
}
715 
716 /* Polling mode is only used in single-block mode */
sedi_dma_start_transfer_polling(IN sedi_dma_t dma_device,IN int channel_id,IN uint64_t sr_addr,IN uint64_t dest_addr,IN uint32_t length)717 int32_t sedi_dma_start_transfer_polling(IN sedi_dma_t dma_device,
718 					IN int channel_id, IN uint64_t sr_addr,
719 					IN uint64_t dest_addr,
720 					IN uint32_t length)
721 {
722 	DBG_CHECK(dma_device < SEDI_DMA_NUM, SEDI_DRIVER_ERROR_PARAMETER);
723 	DBG_CHECK(channel_id < DMA_CHANNEL_NUM, SEDI_DRIVER_ERROR_PARAMETER);
724 	DBG_CHECK(length <= DMA_MAX_BLOCK_SIZE, SEDI_DRIVER_ERROR_PARAMETER);
725 
726 	uint32_t ret;
727 	volatile dma_ann_1p0_regs_t *regs = resources[dma_device].regs;
728 	volatile dma_chan_reg_t *chan_regs = &(regs->chan_reg[channel_id]);
729 	sedi_dma_event_cb_t cb = dma_context[dma_device].cb_event[channel_id];
730 	void *usr_param = dma_context[dma_device].cb_param[channel_id];
731 
732 	if (dma_context[dma_device].status[channel_id].busy == 1) {
733 		return SEDI_DRIVER_ERROR_BUSY;
734 	}
735 	dma_context[dma_device].status[channel_id].busy = 1;
736 	dma_context[dma_device].status[channel_id].bus_error = 0;
737 
738 	sedi_dma_control_aux(dma_device, channel_id, SEDI_CONFIG_DMA_TRANS_TYPE,
739 			     DMA_TYPE_SINGLE);
740 
741 	/* pass higher 32 bit of address*/
742 	sedi_dma_control_aux(dma_device, channel_id, SEDI_CONFIG_DMA_LL_SR_MSB,
743 			     GET_MSB(sr_addr));
744 	sedi_dma_control_aux(dma_device, channel_id, SEDI_CONFIG_DMA_LL_DT_MSB,
745 			     GET_MSB(dest_addr));
746 
747 	ret = dma_channel_apply_config(dma_device, channel_id);
748 	if (ret != SEDI_DRIVER_OK) {
749 		dma_context[dma_device].status[channel_id].busy = 0;
750 		return ret;
751 	}
752 
753 	chan_regs->sar_low = GET_LSB(sr_addr);
754 	chan_regs->dar_low = GET_LSB(dest_addr);
755 	SET_BITS(chan_regs->ctrl_high, BLOCK_TS_LOC, BLOCK_TS_LEN, length);
756 
757 	/* disable and clear interrupt */
758 	mask_channel_interrupt(dma_device, channel_id);
759 	clear_channel_interrupt(dma_device, channel_id);
760 
761 	/* enable channel*/
762 	regs->misc_reg.cfg_low = 1;
763 	regs->misc_reg.chan_en_low = DMA_WRITE_ENABLE(channel_id);
764 
765 	while (1) {
766 		if (regs->misc_reg.chan_en_low & BIT(channel_id)) {
767 			continue;
768 		}
769 		if (regs->int_reg.raw_err_low & BIT(channel_id)) {
770 			dma_transfer_post(dma_device, channel_id);
771 			dma_context[dma_device].status[channel_id].bus_error =
772 				1;
773 			if (cb != NULL) {
774 				cb(dma_device, channel_id,
775 				   SEDI_DMA_EVENT_BUS_ERROR, usr_param);
776 			}
777 			return SEDI_DRIVER_ERROR_TRANSFER;
778 		} else {
779 			dma_transfer_post(dma_device, channel_id);
780 			if (cb != NULL) {
781 				cb(dma_device, channel_id,
782 				   SEDI_DMA_EVENT_TRANSFER_DONE, usr_param);
783 			}
784 			return SEDI_DRIVER_OK;
785 		}
786 	}
787 	return SEDI_DRIVER_OK;
788 }
789 
/*
 * Abort an in-flight transfer: suspend the channel, drain its FIFO, then
 * run the common teardown. A no-op if the channel is not busy.
 */
int32_t sedi_dma_abort_transfer(IN sedi_dma_t dma_device, IN int channel_id)
{
	DBG_CHECK(dma_device < SEDI_DMA_NUM, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(channel_id < DMA_CHANNEL_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	volatile dma_ann_1p0_regs_t *regs = resources[dma_device].regs;
	volatile dma_chan_reg_t *chan_regs = &(regs->chan_reg[channel_id]);

	if (dma_context[dma_device].status[channel_id].busy == 0) {
		return SEDI_DRIVER_OK;
	}

	/* suspend the channel and request a FIFO drain */
	SET_BITS(chan_regs->cfg_low, CH_SUSP_LOC, CH_SUSP_LEN, 1);
	SET_BITS(chan_regs->cfg_low, CH_DRAIN_LOC, CH_DRAIN_LEN, 1);

	/* NOTE(review): unbounded poll for FIFO empty — confirm the drain
	 * always completes, or add a timeout.
	 */
	while ((chan_regs->cfg_low & BIT(FIFO_EMPTY_LOC)) == 0) {
	}

	dma_transfer_post(dma_device, channel_id);
	return SEDI_DRIVER_OK;
}
811 
/*
 * Report the result of the channel's last completed transfer: the number
 * of bytes moved and the next linked-list descriptor (both snapshotted by
 * dma_transfer_post()). Either output pointer may be NULL to skip it.
 * Returns SEDI_DRIVER_ERROR_BUSY while a transfer is still running.
 */
int32_t sedi_dma_get_done_status(IN sedi_dma_t dma_device, IN int channel_id,
				 OUT uint32_t *done_bytes,
				 OUT dma_linked_list_item_t **next_llp)
{
	DBG_CHECK(dma_device < SEDI_DMA_NUM, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(channel_id < DMA_CHANNEL_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	if (dma_context[dma_device].status[channel_id].busy == 1) {
		return SEDI_DRIVER_ERROR_BUSY;
	}

	if (done_bytes != NULL) {
		*done_bytes = dma_context[dma_device].done_byte[channel_id];
	}
	if (next_llp != NULL) {
		*next_llp = dma_context[dma_device].next_llp[channel_id];
	}
	return SEDI_DRIVER_OK;
}
831 
/*
 * Interrupt service routine for one DMA device: snapshots the transfer
 * and error status registers once, then for each flagged channel runs the
 * common teardown (which also acknowledges the interrupt) and invokes the
 * user callback with DONE and/or BUS_ERROR.
 */
void dma_isr(IN sedi_dma_t dma_device)
{
	volatile dma_ann_1p0_regs_t *regs = resources[dma_device].regs;
	sedi_dma_event_cb_t cb;
	void *usr_param;
	uint32_t tfr_status = regs->int_reg.status_tfr_low;
	uint32_t err_status = regs->int_reg.status_err_low;

	for (int channel_id = 0; channel_id < DMA_CHANNEL_NUM; channel_id++) {
		cb = dma_context[dma_device].cb_event[channel_id];
		usr_param = dma_context[dma_device].cb_param[channel_id];
		if (tfr_status & BIT(channel_id)) {
			dma_transfer_post(dma_device, channel_id);
			if (cb != NULL) {
				cb(dma_device, channel_id,
				   SEDI_DMA_EVENT_TRANSFER_DONE, usr_param);
			}
		}
		if (err_status & BIT(channel_id)) {
			dma_transfer_post(dma_device, channel_id);
			dma_context[dma_device].status[channel_id].bus_error =
				1;
			if (cb != NULL) {
				cb(dma_device, channel_id,
				   SEDI_DMA_EVENT_BUS_ERROR, usr_param);
			}
		}
	}
}
861