1 /*
2  * Copyright (c) 2023 Intel Corporation.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #define DT_DRV_COMPAT intel_sedi_dma
8 
9 #include <errno.h>
10 #include <stdio.h>
11 #include <zephyr/kernel.h>
12 #include <zephyr/pm/device.h>
13 #include <string.h>
14 #include <zephyr/init.h>
15 #include <zephyr/drivers/dma.h>
16 #include <zephyr/devicetree.h>
17 #include <zephyr/cache.h>
18 #include <soc.h>
19 
20 #include "sedi_driver_dma.h"
21 #include "sedi_driver_core.h"
22 
23 #include <zephyr/logging/log.h>
24 LOG_MODULE_REGISTER(sedi_dma, CONFIG_DMA_LOG_LEVEL);
25 
26 extern void dma_isr(sedi_dma_t dma_device);
27 
/* Build-time (ROM-able) per-instance configuration, taken from devicetree. */
struct dma_sedi_config_info {
	sedi_dma_t peripheral_id; /* Controller instance. */
	uint8_t chn_num;          /* Number of channels this controller exposes. */
	void (*irq_config)(void); /* Hook that connects and enables the controller IRQ. */
};
33 
/* Run-time per-instance state: a cached copy of the last dma_config
 * applied to each channel (consumed by reload/start and the ISR handler).
 */
struct dma_sedi_driver_data {
	struct dma_config dma_configs[DMA_CHANNEL_NUM];
};
37 
/* Convenience accessors for the driver's data/config structures. */
#define DEV_DATA(dev) ((struct dma_sedi_driver_data *const)(dev)->data)
#define DEV_CFG(dev) \
	((const struct dma_sedi_config_info *const)(dev)->config)
41 
42 /*
43  * this function will be called when dma transferring is completed
44  * or error happened
45  */
dma_handler(sedi_dma_t dma_device,int channel,int event_id,void * args)46 static void dma_handler(sedi_dma_t dma_device, int channel, int event_id,
47 			void *args)
48 {
49 	ARG_UNUSED(args);
50 	const struct device *dev = (const struct device *)args;
51 	struct dma_sedi_driver_data *const data = DEV_DATA(dev);
52 	struct dma_config *config = &(data->dma_configs[channel]);
53 
54 	/* run user-defined callback */
55 	if (config->dma_callback) {
56 		if ((event_id == SEDI_DMA_EVENT_TRANSFER_DONE) &&
57 		    (config->complete_callback_en)) {
58 			config->dma_callback(dev, config->user_data,
59 					channel, 0);
60 		} else if (!config->error_callback_dis) {
61 			config->dma_callback(dev, config->user_data,
62 					channel, event_id);
63 		}
64 	}
65 }
66 
67 /* map width to certain macros*/
width_index(uint32_t num_bytes,uint32_t * index)68 static int width_index(uint32_t num_bytes, uint32_t *index)
69 {
70 	switch (num_bytes) {
71 	case 1:
72 		*index = DMA_TRANS_WIDTH_8;
73 		break;
74 	case 2:
75 		*index = DMA_TRANS_WIDTH_16;
76 		break;
77 	case 4:
78 		*index = DMA_TRANS_WIDTH_32;
79 		break;
80 	case 8:
81 		*index = DMA_TRANS_WIDTH_64;
82 		break;
83 	case 16:
84 		*index = DMA_TRANS_WIDTH_128;
85 		break;
86 	case 32:
87 		*index = DMA_TRANS_WIDTH_256;
88 		break;
89 	default:
90 		return -ENOTSUP;
91 	}
92 
93 	return 0;
94 }
95 
96 /* map burst size to certain macros*/
burst_index(uint32_t num_units,uint32_t * index)97 static int burst_index(uint32_t num_units, uint32_t *index)
98 {
99 	switch (num_units) {
100 	case 1:
101 		*index = DMA_BURST_TRANS_LENGTH_1;
102 		break;
103 	case 4:
104 		*index = DMA_BURST_TRANS_LENGTH_4;
105 		break;
106 	case 8:
107 		*index = DMA_BURST_TRANS_LENGTH_8;
108 		break;
109 	case 16:
110 		*index = DMA_BURST_TRANS_LENGTH_16;
111 		break;
112 	case 32:
113 		*index = DMA_BURST_TRANS_LENGTH_32;
114 		break;
115 	case 64:
116 		*index = DMA_BURST_TRANS_LENGTH_64;
117 		break;
118 	case 128:
119 		*index = DMA_BURST_TRANS_LENGTH_128;
120 		break;
121 	case 256:
122 		*index = DMA_BURST_TRANS_LENGTH_256;
123 		break;
124 	default:
125 		return -ENOTSUP;
126 	}
127 
128 	return 0;
129 }
130 
/*
 * Translate the Zephyr channel_direction into what SEDI needs: the
 * source/destination memory types and the SEDI direction code.
 *
 * Host and IMR directions are expressed to SEDI as MEMORY_TO_MEMORY with
 * the remote side's memory type set to DRAM/UMA respectively.
 */
static void dma_config_convert(struct dma_config *config,
			       dma_memory_type_t *src_mem,
			       dma_memory_type_t *dst_mem,
			       uint8_t *sedi_dma_dir)
{
	/* Defaults: plain SRAM-to-SRAM memory copy. */
	*src_mem = DMA_SRAM_MEM;
	*dst_mem = DMA_SRAM_MEM;
	*sedi_dma_dir = MEMORY_TO_MEMORY;

	switch (config->channel_direction) {
	case MEMORY_TO_MEMORY:
	case MEMORY_TO_PERIPHERAL:
	case PERIPHERAL_TO_MEMORY:
	case PERIPHERAL_TO_PERIPHERAL:
		/* Direct 1:1 mapping between Zephyr and SEDI codes. */
		*sedi_dma_dir = config->channel_direction;
		break;
	case MEMORY_TO_HOST:
		*dst_mem = DMA_DRAM_MEM;
		break;
	case HOST_TO_MEMORY:
		*src_mem = DMA_DRAM_MEM;
		break;
#ifdef MEMORY_TO_IMR
	case MEMORY_TO_IMR:
		*dst_mem = DMA_UMA_MEM;
		break;
#endif
#ifdef IMR_TO_MEMORY
	case IMR_TO_MEMORY:
		*src_mem = DMA_UMA_MEM;
		break;
#endif
	default:
		/* Unknown direction: keep the SRAM-to-SRAM defaults. */
		break;
	}
}
165 
166 /* config basic dma */
dma_sedi_apply_common_config(sedi_dma_t dev,uint32_t channel,struct dma_config * config,uint8_t * dir)167 static int dma_sedi_apply_common_config(sedi_dma_t dev, uint32_t channel,
168 					struct dma_config *config, uint8_t *dir)
169 {
170 	uint8_t direction = MEMORY_TO_MEMORY;
171 	dma_memory_type_t src_mem = DMA_SRAM_MEM, dst_mem = DMA_SRAM_MEM;
172 
173 	dma_config_convert(config, &src_mem, &dst_mem, &direction);
174 
175 	if (dir) {
176 		*dir = direction;
177 	}
178 
179 	/* configure dma transferring direction*/
180 	sedi_dma_control(dev, channel, SEDI_CONFIG_DMA_DIRECTION,
181 			 direction);
182 
183 	if (direction == MEMORY_TO_MEMORY) {
184 		sedi_dma_control(dev, channel, SEDI_CONFIG_DMA_SR_MEM_TYPE,
185 				 src_mem);
186 		sedi_dma_control(dev, channel, SEDI_CONFIG_DMA_DT_MEM_TYPE,
187 				 dst_mem);
188 	} else if (direction == MEMORY_TO_PERIPHERAL) {
189 		sedi_dma_control(dev, channel, SEDI_CONFIG_DMA_HS_DEVICE_ID,
190 				 config->dma_slot);
191 		sedi_dma_control(dev, channel, SEDI_CONFIG_DMA_HS_POLARITY,
192 				 DMA_HS_POLARITY_HIGH);
193 		sedi_dma_control(dev, channel,
194 				 SEDI_CONFIG_DMA_HS_DEVICE_ID_PER_DIR,
195 				 DMA_HS_PER_TX);
196 	} else if (direction == PERIPHERAL_TO_MEMORY) {
197 		sedi_dma_control(dev, channel, SEDI_CONFIG_DMA_HS_DEVICE_ID,
198 				 config->dma_slot);
199 		sedi_dma_control(dev, channel, SEDI_CONFIG_DMA_HS_POLARITY,
200 				 DMA_HS_POLARITY_HIGH);
201 		sedi_dma_control(dev, channel,
202 				 SEDI_CONFIG_DMA_HS_DEVICE_ID_PER_DIR,
203 				 DMA_HS_PER_RX);
204 	} else {
205 		return -1;
206 	}
207 	return 0;
208 }
209 
/*
 * Program a channel for a single-block transfer: common direction config
 * plus source/destination transfer widths and burst length.
 * Returns 0 on success or a negative value from the helpers.
 */
static int dma_sedi_apply_single_config(sedi_dma_t dev, uint32_t channel,
					struct dma_config *config)
{
	uint32_t code = 0;
	int ret = dma_sedi_apply_common_config(dev, channel, config, NULL);

	if (ret != 0) {
		return ret;
	}

	/* Source transfer width. */
	ret = width_index(config->source_data_size, &code);
	if (ret != 0) {
		return ret;
	}
	sedi_dma_control(dev, channel, SEDI_CONFIG_DMA_SR_TRANS_WIDTH, code);

	/* Destination transfer width. */
	ret = width_index(config->dest_data_size, &code);
	if (ret != 0) {
		return ret;
	}
	sedi_dma_control(dev, channel, SEDI_CONFIG_DMA_DT_TRANS_WIDTH, code);

	/* Burst length (source setting applies to the channel). */
	ret = burst_index(config->source_burst_length, &code);
	if (ret != 0) {
		return ret;
	}
	sedi_dma_control(dev, channel, SEDI_CONFIG_DMA_BURST_LENGTH, code);

	return 0;
}
245 
dma_sedi_chan_config(const struct device * dev,uint32_t channel,struct dma_config * config)246 static int dma_sedi_chan_config(const struct device *dev, uint32_t channel,
247 				struct dma_config *config)
248 {
249 	if ((dev == NULL) || (channel >= DEV_CFG(dev)->chn_num)
250 		|| (config == NULL)
251 		|| (config->block_count != 1)) {
252 		goto INVALID_ARGS;
253 	}
254 
255 	const struct dma_sedi_config_info *const info = DEV_CFG(dev);
256 	struct dma_sedi_driver_data *const data = DEV_DATA(dev);
257 
258 	memcpy(&(data->dma_configs[channel]), config, sizeof(struct dma_config));
259 
260 	/* initialize the dma controller, following the sedi api*/
261 	sedi_dma_event_cb_t cb = dma_handler;
262 
263 	sedi_dma_init(info->peripheral_id, (int)channel, cb, (void *)dev);
264 
265 	return 0;
266 
267 INVALID_ARGS:
268 	return -1;
269 }
270 
dma_sedi_reload(const struct device * dev,uint32_t channel,uint64_t src,uint64_t dst,size_t size)271 static int dma_sedi_reload(const struct device *dev, uint32_t channel,
272 			      uint64_t src, uint64_t dst, size_t size)
273 {
274 	if ((dev == NULL) || (channel >= DEV_CFG(dev)->chn_num)) {
275 		LOG_ERR("dma reload failed for invalid args");
276 		return -ENOTSUP;
277 	}
278 
279 	int ret = 0;
280 	struct dma_sedi_driver_data *const data = DEV_DATA(dev);
281 	struct dma_config *config = &(data->dma_configs[channel]);
282 	struct dma_block_config *block_config;
283 
284 	if ((config == NULL) || (config->head_block == NULL)) {
285 		LOG_ERR("dma reload failed, no config found");
286 		return -ENOTSUP;
287 	}
288 	block_config = config->head_block;
289 
290 	if ((config->block_count == 1) || (block_config->next_block == NULL)) {
291 		block_config->source_address = src;
292 		block_config->dest_address = dst;
293 		block_config->block_size = size;
294 	} else {
295 		LOG_ERR("no reload support for multi-linkedlist mode");
296 		return -ENOTSUP;
297 	}
298 	return ret;
299 }
300 
dma_sedi_start(const struct device * dev,uint32_t channel)301 static int dma_sedi_start(const struct device *dev, uint32_t channel)
302 {
303 	if ((dev == NULL) || (channel >= DEV_CFG(dev)->chn_num)) {
304 		LOG_ERR("dma transferring failed for invalid args");
305 		return -ENOTSUP;
306 	}
307 
308 	int ret = -1;
309 	const struct dma_sedi_config_info *const info = DEV_CFG(dev);
310 	struct dma_sedi_driver_data *const data = DEV_DATA(dev);
311 	struct dma_config *config = &(data->dma_configs[channel]);
312 	struct dma_block_config *block_config = config->head_block;
313 	uint64_t src_addr, dst_addr;
314 
315 	if (config->block_count == 1) {
316 		/* call sedi start function */
317 		ret = dma_sedi_apply_single_config(info->peripheral_id,
318 						   channel, config);
319 		if (ret) {
320 			goto ERR;
321 		}
322 		src_addr = block_config->source_address;
323 		dst_addr = block_config->dest_address;
324 
325 		ret = sedi_dma_start_transfer(info->peripheral_id, channel,
326 						src_addr, dst_addr, block_config->block_size);
327 	} else {
328 		LOG_ERR("MULTIPLE_BLOCK CONFIG is not set");
329 		goto ERR;
330 	}
331 
332 	if (ret != SEDI_DRIVER_OK) {
333 		goto ERR;
334 	}
335 
336 	return ret;
337 
338 ERR:
339 	LOG_ERR("dma transfer failed");
340 	return ret;
341 }
342 
dma_sedi_stop(const struct device * dev,uint32_t channel)343 static int dma_sedi_stop(const struct device *dev, uint32_t channel)
344 {
345 	const struct dma_sedi_config_info *const info = DEV_CFG(dev);
346 
347 	LOG_DBG("stopping dma: %p, %d", dev, channel);
348 	sedi_dma_abort_transfer(info->peripheral_id, channel);
349 
350 	return 0;
351 }
352 
/* Zephyr DMA driver API vtable. get_status is not implemented. */
static DEVICE_API(dma, dma_funcs) = {
	.config = dma_sedi_chan_config,
	.start = dma_sedi_start,
	.stop = dma_sedi_stop,
	.reload = dma_sedi_reload,
	.get_status = NULL,
};
360 
dma_sedi_init(const struct device * dev)361 static int dma_sedi_init(const struct device *dev)
362 {
363 	const struct dma_sedi_config_info *const config = DEV_CFG(dev);
364 
365 	config->irq_config();
366 
367 	return 0;
368 }
369 
/*
 * Per-devicetree-instance boilerplate: declares the run-time data and
 * build-time config structures, registers the device at PRE_KERNEL_2, and
 * defines the irq_config hook that routes the controller interrupt to the
 * shared dma_isr with the peripheral id as its argument.
 */
#define DMA_DEVICE_INIT_SEDI(inst) \
	static void dma_sedi_##inst##_irq_config(void);			\
									\
	static struct dma_sedi_driver_data dma_sedi_dev_data_##inst; \
	static const struct dma_sedi_config_info dma_sedi_config_data_##inst = { \
		.peripheral_id = DT_INST_PROP(inst, peripheral_id), \
		.chn_num = DT_INST_PROP(inst, dma_channels), \
		.irq_config = dma_sedi_##inst##_irq_config \
	}; \
	DEVICE_DT_DEFINE(DT_INST(inst, DT_DRV_COMPAT), &dma_sedi_init, \
	      NULL, &dma_sedi_dev_data_##inst, &dma_sedi_config_data_##inst, PRE_KERNEL_2, \
	      CONFIG_KERNEL_INIT_PRIORITY_DEVICE, (void *)&dma_funcs); \
									\
	static void dma_sedi_##inst##_irq_config(void)			\
	{								\
		IRQ_CONNECT(DT_INST_IRQN(inst),				\
			    DT_INST_IRQ(inst, priority), dma_isr,	\
			    (void *)DT_INST_PROP(inst, peripheral_id),			\
			    DT_INST_IRQ(inst, sense));			\
		irq_enable(DT_INST_IRQN(inst));				\
	}

/* Instantiate the driver for every enabled devicetree node. */
DT_INST_FOREACH_STATUS_OKAY(DMA_DEVICE_INIT_SEDI)
393