1 /*
2  * Copyright (c) 2023 Renesas Electronics Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/kernel.h>
8 #include <zephyr/device.h>
9 #include <zephyr/drivers/dma.h>
10 #include <zephyr/irq.h>
11 #include <DA1469xAB.h>
12 #include <da1469x_pd.h>
13 #include <da1469x_config.h>
14 #include <system_DA1469x.h>
15 #include <da1469x_otp.h>
16 #include <zephyr/drivers/dma/dma_smartbond.h>
17 #include <zephyr/pm/device.h>
18 #include <zephyr/pm/policy.h>
19 #include <zephyr/logging/log.h>
20 
21 LOG_MODULE_REGISTER(dma_smartbond, CONFIG_DMA_LOG_LEVEL);
22 
23 #define DT_DRV_COMPAT renesas_smartbond_dma
24 
25 #define SMARTBOND_IRQN      DT_INST_IRQN(0)
26 #define SMARTBOND_IRQ_PRIO  DT_INST_IRQ(0, priority)
27 
28 #define DMA_CHANNELS_COUNT   DT_PROP(DT_NODELABEL(dma), dma_channels)
29 #define DMA_BLOCK_COUNT     DT_PROP(DT_NODELABEL(dma), block_count)
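/*
 * Channel 7 is the only channel allowed to touch protected key areas
 * (see the address checks in the *_addr_check_and_adjust() helpers below).
 */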
30 #define DMA_SECURE_CHANNEL  7
31 
32 #define DMA_CTRL_REG_SET_FIELD(_field, _var, _val) \
33 	(_var) = \
34 	(((_var) & ~DMA_DMA0_CTRL_REG_ ## _field ## _Msk) | \
35 	(((_val) << DMA_DMA0_CTRL_REG_ ## _field ## _Pos) & DMA_DMA0_CTRL_REG_ ## _field ## _Msk))
36 
37 #define DMA_CTRL_REG_GET_FIELD(_field, _var) \
38 	(((_var) & DMA_DMA0_CTRL_REG_ ## _field ## _Msk) >> DMA_DMA0_CTRL_REG_ ## _field ## _Pos)
39 
40 #define DMA_CHN2REG(_idx)   (&((struct channel_regs *)DMA)[(_idx)])
41 
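/*
 * Each pair of adjacent channels shares one 4-bit trigger-select field in
 * DMA_REQ_MUX_REG; DMA_MUX_SHIFT() returns the bit offset of the field
 * serving channel _idx.
 */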
42 #define DMA_MUX_SHIFT(_idx)   (((_idx) >> 1) * 4)
43 
44 #define DMA_REQ_MUX_REG_SET(_idx, _val) \
45 	DMA->DMA_REQ_MUX_REG = \
46 		(DMA->DMA_REQ_MUX_REG & ~(0xf << DMA_MUX_SHIFT((_idx)))) | \
47 		(((_val) & 0xf) << DMA_MUX_SHIFT((_idx)))
48 
49 #define DMA_REQ_MUX_REG_GET(_idx) \
50 	((DMA->DMA_REQ_MUX_REG >> DMA_MUX_SHIFT((_idx))) & 0xf)
51 
52 #define CRYPTO_KEYS_BUF_ADDR   0x30040100
53 #define CRYPTO_KEYS_BUF_SIZE   0x100
54 #define IS_AES_KEYS_BUF_RANGE(_a)  (((uint32_t)(_a) >= (uint32_t)(CRYPTO_KEYS_BUF_ADDR)) && \
55 	((uint32_t)(_a) < (uint32_t)(CRYPTO_KEYS_BUF_ADDR + CRYPTO_KEYS_BUF_SIZE)))
56 
57 /*
58  * DMA channel priority level. The smaller the value the lower the priority granted to a channel
59  * when two or more channels request the bus at the same time. For channels of equal priority,
60  * the lower the channel number, the higher the effective priority.
61  */
62 enum dma_smartbond_channel_prio {
63 	DMA_SMARTBOND_CHANNEL_PRIO_0 = 0x0,  /* Lowest channel priority */
64 	DMA_SMARTBOND_CHANNEL_PRIO_1,
65 	DMA_SMARTBOND_CHANNEL_PRIO_2,
66 	DMA_SMARTBOND_CHANNEL_PRIO_3,
67 	DMA_SMARTBOND_CHANNEL_PRIO_4,
68 	DMA_SMARTBOND_CHANNEL_PRIO_5,
69 	DMA_SMARTBOND_CHANNEL_PRIO_6,
70 	DMA_SMARTBOND_CHANNEL_PRIO_7,         /* Highest channel priority */
71 	DMA_SMARTBOND_CHANNEL_PRIO_MAX
72 };
73 
74 enum dma_smartbond_channel {
75 	DMA_SMARTBOND_CHANNEL_0 = 0x0,
76 	DMA_SMARTBOND_CHANNEL_1,
77 	DMA_SMARTBOND_CHANNEL_2,
78 	DMA_SMARTBOND_CHANNEL_3,
79 	DMA_SMARTBOND_CHANNEL_4,
80 	DMA_SMARTBOND_CHANNEL_5,
81 	DMA_SMARTBOND_CHANNEL_6,
82 	DMA_SMARTBOND_CHANNEL_7,
83 	DMA_SMARTBOND_CHANNEL_MAX
84 };
85 
86 enum dma_smartbond_burst_len {
87 	DMA_SMARTBOND_BURST_LEN_1B  = 0x1, /* Burst mode is disabled */
88 	DMA_SMARTBOND_BURST_LEN_4B  = 0x4, /* Perform bursts of 4 beats (INCR4) */
89 	DMA_SMARTBOND_BURST_LEN_8B  = 0x8  /* Perform bursts of 8 beats (INCR8) */
90 };
91 
92 /*
93  * DMA bus width indicating how many bytes are retrieved/written per transfer.
94  * Note that the bus width is the same for the source and destination.
95  */
96 enum dma_smartbond_bus_width {
97 	DMA_SMARTBOND_BUS_WIDTH_1B = 0x1,
98 	DMA_SMARTBOND_BUS_WIDTH_2B = 0x2,
99 	DMA_SMARTBOND_BUS_WIDTH_4B = 0x4
100 };
101 
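/*
 * The enums below mirror the raw DMA_CTRL_REG field encodings; the
 * dma_channel_update_*() helpers translate the generic Zephyr DMA
 * configuration values into them.
 */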
102 enum dreq_mode {
103 	DREQ_MODE_SW = 0x0,
104 	DREQ_MODE_HW
105 };
106 
107 enum burst_mode {
108 	BURST_MODE_0B = 0x0,
109 	BURST_MODE_4B = 0x1,
110 	BURST_MODE_8B = 0x2
111 };
112 
113 enum bus_width {
114 	BUS_WIDTH_1B = 0x0,
115 	BUS_WIDTH_2B = 0x1,
116 	BUS_WIDTH_4B = 0x2
117 };
118 
119 enum addr_adj {
120 	ADDR_ADJ_NO_CHANGE = 0x0,
121 	ADDR_ADJ_INCR
122 };
123 
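/*
 * COPY_MODE_INIT is selected for SW-triggered transfers with a fixed source
 * and incrementing destination (memory initialization); such transfers run
 * to completion without being interleaved with other channels.
 */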
124 enum copy_mode {
125 	COPY_MODE_BLOCK = 0x0,
126 	COPY_MODE_INIT
127 };
128 
129 enum req_sense {
130 	REQ_SENSE_LEVEL = 0x0,
131 	REQ_SENSE_EDGE
132 };
133 
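/*
 * Per-channel register block (0x20 bytes including padding); DMA_CHN2REG()
 * relies on this layout when indexing the DMA base address.
 */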
134 struct channel_regs {
135 	__IO uint32_t DMA_A_START;
136 	__IO uint32_t DMA_B_START;
137 	__IO uint32_t DMA_INT_REG;
138 	__IO uint32_t DMA_LEN_REG;
139 	__IO uint32_t DMA_CTRL_REG;
140 
141 	__I uint32_t DMA_IDX_REG;
142 	__I uint32_t RESERVED[2];
143 };
144 
145 struct dma_channel_data {
146 	dma_callback_t cb;
147 	void *user_data;
148 	enum dma_smartbond_bus_width bus_width;
149 	enum dma_smartbond_burst_len burst_len;
150 	enum dma_channel_direction dir;
151 	bool is_dma_configured;
152 };
153 
154 struct dma_smartbond_data {
155 	/* Should be the first member of the driver data */
156 	struct dma_context dma_ctx;
157 
158 	ATOMIC_DEFINE(channels_atomic, DMA_CHANNELS_COUNT);
159 
160 	/* User callbacks and data to be stored per channel */
161 	struct dma_channel_data channel_data[DMA_CHANNELS_COUNT];
162 };
163 
164 /* True if there is any DMA activity on any channel, false otherwise. */
165 static bool dma_smartbond_is_dma_active(void)
166 {
167 	int idx;
168 	struct channel_regs *regs;
169 
170 	for (idx = 0; idx < DMA_CHANNELS_COUNT; idx++) {
171 		regs = DMA_CHN2REG(idx);
172 
173 		if (DMA_CTRL_REG_GET_FIELD(DMA_ON, regs->DMA_CTRL_REG)) {
174 			return true;
175 		}
176 	}
177 
178 	return false;
179 }
180 
181 static inline void dma_smartbond_pm_policy_state_lock_get(void)
182 {
183 #if defined(CONFIG_PM_DEVICE)
184 	pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
185 #endif
186 }
187 
188 static inline void dma_smartbond_pm_policy_state_lock_put(void)
189 {
190 #if defined(CONFIG_PM_DEVICE)
191 	if (pm_policy_state_lock_is_active(PM_STATE_STANDBY, PM_ALL_SUBSTATES)) {
192 		pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
193 	}
194 #endif
195 }
196 
197 static void dma_smartbond_set_channel_status(const struct device *dev,
198 	uint32_t channel, bool status)
199 {
200 	unsigned int key;
201 	struct channel_regs *regs = DMA_CHN2REG(channel);
202 
203 	key = irq_lock();
204 
205 	if (status) {
206 		/* Make sure the status register for the requested channel is cleared. */
207 		DMA->DMA_CLEAR_INT_REG |= BIT(channel);
208 		/* Enable interrupts for the requested channel. */
209 		DMA->DMA_INT_MASK_REG |= BIT(channel);
210 
211 		/* Check if this is the first attempt to enable DMA interrupts. */
212 		if (!irq_is_enabled(SMARTBOND_IRQN)) {
213 			irq_enable(SMARTBOND_IRQN);
214 			/* Prevent sleep as long as DMA operations are ongoing */
215 			dma_smartbond_pm_policy_state_lock_get();
216 		}
217 
218 		DMA_CTRL_REG_SET_FIELD(DMA_ON, regs->DMA_CTRL_REG, 0x1);
219 	} else {
220 		DMA_CTRL_REG_SET_FIELD(DMA_ON, regs->DMA_CTRL_REG, 0x0);
221 
222 		/*
223 		 * It might happen that DMA is already in progress. Make sure the current
224 		 * on-going transfer is complete (cannot be interrupted).
225 		 */
226 		while (DMA_CTRL_REG_GET_FIELD(DMA_ON, regs->DMA_CTRL_REG)) {
227 		}
228 
229 		/* Disable interrupts for the requested channel */
230 		DMA->DMA_INT_MASK_REG &= ~(BIT(channel));
231 		/* Clear the status register; any pending status of the requested channel is now stale */
232 		DMA->DMA_CLEAR_INT_REG |= BIT(channel);
233 
234 		/* DMA interrupts should be disabled only if all channels are disabled. */
235 		if (!dma_smartbond_is_dma_active() && irq_is_enabled(SMARTBOND_IRQN)) {
236 			irq_disable(SMARTBOND_IRQN);
237 			/* Allow entering sleep once all DMA channels are inactive */
238 			dma_smartbond_pm_policy_state_lock_put();
239 		}
240 	}
241 
242 	irq_unlock(key);
243 }
244 
245 static bool dma_channel_dst_addr_check_and_adjust(uint32_t channel, uint32_t *dst)
246 {
247 	uint32_t phy_address;
248 	uint32_t secure_boot_reg;
249 	bool is_aes_keys_protected, is_qspic_keys_protected;
250 
251 	phy_address = black_orca_phy_addr(*dst);
252 
253 	secure_boot_reg = CRG_TOP->SECURE_BOOT_REG;
254 	is_aes_keys_protected =
255 		(secure_boot_reg & CRG_TOP_SECURE_BOOT_REG_PROT_AES_KEY_READ_Msk);
256 	is_qspic_keys_protected =
257 		(secure_boot_reg & CRG_TOP_SECURE_BOOT_REG_PROT_QSPI_KEY_READ_Msk);
258 
259 	/*
260 	 * If the destination address reflects the AES key buffer area and secure keys are protected
261 	 * then only the secure channel #7 can be used to transfer data to AES key buffer.
262 	 */
263 	if ((IS_AES_KEYS_BUF_RANGE(phy_address) &&
264 		(is_aes_keys_protected || is_qspic_keys_protected) &&
265 		(channel != DMA_SECURE_CHANNEL))) {
266 		LOG_ERR("Keys are protected. Only secure channel #7 can be employed.");
267 		return false;
268 	}
269 
270 	if (IS_QSPIF_ADDRESS(phy_address) || IS_QSPIF_CACHED_ADDRESS(phy_address) ||
271 				IS_OTP_ADDRESS(phy_address) || IS_OTP_P_ADDRESS(phy_address)) {
272 		LOG_ERR("Invalid destination location.");
273 		return false;
274 	}
275 
276 	*dst = phy_address;
277 
278 	return true;
279 }
280 
281 static bool dma_channel_src_addr_check_and_adjust(uint32_t channel, uint32_t *src)
282 {
283 	uint32_t phy_address;
284 	uint32_t secure_boot_reg;
285 	bool is_aes_keys_protected, is_qspic_keys_protected;
286 
287 	/* DMA can only access physical addresses, not remapped. */
288 	phy_address = black_orca_phy_addr(*src);
289 
290 	if (IS_QSPIF_CACHED_ADDRESS(phy_address)) {
291 		/*
292 		 * To achieve max. performance, peripherals should not access the Flash memory
293 		 * through the instruction cache controller (avoid cache misses).
294 		 */
295 		phy_address += (MCU_QSPIF_M_BASE - MCU_QSPIF_M_CACHED_BASE);
296 	} else if (IS_OTP_ADDRESS(phy_address)) {
297 		/* Peripherals should access OTP through its peripheral address space. */
298 		phy_address += (MCU_OTP_M_P_BASE - MCU_OTP_M_BASE);
299 	}
300 
301 	secure_boot_reg = CRG_TOP->SECURE_BOOT_REG;
302 	is_aes_keys_protected =
303 		(secure_boot_reg & CRG_TOP_SECURE_BOOT_REG_PROT_AES_KEY_READ_Msk);
304 	is_qspic_keys_protected =
305 		(secure_boot_reg & CRG_TOP_SECURE_BOOT_REG_PROT_QSPI_KEY_READ_Msk);
306 
307 	/*
308 	 * If the source address reflects protected area in OTP then only the
309 	 * secure channel #7 can be used to fetch secure keys data.
310 	 */
311 	if (((IS_ADDRESS_USER_DATA_KEYS_SEGMENT(phy_address) && is_aes_keys_protected) ||
312 	     (IS_ADDRESS_QSPI_FW_KEYS_SEGMENT(phy_address) && is_qspic_keys_protected)) &&
313 							(channel != DMA_SECURE_CHANNEL)) {
314 		LOG_ERR("Keys are protected. Only secure channel #7 can be employed.");
315 		return false;
316 	}
317 
318 	*src = phy_address;
319 
320 	return true;
321 }
322 
323 static bool dma_channel_update_dreq_mode(enum dma_channel_direction direction,
324 									uint32_t *dma_ctrl_reg)
325 {
326 	switch (direction) {
327 	case MEMORY_TO_HOST:
328 	case HOST_TO_MEMORY:
329 	case MEMORY_TO_MEMORY:
330 		/* DMA channel starts immediately */
331 		DMA_CTRL_REG_SET_FIELD(DREQ_MODE, *dma_ctrl_reg, DREQ_MODE_SW);
332 		break;
333 	case PERIPHERAL_TO_MEMORY:
334 	case MEMORY_TO_PERIPHERAL:
335 	case PERIPHERAL_TO_PERIPHERAL:
336 		/* DMA channel is started by the peripheral DMA request */
337 		DMA_CTRL_REG_SET_FIELD(DREQ_MODE, *dma_ctrl_reg, DREQ_MODE_HW);
338 		break;
339 	default:
340 		return false;
341 	};
342 
343 	return true;
344 }
345 
346 static bool dma_channel_update_src_addr_adj(enum dma_addr_adj addr_adj, uint32_t *dma_ctrl_reg)
347 {
348 	switch (addr_adj) {
349 	case DMA_ADDR_ADJ_NO_CHANGE:
350 		DMA_CTRL_REG_SET_FIELD(AINC, *dma_ctrl_reg, ADDR_ADJ_NO_CHANGE);
351 		break;
352 	case DMA_ADDR_ADJ_INCREMENT:
353 		DMA_CTRL_REG_SET_FIELD(AINC, *dma_ctrl_reg, ADDR_ADJ_INCR);
354 		break;
355 	default:
356 		return false;
357 	}
358 
359 	return true;
360 }
361 
362 static bool dma_channel_update_dst_addr_adj(enum dma_addr_adj addr_adj, uint32_t *dma_ctrl_reg)
363 {
364 	switch (addr_adj) {
365 	case DMA_ADDR_ADJ_NO_CHANGE:
366 		DMA_CTRL_REG_SET_FIELD(BINC, *dma_ctrl_reg, ADDR_ADJ_NO_CHANGE);
367 		break;
368 	case DMA_ADDR_ADJ_INCREMENT:
369 		DMA_CTRL_REG_SET_FIELD(BINC, *dma_ctrl_reg, ADDR_ADJ_INCR);
370 		break;
371 	default:
372 		return false;
373 	}
374 
375 	return true;
376 }
377 
378 static bool dma_channel_update_bus_width(uint16_t bw, uint32_t *dma_ctrl_reg)
379 {
380 	switch (bw) {
381 	case DMA_SMARTBOND_BUS_WIDTH_1B:
382 		DMA_CTRL_REG_SET_FIELD(BW, *dma_ctrl_reg, BUS_WIDTH_1B);
383 		break;
384 	case DMA_SMARTBOND_BUS_WIDTH_2B:
385 		DMA_CTRL_REG_SET_FIELD(BW, *dma_ctrl_reg, BUS_WIDTH_2B);
386 		break;
387 	case DMA_SMARTBOND_BUS_WIDTH_4B:
388 		DMA_CTRL_REG_SET_FIELD(BW, *dma_ctrl_reg, BUS_WIDTH_4B);
389 		break;
390 	default:
391 		return false;
392 	}
393 
394 	return true;
395 }
396 
397 static bool dma_channel_update_burst_mode(uint16_t burst, uint32_t *dma_ctrl_reg)
398 {
399 	switch (burst) {
400 	case DMA_SMARTBOND_BURST_LEN_1B:
401 		DMA_CTRL_REG_SET_FIELD(BURST_MODE, *dma_ctrl_reg, BURST_MODE_0B);
402 		break;
403 	case DMA_SMARTBOND_BURST_LEN_4B:
404 		DMA_CTRL_REG_SET_FIELD(BURST_MODE, *dma_ctrl_reg, BURST_MODE_4B);
405 		break;
406 	case DMA_SMARTBOND_BURST_LEN_8B:
407 		DMA_CTRL_REG_SET_FIELD(BURST_MODE, *dma_ctrl_reg, BURST_MODE_8B);
408 		break;
409 	default:
410 		return false;
411 	}
412 
413 	return true;
414 }
415 
416 static void dma_channel_update_req_sense(enum dma_smartbond_trig_mux trig_mux,
417 						uint32_t channel, uint32_t *dma_ctrl_reg)
418 {
419 	switch (trig_mux) {
420 	case DMA_SMARTBOND_TRIG_MUX_UART:
421 	case DMA_SMARTBOND_TRIG_MUX_UART2:
422 	case DMA_SMARTBOND_TRIG_MUX_UART3:
423 	case DMA_SMARTBOND_TRIG_MUX_I2C:
424 	case DMA_SMARTBOND_TRIG_MUX_I2C2:
425 	case DMA_SMARTBOND_TRIG_MUX_USB:
426 		/* Odd channel numbers should reflect TX path */
427 		if (channel & BIT(0)) {
428 			DMA_CTRL_REG_SET_FIELD(REQ_SENSE, *dma_ctrl_reg, REQ_SENSE_EDGE);
429 			break;
430 		}
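		/* Even (RX) channels fall through and use level-sensitive requests */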
431 	default:
432 		DMA_CTRL_REG_SET_FIELD(REQ_SENSE, *dma_ctrl_reg, REQ_SENSE_LEVEL);
433 	}
434 }
435 
436 static void dma_set_mux_request(enum dma_smartbond_trig_mux trig_mux, uint32_t channel)
437 {
438 	unsigned int key;
439 
440 	key = irq_lock();
441 	DMA_REQ_MUX_REG_SET(channel, trig_mux);
442 
443 	/*
444 	 * Having same trigger for different channels can cause unpredictable results.
445 	 * The audio triggers (src and pcm) are an exception, as they use 2 pairs each
446 	 * for DMA access.
447 	 * The less significant selector has higher priority and will control
448 	 * the DMA acknowledge signal driven to the selected peripheral. Make sure
449 	 * the current selector does not match with selectors of
450 	 * higher priorities (dma channels of lower indexing). It's OK if a
451 	 * channel of higher indexing defines the same peripheral request source
452 	 * (should be ignored as it has lower priority).
453 	 */
454 	if (trig_mux != DMA_SMARTBOND_TRIG_MUX_NONE) {
455 		switch (channel) {
456 		case DMA_SMARTBOND_CHANNEL_7:
457 		case DMA_SMARTBOND_CHANNEL_6:
458 			if (DMA_REQ_MUX_REG_GET(DMA_SMARTBOND_CHANNEL_5) == trig_mux) {
459 				DMA_REQ_MUX_REG_SET(DMA_SMARTBOND_CHANNEL_5,
460 								DMA_SMARTBOND_TRIG_MUX_NONE);
461 			}
462 			/* fall-through */
463 		case DMA_SMARTBOND_CHANNEL_5:
464 		case DMA_SMARTBOND_CHANNEL_4:
465 			if (DMA_REQ_MUX_REG_GET(DMA_SMARTBOND_CHANNEL_3) == trig_mux) {
466 				DMA_REQ_MUX_REG_SET(DMA_SMARTBOND_CHANNEL_3,
467 								DMA_SMARTBOND_TRIG_MUX_NONE);
468 			}
469 			/* fall-through */
470 		case DMA_SMARTBOND_CHANNEL_3:
471 		case DMA_SMARTBOND_CHANNEL_2:
472 			if (DMA_REQ_MUX_REG_GET(DMA_SMARTBOND_CHANNEL_1) == trig_mux) {
473 				DMA_REQ_MUX_REG_SET(DMA_SMARTBOND_CHANNEL_1,
474 								DMA_SMARTBOND_TRIG_MUX_NONE);
475 			}
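			/* fall-through */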
476 		case DMA_SMARTBOND_CHANNEL_1:
477 		case DMA_SMARTBOND_CHANNEL_0:
478 			break;
479 		}
480 	}
481 
482 	irq_unlock(key);
483 }
484 
485 static int dma_smartbond_config(const struct device *dev, uint32_t channel, struct dma_config *cfg)
486 {
487 	struct dma_smartbond_data *data = dev->data;
488 	struct channel_regs *regs;
489 	uint32_t dma_ctrl_reg;
490 	uint32_t src_dst_address;
491 
492 	if (channel >= DMA_CHANNELS_COUNT) {
493 		LOG_ERR("Invalid DMA channel index");
494 		return -EINVAL;
495 	}
496 	regs = DMA_CHN2REG(channel);
497 
498 	dma_ctrl_reg = regs->DMA_CTRL_REG;
499 
500 	if (DMA_CTRL_REG_GET_FIELD(DMA_ON, dma_ctrl_reg)) {
501 		LOG_ERR("Requested channel is enabled. It should first be disabled");
502 		return -EIO;
503 	}
504 
505 	if (cfg == NULL || cfg->head_block == NULL) {
506 		LOG_ERR("Missing configuration structure");
507 		return -EINVAL;
508 	}
509 
510 	/* Error handling is not supported; just warn user. */
511 	if (!cfg->error_callback_dis) {
512 		LOG_WRN("Error handling is not supported");
513 	}
514 
515 	if (!cfg->complete_callback_en) {
516 		data->channel_data[channel].cb = cfg->dma_callback;
517 		data->channel_data[channel].user_data = cfg->user_data;
518 	} else {
519 		LOG_WRN("User callback will only be invoked at transfer completion, not per block.");
520 
521 		/* Nullify pointers to indicate notifications are disabled. */
522 		data->channel_data[channel].cb = NULL;
523 		data->channel_data[channel].user_data = NULL;
524 	}
525 
526 	data->channel_data[channel].dir = cfg->channel_direction;
527 
528 	if (cfg->block_count > DMA_BLOCK_COUNT) {
529 		LOG_WRN("Only a single block is supported; remaining blocks will be discarded");
530 	}
531 
532 	if (cfg->channel_priority >= DMA_SMARTBOND_CHANNEL_PRIO_MAX) {
533 		cfg->channel_priority = DMA_SMARTBOND_CHANNEL_PRIO_7;
534 		LOG_WRN("Channel priority exceeded max. Setting to highest valid level");
535 	}
536 
537 	DMA_CTRL_REG_SET_FIELD(DMA_PRIO, dma_ctrl_reg, cfg->channel_priority);
538 
539 	if (((cfg->source_burst_length != cfg->dest_burst_length) ||
540 		!dma_channel_update_burst_mode(cfg->source_burst_length, &dma_ctrl_reg))) {
541 		LOG_ERR("Invalid burst length or source/destination burst length mismatch");
542 		return -EINVAL;
543 	}
544 
545 	data->channel_data[channel].burst_len = cfg->source_burst_length;
546 
547 	if (cfg->source_data_size != cfg->dest_data_size ||
548 		!dma_channel_update_bus_width(cfg->source_data_size, &dma_ctrl_reg)) {
549 		LOG_ERR("Invalid bus width or source and destination bus width mismatch");
550 		return -EINVAL;
551 	}
552 
553 	data->channel_data[channel].bus_width = cfg->source_data_size;
554 
555 	if (cfg->source_chaining_en || cfg->dest_chaining_en ||
556 		cfg->head_block->source_gather_en || cfg->head_block->dest_scatter_en ||
557 		cfg->head_block->source_reload_en || cfg->head_block->dest_reload_en) {
558 		LOG_WRN("Chaining, scattering, gathering and reloading are not supported");
559 	}
560 
561 	if (!dma_channel_update_src_addr_adj(cfg->head_block->source_addr_adj,
562 								&dma_ctrl_reg)) {
563 		LOG_ERR("Invalid source address adjustment");
564 		return -EINVAL;
565 	}
566 
567 	if (!dma_channel_update_dst_addr_adj(cfg->head_block->dest_addr_adj, &dma_ctrl_reg)) {
568 		LOG_ERR("Invalid destination address adjustment");
569 		return -EINVAL;
570 	}
571 
572 	if (!dma_channel_update_dreq_mode(cfg->channel_direction, &dma_ctrl_reg)) {
573 		LOG_ERR("Invalid channel direction");
574 		return -EINVAL;
575 	}
576 
577 	/* Cyclic is valid only when DREQ_MODE is set */
578 	if (cfg->cyclic && DMA_CTRL_REG_GET_FIELD(DREQ_MODE, dma_ctrl_reg) != DREQ_MODE_HW) {
579 		LOG_ERR("Circular mode is only supported for non memory-memory transfers");
580 		return -EINVAL;
581 	}
582 
583 	DMA_CTRL_REG_SET_FIELD(CIRCULAR, dma_ctrl_reg, cfg->cyclic);
584 
585 	if (DMA_CTRL_REG_GET_FIELD(DREQ_MODE, dma_ctrl_reg) == DREQ_MODE_SW &&
586 		DMA_CTRL_REG_GET_FIELD(AINC, dma_ctrl_reg) == ADDR_ADJ_NO_CHANGE &&
587 		DMA_CTRL_REG_GET_FIELD(BINC, dma_ctrl_reg) == ADDR_ADJ_INCR) {
588 		/*
589 		 * Valid for memory initialization to a specific value. This process
590 		 * cannot be interrupted by other DMA channels.
591 		 */
592 		DMA_CTRL_REG_SET_FIELD(DMA_INIT, dma_ctrl_reg, COPY_MODE_INIT);
593 	} else {
594 		DMA_CTRL_REG_SET_FIELD(DMA_INIT, dma_ctrl_reg, COPY_MODE_BLOCK);
595 	}
596 
597 	dma_channel_update_req_sense(cfg->dma_slot, channel, &dma_ctrl_reg);
598 
599 	regs->DMA_CTRL_REG = dma_ctrl_reg;
600 
601 	/* Requested address might be changed */
602 	src_dst_address = cfg->head_block->source_address;
603 	if (!dma_channel_src_addr_check_and_adjust(channel, &src_dst_address)) {
604 		return -EINVAL;
605 	}
606 
607 	if (src_dst_address % cfg->source_data_size) {
608 		LOG_ERR("Source address is not bus width aligned");
609 		return -EINVAL;
610 	}
611 
612 	regs->DMA_A_START = src_dst_address;
613 
614 	src_dst_address = cfg->head_block->dest_address;
615 	if (!dma_channel_dst_addr_check_and_adjust(channel, &src_dst_address)) {
616 		return -EINVAL;
617 	}
618 
619 	if (src_dst_address % cfg->dest_data_size) {
620 		LOG_ERR("Destination address is not bus width aligned");
621 		return -EINVAL;
622 	}
623 
624 	regs->DMA_B_START = src_dst_address;
625 
626 	if (cfg->head_block->block_size % (cfg->source_data_size * cfg->source_burst_length)) {
627 		LOG_ERR("Block size is not a multiple of bus width times burst length");
628 		return -EINVAL;
629 	}
630 
631 	regs->DMA_LEN_REG = (cfg->head_block->block_size / cfg->source_data_size) - 1;
632 
633 	/* Interrupt will be raised once all transfers are complete. */
634 	regs->DMA_INT_REG = (cfg->head_block->block_size / cfg->source_data_size) - 1;
635 
636 	if ((cfg->source_handshake != cfg->dest_handshake) ||
637 		(cfg->source_handshake != 0)/*HW*/) {
638 		LOG_ERR("Source/destination handshakes mismatch or invalid");
639 		return -EINVAL;
640 	}
641 
642 	dma_set_mux_request(cfg->dma_slot, channel);
643 
644 	/* Designate that channel has been configured */
645 	data->channel_data[channel].is_dma_configured = true;
646 
647 	return 0;
648 }
649 
650 
651 static int dma_smartbond_reload(const struct device *dev, uint32_t channel, uint32_t src,
652 									uint32_t dst, size_t size)
653 {
654 	struct dma_smartbond_data *data = dev->data;
655 	struct channel_regs *regs;
656 
657 	if (channel >= DMA_CHANNELS_COUNT) {
658 		LOG_ERR("Invalid DMA channel index");
659 		return -EINVAL;
660 	}
661 	regs = DMA_CHN2REG(channel);
662 
663 	if (!data->channel_data[channel].is_dma_configured) {
664 		LOG_ERR("Requested DMA channel should first be configured");
665 		return -EINVAL;
666 	}
667 
668 	if (size == 0) {
669 		LOG_ERR("Min. transfer size is one");
670 		return -EINVAL;
671 	}
672 
673 	if (DMA_CTRL_REG_GET_FIELD(DMA_ON, regs->DMA_CTRL_REG)) {
674 		LOG_ERR("Channel is busy, settings cannot be changed mid-transfer");
675 		return -EBUSY;
676 	}
677 
678 	if (src % data->channel_data[channel].bus_width) {
679 		LOG_ERR("Source address is not bus width aligned");
680 		return -EINVAL;
681 	}
682 
683 	if (!dma_channel_src_addr_check_and_adjust(channel, &src)) {
684 		return -EINVAL;
685 	}
686 
687 	regs->DMA_A_START = src;
688 
689 	if (dst % data->channel_data[channel].bus_width) {
690 		LOG_ERR("Destination address is not bus width aligned");
691 		return -EINVAL;
692 	}
693 
694 	if (!dma_channel_dst_addr_check_and_adjust(channel, &dst)) {
695 		return -EINVAL;
696 	}
697 
698 	regs->DMA_B_START = dst;
699 
700 	if (size % (data->channel_data[channel].burst_len *
701 							data->channel_data[channel].bus_width)) {
702 		LOG_ERR("Transfer size is not a multiple of bus width times burst length");
703 		return -EINVAL;
704 	}
705 
706 	regs->DMA_LEN_REG = (size / data->channel_data[channel].bus_width) - 1;
707 
708 	/* Interrupt will be raised once all transfers are complete. */
709 	regs->DMA_INT_REG = (size / data->channel_data[channel].bus_width) - 1;
710 
711 	return 0;
712 }
713 
714 static int dma_smartbond_start(const struct device *dev, uint32_t channel)
715 {
716 	struct channel_regs *regs;
717 	struct dma_smartbond_data *data = dev->data;
718 
719 	if (channel >= DMA_CHANNELS_COUNT) {
720 		LOG_ERR("Invalid DMA channel index");
721 		return -EINVAL;
722 	}
723 	regs = DMA_CHN2REG(channel);
724 
725 	if (!data->channel_data[channel].is_dma_configured) {
726 		LOG_ERR("Requested DMA channel should first be configured");
727 		return -EINVAL;
728 	}
729 
730 	/* Should return success if the requested channel is already started. */
731 	if (DMA_CTRL_REG_GET_FIELD(DMA_ON, regs->DMA_CTRL_REG)) {
732 		return 0;
733 	}
734 
735 	dma_smartbond_set_channel_status(dev, channel, true);
736 
737 	return 0;
738 }
739 
740 static int dma_smartbond_stop(const struct device *dev, uint32_t channel)
741 {
742 	struct channel_regs *regs;
743 
744 	if (channel >= DMA_CHANNELS_COUNT) {
745 		LOG_ERR("Invalid DMA channel index");
746 		return -EINVAL;
747 	}
748 	regs = DMA_CHN2REG(channel);
749 
750 	/*
751 	 * In normal mode DMA_ON is cleared automatically. However we need to clear
752 	 * the corresponding register mask and disable NVIC if there is no other
753 	 * channel in use.
754 	 */
755 	dma_smartbond_set_channel_status(dev, channel, false);
756 
757 	return 0;
758 }
759 
760 static int dma_smartbond_suspend(const struct device *dev, uint32_t channel)
761 {
762 	if (channel >= DMA_CHANNELS_COUNT) {
763 		LOG_ERR("Invalid DMA channel index");
764 		return -EINVAL;
765 	}
766 
767 	/*
768 	 * Freezing the DMA engine is valid for memory-to-memory operations.
769 	 * Valid memory locations are SYSRAM and/or PSRAM.
770 	 */
771 	LOG_WRN("DMA is frozen globally");
772 
773 	/*
774 	 * Freezing the DMA engine can only be done globally, not per channel.
775 	 * An attempt to disable the channel would result in resetting the IDX
776 	 * register the next time the channel is re-enabled.
777 	 */
778 	GPREG->SET_FREEZE_REG = GPREG_SET_FREEZE_REG_FRZ_DMA_Msk;
779 
780 	return 0;
781 }
782 
783 static int dma_smartbond_resume(const struct device *dev, uint32_t channel)
784 {
785 	if (channel >= DMA_CHANNELS_COUNT) {
786 		LOG_ERR("Invalid DMA channel index");
787 		return -EINVAL;
788 	}
789 
790 	LOG_WRN("DMA is unfrozen globally");
791 
792 	/* Unfreezing the DMA engine can only be done globally, not per channel! */
793 	GPREG->RESET_FREEZE_REG = GPREG_RESET_FREEZE_REG_FRZ_DMA_Msk;
794 
795 	return 0;
796 }
797 
798 static int dma_smartbond_get_status(const struct device *dev, uint32_t channel,
799 							struct dma_status *stat)
800 {
801 	struct channel_regs *regs;
802 	int key;
803 	struct dma_smartbond_data *data = dev->data;
804 	uint8_t bus_width;
805 	uint32_t dma_ctrl_reg, dma_idx_reg, dma_len_reg;
806 
807 	if (channel >= DMA_CHANNELS_COUNT) {
808 		LOG_ERR("Invalid DMA channel index");
809 		return -EINVAL;
810 	}
811 
812 	if (stat == NULL) {
813 		LOG_ERR("User should provide a valid pointer to store the status info requested");
		return -EINVAL;
814 	}
815 
816 	if (!data->channel_data[channel].is_dma_configured) {
817 		LOG_ERR("Requested DMA channel should first be configured");
818 		return -EINVAL;
819 	}
820 
821 	regs = DMA_CHN2REG(channel);
822 
823 	/*
824 	 * The DMA is running in parallel with CPU and so it might happen that an on-going transfer
825 	 * might be completed the moment user parses the status results. Disable interrupts globally
826 	 * so there is no chance for a new transfer to be initiated from within ISR and so changing
827 	 * the channel registers values.
828 	 */
829 	key = irq_lock();
830 
831 	dma_ctrl_reg = regs->DMA_CTRL_REG;
832 	dma_idx_reg = regs->DMA_IDX_REG;
833 	dma_len_reg = regs->DMA_LEN_REG;
834 
835 	/* Calculate how many bytes each transfer consists of. */
836 	bus_width = DMA_CTRL_REG_GET_FIELD(BW, dma_ctrl_reg);
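	/* BW field encoding: 0 -> 1 byte, 1 -> 2 bytes, 2 -> 4 bytes. */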
837 	if (bus_width == BUS_WIDTH_1B) {
838 		bus_width = 1;
839 	} else {
840 		bus_width <<= 1;
841 	}
842 
843 	/* Convert transfers to bytes. */
844 	stat->total_copied = dma_idx_reg * bus_width;
845 	stat->pending_length = (dma_len_reg - dma_idx_reg) * bus_width;
846 	stat->busy = DMA_CTRL_REG_GET_FIELD(DMA_ON, dma_ctrl_reg);
847 	stat->dir = data->channel_data[channel].dir;
848 
849 	/* DMA does not support circular buffer functionality */
850 	stat->free = 0;
851 	stat->read_position = 0;
852 	stat->write_position = 0;
853 
854 	irq_unlock(key);
855 
856 	return 0;
857 }
858 
859 static int dma_smartbond_get_attribute(const struct device *dev, uint32_t type, uint32_t *value)
860 {
861 	if (value == NULL) {
862 		LOG_ERR("User should provide a valid pointer to attribute value");
863 		return -EINVAL;
864 	}
865 
866 	switch (type) {
867 	/*
868 	 * Source and destination addresses should be multiple of a channel's bus width.
869 	 * This info could be provided at runtime given that attributes of a specific
870 	 * channel could be requested.
871 	 */
872 	case DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT:
873 	case DMA_ATTR_COPY_ALIGNMENT:
874 	/*
875 	 * Buffer size should be multiple of a channel's bus width multiplied by burst length.
876 	 * This info could be provided at runtime given that attributes of a specific channel
877 	 * could be requested.
878 	 */
879 	case DMA_ATTR_BUFFER_SIZE_ALIGNMENT:
880 		return -ENOSYS;
881 	case DMA_ATTR_MAX_BLOCK_COUNT:
882 		*value = DMA_BLOCK_COUNT;
883 		return 0;
884 	default:
885 		return -EINVAL;
886 	}
887 }
888 
889 static bool dma_smartbond_chan_filter(const struct device *dev, int channel, void *filter_param)
890 {
891 	uint32_t requested_channel;
892 
893 	if (channel >= DMA_CHANNELS_COUNT) {
894 		LOG_ERR("Invalid DMA channel index");
895 		return false;
896 	}
897 
898 	/* If user does not provide any channel request explicitly, return true. */
899 	if (filter_param == NULL) {
900 		return true;
901 	}
902 
903 	requested_channel = *(uint32_t *)filter_param;
904 
905 	if (channel == requested_channel) {
906 		return true;
907 	}
908 
909 	return false;
910 }
911 
912 static DEVICE_API(dma, dma_smartbond_driver_api) = {
913 	.config = dma_smartbond_config,
914 	.reload = dma_smartbond_reload,
915 	.start = dma_smartbond_start,
916 	.stop = dma_smartbond_stop,
917 	.suspend = dma_smartbond_suspend,
918 	.resume = dma_smartbond_resume,
919 	.get_status = dma_smartbond_get_status,
920 	.get_attribute = dma_smartbond_get_attribute,
921 	.chan_filter = dma_smartbond_chan_filter
922 };
923 
924 static void smartbond_dma_isr(const void *arg)
925 {
926 	uint16_t dma_int_status_reg;
927 	int i;
928 	struct channel_regs *regs;
929 	struct dma_smartbond_data *data = ((const struct device *)arg)->data;
930 
931 	/*
932 	 * A single interrupt line is generated for all channels and so each channel
933 	 * should be parsed separately.
934 	 */
935 	for (i = 0, dma_int_status_reg = DMA->DMA_INT_STATUS_REG;
936 		 i < DMA_CHANNELS_COUNT && dma_int_status_reg != 0; ++i, dma_int_status_reg >>= 1) {
937 		/* Check if the selected channel has raised the interrupt line */
938 		if (dma_int_status_reg & BIT(0)) {
939 
940 			regs = DMA_CHN2REG(i);
941 			/*
942 			 * Should be valid if callbacks are explicitly enabled by users.
943 			 * Interrupt should be triggered only when the total number of
944 			 * bytes has been transferred. Bus errors cannot raise interrupts.
945 			 */
946 			if (data->channel_data[i].cb) {
947 				data->channel_data[i].cb((const struct device *)arg,
948 				data->channel_data[i].user_data, i, DMA_STATUS_COMPLETE);
949 			}
950 			/* Channel line should be cleared otherwise ISR will keep firing! */
951 			DMA->DMA_CLEAR_INT_REG = BIT(i);
952 		}
953 	}
954 }
955 
956 #if defined(CONFIG_PM_DEVICE)
957 static bool dma_smartbond_is_sleep_allowed(const struct device *dev)
958 {
959 	struct dma_smartbond_data *data = dev->data;
960 
961 	for (int i = 0; i < data->dma_ctx.dma_channels; i++) {
962 		if (atomic_test_bit(data->dma_ctx.atomic, i)) {
963 			/* Abort sleeping if at least one dma channel is acquired */
964 			return false;
965 		}
966 	}
967 
968 	return true;
969 }
970 
971 static int dma_smartbond_pm_action(const struct device *dev,
972 	enum pm_device_action action)
973 {
974 	int ret = 0;
975 
976 	switch (action) {
977 	case PM_DEVICE_ACTION_SUSPEND:
978 		/*
979 		 * When we reach this point there should be no ongoing DMA transfers.
980 		 * However, a DMA channel can still be acquired and so the configured
981 		 * channel(s) should be retained. To avoid reconfiguring DMA or
982 		 * channel(s) should be retained. To avoid having to reconfigure DMA or
983 		 * save/restore the DMA channels' registers, sleep is allowed only
984 		 * when all DMA channels have been released.
985 		if (!dma_smartbond_is_sleep_allowed(dev)) {
986 			ret = -EBUSY;
987 		}
988 		/*
989 		 * No need to perform any actions here as the DMA engine
990 		 * should already be turned off.
991 		 */
992 		break;
993 	case PM_DEVICE_ACTION_RESUME:
994 		/*
995 		 * No need to perform any actions here as the DMA engine
996 		 * will be configured by application explicitly.
997 		 */
998 		break;
999 	default:
1000 		return -ENOTSUP;
1001 	}
1002 
1003 	return ret;
1004 }
1005 #endif
1006 
1007 static int dma_smartbond_init(const struct device *dev)
1008 {
1009 #ifdef CONFIG_DMA_64BIT
1010 	LOG_ERR("64-bit addressing mode is not supported\n");
1011 	return -ENOSYS;
1012 #endif
1013 
1014 	int idx;
1015 	struct dma_smartbond_data *data;
1016 
1017 	data = dev->data;
1018 	data->dma_ctx.magic = DMA_MAGIC;
1019 	data->dma_ctx.dma_channels = DMA_CHANNELS_COUNT;
1020 	data->dma_ctx.atomic = data->channels_atomic;
1021 
1022 	/* Make sure that all channels are disabled. */
1023 	for (idx = 0; idx < DMA_CHANNELS_COUNT; idx++) {
1024 		dma_smartbond_set_channel_status(dev, idx, false);
1025 		data->channel_data[idx].is_dma_configured = false;
1026 	}
1027 
1028 	IRQ_CONNECT(SMARTBOND_IRQN, SMARTBOND_IRQ_PRIO, smartbond_dma_isr,
1029 								DEVICE_DT_INST_GET(0), 0);
1030 
1031 	return 0;
1032 }
1033 
1034 #define SMARTBOND_DMA_INIT(inst) \
1035 	BUILD_ASSERT((inst) == 0, "multiple instances are not supported"); \
1036 	\
1037 	PM_DEVICE_DT_INST_DEFINE(inst, dma_smartbond_pm_action);	\
1038 	\
1039 	static struct dma_smartbond_data dma_smartbond_data_ ## inst; \
1040 	\
1041 	DEVICE_DT_INST_DEFINE(0, dma_smartbond_init, \
1042 		PM_DEVICE_DT_INST_GET(inst), \
1043 		&dma_smartbond_data_ ## inst, NULL,	\
1044 		POST_KERNEL, \
1045 		CONFIG_DMA_INIT_PRIORITY, \
1046 		&dma_smartbond_driver_api);
1047 
1048 DT_INST_FOREACH_STATUS_OKAY(SMARTBOND_DMA_INIT)
1049