/*
 * Copyright (c) 2022 Renesas Electronics Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT renesas_smartbond_spi

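/*
 * A minimal sketch of a matching devicetree node, inferred from the macros
 * used below (periph-clock-config, optional "tx"/"rx" DMA specifiers with
 * channel/config cells, optional cs-gpios); the cell values, pinctrl label
 * and GPIO are illustrative only:
 *
 *	&spi {
 *		status = "okay";
 *		periph-clock-config = <0x01>;
 *		pinctrl-0 = <&spi_controller>;
 *		pinctrl-names = "default";
 *		dmas = <&dma 1 4>, <&dma 0 4>;
 *		dma-names = "tx", "rx";
 *		cs-gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
 *	};
 */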
#define LOG_LEVEL CONFIG_SPI_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(spi_smartbond);

#include "spi_context.h"

#include <zephyr/drivers/gpio.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/spi/rtio.h>
#include <zephyr/pm/device.h>
#include <zephyr/pm/policy.h>
#include <zephyr/pm/device_runtime.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/sys/byteorder.h>

#include <DA1469xAB.h>
#include <da1469x_pd.h>

#define DIVN_CLK	32000000	/* DIVN clock: fixed @32MHz */
#define SCLK_FREQ_2MHZ	(DIVN_CLK / 14) /* 2.285714 MHz */
#define SCLK_FREQ_4MHZ	(DIVN_CLK / 8)	/* 4 MHz */
#define SCLK_FREQ_8MHZ	(DIVN_CLK / 4)	/* 8 MHz */
#define SCLK_FREQ_16MHZ (DIVN_CLK / 2)	/* 16 MHz */

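/*
 * Note: the controller derives SCLK from DIVN via the SPI_CLK field of
 * SPI_CTRL_REG (see spi_smartbond_set_speed() below): 0 -> DIVN/8,
 * 1 -> DIVN/4, 2 -> DIVN/2 and 3 -> DIVN/14, hence the discrete
 * frequencies above.
 */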
enum spi_smartbond_transfer {
	SPI_SMARTBOND_TRANSFER_TX_ONLY,
	SPI_SMARTBOND_TRANSFER_RX_ONLY,
	SPI_SMARTBOND_TRANSFER_TX_RX,
	SPI_SMARTBOND_TRANSFER_NONE
};

enum spi_smartbond_dma_channel {
	SPI_SMARTBOND_DMA_TX_CHANNEL,
	SPI_SMARTBOND_DMA_RX_CHANNEL
};

enum spi_smartbond_fifo_mode {
	/* Bi-directional mode */
	SPI_SMARTBOND_FIFO_MODE_TX_RX,
	/* TX FIFO single depth, no flow control */
	SPI_SMARTBOND_FIFO_MODE_RX_ONLY,
	/* RX FIFO single depth, no flow control */
	SPI_SMARTBOND_FIFO_MODE_TX_ONLY,
	SPI_SMARTBOND_FIFO_NONE
};

struct spi_smartbond_cfg {
	SPI_Type *regs;
	int periph_clock_config;
	const struct pinctrl_dev_config *pcfg;
#ifdef CONFIG_SPI_SMARTBOND_DMA
	int tx_dma_chan;
	int rx_dma_chan;
	uint8_t tx_slot_mux;
	uint8_t rx_slot_mux;
	const struct device *tx_dma_ctrl;
	const struct device *rx_dma_ctrl;
#endif
};

struct spi_smartbond_data {
	struct spi_context ctx;
	uint8_t dfs;

#if defined(CONFIG_PM_DEVICE)
	uint32_t spi_ctrl_reg;
#endif

#ifdef CONFIG_SPI_SMARTBOND_DMA
	struct dma_config tx_dma_cfg;
	struct dma_config rx_dma_cfg;
	struct dma_block_config tx_dma_block_cfg;
	struct dma_block_config rx_dma_block_cfg;
	struct k_sem rx_dma_sync;
	struct k_sem tx_dma_sync;

	ATOMIC_DEFINE(dma_channel_atomic_flag, 2);

#endif

#if defined(CONFIG_SPI_ASYNC) || defined(CONFIG_SPI_SMARTBOND_DMA)
	size_t rx_len;
	size_t tx_len;
	size_t transferred;
	enum spi_smartbond_transfer transfer_mode;
#endif
};

#define SPI_CTRL_REG_SET_FIELD(_field, _var, _val) \
	(_var) = \
	(((_var) & ~SPI_SPI_CTRL_REG_ ## _field ## _Msk) | \
	(((_val) << SPI_SPI_CTRL_REG_ ## _field ## _Pos) & SPI_SPI_CTRL_REG_ ## _field ## _Msk))
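/*
 * Usage example (mirrors how the driver applies the macro below): update
 * the SPI_FIFO_MODE field in a shadow copy of SPI_CTRL_REG without
 * touching the other bits, then write the register back in one access:
 *
 *	uint32_t ctrl = cfg->regs->SPI_CTRL_REG;
 *
 *	SPI_CTRL_REG_SET_FIELD(SPI_FIFO_MODE, ctrl, SPI_SMARTBOND_FIFO_MODE_TX_RX);
 *	cfg->regs->SPI_CTRL_REG = ctrl;
 */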

static inline void spi_smartbond_enable(const struct spi_smartbond_cfg *cfg, bool enable)
{
	if (enable) {
		cfg->regs->SPI_CTRL_REG |= SPI_SPI_CTRL_REG_SPI_ON_Msk;
		cfg->regs->SPI_CTRL_REG &= ~SPI_SPI_CTRL_REG_SPI_RST_Msk;
	} else {
		cfg->regs->SPI_CTRL_REG &= ~SPI_SPI_CTRL_REG_SPI_ON_Msk;
		cfg->regs->SPI_CTRL_REG |= SPI_SPI_CTRL_REG_SPI_RST_Msk;
	}
}

static inline bool spi_smartbond_isenabled(const struct spi_smartbond_cfg *cfg)
{
	return (!!(cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_ON_Msk)) &&
	       (!(cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_RST_Msk));
}

static inline void spi_smartbond_write_word(const struct device *dev)
{
	const struct spi_smartbond_cfg *cfg = dev->config;
	struct spi_smartbond_data *data = dev->data;

	/*
	 * No need to typecast the register address as the controller will automatically
	 * generate the necessary clock cycles based on the data size.
	 */
	switch (data->dfs) {
	case 1:
		cfg->regs->SPI_RX_TX_REG = *(uint8_t *)data->ctx.tx_buf;
		break;
	case 2:
		cfg->regs->SPI_RX_TX_REG = sys_get_le16(data->ctx.tx_buf);
		break;
	case 4:
		cfg->regs->SPI_RX_TX_REG = sys_get_le32(data->ctx.tx_buf);
		break;
	}
}

static inline void spi_smartbond_write_dummy(const struct device *dev)
{
	const struct spi_smartbond_cfg *cfg = dev->config;

	cfg->regs->SPI_RX_TX_REG = 0x0;
}

static inline void spi_smartbond_read_word(const struct device *dev)
{
	const struct spi_smartbond_cfg *cfg = dev->config;
	struct spi_smartbond_data *data = dev->data;

	switch (data->dfs) {
	case 1:
		*(uint8_t *)data->ctx.rx_buf = cfg->regs->SPI_RX_TX_REG;
		break;
	case 2:
		sys_put_le16((uint16_t)cfg->regs->SPI_RX_TX_REG, data->ctx.rx_buf);
		break;
	case 4:
		sys_put_le32(cfg->regs->SPI_RX_TX_REG, data->ctx.rx_buf);
		break;
	}
}

static inline void spi_smartbond_read_discard(const struct device *dev)
{
	const struct spi_smartbond_cfg *cfg = dev->config;

	(void)cfg->regs->SPI_RX_TX_REG;
}

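/*
 * Program the SPI_CLK divider that best matches the requested frequency.
 * The divider is chosen so that the resulting SCLK never exceeds the
 * request (e.g. 10MHz maps to the 8MHz setting); rates below DIVN/14
 * (~2.29MHz) are rejected with -ENOTSUP.
 */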
static inline int spi_smartbond_set_speed(const struct spi_smartbond_cfg *cfg,
					  const uint32_t frequency)
{
	if (frequency < SCLK_FREQ_2MHZ) {
		LOG_ERR("Frequency is lower than minimal SCLK %d", SCLK_FREQ_2MHZ);
		return -ENOTSUP;
	} else if (frequency < SCLK_FREQ_4MHZ) {
		cfg->regs->SPI_CTRL_REG =
			(cfg->regs->SPI_CTRL_REG & ~SPI_SPI_CTRL_REG_SPI_CLK_Msk) |
			3UL << SPI_SPI_CTRL_REG_SPI_CLK_Pos;
	} else if (frequency < SCLK_FREQ_8MHZ) {
		cfg->regs->SPI_CTRL_REG = (cfg->regs->SPI_CTRL_REG & ~SPI_SPI_CTRL_REG_SPI_CLK_Msk);
	} else if (frequency < SCLK_FREQ_16MHZ) {
		cfg->regs->SPI_CTRL_REG =
			(cfg->regs->SPI_CTRL_REG & ~SPI_SPI_CTRL_REG_SPI_CLK_Msk) |
			1UL << SPI_SPI_CTRL_REG_SPI_CLK_Pos;
	} else {
		cfg->regs->SPI_CTRL_REG =
			(cfg->regs->SPI_CTRL_REG & ~SPI_SPI_CTRL_REG_SPI_CLK_Msk) |
			2UL << SPI_SPI_CTRL_REG_SPI_CLK_Pos;
	}
	return 0;
}

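/*
 * Apply the requested word size (8, 16 or 32 bits) to the SPI_WORD field
 * and cache the equivalent frame size in bytes in data->dfs, which the
 * spi_context helpers use to advance the TX/RX buffer pointers.
 */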
static inline int spi_smartbond_set_word_size(const struct spi_smartbond_cfg *cfg,
					      struct spi_smartbond_data *data,
					      const uint32_t operation)
{
	switch (SPI_WORD_SIZE_GET(operation)) {
	case 8:
		data->dfs = 1;
		cfg->regs->SPI_CTRL_REG =
			(cfg->regs->SPI_CTRL_REG & ~SPI_SPI_CTRL_REG_SPI_WORD_Msk);
		break;
	case 16:
		data->dfs = 2;
		cfg->regs->SPI_CTRL_REG =
			(cfg->regs->SPI_CTRL_REG & ~SPI_SPI_CTRL_REG_SPI_WORD_Msk) |
			(1UL << SPI_SPI_CTRL_REG_SPI_WORD_Pos);
		break;
	case 32:
		data->dfs = 4;
		cfg->regs->SPI_CTRL_REG =
			(cfg->regs->SPI_CTRL_REG & ~SPI_SPI_CTRL_REG_SPI_WORD_Msk) |
			(2UL << SPI_SPI_CTRL_REG_SPI_WORD_Pos);
		break;
	default:
		LOG_ERR("Word size not supported");
		return -ENOTSUP;
	}

	return 0;
}

static inline void spi_smartbond_pm_policy_state_lock_get(const struct device *dev)
{
#if defined(CONFIG_PM_DEVICE)
	/*
	 * Prevent the SoC from entering the normal sleep state as PDC does not support
	 * waking up the application core following SPI events.
	 */
	pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
	pm_device_runtime_get(dev);
#endif
}

static inline void spi_smartbond_pm_policy_state_lock_put(const struct device *dev)
{
#if defined(CONFIG_PM_DEVICE)
	pm_device_runtime_put(dev);
	/*
	 * Allow the SoC to enter the normal sleep state once SPI transactions are done.
	 */
	pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
#endif
}

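/*
 * Validate the requested spi_config and program the clock polarity/phase,
 * bus speed and word size accordingly. If the context is already
 * configured with the very same settings, only re-enable the block, as it
 * may have been disabled while the device was suspended.
 */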
static int spi_smartbond_configure(const struct spi_smartbond_cfg *cfg,
				   struct spi_smartbond_data *data,
				   const struct spi_config *spi_cfg)
{
	int rc;

	if (spi_context_configured(&data->ctx, spi_cfg)) {
#ifdef CONFIG_PM_DEVICE
		spi_smartbond_enable(cfg, true);
#endif
		return 0;
	}

	if (spi_cfg->operation & SPI_OP_MODE_SLAVE) {
		LOG_ERR("Slave mode not yet supported");
		return -ENOTSUP;
	}

	if (spi_cfg->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) &&
	    (spi_cfg->operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) {
		LOG_ERR("Only single line mode is supported");
		return -ENOTSUP;
	}

	if (spi_cfg->operation & SPI_MODE_LOOP) {
		LOG_ERR("Loopback mode is not supported");
		return -ENOTSUP;
	}

	if (spi_smartbond_isenabled(cfg)) {
		spi_smartbond_enable(cfg, false);
	}

	rc = spi_smartbond_set_speed(cfg, spi_cfg->frequency);
	if (rc) {
		return rc;
	}

	cfg->regs->SPI_CTRL_REG =
		(spi_cfg->operation & SPI_MODE_CPOL)
			? (cfg->regs->SPI_CTRL_REG | SPI_SPI_CTRL_REG_SPI_POL_Msk)
			: (cfg->regs->SPI_CTRL_REG & ~SPI_SPI_CTRL_REG_SPI_POL_Msk);

	cfg->regs->SPI_CTRL_REG =
		(spi_cfg->operation & SPI_MODE_CPHA)
			? (cfg->regs->SPI_CTRL_REG | SPI_SPI_CTRL_REG_SPI_PHA_Msk)
			: (cfg->regs->SPI_CTRL_REG & ~SPI_SPI_CTRL_REG_SPI_PHA_Msk);

	rc = spi_smartbond_set_word_size(cfg, data, spi_cfg->operation);
	if (rc) {
		return rc;
	}

	cfg->regs->SPI_CTRL_REG &= ~(SPI_SPI_CTRL_REG_SPI_FIFO_MODE_Msk);

	spi_smartbond_enable(cfg, true);

	cfg->regs->SPI_CTRL_REG &= ~SPI_SPI_CTRL_REG_SPI_MINT_Msk;

	data->ctx.config = spi_cfg;

	return 0;
}

#if defined(CONFIG_SPI_ASYNC) || defined(CONFIG_SPI_SMARTBOND_DMA)
static inline void spi_smartbond_isr_set_status(const struct device *dev, bool status)
{
	const struct spi_smartbond_cfg *cfg = dev->config;

	if (status) {
		cfg->regs->SPI_CTRL_REG |= SPI_SPI_CTRL_REG_SPI_MINT_Msk;
	} else {
		cfg->regs->SPI_CTRL_REG &= ~SPI_SPI_CTRL_REG_SPI_MINT_Msk;
	}
}

static inline bool spi_smartbond_is_busy(const struct device *dev)
{
	const struct spi_smartbond_cfg *cfg = dev->config;

	return (cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_BUSY_Msk);
}

static inline void spi_smartbond_clear_interrupt(const struct device *dev)
{
	const struct spi_smartbond_cfg *cfg = dev->config;

	cfg->regs->SPI_CLEAR_INT_REG = 0x1;
}

/* 0 = No RX data available, 1 = data has been transmitted and received */
static inline bool spi_smartbond_is_rx_data(const struct device *dev)
{
	const struct spi_smartbond_cfg *cfg = dev->config;

	return (cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_INT_BIT_Msk);
}

static inline uint8_t spi_smartbond_get_fifo_mode(const struct device *dev)
{
	const struct spi_smartbond_cfg *cfg = dev->config;

	return ((cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_FIFO_MODE_Msk) >>
			SPI_SPI_CTRL_REG_SPI_FIFO_MODE_Pos);
}

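/*
 * Update the FIFO mode; the controller must be disabled while the mode is
 * changed and the previous enable state is restored afterwards. When DMA
 * is used with 32-bit frames, the FIFOs are bypassed entirely (forced to
 * SPI_SMARTBOND_FIFO_NONE, see the workaround note below).
 */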
static void spi_smartbond_set_fifo_mode(const struct device *dev, enum spi_smartbond_fifo_mode mode)
{
	const struct spi_smartbond_cfg *cfg = dev->config;
	bool is_enabled = spi_smartbond_isenabled(cfg);
	enum spi_smartbond_fifo_mode current_mode = spi_smartbond_get_fifo_mode(dev);
	uint32_t spi_ctrl_reg = cfg->regs->SPI_CTRL_REG;

#ifdef CONFIG_SPI_SMARTBOND_DMA
	struct spi_smartbond_data *data = dev->data;
#endif

	if ((current_mode != mode)
#ifdef CONFIG_SPI_SMARTBOND_DMA
		|| (data->dfs == 4)
#endif
		) {
		if (current_mode != SPI_SMARTBOND_FIFO_MODE_RX_ONLY) {
			while (spi_smartbond_is_busy(dev)) {
				;
			}
		}
		/* Controller should be disabled when FIFO mode is updated */
		cfg->regs->SPI_CTRL_REG &= ~SPI_SPI_CTRL_REG_SPI_ON_Msk;

#ifdef CONFIG_SPI_SMARTBOND_DMA
		/*
		 * Workaround for the controller that cannot generate DMA requests
		 * for 4-byte bus length.
		 */
		if (data->dfs == 4) {
			mode = SPI_SMARTBOND_FIFO_NONE;
		}
#endif
		SPI_CTRL_REG_SET_FIELD(SPI_FIFO_MODE, spi_ctrl_reg, mode);

		if (mode != SPI_SMARTBOND_FIFO_NONE) {
			SPI_CTRL_REG_SET_FIELD(SPI_DMA_TXREQ_MODE, spi_ctrl_reg, 0);
		} else {
			SPI_CTRL_REG_SET_FIELD(SPI_DMA_TXREQ_MODE, spi_ctrl_reg, 1);
		}

		if (is_enabled) {
			SPI_CTRL_REG_SET_FIELD(SPI_ON, spi_ctrl_reg, 1);
		}

		cfg->regs->SPI_CTRL_REG = spi_ctrl_reg;
	}
}

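/*
 * Derive the type of the next (sub-)transfer (TX-only, RX-only or
 * bi-directional) from the current state of the spi_context buffers and
 * program the matching FIFO mode. SPI_SMARTBOND_TRANSFER_NONE is returned
 * once both directions are exhausted.
 */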
static int spi_smartbond_transfer_mode_get(const struct device *dev)
{
	struct spi_smartbond_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;

	if (spi_context_rx_buf_on(ctx) || spi_context_tx_buf_on(ctx)) {
		/*
		 * Check only buffers' length as it might happen that current buffer is NULL.
		 * In such a case the context should be updated and a dummy write/read should
		 * take place.
		 */
		if (ctx->rx_len || ctx->tx_len) {
			spi_smartbond_set_fifo_mode(dev, SPI_SMARTBOND_FIFO_MODE_TX_RX);
			return SPI_SMARTBOND_TRANSFER_TX_RX;
		}

		if (!spi_context_rx_buf_on(ctx)) {
			spi_smartbond_set_fifo_mode(dev, SPI_SMARTBOND_FIFO_MODE_TX_ONLY);
			return SPI_SMARTBOND_TRANSFER_TX_ONLY;
		}

		if (!spi_context_tx_buf_on(ctx)) {
			/*
			 * Use the TX/RX mode with TX being dummy. Using the RX only mode
			 * is a bit tricky as the controller should generate clock cycles
			 * automatically and immediately after the ISR is enabled.
			 */
			spi_smartbond_set_fifo_mode(dev, SPI_SMARTBOND_FIFO_MODE_TX_RX);
			return SPI_SMARTBOND_TRANSFER_RX_ONLY;
		}
	}

	/* Return without updating the FIFO mode */
	return SPI_SMARTBOND_TRANSFER_NONE;
}

static inline void spi_smartbond_transfer_mode_check_and_update(const struct device *dev)
{
	struct spi_smartbond_data *data = dev->data;

	data->transfer_mode = spi_smartbond_transfer_mode_get(dev);
}
#endif

#ifdef CONFIG_SPI_ASYNC
static inline bool spi_smartbond_is_tx_full(const struct device *dev)
{
	const struct spi_smartbond_cfg *cfg = dev->config;

	return (cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_TXH_Msk);
}

static void spi_smartbond_write(const struct device *dev)
{
	struct spi_smartbond_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;

	while (spi_context_tx_buf_on(ctx)) {
		/* Check if the TX FIFO is full as otherwise undefined data would be transmitted. */
		if (spi_smartbond_is_tx_full(dev)) {
			spi_smartbond_clear_interrupt(dev);
			break;
		}
		/* Send to TX FIFO and update buffer pointer. */
		spi_smartbond_write_word(dev);
		spi_context_update_tx(ctx, data->dfs, 1);

		/*
		 * It might happen that a NULL buffer with a non-zero length is provided.
		 * In that case, the bytes should be consumed.
		 */
		if (ctx->rx_len && !ctx->rx_buf) {
			spi_smartbond_read_discard(dev);
			spi_context_update_rx(ctx, data->dfs, 1);
		}
	}
}

static void spi_smartbond_transfer(const struct device *dev)
{
	struct spi_smartbond_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;

	while (data->rx_len) {
		/* Zero means that RX FIFO or register is empty */
		if (!spi_smartbond_is_rx_data(dev)) {
			break;
		}

		if (ctx->rx_buf) {
			spi_smartbond_read_word(dev);
		} else {
			spi_smartbond_read_discard(dev);
		}
		spi_context_update_rx(ctx, data->dfs, 1);

		spi_smartbond_clear_interrupt(dev);

		data->rx_len--;
		data->transferred++;
	}

	while (data->tx_len) {
		/* Check if the TX FIFO is full as otherwise undefined data would be transmitted. */
		if (spi_smartbond_is_tx_full(dev)) {
			break;
		}

		if (ctx->tx_buf) {
			spi_smartbond_write_word(dev);
		} else {
			spi_smartbond_write_dummy(dev);
		}
		spi_context_update_tx(ctx, data->dfs, 1);

		data->tx_len--;
	}
}

static void spi_smartbond_read(const struct device *dev)
{
	struct spi_smartbond_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;

	while (spi_context_rx_buf_on(ctx)) {
		/* Zero means that RX FIFO or register is empty */
		if (!spi_smartbond_is_rx_data(dev)) {
			break;
		}

		spi_smartbond_read_word(dev);
		spi_context_update_rx(ctx, data->dfs, 1);
		spi_smartbond_clear_interrupt(dev);
	}

	/* Perform dummy accesses to generate the required clock cycles */
	while (data->tx_len) {
		if (spi_smartbond_is_tx_full(dev)) {
			break;
		}
		spi_smartbond_write_dummy(dev);

		data->tx_len--;
	}
}

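/*
 * Kick off an interrupt-driven transfer: classify the transfer, prime the
 * FIFOs with as much data as they can currently accept and then unmask
 * the SPI interrupt (SPI_MINT); the ISR drains and refills the FIFOs from
 * that point on.
 */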
static void spi_smartbond_isr_trigger(const struct device *dev)
{
	struct spi_smartbond_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;

	data->transfer_mode = spi_smartbond_transfer_mode_get(dev);

	switch (data->transfer_mode) {
	case SPI_SMARTBOND_TRANSFER_RX_ONLY:
		data->tx_len = spi_context_total_rx_len(ctx);
		spi_smartbond_read(dev);
		break;
	case SPI_SMARTBOND_TRANSFER_TX_ONLY:
		spi_smartbond_write(dev);
		break;
	case SPI_SMARTBOND_TRANSFER_TX_RX:
		/*
		 * Each sub-transfer in the descriptor list should be exercised
		 * separately as it might happen that a buffer is NULL with
		 * non-zero length.
		 */
		data->rx_len = spi_context_max_continuous_chunk(ctx);
		data->tx_len = data->rx_len;
		spi_smartbond_transfer(dev);
		break;
	case SPI_SMARTBOND_TRANSFER_NONE:
		__fallthrough;
	default:
		__ASSERT_MSG_INFO("Invalid transfer mode");
		break;
	}

	spi_smartbond_isr_set_status(dev, true);
}

static int spi_smartbond_transceive_async(const struct device *dev,
					  const struct spi_config *spi_cfg,
					  const struct spi_buf_set *tx_bufs,
					  const struct spi_buf_set *rx_bufs, spi_callback_t cb,
					  void *userdata)
{
	const struct spi_smartbond_cfg *cfg = dev->config;
	struct spi_smartbond_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	int rc;

	spi_context_lock(ctx, true, cb, userdata, spi_cfg);
	spi_smartbond_pm_policy_state_lock_get(dev);

	rc = spi_smartbond_configure(cfg, data, spi_cfg);
	if (rc == 0) {
		spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, data->dfs);
		spi_context_cs_control(ctx, true);

		/*
		 * PM constraints will be released within ISR once all transfers
		 * are exercised along with de-asserting the #CS line.
		 */
		spi_smartbond_isr_trigger(dev);
	}
	/*
	 * Context will actually be released when \sa spi_context_complete
	 * is called.
	 */
	spi_context_release(ctx, rc);

	return rc;
}
#endif

#if defined(CONFIG_SPI_ASYNC) || defined(CONFIG_SPI_SMARTBOND_DMA)
static void spi_smartbond_isr(void *args)
{
#ifdef CONFIG_SPI_ASYNC
	struct device *dev = args;
	struct spi_smartbond_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;

	switch (data->transfer_mode) {
	case SPI_SMARTBOND_TRANSFER_RX_ONLY:
		spi_smartbond_read(dev);
		break;
	case SPI_SMARTBOND_TRANSFER_TX_ONLY:
		spi_smartbond_write(dev);
		break;
	case SPI_SMARTBOND_TRANSFER_TX_RX:
		/* Determine the type of the next sub-transfer */
		if (!data->rx_len && !data->tx_len) {
			spi_smartbond_transfer_mode_check_and_update(dev);

			if (data->transfer_mode == SPI_SMARTBOND_TRANSFER_RX_ONLY) {
				data->tx_len = spi_context_total_rx_len(ctx) - data->transferred;
				/* Clear in case another truncated transfer should be executed */
				data->transferred = 0;
				spi_smartbond_read(dev);
			} else if (data->transfer_mode == SPI_SMARTBOND_TRANSFER_TX_ONLY) {
				spi_smartbond_write(dev);
			} else if (data->transfer_mode == SPI_SMARTBOND_TRANSFER_TX_RX) {
				data->rx_len = spi_context_max_continuous_chunk(ctx);
				data->tx_len = data->rx_len;
				spi_smartbond_transfer(dev);
			}
		} else {
			spi_smartbond_transfer(dev);
		}
		break;
	case SPI_SMARTBOND_TRANSFER_NONE:
		__fallthrough;
	default:
		__ASSERT_MSG_INFO("Invalid transfer mode");
		break;
	}

	/* All buffers have been exercised, signal completion */
	if (!spi_context_tx_buf_on(ctx) && !spi_context_rx_buf_on(ctx)) {
		spi_smartbond_isr_set_status(dev, false);

		/* Mark completion to trigger callback function */
		spi_context_complete(ctx, dev, 0);

		spi_context_cs_control(ctx, false);
		spi_smartbond_pm_policy_state_lock_put(dev);
	}
#endif
}
#endif

#ifdef CONFIG_SPI_SMARTBOND_DMA
static uint32_t spi_smartbond_read_dummy_buf;

/*
 * Should be used to flush the RX FIFO in case a transaction is requested
 * with a NULL pointer and non-zero length. In such a case, data will be
 * shifted into the RX FIFO (regardless of whether or not the RX mode is
 * disabled) which should then be flushed. Otherwise, a subsequent read
 * operation would fetch stale bytes.
 */
static void spi_smartbond_flush_rx_fifo(const struct device *dev)
{
	while (spi_smartbond_is_busy(dev)) {
	}
	while (spi_smartbond_is_rx_data(dev)) {
		spi_smartbond_read_discard(dev);
		spi_smartbond_clear_interrupt(dev);
	}
}

static int spi_smartbond_dma_tx_channel_request(const struct device *dev)
{
	struct spi_smartbond_data *data = dev->data;
	const struct spi_smartbond_cfg *config = dev->config;

	if (!atomic_test_and_set_bit(data->dma_channel_atomic_flag,
		SPI_SMARTBOND_DMA_TX_CHANNEL)) {
		if (dma_request_channel(config->tx_dma_ctrl, (void *)&config->tx_dma_chan) < 0) {
			atomic_clear_bit(data->dma_channel_atomic_flag,
				SPI_SMARTBOND_DMA_TX_CHANNEL);
			return -EIO;
		}
	}

	return 0;
}

#ifdef CONFIG_PM_DEVICE
static void spi_smartbond_dma_tx_channel_release(const struct device *dev)
{
	struct spi_smartbond_data *data = dev->data;
	const struct spi_smartbond_cfg *config = dev->config;

	if (atomic_test_and_clear_bit(data->dma_channel_atomic_flag,
		SPI_SMARTBOND_DMA_TX_CHANNEL)) {
		dma_release_channel(config->tx_dma_ctrl, config->tx_dma_chan);
	}
}
#endif

static int spi_smartbond_dma_rx_channel_request(const struct device *dev)
{
	struct spi_smartbond_data *data = dev->data;
	const struct spi_smartbond_cfg *config = dev->config;

	if (!atomic_test_and_set_bit(data->dma_channel_atomic_flag,
		SPI_SMARTBOND_DMA_RX_CHANNEL)) {
		if (dma_request_channel(config->rx_dma_ctrl, (void *)&config->rx_dma_chan) < 0) {
			atomic_clear_bit(data->dma_channel_atomic_flag,
				SPI_SMARTBOND_DMA_RX_CHANNEL);
			return -EIO;
		}
	}

	return 0;
}

#ifdef CONFIG_PM_DEVICE
static void spi_smartbond_dma_rx_channel_release(const struct device *dev)
{
	struct spi_smartbond_data *data = dev->data;
	const struct spi_smartbond_cfg *config = dev->config;

	if (atomic_test_and_clear_bit(data->dma_channel_atomic_flag,
		SPI_SMARTBOND_DMA_RX_CHANNEL)) {
		dma_release_channel(config->rx_dma_ctrl, config->rx_dma_chan);
	}
}
#endif

static void spi_smartbond_tx_dma_cb(const struct device *dma, void *arg,
		uint32_t id, int status)
{
	const struct device *dev = arg;
	struct spi_smartbond_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;

	if (status < 0) {
		LOG_WRN("DMA transfer did not complete");
	}

	spi_context_update_tx(ctx, data->dfs, data->tx_len);
	k_sem_give(&data->tx_dma_sync);
}

static void spi_smartbond_rx_dma_cb(const struct device *dma, void *arg,
		uint32_t id, int status)
{
	const struct device *dev = arg;
	struct spi_smartbond_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;

	if (status < 0) {
		LOG_WRN("DMA transfer did not complete");
	}

	spi_context_update_rx(ctx, data->dfs, data->rx_len);
	k_sem_give(&data->rx_dma_sync);
}

#ifdef CONFIG_PM_DEVICE
static void spi_smartbond_dma_deconfig(const struct device *dev)
{
	const struct spi_smartbond_cfg *config = dev->config;

	if (config->rx_dma_ctrl && config->tx_dma_ctrl) {
		dma_stop(config->rx_dma_ctrl, config->rx_dma_chan);
		dma_stop(config->tx_dma_ctrl, config->tx_dma_chan);

		spi_smartbond_dma_rx_channel_release(dev);
		spi_smartbond_dma_tx_channel_release(dev);
	}
}
#endif

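/*
 * One-time DMA setup: validate the RX/TX channel pairing and slot muxing,
 * claim both channels and pre-fill the dma_config/dma_block_config
 * templates. The per-transfer fields (addresses, block sizes, data sizes)
 * are filled in by spi_smartbond_dma_trigger().
 */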
static int spi_smartbond_dma_config(const struct device *dev)
{
	struct spi_smartbond_data *data = dev->data;
	const struct spi_smartbond_cfg *config = dev->config;
	struct dma_config *tx = &data->tx_dma_cfg;
	struct dma_config *rx = &data->rx_dma_cfg;
	struct dma_block_config *tx_block = &data->tx_dma_block_cfg;
	struct dma_block_config *rx_block = &data->rx_dma_block_cfg;

	/*
	 * DMA RX should be assigned an even channel number and
	 * DMA TX the next (odd) one.
	 */
	if (!(config->tx_dma_chan & 0x1) ||
			(config->rx_dma_chan & 0x1) ||
			(config->tx_dma_chan != (config->rx_dma_chan + 1))) {
		LOG_ERR("Invalid RX/TX channel selection");
		return -EINVAL;
	}

	if (config->tx_slot_mux != config->rx_slot_mux) {
		LOG_ERR("TX/RX DMA slots mismatch");
		return -EINVAL;
	}

	if (!device_is_ready(config->tx_dma_ctrl) ||
		!device_is_ready(config->rx_dma_ctrl)) {
		LOG_ERR("TX/RX DMA device is not ready");
		return -ENODEV;
	}

	if (spi_smartbond_dma_tx_channel_request(dev) < 0) {
		LOG_ERR("TX DMA channel is already occupied");
		return -EIO;
	}

	if (spi_smartbond_dma_rx_channel_request(dev) < 0) {
		LOG_ERR("RX DMA channel is already occupied");
		return -EIO;
	}

	tx->channel_direction = MEMORY_TO_PERIPHERAL;
	tx->dma_callback = spi_smartbond_tx_dma_cb;
	tx->user_data = (void *)dev;
	tx->block_count = 1;
	tx->head_block = &data->tx_dma_block_cfg;
	tx->error_callback_dis = 1;
	tx->dma_slot = config->tx_slot_mux;
	tx->channel_priority = 2;

	/* Burst mode is not used when DREQ is one */
	tx->source_burst_length = 1;
	tx->dest_burst_length = 1;
	/* Source and destination data size should reflect the DFS value */
	tx->source_data_size = 0;
	tx->dest_data_size = 0;

	/* Do not change */
	tx_block->dest_addr_adj = 0x2;
	/* Incremental */
	tx_block->source_addr_adj = 0x0;
	tx_block->dest_address = (uint32_t)&config->regs->SPI_RX_TX_REG;

	/*
	 * To be filled when a transaction is requested and
	 * should reflect the total number of bytes.
	 */
	tx_block->block_size = 0;
	/* Should reflect the TX buffer */
	tx_block->source_address = 0;

	rx->channel_direction = PERIPHERAL_TO_MEMORY;
	rx->dma_callback = spi_smartbond_rx_dma_cb;
	rx->user_data = (void *)dev;
	rx->block_count = 1;
	rx->head_block = &data->rx_dma_block_cfg;
	rx->error_callback_dis = 1;
	rx->dma_slot = config->rx_slot_mux;
	rx->channel_priority = 2;

	/* Burst mode is not used when DREQ is one */
	rx->source_burst_length = 1;
	rx->dest_burst_length = 1;
	/* Source and destination data size should reflect the DFS value */
	rx->source_data_size = 0;
	rx->dest_data_size = 0;

	/* Do not change */
	rx_block->source_addr_adj = 0x2;
	/* Incremental */
	rx_block->dest_addr_adj = 0x0;
	rx_block->source_address = (uint32_t)&config->regs->SPI_RX_TX_REG;

	/*
	 * To be filled when a transaction is requested and
	 * should reflect the total number of bytes.
	 */
	rx_block->block_size = 0;
	/* Should reflect the RX buffer */
	rx_block->dest_address = 0;

	return 0;
}

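/*
 * Execute a transfer via DMA, one continuous chunk at a time. RX-only
 * chunks clock dummy bytes out of a scratch word, TX-only chunks simply
 * stream the buffer, and bi-directional chunks arm both channels. The
 * function blocks on the DMA completion semaphores and so must be called
 * from thread context.
 */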
static int spi_smartbond_dma_trigger(const struct device *dev)
{
	struct spi_smartbond_data *data = dev->data;
	const struct spi_smartbond_cfg *config = dev->config;
	struct spi_context *ctx = &data->ctx;
	struct dma_config *tx = &data->tx_dma_cfg;
	struct dma_config *rx = &data->rx_dma_cfg;
	struct dma_block_config *tx_block = &data->tx_dma_block_cfg;
	struct dma_block_config *rx_block = &data->rx_dma_block_cfg;

	rx->source_data_size = data->dfs;
	rx->dest_data_size = data->dfs;
	tx->source_data_size = data->dfs;
	tx->dest_data_size = data->dfs;

	data->transfer_mode = spi_smartbond_transfer_mode_get(dev);
	do {
		switch (data->transfer_mode) {
		case SPI_SMARTBOND_TRANSFER_RX_ONLY:
			spi_smartbond_flush_rx_fifo(dev);

			data->rx_len = spi_context_max_continuous_chunk(ctx);
			data->tx_len = data->rx_len;

			rx_block->block_size = data->rx_len * data->dfs;
			tx_block->block_size = rx_block->block_size;

			rx_block->dest_address = (uint32_t)ctx->rx_buf;
			rx_block->dest_addr_adj = 0x0;
			tx_block->source_address = (uint32_t)&spi_smartbond_read_dummy_buf;
			/* Non-incremental */
			tx_block->source_addr_adj = 0x2;

			if (dma_config(config->tx_dma_ctrl, config->tx_dma_chan, tx) < 0) {
				LOG_ERR("TX DMA configuration failed");
				return -EINVAL;
			}
			if (dma_config(config->rx_dma_ctrl, config->rx_dma_chan, rx) < 0) {
				LOG_ERR("RX DMA configuration failed");
				return -EINVAL;
			}
			dma_start(config->rx_dma_ctrl, config->rx_dma_chan);
			dma_start(config->tx_dma_ctrl, config->tx_dma_chan);

			/* Wait for the current DMA transfer to complete */
			k_sem_take(&data->tx_dma_sync, K_FOREVER);
			k_sem_take(&data->rx_dma_sync, K_FOREVER);
			break;
		case SPI_SMARTBOND_TRANSFER_TX_ONLY:
			spi_smartbond_flush_rx_fifo(dev);

			data->tx_len = spi_context_max_continuous_chunk(ctx);
			data->rx_len = data->tx_len;

			tx_block->block_size = data->tx_len * data->dfs;
			tx_block->source_address = (uint32_t)ctx->tx_buf;
			tx_block->source_addr_adj = 0x0;

			if (dma_config(config->tx_dma_ctrl, config->tx_dma_chan, tx) < 0) {
				LOG_ERR("TX DMA configuration failed");
				return -EINVAL;
			}
			dma_start(config->tx_dma_ctrl, config->tx_dma_chan);

			/* Wait for the current DMA transfer to complete */
			k_sem_take(&data->tx_dma_sync, K_FOREVER);
			break;
		case SPI_SMARTBOND_TRANSFER_TX_RX:
			spi_smartbond_flush_rx_fifo(dev);

			data->rx_len = spi_context_max_continuous_chunk(ctx);
			data->tx_len = data->rx_len;
			/*
			 * The DMA block size represents the total number of bytes, whilst
			 * the context length is divided by the data size (dfs).
			 */
			tx_block->block_size = data->tx_len * data->dfs;
			rx_block->block_size = tx_block->block_size;

			if (ctx->tx_buf) {
				tx_block->source_address = (uint32_t)ctx->tx_buf;
				tx_block->source_addr_adj = 0x0;
			} else {
				tx_block->source_address = (uint32_t)&spi_smartbond_read_dummy_buf;
				tx_block->source_addr_adj = 0x2;
			}

			if (ctx->rx_buf) {
				rx_block->dest_address = (uint32_t)ctx->rx_buf;
				rx_block->dest_addr_adj = 0x0;
			} else {
				rx_block->dest_address = (uint32_t)&spi_smartbond_read_dummy_buf;
				rx_block->dest_addr_adj = 0x2;
			}

			if (dma_config(config->tx_dma_ctrl, config->tx_dma_chan, tx) < 0) {
				LOG_ERR("TX DMA configuration failed");
				return -EINVAL;
			}
			if (dma_config(config->rx_dma_ctrl, config->rx_dma_chan, rx) < 0) {
				LOG_ERR("RX DMA configuration failed");
				return -EINVAL;
			}
			dma_start(config->rx_dma_ctrl, config->rx_dma_chan);
			dma_start(config->tx_dma_ctrl, config->tx_dma_chan);

			k_sem_take(&data->tx_dma_sync, K_FOREVER);
			k_sem_take(&data->rx_dma_sync, K_FOREVER);

			/*
			 * Regardless of whether or not the RX FIFO is enabled, received
			 * bytes are pushed into it. As such, the RX FIFO should be
			 * flushed so that the next read access retrieves the correct
			 * bytes and not old ones.
			 */
			if (!ctx->rx_buf) {
				spi_smartbond_flush_rx_fifo(dev);
			}
			break;
		case SPI_SMARTBOND_TRANSFER_NONE:
			__fallthrough;
		default:
			__ASSERT_MSG_INFO("Invalid transfer mode");
			break;
		}

		spi_smartbond_transfer_mode_check_and_update(dev);
	} while (data->transfer_mode != SPI_SMARTBOND_TRANSFER_NONE);

	return 0;
}
#endif

static int spi_smartbond_transceive(const struct device *dev, const struct spi_config *spi_cfg,
				    const struct spi_buf_set *tx_bufs,
				    const struct spi_buf_set *rx_bufs)
{
	const struct spi_smartbond_cfg *cfg = dev->config;
	struct spi_smartbond_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	int rc;

	spi_context_lock(&data->ctx, false, NULL, NULL, spi_cfg);
	spi_smartbond_pm_policy_state_lock_get(dev);

	rc = spi_smartbond_configure(cfg, data, spi_cfg);
	if (rc == 0) {
		spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, data->dfs);
		spi_context_cs_control(ctx, true);

#ifdef CONFIG_SPI_SMARTBOND_DMA
		rc = spi_smartbond_dma_trigger(dev);
		/* Mark completion to trigger callback function */
		spi_context_complete(ctx, dev, 0);
#else
		while (spi_context_tx_buf_on(ctx) || spi_context_rx_buf_on(ctx)) {
			if (spi_context_tx_buf_on(ctx)) {
				spi_smartbond_write_word(dev);
				spi_context_update_tx(ctx, data->dfs, 1);
			} else {
				spi_smartbond_write_dummy(dev);
			}

			while (!(cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_INT_BIT_Msk)) {
			}
			if (spi_context_rx_buf_on(ctx)) {
				spi_smartbond_read_word(dev);
				spi_context_update_rx(ctx, data->dfs, 1);
			} else {
				spi_smartbond_read_discard(dev);
				/*
				 * It might happen that a NULL buffer with a non-zero length
				 * is provided. In that case, the bytes should be consumed.
				 */
				if (ctx->rx_len) {
					spi_context_update_rx(ctx, data->dfs, 1);
				}
			}
			cfg->regs->SPI_CLEAR_INT_REG = 1UL;
		}
#endif

		spi_context_cs_control(ctx, false);
	}
	spi_context_release(&data->ctx, rc);

	spi_smartbond_pm_policy_state_lock_put(dev);

	return rc;
}

static int spi_smartbond_release(const struct device *dev, const struct spi_config *spi_cfg)
{
	struct spi_smartbond_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;

	if (!spi_context_configured(ctx, spi_cfg)) {
		LOG_ERR("SPI configuration was not the last one to be used");
		return -EINVAL;
	}

	spi_context_unlock_unconditionally(ctx);

	return 0;
}

static DEVICE_API(spi, spi_smartbond_driver_api) = {
	.transceive = spi_smartbond_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_smartbond_transceive_async,
#endif
#ifdef CONFIG_SPI_RTIO
	.iodev_submit = spi_rtio_iodev_default_submit,
#endif
	.release = spi_smartbond_release,
};

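/*
 * Enable the SPI peripheral clock, apply the default pinctrl state,
 * configure the CS GPIOs and, when DMA support is enabled, set up the
 * DMA channels. Called at init time and on PM resume.
 */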
static int spi_smartbond_resume(const struct device *dev)
{
	const struct spi_smartbond_cfg *cfg = dev->config;
	struct spi_smartbond_data *data = dev->data;
	int rc;

	CRG_COM->RESET_CLK_COM_REG = cfg->periph_clock_config << 1;
	CRG_COM->SET_CLK_COM_REG = cfg->periph_clock_config;

	rc = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (rc < 0) {
		LOG_ERR("Failed to configure SPI pins");
		return rc;
	}

	rc = spi_context_cs_configure_all(&data->ctx);
	if (rc < 0) {
		LOG_ERR("Failed to configure CS pins: %d", rc);
		return rc;
	}

#ifdef CONFIG_SPI_SMARTBOND_DMA
	rc = spi_smartbond_dma_config(dev);
	if (rc < 0) {
		LOG_ERR("Failed to configure DMA");
		return rc;
	}
#endif

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

#if defined(CONFIG_PM_DEVICE)
static int spi_smartbond_suspend(const struct device *dev)
{
	int ret;
	const struct spi_smartbond_cfg *config = dev->config;
	struct spi_smartbond_data *data = dev->data;

	data->spi_ctrl_reg = config->regs->SPI_CTRL_REG;
	/* Disable the SPI digital block */
	config->regs->SPI_CTRL_REG &= ~SPI_SPI_CTRL_REG_SPI_EN_CTRL_Msk;
	/* Gate SPI clocking */
	CRG_COM->RESET_CLK_COM_REG = config->periph_clock_config;

	ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_SLEEP);
	if (ret < 0) {
		LOG_WRN("Failed to configure the SPI pins to inactive state");
	}

#ifdef CONFIG_SPI_SMARTBOND_DMA
	spi_smartbond_dma_deconfig(dev);
#endif

	return ret;
}

static int spi_smartbond_pm_action(const struct device *dev,
				   enum pm_device_action action)
{
	int ret;

	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		da1469x_pd_acquire(MCU_PD_DOMAIN_COM);
		ret = spi_smartbond_resume(dev);
		break;
	case PM_DEVICE_ACTION_SUSPEND:
		ret = spi_smartbond_suspend(dev);
		da1469x_pd_release(MCU_PD_DOMAIN_COM);
		break;
	default:
		ret = -ENOTSUP;
	}

	return ret;
}
#endif

#define SPI_SMARTBOND_ISR_CONNECT \
		IRQ_CONNECT(DT_IRQN(DT_NODELABEL(spi)), DT_IRQ(DT_NODELABEL(spi), priority), \
			spi_smartbond_isr, DEVICE_DT_GET(DT_NODELABEL(spi)), 0); \
		irq_enable(DT_IRQN(DT_NODELABEL(spi)));

#define SPI2_SMARTBOND_ISR_CONNECT \
		IRQ_CONNECT(DT_IRQN(DT_NODELABEL(spi2)), DT_IRQ(DT_NODELABEL(spi2), priority), \
			spi_smartbond_isr, DEVICE_DT_GET(DT_NODELABEL(spi2)), 0); \
		irq_enable(DT_IRQN(DT_NODELABEL(spi2)));

#if defined(CONFIG_SPI_ASYNC) || defined(CONFIG_SPI_SMARTBOND_DMA)
static int spi_smartbond_isr_connect(const struct device *dev)
{
	const struct spi_smartbond_cfg *cfg = dev->config;

	switch ((uint32_t)cfg->regs) {
	case (uint32_t)SPI:
		COND_CODE_1(DT_NODE_HAS_STATUS_OKAY(DT_NODELABEL(spi)),
			(SPI_SMARTBOND_ISR_CONNECT), (NULL));
		break;
	case (uint32_t)SPI2:
		COND_CODE_1(DT_NODE_HAS_STATUS_OKAY(DT_NODELABEL(spi2)),
			(SPI2_SMARTBOND_ISR_CONNECT), (NULL));
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
#endif

static int spi_smartbond_init(const struct device *dev)
{
	int ret;
	struct spi_smartbond_data *data = dev->data;

#if defined(CONFIG_SPI_ASYNC) || defined(CONFIG_SPI_SMARTBOND_DMA)
	data->transfer_mode = SPI_SMARTBOND_TRANSFER_NONE;
#endif
#ifdef CONFIG_SPI_SMARTBOND_DMA
	k_sem_init(&data->tx_dma_sync, 0, 1);
	k_sem_init(&data->rx_dma_sync, 0, 1);
#endif

#ifdef CONFIG_PM_DEVICE_RUNTIME
	/* Make sure device state is marked as suspended */
	pm_device_init_suspended(dev);

	ret = pm_device_runtime_enable(dev);

#else
	da1469x_pd_acquire(MCU_PD_DOMAIN_COM);
	ret = spi_smartbond_resume(dev);
#endif
	spi_context_unlock_unconditionally(&data->ctx);

#if defined(CONFIG_SPI_ASYNC) || defined(CONFIG_SPI_SMARTBOND_DMA)
	ret = spi_smartbond_isr_connect(dev);
#endif

	return ret;
}

#ifdef CONFIG_SPI_SMARTBOND_DMA
#define SPI_SMARTBOND_DMA_TX_INIT(id) \
	.tx_dma_chan = DT_INST_DMAS_CELL_BY_NAME(id, tx, channel), \
	.tx_slot_mux = (uint8_t)DT_INST_DMAS_CELL_BY_NAME(id, tx, config), \
	.tx_dma_ctrl = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, tx)),
#else
#define SPI_SMARTBOND_DMA_TX_INIT(id)
#endif

#ifdef CONFIG_SPI_SMARTBOND_DMA
#define SPI_SMARTBOND_DMA_RX_INIT(id) \
	.rx_dma_chan = DT_INST_DMAS_CELL_BY_NAME(id, rx, channel),	\
	.rx_slot_mux = (uint8_t)DT_INST_DMAS_CELL_BY_NAME(id, rx, config),	\
	.rx_dma_ctrl = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, rx)),
#else
#define SPI_SMARTBOND_DMA_RX_INIT(id)
#endif

#ifdef CONFIG_SPI_SMARTBOND_DMA
#define SPI_SMARTBOND_DMA_TX_INVALIDATE(id) \
	.tx_dma_chan = 255, \
	.tx_slot_mux = 255, \
	.tx_dma_ctrl = NULL,
#else
#define SPI_SMARTBOND_DMA_TX_INVALIDATE(id)
#endif

#ifdef CONFIG_SPI_SMARTBOND_DMA
#define SPI_SMARTBOND_DMA_RX_INVALIDATE(id) \
	.rx_dma_chan = 255, \
	.rx_slot_mux = 255, \
	.rx_dma_ctrl = NULL,
#else
#define SPI_SMARTBOND_DMA_RX_INVALIDATE(id)
#endif

#define SPI_SMARTBOND_DEVICE(id)                                                                   \
	PINCTRL_DT_INST_DEFINE(id);                                                                \
	static const struct spi_smartbond_cfg spi_smartbond_##id##_cfg = {                         \
		.regs = (SPI_Type *)DT_INST_REG_ADDR(id),                                          \
		.periph_clock_config = DT_INST_PROP(id, periph_clock_config),                      \
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(id),                                        \
		COND_CODE_1(DT_INST_DMAS_HAS_NAME(id, tx),	\
			(SPI_SMARTBOND_DMA_TX_INIT(id)),	\
			(SPI_SMARTBOND_DMA_TX_INVALIDATE(id))) \
		COND_CODE_1(DT_INST_DMAS_HAS_NAME(id, rx), \
			(SPI_SMARTBOND_DMA_RX_INIT(id)),	\
			(SPI_SMARTBOND_DMA_RX_INVALIDATE(id)))	\
	};                                                                                         \
	static struct spi_smartbond_data spi_smartbond_##id##_data = {                             \
		SPI_CONTEXT_INIT_LOCK(spi_smartbond_##id##_data, ctx),                             \
		SPI_CONTEXT_INIT_SYNC(spi_smartbond_##id##_data, ctx),                             \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(id), ctx)};                            \
	PM_DEVICE_DT_INST_DEFINE(id, spi_smartbond_pm_action);                                     \
	SPI_DEVICE_DT_INST_DEFINE(id,                                                              \
			      spi_smartbond_init,                                                  \
			      PM_DEVICE_DT_INST_GET(id),                                           \
			      &spi_smartbond_##id##_data,                                          \
			      &spi_smartbond_##id##_cfg,                                           \
			      POST_KERNEL, CONFIG_SPI_INIT_PRIORITY,                               \
			      &spi_smartbond_driver_api);

DT_INST_FOREACH_STATUS_OKAY(SPI_SMARTBOND_DEVICE)