1 /*
2  * Copyright (c) 2024 Daikin Comfort Technologies North America, Inc.
3  * Copyright (c) 2025 Silicon Laboratories Inc.
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #define DT_DRV_COMPAT silabs_eusart_spi
9 
10 #include <stdbool.h>
11 #include <stddef.h>
12 
13 #include <zephyr/sys/sys_io.h>
14 #include <zephyr/sys/util.h>
15 #include <zephyr/device.h>
16 #include <zephyr/logging/log.h>
17 #include <zephyr/drivers/spi.h>
18 #include <zephyr/drivers/clock_control.h>
19 #include <zephyr/drivers/clock_control/clock_control_silabs.h>
20 #include <zephyr/drivers/pinctrl.h>
21 #include <zephyr/drivers/dma/dma_silabs_ldma.h>
22 #include <zephyr/drivers/dma.h>
23 #include <zephyr/pm/device.h>
24 #include <zephyr/pm/policy.h>
25 #include <zephyr/pm/device_runtime.h>
26 #include <em_cmu.h>
27 #include <em_eusart.h>
28 
29 LOG_MODULE_REGISTER(spi_silabs_eusart, CONFIG_SPI_LOG_LEVEL);
30 
31 /* Required by spi_context.h */
32 #include "spi_context.h"
33 
#if defined(CONFIG_SPI_ASYNC) && !defined(CONFIG_SPI_SILABS_EUSART_DMA)
#warning "Silabs eusart SPI driver ASYNC without DMA is not supported"
#endif

/* Only 8-bit SPI frames are supported by this driver. */
#define SPI_WORD_SIZE 8
#ifdef CONFIG_SPI_SILABS_EUSART_DMA
/* Maximum byte count a single DMA descriptor may transfer
 * (presumably the LDMA XFERCNT hardware limit — TODO confirm).
 */
#define SPI_DMA_MAX_DESCRIPTOR_TRANSFER_SIZE (0x800U)

/* Per-direction (TX or RX) DMA channel state for one SPI instance. */
struct dma_channel {
	/* DMA controller servicing this channel. */
	const struct device *dma_dev;
	/* Peripheral request slot for this EUSART direction. */
	uint8_t dma_slot;
	/* Allocated channel number; -1 while not yet requested. */
	int chan_nb;
	/* Descriptor pool used to build the transfer chain. */
	struct dma_block_config dma_descriptors[CONFIG_SPI_SILABS_EUSART_DMA_MAX_BLOCKS];
};
#endif
49 
/* Per-instance mutable driver data. */
struct spi_silabs_eusart_data {
	/* Generic SPI context: lock, completion sync, buffers, CS GPIO. */
	struct spi_context ctx;
#ifdef CONFIG_SPI_SILABS_EUSART_DMA
	/* DMA channel used for reception. */
	struct dma_channel dma_chan_rx;
	/* DMA channel used for transmission. */
	struct dma_channel dma_chan_tx;
#endif
};
57 
/* Per-instance constant configuration, filled from devicetree. */
struct spi_silabs_eusart_config {
	/* EUSART peripheral register block. */
	EUSART_TypeDef *base;
	/* Clock controller and the subsystem config for this peripheral. */
	const struct device *clock_dev;
	const struct silabs_clock_control_cmu_config clock_cfg;
	/* Upper bound on the SPI bitrate (devicetree clock-frequency). */
	uint32_t clock_frequency;
	/* Pin control configuration (default and sleep states). */
	const struct pinctrl_dev_config *pcfg;
	/* Byte clocked out on MOSI when the TX buffer is exhausted. */
	uint8_t mosi_overrun;
};
66 
#ifdef CONFIG_SPI_SILABS_EUSART_DMA
/* Write-only sink for RX DMA when the caller supplied no receive buffer. */
static volatile uint8_t empty_buffer;
#endif
70 
/* Return true when this instance was given DMA channels in devicetree.
 * TX and RX are expected to be both configured or both absent.
 */
static bool spi_silabs_eusart_is_dma_enabled_instance(const struct device *dev)
{
#ifdef CONFIG_SPI_SILABS_EUSART_DMA
	struct spi_silabs_eusart_data *data = dev->data;

	__ASSERT_NO_MSG(!!data->dma_chan_tx.dma_dev == !!data->dma_chan_rx.dma_dev);

	return data->dma_chan_rx.dma_dev != NULL;
#else
	return false;
#endif
}
83 
/* Apply @config to the EUSART instance.
 *
 * Validates the requested operation (8-bit words, master only, full duplex,
 * single line mode), computes the bitrate, optionally claims DMA channels,
 * enables the peripheral clock and (re)initializes the EUSART.
 *
 * Returns 0 on success, a negative errno otherwise. If enabling the clock
 * fails after DMA channels were claimed, the channels are released again.
 */
static int spi_silabs_eusart_configure(const struct device *dev, const struct spi_config *config)
{
	struct spi_silabs_eusart_data *data = dev->data;
	const struct spi_silabs_eusart_config *eusart_cfg = dev->config;
	uint32_t spi_frequency;

	EUSART_SpiAdvancedInit_TypeDef eusartAdvancedSpiInit = EUSART_SPI_ADVANCED_INIT_DEFAULT;
	EUSART_SpiInit_TypeDef eusartInit = EUSART_SPI_MASTER_INIT_DEFAULT_HF;

	int err;

	if (spi_context_configured(&data->ctx, config)) {
		/* Already configured. No need to do it again, but must re-enable in case
		 * TXEN/RXEN were cleared.
		 */
		EUSART_Enable(eusart_cfg->base, eusartEnable);

		return 0;
	}

	/* Source clock rate bounds the achievable SPI bitrate. */
	err = clock_control_get_rate(eusart_cfg->clock_dev,
				     (clock_control_subsys_t)&eusart_cfg->clock_cfg,
				     &spi_frequency);
	if (err) {
		return err;
	}
	/* Max supported SPI frequency is half the source clock */
	spi_frequency /= 2;

	if (config->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	if (SPI_WORD_SIZE_GET(config->operation) != SPI_WORD_SIZE) {
		LOG_ERR("Word size must be %d", SPI_WORD_SIZE);
		return -ENOTSUP;
	}

	if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) &&
	    (config->operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) {
		LOG_ERR("Only supports single mode");
		return -ENOTSUP;
	}

	if (config->operation & SPI_OP_MODE_SLAVE) {
		LOG_ERR("Slave mode not supported");
		return -ENOTSUP;
	}

	/* Set frequency to the minimum of what the device supports, what the
	 * user has configured the controller to, and the max frequency for the
	 * transaction.
	 */
	if (eusart_cfg->clock_frequency > spi_frequency) {
		LOG_ERR("SPI clock-frequency too high");
		return -EINVAL;
	}
	spi_frequency = MIN(eusart_cfg->clock_frequency, spi_frequency);
	if (config->frequency) {
		spi_frequency = MIN(config->frequency, spi_frequency);
	}
	eusartInit.bitRate = spi_frequency;

	if (config->operation & SPI_MODE_LOOP) {
		eusartInit.loopbackEnable = eusartLoopbackEnable;
	} else {
		eusartInit.loopbackEnable = eusartLoopbackDisable;
	}

	/* Set Clock Mode: CPOL selects modes 2/3, CPHA selects the odd modes. */
	if (config->operation & SPI_MODE_CPOL) {
		if (config->operation & SPI_MODE_CPHA) {
			eusartInit.clockMode = eusartClockMode3;
		} else {
			eusartInit.clockMode = eusartClockMode2;
		}
	} else {
		if (config->operation & SPI_MODE_CPHA) {
			eusartInit.clockMode = eusartClockMode1;
		} else {
			eusartInit.clockMode = eusartClockMode0;
		}
	}

	if (config->operation & SPI_CS_ACTIVE_HIGH) {
		eusartAdvancedSpiInit.csPolarity = eusartCsActiveHigh;
	} else {
		eusartAdvancedSpiInit.csPolarity = eusartCsActiveLow;
	}

	/* Hardware-driven CS is only used when no CS GPIO is configured. */
	eusartAdvancedSpiInit.msbFirst = !(config->operation & SPI_TRANSFER_LSB);
	eusartAdvancedSpiInit.autoCsEnable = !spi_cs_is_gpio(config);
	eusartInit.databits = eusartDataBits8;
	eusartInit.advancedSettings = &eusartAdvancedSpiInit;

#ifdef CONFIG_SPI_SILABS_EUSART_DMA
	if (spi_silabs_eusart_is_dma_enabled_instance(dev)) {
		if (!device_is_ready(data->dma_chan_tx.dma_dev)) {
			return -ENODEV;
		}

		/* 1-frame watermarks so DMA requests fire per byte. */
		eusartAdvancedSpiInit.TxFifoWatermark = eusartTxFiFoWatermark1Frame;
		eusartAdvancedSpiInit.RxFifoWatermark = eusartRxFiFoWatermark1Frame;

		/* Channels are requested lazily and kept across reconfigurations. */
		if (data->dma_chan_rx.chan_nb < 0) {
			data->dma_chan_rx.chan_nb =
				dma_request_channel(data->dma_chan_rx.dma_dev, NULL);
		}

		if (data->dma_chan_rx.chan_nb < 0) {
			LOG_ERR("DMA channel request failed");
			return -EAGAIN;
		}

		if (data->dma_chan_tx.chan_nb < 0) {
			data->dma_chan_tx.chan_nb =
				dma_request_channel(data->dma_chan_tx.dma_dev, NULL);
		}

		if (data->dma_chan_tx.chan_nb < 0) {
			/* TX request failed: give the RX channel back too. */
			dma_release_channel(data->dma_chan_rx.dma_dev, data->dma_chan_rx.chan_nb);
			data->dma_chan_rx.chan_nb = -1;
			LOG_ERR("DMA channel request failed");
			return -EAGAIN;
		}
	}
#endif
	/* Enable EUSART clock */
	err = clock_control_on(eusart_cfg->clock_dev,
			       (clock_control_subsys_t)&eusart_cfg->clock_cfg);
	if (err < 0 && err != -EALREADY) {
		goto exit;
	}

	/* Initialize the EUSART */
	EUSART_SpiInit(eusart_cfg->base, &eusartInit);

	/* Remember the active config so repeat calls become no-ops. */
	data->ctx.config = config;

	return 0;

exit:
#ifdef CONFIG_SPI_SILABS_EUSART_DMA
	/* Undo the lazy channel requests made above. */
	if (spi_silabs_eusart_is_dma_enabled_instance(dev)) {
		dma_release_channel(data->dma_chan_rx.dma_dev, data->dma_chan_rx.chan_nb);
		dma_release_channel(data->dma_chan_tx.dma_dev, data->dma_chan_tx.chan_nb);
		data->dma_chan_rx.chan_nb = -1;
		data->dma_chan_tx.chan_nb = -1;
	}
#endif
	return err;
}
237 
/* Block sleep states that would stop the EUSART while a transfer runs. */
static inline void spi_silabs_eusart_pm_policy_get(const struct device *dev)
{
	pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
	pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
}
243 
/* Release the sleep-state locks taken by spi_silabs_eusart_pm_policy_get(). */
static inline void spi_silabs_eusart_pm_policy_put(const struct device *dev)
{
	pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
	pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
}
249 
/* Device PM hook: gate the peripheral clock and pin state with power state.
 *
 * RESUME turns the clock on and applies the default pinctrl state;
 * SUSPEND applies the sleep pinctrl state, disables the EUSART and
 * gates its clock. Returns 0 on success or a negative errno.
 */
static int spi_silabs_eusart_pm_action(const struct device *dev, enum pm_device_action action)
{
	const struct spi_silabs_eusart_config *eusart_config = dev->config;
	int ret;

	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		ret = clock_control_on(eusart_config->clock_dev,
				       (clock_control_subsys_t)&eusart_config->clock_cfg);

		/* A clock that is already running is not an error. */
		if (ret == -EALREADY) {
			ret = 0;
		} else if (ret < 0) {
			break;
		}

		pinctrl_apply_state(eusart_config->pcfg, PINCTRL_STATE_DEFAULT);

		break;
	case PM_DEVICE_ACTION_SUSPEND:
		pinctrl_apply_state(eusart_config->pcfg, PINCTRL_STATE_SLEEP);

		/* Disable the peripheral before removing its clock. */
		EUSART_Enable(eusart_config->base, eusartDisable);
		ret = clock_control_off(eusart_config->clock_dev,
					(clock_control_subsys_t)&eusart_config->clock_cfg);
		if (ret == -EALREADY) {
			ret = 0;
		}

		break;
	default:
		ret = -ENOTSUP;
	}

	return ret;
}
286 
287 #ifdef CONFIG_SPI_SILABS_EUSART_DMA
/* RX DMA completion callback (runs from the DMA driver's ISR context).
 *
 * Intermediate block notifications (status >= 0 but not COMPLETE) are
 * ignored. On error both channels are stopped; in either terminal case
 * CS is released, the PM lock is dropped and the waiting context is
 * completed with @status.
 */
static void spi_silabs_dma_rx_callback(const struct device *dev, void *user_data, uint32_t channel,
				       int status)
{
	const struct device *spi_dev = (const struct device *)user_data;
	struct spi_silabs_eusart_data *data = spi_dev->data;
	struct spi_context *instance_ctx = &data->ctx;

	ARG_UNUSED(dev);

	if (status >= 0 && status != DMA_STATUS_COMPLETE) {
		/* Per-block event, transfer still in flight. */
		return;
	}

	if (status < 0) {
		/* Abort the transfer on DMA error. */
		dma_stop(data->dma_chan_tx.dma_dev, data->dma_chan_tx.chan_nb);
		dma_stop(data->dma_chan_rx.dma_dev, data->dma_chan_rx.chan_nb);
	}

	spi_context_cs_control(instance_ctx, false);
	spi_silabs_eusart_pm_policy_put(spi_dev);
	spi_context_complete(instance_ctx, spi_dev, status);
}
310 
/* Flush both FIFOs: issue a TX clear command, drain RXDATA while the RX
 * FIFO reports data, then wait for the TX clear to complete.
 */
static void spi_silabs_eusart_clear_txrx_fifos(EUSART_TypeDef *eusart)
{
	sys_write32(EUSART_CMD_CLEARTX, (mem_addr_t)&eusart->CMD_SET);

	while (sys_read32((mem_addr_t)&eusart->STATUS) & EUSART_STATUS_RXFL) {
		(void)sys_read32((mem_addr_t)&eusart->RXDATA);
	}

	while (sys_read32((mem_addr_t)&eusart->STATUS) & EUSART_STATUS_CLEARTXBUSY) {
	}
}
322 
/* Number of bytes the bus must be clocked for: the longer of the total
 * TX and total RX lengths of the current transaction set.
 */
static size_t spi_silabs_longest_transfer_size(struct spi_context *instance_ctx)
{
	size_t tx_len = spi_context_total_tx_len(instance_ctx);
	size_t rx_len = spi_context_total_rx_len(instance_ctx);

	return tx_len > rx_len ? tx_len : rx_len;
}
330 
/* Configure one DMA channel for byte-wise SPI transfers.
 *
 * TX moves memory to the peripheral, RX moves the peripheral to memory.
 * Only the RX channel carries the completion callback, since RX finishing
 * implies TX finished as well.
 */
static int spi_silabs_dma_config(const struct device *dev,
				 struct dma_channel *channel,
				 uint32_t block_count, bool is_tx)
{
	struct dma_config cfg = {
		.channel_direction = is_tx ? MEMORY_TO_PERIPHERAL : PERIPHERAL_TO_MEMORY,
		.complete_callback_en = 0,
		.source_data_size = 1,
		.dest_data_size = 1,
		.source_burst_length = 1,
		.dest_burst_length = 1,
		.block_count = block_count,
		.head_block = channel->dma_descriptors,
		.dma_slot = channel->dma_slot,
		.dma_callback = !is_tx ? &spi_silabs_dma_rx_callback : NULL,
		.user_data = (void *)dev,
	};

	return dma_config(channel->dma_dev, channel->chan_nb, &cfg);
}
351 
/* Fill a single DMA descriptor for one chunk of the transfer.
 *
 * TX descriptors write to TXDATA (fixed address); a NULL @buffer sends the
 * mosi_overrun byte repeatedly. RX descriptors read from RXDATA; a NULL
 * @buffer discards data into the shared empty_buffer sink.
 *
 * @return The number of bytes this descriptor covers: the requested size
 *         capped at SPI_DMA_MAX_DESCRIPTOR_TRANSFER_SIZE.
 */
static uint32_t spi_eusart_fill_desc(const struct spi_silabs_eusart_config *cfg,
				     struct dma_block_config *new_blk_cfg, uint8_t *buffer,
				     size_t requested_transaction_size, bool is_tx)
{
	/* Set-up source and destination address with increment behavior */
	if (is_tx) {
		new_blk_cfg->dest_address = (uint32_t)&cfg->base->TXDATA;
		new_blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		if (buffer) {
			new_blk_cfg->source_address = (uint32_t)buffer;
			new_blk_cfg->source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
		} else {
			/* Null buffer pointer means sending dummy byte */
			new_blk_cfg->source_address = (uint32_t)&(cfg->mosi_overrun);
			new_blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		}
	} else {
		new_blk_cfg->source_address = (uint32_t)&cfg->base->RXDATA;
		new_blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		if (buffer) {
			new_blk_cfg->dest_address = (uint32_t)buffer;
			new_blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
		} else {
			/* Null buffer pointer means rx to null byte */
			new_blk_cfg->dest_address = (uint32_t)&empty_buffer;
			new_blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		}
	}
	/* Setup max transfer according to requested transaction size.
	 * Will top if bigger than the maximum transfer size.
	 */
	new_blk_cfg->block_size = MIN(requested_transaction_size,
				      SPI_DMA_MAX_DESCRIPTOR_TRANSFER_SIZE);
	return new_blk_cfg->block_size;
}
387 
spi_eusart_fill_data_desc(const struct spi_silabs_eusart_config * cfg,struct dma_block_config * desc,const struct spi_buf buffers[],int buffer_count,size_t transaction_len,bool is_tx)388 struct dma_block_config *spi_eusart_fill_data_desc(const struct spi_silabs_eusart_config *cfg,
389 						   struct dma_block_config *desc,
390 						   const struct spi_buf buffers[],
391 						   int buffer_count,
392 						   size_t transaction_len,
393 						   bool is_tx)
394 {
395 	__ASSERT(transaction_len > 0, "Not supported");
396 
397 	size_t offset = 0;
398 	int i = 0;
399 	uint8_t *buffer = NULL;
400 
401 	while (i != buffer_count) {
402 		if (!buffers[i].len) {
403 			i++;
404 			continue;
405 		}
406 		if (!desc) {
407 			return NULL;
408 		}
409 		buffer = buffers[i].buf ? (uint8_t *)buffers[i].buf + offset : NULL;
410 		offset += spi_eusart_fill_desc(cfg, desc,
411 					       buffer,
412 					       buffers[i].len - offset,
413 					       is_tx);
414 		if (offset == buffers[i].len) {
415 			transaction_len -= offset;
416 			offset = 0;
417 			i++;
418 		}
419 		if (transaction_len) {
420 			desc = desc->next_block;
421 		}
422 	}
423 
424 	while (transaction_len) {
425 		if (!desc) {
426 			return NULL;
427 		}
428 
429 		transaction_len -= spi_eusart_fill_desc(cfg, desc, NULL, transaction_len, is_tx);
430 		if (transaction_len) {
431 			desc = desc->next_block;
432 		}
433 	}
434 
435 	desc->next_block = NULL;
436 	return desc;
437 }
438 
spi_eusart_reset_desc(struct dma_channel * channel)439 static void spi_eusart_reset_desc(struct dma_channel *channel)
440 {
441 	int i;
442 
443 	memset(channel->dma_descriptors, 0, sizeof(channel->dma_descriptors));
444 	for (i = 0; i < ARRAY_SIZE(channel->dma_descriptors) - 1; i++) {
445 		channel->dma_descriptors[i].next_block = &channel->dma_descriptors[i + 1];
446 	}
447 }
448 
/* Reset a channel's descriptors, build the chain for this direction and
 * hand it to the DMA driver.
 *
 * NOTE(review): the block_count passed to spi_silabs_dma_config() is the
 * array index of the LAST used descriptor, not the descriptor count —
 * presumably the LDMA driver follows next_block links; confirm it
 * tolerates this value.
 *
 * Returns 0 on success, -ENOMEM if the descriptor pool was too small,
 * or the dma_config() error.
 */
static int spi_eusart_prepare_dma_channel(const struct device *spi_dev,
					  const struct spi_buf *buffer,
					  size_t buffer_count,
					  struct dma_channel *channel,
					  size_t padded_transaction_size,
					  bool is_tx)
{
	const struct spi_silabs_eusart_config *cfg = spi_dev->config;
	struct dma_block_config *desc;
	int ret = 0;

	spi_eusart_reset_desc(channel);
	desc = spi_eusart_fill_data_desc(cfg, channel->dma_descriptors,
					 buffer, buffer_count, padded_transaction_size, is_tx);
	if (!desc) {
		return -ENOMEM;
	}

	ret = spi_silabs_dma_config(spi_dev, channel,
				    ARRAY_INDEX(channel->dma_descriptors, desc),
				    is_tx);

	return ret;
}
473 
spi_eusart_prepare_dma_transaction(const struct device * dev,size_t padded_transaction_size)474 static int spi_eusart_prepare_dma_transaction(const struct device *dev,
475 					      size_t padded_transaction_size)
476 {
477 	int ret;
478 	struct spi_silabs_eusart_data *data = dev->data;
479 
480 	if (padded_transaction_size == 0) {
481 		/* Nothing to do */
482 		return 0;
483 	}
484 
485 	ret = spi_eusart_prepare_dma_channel(dev, data->ctx.current_tx, data->ctx.tx_count,
486 					     &data->dma_chan_tx, padded_transaction_size,
487 					     true);
488 	if (ret) {
489 		return ret;
490 	}
491 
492 	ret = spi_eusart_prepare_dma_channel(dev, data->ctx.current_rx, data->ctx.rx_count,
493 					     &data->dma_chan_rx, padded_transaction_size, false);
494 	return ret;
495 }
496 
497 #endif
498 
/* Transmit one frame and busy-wait until it has been fully clocked out
 * (TXC), so the matching RX byte is available afterwards.
 */
static void spi_silabs_eusart_send(EUSART_TypeDef *eusart, uint8_t frame)
{
	/* Write frame to register */
	EUSART_Tx(eusart, frame);

	/* Wait until the transfer ends */
	while (!(eusart->STATUS & EUSART_STATUS_TXC)) {
	}
}
508 
/* Read one received frame from the RX FIFO. */
static uint8_t spi_silabs_eusart_recv(EUSART_TypeDef *eusart)
{
	/* Return data inside rx register */
	return EUSART_Rx(eusart);
}
514 
/* True while either the TX or RX side still has bytes to move. */
static bool spi_silabs_eusart_transfer_ongoing(struct spi_silabs_eusart_data *data)
{
	return spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx);
}
519 
spi_silabs_eusart_next_tx(struct spi_silabs_eusart_data * data)520 static inline uint8_t spi_silabs_eusart_next_tx(struct spi_silabs_eusart_data *data)
521 {
522 	uint8_t tx_frame = 0;
523 
524 	if (spi_context_tx_buf_on(&data->ctx)) {
525 		tx_frame = UNALIGNED_GET((uint8_t *)(data->ctx.tx_buf));
526 	}
527 
528 	return tx_frame;
529 }
530 
/* Exchange one frame on the bus: send the next TX byte (or a dummy),
 * then read back the byte clocked in, storing it only if an RX buffer
 * is active. Always returns 0.
 */
static int spi_silabs_eusart_shift_frames(EUSART_TypeDef *eusart,
					  struct spi_silabs_eusart_data *data)
{
	uint8_t tx_frame;
	uint8_t rx_frame;

	tx_frame = spi_silabs_eusart_next_tx(data);
	spi_silabs_eusart_send(eusart, tx_frame);
	spi_context_update_tx(&data->ctx, 1, 1);

	rx_frame = spi_silabs_eusart_recv(eusart);

	if (spi_context_rx_buf_on(&data->ctx)) {
		UNALIGNED_PUT(rx_frame, (uint8_t *)data->ctx.rx_buf);
	}

	spi_context_update_rx(&data->ctx, 1, 1);

	return 0;
}
551 
/* Run the current transaction using DMA.
 *
 * Flushes the FIFOs, programs both channels, asserts CS, then starts RX
 * before TX so no received byte can be dropped. Completion (CS release,
 * PM unlock, context completion) is handled by the RX DMA callback; on
 * any start/wait failure this function tears the transfer down itself.
 * Returns -ENOTSUP when built without DMA support.
 */
static int spi_silabs_eusart_xfer_dma(const struct device *dev, const struct spi_config *config)
{
#ifdef CONFIG_SPI_SILABS_EUSART_DMA
	const struct spi_silabs_eusart_config *eusart_config = dev->config;
	struct spi_silabs_eusart_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	int ret = 0;

	size_t padded_transaction_size = spi_silabs_longest_transfer_size(ctx);

	if (padded_transaction_size == 0) {
		return -EINVAL;
	}

	spi_silabs_eusart_clear_txrx_fifos(eusart_config->base);

	ret = spi_eusart_prepare_dma_transaction(dev, padded_transaction_size);
	if (ret) {
		return ret;
	}

	spi_silabs_eusart_pm_policy_get(dev);

	spi_context_cs_control(ctx, true);

	/* RX channel needs to be ready before TX channel actually starts */
	ret = dma_start(data->dma_chan_rx.dma_dev, data->dma_chan_rx.chan_nb);
	if (ret) {
		goto force_transaction_close;
	}

	ret = dma_start(data->dma_chan_tx.dma_dev, data->dma_chan_tx.chan_nb);
	if (ret) {

		goto force_transaction_close;
	}

	ret = spi_context_wait_for_completion(&data->ctx);
	if (ret < 0) {
		goto force_transaction_close;
	}

	/* Successful transaction. DMA transfer done interrupt ended the transaction. */
	return 0;
force_transaction_close:
	/* Mirror the RX callback's teardown for the failure path. */
	dma_stop(data->dma_chan_rx.dma_dev, data->dma_chan_rx.chan_nb);
	dma_stop(data->dma_chan_tx.dma_dev, data->dma_chan_tx.chan_nb);
	spi_context_cs_control(ctx, false);
	spi_silabs_eusart_pm_policy_put(dev);
	return ret;
#else
	return -ENOTSUP;
#endif
}
606 
/* Run the current transaction by polling, one frame at a time.
 *
 * Holds the PM lock and CS for the whole transfer. Note that
 * spi_silabs_eusart_shift_frames() currently always returns 0, so the
 * loop runs until both buffers are drained and the context is completed
 * with status 0.
 */
static int spi_silabs_eusart_xfer_polling(const struct device *dev,
					  const struct spi_config *config)
{
	const struct spi_silabs_eusart_config *eusart_config = dev->config;
	struct spi_silabs_eusart_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	int ret;

	spi_silabs_eusart_pm_policy_get(dev);
	spi_context_cs_control(ctx, true);

	ret = 0;
	while (!ret && spi_silabs_eusart_transfer_ongoing(data)) {
		ret = spi_silabs_eusart_shift_frames(eusart_config->base, data);
	}

	spi_context_cs_control(ctx, false);
	spi_context_complete(ctx, dev, 0);

	spi_silabs_eusart_pm_policy_put(dev);
	return ret;
}
629 
/* Common entry point for sync and async transfers.
 *
 * Serializes access via the context lock, (re)configures the controller,
 * sets up the buffers and dispatches: DMA handles both sync and async;
 * polling handles sync only. Async without DMA returns -ENOTSUP.
 */
static int spi_silabs_eusart_transceive(const struct device *dev,
					const struct spi_config *config,
					const struct spi_buf_set *tx_bufs,
					const struct spi_buf_set *rx_bufs,
					bool asynchronous,
					spi_callback_t cb,
					void *userdata)
{
	struct spi_silabs_eusart_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	int ret;

	spi_context_lock(ctx, asynchronous, cb, userdata, config);

	ret = spi_silabs_eusart_configure(dev, config);
	if (ret) {
		goto out;
	}

	/* 1 byte per frame (8-bit words only). */
	spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, 1);

	if (spi_silabs_eusart_is_dma_enabled_instance(dev)) {
		/* DMA transfer handle a/synchronous transfers */
		ret = spi_silabs_eusart_xfer_dma(dev, config);
	} else if (!asynchronous) {
		ret = spi_silabs_eusart_xfer_polling(dev, config);
	} else {
		/* Asynchronous transfers without DMA is not implemented,
		 * please configure the device tree
		 * instance with the proper DMA configuration.
		 */
		ret = -ENOTSUP;
	}

out:
	spi_context_release(ctx, ret);

	return ret;
}
669 
670 /* API Functions */
/* Driver init: configure all CS GPIOs, release the context lock so the
 * first transceive can take it, then run the PM-aware device init.
 */
static int spi_silabs_eusart_init(const struct device *dev)
{
	struct spi_silabs_eusart_data *data = dev->data;
	int err;

	err = spi_context_cs_configure_all(&data->ctx);
	if (err < 0) {
		return err;
	}

	spi_context_unlock_unconditionally(&data->ctx);

	return pm_device_driver_init(dev, spi_silabs_eusart_pm_action);
}
685 
spi_silabs_eusart_transceive_sync(const struct device * dev,const struct spi_config * config,const struct spi_buf_set * tx_bufs,const struct spi_buf_set * rx_bufs)686 static int spi_silabs_eusart_transceive_sync(const struct device *dev,
687 					     const struct spi_config *config,
688 					     const struct spi_buf_set *tx_bufs,
689 					     const struct spi_buf_set *rx_bufs)
690 {
691 	return spi_silabs_eusart_transceive(dev,
692 					    config,
693 					    tx_bufs,
694 					    rx_bufs,
695 					    false,
696 					    NULL,
697 					    NULL);
698 }
699 
700 #ifdef CONFIG_SPI_ASYNC
/* Asynchronous transceive API entry: returns immediately, @cb is invoked
 * with @userdata when the (DMA-driven) transfer completes.
 */
static int spi_silabs_eusart_transceive_async(const struct device *dev,
					      const struct spi_config *config,
					      const struct spi_buf_set *tx_bufs,
					      const struct spi_buf_set *rx_bufs,
					      spi_callback_t cb,
					      void *userdata)
{
	return spi_silabs_eusart_transceive(dev, config, tx_bufs, rx_bufs, true, cb, userdata);
}
710 #endif
711 
/* SPI release API: drop the context lock held across SPI_HOLD_ON_CS
 * transactions.
 *
 * NOTE(review): the lock is released before the TX-idle check, so -EBUSY
 * is reported after the lock is already gone — confirm callers expect
 * this ordering.
 */
static int spi_silabs_eusart_release(const struct device *dev, const struct spi_config *config)
{
	const struct spi_silabs_eusart_config *eusart_config = dev->config;
	struct spi_silabs_eusart_data *data = dev->data;

	spi_context_unlock_unconditionally(&data->ctx);

	if (!(eusart_config->base->STATUS & EUSART_STATUS_TXIDLE)) {
		return -EBUSY;
	}

	return 0;
}
725 
726 /* Device Instantiation */
/* SPI driver API vtable for this driver. */
static DEVICE_API(spi, spi_silabs_eusart_api) = {
	.transceive = spi_silabs_eusart_transceive_sync,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_silabs_eusart_transceive_async,
#endif
	.release = spi_silabs_eusart_release,
};
734 
#ifdef CONFIG_SPI_SILABS_EUSART_DMA
/* Initializer for one dma_channel (dir is rx or tx) from the instance's
 * devicetree "dmas" property; chan_nb starts at -1 (not yet requested).
 */
#define SPI_SILABS_EUSART_DMA_CHANNEL_INIT(index, dir)						\
	.dma_chan_##dir = {									\
		.chan_nb = -1,									\
		.dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(index, dir)),		\
		.dma_slot =									\
			SILABS_LDMA_REQSEL_TO_SLOT(DT_INST_DMAS_CELL_BY_NAME(index, dir, slot)),\
	},
/* Expands to the channel initializer only when the instance has "dmas". */
#define SPI_SILABS_EUSART_DMA_CHANNEL(index, dir) \
	COND_CODE_1(DT_INST_NODE_HAS_PROP(index, dmas), \
		    (SPI_SILABS_EUSART_DMA_CHANNEL_INIT(index, dir)), ())
#else
#define SPI_SILABS_EUSART_DMA_CHANNEL(index, dir)
#endif

/* Per-instance definition: pinctrl, data, config, PM device and SPI device. */
#define SPI_INIT(n) \
	PINCTRL_DT_INST_DEFINE(n); \
	static struct spi_silabs_eusart_data spi_silabs_eusart_data_##n = { \
		SPI_CONTEXT_INIT_LOCK(spi_silabs_eusart_data_##n, ctx), \
		SPI_CONTEXT_INIT_SYNC(spi_silabs_eusart_data_##n, ctx), \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx) \
		SPI_SILABS_EUSART_DMA_CHANNEL(n, rx) \
		SPI_SILABS_EUSART_DMA_CHANNEL(n, tx) \
	}; \
	static struct spi_silabs_eusart_config spi_silabs_eusart_cfg_##n = { \
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \
		.base = (EUSART_TypeDef *)DT_INST_REG_ADDR(n), \
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \
		.clock_cfg = SILABS_DT_INST_CLOCK_CFG(n), \
		.mosi_overrun = (uint8_t)SPI_MOSI_OVERRUN_DT(n), \
		.clock_frequency = DT_INST_PROP_OR(n, clock_frequency, 1000000), \
	}; \
	PM_DEVICE_DT_INST_DEFINE(n, spi_silabs_eusart_pm_action); \
	SPI_DEVICE_DT_INST_DEFINE(n, spi_silabs_eusart_init, PM_DEVICE_DT_INST_GET(n), \
				  &spi_silabs_eusart_data_##n, &spi_silabs_eusart_cfg_##n, \
				  POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, &spi_silabs_eusart_api);

/* Instantiate the driver for every enabled devicetree node. */
DT_INST_FOREACH_STATUS_OKAY(SPI_INIT)
773