/*
 * Copyright (c) 2016 BayLibre, SAS
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT st_stm32_spi

#define LOG_LEVEL CONFIG_SPI_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(spi_ll_stm32);

#include <zephyr/sys/util.h>
#include <zephyr/kernel.h>
#include <soc.h>
#include <stm32_ll_spi.h>
#include <errno.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/spi/rtio.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/toolchain.h>
#include <zephyr/pm/policy.h>
#include <zephyr/pm/device.h>
#include <zephyr/pm/device_runtime.h>
#ifdef CONFIG_SPI_STM32_DMA
#include <zephyr/drivers/dma/dma_stm32.h>
#include <zephyr/drivers/dma.h>
#endif
#include <zephyr/drivers/clock_control/stm32_clock_control.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/irq.h>
#include <zephyr/mem_mgmt/mem_attr.h>

#ifdef CONFIG_DCACHE
#include <zephyr/dt-bindings/memory-attr/memory-attr-arm.h>
#endif /* CONFIG_DCACHE */

#ifdef CONFIG_NOCACHE_MEMORY
#include <zephyr/linker/linker-defs.h>
#elif defined(CONFIG_CACHE_MANAGEMENT)
#include <zephyr/arch/cache.h>
#endif /* CONFIG_NOCACHE_MEMORY */

#include "spi_ll_stm32.h"

#if defined(CONFIG_DCACHE) &&                               \
	!defined(CONFIG_NOCACHE_MEMORY)
/* Currently, manual cache coherency management is only done on dummy_rx_tx_buffer. */
#define SPI_STM32_MANUAL_CACHE_COHERENCY_REQUIRED	1
#else
#define SPI_STM32_MANUAL_CACHE_COHERENCY_REQUIRED	0
#endif /* defined(CONFIG_DCACHE) && !defined(CONFIG_NOCACHE_MEMORY) */

#define WAIT_1US	1U
/*
 * Check for SPI_SR_FRE to determine support for the TI mode frame format
 * error flag, because STM32F1 SoCs do not support it and STM32Cube
 * for the F1 family defines an unused LL_SPI_SR_FRE.
 */
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
#define SPI_STM32_ERR_MSK (LL_SPI_SR_UDR | LL_SPI_SR_CRCE | LL_SPI_SR_MODF | \
			   LL_SPI_SR_OVR | LL_SPI_SR_TIFRE)
#else
#if defined(LL_SPI_SR_UDR)
#define SPI_STM32_ERR_MSK (LL_SPI_SR_UDR | LL_SPI_SR_CRCERR | LL_SPI_SR_MODF | \
			   LL_SPI_SR_OVR | LL_SPI_SR_FRE)
#elif defined(SPI_SR_FRE)
#define SPI_STM32_ERR_MSK (LL_SPI_SR_CRCERR | LL_SPI_SR_MODF | \
			   LL_SPI_SR_OVR | LL_SPI_SR_FRE)
#else
#define SPI_STM32_ERR_MSK (LL_SPI_SR_CRCERR | LL_SPI_SR_MODF | LL_SPI_SR_OVR)
#endif
#endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) */

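/*
 * Keep the SoC out of deep sleep states (and hold a PM runtime reference)
 * while a transfer is in flight, so the SPI kernel clock stays available.
 */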
static void spi_stm32_pm_policy_state_lock_get(const struct device *dev)
{
	if (IS_ENABLED(CONFIG_PM)) {
		struct spi_stm32_data *data = dev->data;

		if (!data->pm_policy_state_on) {
			data->pm_policy_state_on = true;
			pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
			if (IS_ENABLED(CONFIG_PM_S2RAM)) {
				pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_RAM, PM_ALL_SUBSTATES);
			}
			pm_device_runtime_get(dev);
		}
	}
}

static void spi_stm32_pm_policy_state_lock_put(const struct device *dev)
{
	if (IS_ENABLED(CONFIG_PM)) {
		struct spi_stm32_data *data = dev->data;

		if (data->pm_policy_state_on) {
			data->pm_policy_state_on = false;
			pm_device_runtime_put(dev);
			pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
			if (IS_ENABLED(CONFIG_PM_S2RAM)) {
				pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_RAM, PM_ALL_SUBSTATES);
			}
		}
	}
}

#ifdef CONFIG_SPI_STM32_DMA
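/*
 * Convert a frame size in bits to bytes. Only 8- and 16-bit word sizes
 * reach this helper, so plain integer division is sufficient.
 */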
static uint32_t bits2bytes(uint32_t bits)
{
	return bits / 8;
}

/*
 * The dummy buffer is used to transmit NOP frames when the TX buffer is
 * NULL, and as a dummy sink when the RX buffer is NULL.
 * If nocache memory is supported, the buffer is placed in a nocache region
 * by the linker to avoid potential DMA cache-coherency problems.
 * If nocache memory is not supported, cache coherency might need to be
 * maintained manually. See SPI_STM32_MANUAL_CACHE_COHERENCY_REQUIRED.
 */
static __aligned(32) uint32_t dummy_rx_tx_buffer __nocache;

/* This function is executed in the interrupt context */
static void dma_callback(const struct device *dma_dev, void *arg,
			 uint32_t channel, int status)
{
	ARG_UNUSED(dma_dev);

	/* arg holds the SPI DMA data, passed in spi_stm32_dma_tx/rx_load() */
	struct spi_stm32_data *spi_dma_data = arg;

	if (status < 0) {
		LOG_ERR("DMA callback error with channel %d.", channel);
		spi_dma_data->status_flags |= SPI_STM32_DMA_ERROR_FLAG;
	} else {
		/* identify the origin of this callback */
		if (channel == spi_dma_data->dma_tx.channel) {
			/* this part of the transfer ends */
			spi_dma_data->status_flags |= SPI_STM32_DMA_TX_DONE_FLAG;
		} else if (channel == spi_dma_data->dma_rx.channel) {
			/* this part of the transfer ends */
			spi_dma_data->status_flags |= SPI_STM32_DMA_RX_DONE_FLAG;
		} else {
			LOG_ERR("DMA callback channel %d is not valid.", channel);
			spi_dma_data->status_flags |= SPI_STM32_DMA_ERROR_FLAG;
		}
	}

	k_sem_give(&spi_dma_data->status_sem);
}

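/* Configure and start the TX DMA channel for one chunk of the transfer. */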
static int spi_stm32_dma_tx_load(const struct device *dev, const uint8_t *buf,
				 size_t len)
{
	const struct spi_stm32_config *cfg = dev->config;
	struct spi_stm32_data *data = dev->data;
	struct dma_block_config *blk_cfg;
	int ret;

	/* remember the active TX DMA channel (used in the callback) */
	struct stream *stream = &data->dma_tx;

	blk_cfg = &stream->dma_blk_cfg;

	/* prepare the block for this TX DMA channel */
	memset(blk_cfg, 0, sizeof(struct dma_block_config));
	blk_cfg->block_size = len;

	/* tx direction has memory as source and periph as dest. */
	if (buf == NULL) {
		/* if the tx buffer is null, send NOP on the line. */
		dummy_rx_tx_buffer = 0;
#if SPI_STM32_MANUAL_CACHE_COHERENCY_REQUIRED
		arch_dcache_flush_range((void *)&dummy_rx_tx_buffer, sizeof(uint32_t));
#endif /* SPI_STM32_MANUAL_CACHE_COHERENCY_REQUIRED */
		blk_cfg->source_address = (uint32_t)&dummy_rx_tx_buffer;
		blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
	} else {
		blk_cfg->source_address = (uint32_t)buf;
		if (data->dma_tx.src_addr_increment) {
			blk_cfg->source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
		} else {
			blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		}
	}

	blk_cfg->dest_address = ll_func_dma_get_reg_addr(cfg->spi, SPI_STM32_DMA_TX);
	/* FIFO mode is not used here */
	if (data->dma_tx.dst_addr_increment) {
		blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
	} else {
		blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
	}

	/* the fifo threshold is given by the DT */
	blk_cfg->fifo_mode_control = data->dma_tx.fifo_threshold;

	/* direction is given by the DT */
	stream->dma_cfg.head_block = blk_cfg;
	/* give the dma channel data as arg, as the callback comes from the dma */
	stream->dma_cfg.user_data = data;
	/* pass our client origin to the dma: data->dma_tx.channel */
	ret = dma_config(data->dma_tx.dma_dev, data->dma_tx.channel,
			&stream->dma_cfg);
	/* the channel is the actual stream, numbered from 0 */
	if (ret != 0) {
		return ret;
	}

	/* give the request ID to the dma mux */
	return dma_start(data->dma_tx.dma_dev, data->dma_tx.channel);
}

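/* Configure and start the RX DMA channel for one chunk of the transfer. */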
static int spi_stm32_dma_rx_load(const struct device *dev, uint8_t *buf,
				 size_t len)
{
	const struct spi_stm32_config *cfg = dev->config;
	struct spi_stm32_data *data = dev->data;
	struct dma_block_config *blk_cfg;
	int ret;

	/* retrieve the active RX DMA channel (used in the callback) */
	struct stream *stream = &data->dma_rx;

	blk_cfg = &stream->dma_blk_cfg;

	/* prepare the block for this RX DMA channel */
	memset(blk_cfg, 0, sizeof(struct dma_block_config));
	blk_cfg->block_size = len;

	/* rx direction has periph as source and mem as dest. */
	if (buf == NULL) {
		/* if the rx buffer is null, write data to the dummy address. */
		blk_cfg->dest_address = (uint32_t)&dummy_rx_tx_buffer;
		blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
	} else {
		blk_cfg->dest_address = (uint32_t)buf;
		if (data->dma_rx.dst_addr_increment) {
			blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
		} else {
			blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		}
	}

	blk_cfg->source_address = ll_func_dma_get_reg_addr(cfg->spi, SPI_STM32_DMA_RX);
	if (data->dma_rx.src_addr_increment) {
		blk_cfg->source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
	} else {
		blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
	}

	/* the fifo threshold is given by the DT */
	blk_cfg->fifo_mode_control = data->dma_rx.fifo_threshold;

	/* direction is given by the DT */
	stream->dma_cfg.head_block = blk_cfg;
	stream->dma_cfg.user_data = data;

	/* pass our client origin to the dma: data->dma_rx.channel */
	ret = dma_config(data->dma_rx.dma_dev, data->dma_rx.channel,
			&stream->dma_cfg);
	/* the channel is the actual stream, numbered from 0 */
	if (ret != 0) {
		return ret;
	}

	/* give the request ID to the dma mux */
	return dma_start(data->dma_rx.dma_dev, data->dma_rx.channel);
}

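/*
 * Load both DMA channels for the next chunk. The RX channel is loaded
 * first so that no incoming frame is missed once TX starts clocking data.
 */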
static int spi_dma_move_buffers(const struct device *dev, size_t len)
{
	struct spi_stm32_data *data = dev->data;
	int ret;
	size_t dma_segment_len;

	dma_segment_len = len * data->dma_rx.dma_cfg.dest_data_size;
	ret = spi_stm32_dma_rx_load(dev, data->ctx.rx_buf, dma_segment_len);
	if (ret != 0) {
		return ret;
	}

	dma_segment_len = len * data->dma_tx.dma_cfg.source_data_size;
	ret = spi_stm32_dma_tx_load(dev, data->ctx.tx_buf, dma_segment_len);

	return ret;
}

#endif /* CONFIG_SPI_STM32_DMA */

/* Value to shift out when no application data needs transmitting. */
#define SPI_STM32_TX_NOP 0x00

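/* Transmit the next 8- or 16-bit frame, sending NOP when TX is exhausted. */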
static void spi_stm32_send_next_frame(SPI_TypeDef *spi,
		struct spi_stm32_data *data)
{
	const uint8_t frame_size = SPI_WORD_SIZE_GET(data->ctx.config->operation);
	uint32_t tx_frame = SPI_STM32_TX_NOP;

	if (frame_size == 8) {
		if (spi_context_tx_buf_on(&data->ctx)) {
			tx_frame = UNALIGNED_GET((uint8_t *)(data->ctx.tx_buf));
		}
		LL_SPI_TransmitData8(spi, tx_frame);
		spi_context_update_tx(&data->ctx, 1, 1);
	} else {
		if (spi_context_tx_buf_on(&data->ctx)) {
			tx_frame = UNALIGNED_GET((uint16_t *)(data->ctx.tx_buf));
		}
		LL_SPI_TransmitData16(spi, tx_frame);
		spi_context_update_tx(&data->ctx, 2, 1);
	}
}

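/* Read the next received frame, storing it only when an RX buffer is active. */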
static void spi_stm32_read_next_frame(SPI_TypeDef *spi,
		struct spi_stm32_data *data)
{
	const uint8_t frame_size = SPI_WORD_SIZE_GET(data->ctx.config->operation);
	uint32_t rx_frame = 0;

	if (frame_size == 8) {
		rx_frame = LL_SPI_ReceiveData8(spi);
		if (spi_context_rx_buf_on(&data->ctx)) {
			UNALIGNED_PUT(rx_frame, (uint8_t *)data->ctx.rx_buf);
		}
		spi_context_update_rx(&data->ctx, 1, 1);
	} else {
		rx_frame = LL_SPI_ReceiveData16(spi);
		if (spi_context_rx_buf_on(&data->ctx)) {
			UNALIGNED_PUT(rx_frame, (uint16_t *)data->ctx.rx_buf);
		}
		spi_context_update_rx(&data->ctx, 2, 1);
	}
}

static bool spi_stm32_transfer_ongoing(struct spi_stm32_data *data)
{
	return spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx);
}

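/* Check the status register against SPI_STM32_ERR_MSK; returns -EIO on error, 0 otherwise. */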
static int spi_stm32_get_err(SPI_TypeDef *spi)
{
	uint32_t sr = LL_SPI_ReadReg(spi, SR);

	if (sr & SPI_STM32_ERR_MSK) {
		LOG_ERR("%s: err=%d", __func__,
			    sr & (uint32_t)SPI_STM32_ERR_MSK);

		/* OVR error must be explicitly cleared */
		if (LL_SPI_IsActiveFlag_OVR(spi)) {
			LL_SPI_ClearFlag_OVR(spi);
		}

		return -EIO;
	}

	return 0;
}

static void spi_stm32_shift_fifo(SPI_TypeDef *spi, struct spi_stm32_data *data)
{
	if (ll_func_rx_is_not_empty(spi)) {
		spi_stm32_read_next_frame(spi, data);
	}

	if (ll_func_tx_is_not_full(spi)) {
		spi_stm32_send_next_frame(spi, data);
	}
}

/* Shift a SPI frame as master. */
static void spi_stm32_shift_m(const struct spi_stm32_config *cfg,
			      struct spi_stm32_data *data)
{
	if (cfg->fifo_enabled) {
		spi_stm32_shift_fifo(cfg->spi, data);
	} else {
		while (!ll_func_tx_is_not_full(cfg->spi)) {
			/* NOP */
		}

		spi_stm32_send_next_frame(cfg->spi, data);

		while (!ll_func_rx_is_not_empty(cfg->spi)) {
			/* NOP */
		}

		spi_stm32_read_next_frame(cfg->spi, data);
	}
}

/* Shift a SPI frame as slave. */
static void spi_stm32_shift_s(SPI_TypeDef *spi, struct spi_stm32_data *data)
{
	if (ll_func_tx_is_not_full(spi) && spi_context_tx_on(&data->ctx)) {
		uint16_t tx_frame;

		if (SPI_WORD_SIZE_GET(data->ctx.config->operation) == 8) {
			tx_frame = UNALIGNED_GET((uint8_t *)(data->ctx.tx_buf));
			LL_SPI_TransmitData8(spi, tx_frame);
			spi_context_update_tx(&data->ctx, 1, 1);
		} else {
			tx_frame = UNALIGNED_GET((uint16_t *)(data->ctx.tx_buf));
			LL_SPI_TransmitData16(spi, tx_frame);
			spi_context_update_tx(&data->ctx, 2, 1);
		}
	} else {
		ll_func_disable_int_tx_empty(spi);
	}

	if (ll_func_rx_is_not_empty(spi) &&
	    spi_context_rx_buf_on(&data->ctx)) {
		uint16_t rx_frame;

		if (SPI_WORD_SIZE_GET(data->ctx.config->operation) == 8) {
			rx_frame = LL_SPI_ReceiveData8(spi);
			UNALIGNED_PUT(rx_frame, (uint8_t *)data->ctx.rx_buf);
			spi_context_update_rx(&data->ctx, 1, 1);
		} else {
			rx_frame = LL_SPI_ReceiveData16(spi);
			UNALIGNED_PUT(rx_frame, (uint16_t *)data->ctx.rx_buf);
			spi_context_update_rx(&data->ctx, 2, 1);
		}
	}
}

/*
 * Without a FIFO, we can only shift out one frame's worth of SPI
 * data, and read the response back.
 *
 * TODO: support 16-bit data frames.
 */
static int spi_stm32_shift_frames(const struct spi_stm32_config *cfg,
	struct spi_stm32_data *data)
{
	uint16_t operation = data->ctx.config->operation;

	if (SPI_OP_MODE_GET(operation) == SPI_OP_MODE_MASTER) {
		spi_stm32_shift_m(cfg, data);
	} else {
		spi_stm32_shift_s(cfg->spi, data);
	}

	return spi_stm32_get_err(cfg->spi);
}

static void spi_stm32_cs_control(const struct device *dev, bool on)
{
	struct spi_stm32_data *data = dev->data;

	spi_context_cs_control(&data->ctx, on);

#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_spi_subghz)
	const struct spi_stm32_config *cfg = dev->config;

	if (cfg->use_subghzspi_nss) {
		if (on) {
			LL_PWR_SelectSUBGHZSPI_NSS();
		} else {
			LL_PWR_UnselectSUBGHZSPI_NSS();
		}
	}
#endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32_spi_subghz) */
}

static void spi_stm32_complete(const struct device *dev, int status)
{
	const struct spi_stm32_config *cfg = dev->config;
	SPI_TypeDef *spi = cfg->spi;
	struct spi_stm32_data *data = dev->data;

#ifdef CONFIG_SPI_STM32_INTERRUPT
	ll_func_disable_int_tx_empty(spi);
	ll_func_disable_int_rx_not_empty(spi);
	ll_func_disable_int_errors(spi);

#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
	if (cfg->fifo_enabled) {
		LL_SPI_DisableIT_EOT(spi);
	}
#endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) */

#endif /* CONFIG_SPI_STM32_INTERRUPT */

#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_spi_fifo)
	/* Flush the RX buffer */
	while (ll_func_rx_is_not_empty(spi)) {
		(void) LL_SPI_ReceiveData8(spi);
	}
#endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32_spi_fifo) */

	if (LL_SPI_GetMode(spi) == LL_SPI_MODE_MASTER) {
		while (ll_func_spi_is_busy(spi)) {
			/* NOP */
		}

		spi_stm32_cs_control(dev, false);
	}

	/* BSY flag is cleared when the MODF flag is raised */
	if (LL_SPI_IsActiveFlag_MODF(spi)) {
		LL_SPI_ClearFlag_MODF(spi);
	}

#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
	if (cfg->fifo_enabled) {
		LL_SPI_ClearFlag_TXTF(spi);
		LL_SPI_ClearFlag_OVR(spi);
		LL_SPI_ClearFlag_EOT(spi);
		LL_SPI_SetTransferSize(spi, 0);
	}
#endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) */

	if (!(data->ctx.config->operation & SPI_HOLD_ON_CS)) {
		ll_func_disable_spi(spi);
	}

#ifdef CONFIG_SPI_STM32_INTERRUPT
	spi_context_complete(&data->ctx, dev, status);
#endif

	spi_stm32_pm_policy_state_lock_put(dev);
}

#ifdef CONFIG_SPI_STM32_INTERRUPT
static void spi_stm32_isr(const struct device *dev)
{
	const struct spi_stm32_config *cfg = dev->config;
	struct spi_stm32_data *data = dev->data;
	SPI_TypeDef *spi = cfg->spi;
	int err;

	/* Some spurious interrupts are triggered when the SPI is not enabled; ignore them.
	 * Do this only when the fifo is enabled, leaving non-fifo functionality
	 * untouched for now.
	 */
	if (cfg->fifo_enabled) {
		if (!LL_SPI_IsEnabled(spi)) {
			return;
		}
	}

	err = spi_stm32_get_err(spi);
	if (err) {
		spi_stm32_complete(dev, err);
		return;
	}

	if (spi_stm32_transfer_ongoing(data)) {
		err = spi_stm32_shift_frames(cfg, data);
	}

	if (err || !spi_stm32_transfer_ongoing(data)) {
		spi_stm32_complete(dev, err);
	}
}
#endif /* CONFIG_SPI_STM32_INTERRUPT */

static int spi_stm32_configure(const struct device *dev,
			       const struct spi_config *config)
{
	const struct spi_stm32_config *cfg = dev->config;
	struct spi_stm32_data *data = dev->data;
	const uint32_t scaler[] = {
		LL_SPI_BAUDRATEPRESCALER_DIV2,
		LL_SPI_BAUDRATEPRESCALER_DIV4,
		LL_SPI_BAUDRATEPRESCALER_DIV8,
		LL_SPI_BAUDRATEPRESCALER_DIV16,
		LL_SPI_BAUDRATEPRESCALER_DIV32,
		LL_SPI_BAUDRATEPRESCALER_DIV64,
		LL_SPI_BAUDRATEPRESCALER_DIV128,
		LL_SPI_BAUDRATEPRESCALER_DIV256
	};
	SPI_TypeDef *spi = cfg->spi;
	uint32_t clock;
	int br;

	if (spi_context_configured(&data->ctx, config)) {
		/* Nothing to do */
		return 0;
	}

	if ((SPI_WORD_SIZE_GET(config->operation) != 8)
	    && (SPI_WORD_SIZE_GET(config->operation) != 16)) {
		return -ENOTSUP;
	}

	/* configure the frame format: Motorola (default) or TI */
	if ((config->operation & SPI_FRAME_FORMAT_TI) == SPI_FRAME_FORMAT_TI) {
#ifdef LL_SPI_PROTOCOL_TI
		LL_SPI_SetStandard(spi, LL_SPI_PROTOCOL_TI);
#else
		LOG_ERR("Frame Format TI not supported");
		/* on STM32F1 or some STM32L1 (cat1,2) without SPI_CR2_FRF */
		return -ENOTSUP;
#endif
#if defined(LL_SPI_PROTOCOL_MOTOROLA) && defined(SPI_CR2_FRF)
	} else {
		LL_SPI_SetStandard(spi, LL_SPI_PROTOCOL_MOTOROLA);
#endif
	}

	if (IS_ENABLED(STM32_SPI_DOMAIN_CLOCK_SUPPORT) && (cfg->pclk_len > 1)) {
		if (clock_control_get_rate(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
					   (clock_control_subsys_t) &cfg->pclken[1], &clock) < 0) {
			LOG_ERR("Failed call clock_control_get_rate(pclk[1])");
			return -EIO;
		}
	} else {
		if (clock_control_get_rate(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
					   (clock_control_subsys_t) &cfg->pclken[0], &clock) < 0) {
			LOG_ERR("Failed call clock_control_get_rate(pclk[0])");
			return -EIO;
		}
	}

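	/*
	 * Pick the smallest prescaler whose output does not exceed the
	 * requested frequency: clock >> br divides the kernel clock by 2^br,
	 * matching LL_SPI_BAUDRATEPRESCALER_DIV2..DIV256. As an illustrative
	 * example, a 64 MHz kernel clock with a 10 MHz request selects
	 * br = 3 (DIV8), i.e. 8 MHz on the bus.
	 */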
	for (br = 1; br <= ARRAY_SIZE(scaler); ++br) {
		uint32_t clk = clock >> br;

		if (clk <= config->frequency) {
			break;
		}
	}

	if (br > ARRAY_SIZE(scaler)) {
		LOG_ERR("Unsupported frequency %uHz, max %uHz, min %uHz",
			    config->frequency,
			    clock >> 1,
			    clock >> ARRAY_SIZE(scaler));
		return -EINVAL;
	}

	LL_SPI_Disable(spi);
	LL_SPI_SetBaudRatePrescaler(spi, scaler[br - 1]);

	if (SPI_MODE_GET(config->operation) & SPI_MODE_CPOL) {
		LL_SPI_SetClockPolarity(spi, LL_SPI_POLARITY_HIGH);
	} else {
		LL_SPI_SetClockPolarity(spi, LL_SPI_POLARITY_LOW);
	}

	if (SPI_MODE_GET(config->operation) & SPI_MODE_CPHA) {
		LL_SPI_SetClockPhase(spi, LL_SPI_PHASE_2EDGE);
	} else {
		LL_SPI_SetClockPhase(spi, LL_SPI_PHASE_1EDGE);
	}

	LL_SPI_SetTransferDirection(spi, LL_SPI_FULL_DUPLEX);

	if (config->operation & SPI_TRANSFER_LSB) {
		LL_SPI_SetTransferBitOrder(spi, LL_SPI_LSB_FIRST);
	} else {
		LL_SPI_SetTransferBitOrder(spi, LL_SPI_MSB_FIRST);
	}

	LL_SPI_DisableCRC(spi);

	if (spi_cs_is_gpio(config) || !IS_ENABLED(CONFIG_SPI_STM32_USE_HW_SS)) {
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
		if (SPI_OP_MODE_GET(config->operation) == SPI_OP_MODE_MASTER) {
			if (LL_SPI_GetNSSPolarity(spi) == LL_SPI_NSS_POLARITY_LOW) {
				LL_SPI_SetInternalSSLevel(spi, LL_SPI_SS_LEVEL_HIGH);
			}
		}
#endif
		LL_SPI_SetNSSMode(spi, LL_SPI_NSS_SOFT);
	} else {
		if (config->operation & SPI_OP_MODE_SLAVE) {
			LL_SPI_SetNSSMode(spi, LL_SPI_NSS_HARD_INPUT);
		} else {
			LL_SPI_SetNSSMode(spi, LL_SPI_NSS_HARD_OUTPUT);
		}
	}

	if (config->operation & SPI_OP_MODE_SLAVE) {
		LL_SPI_SetMode(spi, LL_SPI_MODE_SLAVE);
	} else {
		LL_SPI_SetMode(spi, LL_SPI_MODE_MASTER);
	}

	if (SPI_WORD_SIZE_GET(config->operation) == 8) {
		LL_SPI_SetDataWidth(spi, LL_SPI_DATAWIDTH_8BIT);
	} else {
		LL_SPI_SetDataWidth(spi, LL_SPI_DATAWIDTH_16BIT);
	}

#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
	LL_SPI_SetMasterSSIdleness(spi, cfg->mssi_clocks);
	LL_SPI_SetInterDataIdleness(spi, (cfg->midi_clocks << SPI_CFG2_MIDI_Pos));
#endif

#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_spi_fifo)
	ll_func_set_fifo_threshold_8bit(spi);
#endif

	/* At this point, it's mandatory to set this on the context! */
	data->ctx.config = config;

	LOG_DBG("Installed config %p: freq %uHz (div = %u),"
		    " mode %u/%u/%u, slave %u",
		    config, clock >> br, 1 << br,
		    (SPI_MODE_GET(config->operation) & SPI_MODE_CPOL) ? 1 : 0,
		    (SPI_MODE_GET(config->operation) & SPI_MODE_CPHA) ? 1 : 0,
		    (SPI_MODE_GET(config->operation) & SPI_MODE_LOOP) ? 1 : 0,
		    config->slave);

	return 0;
}

static int spi_stm32_release(const struct device *dev,
			     const struct spi_config *config)
{
	struct spi_stm32_data *data = dev->data;
	const struct spi_stm32_config *cfg = dev->config;

	spi_context_unlock_unconditionally(&data->ctx);
	ll_func_disable_spi(cfg->spi);

	return 0;
}

#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
static int32_t spi_stm32_count_bufset_frames(const struct spi_config *config,
					     const struct spi_buf_set *bufs)
{
	if (bufs == NULL) {
		return 0;
	}

	uint32_t num_bytes = 0;

	for (size_t i = 0; i < bufs->count; i++) {
		num_bytes += bufs->buffers[i].len;
	}

	uint8_t bytes_per_frame = SPI_WORD_SIZE_GET(config->operation) / 8;

	if ((num_bytes % bytes_per_frame) != 0) {
		return -EINVAL;
	}
	return num_bytes / bytes_per_frame;
}

static int32_t spi_stm32_count_total_frames(const struct spi_config *config,
					    const struct spi_buf_set *tx_bufs,
					    const struct spi_buf_set *rx_bufs)
{
	int tx_frames = spi_stm32_count_bufset_frames(config, tx_bufs);

	if (tx_frames < 0) {
		return tx_frames;
	}

	int rx_frames = spi_stm32_count_bufset_frames(config, rx_bufs);

	if (rx_frames < 0) {
		return rx_frames;
	}

	if (tx_frames > UINT16_MAX || rx_frames > UINT16_MAX) {
		return -EMSGSIZE;
	}

	return MAX(rx_frames, tx_frames);
}
#endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) */

static int transceive(const struct device *dev,
		      const struct spi_config *config,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	const struct spi_stm32_config *cfg = dev->config;
	struct spi_stm32_data *data = dev->data;
	SPI_TypeDef *spi = cfg->spi;
	int ret;

	if (!tx_bufs && !rx_bufs) {
		return 0;
	}

#ifndef CONFIG_SPI_STM32_INTERRUPT
	if (asynchronous) {
		return -ENOTSUP;
	}
#endif /* CONFIG_SPI_STM32_INTERRUPT */

	spi_context_lock(&data->ctx, asynchronous, cb, userdata, config);

	spi_stm32_pm_policy_state_lock_get(dev);

	ret = spi_stm32_configure(dev, config);
	if (ret) {
		goto end;
	}

	/* Set buffers info */
	if (SPI_WORD_SIZE_GET(config->operation) == 8) {
		spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);
	} else {
		spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 2);
	}

#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
	if (cfg->fifo_enabled && SPI_OP_MODE_GET(config->operation) == SPI_OP_MODE_MASTER) {
		int total_frames = spi_stm32_count_total_frames(
			config, tx_bufs, rx_bufs);
		if (total_frames < 0) {
			ret = total_frames;
			goto end;
		}
		LL_SPI_SetTransferSize(spi, (uint32_t)total_frames);
	}
#endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) */

	LL_SPI_Enable(spi);

#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
	/* With the STM32MP1, STM32U5 and the STM32H7,
	 * if the device is the SPI master,
	 * the transfer must be started explicitly with
	 * LL_SPI_StartMasterTransfer(spi).
	 */
	if (LL_SPI_GetMode(spi) == LL_SPI_MODE_MASTER) {
		LL_SPI_StartMasterTransfer(spi);
		while (!LL_SPI_IsActiveMasterTransfer(spi)) {
			/* NOP */
		}
	}
#endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) */

#ifdef CONFIG_SOC_SERIES_STM32H7X
	/*
	 * Add a small delay after enabling to prevent transfer stalling at high
	 * system clock frequency (see errata sheet ES0392).
	 */
	k_busy_wait(WAIT_1US);
#endif /* CONFIG_SOC_SERIES_STM32H7X */

	/* This is turned off in spi_stm32_complete(). */
	spi_stm32_cs_control(dev, true);

#ifdef CONFIG_SPI_STM32_INTERRUPT

#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
	if (cfg->fifo_enabled) {
		LL_SPI_EnableIT_EOT(spi);
	}
#endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) */

	ll_func_enable_int_errors(spi);

	if (rx_bufs) {
		ll_func_enable_int_rx_not_empty(spi);
	}

	ll_func_enable_int_tx_empty(spi);

	ret = spi_context_wait_for_completion(&data->ctx);
#else /* CONFIG_SPI_STM32_INTERRUPT */
	do {
		ret = spi_stm32_shift_frames(cfg, data);
	} while (!ret && spi_stm32_transfer_ongoing(data));

	spi_stm32_complete(dev, ret);

#ifdef CONFIG_SPI_SLAVE
	if (spi_context_is_slave(&data->ctx) && !ret) {
		ret = data->ctx.recv_frames;
	}
#endif /* CONFIG_SPI_SLAVE */

#endif /* CONFIG_SPI_STM32_INTERRUPT */

end:
	spi_context_release(&data->ctx, ret);

	return ret;
}

#ifdef CONFIG_SPI_STM32_DMA

static int wait_dma_rx_tx_done(const struct device *dev)
{
	struct spi_stm32_data *data = dev->data;
	int res = -1;
	k_timeout_t timeout;

	/*
	 * In slave mode we do not know when the transaction will start. Hence,
	 * it doesn't make sense to have a timeout in this case.
	 */
	if (IS_ENABLED(CONFIG_SPI_SLAVE) && spi_context_is_slave(&data->ctx)) {
		timeout = K_FOREVER;
	} else {
		timeout = K_MSEC(1000);
	}

	while (1) {
		res = k_sem_take(&data->status_sem, timeout);
		if (res != 0) {
			return res;
		}

		if (data->status_flags & SPI_STM32_DMA_ERROR_FLAG) {
			return -EIO;
		}

		if (data->status_flags & SPI_STM32_DMA_DONE_FLAG) {
			return 0;
		}
	}

	return res;
}

#ifdef CONFIG_DCACHE
static bool buf_in_nocache(uintptr_t buf, size_t len_bytes)
{
	bool buf_within_nocache = false;

#ifdef CONFIG_NOCACHE_MEMORY
	/* Check if the buffer is in the nocache region defined by the linker */
	buf_within_nocache = (buf >= ((uintptr_t)_nocache_ram_start)) &&
		((buf + len_bytes - 1) <= ((uintptr_t)_nocache_ram_end));
	if (buf_within_nocache) {
		return true;
	}
#endif /* CONFIG_NOCACHE_MEMORY */

	/* Check if the buffer is in a nocache memory region defined in the DT */
	buf_within_nocache = mem_attr_check_buf(
		(void *)buf, len_bytes, DT_MEM_ARM(ATTR_MPU_RAM_NOCACHE)) == 0;

	return buf_within_nocache;
}

static bool is_dummy_buffer(const struct spi_buf *buf)
{
	return buf->buf == NULL;
}

static bool spi_buf_set_in_nocache(const struct spi_buf_set *bufs)
{
	for (size_t i = 0; i < bufs->count; i++) {
		const struct spi_buf *buf = &bufs->buffers[i];

		if (!is_dummy_buffer(buf) &&
				!buf_in_nocache((uintptr_t)buf->buf, buf->len)) {
			return false;
		}
	}
	return true;
}
#endif /* CONFIG_DCACHE */

static int transceive_dma(const struct device *dev,
		      const struct spi_config *config,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	const struct spi_stm32_config *cfg = dev->config;
	struct spi_stm32_data *data = dev->data;
	SPI_TypeDef *spi = cfg->spi;
	int ret;
	int err;

	if (!tx_bufs && !rx_bufs) {
		return 0;
	}

	if (asynchronous) {
		return -ENOTSUP;
	}

#ifdef CONFIG_DCACHE
	if ((tx_bufs != NULL && !spi_buf_set_in_nocache(tx_bufs)) ||
		(rx_bufs != NULL && !spi_buf_set_in_nocache(rx_bufs))) {
		return -EFAULT;
	}
#endif /* CONFIG_DCACHE */

	spi_context_lock(&data->ctx, asynchronous, cb, userdata, config);

	spi_stm32_pm_policy_state_lock_get(dev);

	k_sem_reset(&data->status_sem);

	ret = spi_stm32_configure(dev, config);
	if (ret) {
		goto end;
	}

	/* Set buffers info */
	if (SPI_WORD_SIZE_GET(config->operation) == 8) {
		spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);
	} else {
		spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 2);
	}

#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
	/* set the DMA requests before enabling (else the SPI CFG1 register is write protected) */
	LL_SPI_EnableDMAReq_RX(spi);
	LL_SPI_EnableDMAReq_TX(spi);

	LL_SPI_Enable(spi);
	if (LL_SPI_GetMode(spi) == LL_SPI_MODE_MASTER) {
		LL_SPI_StartMasterTransfer(spi);
	}
#else
	LL_SPI_Enable(spi);
#endif /* st_stm32h7_spi */

	/* This is turned off in spi_stm32_complete(). */
	spi_stm32_cs_control(dev, true);

	while (data->ctx.rx_len > 0 || data->ctx.tx_len > 0) {
		size_t dma_len;

		if (data->ctx.rx_len == 0) {
			dma_len = data->ctx.tx_len;
		} else if (data->ctx.tx_len == 0) {
			dma_len = data->ctx.rx_len;
		} else {
			dma_len = MIN(data->ctx.tx_len, data->ctx.rx_len);
		}

		data->status_flags = 0;

		ret = spi_dma_move_buffers(dev, dma_len);
		if (ret != 0) {
			break;
		}

#if !DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
		/* toggle the DMA request to restart the transfer */
		LL_SPI_EnableDMAReq_RX(spi);
		LL_SPI_EnableDMAReq_TX(spi);
#endif /* ! st_stm32h7_spi */

		ret = wait_dma_rx_tx_done(dev);
		if (ret != 0) {
			break;
		}

#ifdef SPI_SR_FTLVL
		while (LL_SPI_GetTxFIFOLevel(spi) > 0) {
		}
#endif /* SPI_SR_FTLVL */

#ifdef CONFIG_SPI_STM32_ERRATA_BUSY
		WAIT_FOR(ll_func_spi_dma_busy(spi) != 0,
			 CONFIG_SPI_STM32_BUSY_FLAG_TIMEOUT,
			 k_yield());
#else
		/* wait until the SPI is no longer busy (the SPI TX fifo is really empty) */
		while (ll_func_spi_dma_busy(spi) == 0) {
		}
#endif /* CONFIG_SPI_STM32_ERRATA_BUSY */

#if !DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
		/* toggle the DMA transfer request */
		LL_SPI_DisableDMAReq_TX(spi);
		LL_SPI_DisableDMAReq_RX(spi);
#endif /* ! st_stm32h7_spi */

		uint8_t frame_size_bytes = bits2bytes(
			SPI_WORD_SIZE_GET(config->operation));

		spi_context_update_tx(&data->ctx, frame_size_bytes, dma_len);
		spi_context_update_rx(&data->ctx, frame_size_bytes, dma_len);
	}

	/* spi_stm32_complete() relies on the SPI status register, so it must
	 * run before the peripheral is disabled
	 */
	spi_stm32_complete(dev, ret);
	/* disable the spi instance after completion */
	LL_SPI_Disable(spi);
	/* on some MCUs, the configuration register is only write-unprotected
	 * while the SPI is disabled
	 */
	LL_SPI_DisableDMAReq_TX(spi);
	LL_SPI_DisableDMAReq_RX(spi);

	err = dma_stop(data->dma_rx.dma_dev, data->dma_rx.channel);
	if (err) {
		LOG_DBG("Rx dma_stop failed with error %d", err);
	}
	err = dma_stop(data->dma_tx.dma_dev, data->dma_tx.channel);
	if (err) {
		LOG_DBG("Tx dma_stop failed with error %d", err);
	}

#ifdef CONFIG_SPI_SLAVE
	if (spi_context_is_slave(&data->ctx) && !ret) {
		ret = data->ctx.recv_frames;
	}
#endif /* CONFIG_SPI_SLAVE */

end:
	spi_context_release(&data->ctx, ret);

	spi_stm32_pm_policy_state_lock_put(dev);

	return ret;
}
#endif /* CONFIG_SPI_STM32_DMA */

static int spi_stm32_transceive(const struct device *dev,
				const struct spi_config *config,
				const struct spi_buf_set *tx_bufs,
				const struct spi_buf_set *rx_bufs)
{
#ifdef CONFIG_SPI_STM32_DMA
	struct spi_stm32_data *data = dev->data;

	if ((data->dma_tx.dma_dev != NULL)
	 && (data->dma_rx.dma_dev != NULL)) {
		return transceive_dma(dev, config, tx_bufs, rx_bufs,
				      false, NULL, NULL);
	}
#endif /* CONFIG_SPI_STM32_DMA */
	return transceive(dev, config, tx_bufs, rx_bufs, false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
static int spi_stm32_transceive_async(const struct device *dev,
				      const struct spi_config *config,
				      const struct spi_buf_set *tx_bufs,
				      const struct spi_buf_set *rx_bufs,
				      spi_callback_t cb,
				      void *userdata)
{
	return transceive(dev, config, tx_bufs, rx_bufs, true, cb, userdata);
}
#endif /* CONFIG_SPI_ASYNC */

static DEVICE_API(spi, api_funcs) = {
	.transceive = spi_stm32_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_stm32_transceive_async,
#endif
#ifdef CONFIG_SPI_RTIO
	.iodev_submit = spi_rtio_iodev_default_submit,
#endif
	.release = spi_stm32_release,
};
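
/*
 * Typical application-side usage of this driver through the Zephyr SPI API
 * (illustrative sketch only: the "sensor0" devicetree node label and the
 * operation flags are assumptions, not part of this driver):
 *
 *   static const struct spi_dt_spec bus = SPI_DT_SPEC_GET(
 *           DT_NODELABEL(sensor0), SPI_OP_MODE_MASTER | SPI_WORD_SET(8), 0);
 *
 *   uint8_t tx_data[2] = { 0x80, 0x00 };
 *   const struct spi_buf tx_buf = { .buf = tx_data, .len = sizeof(tx_data) };
 *   const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 };
 *
 *   int err = spi_write_dt(&bus, &tx);
 */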

static inline bool spi_stm32_is_subghzspi(const struct device *dev)
{
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_spi_subghz)
	const struct spi_stm32_config *cfg = dev->config;

	return cfg->use_subghzspi_nss;
#else
	ARG_UNUSED(dev);
	return false;
#endif /* st_stm32_spi_subghz */
}

static int spi_stm32_init(const struct device *dev)
{
	struct spi_stm32_data *data __attribute__((unused)) = dev->data;
	const struct spi_stm32_config *cfg = dev->config;
	int err;

	if (!device_is_ready(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE))) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}

	err = clock_control_on(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
			       (clock_control_subsys_t) &cfg->pclken[0]);
	if (err < 0) {
		LOG_ERR("Could not enable SPI clock");
		return err;
	}

	if (IS_ENABLED(STM32_SPI_DOMAIN_CLOCK_SUPPORT) && (cfg->pclk_len > 1)) {
		err = clock_control_configure(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
					      (clock_control_subsys_t) &cfg->pclken[1],
					      NULL);
		if (err < 0) {
			LOG_ERR("Could not select SPI domain clock");
			return err;
		}
	}

	if (!spi_stm32_is_subghzspi(dev)) {
		/* Configure DT-provided device signals when available */
		err = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
		if (err < 0) {
			LOG_ERR("SPI pinctrl setup failed (%d)", err);
			return err;
		}
	}

#ifdef CONFIG_SPI_STM32_INTERRUPT
	cfg->irq_config(dev);
#endif /* CONFIG_SPI_STM32_INTERRUPT */

#ifdef CONFIG_SPI_STM32_DMA
	if ((data->dma_rx.dma_dev != NULL) &&
				!device_is_ready(data->dma_rx.dma_dev)) {
		LOG_ERR("%s device not ready", data->dma_rx.dma_dev->name);
		return -ENODEV;
	}

	if ((data->dma_tx.dma_dev != NULL) &&
				!device_is_ready(data->dma_tx.dma_dev)) {
		LOG_ERR("%s device not ready", data->dma_tx.dma_dev->name);
		return -ENODEV;
	}

	LOG_DBG("SPI with DMA transfer");

#endif /* CONFIG_SPI_STM32_DMA */

	err = spi_context_cs_configure_all(&data->ctx);
	if (err < 0) {
		return err;
	}

	spi_context_unlock_unconditionally(&data->ctx);

	return pm_device_runtime_enable(dev);
}

#ifdef CONFIG_PM_DEVICE
static int spi_stm32_pm_action(const struct device *dev,
			       enum pm_device_action action)
{
	const struct spi_stm32_config *config = dev->config;
	const struct device *const clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE);
	int err;

	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		if (!spi_stm32_is_subghzspi(dev)) {
			/* Set pins to active state */
			err = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
			if (err < 0) {
				return err;
			}
		}

		/* enable clock */
		err = clock_control_on(clk, (clock_control_subsys_t)&config->pclken[0]);
		if (err != 0) {
			LOG_ERR("Could not enable SPI clock");
			return err;
		}
		break;
	case PM_DEVICE_ACTION_SUSPEND:
		/* Stop device clock. */
		err = clock_control_off(clk, (clock_control_subsys_t)&config->pclken[0]);
		if (err != 0) {
			LOG_ERR("Could not disable SPI clock");
			return err;
		}

		if (!spi_stm32_is_subghzspi(dev)) {
			/* Move pins to sleep state */
			err = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_SLEEP);
			if ((err < 0) && (err != -ENOENT)) {
				/*
				 * If -ENOENT is returned, no pins were defined for sleep
				 * mode: do not log "SPI pinctrl sleep state not available"
				 * on the console (we might already be going to sleep) and
				 * do not block PM suspend.
				 * Otherwise, return the error.
				 */
				return err;
			}
		}
		break;
	default:
		return -ENOTSUP;
	}

	return 0;
}
#endif /* CONFIG_PM_DEVICE */

#ifdef CONFIG_SPI_STM32_INTERRUPT
#define STM32_SPI_IRQ_HANDLER_DECL(id)					\
	static void spi_stm32_irq_config_func_##id(const struct device *dev)
#define STM32_SPI_IRQ_HANDLER_FUNC(id)					\
	.irq_config = spi_stm32_irq_config_func_##id,
#define STM32_SPI_IRQ_HANDLER(id)					\
static void spi_stm32_irq_config_func_##id(const struct device *dev)	\
{									\
	IRQ_CONNECT(DT_INST_IRQN(id),					\
		    DT_INST_IRQ(id, priority),				\
		    spi_stm32_isr, DEVICE_DT_INST_GET(id), 0);		\
	irq_enable(DT_INST_IRQN(id));					\
}
#else
#define STM32_SPI_IRQ_HANDLER_DECL(id)
#define STM32_SPI_IRQ_HANDLER_FUNC(id)
#define STM32_SPI_IRQ_HANDLER(id)
#endif /* CONFIG_SPI_STM32_INTERRUPT */

#define SPI_DMA_CHANNEL_INIT(index, dir, dir_cap, src_dev, dest_dev)	\
	.dma_dev = DEVICE_DT_GET(STM32_DMA_CTLR(index, dir)),		\
	.channel = DT_INST_DMAS_CELL_BY_NAME(index, dir, channel),	\
	.dma_cfg = {							\
		.dma_slot = STM32_DMA_SLOT(index, dir, slot),		\
		.channel_direction = STM32_DMA_CONFIG_DIRECTION(	\
					STM32_DMA_CHANNEL_CONFIG(index, dir)),	\
		.source_data_size = STM32_DMA_CONFIG_##src_dev##_DATA_SIZE(	\
					STM32_DMA_CHANNEL_CONFIG(index, dir)),	\
		.dest_data_size = STM32_DMA_CONFIG_##dest_dev##_DATA_SIZE(	\
				STM32_DMA_CHANNEL_CONFIG(index, dir)),	\
		.source_burst_length = 1, /* SINGLE transfer */		\
		.dest_burst_length = 1, /* SINGLE transfer */		\
		.channel_priority = STM32_DMA_CONFIG_PRIORITY(		\
					STM32_DMA_CHANNEL_CONFIG(index, dir)),\
		.dma_callback = dma_callback,				\
		.block_count = 2,					\
	},								\
	.src_addr_increment = STM32_DMA_CONFIG_##src_dev##_ADDR_INC(	\
				STM32_DMA_CHANNEL_CONFIG(index, dir)),	\
	.dst_addr_increment = STM32_DMA_CONFIG_##dest_dev##_ADDR_INC(	\
				STM32_DMA_CHANNEL_CONFIG(index, dir)),	\
	.fifo_threshold = STM32_DMA_FEATURES_FIFO_THRESHOLD(		\
				STM32_DMA_FEATURES(index, dir)),	\

#ifdef CONFIG_SPI_STM32_DMA
#define SPI_DMA_CHANNEL(id, dir, DIR, src, dest)			\
	.dma_##dir = {							\
		COND_CODE_1(DT_INST_DMAS_HAS_NAME(id, dir),		\
			(SPI_DMA_CHANNEL_INIT(id, dir, DIR, src, dest)),\
			(NULL))						\
		},
#define SPI_DMA_STATUS_SEM(id)						\
	.status_sem = Z_SEM_INITIALIZER(				\
		spi_stm32_dev_data_##id.status_sem, 0, 1),
#else
#define SPI_DMA_CHANNEL(id, dir, DIR, src, dest)
#define SPI_DMA_STATUS_SEM(id)
#endif /* CONFIG_SPI_STM32_DMA */

#define SPI_SUPPORTS_FIFO(id)	DT_INST_NODE_HAS_PROP(id, fifo_enable)
#define SPI_GET_FIFO_PROP(id)	DT_INST_PROP(id, fifo_enable)
#define SPI_FIFO_ENABLED(id)	COND_CODE_1(SPI_SUPPORTS_FIFO(id), (SPI_GET_FIFO_PROP(id)), (0))
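
/*
 * Example devicetree fragment enabling the optional FIFO on an instance
 * (illustrative sketch; "spi1" is an assumed node label, and the property
 * comes from the st,stm32-spi-fifo binding):
 *
 *     &spi1 {
 *             fifo-enable;
 *             status = "okay";
 *     };
 */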

#define STM32_SPI_INIT(id)						\
STM32_SPI_IRQ_HANDLER_DECL(id);						\
									\
PINCTRL_DT_INST_DEFINE(id);						\
									\
static const struct stm32_pclken pclken_##id[] =			\
					       STM32_DT_INST_CLOCKS(id);\
									\
static const struct spi_stm32_config spi_stm32_cfg_##id = {		\
	.spi = (SPI_TypeDef *) DT_INST_REG_ADDR(id),			\
	.pclken = pclken_##id,						\
	.pclk_len = DT_INST_NUM_CLOCKS(id),				\
	.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(id),			\
	.fifo_enabled = SPI_FIFO_ENABLED(id),				\
	STM32_SPI_IRQ_HANDLER_FUNC(id)					\
	IF_ENABLED(DT_HAS_COMPAT_STATUS_OKAY(st_stm32_spi_subghz),	\
		(.use_subghzspi_nss =					\
			DT_INST_PROP_OR(id, use_subghzspi_nss, false),))\
	IF_ENABLED(DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi),		\
		(.midi_clocks =						\
			DT_INST_PROP(id, midi_clock),))			\
	IF_ENABLED(DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi),		\
		(.mssi_clocks =						\
			DT_INST_PROP(id, mssi_clock),))			\
};									\
									\
static struct spi_stm32_data spi_stm32_dev_data_##id = {		\
	SPI_CONTEXT_INIT_LOCK(spi_stm32_dev_data_##id, ctx),		\
	SPI_CONTEXT_INIT_SYNC(spi_stm32_dev_data_##id, ctx),		\
	SPI_DMA_CHANNEL(id, rx, RX, PERIPHERAL, MEMORY)			\
	SPI_DMA_CHANNEL(id, tx, TX, MEMORY, PERIPHERAL)			\
	SPI_DMA_STATUS_SEM(id)						\
	SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(id), ctx)		\
};									\
									\
PM_DEVICE_DT_INST_DEFINE(id, spi_stm32_pm_action);			\
									\
SPI_DEVICE_DT_INST_DEFINE(id, spi_stm32_init, PM_DEVICE_DT_INST_GET(id),\
		    &spi_stm32_dev_data_##id, &spi_stm32_cfg_##id,	\
		    POST_KERNEL, CONFIG_SPI_INIT_PRIORITY,		\
		    &api_funcs);					\
									\
STM32_SPI_IRQ_HANDLER(id)

DT_INST_FOREACH_STATUS_OKAY(STM32_SPI_INIT)