1 /*
2 * Copyright (c) 2016 BayLibre, SAS
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #define DT_DRV_COMPAT st_stm32_spi
8
9 #define LOG_LEVEL CONFIG_SPI_LOG_LEVEL
10 #include <zephyr/logging/log.h>
11 LOG_MODULE_REGISTER(spi_ll_stm32);
12
13 #include <zephyr/sys/util.h>
14 #include <zephyr/kernel.h>
15 #include <soc.h>
16 #include <stm32_ll_spi.h>
17 #include <errno.h>
18 #include <zephyr/drivers/spi.h>
19 #include <zephyr/drivers/spi/rtio.h>
20 #include <zephyr/drivers/pinctrl.h>
21 #include <zephyr/toolchain.h>
22 #include <zephyr/pm/policy.h>
23 #include <zephyr/pm/device.h>
24 #include <zephyr/pm/device_runtime.h>
25 #ifdef CONFIG_SPI_STM32_DMA
26 #include <zephyr/drivers/dma/dma_stm32.h>
27 #include <zephyr/drivers/dma.h>
28 #endif
29 #include <zephyr/drivers/clock_control/stm32_clock_control.h>
30 #include <zephyr/drivers/clock_control.h>
31 #include <zephyr/irq.h>
32 #include <zephyr/mem_mgmt/mem_attr.h>
33
34 #ifdef CONFIG_DCACHE
35 #include <zephyr/dt-bindings/memory-attr/memory-attr-arm.h>
36 #endif /* CONFIG_DCACHE */
37
38 #ifdef CONFIG_NOCACHE_MEMORY
39 #include <zephyr/linker/linker-defs.h>
40 #elif defined(CONFIG_CACHE_MANAGEMENT)
41 #include <zephyr/arch/cache.h>
42 #endif /* CONFIG_NOCACHE_MEMORY */
43
44 #include "spi_ll_stm32.h"
45
46 #if defined(CONFIG_DCACHE) && \
47 !defined(CONFIG_NOCACHE_MEMORY)
48 /* currently, manual cache coherency management is only done on dummy_rx_tx_buffer */
49 #define SPI_STM32_MANUAL_CACHE_COHERENCY_REQUIRED 1
50 #else
51 #define SPI_STM32_MANUAL_CACHE_COHERENCY_REQUIRED 0
52 #endif /* defined(CONFIG_DCACHE) && !defined(CONFIG_NOCACHE_MEMORY) */
53
54 #define WAIT_1US 1U
55
56 /*
57 * Check for SPI_SR_FRE to determine support for TI mode frame format
58 * error flag, because STM32F1 SoCs do not support it and STM32CUBE
59 * for F1 family defines an unused LL_SPI_SR_FRE.
60 */
61 #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
62 #define SPI_STM32_ERR_MSK (LL_SPI_SR_UDR | LL_SPI_SR_CRCE | LL_SPI_SR_MODF | \
63 LL_SPI_SR_OVR | LL_SPI_SR_TIFRE)
64 #else
65 #if defined(LL_SPI_SR_UDR)
66 #define SPI_STM32_ERR_MSK (LL_SPI_SR_UDR | LL_SPI_SR_CRCERR | LL_SPI_SR_MODF | \
67 LL_SPI_SR_OVR | LL_SPI_SR_FRE)
68 #elif defined(SPI_SR_FRE)
69 #define SPI_STM32_ERR_MSK (LL_SPI_SR_CRCERR | LL_SPI_SR_MODF | \
70 LL_SPI_SR_OVR | LL_SPI_SR_FRE)
71 #else
72 #define SPI_STM32_ERR_MSK (LL_SPI_SR_CRCERR | LL_SPI_SR_MODF | LL_SPI_SR_OVR)
73 #endif
74 #endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) */
75
76 static void spi_stm32_pm_policy_state_lock_get(const struct device *dev)
77 {
78 if (IS_ENABLED(CONFIG_PM)) {
79 struct spi_stm32_data *data = dev->data;
80
81 if (!data->pm_policy_state_on) {
82 data->pm_policy_state_on = true;
83 pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
84 if (IS_ENABLED(CONFIG_PM_S2RAM)) {
85 pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_RAM, PM_ALL_SUBSTATES);
86 }
87 pm_device_runtime_get(dev);
88 }
89 }
90 }
91
92 static void spi_stm32_pm_policy_state_lock_put(const struct device *dev)
93 {
94 if (IS_ENABLED(CONFIG_PM)) {
95 struct spi_stm32_data *data = dev->data;
96
97 if (data->pm_policy_state_on) {
98 data->pm_policy_state_on = false;
99 pm_device_runtime_put(dev);
100 pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
101 if (IS_ENABLED(CONFIG_PM_S2RAM)) {
102 pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_RAM, PM_ALL_SUBSTATES);
103 }
104 }
105 }
106 }
107
108 #ifdef CONFIG_SPI_STM32_DMA
109 static uint32_t bits2bytes(uint32_t bits)
110 {
111 return bits / 8;
112 }
113
114 /* dummy buffer is used for transferring NOP when tx buf is null
115 * and used as a dummy sink for when rx buf is null.
116 */
117 /*
118 * If Nocache Memory is supported, buffer will be placed in nocache region by
119 * the linker to avoid potential DMA cache-coherency problems.
120 * If Nocache Memory is not supported, cache coherency might need to be kept
121 * manually. See SPI_STM32_MANUAL_CACHE_COHERENCY_REQUIRED.
122 */
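/*
 * Note (added): the 32-byte alignment keeps the buffer cache-line aligned
 * (32-byte D-cache lines on typical Cortex-M7 parts), so the manual flush in
 * spi_stm32_dma_tx_load() used when SPI_STM32_MANUAL_CACHE_COHERENCY_REQUIRED
 * is set does not have to touch unrelated data.
 */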
123 static __aligned(32) uint32_t dummy_rx_tx_buffer __nocache;
124
125 /* This function is executed in the interrupt context */
126 static void dma_callback(const struct device *dma_dev, void *arg,
127 uint32_t channel, int status)
128 {
129 ARG_UNUSED(dma_dev);
130
131 /* arg holds SPI DMA data
132 * Passed in spi_stm32_dma_tx/rx_load()
133 */
134 struct spi_stm32_data *spi_dma_data = arg;
135
136 if (status < 0) {
137 LOG_ERR("DMA callback error with channel %d.", channel);
138 spi_dma_data->status_flags |= SPI_STM32_DMA_ERROR_FLAG;
139 } else {
140 /* identify the origin of this callback */
141 if (channel == spi_dma_data->dma_tx.channel) {
142 /* this part of the transfer ends */
143 spi_dma_data->status_flags |= SPI_STM32_DMA_TX_DONE_FLAG;
144 } else if (channel == spi_dma_data->dma_rx.channel) {
145 /* this part of the transfer ends */
146 spi_dma_data->status_flags |= SPI_STM32_DMA_RX_DONE_FLAG;
147 } else {
148 LOG_ERR("DMA callback channel %d is not valid.", channel);
149 spi_dma_data->status_flags |= SPI_STM32_DMA_ERROR_FLAG;
150 }
151 }
152
153 k_sem_give(&spi_dma_data->status_sem);
154 }
155
156 static int spi_stm32_dma_tx_load(const struct device *dev, const uint8_t *buf,
157 size_t len)
158 {
159 const struct spi_stm32_config *cfg = dev->config;
160 struct spi_stm32_data *data = dev->data;
161 struct dma_block_config *blk_cfg;
162 int ret;
163
164 /* remember active TX DMA channel (used in callback) */
165 struct stream *stream = &data->dma_tx;
166
167 blk_cfg = &stream->dma_blk_cfg;
168
169 /* prepare the block for this TX DMA channel */
170 memset(blk_cfg, 0, sizeof(struct dma_block_config));
171 blk_cfg->block_size = len;
172
173 /* tx direction has memory as source and periph as dest. */
174 if (buf == NULL) {
175 /* if tx buf is null, send NOP on the line. */
176 dummy_rx_tx_buffer = 0;
177 #if SPI_STM32_MANUAL_CACHE_COHERENCY_REQUIRED
178 arch_dcache_flush_range((void *)&dummy_rx_tx_buffer, sizeof(uint32_t));
179 #endif /* SPI_STM32_MANUAL_CACHE_COHERENCY_REQUIRED */
180 blk_cfg->source_address = (uint32_t)&dummy_rx_tx_buffer;
181 blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
182 } else {
183 blk_cfg->source_address = (uint32_t)buf;
184 if (data->dma_tx.src_addr_increment) {
185 blk_cfg->source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
186 } else {
187 blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
188 }
189 }
190
191 blk_cfg->dest_address = ll_func_dma_get_reg_addr(cfg->spi, SPI_STM32_DMA_TX);
192 /* FIFO mode is not used here */
193 if (data->dma_tx.dst_addr_increment) {
194 blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
195 } else {
196 blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
197 }
198
199 /* give the fifo mode from the DT */
200 blk_cfg->fifo_mode_control = data->dma_tx.fifo_threshold;
201
202 /* direction is given by the DT */
203 stream->dma_cfg.head_block = blk_cfg;
204 /* give the dma channel data as arg, as the callback comes from the dma */
205 stream->dma_cfg.user_data = data;
206 /* pass our client origin to the dma: data->dma_tx.dma_channel */
207 ret = dma_config(data->dma_tx.dma_dev, data->dma_tx.channel,
208 &stream->dma_cfg);
209 /* the channel is the actual stream from 0 */
210 if (ret != 0) {
211 return ret;
212 }
213
214 /* gives the request ID to the dma mux */
215 return dma_start(data->dma_tx.dma_dev, data->dma_tx.channel);
216 }
217
218 static int spi_stm32_dma_rx_load(const struct device *dev, uint8_t *buf,
219 size_t len)
220 {
221 const struct spi_stm32_config *cfg = dev->config;
222 struct spi_stm32_data *data = dev->data;
223 struct dma_block_config *blk_cfg;
224 int ret;
225
226 /* retrieve active RX DMA channel (used in callback) */
227 struct stream *stream = &data->dma_rx;
228
229 blk_cfg = &stream->dma_blk_cfg;
230
231 /* prepare the block for this RX DMA channel */
232 memset(blk_cfg, 0, sizeof(struct dma_block_config));
233 blk_cfg->block_size = len;
234
235
236 /* rx direction has periph as source and mem as dest. */
237 if (buf == NULL) {
238 /* if rx buff is null, then write data to dummy address. */
239 blk_cfg->dest_address = (uint32_t)&dummy_rx_tx_buffer;
240 blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
241 } else {
242 blk_cfg->dest_address = (uint32_t)buf;
243 if (data->dma_rx.dst_addr_increment) {
244 blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
245 } else {
246 blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
247 }
248 }
249
250 blk_cfg->source_address = ll_func_dma_get_reg_addr(cfg->spi, SPI_STM32_DMA_RX);
251 if (data->dma_rx.src_addr_increment) {
252 blk_cfg->source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
253 } else {
254 blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
255 }
256
257 /* give the fifo mode from the DT */
258 blk_cfg->fifo_mode_control = data->dma_rx.fifo_threshold;
259
260 /* direction is given by the DT */
261 stream->dma_cfg.head_block = blk_cfg;
262 stream->dma_cfg.user_data = data;
263
264
265 /* pass our client origin to the dma: data->dma_rx.channel */
266 ret = dma_config(data->dma_rx.dma_dev, data->dma_rx.channel,
267 &stream->dma_cfg);
268 /* the channel is the actual stream from 0 */
269 if (ret != 0) {
270 return ret;
271 }
272
273 /* gives the request ID to the dma mux */
274 return dma_start(data->dma_rx.dma_dev, data->dma_rx.channel);
275 }
276
277 static int spi_dma_move_rx_buffers(const struct device *dev, size_t len)
278 {
279 struct spi_stm32_data *data = dev->data;
280 size_t dma_segment_len;
281
282 dma_segment_len = len * data->dma_rx.dma_cfg.dest_data_size;
283 return spi_stm32_dma_rx_load(dev, data->ctx.rx_buf, dma_segment_len);
284 }
285
286 static int spi_dma_move_tx_buffers(const struct device *dev, size_t len)
287 {
288 struct spi_stm32_data *data = dev->data;
289 size_t dma_segment_len;
290
291 dma_segment_len = len * data->dma_tx.dma_cfg.source_data_size;
292 return spi_stm32_dma_tx_load(dev, data->ctx.tx_buf, dma_segment_len);
293 }
294
295 static int spi_dma_move_buffers(const struct device *dev, size_t len)
296 {
297 int ret;
298
299 ret = spi_dma_move_rx_buffers(dev, len);
300
301 if (ret != 0) {
302 return ret;
303 }
304
305 return spi_dma_move_tx_buffers(dev, len);
306 }
307
308 #endif /* CONFIG_SPI_STM32_DMA */
309
310 /* Value to shift out when no application data needs transmitting. */
311 #define SPI_STM32_TX_NOP 0x00
312
313 static void spi_stm32_send_next_frame(SPI_TypeDef *spi,
314 struct spi_stm32_data *data)
315 {
316 const uint8_t frame_size = SPI_WORD_SIZE_GET(data->ctx.config->operation);
317 uint32_t tx_frame = SPI_STM32_TX_NOP;
318
319 if (frame_size == 8) {
320 if (spi_context_tx_buf_on(&data->ctx)) {
321 tx_frame = UNALIGNED_GET((uint8_t *)(data->ctx.tx_buf));
322 }
323 LL_SPI_TransmitData8(spi, tx_frame);
324 spi_context_update_tx(&data->ctx, 1, 1);
325 } else {
326 if (spi_context_tx_buf_on(&data->ctx)) {
327 tx_frame = UNALIGNED_GET((uint16_t *)(data->ctx.tx_buf));
328 }
329 LL_SPI_TransmitData16(spi, tx_frame);
330 spi_context_update_tx(&data->ctx, 2, 1);
331 }
332 }
333
334 static void spi_stm32_read_next_frame(SPI_TypeDef *spi,
335 struct spi_stm32_data *data)
336 {
337 const uint8_t frame_size = SPI_WORD_SIZE_GET(data->ctx.config->operation);
338 uint32_t rx_frame = 0;
339
340 if (frame_size == 8) {
341 rx_frame = LL_SPI_ReceiveData8(spi);
342 if (spi_context_rx_buf_on(&data->ctx)) {
343 UNALIGNED_PUT(rx_frame, (uint8_t *)data->ctx.rx_buf);
344 }
345 spi_context_update_rx(&data->ctx, 1, 1);
346 } else {
347 rx_frame = LL_SPI_ReceiveData16(spi);
348 if (spi_context_rx_buf_on(&data->ctx)) {
349 UNALIGNED_PUT(rx_frame, (uint16_t *)data->ctx.rx_buf);
350 }
351 spi_context_update_rx(&data->ctx, 2, 1);
352 }
353 }
354
355 static bool spi_stm32_transfer_ongoing(struct spi_stm32_data *data)
356 {
357 return spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx);
358 }
359
360 static int spi_stm32_get_err(SPI_TypeDef *spi)
361 {
362 uint32_t sr = LL_SPI_ReadReg(spi, SR);
363
364 if (sr & SPI_STM32_ERR_MSK) {
365 LOG_ERR("%s: err=%d", __func__,
366 sr & (uint32_t)SPI_STM32_ERR_MSK);
367
368 /* OVR error must be explicitly cleared */
369 if (LL_SPI_IsActiveFlag_OVR(spi)) {
370 LL_SPI_ClearFlag_OVR(spi);
371 }
372
373 return -EIO;
374 }
375
376 return 0;
377 }
378
379 static void spi_stm32_shift_fifo(SPI_TypeDef *spi, struct spi_stm32_data *data)
380 {
381 uint32_t transfer_dir = LL_SPI_GetTransferDirection(spi);
382
383 if (transfer_dir != LL_SPI_HALF_DUPLEX_TX &&
384 ll_func_rx_is_not_empty(spi)) {
385 spi_stm32_read_next_frame(spi, data);
386 }
387
388 if (transfer_dir != LL_SPI_HALF_DUPLEX_RX &&
389 ll_func_tx_is_not_full(spi)) {
390 spi_stm32_send_next_frame(spi, data);
391 }
392 }
393
394 /* Shift a SPI frame as master. */
395 static void spi_stm32_shift_m(const struct spi_stm32_config *cfg,
396 struct spi_stm32_data *data)
397 {
398 if (cfg->fifo_enabled) {
399 spi_stm32_shift_fifo(cfg->spi, data);
400 } else {
401 uint32_t transfer_dir = LL_SPI_GetTransferDirection(cfg->spi);
402
403 if (transfer_dir != LL_SPI_HALF_DUPLEX_RX) {
404 while (!ll_func_tx_is_not_full(cfg->spi)) {
405 /* NOP */
406 }
407
408 spi_stm32_send_next_frame(cfg->spi, data);
409 }
410
411 if (transfer_dir != LL_SPI_HALF_DUPLEX_TX) {
412 while (!ll_func_rx_is_not_empty(cfg->spi)) {
413 /* NOP */
414 }
415
416 spi_stm32_read_next_frame(cfg->spi, data);
417 }
418 }
419 }
420
421 /* Shift a SPI frame as slave. */
422 static void spi_stm32_shift_s(SPI_TypeDef *spi, struct spi_stm32_data *data)
423 {
424 if (ll_func_tx_is_not_full(spi) && spi_context_tx_on(&data->ctx)) {
425 uint16_t tx_frame;
426
427 if (SPI_WORD_SIZE_GET(data->ctx.config->operation) == 8) {
428 tx_frame = UNALIGNED_GET((uint8_t *)(data->ctx.tx_buf));
429 LL_SPI_TransmitData8(spi, tx_frame);
430 spi_context_update_tx(&data->ctx, 1, 1);
431 } else {
432 tx_frame = UNALIGNED_GET((uint16_t *)(data->ctx.tx_buf));
433 LL_SPI_TransmitData16(spi, tx_frame);
434 spi_context_update_tx(&data->ctx, 2, 1);
435 }
436 } else {
437 ll_func_disable_int_tx_empty(spi);
438 }
439
440 if (ll_func_rx_is_not_empty(spi) &&
441 spi_context_rx_buf_on(&data->ctx)) {
442 uint16_t rx_frame;
443
444 if (SPI_WORD_SIZE_GET(data->ctx.config->operation) == 8) {
445 rx_frame = LL_SPI_ReceiveData8(spi);
446 UNALIGNED_PUT(rx_frame, (uint8_t *)data->ctx.rx_buf);
447 spi_context_update_rx(&data->ctx, 1, 1);
448 } else {
449 rx_frame = LL_SPI_ReceiveData16(spi);
450 UNALIGNED_PUT(rx_frame, (uint16_t *)data->ctx.rx_buf);
451 spi_context_update_rx(&data->ctx, 2, 1);
452 }
453 }
454 }
455
456 /*
457 * Without a FIFO, we can only shift out one frame's worth of SPI
458 * data, and read the response back.
459 *
460 * TODO: support 16-bit data frames.
461 */
462 static int spi_stm32_shift_frames(const struct spi_stm32_config *cfg,
463 struct spi_stm32_data *data)
464 {
465 spi_operation_t operation = data->ctx.config->operation;
466
467 if (SPI_OP_MODE_GET(operation) == SPI_OP_MODE_MASTER) {
468 spi_stm32_shift_m(cfg, data);
469 } else {
470 spi_stm32_shift_s(cfg->spi, data);
471 }
472
473 return spi_stm32_get_err(cfg->spi);
474 }
475
476 static void spi_stm32_cs_control(const struct device *dev, bool on)
477 {
478 struct spi_stm32_data *data = dev->data;
479
480 spi_context_cs_control(&data->ctx, on);
481
482 #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_spi_subghz)
483 const struct spi_stm32_config *cfg = dev->config;
484
485 if (cfg->use_subghzspi_nss) {
486 if (on) {
487 LL_PWR_SelectSUBGHZSPI_NSS();
488 } else {
489 LL_PWR_UnselectSUBGHZSPI_NSS();
490 }
491 }
492 #endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32_spi_subghz) */
493 }
494
495 static void spi_stm32_complete(const struct device *dev, int status)
496 {
497 const struct spi_stm32_config *cfg = dev->config;
498 SPI_TypeDef *spi = cfg->spi;
499 struct spi_stm32_data *data = dev->data;
500
501 #ifdef CONFIG_SPI_STM32_INTERRUPT
502 ll_func_disable_int_tx_empty(spi);
503 ll_func_disable_int_rx_not_empty(spi);
504 ll_func_disable_int_errors(spi);
505
506 #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
507 if (cfg->fifo_enabled) {
508 LL_SPI_DisableIT_EOT(spi);
509 }
510 #endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) */
511
512 #endif /* CONFIG_SPI_STM32_INTERRUPT */
513
514
515 #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_spi_fifo)
516 /* Flush RX buffer */
517 while (ll_func_rx_is_not_empty(spi)) {
518 (void) LL_SPI_ReceiveData8(spi);
519 }
520 #endif /* compat st_stm32_spi_fifo*/
521
522 if (LL_SPI_GetMode(spi) == LL_SPI_MODE_MASTER) {
523 while (ll_func_spi_is_busy(spi)) {
524 /* NOP */
525 }
526
527 spi_stm32_cs_control(dev, false);
528 }
529
530 /* BSY flag is cleared when MODF flag is raised */
531 if (LL_SPI_IsActiveFlag_MODF(spi)) {
532 LL_SPI_ClearFlag_MODF(spi);
533 }
534
535 #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
536 uint32_t transfer_dir = LL_SPI_GetTransferDirection(spi);
537
538 if (cfg->fifo_enabled) {
539 LL_SPI_ClearFlag_TXTF(spi);
540 LL_SPI_ClearFlag_OVR(spi);
541 LL_SPI_ClearFlag_EOT(spi);
542 LL_SPI_SetTransferSize(spi, 0);
543 } else if (transfer_dir == LL_SPI_HALF_DUPLEX_RX) {
544 LL_SPI_SetTransferSize(spi, 0);
545 }
546 #endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) */
547
548 if (!(data->ctx.config->operation & SPI_HOLD_ON_CS)) {
549 ll_func_disable_spi(spi);
550 }
551
552 #ifdef CONFIG_SPI_STM32_INTERRUPT
553 spi_context_complete(&data->ctx, dev, status);
554 #endif
555
556 spi_stm32_pm_policy_state_lock_put(dev);
557 }
558
559 #ifdef CONFIG_SPI_STM32_INTERRUPT
560 static void spi_stm32_isr(const struct device *dev)
561 {
562 const struct spi_stm32_config *cfg = dev->config;
563 struct spi_stm32_data *data = dev->data;
564 SPI_TypeDef *spi = cfg->spi;
565 int err;
566
567 /* Some spurious interrupts are triggered when SPI is not enabled; ignore them.
568 * Do it only when fifo is enabled to leave non-fifo functionality untouched for now
569 */
570 if (cfg->fifo_enabled) {
571 if (!LL_SPI_IsEnabled(spi)) {
572 return;
573 }
574 }
575
576 err = spi_stm32_get_err(spi);
577 if (err) {
578 spi_stm32_complete(dev, err);
579 return;
580 }
581
582 if (spi_stm32_transfer_ongoing(data)) {
583 err = spi_stm32_shift_frames(cfg, data);
584 }
585
586 if (err) {
587 spi_stm32_complete(dev, err);
588 }
589
590 uint32_t transfer_dir = LL_SPI_GetTransferDirection(spi);
591
592 if (transfer_dir == LL_SPI_FULL_DUPLEX) {
593 if (!spi_stm32_transfer_ongoing(data)) {
594 spi_stm32_complete(dev, err);
595 }
596 } else if (transfer_dir == LL_SPI_HALF_DUPLEX_TX) {
597 if (!spi_context_tx_on(&data->ctx)) {
598 spi_stm32_complete(dev, err);
599 }
600 } else {
601 if (!spi_context_rx_on(&data->ctx)) {
602 spi_stm32_complete(dev, err);
603 }
604 }
605 }
606 #endif /* CONFIG_SPI_STM32_INTERRUPT */
607
608 static int spi_stm32_configure(const struct device *dev,
609 const struct spi_config *config,
610 bool write)
611 {
612 const struct spi_stm32_config *cfg = dev->config;
613 struct spi_stm32_data *data = dev->data;
614 const uint32_t scaler[] = {
615 LL_SPI_BAUDRATEPRESCALER_DIV2,
616 LL_SPI_BAUDRATEPRESCALER_DIV4,
617 LL_SPI_BAUDRATEPRESCALER_DIV8,
618 LL_SPI_BAUDRATEPRESCALER_DIV16,
619 LL_SPI_BAUDRATEPRESCALER_DIV32,
620 LL_SPI_BAUDRATEPRESCALER_DIV64,
621 LL_SPI_BAUDRATEPRESCALER_DIV128,
622 LL_SPI_BAUDRATEPRESCALER_DIV256
623 };
624 SPI_TypeDef *spi = cfg->spi;
625 uint32_t clock;
626 int br;
627
628 if (spi_context_configured(&data->ctx, config)) {
629 if (config->operation & SPI_HALF_DUPLEX) {
630 if (write) {
631 LL_SPI_SetTransferDirection(spi, LL_SPI_HALF_DUPLEX_TX);
632 } else {
633 LL_SPI_SetTransferDirection(spi, LL_SPI_HALF_DUPLEX_RX);
634 }
635 }
636 return 0;
637 }
638
639 if ((SPI_WORD_SIZE_GET(config->operation) != 8)
640 && (SPI_WORD_SIZE_GET(config->operation) != 16)) {
641 return -ENOTSUP;
642 }
643
644 /* configure the frame format Motorola (default) or TI */
645 if ((config->operation & SPI_FRAME_FORMAT_TI) == SPI_FRAME_FORMAT_TI) {
646 #ifdef LL_SPI_PROTOCOL_TI
647 LL_SPI_SetStandard(spi, LL_SPI_PROTOCOL_TI);
648 #else
649 LOG_ERR("Frame Format TI not supported");
650 /* on stm32F1 or some stm32L1 (cat1,2) without SPI_CR2_FRF */
651 return -ENOTSUP;
652 #endif
653 #if defined(LL_SPI_PROTOCOL_MOTOROLA) && defined(SPI_CR2_FRF)
654 } else {
655 LL_SPI_SetStandard(spi, LL_SPI_PROTOCOL_MOTOROLA);
656 #endif
657 }
658
659 if (IS_ENABLED(STM32_SPI_DOMAIN_CLOCK_SUPPORT) && (cfg->pclk_len > 1)) {
660 if (clock_control_get_rate(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
661 (clock_control_subsys_t) &cfg->pclken[1], &clock) < 0) {
662 LOG_ERR("Failed call clock_control_get_rate(pclk[1])");
663 return -EIO;
664 }
665 } else {
666 if (clock_control_get_rate(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
667 (clock_control_subsys_t) &cfg->pclken[0], &clock) < 0) {
668 LOG_ERR("Failed call clock_control_get_rate(pclk[0])");
669 return -EIO;
670 }
671 }
672
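/*
 * Note (added): pick the smallest prescaler (2^br) whose resulting clock does
 * not exceed the requested frequency. For example (illustrative values), with
 * a 64 MHz kernel clock and a 10 MHz request, br = 3 is chosen
 * (64 MHz / 8 = 8 MHz), i.e. scaler[2] == LL_SPI_BAUDRATEPRESCALER_DIV8.
 */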
673 for (br = 1 ; br <= ARRAY_SIZE(scaler) ; ++br) {
674 uint32_t clk = clock >> br;
675
676 if (clk <= config->frequency) {
677 break;
678 }
679 }
680
681 if (br > ARRAY_SIZE(scaler)) {
682 LOG_ERR("Unsupported frequency %uHz, max %uHz, min %uHz",
683 config->frequency,
684 clock >> 1,
685 clock >> ARRAY_SIZE(scaler));
686 return -EINVAL;
687 }
688
689 LL_SPI_Disable(spi);
690 LL_SPI_SetBaudRatePrescaler(spi, scaler[br - 1]);
691
692 if (SPI_MODE_GET(config->operation) & SPI_MODE_CPOL) {
693 LL_SPI_SetClockPolarity(spi, LL_SPI_POLARITY_HIGH);
694 } else {
695 LL_SPI_SetClockPolarity(spi, LL_SPI_POLARITY_LOW);
696 }
697
698 if (SPI_MODE_GET(config->operation) & SPI_MODE_CPHA) {
699 LL_SPI_SetClockPhase(spi, LL_SPI_PHASE_2EDGE);
700 } else {
701 LL_SPI_SetClockPhase(spi, LL_SPI_PHASE_1EDGE);
702 }
703
704 if (config->operation & SPI_HALF_DUPLEX) {
705 if (write) {
706 LL_SPI_SetTransferDirection(spi, LL_SPI_HALF_DUPLEX_TX);
707 } else {
708 LL_SPI_SetTransferDirection(spi, LL_SPI_HALF_DUPLEX_RX);
709 }
710 } else {
711 LL_SPI_SetTransferDirection(spi, LL_SPI_FULL_DUPLEX);
712 }
713
714 if (config->operation & SPI_TRANSFER_LSB) {
715 LL_SPI_SetTransferBitOrder(spi, LL_SPI_LSB_FIRST);
716 } else {
717 LL_SPI_SetTransferBitOrder(spi, LL_SPI_MSB_FIRST);
718 }
719
720 LL_SPI_DisableCRC(spi);
721
722 if (spi_cs_is_gpio(config) || !IS_ENABLED(CONFIG_SPI_STM32_USE_HW_SS)) {
723 #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
724 if (SPI_OP_MODE_GET(config->operation) == SPI_OP_MODE_MASTER) {
725 if (LL_SPI_GetNSSPolarity(spi) == LL_SPI_NSS_POLARITY_LOW)
726 LL_SPI_SetInternalSSLevel(spi, LL_SPI_SS_LEVEL_HIGH);
727 }
728 #endif
729 LL_SPI_SetNSSMode(spi, LL_SPI_NSS_SOFT);
730 } else {
731 if (config->operation & SPI_OP_MODE_SLAVE) {
732 LL_SPI_SetNSSMode(spi, LL_SPI_NSS_HARD_INPUT);
733 } else {
734 LL_SPI_SetNSSMode(spi, LL_SPI_NSS_HARD_OUTPUT);
735 }
736 }
737
738 if (config->operation & SPI_OP_MODE_SLAVE) {
739 LL_SPI_SetMode(spi, LL_SPI_MODE_SLAVE);
740 } else {
741 LL_SPI_SetMode(spi, LL_SPI_MODE_MASTER);
742 }
743
744 if (SPI_WORD_SIZE_GET(config->operation) == 8) {
745 LL_SPI_SetDataWidth(spi, LL_SPI_DATAWIDTH_8BIT);
746 } else {
747 LL_SPI_SetDataWidth(spi, LL_SPI_DATAWIDTH_16BIT);
748 }
749
750 #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
751 LL_SPI_SetMasterSSIdleness(spi, cfg->mssi_clocks);
752 LL_SPI_SetInterDataIdleness(spi, (cfg->midi_clocks << SPI_CFG2_MIDI_Pos));
753 #endif
754
755 #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_spi_fifo)
756 ll_func_set_fifo_threshold_8bit(spi);
757 #endif
758
759 /* At this point, it's mandatory to set this on the context! */
760 data->ctx.config = config;
761
762 LOG_DBG("Installed config %p: freq %uHz (div = %u),"
763 " mode %u/%u/%u, slave %u",
764 config, clock >> br, 1 << br,
765 (SPI_MODE_GET(config->operation) & SPI_MODE_CPOL) ? 1 : 0,
766 (SPI_MODE_GET(config->operation) & SPI_MODE_CPHA) ? 1 : 0,
767 (SPI_MODE_GET(config->operation) & SPI_MODE_LOOP) ? 1 : 0,
768 config->slave);
769
770 return 0;
771 }
772
773 static int spi_stm32_release(const struct device *dev,
774 const struct spi_config *config)
775 {
776 struct spi_stm32_data *data = dev->data;
777 const struct spi_stm32_config *cfg = dev->config;
778
779 spi_context_unlock_unconditionally(&data->ctx);
780 ll_func_disable_spi(cfg->spi);
781
782 return 0;
783 }
784
785 #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
786 static int32_t spi_stm32_count_bufset_frames(const struct spi_config *config,
787 const struct spi_buf_set *bufs)
788 {
789 if (bufs == NULL) {
790 return 0;
791 }
792
793 uint32_t num_bytes = 0;
794
795 for (size_t i = 0; i < bufs->count; i++) {
796 num_bytes += bufs->buffers[i].len;
797 }
798
799 uint8_t bytes_per_frame = SPI_WORD_SIZE_GET(config->operation) / 8;
800
801 if ((num_bytes % bytes_per_frame) != 0) {
802 return -EINVAL;
803 }
804
805 int frames = num_bytes / bytes_per_frame;
806
807 if (frames > UINT16_MAX) {
808 return -EMSGSIZE;
809 }
810
811 return frames;
812 }
813
814 static int32_t spi_stm32_count_total_frames(const struct spi_config *config,
815 const struct spi_buf_set *tx_bufs,
816 const struct spi_buf_set *rx_bufs)
817 {
818 int tx_frames = spi_stm32_count_bufset_frames(config, tx_bufs);
819
820 if (tx_frames < 0) {
821 return tx_frames;
822 }
823
824 int rx_frames = spi_stm32_count_bufset_frames(config, rx_bufs);
825
826 if (rx_frames < 0) {
827 return rx_frames;
828 }
829
830 return MAX(rx_frames, tx_frames);
831 }
832 #endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) */
833
834 static int spi_stm32_half_duplex_switch_to_receive(const struct spi_stm32_config *cfg,
835 struct spi_stm32_data *data)
836 {
837 SPI_TypeDef *spi = cfg->spi;
838
839 if (!spi_context_tx_on(&data->ctx) &&
840 spi_context_rx_on(&data->ctx)) {
841 #ifndef CONFIG_SPI_STM32_INTERRUPT
842 while (ll_func_spi_is_busy(spi)) {
843 /* NOP */
844 }
845 LL_SPI_Disable(spi);
846 #endif /* CONFIG_SPI_STM32_INTERRUPT*/
847
848 #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
849 const struct spi_config *config = data->ctx.config;
850
851 if (SPI_OP_MODE_GET(config->operation) == SPI_OP_MODE_MASTER) {
852 int num_bytes = spi_context_total_rx_len(&data->ctx);
853 uint8_t bytes_per_frame = SPI_WORD_SIZE_GET(config->operation) / 8;
854
855 if ((num_bytes % bytes_per_frame) != 0) {
856 return -EINVAL;
857 }
858
859 LL_SPI_SetTransferSize(spi, (uint32_t) num_bytes / bytes_per_frame);
860 }
861 #endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) */
862
863 LL_SPI_SetTransferDirection(spi, LL_SPI_HALF_DUPLEX_RX);
864
865 LL_SPI_Enable(spi);
866
867 #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
868 /* With the STM32MP1, STM32U5 and the STM32H7,
869 * if the device is the SPI master,
870 * we need to enable the start of the transfer with
871 * LL_SPI_StartMasterTransfer(spi).
872 */
873 if (LL_SPI_GetMode(spi) == LL_SPI_MODE_MASTER) {
874 LL_SPI_StartMasterTransfer(spi);
875 while (!LL_SPI_IsActiveMasterTransfer(spi)) {
876 /* NOP */
877 }
878 }
879 #endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) */
880
881 #if CONFIG_SOC_SERIES_STM32H7X
882 /*
883 * Add a small delay after enabling to prevent transfer stalling at high
884 * system clock frequency (see errata sheet ES0392).
885 */
886 k_busy_wait(WAIT_1US);
887 #endif
888
889 #ifdef CONFIG_SPI_STM32_INTERRUPT
890 #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
891 if (cfg->fifo_enabled) {
892 LL_SPI_EnableIT_EOT(spi);
893 }
894 #endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) */
895
896 ll_func_enable_int_errors(spi);
897 ll_func_enable_int_rx_not_empty(spi);
898 #endif /* CONFIG_SPI_STM32_INTERRUPT */
899 }
900
901 return 0;
902 }
903
904 static int transceive(const struct device *dev,
905 const struct spi_config *config,
906 const struct spi_buf_set *tx_bufs,
907 const struct spi_buf_set *rx_bufs,
908 bool asynchronous,
909 spi_callback_t cb,
910 void *userdata)
911 {
912 const struct spi_stm32_config *cfg = dev->config;
913 struct spi_stm32_data *data = dev->data;
914 SPI_TypeDef *spi = cfg->spi;
915 int ret;
916
917 if (!tx_bufs && !rx_bufs) {
918 return 0;
919 }
920
921 #ifndef CONFIG_SPI_STM32_INTERRUPT
922 if (asynchronous) {
923 return -ENOTSUP;
924 }
925 #endif /* CONFIG_SPI_STM32_INTERRUPT */
926
927 spi_context_lock(&data->ctx, asynchronous, cb, userdata, config);
928
929 spi_stm32_pm_policy_state_lock_get(dev);
930
931 ret = spi_stm32_configure(dev, config, tx_bufs != NULL);
932 if (ret) {
933 goto end;
934 }
935
936 /* Set buffers info */
937 if (SPI_WORD_SIZE_GET(config->operation) == 8) {
938 spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);
939 } else {
940 spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 2);
941 }
942
943 uint32_t transfer_dir = LL_SPI_GetTransferDirection(spi);
944
945 #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
946 if (cfg->fifo_enabled && SPI_OP_MODE_GET(config->operation) == SPI_OP_MODE_MASTER) {
947 int total_frames;
948
949 if (transfer_dir == LL_SPI_FULL_DUPLEX) {
950 total_frames = spi_stm32_count_total_frames(
951 config, tx_bufs, rx_bufs);
952 } else if (transfer_dir == LL_SPI_HALF_DUPLEX_TX) {
953 total_frames = spi_stm32_count_bufset_frames(
954 config, tx_bufs);
955 } else {
956 total_frames = spi_stm32_count_bufset_frames(
957 config, rx_bufs);
958 }
959
960 if (total_frames < 0) {
961 ret = total_frames;
962 goto end;
963 }
964 LL_SPI_SetTransferSize(spi, (uint32_t)total_frames);
965 }
966
967 #endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) */
968
969 LL_SPI_Enable(spi);
970
971 #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
972 /* With the STM32MP1, STM32U5 and the STM32H7,
973 * if the device is the SPI master,
974 * we need to enable the start of the transfer with
975 * LL_SPI_StartMasterTransfer(spi)
976 */
977 if (LL_SPI_GetMode(spi) == LL_SPI_MODE_MASTER) {
978 LL_SPI_StartMasterTransfer(spi);
979 while (!LL_SPI_IsActiveMasterTransfer(spi)) {
980 /* NOP */
981 }
982 }
983 #endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) */
984
985 #ifdef CONFIG_SOC_SERIES_STM32H7X
986 /*
987 * Add a small delay after enabling to prevent transfer stalling at high
988 * system clock frequency (see errata sheet ES0392).
989 */
990 k_busy_wait(WAIT_1US);
991 #endif /* CONFIG_SOC_SERIES_STM32H7X */
992
993 /* This is turned off in spi_stm32_complete(). */
994 spi_stm32_cs_control(dev, true);
995
996 #ifdef CONFIG_SPI_STM32_INTERRUPT
997
998 #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
999 if (cfg->fifo_enabled) {
1000 LL_SPI_EnableIT_EOT(spi);
1001 }
1002 #endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) */
1003
1004 ll_func_enable_int_errors(spi);
1005
1006 if (rx_bufs) {
1007 ll_func_enable_int_rx_not_empty(spi);
1008 }
1009
1010 ll_func_enable_int_tx_empty(spi);
1011
1012 do {
1013 ret = spi_context_wait_for_completion(&data->ctx);
1014
1015 if (!ret &&
1016 transfer_dir == LL_SPI_HALF_DUPLEX_TX) {
1017 ret = spi_stm32_half_duplex_switch_to_receive(cfg, data);
1018 transfer_dir = LL_SPI_GetTransferDirection(spi);
1019 }
1020 } while (!ret && spi_stm32_transfer_ongoing(data));
1021 #else /* CONFIG_SPI_STM32_INTERRUPT */
1022 do {
1023 ret = spi_stm32_shift_frames(cfg, data);
1024
1025 if (!ret &&
1026 transfer_dir == LL_SPI_HALF_DUPLEX_TX) {
1027 ret = spi_stm32_half_duplex_switch_to_receive(cfg, data);
1028 transfer_dir = LL_SPI_GetTransferDirection(spi);
1029 }
1030 } while (!ret && spi_stm32_transfer_ongoing(data));
1031
1032 spi_stm32_complete(dev, ret);
1033
1034 #ifdef CONFIG_SPI_SLAVE
1035 if (spi_context_is_slave(&data->ctx) && !ret) {
1036 ret = data->ctx.recv_frames;
1037 }
1038 #endif /* CONFIG_SPI_SLAVE */
1039
1040 #endif /* CONFIG_SPI_STM32_INTERRUPT */
1041
1042 end:
1043 spi_context_release(&data->ctx, ret);
1044
1045 return ret;
1046 }
1047
1048 #ifdef CONFIG_SPI_STM32_DMA
1049
1050 static int wait_dma_rx_tx_done(const struct device *dev)
1051 {
1052 struct spi_stm32_data *data = dev->data;
1053 int res = -1;
1054 k_timeout_t timeout;
1055
1056 /*
1057 * In slave mode we do not know when the transaction will start. Hence,
1058 * it doesn't make sense to use a timeout in this case.
1059 */
1060 if (IS_ENABLED(CONFIG_SPI_SLAVE) && spi_context_is_slave(&data->ctx)) {
1061 timeout = K_FOREVER;
1062 } else {
1063 timeout = K_MSEC(1000);
1064 }
1065
1066 while (1) {
1067 res = k_sem_take(&data->status_sem, timeout);
1068 if (res != 0) {
1069 return res;
1070 }
1071
1072 if (data->status_flags & SPI_STM32_DMA_ERROR_FLAG) {
1073 return -EIO;
1074 }
1075
1076 if (data->status_flags & SPI_STM32_DMA_DONE_FLAG) {
1077 return 0;
1078 }
1079 }
1080
1081 return res;
1082 }
1083
1084 #ifdef CONFIG_DCACHE
1085 static bool buf_in_nocache(uintptr_t buf, size_t len_bytes)
1086 {
1087 bool buf_within_nocache = false;
1088
1089 #ifdef CONFIG_NOCACHE_MEMORY
1090 /* Check if buffer is in nocache region defined by the linker */
1091 buf_within_nocache = (buf >= ((uintptr_t)_nocache_ram_start)) &&
1092 ((buf + len_bytes - 1) <= ((uintptr_t)_nocache_ram_end));
1093 if (buf_within_nocache) {
1094 return true;
1095 }
1096 #endif /* CONFIG_NOCACHE_MEMORY */
1097
1098 /* Check if buffer is in nocache memory region defined in DT */
1099 buf_within_nocache = mem_attr_check_buf(
1100 (void *)buf, len_bytes, DT_MEM_ARM(ATTR_MPU_RAM_NOCACHE)) == 0;
1101
1102 return buf_within_nocache;
1103 }
1104
1105 static bool is_dummy_buffer(const struct spi_buf *buf)
1106 {
1107 return buf->buf == NULL;
1108 }
1109
1110 static bool spi_buf_set_in_nocache(const struct spi_buf_set *bufs)
1111 {
1112 for (size_t i = 0; i < bufs->count; i++) {
1113 const struct spi_buf *buf = &bufs->buffers[i];
1114
1115 if (!is_dummy_buffer(buf) &&
1116 !buf_in_nocache((uintptr_t)buf->buf, buf->len)) {
1117 return false;
1118 }
1119 }
1120 return true;
1121 }
1122 #endif /* CONFIG_DCACHE */
1123
1124 static int transceive_dma(const struct device *dev,
1125 const struct spi_config *config,
1126 const struct spi_buf_set *tx_bufs,
1127 const struct spi_buf_set *rx_bufs,
1128 bool asynchronous,
1129 spi_callback_t cb,
1130 void *userdata)
1131 {
1132 const struct spi_stm32_config *cfg = dev->config;
1133 struct spi_stm32_data *data = dev->data;
1134 SPI_TypeDef *spi = cfg->spi;
1135 int ret;
1136 int err;
1137
1138 if (!tx_bufs && !rx_bufs) {
1139 return 0;
1140 }
1141
1142 if (asynchronous) {
1143 return -ENOTSUP;
1144 }
1145
1146 #ifdef CONFIG_DCACHE
1147 if ((tx_bufs != NULL && !spi_buf_set_in_nocache(tx_bufs)) ||
1148 (rx_bufs != NULL && !spi_buf_set_in_nocache(rx_bufs))) {
1149 return -EFAULT;
1150 }
1151 #endif /* CONFIG_DCACHE */
1152
1153 spi_context_lock(&data->ctx, asynchronous, cb, userdata, config);
1154
1155 spi_stm32_pm_policy_state_lock_get(dev);
1156
1157 k_sem_reset(&data->status_sem);
1158
1159 ret = spi_stm32_configure(dev, config, tx_bufs != NULL);
1160 if (ret) {
1161 goto end;
1162 }
1163
1164 uint32_t transfer_dir = LL_SPI_GetTransferDirection(spi);
1165
1166 #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
1167 if (transfer_dir == LL_SPI_HALF_DUPLEX_RX) {
1168 int frames = spi_stm32_count_bufset_frames(config, rx_bufs);
1169
1170 if (frames < 0) {
1171 ret = frames;
1172 goto end;
1173 }
1174
1175 LL_SPI_SetTransferSize(cfg->spi, frames);
1176 }
1177 #endif /* st_stm32h7_spi */
1178
1179 /* Set buffers info */
1180 if (SPI_WORD_SIZE_GET(config->operation) == 8) {
1181 spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);
1182 } else {
1183 spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 2);
1184 }
1185
1186 #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
1187 /* set request before enabling (else SPI CFG1 reg is write protected) */
1188 if (transfer_dir == LL_SPI_FULL_DUPLEX) {
1189 LL_SPI_EnableDMAReq_RX(spi);
1190 LL_SPI_EnableDMAReq_TX(spi);
1191 } else if (transfer_dir == LL_SPI_HALF_DUPLEX_TX) {
1192 LL_SPI_EnableDMAReq_TX(spi);
1193 } else {
1194 LL_SPI_EnableDMAReq_RX(spi);
1195 }
1196
1197 LL_SPI_Enable(spi);
1198
1199 /* In half-duplex rx mode, start transfer after
1200 * setting DMA configurations
1201 */
1202 if (transfer_dir != LL_SPI_HALF_DUPLEX_RX &&
1203 LL_SPI_GetMode(spi) == LL_SPI_MODE_MASTER) {
1204 LL_SPI_StartMasterTransfer(spi);
1205 }
1206 #else
1207 LL_SPI_Enable(spi);
1208 #endif /* st_stm32h7_spi */
1209
1210 /* This is turned off in spi_stm32_complete(). */
1211 spi_stm32_cs_control(dev, true);
1212
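/*
 * Note (added): transfer the buffer sets chunk by chunk. Each pass programs
 * the DMA channel(s) for the current tx/rx segment, waits for the DMA
 * callbacks to report completion, then advances the spi_context to the next
 * buffers.
 */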
1213 while (data->ctx.rx_len > 0 || data->ctx.tx_len > 0) {
1214 size_t dma_len;
1215
1216 data->status_flags = 0;
1217
1218 if (transfer_dir == LL_SPI_FULL_DUPLEX) {
1219 if (data->ctx.rx_len == 0) {
1220 dma_len = data->ctx.tx_len;
1221 } else if (data->ctx.tx_len == 0) {
1222 dma_len = data->ctx.rx_len;
1223 } else {
1224 dma_len = MIN(data->ctx.tx_len, data->ctx.rx_len);
1225 }
1226
1227 ret = spi_dma_move_buffers(dev, dma_len);
1228 } else if (transfer_dir == LL_SPI_HALF_DUPLEX_TX) {
1229 dma_len = data->ctx.tx_len;
1230 ret = spi_dma_move_tx_buffers(dev, dma_len);
1231 } else {
1232 dma_len = data->ctx.rx_len;
1233 ret = spi_dma_move_rx_buffers(dev, dma_len);
1234 }
1235
1236 #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
1237 if (transfer_dir == LL_SPI_HALF_DUPLEX_RX &&
1238 LL_SPI_GetMode(spi) == LL_SPI_MODE_MASTER) {
1239 LL_SPI_StartMasterTransfer(spi);
1240 }
1241 #endif /* st_stm32h7_spi */
1242
1243 if (ret != 0) {
1244 break;
1245 }
1246
1247 #if !DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
1248
1249 /* toggle the DMA request to restart the transfer */
1250 if (transfer_dir == LL_SPI_FULL_DUPLEX) {
1251 LL_SPI_EnableDMAReq_RX(spi);
1252 LL_SPI_EnableDMAReq_TX(spi);
1253 } else if (transfer_dir == LL_SPI_HALF_DUPLEX_TX) {
1254 LL_SPI_EnableDMAReq_TX(spi);
1255 } else {
1256 LL_SPI_EnableDMAReq_RX(spi);
1257 }
1258 #endif /* ! st_stm32h7_spi */
1259
1260 ret = wait_dma_rx_tx_done(dev);
1261 if (ret != 0) {
1262 break;
1263 }
1264
1265 #ifdef SPI_SR_FTLVL
1266 while (LL_SPI_GetTxFIFOLevel(spi) > 0) {
1267 }
1268 #endif /* SPI_SR_FTLVL */
1269
1270 #ifdef CONFIG_SPI_STM32_ERRATA_BUSY
1271 WAIT_FOR(ll_func_spi_dma_busy(spi) != 0,
1272 CONFIG_SPI_STM32_BUSY_FLAG_TIMEOUT,
1273 k_yield());
1274 #else
1275 /* wait until the SPI is no longer busy (TX FIFO fully drained) */
1276 while (ll_func_spi_dma_busy(spi) == 0) {
1277 }
1278 #endif /* CONFIG_SPI_STM32_ERRATA_BUSY */
1279
1280 #if !DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
1281 /* toggle the DMA transfer request */
1282 LL_SPI_DisableDMAReq_TX(spi);
1283 LL_SPI_DisableDMAReq_RX(spi);
1284 #endif /* ! st_stm32h7_spi */
1285
1286 uint8_t frame_size_bytes = bits2bytes(
1287 SPI_WORD_SIZE_GET(config->operation));
1288
1289 if (transfer_dir == LL_SPI_FULL_DUPLEX) {
1290 spi_context_update_tx(&data->ctx, frame_size_bytes, dma_len);
1291 spi_context_update_rx(&data->ctx, frame_size_bytes, dma_len);
1292 } else if (transfer_dir == LL_SPI_HALF_DUPLEX_TX) {
1293 spi_context_update_tx(&data->ctx, frame_size_bytes, dma_len);
1294 } else {
1295 spi_context_update_rx(&data->ctx, frame_size_bytes, dma_len);
1296 }
1297
1298 if (transfer_dir == LL_SPI_HALF_DUPLEX_TX &&
1299 !spi_context_tx_on(&data->ctx) &&
1300 spi_context_rx_on(&data->ctx)) {
1301 LL_SPI_Disable(spi);
1302 LL_SPI_SetTransferDirection(spi, LL_SPI_HALF_DUPLEX_RX);
1303
1304 transfer_dir = LL_SPI_HALF_DUPLEX_RX;
1305
1306 #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
1307 int frames = spi_stm32_count_bufset_frames(config, rx_bufs);
1308
1309 if (frames < 0) {
1310 ret = frames;
1311 break;
1312 }
1313
1314 LL_SPI_SetTransferSize(cfg->spi, frames);
1315
1316 LL_SPI_EnableDMAReq_RX(spi);
1317 #endif /* st_stm32h7_spi */
1318
1319 LL_SPI_Enable(spi);
1320 }
1321 }
1322
1323 /* spi_stm32_complete() relies on the SPI status register, so call it before disabling the SPI */
1324 spi_stm32_complete(dev, ret);
1325 /* disable spi instance after completion */
1326 LL_SPI_Disable(spi);
1327 /* On some MCUs, the configuration register is write-protected unless the SPI is disabled */
1328 LL_SPI_DisableDMAReq_TX(spi);
1329 LL_SPI_DisableDMAReq_RX(spi);
1330
1331 err = dma_stop(data->dma_rx.dma_dev, data->dma_rx.channel);
1332 if (err) {
1333 LOG_DBG("Rx dma_stop failed with error %d", err);
1334 }
1335 err = dma_stop(data->dma_tx.dma_dev, data->dma_tx.channel);
1336 if (err) {
1337 LOG_DBG("Tx dma_stop failed with error %d", err);
1338 }
1339
1340 #ifdef CONFIG_SPI_SLAVE
1341 if (spi_context_is_slave(&data->ctx) && !ret) {
1342 ret = data->ctx.recv_frames;
1343 }
1344 #endif /* CONFIG_SPI_SLAVE */
1345
1346 end:
1347 spi_context_release(&data->ctx, ret);
1348
1349 spi_stm32_pm_policy_state_lock_put(dev);
1350
1351 return ret;
1352 }
1353 #endif /* CONFIG_SPI_STM32_DMA */
1354
1355 static int spi_stm32_transceive(const struct device *dev,
1356 const struct spi_config *config,
1357 const struct spi_buf_set *tx_bufs,
1358 const struct spi_buf_set *rx_bufs)
1359 {
1360 #ifdef CONFIG_SPI_STM32_DMA
1361 struct spi_stm32_data *data = dev->data;
1362
1363 if ((data->dma_tx.dma_dev != NULL)
1364 && (data->dma_rx.dma_dev != NULL)) {
1365 return transceive_dma(dev, config, tx_bufs, rx_bufs,
1366 false, NULL, NULL);
1367 }
1368 #endif /* CONFIG_SPI_STM32_DMA */
1369 return transceive(dev, config, tx_bufs, rx_bufs, false, NULL, NULL);
1370 }
1371
1372 #ifdef CONFIG_SPI_ASYNC
1373 static int spi_stm32_transceive_async(const struct device *dev,
1374 const struct spi_config *config,
1375 const struct spi_buf_set *tx_bufs,
1376 const struct spi_buf_set *rx_bufs,
1377 spi_callback_t cb,
1378 void *userdata)
1379 {
1380 return transceive(dev, config, tx_bufs, rx_bufs, true, cb, userdata);
1381 }
1382 #endif /* CONFIG_SPI_ASYNC */
1383
1384 static DEVICE_API(spi, api_funcs) = {
1385 .transceive = spi_stm32_transceive,
1386 #ifdef CONFIG_SPI_ASYNC
1387 .transceive_async = spi_stm32_transceive_async,
1388 #endif
1389 #ifdef CONFIG_SPI_RTIO
1390 .iodev_submit = spi_rtio_iodev_default_submit,
1391 #endif
1392 .release = spi_stm32_release,
1393 };
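/*
 * Illustrative application-side usage (added, not part of this driver): a
 * minimal sketch of driving a peripheral through the generic Zephyr SPI API,
 * which dispatches to api_funcs above. The "mydev" node label and the buffer
 * contents are hypothetical.
 *
 *   static const struct spi_dt_spec spec = SPI_DT_SPEC_GET(
 *           DT_NODELABEL(mydev),
 *           SPI_OP_MODE_MASTER | SPI_WORD_SET(8) | SPI_TRANSFER_MSB, 0);
 *
 *   uint8_t tx_data[2] = { 0x0f, 0x00 };
 *   uint8_t rx_data[2];
 *   const struct spi_buf tx_buf = { .buf = tx_data, .len = sizeof(tx_data) };
 *   const struct spi_buf rx_buf = { .buf = rx_data, .len = sizeof(rx_data) };
 *   const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 };
 *   const struct spi_buf_set rx = { .buffers = &rx_buf, .count = 1 };
 *
 *   int ret = spi_transceive_dt(&spec, &tx, &rx);
 */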
1394
1395 static inline bool spi_stm32_is_subghzspi(const struct device *dev)
1396 {
1397 #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_spi_subghz)
1398 const struct spi_stm32_config *cfg = dev->config;
1399
1400 return cfg->use_subghzspi_nss;
1401 #else
1402 ARG_UNUSED(dev);
1403 return false;
1404 #endif /* st_stm32_spi_subghz */
1405 }
1406
1407 static int spi_stm32_init(const struct device *dev)
1408 {
1409 struct spi_stm32_data *data __attribute__((unused)) = dev->data;
1410 const struct spi_stm32_config *cfg = dev->config;
1411 int err;
1412
1413 if (!device_is_ready(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE))) {
1414 LOG_ERR("clock control device not ready");
1415 return -ENODEV;
1416 }
1417
1418 err = clock_control_on(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
1419 (clock_control_subsys_t) &cfg->pclken[0]);
1420 if (err < 0) {
1421 LOG_ERR("Could not enable SPI clock");
1422 return err;
1423 }
1424
1425 if (IS_ENABLED(STM32_SPI_DOMAIN_CLOCK_SUPPORT) && (cfg->pclk_len > 1)) {
1426 err = clock_control_configure(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
1427 (clock_control_subsys_t) &cfg->pclken[1],
1428 NULL);
1429 if (err < 0) {
1430 LOG_ERR("Could not select SPI domain clock");
1431 return err;
1432 }
1433 }
1434
1435 if (!spi_stm32_is_subghzspi(dev)) {
1436 /* Configure dt provided device signals when available */
1437 err = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
1438 if (err < 0) {
1439 LOG_ERR("SPI pinctrl setup failed (%d)", err);
1440 return err;
1441 }
1442 }
1443
1444 #ifdef CONFIG_SPI_STM32_INTERRUPT
1445 cfg->irq_config(dev);
1446 #endif /* CONFIG_SPI_STM32_INTERRUPT */
1447
1448 #ifdef CONFIG_SPI_STM32_DMA
1449 if ((data->dma_rx.dma_dev != NULL) &&
1450 !device_is_ready(data->dma_rx.dma_dev)) {
1451 LOG_ERR("%s device not ready", data->dma_rx.dma_dev->name);
1452 return -ENODEV;
1453 }
1454
1455 if ((data->dma_tx.dma_dev != NULL) &&
1456 !device_is_ready(data->dma_tx.dma_dev)) {
1457 LOG_ERR("%s device not ready", data->dma_tx.dma_dev->name);
1458 return -ENODEV;
1459 }
1460
1461 LOG_DBG("SPI with DMA transfer");
1462
1463 #endif /* CONFIG_SPI_STM32_DMA */
1464
1465 err = spi_context_cs_configure_all(&data->ctx);
1466 if (err < 0) {
1467 return err;
1468 }
1469
1470 spi_context_unlock_unconditionally(&data->ctx);
1471
1472 return pm_device_runtime_enable(dev);
1473 }
1474
1475 #ifdef CONFIG_PM_DEVICE
1476 static int spi_stm32_pm_action(const struct device *dev,
1477 enum pm_device_action action)
1478 {
1479 const struct spi_stm32_config *config = dev->config;
1480 const struct device *const clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE);
1481 int err;
1482
1483
1484 switch (action) {
1485 case PM_DEVICE_ACTION_RESUME:
1486 if (!spi_stm32_is_subghzspi(dev)) {
1487 /* Set pins to active state */
1488 err = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
1489 if (err < 0) {
1490 return err;
1491 }
1492 }
1493
1494 /* enable clock */
1495 err = clock_control_on(clk, (clock_control_subsys_t)&config->pclken[0]);
1496 if (err != 0) {
1497 LOG_ERR("Could not enable SPI clock");
1498 return err;
1499 }
1500 break;
1501 case PM_DEVICE_ACTION_SUSPEND:
1502 /* Stop device clock. */
1503 err = clock_control_off(clk, (clock_control_subsys_t)&config->pclken[0]);
1504 if (err != 0) {
1505 LOG_ERR("Could not disable SPI clock");
1506 return err;
1507 }
1508
1509 if (!spi_stm32_is_subghzspi(dev)) {
1510 /* Move pins to sleep state */
1511 err = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_SLEEP);
1512 if ((err < 0) && (err != -ENOENT)) {
1513 /*
1514 * -ENOENT means that no pins were defined for sleep mode:
1515 * do not log on the console (the system might already be
1516 * going to sleep) that the
1517 * "SPI pinctrl sleep state not available"
1518 * and do not block PM suspend.
1519 * Any other error is returned.
1520 */
1521 return err;
1522 }
1523 }
1524 break;
1525 default:
1526 return -ENOTSUP;
1527 }
1528
1529 return 0;
1530 }
1531 #endif /* CONFIG_PM_DEVICE */
1532
1533 #ifdef CONFIG_SPI_STM32_INTERRUPT
1534 #define STM32_SPI_IRQ_HANDLER_DECL(id) \
1535 static void spi_stm32_irq_config_func_##id(const struct device *dev)
1536 #define STM32_SPI_IRQ_HANDLER_FUNC(id) \
1537 .irq_config = spi_stm32_irq_config_func_##id,
1538 #define STM32_SPI_IRQ_HANDLER(id) \
1539 static void spi_stm32_irq_config_func_##id(const struct device *dev) \
1540 { \
1541 IRQ_CONNECT(DT_INST_IRQN(id), \
1542 DT_INST_IRQ(id, priority), \
1543 spi_stm32_isr, DEVICE_DT_INST_GET(id), 0); \
1544 irq_enable(DT_INST_IRQN(id)); \
1545 }
1546 #else
1547 #define STM32_SPI_IRQ_HANDLER_DECL(id)
1548 #define STM32_SPI_IRQ_HANDLER_FUNC(id)
1549 #define STM32_SPI_IRQ_HANDLER(id)
1550 #endif /* CONFIG_SPI_STM32_INTERRUPT */
1551
1552 #define SPI_DMA_CHANNEL_INIT(index, dir, dir_cap, src_dev, dest_dev) \
1553 .dma_dev = DEVICE_DT_GET(STM32_DMA_CTLR(index, dir)), \
1554 .channel = DT_INST_DMAS_CELL_BY_NAME(index, dir, channel), \
1555 .dma_cfg = { \
1556 .dma_slot = STM32_DMA_SLOT(index, dir, slot),\
1557 .channel_direction = STM32_DMA_CONFIG_DIRECTION( \
1558 STM32_DMA_CHANNEL_CONFIG(index, dir)), \
1559 .source_data_size = STM32_DMA_CONFIG_##src_dev##_DATA_SIZE( \
1560 STM32_DMA_CHANNEL_CONFIG(index, dir)), \
1561 .dest_data_size = STM32_DMA_CONFIG_##dest_dev##_DATA_SIZE( \
1562 STM32_DMA_CHANNEL_CONFIG(index, dir)), \
1563 .source_burst_length = 1, /* SINGLE transfer */ \
1564 .dest_burst_length = 1, /* SINGLE transfer */ \
1565 .channel_priority = STM32_DMA_CONFIG_PRIORITY( \
1566 STM32_DMA_CHANNEL_CONFIG(index, dir)),\
1567 .dma_callback = dma_callback, \
1568 .block_count = 2, \
1569 }, \
1570 .src_addr_increment = STM32_DMA_CONFIG_##src_dev##_ADDR_INC( \
1571 STM32_DMA_CHANNEL_CONFIG(index, dir)), \
1572 .dst_addr_increment = STM32_DMA_CONFIG_##dest_dev##_ADDR_INC( \
1573 STM32_DMA_CHANNEL_CONFIG(index, dir)), \
1574 .fifo_threshold = STM32_DMA_FEATURES_FIFO_THRESHOLD( \
1575 STM32_DMA_FEATURES(index, dir)), \
1576
1577
1578 #ifdef CONFIG_SPI_STM32_DMA
1579 #define SPI_DMA_CHANNEL(id, dir, DIR, src, dest) \
1580 .dma_##dir = { \
1581 COND_CODE_1(DT_INST_DMAS_HAS_NAME(id, dir), \
1582 (SPI_DMA_CHANNEL_INIT(id, dir, DIR, src, dest)),\
1583 (NULL)) \
1584 },
1585 #define SPI_DMA_STATUS_SEM(id) \
1586 .status_sem = Z_SEM_INITIALIZER( \
1587 spi_stm32_dev_data_##id.status_sem, 0, 1),
1588 #else
1589 #define SPI_DMA_CHANNEL(id, dir, DIR, src, dest)
1590 #define SPI_DMA_STATUS_SEM(id)
1591 #endif /* CONFIG_SPI_STM32_DMA */
1592
1593 #define SPI_SUPPORTS_FIFO(id) DT_INST_NODE_HAS_PROP(id, fifo_enable)
1594 #define SPI_GET_FIFO_PROP(id) DT_INST_PROP(id, fifo_enable)
1595 #define SPI_FIFO_ENABLED(id) COND_CODE_1(SPI_SUPPORTS_FIFO(id), (SPI_GET_FIFO_PROP(id)), (0))
1596
1597 #define STM32_SPI_INIT(id) \
1598 STM32_SPI_IRQ_HANDLER_DECL(id); \
1599 \
1600 PINCTRL_DT_INST_DEFINE(id); \
1601 \
1602 static const struct stm32_pclken pclken_##id[] = \
1603 STM32_DT_INST_CLOCKS(id);\
1604 \
1605 static const struct spi_stm32_config spi_stm32_cfg_##id = { \
1606 .spi = (SPI_TypeDef *) DT_INST_REG_ADDR(id), \
1607 .pclken = pclken_##id, \
1608 .pclk_len = DT_INST_NUM_CLOCKS(id), \
1609 .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(id), \
1610 .fifo_enabled = SPI_FIFO_ENABLED(id), \
1611 STM32_SPI_IRQ_HANDLER_FUNC(id) \
1612 IF_ENABLED(DT_HAS_COMPAT_STATUS_OKAY(st_stm32_spi_subghz), \
1613 (.use_subghzspi_nss = \
1614 DT_INST_PROP_OR(id, use_subghzspi_nss, false),))\
1615 IF_ENABLED(DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi), \
1616 (.midi_clocks = \
1617 DT_INST_PROP(id, midi_clock),)) \
1618 IF_ENABLED(DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi), \
1619 (.mssi_clocks = \
1620 DT_INST_PROP(id, mssi_clock),)) \
1621 }; \
1622 \
1623 static struct spi_stm32_data spi_stm32_dev_data_##id = { \
1624 SPI_CONTEXT_INIT_LOCK(spi_stm32_dev_data_##id, ctx), \
1625 SPI_CONTEXT_INIT_SYNC(spi_stm32_dev_data_##id, ctx), \
1626 SPI_DMA_CHANNEL(id, rx, RX, PERIPHERAL, MEMORY) \
1627 SPI_DMA_CHANNEL(id, tx, TX, MEMORY, PERIPHERAL) \
1628 SPI_DMA_STATUS_SEM(id) \
1629 SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(id), ctx) \
1630 }; \
1631 \
1632 PM_DEVICE_DT_INST_DEFINE(id, spi_stm32_pm_action); \
1633 \
1634 SPI_DEVICE_DT_INST_DEFINE(id, spi_stm32_init, PM_DEVICE_DT_INST_GET(id),\
1635 &spi_stm32_dev_data_##id, &spi_stm32_cfg_##id, \
1636 POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, \
1637 &api_funcs); \
1638 \
1639 STM32_SPI_IRQ_HANDLER(id)
1640
1641 DT_INST_FOREACH_STATUS_OKAY(STM32_SPI_INIT)
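/*
 * Hypothetical devicetree fragment (added) for one instance bound by this
 * driver; the pinctrl node names and the chip-select GPIO are board-specific
 * placeholders:
 *
 *   &spi1 {
 *           pinctrl-0 = <&spi1_sck_pa5 &spi1_miso_pa6 &spi1_mosi_pa7>;
 *           pinctrl-names = "default";
 *           cs-gpios = <&gpioa 4 GPIO_ACTIVE_LOW>;
 *           status = "okay";
 *   };
 */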
1642