/*
 * Copyright (c) 2017 Google LLC.
 * Copyright (c) 2018 qianfan Zhao.
 * Copyright (c) 2023 Gerson Fernando Budke.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT atmel_sam_spi

#define LOG_LEVEL CONFIG_SPI_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(spi_sam);

#include "spi_context.h"
#include <errno.h>
#include <zephyr/spinlock.h>
#include <zephyr/device.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/clock_control/atmel_sam_pmc.h>
#include <zephyr/rtio/rtio.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/util.h>
#include <soc.h>

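/* The SAM SPI controller provides up to four NPCS chip select lines */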
#define SAM_SPI_CHIP_SELECT_COUNT			4

/* Number of bytes in transfer before using DMA if available */
#define SAM_SPI_DMA_THRESHOLD				32

/* Device constant configuration parameters */
struct spi_sam_config {
	Spi *regs;
	const struct atmel_sam_pmc_config clock_cfg;
	const struct pinctrl_dev_config *pcfg;
	bool loopback;

#ifdef CONFIG_SPI_SAM_DMA
	const struct device *dma_dev;
	const uint32_t dma_tx_channel;
	const uint32_t dma_tx_perid;
	const uint32_t dma_rx_channel;
	const uint32_t dma_rx_perid;
#endif /* CONFIG_SPI_SAM_DMA */
};

/* Device run time data */
struct spi_sam_data {
	struct spi_context ctx;
	struct k_spinlock lock;

#ifdef CONFIG_SPI_RTIO
	struct rtio *r; /* context for thread calls */
	struct mpsc io_q; /* queue of pending submissions */
	struct rtio_iodev iodev; /* iodev used when transceive calls go through RTIO */
	struct rtio_iodev_sqe *txn_head; /* first SQE of the submission in flight */
	struct rtio_iodev_sqe *txn_curr; /* SQE currently being transferred */
	struct spi_dt_spec dt_spec; /* spi_dt_spec backing the local iodev */
#endif

#ifdef CONFIG_SPI_SAM_DMA
	struct k_sem dma_sem;
#endif /* CONFIG_SPI_SAM_DMA */
};

static inline k_spinlock_key_t spi_spin_lock(const struct device *dev)
{
	struct spi_sam_data *data = dev->data;

	return k_spin_lock(&data->lock);
}

static inline void spi_spin_unlock(const struct device *dev, k_spinlock_key_t key)
{
	struct spi_sam_data *data = dev->data;

	k_spin_unlock(&data->lock, key);
}

static int spi_slave_to_mr_pcs(int slave)
{
	int pcs[SAM_SPI_CHIP_SELECT_COUNT] = {0x0, 0x1, 0x3, 0x7};

	/* The SPI controller is used in fixed peripheral select mode
	 * (SPI_MR.PS = 0) with chip select decoding disabled
	 * (SPI_MR.PCSDEC = 0).  Per the Atmel | SMART ARM-based Flash MCU
	 * datasheet, 40.8.2 SPI Mode Register:
	 * PCS = xxx0    NPCS[3:0] = 1110
	 * PCS = xx01    NPCS[3:0] = 1101
	 * PCS = x011    NPCS[3:0] = 1011
	 * PCS = 0111    NPCS[3:0] = 0111
	 */

	return pcs[slave];
}

static int spi_sam_configure(const struct device *dev,
			     const struct spi_config *config)
{
	const struct spi_sam_config *cfg = dev->config;
	struct spi_sam_data *data = dev->data;
	Spi *regs = cfg->regs;
	uint32_t spi_mr = 0U, spi_csr = 0U;
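	/* When the chip select is a GPIO, the hardware NPCS selection is not
	 * relied upon, so the transfer settings are kept in chip select
	 * register 0.
	 */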
	uint16_t spi_csr_idx = spi_cs_is_gpio(config) ? 0 : config->slave;
	int div;

	if (spi_context_configured(&data->ctx, config)) {
		return 0;
	}

	if (config->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	if (SPI_OP_MODE_GET(config->operation) != SPI_OP_MODE_MASTER) {
		/* Slave mode is not implemented. */
		return -ENOTSUP;
	}

	if (config->slave > (SAM_SPI_CHIP_SELECT_COUNT - 1)) {
		LOG_ERR("Slave %d is greater than %d",
			config->slave, SAM_SPI_CHIP_SELECT_COUNT - 1);
		return -EINVAL;
	}

	/* Set master mode, disable mode fault detection, set fixed peripheral
	 * select mode.
	 */
	spi_mr |= (SPI_MR_MSTR | SPI_MR_MODFDIS);
	spi_mr |= SPI_MR_PCS(spi_slave_to_mr_pcs(spi_csr_idx));

	if (cfg->loopback) {
		spi_mr |= SPI_MR_LLB;
	}

	if ((config->operation & SPI_MODE_CPOL) != 0U) {
		spi_csr |= SPI_CSR_CPOL;
	}

	if ((config->operation & SPI_MODE_CPHA) == 0U) {
		spi_csr |= SPI_CSR_NCPHA;
	}

	if (SPI_WORD_SIZE_GET(config->operation) != 8) {
		return -ENOTSUP;
	} else {
		spi_csr |= SPI_CSR_BITS(SPI_CSR_BITS_8_BIT);
	}

	/* Use the requested or next highest possible frequency */
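	/* Per the SAM datasheet the serial clock is MCK / SCBR, with SCBR in
	 * the range 1..255 (0 is forbidden), hence the CLAMP below.
	 */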
	div = SOC_ATMEL_SAM_MCK_FREQ_HZ / config->frequency;
	div = CLAMP(div, 1, UINT8_MAX);
	spi_csr |= SPI_CSR_SCBR(div);

	regs->SPI_CR = SPI_CR_SPIDIS; /* Disable SPI */
	regs->SPI_MR = spi_mr;
	regs->SPI_CSR[spi_csr_idx] = spi_csr;
	regs->SPI_CR = SPI_CR_SPIEN; /* Enable SPI */

	data->ctx.config = config;

	return 0;
}

/* Finish any ongoing writes and drop any remaining read data */
static void spi_sam_finish(Spi *regs)
{
	while ((regs->SPI_SR & SPI_SR_TXEMPTY) == 0) {
	}

	while (regs->SPI_SR & SPI_SR_RDRF) {
		(void)regs->SPI_RDR;
	}
}

/* Fast path that transmits a buf */
static void spi_sam_fast_tx(Spi *regs, const uint8_t *tx_buf, const uint32_t tx_buf_len)
{
	const uint8_t *p = tx_buf;
	const uint8_t *pend = (uint8_t *)tx_buf + tx_buf_len;
	uint8_t ch;

	while (p != pend) {
		ch = *p++;

		while ((regs->SPI_SR & SPI_SR_TDRE) == 0) {
		}

		regs->SPI_TDR = SPI_TDR_TD(ch);
	}
}

/* Fast path that reads into a buf */
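/* The receiver is clocked by the transmitter, so dummy bytes are written to
 * SPI_TDR to clock in each incoming byte; the writes and reads below are
 * interleaved to keep the bus busy.
 */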
static void spi_sam_fast_rx(Spi *regs, uint8_t *rx_buf, const uint32_t rx_buf_len)
{
	uint8_t *rx = rx_buf;
	int len = rx_buf_len;

	if (len <= 0) {
		return;
	}

	/* Write the first byte */
	regs->SPI_TDR = SPI_TDR_TD(0);
	len--;

	while (len) {
		while ((regs->SPI_SR & SPI_SR_TDRE) == 0) {
		}

		/* Read byte N+0 from the receive register */
		while ((regs->SPI_SR & SPI_SR_RDRF) == 0) {
		}

		*rx = (uint8_t)regs->SPI_RDR;
		rx++;

		/* Load byte N+1 into the transmit register */
		regs->SPI_TDR = SPI_TDR_TD(0);
		len--;
	}

	/* Read the final incoming byte */
	while ((regs->SPI_SR & SPI_SR_RDRF) == 0) {
	}

	*rx = (uint8_t)regs->SPI_RDR;
}

/* Fast path that writes and reads bufs of the same length */
static void spi_sam_fast_txrx(Spi *regs,
			      const uint8_t *tx_buf,
			      const uint8_t *rx_buf,
			      const uint32_t len)
{
	const uint8_t *tx = tx_buf;
	const uint8_t *txend = tx_buf + len;
	uint8_t *rx = (uint8_t *)rx_buf;

	if (len == 0) {
		return;
	}

	/*
	 * The code below interleaves the transmit writes with the
	 * receive reads to keep the bus fully utilised.  The code is
	 * equivalent to:
	 *
	 * Transmit byte 0
	 * Loop:
	 * - Transmit byte n+1
	 * - Receive byte n
	 * Receive the final byte
	 */

	/* Write the first byte */
	regs->SPI_TDR = SPI_TDR_TD(*tx++);

	while (tx != txend) {
		while ((regs->SPI_SR & SPI_SR_TDRE) == 0) {
		}

		/* Load byte N+1 into the transmit register.  TX is single
		 * buffered, so at most one byte is in flight and the
		 * transmit register is free again once TDRE is set above.
		 */
		regs->SPI_TDR = SPI_TDR_TD(*tx++);

		/* Read byte N+0 from the receive register */
		while ((regs->SPI_SR & SPI_SR_RDRF) == 0) {
		}

		*rx++ = (uint8_t)regs->SPI_RDR;
	}

	/* Read the final incoming byte */
	while ((regs->SPI_SR & SPI_SR_RDRF) == 0) {
	}

	*rx = (uint8_t)regs->SPI_RDR;
}


#ifdef CONFIG_SPI_SAM_DMA

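/* Scratch words used as the DMA source or destination when a request has no
 * TX or RX buffer; the address adjustment is set to NO_CHANGE so the same
 * word is reused for every byte.
 */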
static __aligned(4) uint32_t tx_dummy;
static __aligned(4) uint32_t rx_dummy;

#ifdef CONFIG_SPI_RTIO
static void spi_sam_iodev_complete(const struct device *dev, int status);
#endif

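/* Completion callback for the TX DMA channel.  In RTIO mode the transaction
 * is completed directly from the callback; otherwise the semaphore wakes the
 * blocking spi_sam_dma_txrx() caller.
 */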
static void dma_callback(const struct device *dma_dev, void *user_data,
	uint32_t channel, int status)
{
	ARG_UNUSED(dma_dev);
	ARG_UNUSED(channel);
	ARG_UNUSED(status);

	const struct device *dev = user_data;
	struct spi_sam_data *drv_data = dev->data;

#ifdef CONFIG_SPI_RTIO
	if (drv_data->txn_head != NULL) {
		spi_sam_iodev_complete(dev, status);
		return;
	}
#endif
	k_sem_give(&drv_data->dma_sem);
}


/* DMA transceive path */
static int spi_sam_dma_txrx(const struct device *dev,
			    Spi *regs,
			    const uint8_t *tx_buf,
			    const uint8_t *rx_buf,
			    const uint32_t len)
{
	const struct spi_sam_config *drv_cfg = dev->config;
	struct spi_sam_data *drv_data = dev->data;
#ifdef CONFIG_SPI_RTIO
	bool blocking = drv_data->txn_head == NULL;
#else
	bool blocking = true;
#endif

	int res = 0;

	__ASSERT_NO_MSG(rx_buf != NULL || tx_buf != NULL);

	struct dma_config rx_dma_cfg = {
		.source_data_size = 1,
		.dest_data_size = 1,
		.block_count = 1,
		.dma_slot = drv_cfg->dma_rx_perid,
		.channel_direction = PERIPHERAL_TO_MEMORY,
		.source_burst_length = 1,
		.dest_burst_length = 1,
		.complete_callback_en = true,
		.dma_callback = NULL,
		.user_data = (void *)dev,
	};

	uint32_t dest_address, dest_addr_adjust;

	if (rx_buf != NULL) {
		dest_address = (uint32_t)rx_buf;
		dest_addr_adjust = DMA_ADDR_ADJ_INCREMENT;
	} else {
		dest_address = (uint32_t)&rx_dummy;
		dest_addr_adjust = DMA_ADDR_ADJ_NO_CHANGE;
	}

	struct dma_block_config rx_block_cfg = {
		.dest_addr_adj = dest_addr_adjust,
		.block_size = len,
		.source_address = (uint32_t)&regs->SPI_RDR,
		.dest_address = dest_address
	};

	rx_dma_cfg.head_block = &rx_block_cfg;

	struct dma_config tx_dma_cfg = {
		.source_data_size = 1,
		.dest_data_size = 1,
		.block_count = 1,
		.dma_slot = drv_cfg->dma_tx_perid,
		.channel_direction = MEMORY_TO_PERIPHERAL,
		.source_burst_length = 1,
		.dest_burst_length = 1,
		.complete_callback_en = true,
		.dma_callback = dma_callback,
		.user_data = (void *)dev,
	};

	uint32_t source_address, source_addr_adjust;

	if (tx_buf != NULL) {
		source_address = (uint32_t)tx_buf;
		source_addr_adjust = DMA_ADDR_ADJ_INCREMENT;
	} else {
		source_address = (uint32_t)&tx_dummy;
		source_addr_adjust = DMA_ADDR_ADJ_NO_CHANGE;
	}

	struct dma_block_config tx_block_cfg = {
		.source_addr_adj = source_addr_adjust,
		.block_size = len,
		.source_address = source_address,
		.dest_address = (uint32_t)&regs->SPI_TDR
	};

	tx_dma_cfg.head_block = &tx_block_cfg;

	res = dma_config(drv_cfg->dma_dev, drv_cfg->dma_rx_channel, &rx_dma_cfg);
	if (res != 0) {
		LOG_ERR("failed to configure SPI DMA RX");
		goto out;
	}

	res = dma_config(drv_cfg->dma_dev, drv_cfg->dma_tx_channel, &tx_dma_cfg);
	if (res != 0) {
		LOG_ERR("failed to configure SPI DMA TX");
		goto out;
	}

	/* Clocking begins on tx, so start rx first */
	res = dma_start(drv_cfg->dma_dev, drv_cfg->dma_rx_channel);
	if (res != 0) {
		LOG_ERR("failed to start SPI DMA RX");
		goto out;
	}

	res = dma_start(drv_cfg->dma_dev, drv_cfg->dma_tx_channel);
	if (res != 0) {
		LOG_ERR("failed to start SPI DMA TX");
		dma_stop(drv_cfg->dma_dev, drv_cfg->dma_rx_channel);
	}

	/* Move up a level or wrap in branch when blocking */
	if (blocking) {
		k_sem_take(&drv_data->dma_sem, K_FOREVER);
		spi_sam_finish(regs);
	} else {
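		/* Completion will be reported from dma_callback(); tell the
		 * caller the transfer is still in flight.
		 */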
		res = -EWOULDBLOCK;
	}

out:
	return res;
}

#endif /* CONFIG_SPI_SAM_DMA */


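/* PIO/DMA dispatch helpers: transfers shorter than SAM_SPI_DMA_THRESHOLD, or
 * transfers on instances without a DMA controller, use the polled fast path
 * under the spinlock; everything else is handed to spi_sam_dma_txrx().
 */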
static inline int spi_sam_rx(const struct device *dev,
			     Spi *regs,
			     uint8_t *rx_buf,
			     uint32_t rx_buf_len)
{
	k_spinlock_key_t key;

#ifdef CONFIG_SPI_SAM_DMA
	const struct spi_sam_config *cfg = dev->config;

	if (rx_buf_len < SAM_SPI_DMA_THRESHOLD || cfg->dma_dev == NULL) {
		key = spi_spin_lock(dev);
		spi_sam_fast_rx(regs, rx_buf, rx_buf_len);
	} else {
		return spi_sam_dma_txrx(dev, regs, NULL, rx_buf, rx_buf_len);
	}
#else
	key = spi_spin_lock(dev);
	spi_sam_fast_rx(regs, rx_buf, rx_buf_len);
#endif
	spi_sam_finish(regs);

	spi_spin_unlock(dev, key);
	return 0;
}

static inline int spi_sam_tx(const struct device *dev,
			     Spi *regs,
			     const uint8_t *tx_buf,
			     uint32_t tx_buf_len)
{
	k_spinlock_key_t key;

#ifdef CONFIG_SPI_SAM_DMA
	const struct spi_sam_config *cfg = dev->config;

	if (tx_buf_len < SAM_SPI_DMA_THRESHOLD || cfg->dma_dev == NULL) {
		key = spi_spin_lock(dev);
		spi_sam_fast_tx(regs, tx_buf, tx_buf_len);
	} else {
		return spi_sam_dma_txrx(dev, regs, tx_buf, NULL, tx_buf_len);
	}
#else
	key = spi_spin_lock(dev);
	spi_sam_fast_tx(regs, tx_buf, tx_buf_len);
#endif
	spi_sam_finish(regs);
	spi_spin_unlock(dev, key);
	return 0;
}


static inline int spi_sam_txrx(const struct device *dev,
			       Spi *regs,
			       const uint8_t *tx_buf,
			       const uint8_t *rx_buf,
			       uint32_t buf_len)
{
	k_spinlock_key_t key;

#ifdef CONFIG_SPI_SAM_DMA
	const struct spi_sam_config *cfg = dev->config;

	if (buf_len < SAM_SPI_DMA_THRESHOLD || cfg->dma_dev == NULL) {
		key = spi_spin_lock(dev);
		spi_sam_fast_txrx(regs, tx_buf, rx_buf, buf_len);
	} else {
		return spi_sam_dma_txrx(dev, regs, tx_buf, rx_buf, buf_len);
	}
#else
	key = spi_spin_lock(dev);
	spi_sam_fast_txrx(regs, tx_buf, rx_buf, buf_len);
#endif
	spi_sam_finish(regs);
	spi_spin_unlock(dev, key);
	return 0;
}

#ifndef CONFIG_SPI_RTIO

/* Fast path where every overlapping tx and rx buffer is the same length */
static void spi_sam_fast_transceive(const struct device *dev,
				    const struct spi_config *config,
				    const struct spi_buf_set *tx_bufs,
				    const struct spi_buf_set *rx_bufs)
{
	const struct spi_sam_config *cfg = dev->config;
	size_t tx_count = 0;
	size_t rx_count = 0;
	Spi *regs = cfg->regs;
	const struct spi_buf *tx = NULL;
	const struct spi_buf *rx = NULL;

	if (tx_bufs) {
		tx = tx_bufs->buffers;
		tx_count = tx_bufs->count;
	}

	if (rx_bufs) {
		rx = rx_bufs->buffers;
		rx_count = rx_bufs->count;
	}

	while (tx_count != 0 && rx_count != 0) {
		if (tx->buf == NULL) {
			spi_sam_rx(dev, regs, rx->buf, rx->len);
		} else if (rx->buf == NULL) {
			spi_sam_tx(dev, regs, tx->buf, tx->len);
		} else if (rx->len == tx->len) {
			spi_sam_txrx(dev, regs, tx->buf, rx->buf, rx->len);
		} else {
			__ASSERT(false, "Invalid fast transceive configuration");
		}

		tx++;
		tx_count--;
		rx++;
		rx_count--;
	}

	for (; tx_count != 0; tx_count--) {
		spi_sam_tx(dev, regs, tx->buf, tx->len);
		tx++;
	}

	for (; rx_count != 0; rx_count--) {
		spi_sam_rx(dev, regs, rx->buf, rx->len);
		rx++;
	}
}

static bool spi_sam_transfer_ongoing(struct spi_sam_data *data)
{
	return spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx);
}

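/* Slow path: shift one byte in each direction per call using the spi_context
 * bookkeeping.  Used when the TX/RX buffer sets are not regular.
 */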
static void spi_sam_shift_master(Spi *regs, struct spi_sam_data *data)
{
	uint8_t tx;
	uint8_t rx;

	if (spi_context_tx_buf_on(&data->ctx)) {
		tx = *(uint8_t *)(data->ctx.tx_buf);
	} else {
		tx = 0U;
	}

	while ((regs->SPI_SR & SPI_SR_TDRE) == 0) {
	}

	regs->SPI_TDR = SPI_TDR_TD(tx);
	spi_context_update_tx(&data->ctx, 1, 1);

	while ((regs->SPI_SR & SPI_SR_RDRF) == 0) {
	}

	rx = (uint8_t)regs->SPI_RDR;

	if (spi_context_rx_buf_on(&data->ctx)) {
		*data->ctx.rx_buf = rx;
	}
	spi_context_update_rx(&data->ctx, 1, 1);
}

/* Returns true if the request is suitable for the fast
 * path. Specifically, the bufs are a sequence of:
 *
 * - Zero or more RX and TX buf pairs where each is the same length.
 * - Zero or more trailing RX only bufs
 * - Zero or more trailing TX only bufs
 */
static bool spi_sam_is_regular(const struct spi_buf_set *tx_bufs,
			       const struct spi_buf_set *rx_bufs)
{
	const struct spi_buf *tx = NULL;
	const struct spi_buf *rx = NULL;
	size_t tx_count = 0;
	size_t rx_count = 0;

	if (tx_bufs) {
		tx = tx_bufs->buffers;
		tx_count = tx_bufs->count;
	}

	if (rx_bufs) {
		rx = rx_bufs->buffers;
		rx_count = rx_bufs->count;
	}

	if (!tx || !rx) {
		return true;
	}

	while (tx_count != 0 && rx_count != 0) {
		if (tx->len != rx->len) {
			return false;
		}

		tx++;
		tx_count--;
		rx++;
		rx_count--;
	}

	return true;
}

#else

static void spi_sam_iodev_complete(const struct device *dev, int status);
static void spi_sam_iodev_next(const struct device *dev, bool completion);

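/* Start the current SQE.  The PIO paths complete synchronously and return 0,
 * in which case the completion is reported immediately; the DMA path returns
 * -EWOULDBLOCK and completes later from dma_callback().
 */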
static void spi_sam_iodev_start(const struct device *dev)
{
	const struct spi_sam_config *cfg = dev->config;
	struct spi_sam_data *data = dev->data;
	struct rtio_sqe *sqe = &data->txn_curr->sqe;
	int ret = 0;

	switch (sqe->op) {
	case RTIO_OP_RX:
		ret = spi_sam_rx(dev, cfg->regs, sqe->buf, sqe->buf_len);
		break;
	case RTIO_OP_TX:
		ret = spi_sam_tx(dev, cfg->regs, sqe->buf, sqe->buf_len);
		break;
	case RTIO_OP_TINY_TX:
		ret = spi_sam_tx(dev, cfg->regs, sqe->tiny_buf, sqe->tiny_buf_len);
		break;
	case RTIO_OP_TXRX:
		ret = spi_sam_txrx(dev, cfg->regs, sqe->tx_buf, sqe->rx_buf, sqe->txrx_buf_len);
		break;
	default:
		LOG_ERR("Invalid op code %d for submission %p", sqe->op, (void *)sqe);
		struct rtio_iodev_sqe *txn_head = data->txn_head;

		spi_sam_iodev_next(dev, true);
		rtio_iodev_sqe_err(txn_head, -EINVAL);
		/* The failed submission has already been completed with an
		 * error and the next one (if any) started above, so skip the
		 * normal completion path below.
		 */
		return;
	}
	if (ret == 0) {
		spi_sam_iodev_complete(dev, 0);
	}
}

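/* Advance the submission queue: when "completion" is set the current
 * transaction is finished and the next queued one (if any) is taken over,
 * otherwise a new submission is only started if the bus is idle.
 */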
static void spi_sam_iodev_next(const struct device *dev, bool completion)
{
	struct spi_sam_data *data = dev->data;

	k_spinlock_key_t key = spi_spin_lock(dev);

	if (!completion && data->txn_curr != NULL) {
		spi_spin_unlock(dev, key);
		return;
	}

	struct mpsc_node *next = mpsc_pop(&data->io_q);

	if (next != NULL) {
		struct rtio_iodev_sqe *next_sqe = CONTAINER_OF(next, struct rtio_iodev_sqe, q);

		data->txn_head = next_sqe;
		data->txn_curr = next_sqe;
	} else {
		data->txn_head = NULL;
		data->txn_curr = NULL;
	}

	spi_spin_unlock(dev, key);

	if (data->txn_curr != NULL) {
		struct spi_dt_spec *spi_dt_spec = data->txn_curr->sqe.iodev->data;
		struct spi_config *spi_cfg = &spi_dt_spec->config;

		spi_sam_configure(dev, spi_cfg);
		spi_context_cs_control(&data->ctx, true);
		spi_sam_iodev_start(dev);
	}
}

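/* Called when the current SQE finishes: either advance to the next SQE in a
 * transaction, or release the chip select and report the whole submission.
 */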
static void spi_sam_iodev_complete(const struct device *dev, int status)
{
	struct spi_sam_data *data = dev->data;

	if (data->txn_curr->sqe.flags & RTIO_SQE_TRANSACTION) {
		data->txn_curr = rtio_txn_next(data->txn_curr);
		spi_sam_iodev_start(dev);
	} else {
		struct rtio_iodev_sqe *txn_head = data->txn_head;

		spi_context_cs_control(&data->ctx, false);
		spi_sam_iodev_next(dev, true);
		rtio_iodev_sqe_ok(txn_head, status);
	}
}

static void spi_sam_iodev_submit(const struct device *dev,
				 struct rtio_iodev_sqe *iodev_sqe)
{
	struct spi_sam_data *data = dev->data;

	mpsc_push(&data->io_q, &iodev_sqe->q);
	spi_sam_iodev_next(dev, false);
}
#endif

static int spi_sam_transceive(const struct device *dev,
			      const struct spi_config *config,
			      const struct spi_buf_set *tx_bufs,
			      const struct spi_buf_set *rx_bufs)
{
	struct spi_sam_data *data = dev->data;
	int err = 0;

	spi_context_lock(&data->ctx, false, NULL, NULL, config);

#if CONFIG_SPI_RTIO
	struct rtio_sqe *sqe;
	struct rtio_cqe *cqe;

	struct spi_dt_spec *dt_spec = &data->dt_spec;

	dt_spec->config = *config;

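	/* spi_rtio_copy() turns the buf sets into a chain of SQEs on the
	 * driver-owned context and returns how many completions to expect.
	 */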
	int ret = spi_rtio_copy(data->r, &data->iodev, tx_bufs, rx_bufs, &sqe);

	if (ret < 0) {
		err = ret;
		goto done;
	}

	/* Submit request and wait */
	rtio_submit(data->r, ret);

	while (ret > 0) {
		cqe = rtio_cqe_consume(data->r);

		if (cqe->result < 0) {
			err = cqe->result;
		}

		rtio_cqe_release(data->r, cqe);

		ret--;
	}
#else
	const struct spi_sam_config *cfg = dev->config;

	err = spi_sam_configure(dev, config);
	if (err != 0) {
		goto done;
	}

	spi_context_cs_control(&data->ctx, true);

	if (spi_sam_is_regular(tx_bufs, rx_bufs)) {
		spi_sam_fast_transceive(dev, config, tx_bufs, rx_bufs);
	} else {
		spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

		do {
			spi_sam_shift_master(cfg->regs, data);
		} while (spi_sam_transfer_ongoing(data));
	}

	spi_context_cs_control(&data->ctx, false);
#endif
done:
	spi_context_release(&data->ctx, err);
	return err;
}

static int spi_sam_transceive_sync(const struct device *dev,
				   const struct spi_config *config,
				   const struct spi_buf_set *tx_bufs,
				   const struct spi_buf_set *rx_bufs)
{
	return spi_sam_transceive(dev, config, tx_bufs, rx_bufs);
}

#ifdef CONFIG_SPI_ASYNC
static int spi_sam_transceive_async(const struct device *dev,
				    const struct spi_config *config,
				    const struct spi_buf_set *tx_bufs,
				    const struct spi_buf_set *rx_bufs,
				    spi_callback_t cb,
				    void *userdata)
{
	/* TODO: implement async transceive */
	return -ENOTSUP;
}
#endif /* CONFIG_SPI_ASYNC */

static int spi_sam_release(const struct device *dev,
			   const struct spi_config *config)
{
	struct spi_sam_data *data = dev->data;

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

static int spi_sam_init(const struct device *dev)
{
	int err;
	const struct spi_sam_config *cfg = dev->config;
	struct spi_sam_data *data = dev->data;

	/* Enable SPI clock in PMC */
	(void)clock_control_on(SAM_DT_PMC_CONTROLLER,
			       (clock_control_subsys_t)&cfg->clock_cfg);

	err = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (err < 0) {
		return err;
	}

	err = spi_context_cs_configure_all(&data->ctx);
	if (err < 0) {
		return err;
	}

#ifdef CONFIG_SPI_SAM_DMA
	k_sem_init(&data->dma_sem, 0, K_SEM_MAX_LIMIT);
#endif

#ifdef CONFIG_SPI_RTIO
	data->dt_spec.bus = dev;
	data->iodev.api = &spi_iodev_api;
	data->iodev.data = &data->dt_spec;
	mpsc_init(&data->io_q);
#endif

	spi_context_unlock_unconditionally(&data->ctx);

	/* The device will be configured and enabled when transceive
	 * is called.
	 */

	return 0;
}

static const struct spi_driver_api spi_sam_driver_api = {
	.transceive = spi_sam_transceive_sync,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_sam_transceive_async,
#endif
#ifdef CONFIG_SPI_RTIO
	.iodev_submit = spi_sam_iodev_submit,
#endif
	.release = spi_sam_release,
};

#define SPI_DMA_INIT(n)										\
	.dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, tx)),				\
	.dma_tx_channel = DT_INST_DMAS_CELL_BY_NAME(n, tx, channel),				\
	.dma_tx_perid = DT_INST_DMAS_CELL_BY_NAME(n, tx, perid),				\
	.dma_rx_channel = DT_INST_DMAS_CELL_BY_NAME(n, rx, channel),				\
	.dma_rx_perid = DT_INST_DMAS_CELL_BY_NAME(n, rx, perid),

#ifdef CONFIG_SPI_SAM_DMA
#define SPI_SAM_USE_DMA(n) DT_INST_DMAS_HAS_NAME(n, tx)
#else
#define SPI_SAM_USE_DMA(n) 0
#endif

#define SPI_SAM_DEFINE_CONFIG(n)								\
	static const struct spi_sam_config spi_sam_config_##n = {				\
		.regs = (Spi *)DT_INST_REG_ADDR(n),						\
		.clock_cfg = SAM_DT_INST_CLOCK_PMC_CFG(n),					\
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),					\
		.loopback = DT_INST_PROP(n, loopback),						\
		COND_CODE_1(SPI_SAM_USE_DMA(n), (SPI_DMA_INIT(n)), ())				\
	}

#define SPI_SAM_RTIO_DEFINE(n) RTIO_DEFINE(spi_sam_rtio_##n, CONFIG_SPI_SAM_RTIO_SQ_SIZE,	\
					   CONFIG_SPI_SAM_RTIO_SQ_SIZE)

#define SPI_SAM_DEVICE_INIT(n)									\
	PINCTRL_DT_INST_DEFINE(n);								\
	SPI_SAM_DEFINE_CONFIG(n);								\
	COND_CODE_1(CONFIG_SPI_RTIO, (SPI_SAM_RTIO_DEFINE(n)), ());				\
	static struct spi_sam_data spi_sam_dev_data_##n = {					\
		SPI_CONTEXT_INIT_LOCK(spi_sam_dev_data_##n, ctx),				\
		SPI_CONTEXT_INIT_SYNC(spi_sam_dev_data_##n, ctx),				\
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx)				\
		IF_ENABLED(CONFIG_SPI_RTIO, (.r = &spi_sam_rtio_##n))				\
	};											\
	DEVICE_DT_INST_DEFINE(n, &spi_sam_init, NULL,						\
			    &spi_sam_dev_data_##n,						\
			    &spi_sam_config_##n, POST_KERNEL,					\
			    CONFIG_SPI_INIT_PRIORITY, &spi_sam_driver_api);

DT_INST_FOREACH_STATUS_OKAY(SPI_SAM_DEVICE_INIT)