1 /*
2  * Copyright 2024-2025 NXP
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #define DT_DRV_COMPAT nxp_lpspi
8 
9 #include <zephyr/logging/log.h>
10 LOG_MODULE_REGISTER(spi_mcux_lpspi, CONFIG_SPI_LOG_LEVEL);
11 
12 #include "spi_nxp_lpspi_priv.h"
13 
/* Per-instance state private to this interrupt-driven LPSPI driver. */
struct lpspi_driver_data {
	/* Number of words queued into the TX fifo by the most recent fill;
	 * consumed by the TX irq handler to advance the spi_context.
	 */
	size_t fill_len;
	/* SPI word size of the current transfer, in bytes (validated <= 4). */
	uint8_t word_size_bytes;
};
18 
/* Current number of words held in the hardware RX fifo (from fifo status reg). */
static inline uint8_t rx_fifo_cur_len(LPSPI_Type *base)
{
	uint32_t fsr = base->FSR;

	return (uint8_t)((fsr & LPSPI_FSR_RXCOUNT_MASK) >> LPSPI_FSR_RXCOUNT_SHIFT);
}
23 
/* Current number of words held in the hardware TX fifo (from fifo status reg). */
static inline uint8_t tx_fifo_cur_len(LPSPI_Type *base)
{
	uint32_t fsr = base->FSR;

	return (uint8_t)((fsr & LPSPI_FSR_TXCOUNT_MASK) >> LPSPI_FSR_TXCOUNT_SHIFT);
}
28 
29 
30 /* Reads a word from the RX fifo and handles writing it into the RX spi buf */
lpspi_rx_word_write_bytes(const struct device * dev,size_t offset)31 static inline void lpspi_rx_word_write_bytes(const struct device *dev, size_t offset)
32 {
33 	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
34 	struct spi_mcux_data *data = dev->data;
35 	struct lpspi_driver_data *lpspi_data = (struct lpspi_driver_data *)data->driver_data;
36 	struct spi_context *ctx = &data->ctx;
37 	uint8_t num_bytes = MIN(lpspi_data->word_size_bytes, ctx->rx_len);
38 	uint8_t *buf = ctx->rx_buf + offset;
39 	uint32_t word = LPSPI_ReadData(base);
40 
41 	if (!spi_context_rx_buf_on(ctx) && spi_context_rx_on(ctx)) {
42 		/* receive no actual data if rx buf is NULL */
43 		return;
44 	}
45 
46 	for (uint8_t i = 0; i < num_bytes; i++) {
47 		buf[i] = (uint8_t)(word >> (BITS_PER_BYTE * i));
48 	}
49 }
50 
51 /* Reads a maximum number of words from RX fifo and writes them to the remainder of the RX buf */
lpspi_rx_buf_write_words(const struct device * dev,uint8_t max_read)52 static inline size_t lpspi_rx_buf_write_words(const struct device *dev, uint8_t max_read)
53 {
54 	struct spi_mcux_data *data = dev->data;
55 	struct lpspi_driver_data *lpspi_data = (struct lpspi_driver_data *)data->driver_data;
56 	struct spi_context *ctx = &data->ctx;
57 	size_t buf_len = DIV_ROUND_UP(ctx->rx_len, lpspi_data->word_size_bytes);
58 	uint8_t words_read = 0;
59 	size_t offset = 0;
60 
61 	while (buf_len-- > 0 && max_read-- > 0) {
62 		lpspi_rx_word_write_bytes(dev, offset);
63 		offset += lpspi_data->word_size_bytes;
64 		words_read++;
65 	}
66 
67 	return words_read;
68 }
69 
/* Drains the hardware RX fifo into the spi_context RX buffer(s).
 * Called from ISR context when the RX data-ready interrupt fires; disables
 * RX interrupts once all expected RX data has been received.
 */
static inline void lpspi_handle_rx_irq(const struct device *dev)
{
	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
	struct spi_mcux_data *data = dev->data;
	struct lpspi_driver_data *lpspi_data = (struct lpspi_driver_data *)data->driver_data;
	struct spi_context *ctx = &data->ctx;
	uint8_t rx_fsr = rx_fifo_cur_len(base);
	uint8_t total_words_written = 0;
	uint8_t total_words_read = 0;
	uint8_t words_read;

	LPSPI_ClearStatusFlags(base, kLPSPI_RxDataReadyFlag);

	LOG_DBG("RX FIFO: %d, RX BUF: %p", rx_fsr, ctx->rx_buf);

	/* Re-read the fifo level each pass: more words may arrive while the
	 * previous batch is being copied out.
	 */
	while ((rx_fsr = rx_fifo_cur_len(base)) > 0 && spi_context_rx_on(ctx)) {
		words_read = lpspi_rx_buf_write_words(dev, rx_fsr);
		total_words_read += words_read;
		/* Words popped with a NULL rx buf are discarded, not written. */
		total_words_written += (spi_context_rx_buf_on(ctx) ? words_read : 0);
		spi_context_update_rx(ctx, lpspi_data->word_size_bytes, words_read);
	}

	LOG_DBG("RX done %d words to spi buf", total_words_written);

	/* All expected RX received: stop RX interrupts and flush any leftover
	 * words out of the RX fifo.
	 */
	if (spi_context_rx_len_left(ctx) == 0) {
		LPSPI_DisableInterrupts(base, (uint32_t)kLPSPI_RxInterruptEnable);
		LPSPI_FlushFifo(base, false, true);
	}
}
99 
lpspi_next_tx_word(const struct device * dev,int offset)100 static inline uint32_t lpspi_next_tx_word(const struct device *dev, int offset)
101 {
102 	struct spi_mcux_data *data = dev->data;
103 	struct lpspi_driver_data *lpspi_data = (struct lpspi_driver_data *)data->driver_data;
104 	struct spi_context *ctx = &data->ctx;
105 	const uint8_t *byte = ctx->tx_buf + offset;
106 	uint32_t num_bytes = MIN(lpspi_data->word_size_bytes, ctx->tx_len);
107 	uint32_t next_word = 0;
108 
109 	for (uint8_t i = 0; i < num_bytes; i++) {
110 		next_word |= byte[i] << (BITS_PER_BYTE * i);
111 	}
112 
113 	return next_word;
114 }
115 
lpspi_fill_tx_fifo(const struct device * dev)116 static inline void lpspi_fill_tx_fifo(const struct device *dev)
117 {
118 	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
119 	struct spi_mcux_data *data = dev->data;
120 	struct lpspi_driver_data *lpspi_data = (struct lpspi_driver_data *)data->driver_data;
121 	size_t bytes_in_xfer = lpspi_data->fill_len * lpspi_data->word_size_bytes;
122 	size_t offset;
123 
124 	for (offset = 0; offset < bytes_in_xfer; offset += lpspi_data->word_size_bytes) {
125 		LPSPI_WriteData(base, lpspi_next_tx_word(dev, offset));
126 	}
127 
128 	LOG_DBG("Filled TX FIFO to %d words (%d bytes)", lpspi_data->fill_len, offset);
129 }
130 
lpspi_fill_tx_fifo_nop(const struct device * dev)131 static void lpspi_fill_tx_fifo_nop(const struct device *dev)
132 {
133 	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
134 	struct spi_mcux_data *data = dev->data;
135 	struct lpspi_driver_data *lpspi_data = (struct lpspi_driver_data *)data->driver_data;
136 
137 	for (int i = 0; i < lpspi_data->fill_len; i++) {
138 		LPSPI_WriteData(base, 0);
139 	}
140 
141 	LOG_DBG("Filled TX fifo with %d NOPs", lpspi_data->fill_len);
142 }
143 
lpspi_next_tx_fill(const struct device * dev)144 static void lpspi_next_tx_fill(const struct device *dev)
145 {
146 	const struct spi_mcux_config *config = dev->config;
147 	struct spi_mcux_data *data = dev->data;
148 	struct lpspi_driver_data *lpspi_data = (struct lpspi_driver_data *)data->driver_data;
149 	struct spi_context *ctx = &data->ctx;
150 	size_t max_chunk;
151 
152 	/* Convert bytes to words for this xfer */
153 	max_chunk = DIV_ROUND_UP(ctx->tx_len, lpspi_data->word_size_bytes);
154 	max_chunk = MIN(max_chunk, config->tx_fifo_size);
155 	lpspi_data->fill_len = max_chunk;
156 
157 	if (spi_context_tx_buf_on(ctx)) {
158 		lpspi_fill_tx_fifo(dev);
159 	} else {
160 		lpspi_fill_tx_fifo_nop(dev);
161 	}
162 }
163 
lpspi_handle_tx_irq(const struct device * dev)164 static inline void lpspi_handle_tx_irq(const struct device *dev)
165 {
166 	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
167 	struct spi_mcux_data *data = dev->data;
168 	struct lpspi_driver_data *lpspi_data = (struct lpspi_driver_data *)data->driver_data;
169 	struct spi_context *ctx = &data->ctx;
170 
171 	spi_context_update_tx(ctx, lpspi_data->word_size_bytes, lpspi_data->fill_len);
172 
173 	LPSPI_ClearStatusFlags(base, kLPSPI_TxDataRequestFlag);
174 
175 	if (!spi_context_tx_on(ctx)) {
176 		LPSPI_DisableInterrupts(base, (uint32_t)kLPSPI_TxInterruptEnable);
177 		return;
178 	}
179 
180 	lpspi_next_tx_fill(data->dev);
181 }
182 
/* Main LPSPI interrupt service routine: dispatches RX/TX handling and, once
 * TX is exhausted, drives the end-of-transfer sequence (NOP fills to keep
 * clocking RX, CS deassertion, context completion/release).
 */
static void lpspi_isr(const struct device *dev)
{
	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
	const struct spi_mcux_config *config = dev->config;
	uint32_t status_flags = LPSPI_GetStatusFlags(base);
	struct spi_mcux_data *data = dev->data;
	struct lpspi_driver_data *lpspi_data = (struct lpspi_driver_data *)data->driver_data;
	struct spi_context *ctx = &data->ctx;

	if (status_flags & kLPSPI_RxDataReadyFlag) {
		lpspi_handle_rx_irq(dev);
	}

	if (status_flags & kLPSPI_TxDataRequestFlag) {
		lpspi_handle_tx_irq(dev);
	}

	/* While TX data remains, the TX irq path keeps the transfer running. */
	if (spi_context_tx_on(ctx)) {
		return;
	}

	if (spi_context_rx_len_left(ctx) == 1) {
		/* Last RX word pending: leave continuous mode so CS can
		 * deassert after it is clocked in.
		 */
		base->TCR &= ~LPSPI_TCR_CONT_MASK;
	} else if (spi_context_rx_on(ctx)) {
		/* TX done but RX still pending: queue NOP words to keep the
		 * clock running, without overfilling the RX fifo.
		 */
		size_t rx_fifo_len = rx_fifo_cur_len(base);
		size_t expected_rx_left = rx_fifo_len < ctx->rx_len ? ctx->rx_len - rx_fifo_len : 0;
		size_t max_fill = MIN(expected_rx_left, config->rx_fifo_size);
		size_t tx_current_fifo_len = tx_fifo_cur_len(base);

		/* NOTE(review): if tx_current_fifo_len exceeds max_fill while
		 * still below ctx->rx_len, this unsigned subtraction wraps to
		 * a huge fill_len — confirm whether the condition should
		 * compare against max_fill instead of ctx->rx_len.
		 */
		lpspi_data->fill_len = tx_current_fifo_len < ctx->rx_len ?
					max_fill - tx_current_fifo_len : 0;

		lpspi_fill_tx_fifo_nop(dev);
	} else {
		/* Transfer fully complete: signal waiters, deassert CS, and
		 * release the context for the next caller.
		 */
		spi_context_complete(ctx, dev, 0);
		NVIC_ClearPendingIRQ(config->irqn);
		base->TCR &= ~LPSPI_TCR_CONT_MASK;
		lpspi_wait_tx_fifo_empty(dev);
		spi_context_cs_control(ctx, false);
		spi_context_release(&data->ctx, 0);
	}
}
225 
/* Common transfer entry point for sync and async transceive.
 *
 * Validates the requested word size, configures the peripheral, asserts CS,
 * primes the TX fifo, and enables the interrupts that drive the rest of the
 * transfer. On success the context is released by the ISR completion path;
 * on error it is released here.
 *
 * Returns 0 on success or a negative errno.
 */
static int transceive(const struct device *dev, const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs,
		      bool asynchronous, spi_callback_t cb, void *userdata)
{
	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
	struct spi_mcux_data *data = dev->data;
	struct lpspi_driver_data *lpspi_data = (struct lpspi_driver_data *)data->driver_data;
	struct spi_context *ctx = &data->ctx;
	uint32_t word_size_bits = SPI_WORD_SIZE_GET(spi_cfg->operation);
	int ret = 0;

	spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg);

	/* This driver transfers whole bytes only: a word size below 8 bits
	 * would make word_size_bytes zero (division by zero later in
	 * DIV_ROUND_UP), and a non-byte-multiple size would be silently
	 * truncated. Reject both up front, along with sizes over 32 bits.
	 */
	if (word_size_bits == 0 || (word_size_bits % BITS_PER_BYTE) != 0 ||
	    word_size_bits > 4 * BITS_PER_BYTE) {
		LOG_ERR("Word size must be a nonzero multiple of 8, max 32 bits");
		ret = -EINVAL;
		goto error;
	}

	lpspi_data->word_size_bytes = word_size_bits / BITS_PER_BYTE;

	spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, lpspi_data->word_size_bytes);

	ret = spi_mcux_configure(dev, spi_cfg);
	if (ret) {
		goto error;
	}

	/* Start from a clean fifo/flag/interrupt state. */
	LPSPI_FlushFifo(base, true, true);
	LPSPI_ClearStatusFlags(base, (uint32_t)kLPSPI_AllStatusFlag);
	LPSPI_DisableInterrupts(base, (uint32_t)kLPSPI_AllInterruptEnable);

	LOG_DBG("Starting LPSPI transfer");
	spi_context_cs_control(ctx, true);

	LPSPI_SetFifoWatermarks(base, 0, 0);
	LPSPI_Enable(base, true);

	/* keep the chip select asserted until the end of the zephyr xfer */
	base->TCR |= LPSPI_TCR_CONT_MASK;
	/* tcr is written to tx fifo */
	lpspi_wait_tx_fifo_empty(dev);

	/* start the transfer sequence which are handled by irqs */
	lpspi_next_tx_fill(dev);

	LPSPI_EnableInterrupts(base, (uint32_t)kLPSPI_TxInterruptEnable |
				     (uint32_t)kLPSPI_RxInterruptEnable);

	return spi_context_wait_for_completion(ctx);

error:
	spi_context_release(ctx, ret);
	return ret;
}
279 
/* Implements the SPI driver API's blocking transceive call. */
static int spi_mcux_transceive_sync(const struct device *dev, const struct spi_config *spi_cfg,
				    const struct spi_buf_set *tx_bufs,
				    const struct spi_buf_set *rx_bufs)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL);
}
286 
#ifdef CONFIG_SPI_ASYNC
/* Implements the SPI driver API's non-blocking transceive call; cb is
 * invoked from the ISR completion path.
 */
static int spi_mcux_transceive_async(const struct device *dev, const struct spi_config *spi_cfg,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs, spi_callback_t cb,
				     void *userdata)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata);
}
#endif /* CONFIG_SPI_ASYNC */
296 
/* SPI driver API vtable for the interrupt-driven LPSPI driver. */
static DEVICE_API(spi, spi_mcux_driver_api) = {
	.transceive = spi_mcux_transceive_sync,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_mcux_transceive_async,
#endif
#ifdef CONFIG_SPI_RTIO
	.iodev_submit = spi_rtio_iodev_default_submit,
#endif
	.release = spi_mcux_release,
};
307 
spi_mcux_init(const struct device * dev)308 static int spi_mcux_init(const struct device *dev)
309 {
310 	struct spi_mcux_data *data = dev->data;
311 	int err = 0;
312 
313 	err = spi_nxp_init_common(dev);
314 	if (err) {
315 		return err;
316 	}
317 
318 	spi_context_unlock_unconditionally(&data->ctx);
319 
320 	return 0;
321 }
322 
/* Instantiates per-instance driver data/config and registers one LPSPI
 * device for devicetree node instance n.
 */
#define LPSPI_INIT(n)                                                                              \
	SPI_NXP_LPSPI_COMMON_INIT(n)                                                               \
	SPI_MCUX_LPSPI_CONFIG_INIT(n)                                                              \
                                                                                                   \
	static struct lpspi_driver_data lpspi_##n##_driver_data;                                   \
                                                                                                   \
	static struct spi_mcux_data spi_mcux_data_##n = {                                          \
		SPI_NXP_LPSPI_COMMON_DATA_INIT(n)                                                  \
		.driver_data = &lpspi_##n##_driver_data,                                           \
	};                                                                                         \
                                                                                                   \
	SPI_DEVICE_DT_INST_DEFINE(n, spi_mcux_init, NULL, &spi_mcux_data_##n,                      \
				  &spi_mcux_config_##n, POST_KERNEL, CONFIG_SPI_INIT_PRIORITY,     \
				  &spi_mcux_driver_api);

/* When DMA support is compiled in, only instantiate this interrupt-driven
 * driver for nodes that have no DMA channels assigned.
 */
#define SPI_MCUX_LPSPI_INIT_IF_DMA(n) IF_DISABLED(SPI_NXP_LPSPI_HAS_DMAS(n), (LPSPI_INIT(n)))

/* Select between the DMA-aware and plain instantiation based on Kconfig. */
#define SPI_MCUX_LPSPI_INIT(n)                                                                     \
	COND_CODE_1(CONFIG_SPI_MCUX_LPSPI_DMA,				   \
						(SPI_MCUX_LPSPI_INIT_IF_DMA(n)), (LPSPI_INIT(n)))

DT_INST_FOREACH_STATUS_OKAY(SPI_MCUX_LPSPI_INIT)
345