/*
 * Copyright (c) 2016, Freescale Semiconductor, Inc.
 * Copyright (c) 2017,2019, NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT nxp_lpc_spi

#include <errno.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/clock_control.h>
#include <fsl_spi.h>
#include <zephyr/logging/log.h>
#ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA
#include <zephyr/drivers/dma.h>
#endif
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/sys_clock.h>
#include <zephyr/irq.h>

LOG_MODULE_REGISTER(spi_mcux_flexcomm, CONFIG_SPI_LOG_LEVEL);

#include "spi_context.h"

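/* The limits below mirror the Flexcomm SPI hardware: four SSEL chip
 * select lines and data frames of at most 16 bits (stated as an
 * assumption based on the LPC Flexcomm SPI block).
 */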
#define SPI_CHIP_SELECT_COUNT 4
#define SPI_MAX_DATA_WIDTH 16

struct spi_mcux_config {
	SPI_Type *base;
	const struct device *clock_dev;
	clock_control_subsys_t clock_subsys;
	void (*irq_config_func)(const struct device *dev);
	uint32_t pre_delay;
	uint32_t post_delay;
	uint32_t frame_delay;
	uint32_t transfer_delay;
	uint32_t def_char;
	const struct pinctrl_dev_config *pincfg;
};

#ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA
#define SPI_MCUX_FLEXCOMM_DMA_ERROR_FLAG	0x01
#define SPI_MCUX_FLEXCOMM_DMA_RX_DONE_FLAG	0x02
#define SPI_MCUX_FLEXCOMM_DMA_TX_DONE_FLAG	0x04
#define SPI_MCUX_FLEXCOMM_DMA_DONE_FLAG \
	(SPI_MCUX_FLEXCOMM_DMA_RX_DONE_FLAG | SPI_MCUX_FLEXCOMM_DMA_TX_DONE_FLAG)

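/* Each stream carries up to two chained block descriptors. Only the TX
 * path uses the second one: the final word is pushed separately so the
 * EOT control bits can be attached to it (see spi_mcux_dma_tx_load()).
 */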
struct stream {
	const struct device *dma_dev;
	uint32_t channel; /* stores the channel for dma */
	struct dma_config dma_cfg;
	struct dma_block_config dma_blk_cfg[2];
};
#endif

struct spi_mcux_data {
	const struct device *dev;
	spi_master_handle_t handle;
	struct spi_context ctx;
	size_t transfer_len;
#ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA
	volatile uint32_t status_flags;
	struct stream dma_rx;
	struct stream dma_tx;
	/* dummy value used for transferring NOP when tx buf is null */
	uint32_t dummy_tx_buffer;
	/* Used to send the last word */
	uint32_t last_word;
#endif
};

static void spi_mcux_transfer_next_packet(const struct device *dev)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	SPI_Type *base = config->base;
	struct spi_context *ctx = &data->ctx;
	spi_transfer_t transfer;
	status_t status;

	if ((ctx->tx_len == 0) && (ctx->rx_len == 0)) {
		/* nothing left to rx or tx, we're done! */
		spi_context_cs_control(&data->ctx, false);
		spi_context_complete(&data->ctx, dev, 0);
		return;
	}

	transfer.configFlags = 0;
	if (ctx->tx_len == 0) {
		/* rx only, nothing to tx */
		transfer.txData = NULL;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->rx_len;
	} else if (ctx->rx_len == 0) {
		/* tx only, nothing to rx */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = NULL;
		transfer.dataSize = ctx->tx_len;
	} else if (ctx->tx_len == ctx->rx_len) {
		/* rx and tx are the same length */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->tx_len;
	} else if (ctx->tx_len > ctx->rx_len) {
		/* Break up the tx into multiple transfers so we don't have to
		 * rx into a longer intermediate buffer. Leave chip select
		 * active between transfers.
		 */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->rx_len;
	} else {
		/* Break up the rx into multiple transfers so we don't have to
		 * tx from a longer intermediate buffer. Leave chip select
		 * active between transfers.
		 */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->tx_len;
	}

	if (ctx->tx_count <= 1 && ctx->rx_count <= 1) {
		transfer.configFlags = kSPI_FrameAssert;
	}

	data->transfer_len = transfer.dataSize;

	status = SPI_MasterTransferNonBlocking(base, &data->handle, &transfer);
	if (status != kStatus_Success) {
		LOG_ERR("Transfer could not start");
	}
}

static void spi_mcux_isr(const struct device *dev)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	SPI_Type *base = config->base;

	SPI_MasterTransferHandleIRQ(base, &data->handle);
}

static void spi_mcux_transfer_callback(SPI_Type *base,
		spi_master_handle_t *handle, status_t status, void *userData)
{
	struct spi_mcux_data *data = userData;

	spi_context_update_tx(&data->ctx, 1, data->transfer_len);
	spi_context_update_rx(&data->ctx, 1, data->transfer_len);

	spi_mcux_transfer_next_packet(data->dev);
}

static uint8_t spi_clock_cycles(uint32_t delay_ns, uint32_t sck_frequency_hz)
{
	/* Convert delay_ns to an integer number of clock cycles of frequency
	 * sck_frequency_hz. The maximum delay is 15 clock cycles.
	 */
	uint64_t delay_cycles = (uint64_t)delay_ns * sck_frequency_hz / NSEC_PER_SEC;

	/* clamp before narrowing so large delays saturate instead of wrapping */
	return (uint8_t)MIN(delay_cycles, 15);
}
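
/* Illustrative numbers: a 1000 ns pre-delay at an SCK of 8 MHz gives
 * 1000 * 8000000 / NSEC_PER_SEC = 8 cycles, while anything working out
 * above 15 cycles saturates, the hardware delay fields being (by
 * assumption from the register layout) only 4 bits wide.
 */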

static int spi_mcux_configure(const struct device *dev,
			      const struct spi_config *spi_cfg)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	SPI_Type *base = config->base;
	uint32_t clock_freq;
	uint32_t word_size;

	if (spi_context_configured(&data->ctx, spi_cfg)) {
		/* This configuration is already in use */
		return 0;
	}

	if (spi_cfg->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);
	if (word_size > SPI_MAX_DATA_WIDTH) {
		LOG_ERR("Word size %d is greater than %d",
			word_size, SPI_MAX_DATA_WIDTH);
		return -EINVAL;
	}

	/*
	 * Do master or slave initialisation, depending on the
	 * mode requested.
	 */
	if (SPI_OP_MODE_GET(spi_cfg->operation) == SPI_OP_MODE_MASTER) {
		spi_master_config_t master_config;

		SPI_MasterGetDefaultConfig(&master_config);

		if (!device_is_ready(config->clock_dev)) {
			LOG_ERR("clock control device not ready");
			return -ENODEV;
		}

		/* Get the clock frequency */
		if (clock_control_get_rate(config->clock_dev,
					   config->clock_subsys, &clock_freq)) {
			return -EINVAL;
		}

		if (spi_cfg->slave >= SPI_CHIP_SELECT_COUNT) {
			LOG_ERR("Slave %d exceeds the maximum of %d",
				spi_cfg->slave, SPI_CHIP_SELECT_COUNT - 1);
			return -EINVAL;
		}

		master_config.sselNum = spi_cfg->slave;
		master_config.sselPol = kSPI_SpolActiveAllLow;
		master_config.dataWidth = word_size - 1;

		master_config.polarity =
			(SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL)
			? kSPI_ClockPolarityActiveLow
			: kSPI_ClockPolarityActiveHigh;

		master_config.phase =
			(SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA)
			? kSPI_ClockPhaseSecondEdge
			: kSPI_ClockPhaseFirstEdge;

		master_config.direction =
			(spi_cfg->operation & SPI_TRANSFER_LSB)
			? kSPI_LsbFirst
			: kSPI_MsbFirst;

		master_config.baudRate_Bps = spi_cfg->frequency;

		spi_delay_config_t *delayConfig = &master_config.delayConfig;

		delayConfig->preDelay = spi_clock_cycles(config->pre_delay,
							 spi_cfg->frequency);
		delayConfig->postDelay = spi_clock_cycles(config->post_delay,
							  spi_cfg->frequency);
		delayConfig->frameDelay = spi_clock_cycles(config->frame_delay,
							   spi_cfg->frequency);
		delayConfig->transferDelay = spi_clock_cycles(config->transfer_delay,
							      spi_cfg->frequency);

		SPI_MasterInit(base, &master_config, clock_freq);

		SPI_SetDummyData(base, (uint8_t)config->def_char);

		SPI_MasterTransferCreateHandle(base, &data->handle,
					       spi_mcux_transfer_callback, data);

		data->ctx.config = spi_cfg;
	} else {
		spi_slave_config_t slave_config;

		SPI_SlaveGetDefaultConfig(&slave_config);

		slave_config.polarity =
			(SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL)
			? kSPI_ClockPolarityActiveLow
			: kSPI_ClockPolarityActiveHigh;

		slave_config.phase =
			(SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA)
			? kSPI_ClockPhaseSecondEdge
			: kSPI_ClockPhaseFirstEdge;

		slave_config.direction =
			(spi_cfg->operation & SPI_TRANSFER_LSB)
			? kSPI_LsbFirst
			: kSPI_MsbFirst;

		/* SS pin active low */
		slave_config.sselPol = kSPI_SpolActiveAllLow;
		slave_config.dataWidth = word_size - 1;

		SPI_SlaveInit(base, &slave_config);

		SPI_SetDummyData(base, (uint8_t)config->def_char);

		SPI_SlaveTransferCreateHandle(base, &data->handle,
					      spi_mcux_transfer_callback, data);

		data->ctx.config = spi_cfg;
	}

	return 0;
}

#ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA
/* Dummy buffer used as a sink when the rx buf is null */
static uint32_t dummy_rx_buffer;

/* This function is executed in the interrupt context */
static void spi_mcux_dma_callback(const struct device *dev, void *arg,
				  uint32_t channel, int status)
{
	/* arg directly holds the spi device */
	const struct device *spi_dev = arg;
	struct spi_mcux_data *data = spi_dev->data;

	if (status < 0) {
		LOG_ERR("DMA callback error with channel %d.", channel);
		data->status_flags |= SPI_MCUX_FLEXCOMM_DMA_ERROR_FLAG;
	} else {
		/* identify the origin of this callback */
		if (channel == data->dma_tx.channel) {
			/* this part of the transfer ends */
			data->status_flags |= SPI_MCUX_FLEXCOMM_DMA_TX_DONE_FLAG;
		} else if (channel == data->dma_rx.channel) {
			/* this part of the transfer ends */
			data->status_flags |= SPI_MCUX_FLEXCOMM_DMA_RX_DONE_FLAG;
		} else {
			LOG_ERR("DMA callback channel %d is not valid.",
				channel);
			data->status_flags |= SPI_MCUX_FLEXCOMM_DMA_ERROR_FLAG;
		}
	}

	spi_context_complete(&data->ctx, spi_dev, 0);
}

static void spi_mcux_prepare_txlastword(uint32_t *txLastWord,
		const uint8_t *buf, const struct spi_config *spi_cfg,
		size_t len)
{
	uint32_t word_size;

	word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);

	if (word_size > 8) {
		*txLastWord = (((uint32_t)buf[len - 1U] << 8U) |
			       (buf[len - 2U]));
	} else {
		*txLastWord = buf[len - 1U];
	}

	*txLastWord |= (uint32_t)SPI_FIFOWR_EOT_MASK;

	*txLastWord |= ((uint32_t)SPI_DEASSERT_ALL &
			(~(uint32_t)SPI_DEASSERTNUM_SSEL((uint32_t)spi_cfg->slave)));

	/* set width of data - range asserted at entry */
	*txLastWord |= SPI_FIFOWR_LEN(word_size - 1);
}
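
/* FIFOWR layout sketch (an assumption from the LPC Flexcomm SPI
 * register description): bits [15:0] carry TX data, and the upper
 * halfword carries control bits such as the TXSSELn_N deasserts, EOT
 * and the LEN field set above, so a full-word write queues data and
 * control together.
 */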

static void spi_mcux_prepare_txdummy(uint32_t *dummy, bool last_packet,
		const struct spi_config *spi_cfg)
{
	uint32_t word_size;

	word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);

	if (last_packet) {
		*dummy |= (uint32_t)SPI_FIFOWR_EOT_MASK;
	}

	*dummy |= ((uint32_t)SPI_DEASSERT_ALL &
		   (~(uint32_t)SPI_DEASSERTNUM_SSEL((uint32_t)spi_cfg->slave)));

	/* set width of data - range asserted at entry */
	*dummy |= SPI_FIFOWR_LEN(word_size - 1);
}

static int spi_mcux_dma_tx_load(const struct device *dev, const uint8_t *buf,
		const struct spi_config *spi_cfg, size_t len, bool last_packet)
{
	const struct spi_mcux_config *cfg = dev->config;
	struct spi_mcux_data *data = dev->data;
	struct dma_block_config *blk_cfg;
	int ret;
	SPI_Type *base = cfg->base;
	uint32_t word_size;

	word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);

	/* remember active TX DMA channel (used in callback) */
	struct stream *stream = &data->dma_tx;

	blk_cfg = &stream->dma_blk_cfg[0];

	/* prepare the block for this TX DMA channel */
	memset(blk_cfg, 0, sizeof(struct dma_block_config));

	/* tx direction has memory as source and periph as dest. */
	if (buf == NULL) {
		data->dummy_tx_buffer = 0;
		data->last_word = 0;
		spi_mcux_prepare_txdummy(&data->dummy_tx_buffer, last_packet, spi_cfg);

		if (last_packet &&
		    ((word_size > 8) ? (len > 2U) : (len > 1U))) {
			spi_mcux_prepare_txdummy(&data->last_word, last_packet, spi_cfg);
			blk_cfg->source_gather_en = 1;
			blk_cfg->source_address = (uint32_t)&data->dummy_tx_buffer;
			blk_cfg->dest_address = (uint32_t)&base->FIFOWR;
			blk_cfg->block_size = (word_size > 8) ?
					      (len - 2U) : (len - 1U);
			blk_cfg->next_block = &stream->dma_blk_cfg[1];
			blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;

			blk_cfg = &stream->dma_blk_cfg[1];

			/* prepare the block for this TX DMA channel */
			memset(blk_cfg, 0, sizeof(struct dma_block_config));
			blk_cfg->source_address = (uint32_t)&data->last_word;
			blk_cfg->dest_address = (uint32_t)&base->FIFOWR;
			blk_cfg->block_size = sizeof(uint32_t);
			blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		} else {
			blk_cfg->source_address = (uint32_t)&data->dummy_tx_buffer;
			blk_cfg->dest_address = (uint32_t)&base->FIFOWR;
			blk_cfg->block_size = len;
			blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		}
	} else {
		if (last_packet) {
			spi_mcux_prepare_txlastword(&data->last_word, buf, spi_cfg, len);
		}
		/* If this is the last packet and the transfer is longer than
		 * one data frame, use a chained DMA descriptor to send the
		 * last word.
		 */
		if (last_packet &&
		    ((word_size > 8) ? (len > 2U) : (len > 1U))) {
			blk_cfg->source_gather_en = 1;
			blk_cfg->source_address = (uint32_t)buf;
			blk_cfg->dest_address = (uint32_t)&base->FIFOWR;
			blk_cfg->block_size = (word_size > 8) ?
					      (len - 2U) : (len - 1U);
			blk_cfg->next_block = &stream->dma_blk_cfg[1];

			blk_cfg = &stream->dma_blk_cfg[1];

			/* prepare the block for this TX DMA channel */
			memset(blk_cfg, 0, sizeof(struct dma_block_config));
			blk_cfg->source_address = (uint32_t)&data->last_word;
			blk_cfg->dest_address = (uint32_t)&base->FIFOWR;
			blk_cfg->block_size = sizeof(uint32_t);
			blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		} else {
			blk_cfg->source_address = (uint32_t)buf;
			blk_cfg->dest_address = (uint32_t)&base->FIFOWR;
			blk_cfg->block_size = len;
		}
	}

	/* Enables the DMA request from SPI txFIFO */
	base->FIFOCFG |= SPI_FIFOCFG_DMATX_MASK;

	/* direction is given by the DT */
	stream->dma_cfg.head_block = &stream->dma_blk_cfg[0];
	/* give the client dev as arg, as the callback comes from the dma */
	stream->dma_cfg.user_data = (struct device *)dev;
	/* pass our client origin to the dma: data->dma_tx.channel */
	ret = dma_config(data->dma_tx.dma_dev, data->dma_tx.channel,
			 &stream->dma_cfg);
	/* the channel is the actual stream from 0 */
	if (ret != 0) {
		return ret;
	}

	uint32_t tmpData = 0U;

	spi_mcux_prepare_txdummy(&tmpData, last_packet, spi_cfg);

	/* Set up the control bits.
	 * A halfword write to just the control bits (offset 0xE22) does not
	 * push anything into the FIFO, and that access must be uint16_t
	 * sized: byte or halfword writes to FIFOWR itself push both the data
	 * and the current control bits into the FIFO.
	 */
	if ((last_packet) &&
	    ((word_size > 8) ? (len == 2U) : (len == 1U))) {
		*((uint16_t *)((uint32_t)&base->FIFOWR) + 1) = (uint16_t)(tmpData >> 16U);
	} else {
		/* Clear the SPI_FIFOWR_EOT_MASK bit when data is not the last */
		tmpData &= (~(uint32_t)SPI_FIFOWR_EOT_MASK);
		*((uint16_t *)((uint32_t)&base->FIFOWR) + 1) = (uint16_t)(tmpData >> 16U);
	}

	/* gives the request ID */
	return dma_start(data->dma_tx.dma_dev, data->dma_tx.channel);
}

static int spi_mcux_dma_rx_load(const struct device *dev, uint8_t *buf,
				size_t len)
{
	const struct spi_mcux_config *cfg = dev->config;
	struct spi_mcux_data *data = dev->data;
	struct dma_block_config *blk_cfg;
	int ret;
	SPI_Type *base = cfg->base;

	/* retrieve active RX DMA channel (used in callback) */
	struct stream *stream = &data->dma_rx;

	blk_cfg = &stream->dma_blk_cfg[0];

	/* prepare the block for this RX DMA channel */
	memset(blk_cfg, 0, sizeof(struct dma_block_config));
	blk_cfg->block_size = len;

	/* rx direction has periph as source and mem as dest. */
	if (buf == NULL) {
		/* if the rx buf is null, then write data to a dummy address. */
		blk_cfg->dest_address = (uint32_t)&dummy_rx_buffer;
		blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
	} else {
		blk_cfg->dest_address = (uint32_t)buf;
	}

	blk_cfg->source_address = (uint32_t)&base->FIFORD;

	/* direction is given by the DT */
	stream->dma_cfg.head_block = blk_cfg;
	stream->dma_cfg.user_data = (struct device *)dev;

	/* Enables the DMA request from SPI rxFIFO */
	base->FIFOCFG |= SPI_FIFOCFG_DMARX_MASK;

	/* pass our client origin to the dma: data->dma_rx.channel */
	ret = dma_config(data->dma_rx.dma_dev, data->dma_rx.channel,
			 &stream->dma_cfg);
	/* the channel is the actual stream from 0 */
	if (ret != 0) {
		return ret;
	}

	/* gives the request ID */
	return dma_start(data->dma_rx.dma_dev, data->dma_rx.channel);
}

static int spi_mcux_dma_move_buffers(const struct device *dev, size_t len,
		const struct spi_config *spi_cfg, bool last_packet)
{
	struct spi_mcux_data *data = dev->data;
	int ret;

	ret = spi_mcux_dma_rx_load(dev, data->ctx.rx_buf, len);
	if (ret != 0) {
		return ret;
	}

	ret = spi_mcux_dma_tx_load(dev, data->ctx.tx_buf, spi_cfg,
				   len, last_packet);

	return ret;
}

static int wait_dma_rx_tx_done(const struct device *dev)
{
	struct spi_mcux_data *data = dev->data;
	int ret;

	while (1) {
		ret = spi_context_wait_for_completion(&data->ctx);
		if (ret != 0) {
			/* waiting failed or timed out, don't spin forever */
			return ret;
		}

		if (data->status_flags & SPI_MCUX_FLEXCOMM_DMA_ERROR_FLAG) {
			return -EIO;
		}

		if ((data->status_flags & SPI_MCUX_FLEXCOMM_DMA_DONE_FLAG) ==
		    SPI_MCUX_FLEXCOMM_DMA_DONE_FLAG) {
			return 0;
		}
	}
}

static int transceive_dma(const struct device *dev,
			  const struct spi_config *spi_cfg,
			  const struct spi_buf_set *tx_bufs,
			  const struct spi_buf_set *rx_bufs,
			  bool asynchronous,
			  spi_callback_t cb,
			  void *userdata)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	SPI_Type *base = config->base;
	int ret;
	uint32_t word_size;
	uint16_t data_size;

	spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg);

	ret = spi_mcux_configure(dev, spi_cfg);
	if (ret) {
		goto out;
	}

	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

	spi_context_cs_control(&data->ctx, true);

	word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);

	data_size = (word_size > 8) ? (sizeof(uint16_t)) : (sizeof(uint8_t));
	data->dma_rx.dma_cfg.source_data_size = data_size;
	data->dma_rx.dma_cfg.dest_data_size = data_size;
	data->dma_tx.dma_cfg.source_data_size = data_size;
	data->dma_tx.dma_cfg.dest_data_size = data_size;

	while (data->ctx.rx_len > 0 || data->ctx.tx_len > 0) {
		size_t dma_len;

		/* last is used to deassert chip select if this
		 * is the last transfer in the set.
		 */
		bool last = false;

		if (data->ctx.rx_len == 0) {
			dma_len = data->ctx.tx_len;
			last = true;
		} else if (data->ctx.tx_len == 0) {
			dma_len = data->ctx.rx_len;
			last = true;
		} else if (data->ctx.tx_len == data->ctx.rx_len) {
			dma_len = data->ctx.rx_len;
			last = true;
		} else {
			dma_len = MIN(data->ctx.tx_len, data->ctx.rx_len);
			last = false;
		}

		/* At this point, last just means whether or not
		 * this transfer will completely cover
		 * the current tx/rx buffer in data->ctx,
		 * or require additional transfers because
		 * the two buffers are not the same size.
		 *
		 * If it covers the current ctx tx/rx buffers, then
		 * we'll move to the next pair of buffers (if any)
		 * after the transfer, but if there are
		 * no more buffer pairs, then this is the last
		 * transfer in the set and we need to deassert CS.
		 */
		if (last) {
			/* this dma transfer should cover
			 * the entire current data->ctx set
			 * of buffers. if there are more
			 * buffers in the set, then we don't
			 * want to deassert CS.
			 */
			if ((data->ctx.tx_count > 1) ||
			    (data->ctx.rx_count > 1)) {
				/* more buffers to transfer so
				 * this isn't last
				 */
				last = false;
			}
		}

		data->status_flags = 0;

		ret = spi_mcux_dma_move_buffers(dev, dma_len, spi_cfg, last);
		if (ret != 0) {
			break;
		}

		ret = wait_dma_rx_tx_done(dev);
		if (ret != 0) {
			break;
		}

		/* wait until TX FIFO is really empty */
		while (0U == (base->FIFOSTAT & SPI_FIFOSTAT_TXEMPTY_MASK)) {
		}

		spi_context_update_tx(&data->ctx, 1, dma_len);
		spi_context_update_rx(&data->ctx, 1, dma_len);
	}

	base->FIFOCFG &= ~SPI_FIFOCFG_DMATX_MASK;
	base->FIFOCFG &= ~SPI_FIFOCFG_DMARX_MASK;

	spi_context_cs_control(&data->ctx, false);

out:
	spi_context_release(&data->ctx, ret);

	return ret;
}

#endif

static int transceive(const struct device *dev,
		      const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	struct spi_mcux_data *data = dev->data;
	int ret;

	spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg);

	ret = spi_mcux_configure(dev, spi_cfg);
	if (ret) {
		goto out;
	}

	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

	spi_context_cs_control(&data->ctx, true);

	spi_mcux_transfer_next_packet(dev);

	ret = spi_context_wait_for_completion(&data->ctx);
out:
	spi_context_release(&data->ctx, ret);

	return ret;
}

static int spi_mcux_transceive(const struct device *dev,
			       const struct spi_config *spi_cfg,
			       const struct spi_buf_set *tx_bufs,
			       const struct spi_buf_set *rx_bufs)
{
#ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA
	return transceive_dma(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL);
#endif
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
static int spi_mcux_transceive_async(const struct device *dev,
				     const struct spi_config *spi_cfg,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs,
				     spi_callback_t cb,
				     void *userdata)
{
#ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA
	return transceive_dma(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata);
#endif

	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata);
}
#endif /* CONFIG_SPI_ASYNC */

static int spi_mcux_release(const struct device *dev,
			    const struct spi_config *spi_cfg)
{
	struct spi_mcux_data *data = dev->data;

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

static int spi_mcux_init(const struct device *dev)
{
	int err;
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;

	config->irq_config_func(dev);

	data->dev = dev;

	err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
	if (err) {
		return err;
	}

#ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA
	if (!device_is_ready(data->dma_tx.dma_dev)) {
		LOG_ERR("%s device is not ready", data->dma_tx.dma_dev->name);
		return -ENODEV;
	}

	if (!device_is_ready(data->dma_rx.dma_dev)) {
		LOG_ERR("%s device is not ready", data->dma_rx.dma_dev->name);
		return -ENODEV;
	}
#endif /* CONFIG_SPI_MCUX_FLEXCOMM_DMA */

	err = spi_context_cs_configure_all(&data->ctx);
	if (err < 0) {
		return err;
	}

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

static const struct spi_driver_api spi_mcux_driver_api = {
	.transceive = spi_mcux_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_mcux_transceive_async,
#endif
	.release = spi_mcux_release,
};

#define SPI_MCUX_FLEXCOMM_IRQ_HANDLER_DECL(id) \
	static void spi_mcux_config_func_##id(const struct device *dev)
#define SPI_MCUX_FLEXCOMM_IRQ_HANDLER_FUNC(id) \
	.irq_config_func = spi_mcux_config_func_##id,
#define SPI_MCUX_FLEXCOMM_IRQ_HANDLER(id) \
	static void spi_mcux_config_func_##id(const struct device *dev) \
	{ \
		IRQ_CONNECT(DT_INST_IRQN(id), \
			    DT_INST_IRQ(id, priority), \
			    spi_mcux_isr, DEVICE_DT_INST_GET(id), \
			    0); \
		irq_enable(DT_INST_IRQN(id)); \
	}

#ifndef CONFIG_SPI_MCUX_FLEXCOMM_DMA
#define SPI_DMA_CHANNELS(id)
#else
#define SPI_DMA_CHANNELS(id) \
	.dma_tx = { \
		.dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, tx)), \
		.channel = \
			DT_INST_DMAS_CELL_BY_NAME(id, tx, channel), \
		.dma_cfg = { \
			.channel_direction = MEMORY_TO_PERIPHERAL, \
			.dma_callback = spi_mcux_dma_callback, \
			.complete_callback_en = true, \
			.block_count = 2, \
		} \
	}, \
	.dma_rx = { \
		.dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, rx)), \
		.channel = \
			DT_INST_DMAS_CELL_BY_NAME(id, rx, channel), \
		.dma_cfg = { \
			.channel_direction = PERIPHERAL_TO_MEMORY, \
			.dma_callback = spi_mcux_dma_callback, \
			.block_count = 1, \
		} \
	}
#endif
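
/* SPI_DMA_CHANNELS() expects the instance to provide "tx" and "rx" DMA
 * bindings; an illustrative devicetree fragment (controller, channel
 * numbers and node label are board-specific assumptions):
 *
 *	&flexcomm3 {
 *		compatible = "nxp,lpc-spi";
 *		dmas = <&dma0 9>, <&dma0 8>;
 *		dma-names = "tx", "rx";
 *	};
 */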

#define SPI_MCUX_FLEXCOMM_DEVICE(id) \
	SPI_MCUX_FLEXCOMM_IRQ_HANDLER_DECL(id); \
	PINCTRL_DT_INST_DEFINE(id); \
	static const struct spi_mcux_config spi_mcux_config_##id = { \
		.base = (SPI_Type *)DT_INST_REG_ADDR(id), \
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(id)), \
		.clock_subsys = \
			(clock_control_subsys_t)DT_INST_CLOCKS_CELL(id, name), \
		SPI_MCUX_FLEXCOMM_IRQ_HANDLER_FUNC(id) \
		.pre_delay = DT_INST_PROP_OR(id, pre_delay, 0), \
		.post_delay = DT_INST_PROP_OR(id, post_delay, 0), \
		.frame_delay = DT_INST_PROP_OR(id, frame_delay, 0), \
		.transfer_delay = DT_INST_PROP_OR(id, transfer_delay, 0), \
		.def_char = DT_INST_PROP_OR(id, def_char, 0), \
		.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(id), \
	}; \
	static struct spi_mcux_data spi_mcux_data_##id = { \
		SPI_CONTEXT_INIT_LOCK(spi_mcux_data_##id, ctx), \
		SPI_CONTEXT_INIT_SYNC(spi_mcux_data_##id, ctx), \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(id), ctx) \
		SPI_DMA_CHANNELS(id) \
	}; \
	DEVICE_DT_INST_DEFINE(id, \
			      &spi_mcux_init, \
			      NULL, \
			      &spi_mcux_data_##id, \
			      &spi_mcux_config_##id, \
			      POST_KERNEL, \
			      CONFIG_SPI_INIT_PRIORITY, \
			      &spi_mcux_driver_api); \
	\
	SPI_MCUX_FLEXCOMM_IRQ_HANDLER(id)

DT_INST_FOREACH_STATUS_OKAY(SPI_MCUX_FLEXCOMM_DEVICE)
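/* Usage sketch (illustrative only, not part of the driver): an
 * application talks to an enabled instance through the generic Zephyr
 * SPI API, e.g.:
 *
 *	const struct device *spi = DEVICE_DT_GET(DT_NODELABEL(flexcomm3));
 *	struct spi_config cfg = {
 *		.frequency = 1000000U,
 *		.operation = SPI_OP_MODE_MASTER | SPI_WORD_SET(8),
 *	};
 *	uint8_t tx_data[2] = { 0x9f, 0x00 };
 *	struct spi_buf buf = { .buf = tx_data, .len = sizeof(tx_data) };
 *	struct spi_buf_set tx = { .buffers = &buf, .count = 1 };
 *
 *	spi_transceive(spi, &cfg, &tx, NULL);
 */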