1 /*
2 * Copyright (c) 2017, 2022-2023 NXP
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #define DT_DRV_COMPAT nxp_lpc_usart
8
9 /** @file
10 * @brief UART driver for MCUX Flexcomm USART.
11 */
12
13 #include <errno.h>
14 #include <zephyr/device.h>
15 #include <zephyr/drivers/uart.h>
16 #include <zephyr/drivers/clock_control.h>
17 #include <zephyr/irq.h>
18 #include <fsl_usart.h>
19 #include <soc.h>
20 #include <fsl_device_registers.h>
21 #include <zephyr/drivers/pinctrl.h>
22 #ifdef CONFIG_UART_ASYNC_API
23 #include <zephyr/drivers/dma.h>
24 #include <fsl_inputmux.h>
25 #endif
26
#ifdef CONFIG_UART_ASYNC_API
/* Per-direction (TX or RX) DMA configuration used by the async API. */
struct mcux_flexcomm_uart_dma_config {
	/* DMA controller device servicing this direction */
	const struct device *dev;
	/* DMA peripheral base address (used for INPUTMUX request routing) */
	DMA_Type *base;
	/* DMA channel number assigned to this direction */
	uint8_t channel;
	/* Pre-built DMA transfer configuration for this direction */
	struct dma_config cfg;
};
#endif
35
/* Static per-instance configuration, resolved from devicetree at build time. */
struct mcux_flexcomm_config {
	USART_Type *base;			/* USART register base address */
	const struct device *clock_dev;		/* clock controller device */
	clock_control_subsys_t clock_subsys;	/* clock subsystem for this instance */
	uint32_t baud_rate;			/* initial baud rate */
	uint8_t parity;				/* initial parity (UART_CFG_PARITY_*) */
#ifdef CONFIG_UART_MCUX_FLEXCOMM_ISR_SUPPORT
	/* Connects and enables the instance IRQ at init time */
	void (*irq_config_func)(const struct device *dev);
#endif
	const struct pinctrl_dev_config *pincfg;	/* pin configuration */
#ifdef CONFIG_UART_ASYNC_API
	struct mcux_flexcomm_uart_dma_config tx_dma;	/* TX direction DMA setup */
	struct mcux_flexcomm_uart_dma_config rx_dma;	/* RX direction DMA setup */
	/* Delayable-work handlers implementing the async RX/TX timeouts */
	void (*rx_timeout_func)(struct k_work *work);
	void (*tx_timeout_func)(struct k_work *work);
#endif
};
53
/* Use #ifdef like the rest of this file: CONFIG_UART_ASYNC_API is a Kconfig
 * bool that is either defined to 1 or entirely undefined, and plain `#if`
 * on an undefined macro is not -Wundef-clean.
 */
#ifdef CONFIG_UART_ASYNC_API
/* Bookkeeping for an in-flight async TX transfer. */
struct mcux_flexcomm_uart_tx_data {
	const uint8_t *xfer_buf;		/* user buffer being transmitted */
	size_t xfer_len;			/* length of the user buffer */
	struct dma_block_config active_block;	/* DMA block describing the transfer */
	struct k_work_delayable timeout_work;	/* schedules the uart_tx() timeout abort */
};

/* Bookkeeping for async RX reception and buffer rotation. */
struct mcux_flexcomm_uart_rx_data {
	uint8_t *xfer_buf;			/* buffer currently being filled by DMA */
	size_t xfer_len;			/* size of the current buffer */
	struct dma_block_config active_block;	/* DMA block describing the transfer */
	uint8_t *next_xfer_buf;			/* follow-up buffer queued via rx_buf_rsp() */
	size_t next_xfer_len;			/* size of the queued buffer */
	struct k_work_delayable timeout_work;	/* RX inactivity timeout work */
	int32_t timeout;			/* RX timeout in usec (SYS_FOREVER_US = none) */
	size_t count;				/* bytes received but not yet reported */
	size_t offset;				/* bytes already reported to the user */
};
#endif
74
/* Mutable per-instance driver state. */
struct mcux_flexcomm_data {
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	uart_irq_callback_user_data_t irq_callback;	/* interrupt-driven API callback */
	void *irq_cb_data;				/* user data for irq_callback */
#endif
#ifdef CONFIG_UART_ASYNC_API
	uart_callback_t async_callback;			/* async API event callback */
	void *async_cb_data;				/* user data for async_callback */
	struct mcux_flexcomm_uart_tx_data tx_data;	/* async TX transfer state */
	struct mcux_flexcomm_uart_rx_data rx_data;	/* async RX transfer state */
#endif
#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
	struct uart_config uart_config;			/* last applied configuration */
#endif
};
90
mcux_flexcomm_poll_in(const struct device * dev,unsigned char * c)91 static int mcux_flexcomm_poll_in(const struct device *dev, unsigned char *c)
92 {
93 const struct mcux_flexcomm_config *config = dev->config;
94 uint32_t flags = USART_GetStatusFlags(config->base);
95 int ret = -1;
96
97 if (flags & kUSART_RxFifoNotEmptyFlag) {
98 *c = USART_ReadByte(config->base);
99 ret = 0;
100 }
101
102 return ret;
103 }
104
mcux_flexcomm_poll_out(const struct device * dev,unsigned char c)105 static void mcux_flexcomm_poll_out(const struct device *dev,
106 unsigned char c)
107 {
108 const struct mcux_flexcomm_config *config = dev->config;
109
110 /* Wait until space is available in TX FIFO */
111 while (!(USART_GetStatusFlags(config->base) & kUSART_TxFifoEmptyFlag)) {
112 }
113
114 USART_WriteByte(config->base, c);
115 }
116
/*
 * Report and clear latched receive-error conditions.
 *
 * Returns a bitmask of UART_ERROR_* values (0 when no error is latched).
 * The reported flags are cleared so the next call reflects new errors only.
 *
 * NOTE(review): kStatus_USART_* identifiers are SDK status codes rather
 * than obvious FIFOSTAT bit masks - confirm against the fsl_usart.h in use
 * that they compare correctly with USART_GetStatusFlags() output.
 */
static int mcux_flexcomm_err_check(const struct device *dev)
{
	const struct mcux_flexcomm_config *config = dev->config;
	uint32_t flags = USART_GetStatusFlags(config->base);
	int err = 0;

	if (flags & kStatus_USART_RxRingBufferOverrun) {
		err |= UART_ERROR_OVERRUN;
	}

	if (flags & kStatus_USART_ParityError) {
		err |= UART_ERROR_PARITY;
	}

	if (flags & kStatus_USART_FramingError) {
		err |= UART_ERROR_FRAMING;
	}

	/* Clear everything we just reported */
	USART_ClearStatusFlags(config->base,
			       kStatus_USART_RxRingBufferOverrun |
			       kStatus_USART_ParityError |
			       kStatus_USART_FramingError);

	return err;
}
142
143 #ifdef CONFIG_UART_INTERRUPT_DRIVEN
mcux_flexcomm_fifo_fill(const struct device * dev,const uint8_t * tx_data,int len)144 static int mcux_flexcomm_fifo_fill(const struct device *dev,
145 const uint8_t *tx_data,
146 int len)
147 {
148 const struct mcux_flexcomm_config *config = dev->config;
149 uint8_t num_tx = 0U;
150
151 while ((len - num_tx > 0) &&
152 (USART_GetStatusFlags(config->base)
153 & kUSART_TxFifoNotFullFlag)) {
154
155 USART_WriteByte(config->base, tx_data[num_tx++]);
156 }
157
158 return num_tx;
159 }
160
/* Drain up to len bytes from the RX FIFO; returns the count read. */
static int mcux_flexcomm_fifo_read(const struct device *dev, uint8_t *rx_data,
				   const int len)
{
	const struct mcux_flexcomm_config *config = dev->config;
	uint8_t received = 0U;

	while (received < len &&
	       (USART_GetStatusFlags(config->base) & kUSART_RxFifoNotEmptyFlag)) {
		rx_data[received] = USART_ReadByte(config->base);
		received++;
	}

	return received;
}
176
mcux_flexcomm_irq_tx_enable(const struct device * dev)177 static void mcux_flexcomm_irq_tx_enable(const struct device *dev)
178 {
179 const struct mcux_flexcomm_config *config = dev->config;
180 uint32_t mask = kUSART_TxLevelInterruptEnable;
181
182 USART_EnableInterrupts(config->base, mask);
183 }
184
mcux_flexcomm_irq_tx_disable(const struct device * dev)185 static void mcux_flexcomm_irq_tx_disable(const struct device *dev)
186 {
187 const struct mcux_flexcomm_config *config = dev->config;
188 uint32_t mask = kUSART_TxLevelInterruptEnable;
189
190 USART_DisableInterrupts(config->base, mask);
191 }
192
mcux_flexcomm_irq_tx_complete(const struct device * dev)193 static int mcux_flexcomm_irq_tx_complete(const struct device *dev)
194 {
195 const struct mcux_flexcomm_config *config = dev->config;
196
197 return (config->base->STAT & USART_STAT_TXIDLE_MASK) != 0;
198 }
199
mcux_flexcomm_irq_tx_ready(const struct device * dev)200 static int mcux_flexcomm_irq_tx_ready(const struct device *dev)
201 {
202 const struct mcux_flexcomm_config *config = dev->config;
203 uint32_t mask = kUSART_TxLevelInterruptEnable;
204 uint32_t flags = USART_GetStatusFlags(config->base);
205
206 return (USART_GetEnabledInterrupts(config->base) & mask)
207 && (flags & kUSART_TxFifoEmptyFlag);
208 }
209
mcux_flexcomm_irq_rx_enable(const struct device * dev)210 static void mcux_flexcomm_irq_rx_enable(const struct device *dev)
211 {
212 const struct mcux_flexcomm_config *config = dev->config;
213 uint32_t mask = kUSART_RxLevelInterruptEnable;
214
215 USART_EnableInterrupts(config->base, mask);
216 }
217
mcux_flexcomm_irq_rx_disable(const struct device * dev)218 static void mcux_flexcomm_irq_rx_disable(const struct device *dev)
219 {
220 const struct mcux_flexcomm_config *config = dev->config;
221 uint32_t mask = kUSART_RxLevelInterruptEnable;
222
223 USART_DisableInterrupts(config->base, mask);
224 }
225
mcux_flexcomm_irq_rx_full(const struct device * dev)226 static int mcux_flexcomm_irq_rx_full(const struct device *dev)
227 {
228 const struct mcux_flexcomm_config *config = dev->config;
229 uint32_t flags = USART_GetStatusFlags(config->base);
230
231 return (flags & kUSART_RxFifoNotEmptyFlag) != 0U;
232 }
233
mcux_flexcomm_irq_rx_pending(const struct device * dev)234 static int mcux_flexcomm_irq_rx_pending(const struct device *dev)
235 {
236 const struct mcux_flexcomm_config *config = dev->config;
237 uint32_t mask = kUSART_RxLevelInterruptEnable;
238
239 return (USART_GetEnabledInterrupts(config->base) & mask)
240 && mcux_flexcomm_irq_rx_full(dev);
241 }
242
mcux_flexcomm_irq_err_enable(const struct device * dev)243 static void mcux_flexcomm_irq_err_enable(const struct device *dev)
244 {
245 const struct mcux_flexcomm_config *config = dev->config;
246 uint32_t mask = kStatus_USART_NoiseError |
247 kStatus_USART_FramingError |
248 kStatus_USART_ParityError;
249
250 USART_EnableInterrupts(config->base, mask);
251 }
252
mcux_flexcomm_irq_err_disable(const struct device * dev)253 static void mcux_flexcomm_irq_err_disable(const struct device *dev)
254 {
255 const struct mcux_flexcomm_config *config = dev->config;
256 uint32_t mask = kStatus_USART_NoiseError |
257 kStatus_USART_FramingError |
258 kStatus_USART_ParityError;
259
260 USART_DisableInterrupts(config->base, mask);
261 }
262
/* Any serviceable interrupt pending: TX ready or RX pending. */
static int mcux_flexcomm_irq_is_pending(const struct device *dev)
{
	if (mcux_flexcomm_irq_tx_ready(dev)) {
		return 1;
	}

	return mcux_flexcomm_irq_rx_pending(dev);
}
268
/*
 * uart_irq_update() implementation.
 *
 * This driver reads hardware status registers directly on every query, so
 * there is no cached interrupt state to refresh; the API only requires a
 * return of 1 to indicate the update succeeded.
 */
static int mcux_flexcomm_irq_update(const struct device *dev)
{
	return 1;
}
273
/*
 * Register the interrupt-driven API callback.
 *
 * When exclusive callbacks are configured, installing an IRQ callback
 * clears any async API callback so only one API is active at a time.
 */
static void mcux_flexcomm_irq_callback_set(const struct device *dev,
					   uart_irq_callback_user_data_t cb,
					   void *cb_data)
{
	struct mcux_flexcomm_data *data = dev->data;

	data->irq_callback = cb;
	data->irq_cb_data = cb_data;

#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	data->async_callback = NULL;
	data->async_cb_data = NULL;
#endif
}
288 #endif /* CONFIG_UART_INTERRUPT_DRIVEN */
289
290 #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
mcux_flexcomm_uart_configure(const struct device * dev,const struct uart_config * cfg)291 static int mcux_flexcomm_uart_configure(const struct device *dev, const struct uart_config *cfg)
292 {
293 const struct mcux_flexcomm_config *config = dev->config;
294 struct mcux_flexcomm_data *data = dev->data;
295 struct uart_config *uart_config = &data->uart_config;
296 usart_config_t usart_config;
297 usart_parity_mode_t parity_mode;
298 usart_stop_bit_count_t stop_bits;
299 usart_data_len_t data_bits = kUSART_8BitsPerChar;
300 bool nine_bit_mode = false;
301 uint32_t clock_freq;
302
303 /* Set up structure to reconfigure UART */
304 USART_GetDefaultConfig(&usart_config);
305
306 /* Set parity */
307 if (cfg->parity == UART_CFG_PARITY_ODD) {
308 parity_mode = kUSART_ParityOdd;
309 } else if (cfg->parity == UART_CFG_PARITY_EVEN) {
310 parity_mode = kUSART_ParityEven;
311 } else if (cfg->parity == UART_CFG_PARITY_NONE) {
312 parity_mode = kUSART_ParityDisabled;
313 } else {
314 return -ENOTSUP;
315 }
316 usart_config.parityMode = parity_mode;
317
318 /* Set baudrate */
319 usart_config.baudRate_Bps = cfg->baudrate;
320
321 /* Set stop bits */
322 if (cfg->stop_bits == UART_CFG_STOP_BITS_1) {
323 stop_bits = kUSART_OneStopBit;
324 } else if (cfg->stop_bits == UART_CFG_STOP_BITS_2) {
325 stop_bits = kUSART_TwoStopBit;
326 } else {
327 return -ENOTSUP;
328 }
329 usart_config.stopBitCount = stop_bits;
330
331 /* Set data bits */
332 if (cfg->data_bits == UART_CFG_DATA_BITS_5 ||
333 cfg->data_bits == UART_CFG_DATA_BITS_6) {
334 return -ENOTSUP;
335 } else if (cfg->data_bits == UART_CFG_DATA_BITS_7) {
336 data_bits = kUSART_7BitsPerChar;
337 } else if (cfg->data_bits == UART_CFG_DATA_BITS_8) {
338 data_bits = kUSART_8BitsPerChar;
339 } else if (cfg->data_bits == UART_CFG_DATA_BITS_9) {
340 nine_bit_mode = true;
341 } else {
342 return -EINVAL;
343 }
344 usart_config.bitCountPerChar = data_bits;
345
346 /* Set flow control */
347 if (cfg->flow_ctrl == UART_CFG_FLOW_CTRL_NONE) {
348 usart_config.enableHardwareFlowControl = false;
349 } else if (cfg->flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS) {
350 usart_config.enableHardwareFlowControl = true;
351 } else {
352 return -ENOTSUP;
353 }
354
355 /* Wait for USART to finish transmission and turn off */
356 USART_Deinit(config->base);
357
358 /* Get UART clock frequency */
359 clock_control_get_rate(config->clock_dev,
360 config->clock_subsys, &clock_freq);
361
362 /* Handle 9 bit mode */
363 USART_Enable9bitMode(config->base, nine_bit_mode);
364
365 /* Reconfigure UART */
366 USART_Init(config->base, &usart_config, clock_freq);
367
368 /* Update driver device data */
369 uart_config->parity = cfg->parity;
370 uart_config->baudrate = cfg->baudrate;
371 uart_config->stop_bits = cfg->stop_bits;
372 uart_config->data_bits = cfg->data_bits;
373 uart_config->flow_ctrl = cfg->flow_ctrl;
374
375 return 0;
376 }
377
/* Copy out the last configuration applied by init or uart_configure(). */
static int mcux_flexcomm_uart_config_get(const struct device *dev,
					 struct uart_config *cfg)
{
	struct mcux_flexcomm_data *data = dev->data;
	*cfg = data->uart_config;
	return 0;
}
385 #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */
386
387 #ifdef CONFIG_UART_ASYNC_API
388 /* This function is called by this driver to notify user callback of events */
/* This function is called by this driver to notify user callback of events.
 * A no-op if the user has not registered an async callback.
 */
static void async_user_callback(const struct device *dev,
				struct uart_event *evt)
{
	const struct mcux_flexcomm_data *data = dev->data;

	if (data->async_callback) {
		data->async_callback(dev, evt, data->async_cb_data);
	}
}
398
/*
 * Register the async API event callback.
 *
 * When exclusive callbacks are configured, installing an async callback
 * clears any interrupt-driven API callback so only one API is active.
 */
static int mcux_flexcomm_uart_callback_set(const struct device *dev,
					   uart_callback_t callback,
					   void *user_data)
{
	struct mcux_flexcomm_data *data = dev->data;

	data->async_callback = callback;
	data->async_cb_data = user_data;


#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	data->irq_callback = NULL;
	data->irq_cb_data = NULL;
#endif

	return 0;
}
416
/*
 * uart_tx() - start a DMA-driven transmit of buf/len.
 *
 * Returns 0 on success, -ENODEV if no TX DMA is configured, -EBUSY if a
 * transfer is already in flight, or a negative errno from the DMA driver.
 * With a finite timeout (in usec), a delayed work item is scheduled to
 * abort the transfer. Interrupts stay locked from the busy-check through
 * DMA start so the setup cannot race with the driver's own ISR paths.
 */
static int mcux_flexcomm_uart_tx(const struct device *dev, const uint8_t *buf,
				 size_t len, int32_t timeout)
{
	const struct mcux_flexcomm_config *config = dev->config;
	struct mcux_flexcomm_data *data = dev->data;
	int ret = 0;

	if (config->tx_dma.dev == NULL) {
		return -ENODEV;
	}

	unsigned int key = irq_lock();

	/* Getting DMA status to tell if channel is busy or not set up */
	struct dma_status status;

	ret = dma_get_status(config->tx_dma.dev, config->tx_dma.channel, &status);

	if (ret < 0) {
		irq_unlock(key);
		return ret;
	}

	/* There is an ongoing transfer */
	if (status.busy) {
		irq_unlock(key);
		return -EBUSY;
	}

	/* Disable TX DMA requests for uart while setting up */
	USART_EnableTxDMA(config->base, false);

	/* Set up the dma channel/transfer */
	data->tx_data.xfer_buf = buf;
	data->tx_data.xfer_len = len;
	data->tx_data.active_block.source_address = (uint32_t)buf;
	data->tx_data.active_block.dest_address = (uint32_t) &config->base->FIFOWR;
	data->tx_data.active_block.block_size = len;
	data->tx_data.active_block.next_block = NULL;

	ret = dma_config(config->tx_dma.dev, config->tx_dma.channel,
			 (struct dma_config *) &config->tx_dma.cfg);
	if (ret) {
		irq_unlock(key);
		return ret;
	}

	/* Enable interrupt for when TX fifo is empty (all data transmitted) */
	config->base->FIFOINTENSET |= USART_FIFOINTENSET_TXLVL_MASK;

	/* Enable TX DMA requests */
	USART_EnableTxDMA(config->base, true);

	/* Trigger the DMA to start transfer */
	ret = dma_start(config->tx_dma.dev, config->tx_dma.channel);
	if (ret) {
		irq_unlock(key);
		return ret;
	}

	/* Schedule a TX abort for @param timeout */
	if (timeout != SYS_FOREVER_US) {
		k_work_schedule(&data->tx_data.timeout_work, K_USEC(timeout));
	}

	irq_unlock(key);

	return ret;
}
486
/*
 * uart_tx_abort() - stop an in-flight DMA TX and report UART_TX_ABORTED.
 *
 * Also serves as the TX timeout handler target. Returns -EFAULT when no
 * transfer is active, otherwise 0 or a negative errno from the DMA driver.
 * The number of bytes already sent is derived from the DMA channel's
 * remaining (pending) length before the channel is stopped.
 */
static int mcux_flexcomm_uart_tx_abort(const struct device *dev)
{
	const struct mcux_flexcomm_config *config = dev->config;
	struct mcux_flexcomm_data *data = dev->data;
	int ret = 0;

	/* First disable DMA requests from UART to prevent transfer
	 * status change during the abort routine
	 */
	USART_EnableTxDMA(config->base, false);

	/* In case there is no transfer to abort */
	if (data->tx_data.xfer_len == 0) {
		return -EFAULT;
	}

	/* In case a user called this function, do not abort twice */
	(void)k_work_cancel_delayable(&data->tx_data.timeout_work);

	/* Getting dma status to use to calculate bytes sent */
	struct dma_status status = {0};

	ret = dma_get_status(config->tx_dma.dev, config->tx_dma.channel, &status);
	if (ret < 0) {
		return ret;
	}

	/* Done with the DMA transfer, can stop it now */
	ret = dma_stop(config->tx_dma.dev, config->tx_dma.channel);
	if (ret) {
		return ret;
	}

	/* Define TX abort event before resetting driver variables */
	size_t sent_len = data->tx_data.xfer_len - status.pending_length;
	const uint8_t *aborted_buf = data->tx_data.xfer_buf;
	struct uart_event tx_abort_event = {
		.type = UART_TX_ABORTED,
		.data.tx.buf = aborted_buf,
		.data.tx.len = sent_len
	};

	/* Driver data needs reset since there is no longer an ongoing
	 * transfer, this should before the user callback, not after,
	 * just in case the user callback calls tx again
	 */
	data->tx_data.xfer_len = 0;
	data->tx_data.xfer_buf = NULL;

	async_user_callback(dev, &tx_abort_event);

	return ret;
}
540
/*
 * uart_rx_enable() - start DMA-driven reception into buf/len.
 *
 * Returns 0 on success, -ENODEV if no RX DMA is configured, -EBUSY if a
 * transfer is already active, or a negative errno from the DMA driver.
 * With a finite timeout (usec), the start-bit-detect interrupt is enabled
 * so the ISR can (re)schedule the RX inactivity timeout. On success a
 * UART_RX_BUF_REQUEST event is issued to ask for a follow-up buffer.
 */
static int mcux_flexcomm_uart_rx_enable(const struct device *dev, uint8_t *buf,
					const size_t len, const int32_t timeout)
{
	const struct mcux_flexcomm_config *config = dev->config;
	struct mcux_flexcomm_data *data = dev->data;
	int ret = 0;

	if (config->rx_dma.dev == NULL) {
		return -ENODEV;
	}

	/* Getting DMA status to tell if channel is busy or not set up */
	struct dma_status status;

	ret = dma_get_status(config->rx_dma.dev, config->rx_dma.channel, &status);

	if (ret < 0) {
		return ret;
	}

	/* There is an ongoing transfer */
	if (status.busy) {
		return -EBUSY;
	}

	/* Disable RX DMA requests for uart while setting up */
	USART_EnableRxDMA(config->base, false);

	/* Set up the dma channel/transfer */
	data->rx_data.xfer_buf = buf;
	data->rx_data.xfer_len = len;
	data->rx_data.active_block.dest_address = (uint32_t)data->rx_data.xfer_buf;
	data->rx_data.active_block.source_address = (uint32_t) &config->base->FIFORD;
	data->rx_data.active_block.block_size = data->rx_data.xfer_len;

	ret = dma_config(config->rx_dma.dev, config->rx_dma.channel,
			 (struct dma_config *) &config->rx_dma.cfg);
	if (ret) {
		return ret;
	}

	data->rx_data.timeout = timeout;

	/* Enable RX DMA requests from UART */
	USART_EnableRxDMA(config->base, true);

	/* Enable start bit detected interrupt, this is the only
	 * way for the flexcomm uart to support the Zephyr Async API.
	 * This is only needed if using a timeout.
	 */
	if (timeout != SYS_FOREVER_US) {
		config->base->INTENSET |= USART_INTENSET_STARTEN_MASK;
	}

	/* Trigger the DMA to start transfer */
	ret = dma_start(config->rx_dma.dev, config->rx_dma.channel);
	if (ret) {
		return ret;
	}

	/* Request next buffer */
	struct uart_event rx_buf_request = {
		.type = UART_RX_BUF_REQUEST,
	};

	async_user_callback(dev, &rx_buf_request);

	return ret;
}
610
/*
 * Report newly received RX data to the user as a UART_RX_RDY event.
 *
 * Computes how far the DMA has advanced into the current buffer (from the
 * channel's remaining length) and, if bytes arrived since the last report,
 * emits an RX_RDY event covering only the new region, then advances the
 * offset/count bookkeeping. The dma_get_status() result is intentionally
 * ignored; a failure leaves pending_length unset, relying on the caller's
 * earlier successful channel setup.
 */
static void flexcomm_uart_rx_update(const struct device *dev)
{
	const struct mcux_flexcomm_config *config = dev->config;
	struct mcux_flexcomm_data *data = dev->data;

	struct dma_status status;

	(void)dma_get_status(config->rx_dma.dev, config->rx_dma.channel, &status);

	/* Calculate how many bytes have been received by RX DMA */
	size_t total_rx_receive_len = data->rx_data.xfer_len - status.pending_length;

	/* Generate RX ready event if there has been new data received */
	if (total_rx_receive_len > data->rx_data.offset) {

		data->rx_data.count = total_rx_receive_len - data->rx_data.offset;
		struct uart_event rx_rdy_event = {
			.type = UART_RX_RDY,
			.data.rx.buf = data->rx_data.xfer_buf,
			.data.rx.len = data->rx_data.count,
			.data.rx.offset = data->rx_data.offset,
		};

		async_user_callback(dev, &rx_rdy_event);
	}

	/* The data is no longer new, update buffer tracking variables */
	data->rx_data.offset += data->rx_data.count;
	data->rx_data.count = 0;

}
642
mcux_flexcomm_uart_rx_disable(const struct device * dev)643 static int mcux_flexcomm_uart_rx_disable(const struct device *dev)
644 {
645 const struct mcux_flexcomm_config *config = dev->config;
646 struct mcux_flexcomm_data *data = dev->data;
647 int ret = 0;
648
649 /* This bit can be used to check if RX is already disabled
650 * because it is the bit changed by enabling and disabling DMA
651 * requests, and in this driver, RX DMA requests should only be
652 * disabled when the rx function is disabled other than when
653 * setting up in uart_rx_enable.
654 */
655 if (!(config->base->FIFOCFG & USART_FIFOCFG_DMARX_MASK)) {
656 return -EFAULT;
657 }
658
659 /* In case a user called this function, don't disable twice */
660 (void)k_work_cancel_delayable(&data->rx_data.timeout_work);
661
662
663 /* Disable RX requests to pause DMA first and measure what happened,
664 * Can't stop yet because DMA pending length is needed to
665 * calculate how many bytes have been received
666 */
667 USART_EnableRxDMA(config->base, false);
668
669 /* Check if RX data received and generate rx ready event if so */
670 flexcomm_uart_rx_update(dev);
671
672 /* Notify DMA driver to stop transfer only after RX data handled */
673 ret = dma_stop(config->rx_dma.dev, config->rx_dma.channel);
674 if (ret) {
675 return ret;
676 }
677
678 /* Generate buffer release event for current buffer */
679 struct uart_event current_buffer_release_event = {
680 .type = UART_RX_BUF_RELEASED,
681 .data.rx_buf.buf = data->rx_data.xfer_buf,
682 };
683
684 async_user_callback(dev, ¤t_buffer_release_event);
685
686 /* Generate buffer release event for next buffer */
687 if (data->rx_data.next_xfer_buf) {
688 struct uart_event next_buffer_release_event = {
689 .type = UART_RX_BUF_RELEASED,
690 .data.rx_buf.buf = data->rx_data.next_xfer_buf
691 };
692
693 async_user_callback(dev, &next_buffer_release_event);
694 }
695
696 /* Reset RX driver data */
697 data->rx_data.xfer_buf = NULL;
698 data->rx_data.xfer_len = 0;
699 data->rx_data.next_xfer_buf = NULL;
700 data->rx_data.next_xfer_len = 0;
701 data->rx_data.offset = 0;
702 data->rx_data.count = 0;
703
704 /* Final event is the RX disable event */
705 struct uart_event rx_disabled_event = {
706 .type = UART_RX_DISABLED
707 };
708
709 async_user_callback(dev, &rx_disabled_event);
710
711 return ret;
712 }
713
/*
 * uart_rx_buf_rsp() - queue the follow-up RX buffer supplied by the user.
 * Returns -EBUSY if a next buffer is already queued, -EACCES if RX is not
 * currently enabled, 0 on success.
 */
static int mcux_flexcomm_uart_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len)
{
	const struct mcux_flexcomm_config *config = dev->config;
	struct mcux_flexcomm_data *data = dev->data;
	bool slot_taken = (data->rx_data.next_xfer_buf != NULL) ||
			  (data->rx_data.next_xfer_len != 0);

	/* Only one follow-up buffer may be queued at a time */
	if (slot_taken) {
		return -EBUSY;
	}

	/* RX DMA requests disabled means reception is not active */
	if ((config->base->FIFOCFG & USART_FIFOCFG_DMARX_MASK) == 0U) {
		return -EACCES;
	}

	/* If everything is fine, schedule the new buffer */
	data->rx_data.next_xfer_buf = buf;
	data->rx_data.next_xfer_len = len;

	return 0;
}
735
736 /* This callback is from the TX DMA and consumed by this driver */
/* This callback is from the TX DMA and consumed by this driver.
 *
 * Runs when the DMA has finished filling the TX FIFO: DMA requests are
 * turned off and the timeout abort is cancelled. The UART_TX_DONE event
 * is NOT raised here - it is raised from the ISR once the TX FIFO has
 * actually drained (TXLVL interrupt). channel/status are unused.
 */
static void mcux_flexcomm_uart_dma_tx_callback(const struct device *dma_device, void *cb_data,
					       uint32_t channel, int status)
{
	/* DMA callback data was configured during driver init as UART device ptr */
	struct device *dev = (struct device *)cb_data;

	const struct mcux_flexcomm_config *config = dev->config;
	struct mcux_flexcomm_data *data = dev->data;

	unsigned int key = irq_lock();

	/* Turn off requests since we are aborting */
	USART_EnableTxDMA(config->base, false);

	/* Timeout did not happen */
	(void)k_work_cancel_delayable(&data->tx_data.timeout_work);

	irq_unlock(key);
}
756
757 /* This callback is from the RX DMA and consumed by this driver */
/* This callback is from the RX DMA and consumed by this driver.
 *
 * Runs when the current RX buffer is full: reports remaining data, releases
 * the filled buffer, and either rotates in the queued next buffer (reloading
 * and restarting the DMA channel, then requesting another buffer) or, with
 * no next buffer available, disables RX entirely. channel/status are unused.
 *
 * Fix: the buffer-release callback argument had been corrupted by
 * HTML-entity mojibake ("&curren" rendered as the currency sign), leaving
 * an undeclared identifier; restored to &current_buffer_release_event.
 */
static void mcux_flexcomm_uart_dma_rx_callback(const struct device *dma_device, void *cb_data,
					       uint32_t channel, int status)
{
	/* DMA callback data was configured during driver init as UART device ptr */
	struct device *dev = (struct device *)cb_data;

	const struct mcux_flexcomm_config *config = dev->config;
	struct mcux_flexcomm_data *data = dev->data;

	/* Cancel timeout now that the transfer is complete */
	(void)k_work_cancel_delayable(&data->rx_data.timeout_work);

	/* Update user with received RX data if needed */
	flexcomm_uart_rx_update(dev);

	/* Release current buffer */
	struct uart_event current_buffer_release_event = {
		.type = UART_RX_BUF_RELEASED,
		.data.rx_buf.buf = data->rx_data.xfer_buf,
	};

	async_user_callback(dev, &current_buffer_release_event);

	if (data->rx_data.next_xfer_buf) {
		/* Replace buffer in driver data */
		data->rx_data.xfer_buf = data->rx_data.next_xfer_buf;
		data->rx_data.xfer_len = data->rx_data.next_xfer_len;
		data->rx_data.next_xfer_buf = NULL;
		data->rx_data.next_xfer_len = 0;

		/* Reload DMA channel with new buffer */
		data->rx_data.active_block.block_size = data->rx_data.xfer_len;
		data->rx_data.active_block.dest_address = (uint32_t) data->rx_data.xfer_buf;
		dma_reload(config->rx_dma.dev, config->rx_dma.channel,
			   data->rx_data.active_block.source_address,
			   data->rx_data.active_block.dest_address,
			   data->rx_data.active_block.block_size);

		/* Request next buffer */
		struct uart_event rx_buf_request = {
			.type = UART_RX_BUF_REQUEST,
		};

		async_user_callback(dev, &rx_buf_request);

		/* Start the new transfer */
		dma_start(config->rx_dma.dev, config->rx_dma.channel);

	} else {
		/* If there is no next available buffer then disable DMA */
		mcux_flexcomm_uart_rx_disable(dev);
	}

	/* Now that this transfer was finished, reset tracking variables */
	data->rx_data.count = 0;
	data->rx_data.offset = 0;
}
815
816 #if defined(CONFIG_SOC_SERIES_IMX_RT5XX) || defined(CONFIG_SOC_SERIES_IMX_RT6XX)
817 /*
818 * This functions calculates the inputmux connection value
819 * needed by INPUTMUX_EnableSignal to allow the UART's DMA
820 * request to reach the DMA.
821 */
/*
 * Build the INPUTMUX connection value for INPUTMUX_EnableSignal() that
 * routes this UART's DMA request to the given DMA channel.
 *
 * The returned word packs the request-enable selector plus, on RT5xx,
 * the channel-mux availability/value/select fields. The channel-range
 * arithmetic mirrors the SoC's CHMUX register layout.
 *
 * NOTE(review): the RT5xx channel>=28 branch offsets by (channel - 20);
 * presumably this accounts for the 24-27 gap in muxed channels - confirm
 * against the RT5xx reference manual INPUTMUX/CHMUX tables.
 */
static uint32_t fc_uart_calc_inmux_connection(uint8_t channel, DMA_Type *base)
{
	uint32_t chmux_avl = 0;
	uint32_t chmux_sel = 0;
	uint32_t chmux_val = 0;

#if defined(CONFIG_SOC_SERIES_IMX_RT5XX)
	uint32_t chmux_sel_id = 0;

	if (base == (DMA_Type *)DMA0_BASE) {
		chmux_sel_id = DMA0_CHMUX_SEL0_ID;
	} else if (base == (DMA_Type *)DMA1_BASE) {
		chmux_sel_id = DMA1_CHMUX_SEL0_ID;
	}

	/* Channels 16+ (except 24-27) have a channel mux available */
	if (channel >= 16 && !(channel >= 24 && channel <= 27)) {
		chmux_avl = 1 << CHMUX_AVL_SHIFT;
	} else {
		chmux_avl = 0;
	}

	/* 1 for flexcomm */
	chmux_val = 1 << CHMUX_VAL_SHIFT;

	/* Select the CHMUX register offset for muxed channels */
	if (channel <= 15 || (channel >= 24 && channel <= 27)) {
		chmux_sel = 0;
	} else if (channel >= 16 && channel <= 23) {
		chmux_sel = (chmux_sel_id + 4 * (channel - 16))
			    << CHMUX_OFF_SHIFT;
	} else {
		chmux_sel = (chmux_sel_id + 4 * (channel - 20))
			    << CHMUX_OFF_SHIFT;
	}

#endif /* RT5xx */

	uint32_t req_en_id = 0;

	if (base == (DMA_Type *)DMA0_BASE) {
		req_en_id = DMA0_REQ_ENA0_ID;
	} else if (base == (DMA_Type *)DMA1_BASE) {
		req_en_id = DMA1_REQ_ENA0_ID;
	}

	/* Request-enable register: one 32-channel register per ID slot */
	uint32_t en_val;

	if (channel <= 31) {
		en_val = channel + (req_en_id << ENA_SHIFT);
	} else {
		en_val = (channel - 32) + ((req_en_id + 4) << ENA_SHIFT);
	}

	/* Combine all fields into the final connection word */
	uint32_t ret = en_val + chmux_avl + chmux_val + chmux_sel;

	return ret;
}
882 #endif /* RT 3-digit */
883
884
/*
 * One-time async-API setup, called from driver init.
 *
 * Verifies both DMA controllers are configured and ready, quiesces the
 * UART's DMA requests, routes them through the INPUTMUX on RT5xx/RT6xx,
 * and initializes the RX/TX timeout work items. Returns -ENODEV if a DMA
 * device is missing or not ready, 0 on success.
 */
static int flexcomm_uart_async_init(const struct device *dev)
{
	const struct mcux_flexcomm_config *config = dev->config;
	struct mcux_flexcomm_data *data = dev->data;

	if (config->rx_dma.dev == NULL ||
	    config->tx_dma.dev == NULL) {
		return -ENODEV;
	}

	if (!device_is_ready(config->rx_dma.dev) ||
	    !device_is_ready(config->tx_dma.dev)) {
		return -ENODEV;
	}

	/* Disable DMA requests */
	USART_EnableTxDMA(config->base, false);
	USART_EnableRxDMA(config->base, false);

	/* Route DMA requests */
#if defined(CONFIG_SOC_SERIES_IMX_RT5XX) || defined(CONFIG_SOC_SERIES_IMX_RT6XX)
	/* RT 3 digit uses input mux to route DMA requests from
	 * the UART peripheral to a hardware designated DMA channel
	 */
	INPUTMUX_Init(INPUTMUX);
	INPUTMUX_EnableSignal(INPUTMUX,
			      fc_uart_calc_inmux_connection(config->rx_dma.channel,
							    config->rx_dma.base), true);
	INPUTMUX_EnableSignal(INPUTMUX,
			      fc_uart_calc_inmux_connection(config->tx_dma.channel,
							    config->tx_dma.base), true);
	INPUTMUX_Deinit(INPUTMUX);
#endif /* RT5xx and RT6xx */

	/* Init work objects for RX and TX timeouts */
	k_work_init_delayable(&data->tx_data.timeout_work,
			      config->tx_timeout_func);
	k_work_init_delayable(&data->rx_data.timeout_work,
			      config->rx_timeout_func);

	return 0;
}
927
928 #endif /* CONFIG_UART_ASYNC_API */
929
930 #ifdef CONFIG_UART_MCUX_FLEXCOMM_ISR_SUPPORT
/*
 * Shared instance ISR.
 *
 * Interrupt-driven API: delegates entirely to the registered callback.
 * Async API: handles start-bit detection (RX timeout management) and the
 * TXLVL interrupt that signals transmit completion (UART_TX_DONE).
 */
static void mcux_flexcomm_isr(const struct device *dev)
{
	struct mcux_flexcomm_data *data = dev->data;

#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	if (data->irq_callback) {
		data->irq_callback(dev, data->irq_cb_data);
	}
#endif

#ifdef CONFIG_UART_ASYNC_API
	const struct mcux_flexcomm_config *config = dev->config;

	/* If there is an async callback then we are using async api */
	if (data->async_callback) {

		/* Handle RX interrupt (START bit detected)
		 * RX interrupt defeats the purpose of UART ASYNC API
		 * because core is involved for every byte but
		 * it is included for compatibility of applications.
		 * There is no other way with flexcomm UART to handle
		 * Zephyr's RX ASYNC API. However, if not using the RX
		 * timeout (timeout is forever), then the performance is
		 * still as might be expected.
		 */
		if (config->base->INTSTAT & USART_INTSTAT_START_MASK) {

			/* Receiving some data so reschedule timeout,
			 * unless timeout is 0 in which case just handle
			 * rx data now. If timeout is forever, don't do anything.
			 */
			if (data->rx_data.timeout == 0) {
				flexcomm_uart_rx_update(dev);
			} else if (data->rx_data.timeout != SYS_FOREVER_US) {
				k_work_reschedule(&data->rx_data.timeout_work,
						  K_USEC(data->rx_data.timeout));
			}

			/* Write 1 to clear start bit status bit */
			config->base->STAT |= USART_STAT_START_MASK;
		}

		/* Handle TX interrupt (TXLVL = 0)
		 * Default TXLVL interrupt happens when TXLVL = 0, which
		 * has not been changed by this driver, so in this case the
		 * TX interrupt should happen when transfer is complete
		 * because DMA filling TX fifo is faster than transmitter rate
		 */
		if (config->base->FIFOINTSTAT & USART_FIFOINTSTAT_TXLVL_MASK) {

			/* Disable interrupt */
			config->base->FIFOINTENCLR = USART_FIFOINTENCLR_TXLVL_MASK;

			/* Set up TX done event to notify the user of completion */
			struct uart_event tx_done_event = {
				.type = UART_TX_DONE,
				.data.tx.buf = data->tx_data.xfer_buf,
				.data.tx.len = data->tx_data.xfer_len,
			};

			/* Reset TX data */
			data->tx_data.xfer_len = 0;
			data->tx_data.xfer_buf = NULL;

			async_user_callback(dev, &tx_done_event);
		}

	}
#endif /* CONFIG_UART_ASYNC_API */
}
1001 #endif /* CONFIG_UART_MCUX_FLEXCOMM_ISR_SUPPORT */
1002
1003
mcux_flexcomm_init(const struct device * dev)1004 static int mcux_flexcomm_init(const struct device *dev)
1005 {
1006 const struct mcux_flexcomm_config *config = dev->config;
1007 #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
1008 struct mcux_flexcomm_data *data = dev->data;
1009 struct uart_config *cfg = &data->uart_config;
1010 #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */
1011 usart_config_t usart_config;
1012 usart_parity_mode_t parity_mode;
1013 uint32_t clock_freq;
1014 int err;
1015
1016 err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
1017 if (err) {
1018 return err;
1019 }
1020
1021 if (!device_is_ready(config->clock_dev)) {
1022 return -ENODEV;
1023 }
1024
1025 /* Get the clock frequency */
1026 if (clock_control_get_rate(config->clock_dev, config->clock_subsys,
1027 &clock_freq)) {
1028 return -EINVAL;
1029 }
1030
1031 if (config->parity == UART_CFG_PARITY_ODD) {
1032 parity_mode = kUSART_ParityOdd;
1033 } else if (config->parity == UART_CFG_PARITY_EVEN) {
1034 parity_mode = kUSART_ParityEven;
1035 } else {
1036 parity_mode = kUSART_ParityDisabled;
1037 }
1038
1039 USART_GetDefaultConfig(&usart_config);
1040 usart_config.enableTx = true;
1041 usart_config.enableRx = true;
1042 usart_config.parityMode = parity_mode;
1043 usart_config.baudRate_Bps = config->baud_rate;
1044
1045 #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
1046 cfg->baudrate = config->baud_rate;
1047 cfg->parity = config->parity;
1048 /* From USART_GetDefaultConfig */
1049 cfg->stop_bits = UART_CFG_STOP_BITS_1;
1050 cfg->data_bits = UART_CFG_DATA_BITS_8;
1051 cfg->flow_ctrl = UART_CFG_FLOW_CTRL_NONE;
1052 #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */
1053
1054 USART_Init(config->base, &usart_config, clock_freq);
1055
1056 #ifdef CONFIG_UART_MCUX_FLEXCOMM_ISR_SUPPORT
1057 config->irq_config_func(dev);
1058 #endif
1059
1060 #ifdef CONFIG_UART_ASYNC_API
1061 err = flexcomm_uart_async_init(dev);
1062 if (err) {
1063 return err;
1064 }
1065 #endif
1066
1067 return 0;
1068 }
1069
/* UART driver API vtable for this driver. The polling entry points are
 * always provided; the runtime-configure, interrupt-driven, and async
 * groups are compiled in only when the corresponding Kconfig option is
 * enabled.
 */
static const struct uart_driver_api mcux_flexcomm_driver_api = {
	.poll_in = mcux_flexcomm_poll_in,
	.poll_out = mcux_flexcomm_poll_out,
	.err_check = mcux_flexcomm_err_check,
#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
	.configure = mcux_flexcomm_uart_configure,
	.config_get = mcux_flexcomm_uart_config_get,
#endif
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	.fifo_fill = mcux_flexcomm_fifo_fill,
	.fifo_read = mcux_flexcomm_fifo_read,
	.irq_tx_enable = mcux_flexcomm_irq_tx_enable,
	.irq_tx_disable = mcux_flexcomm_irq_tx_disable,
	.irq_tx_complete = mcux_flexcomm_irq_tx_complete,
	.irq_tx_ready = mcux_flexcomm_irq_tx_ready,
	.irq_rx_enable = mcux_flexcomm_irq_rx_enable,
	.irq_rx_disable = mcux_flexcomm_irq_rx_disable,
	/* "rx ready" is implemented by the RX-FIFO-full check */
	.irq_rx_ready = mcux_flexcomm_irq_rx_full,
	.irq_err_enable = mcux_flexcomm_irq_err_enable,
	.irq_err_disable = mcux_flexcomm_irq_err_disable,
	.irq_is_pending = mcux_flexcomm_irq_is_pending,
	.irq_update = mcux_flexcomm_irq_update,
	.irq_callback_set = mcux_flexcomm_irq_callback_set,
#endif
#ifdef CONFIG_UART_ASYNC_API
	.callback_set = mcux_flexcomm_uart_callback_set,
	.tx = mcux_flexcomm_uart_tx,
	.tx_abort = mcux_flexcomm_uart_tx_abort,
	.rx_enable = mcux_flexcomm_uart_rx_enable,
	.rx_disable = mcux_flexcomm_uart_rx_disable,
	.rx_buf_rsp = mcux_flexcomm_uart_rx_buf_rsp,
#endif
};
1103
1104
#ifdef CONFIG_UART_MCUX_FLEXCOMM_ISR_SUPPORT
/* Define a per-instance IRQ configuration function that connects the
 * instance's devicetree interrupt to mcux_flexcomm_isr and enables it.
 * Called from mcux_flexcomm_init() via config->irq_config_func.
 */
#define UART_MCUX_FLEXCOMM_IRQ_CFG_FUNC(n)				\
	static void mcux_flexcomm_irq_config_func_##n(const struct device *dev) \
	{								\
		IRQ_CONNECT(DT_INST_IRQN(n),				\
			    DT_INST_IRQ(n, priority),			\
			    mcux_flexcomm_isr, DEVICE_DT_INST_GET(n), 0); \
									\
		irq_enable(DT_INST_IRQN(n));				\
	}
/* Config-struct initializer fragment wiring in the function above. */
#define UART_MCUX_FLEXCOMM_IRQ_CFG_FUNC_INIT(n)				\
	.irq_config_func = mcux_flexcomm_irq_config_func_##n,
#else
/* No ISR support configured: both helpers expand to nothing. */
#define UART_MCUX_FLEXCOMM_IRQ_CFG_FUNC(n)
#define UART_MCUX_FLEXCOMM_IRQ_CFG_FUNC_INIT(n)
#endif /* CONFIG_UART_MCUX_FLEXCOMM_ISR_SUPPORT */
1121
#ifdef CONFIG_UART_ASYNC_API
/* Per-instance TX timeout work handler: aborts the in-flight async TX
 * when its deadline expires.
 */
#define UART_MCUX_FLEXCOMM_TX_TIMEOUT_FUNC(n)				\
	static void mcux_flexcomm_uart_##n##_tx_timeout(struct k_work *work) \
	{								\
		mcux_flexcomm_uart_tx_abort(DEVICE_DT_INST_GET(n));	\
	}
/* Per-instance RX timeout work handler: flushes whatever the RX DMA has
 * received so far up to the application.
 */
#define UART_MCUX_FLEXCOMM_RX_TIMEOUT_FUNC(n)				\
	static void mcux_flexcomm_uart_##n##_rx_timeout(struct k_work *work) \
	{								\
		flexcomm_uart_rx_update(DEVICE_DT_INST_GET(n));		\
	}

/* Emit the timeout handlers for every enabled instance. */
DT_INST_FOREACH_STATUS_OKAY(UART_MCUX_FLEXCOMM_TX_TIMEOUT_FUNC);
DT_INST_FOREACH_STATUS_OKAY(UART_MCUX_FLEXCOMM_RX_TIMEOUT_FUNC);

/* Config-struct initializer fragment for the async (DMA) path.
 * Both directions use single-byte, single-block transfers whose
 * head_block lives in the instance's data struct (which is why the data
 * struct must be defined before the config struct). Note the dma_slot
 * is taken from the same devicetree "channel" cell as the channel
 * number -- assumes a 1:1 channel-to-request mapping on LPC DMA;
 * TODO confirm against the DMA controller binding.
 */
#define UART_MCUX_FLEXCOMM_ASYNC_CFG(n)					\
	.tx_dma = {							\
		.dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, tx)),	\
		.channel = DT_INST_DMAS_CELL_BY_NAME(n, tx, channel),	\
		.cfg = {						\
			.source_burst_length = 1,			\
			.dest_burst_length = 1,				\
			.source_data_size = 1,				\
			.dest_data_size = 1,				\
			.complete_callback_en = 1,			\
			.error_callback_en = 1,				\
			.block_count = 1,				\
			.head_block =					\
				&mcux_flexcomm_##n##_data.tx_data.active_block, \
			.channel_direction = MEMORY_TO_PERIPHERAL,	\
			.dma_slot = DT_INST_DMAS_CELL_BY_NAME(n, tx, channel), \
			.dma_callback = mcux_flexcomm_uart_dma_tx_callback, \
			.user_data = (void *)DEVICE_DT_INST_GET(n),	\
		},							\
		.base = (DMA_Type *)					\
				DT_REG_ADDR(DT_INST_DMAS_CTLR_BY_NAME(n, tx)), \
	},								\
	.rx_dma = {							\
		.dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, rx)),	\
		.channel = DT_INST_DMAS_CELL_BY_NAME(n, rx, channel),	\
		.cfg = {						\
			.source_burst_length = 1,			\
			.dest_burst_length = 1,				\
			.source_data_size = 1,				\
			.dest_data_size = 1,				\
			.complete_callback_en = 1,			\
			.error_callback_en = 1,				\
			.block_count = 1,				\
			.head_block =					\
				&mcux_flexcomm_##n##_data.rx_data.active_block, \
			.channel_direction = PERIPHERAL_TO_MEMORY,	\
			.dma_slot = DT_INST_DMAS_CELL_BY_NAME(n, rx, channel), \
			.dma_callback = mcux_flexcomm_uart_dma_rx_callback, \
			.user_data = (void *)DEVICE_DT_INST_GET(n)	\
		},							\
		.base = (DMA_Type *)					\
				DT_REG_ADDR(DT_INST_DMAS_CTLR_BY_NAME(n, rx)), \
	},								\
	.rx_timeout_func = mcux_flexcomm_uart_##n##_rx_timeout,		\
	.tx_timeout_func = mcux_flexcomm_uart_##n##_tx_timeout,
#else
/* Async API disabled: no DMA configuration is emitted. */
#define UART_MCUX_FLEXCOMM_ASYNC_CFG(n)
#endif /* CONFIG_UART_ASYNC_API */
1185
/* Define the constant per-instance config struct from devicetree
 * properties. "parity" defaults to none when absent. The IRQ and async
 * fragments expand to nothing when their features are disabled.
 */
#define UART_MCUX_FLEXCOMM_INIT_CFG(n)					\
static const struct mcux_flexcomm_config mcux_flexcomm_##n##_config = {	\
	.base = (USART_Type *)DT_INST_REG_ADDR(n),			\
	.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)),		\
	.clock_subsys =							\
	(clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name),		\
	.baud_rate = DT_INST_PROP(n, current_speed),			\
	.parity = DT_INST_ENUM_IDX_OR(n, parity, UART_CFG_PARITY_NONE),	\
	.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),			\
	UART_MCUX_FLEXCOMM_IRQ_CFG_FUNC_INIT(n)				\
	UART_MCUX_FLEXCOMM_ASYNC_CFG(n)					\
};
1198
/* Top-level per-instance instantiation: pinctrl state, data struct,
 * device definition, IRQ config function, and config struct. The config
 * struct is only forward-declared here because its full definition (in
 * UART_MCUX_FLEXCOMM_INIT_CFG) references the data struct and the IRQ
 * config function, which must therefore be emitted first.
 */
#define UART_MCUX_FLEXCOMM_INIT(n)					\
									\
	PINCTRL_DT_INST_DEFINE(n);					\
									\
	static struct mcux_flexcomm_data mcux_flexcomm_##n##_data;	\
									\
	static const struct mcux_flexcomm_config mcux_flexcomm_##n##_config; \
									\
	DEVICE_DT_INST_DEFINE(n,					\
			    &mcux_flexcomm_init,			\
			    NULL,					\
			    &mcux_flexcomm_##n##_data,			\
			    &mcux_flexcomm_##n##_config,		\
			    PRE_KERNEL_1,				\
			    CONFIG_SERIAL_INIT_PRIORITY,		\
			    &mcux_flexcomm_driver_api);			\
									\
	UART_MCUX_FLEXCOMM_IRQ_CFG_FUNC(n)				\
									\
	UART_MCUX_FLEXCOMM_INIT_CFG(n)

/* Instantiate the driver for every enabled nxp,lpc-usart node. */
DT_INST_FOREACH_STATUS_OKAY(UART_MCUX_FLEXCOMM_INIT)
1221