1 /*
2 * Copyright 2017,2021,2023-2024 NXP
3 * Copyright (c) 2020 Softube
4 *
5 * SPDX-License-Identifier: Apache-2.0
6 */
7
8 #define DT_DRV_COMPAT nxp_kinetis_lpuart
9
10 #include <errno.h>
11 #include <zephyr/device.h>
12 #include <zephyr/drivers/uart.h>
13 #include <zephyr/drivers/clock_control.h>
14 #include <zephyr/irq.h>
15 #include <zephyr/kernel.h>
16 #include <zephyr/pm/policy.h>
17 #include <zephyr/drivers/pinctrl.h>
18 #ifdef CONFIG_UART_ASYNC_API
19 #include <zephyr/drivers/dma.h>
20 #endif
21 #include <zephyr/logging/log.h>
22
23 #include <fsl_lpuart.h>
24 #if CONFIG_NXP_LP_FLEXCOMM
25 #include <zephyr/drivers/mfd/nxp_lp_flexcomm.h>
26 #endif
27
28 LOG_MODULE_REGISTER(uart_mcux_lpuart, LOG_LEVEL_ERR);
29
30 #define PINCTRL_STATE_FLOWCONTROL PINCTRL_STATE_PRIV_START
31
32 #if defined(CONFIG_UART_ASYNC_API) && defined(CONFIG_UART_INTERRUPT_DRIVEN)
33 /* there are already going to be build errors, but at least this message will
34 * be the first error from this driver making the reason clear
35 */
36 BUILD_ASSERT(IS_ENABLED(CONFIG_UART_EXCLUSIVE_API_CALLBACKS), ""
37 "LPUART must use exclusive api callbacks");
38 #endif
39
40 #ifdef CONFIG_UART_ASYNC_API
/* DMA resources for one LPUART transfer direction (one instance each
 * for TX and RX in mcux_lpuart_config).
 */
struct lpuart_dma_config {
	const struct device *dma_dev;	/* DMA controller servicing this direction */
	const uint32_t dma_channel;	/* channel number on dma_dev */
	struct dma_config dma_cfg;	/* configuration handed to dma_config() */
};
46 #endif /* CONFIG_UART_ASYNC_API */
47
/* Constant (ROM-able) per-instance configuration, built from devicetree. */
struct mcux_lpuart_config {
	LPUART_Type *base;		/* LPUART peripheral register base */
#ifdef CONFIG_NXP_LP_FLEXCOMM
	const struct device *parent_dev; /* LP Flexcomm parent device */
#endif
	const struct device *clock_dev;	/* clock controller for this instance */
	const struct pinctrl_dev_config *pincfg;
	clock_control_subsys_t clock_subsys;
	uint32_t baud_rate;		/* initial baud rate from devicetree */
	uint8_t flow_ctrl;		/* initial flow-control setting */
	uint8_t parity;			/* initial parity setting */
	bool rs485_de_active_low;	/* RS-485 driver-enable polarity */
	bool loopback_en;		/* enable internal loopback */
	bool single_wire;		/* single-wire (half-duplex) mode */
	bool tx_invert;			/* invert TX line polarity */
	bool rx_invert;			/* invert RX line polarity */
#ifdef CONFIG_UART_MCUX_LPUART_ISR_SUPPORT
	/* Hooks up this instance's IRQ(s); generated per instance */
	void (*irq_config_func)(const struct device *dev);
#endif
#ifdef CONFIG_UART_ASYNC_API
	const struct lpuart_dma_config rx_dma_config;	/* RX direction DMA */
	const struct lpuart_dma_config tx_dma_config;	/* TX direction DMA */
#endif /* CONFIG_UART_ASYNC_API */
};
72
73 #ifdef CONFIG_UART_ASYNC_API
/* Mutable state for the active async RX DMA transfer. */
struct mcux_lpuart_rx_dma_params {
	struct dma_block_config active_dma_block; /* block descriptor for current buffer */
	uint8_t *buf;			/* user-provided receive buffer */
	size_t buf_len;			/* total size of buf */
	size_t offset;			/* start of data not yet reported via UART_RX_RDY */
	size_t counter;			/* bytes received into buf so far */
	struct k_work_delayable timeout_work;	/* idle-timeout work item */
	size_t timeout_us;		/* RX timeout passed to uart_rx_enable() */
};
83
/* Mutable state for the active async TX DMA transfer. */
struct mcux_lpuart_tx_dma_params {
	struct dma_block_config active_dma_block; /* block descriptor for current buffer */
	const uint8_t *buf;		/* user-provided transmit buffer */
	size_t buf_len;			/* number of bytes to send from buf */
	struct k_work_delayable timeout_work;	/* TX abort-timeout work item */
	size_t timeout_us;		/* timeout passed to uart_tx() */
};
91
/* Aggregate async-API state: DMA transfer tracking plus user callback. */
struct mcux_lpuart_async_data {
	const struct device *uart_dev;	/* back-pointer for the timeout work handlers */
	struct mcux_lpuart_tx_dma_params tx_dma_params;
	struct mcux_lpuart_rx_dma_params rx_dma_params;
	uint8_t *next_rx_buffer;	/* buffer staged via uart_rx_buf_rsp(), or NULL */
	size_t next_rx_buffer_len;
	uart_callback_t user_callback;	/* async event callback, may be NULL */
	void *user_data;		/* opaque argument for user_callback */
};
101 #endif
102
103 #if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
/* Which (mutually exclusive) callback API the application has selected. */
enum mcux_lpuart_api {
	LPUART_NONE,		/* no callback registered yet */
	LPUART_IRQ_DRIVEN,	/* interrupt-driven API in use */
	LPUART_ASYNC		/* async (DMA) API in use */
};
109 #endif
110
/* Mutable per-instance driver data. */
struct mcux_lpuart_data {
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	uart_irq_callback_user_data_t callback;	/* irq-driven API callback */
	void *cb_data;				/* opaque argument for callback */
#endif
#ifdef CONFIG_PM
	bool pm_state_lock_on;	/* true while a PM state lock is held */
	bool tx_poll_stream_on;	/* poll-mode TX in flight (TC irq armed) */
	bool tx_int_stream_on;	/* irq-driven TX in flight */
#endif /* CONFIG_PM */
#ifdef CONFIG_UART_ASYNC_API
	struct mcux_lpuart_async_data async;	/* async API state */
#endif
	struct uart_config uart_config;	/* last applied line configuration */
#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	enum mcux_lpuart_api api_type;	/* which callback API is active */
#endif
};
129
130 #ifdef CONFIG_PM
mcux_lpuart_pm_policy_state_lock_get(const struct device * dev)131 static void mcux_lpuart_pm_policy_state_lock_get(const struct device *dev)
132 {
133 struct mcux_lpuart_data *data = dev->data;
134
135 if (!data->pm_state_lock_on) {
136 data->pm_state_lock_on = true;
137 pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
138 }
139 }
140
mcux_lpuart_pm_policy_state_lock_put(const struct device * dev)141 static void mcux_lpuart_pm_policy_state_lock_put(const struct device *dev)
142 {
143 struct mcux_lpuart_data *data = dev->data;
144
145 if (data->pm_state_lock_on) {
146 data->pm_state_lock_on = false;
147 pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
148 }
149 }
150 #endif /* CONFIG_PM */
151
mcux_lpuart_poll_in(const struct device * dev,unsigned char * c)152 static int mcux_lpuart_poll_in(const struct device *dev, unsigned char *c)
153 {
154 const struct mcux_lpuart_config *config = dev->config;
155 uint32_t flags = LPUART_GetStatusFlags(config->base);
156 int ret = -1;
157
158 if (flags & kLPUART_RxDataRegFullFlag) {
159 *c = LPUART_ReadByte(config->base);
160 ret = 0;
161 }
162
163 return ret;
164 }
165
/* Blocking single-character write: busy-waits for TDRE, then writes one
 * byte with interrupts locked. Under CONFIG_PM it also takes a PM lock and
 * arms the transmission-complete interrupt so the ISR can release the lock
 * once the byte has fully shifted out.
 */
static void mcux_lpuart_poll_out(const struct device *dev, unsigned char c)
{
	const struct mcux_lpuart_config *config = dev->config;
	unsigned int key;
#ifdef CONFIG_PM
	struct mcux_lpuart_data *data = dev->data;
#endif

	/* Busy-wait until the transmit data register is empty */
	while (!(LPUART_GetStatusFlags(config->base)
		& LPUART_STAT_TDRE_MASK)) {
	}
	/* Lock interrupts while we send data */
	key = irq_lock();
#ifdef CONFIG_PM
	/*
	 * We must keep the part from entering lower power mode until the
	 * transmission completes. Set the power constraint, and enable
	 * the transmission complete interrupt so we know when transmission is
	 * completed.
	 */
	if (!data->tx_poll_stream_on && !data->tx_int_stream_on) {
		data->tx_poll_stream_on = true;
		mcux_lpuart_pm_policy_state_lock_get(dev);
		/* Enable TC interrupt */
		LPUART_EnableInterrupts(config->base,
			kLPUART_TransmissionCompleteInterruptEnable);

	}
#endif /* CONFIG_PM */

	LPUART_WriteByte(config->base, c);
	irq_unlock(key);
}
199
mcux_lpuart_err_check(const struct device * dev)200 static int mcux_lpuart_err_check(const struct device *dev)
201 {
202 const struct mcux_lpuart_config *config = dev->config;
203 uint32_t flags = LPUART_GetStatusFlags(config->base);
204 int err = 0;
205
206 if (flags & kLPUART_RxOverrunFlag) {
207 err |= UART_ERROR_OVERRUN;
208 }
209
210 if (flags & kLPUART_ParityErrorFlag) {
211 err |= UART_ERROR_PARITY;
212 }
213
214 if (flags & kLPUART_FramingErrorFlag) {
215 err |= UART_ERROR_FRAMING;
216 }
217
218 if (flags & kLPUART_NoiseErrorFlag) {
219 err |= UART_ERROR_PARITY;
220 }
221
222 LPUART_ClearStatusFlags(config->base, kLPUART_RxOverrunFlag |
223 kLPUART_ParityErrorFlag |
224 kLPUART_FramingErrorFlag |
225 kLPUART_NoiseErrorFlag);
226
227 return err;
228 }
229
230 #ifdef CONFIG_UART_INTERRUPT_DRIVEN
mcux_lpuart_fifo_fill(const struct device * dev,const uint8_t * tx_data,int len)231 static int mcux_lpuart_fifo_fill(const struct device *dev,
232 const uint8_t *tx_data,
233 int len)
234 {
235 const struct mcux_lpuart_config *config = dev->config;
236 uint8_t num_tx = 0U;
237
238 while ((len - num_tx > 0) &&
239 (LPUART_GetStatusFlags(config->base)
240 & LPUART_STAT_TDRE_MASK)) {
241
242 LPUART_WriteByte(config->base, tx_data[num_tx++]);
243 }
244 return num_tx;
245 }
246
/* Read up to len bytes while the receive data register holds data;
 * returns how many bytes were actually read.
 */
static int mcux_lpuart_fifo_read(const struct device *dev, uint8_t *rx_data,
				 const int len)
{
	const struct mcux_lpuart_config *config = dev->config;
	uint8_t count = 0U;

	while (len - count > 0) {
		if ((LPUART_GetStatusFlags(config->base) & kLPUART_RxDataRegFullFlag) == 0U) {
			break;
		}
		rx_data[count] = LPUART_ReadByte(config->base);
		count++;
	}

	return count;
}
262
/* Enable the TX-data-register-empty interrupt. Under CONFIG_PM this also
 * transitions any poll-mode TX bookkeeping to interrupt-driven mode and
 * takes a PM lock (released again in mcux_lpuart_irq_tx_disable()), all
 * under irq_lock() so the ISR cannot observe a half-updated state.
 */
static void mcux_lpuart_irq_tx_enable(const struct device *dev)
{
	const struct mcux_lpuart_config *config = dev->config;
	uint32_t mask = kLPUART_TxDataRegEmptyInterruptEnable;
#ifdef CONFIG_PM
	struct mcux_lpuart_data *data = dev->data;
	unsigned int key;
#endif

#ifdef CONFIG_PM
	key = irq_lock();
	data->tx_poll_stream_on = false;
	data->tx_int_stream_on = true;
	/* Transmission complete interrupt no longer required */
	LPUART_DisableInterrupts(config->base,
		kLPUART_TransmissionCompleteInterruptEnable);
	/* Do not allow system to sleep while UART tx is ongoing */
	mcux_lpuart_pm_policy_state_lock_get(dev);
#endif
	LPUART_EnableInterrupts(config->base, mask);
#ifdef CONFIG_PM
	irq_unlock(key);
#endif
}
287
/* Disable the TX-data-register-empty interrupt. Under CONFIG_PM this marks
 * interrupt-driven TX as finished and releases the PM lock taken by
 * mcux_lpuart_irq_tx_enable().
 */
static void mcux_lpuart_irq_tx_disable(const struct device *dev)
{
	const struct mcux_lpuart_config *config = dev->config;
	uint32_t mask = kLPUART_TxDataRegEmptyInterruptEnable;
#ifdef CONFIG_PM
	struct mcux_lpuart_data *data = dev->data;
	unsigned int key;

	key = irq_lock();
#endif

	LPUART_DisableInterrupts(config->base, mask);
#ifdef CONFIG_PM
	data->tx_int_stream_on = false;
	/*
	 * If transmission IRQ is no longer enabled,
	 * transmission is complete. Release pm constraint.
	 */
	mcux_lpuart_pm_policy_state_lock_put(dev);
	irq_unlock(key);
#endif
}
310
mcux_lpuart_irq_tx_complete(const struct device * dev)311 static int mcux_lpuart_irq_tx_complete(const struct device *dev)
312 {
313 const struct mcux_lpuart_config *config = dev->config;
314 uint32_t flags = LPUART_GetStatusFlags(config->base);
315
316 return (flags & kLPUART_TransmissionCompleteFlag) != 0U;
317 }
318
mcux_lpuart_irq_tx_ready(const struct device * dev)319 static int mcux_lpuart_irq_tx_ready(const struct device *dev)
320 {
321 const struct mcux_lpuart_config *config = dev->config;
322 uint32_t mask = kLPUART_TxDataRegEmptyInterruptEnable;
323 uint32_t flags = LPUART_GetStatusFlags(config->base);
324
325 return (LPUART_GetEnabledInterrupts(config->base) & mask)
326 && (flags & LPUART_STAT_TDRE_MASK);
327 }
328
mcux_lpuart_irq_rx_enable(const struct device * dev)329 static void mcux_lpuart_irq_rx_enable(const struct device *dev)
330 {
331 const struct mcux_lpuart_config *config = dev->config;
332 uint32_t mask = kLPUART_RxDataRegFullInterruptEnable;
333
334 LPUART_EnableInterrupts(config->base, mask);
335 }
336
mcux_lpuart_irq_rx_disable(const struct device * dev)337 static void mcux_lpuart_irq_rx_disable(const struct device *dev)
338 {
339 const struct mcux_lpuart_config *config = dev->config;
340 uint32_t mask = kLPUART_RxDataRegFullInterruptEnable;
341
342 LPUART_DisableInterrupts(config->base, mask);
343 }
344
mcux_lpuart_irq_rx_full(const struct device * dev)345 static int mcux_lpuart_irq_rx_full(const struct device *dev)
346 {
347 const struct mcux_lpuart_config *config = dev->config;
348 uint32_t flags = LPUART_GetStatusFlags(config->base);
349
350 return (flags & kLPUART_RxDataRegFullFlag) != 0U;
351 }
352
mcux_lpuart_irq_rx_pending(const struct device * dev)353 static int mcux_lpuart_irq_rx_pending(const struct device *dev)
354 {
355 const struct mcux_lpuart_config *config = dev->config;
356 uint32_t mask = kLPUART_RxDataRegFullInterruptEnable;
357
358 return (LPUART_GetEnabledInterrupts(config->base) & mask)
359 && mcux_lpuart_irq_rx_full(dev);
360 }
361
mcux_lpuart_irq_err_enable(const struct device * dev)362 static void mcux_lpuart_irq_err_enable(const struct device *dev)
363 {
364 const struct mcux_lpuart_config *config = dev->config;
365 uint32_t mask = kLPUART_NoiseErrorInterruptEnable |
366 kLPUART_FramingErrorInterruptEnable |
367 kLPUART_ParityErrorInterruptEnable;
368
369 LPUART_EnableInterrupts(config->base, mask);
370 }
371
mcux_lpuart_irq_err_disable(const struct device * dev)372 static void mcux_lpuart_irq_err_disable(const struct device *dev)
373 {
374 const struct mcux_lpuart_config *config = dev->config;
375 uint32_t mask = kLPUART_NoiseErrorInterruptEnable |
376 kLPUART_FramingErrorInterruptEnable |
377 kLPUART_ParityErrorInterruptEnable;
378
379 LPUART_DisableInterrupts(config->base, mask);
380 }
381
/* Non-zero when either a TX-ready or an RX-pending condition exists. */
static int mcux_lpuart_irq_is_pending(const struct device *dev)
{
	if (mcux_lpuart_irq_tx_ready(dev)) {
		return 1;
	}

	return mcux_lpuart_irq_rx_pending(dev) ? 1 : 0;
}
387
/* uart_irq_update() hook: the other irq_* accessors read the hardware
 * status flags directly, so there is no cached state to refresh here.
 * Always reports success (1).
 */
static int mcux_lpuart_irq_update(const struct device *dev)
{
	return 1;
}
392
/* Register the interrupt-driven API callback. With exclusive callbacks
 * enabled, this also clears any async callback and marks the instance as
 * IRQ-driven; an error is logged (but the call proceeds — this hook has no
 * way to return a failure) if the async API was already active.
 */
static void mcux_lpuart_irq_callback_set(const struct device *dev,
					 uart_irq_callback_user_data_t cb,
					 void *cb_data)
{
	struct mcux_lpuart_data *data = dev->data;

#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	if (data->api_type == LPUART_ASYNC) {
		LOG_ERR("UART irq and async api are exclusive");
	}
#endif

	data->callback = cb;
	data->cb_data = cb_data;

#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	/* Exclusive: deregister the async callback */
	data->async.user_callback = NULL;
	data->async.user_data = NULL;
	data->api_type = LPUART_IRQ_DRIVEN;
#endif
}
414
415 #endif /* CONFIG_UART_INTERRUPT_DRIVEN */
416
417
418 #ifdef CONFIG_UART_ASYNC_API
/* Schedule an async timeout work item.
 *
 * No-op when timeout_us is SYS_FOREVER_US (wait forever) or 0 (no timeout
 * requested); otherwise (re)schedules @p work after timeout_us microseconds.
 */
static inline void async_timer_start(struct k_work_delayable *work, size_t timeout_us)
{
	if ((timeout_us != SYS_FOREVER_US) && (timeout_us != 0)) {
		/* %zu: timeout_us is size_t — the previous %d was a format
		 * specifier mismatch.
		 */
		LOG_DBG("async timer started for %zu us", timeout_us);
		k_work_reschedule(work, K_USEC(timeout_us));
	}
}
426
async_user_callback(const struct device * dev,struct uart_event * evt)427 static void async_user_callback(const struct device *dev, struct uart_event *evt)
428 {
429 const struct mcux_lpuart_data *data = dev->data;
430
431 if (data->async.user_callback) {
432 data->async.user_callback(dev, evt, data->async.user_data);
433 }
434 }
435
async_evt_tx_done(struct device * dev)436 static void async_evt_tx_done(struct device *dev)
437 {
438 struct mcux_lpuart_data *data = dev->data;
439
440 (void)k_work_cancel_delayable(&data->async.tx_dma_params.timeout_work);
441
442 LOG_DBG("TX done: %d", data->async.tx_dma_params.buf_len);
443 struct uart_event event = {
444 .type = UART_TX_DONE,
445 .data.tx.buf = data->async.tx_dma_params.buf,
446 .data.tx.len = data->async.tx_dma_params.buf_len
447 };
448
449 /* Reset TX Buffer */
450 data->async.tx_dma_params.buf = NULL;
451 data->async.tx_dma_params.buf_len = 0U;
452
453 async_user_callback(dev, &event);
454 }
455
async_evt_rx_rdy(const struct device * dev)456 static void async_evt_rx_rdy(const struct device *dev)
457 {
458 struct mcux_lpuart_data *data = dev->data;
459 struct mcux_lpuart_rx_dma_params *dma_params = &data->async.rx_dma_params;
460
461 struct uart_event event = {
462 .type = UART_RX_RDY,
463 .data.rx.buf = dma_params->buf,
464 .data.rx.len = dma_params->counter - dma_params->offset,
465 .data.rx.offset = dma_params->offset
466 };
467
468 LOG_DBG("RX Ready: (len: %d off: %d buf: %x)", event.data.rx.len, event.data.rx.offset,
469 (uint32_t)event.data.rx.buf);
470
471 /* Update the current pos for new data */
472 dma_params->offset = dma_params->counter;
473
474 /* Only send event for new data */
475 if (event.data.rx.len > 0) {
476 async_user_callback(dev, &event);
477 }
478 }
479
async_evt_rx_buf_request(const struct device * dev)480 static void async_evt_rx_buf_request(const struct device *dev)
481 {
482 struct uart_event evt = {
483 .type = UART_RX_BUF_REQUEST,
484 };
485
486 async_user_callback(dev, &evt);
487 }
488
async_evt_rx_buf_release(const struct device * dev)489 static void async_evt_rx_buf_release(const struct device *dev)
490 {
491 struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data;
492 struct uart_event evt = {
493 .type = UART_RX_BUF_RELEASED,
494 .data.rx_buf.buf = data->async.rx_dma_params.buf,
495 };
496
497 async_user_callback(dev, &evt);
498 data->async.rx_dma_params.buf = NULL;
499 data->async.rx_dma_params.buf_len = 0U;
500 data->async.rx_dma_params.offset = 0U;
501 data->async.rx_dma_params.counter = 0U;
502 }
503
mcux_lpuart_async_rx_flush(const struct device * dev)504 static void mcux_lpuart_async_rx_flush(const struct device *dev)
505 {
506 struct dma_status status;
507 struct mcux_lpuart_data *data = dev->data;
508 const struct mcux_lpuart_config *config = dev->config;
509
510 const int get_status_result = dma_get_status(config->rx_dma_config.dma_dev,
511 config->rx_dma_config.dma_channel,
512 &status);
513
514 if (get_status_result == 0) {
515 const size_t rx_rcv_len = data->async.rx_dma_params.buf_len -
516 status.pending_length;
517
518 if (rx_rcv_len > data->async.rx_dma_params.counter) {
519 data->async.rx_dma_params.counter = rx_rcv_len;
520 async_evt_rx_rdy(dev);
521 }
522 } else {
523 LOG_ERR("Error getting DMA status");
524 }
525 }
526
/* Stop async reception: quiesce the receiver and idle-line interrupt,
 * flush and release the active buffer (and any staged next buffer), stop
 * the RX DMA channel, and finally emit UART_RX_DISABLED. Runs entirely
 * under irq_lock() so the RX DMA callback cannot interleave.
 */
static int mcux_lpuart_rx_disable(const struct device *dev)
{
	LOG_INF("Disabling UART RX DMA");
	const struct mcux_lpuart_config *config = dev->config;
	struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data;
	LPUART_Type *lpuart = config->base;
	const unsigned int key = irq_lock();

	LPUART_EnableRx(lpuart, false);
	(void)k_work_cancel_delayable(&data->async.rx_dma_params.timeout_work);
	LPUART_DisableInterrupts(lpuart, kLPUART_IdleLineInterruptEnable);
	LPUART_ClearStatusFlags(lpuart, kLPUART_IdleLineFlag);
	LPUART_EnableRxDMA(lpuart, false);

	/* No active RX buffer, cannot disable */
	if (!data->async.rx_dma_params.buf) {
		LOG_ERR("No buffers to release from RX DMA!");
	} else {
		/* Report remaining data, then return the buffer(s) to the app */
		mcux_lpuart_async_rx_flush(dev);
		async_evt_rx_buf_release(dev);
		if (data->async.next_rx_buffer != NULL) {
			/* Promote the staged buffer so it can be released too */
			data->async.rx_dma_params.buf = data->async.next_rx_buffer;
			data->async.rx_dma_params.buf_len = data->async.next_rx_buffer_len;
			data->async.next_rx_buffer = NULL;
			data->async.next_rx_buffer_len = 0;
			/* Release the next buffer as well */
			async_evt_rx_buf_release(dev);
		}
	}
	const int ret = dma_stop(config->rx_dma_config.dma_dev,
				 config->rx_dma_config.dma_channel);

	if (ret != 0) {
		LOG_ERR("Error stopping rx DMA. Reason: %x", ret);
	}
	LOG_DBG("RX: Disabled");
	struct uart_event disabled_event = {
		.type = UART_RX_DISABLED
	};

	async_user_callback(dev, &disabled_event);
	irq_unlock(key);
	return ret;
}
571
prepare_rx_dma_block_config(const struct device * dev)572 static void prepare_rx_dma_block_config(const struct device *dev)
573 {
574 struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data;
575 const struct mcux_lpuart_config *config = dev->config;
576 LPUART_Type *lpuart = config->base;
577 struct mcux_lpuart_rx_dma_params *rx_dma_params = &data->async.rx_dma_params;
578
579 assert(rx_dma_params->buf != NULL);
580 assert(rx_dma_params->buf_len > 0);
581
582 struct dma_block_config *head_block_config = &rx_dma_params->active_dma_block;
583
584 head_block_config->dest_address = (uint32_t)rx_dma_params->buf;
585 head_block_config->source_address = LPUART_GetDataRegisterAddress(lpuart);
586 head_block_config->block_size = rx_dma_params->buf_len;
587 head_block_config->dest_scatter_en = true;
588 }
589
/* Configure and start the RX DMA channel, then route LPUART RX requests
 * to it. Returns 0 on success or a negative error from the DMA driver.
 */
static int configure_and_start_rx_dma(
	const struct mcux_lpuart_config *config, struct mcux_lpuart_data *data,
	LPUART_Type *lpuart)
{
	LOG_DBG("Configuring and Starting UART RX DMA");
	int ret = dma_config(config->rx_dma_config.dma_dev,
			     config->rx_dma_config.dma_channel,
			     (struct dma_config *)&config->rx_dma_config.dma_cfg);

	if (ret != 0) {
		LOG_ERR("Failed to Configure RX DMA: err: %d", ret);
		return ret;
	}
	ret = dma_start(config->rx_dma_config.dma_dev, config->rx_dma_config.dma_channel);
	if (ret < 0) {
		LOG_ERR("Failed to start DMA(Rx) Ch %d(%d)",
			config->rx_dma_config.dma_channel,
			ret);
		/* Fix: do not enable LPUART RX DMA requests when the channel
		 * never started (the original enabled them unconditionally).
		 */
		return ret;
	}
	LPUART_EnableRxDMA(lpuart, true);
	return ret;
}
612
uart_mcux_lpuart_dma_replace_rx_buffer(const struct device * dev)613 static int uart_mcux_lpuart_dma_replace_rx_buffer(const struct device *dev)
614 {
615 struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data;
616 const struct mcux_lpuart_config *config = dev->config;
617 LPUART_Type *lpuart = config->base;
618
619 LOG_DBG("Replacing RX buffer, new length: %d", data->async.next_rx_buffer_len);
620 /* There must be a buffer to replace this one with */
621 assert(data->async.next_rx_buffer != NULL);
622 assert(data->async.next_rx_buffer_len != 0U);
623 const int success = dma_reload(config->rx_dma_config.dma_dev,
624 config->rx_dma_config.dma_channel,
625 LPUART_GetDataRegisterAddress(lpuart),
626 (uint32_t)data->async.next_rx_buffer,
627 data->async.next_rx_buffer_len);
628
629 if (success != 0) {
630 LOG_ERR("Error %d reloading DMA with next RX buffer", success);
631 }
632 return success;
633 }
634
/* Shared DMA completion callback for both LPUART DMA channels.
 *
 * TX channel: the transfer finished — disable TX DMA requests and emit
 * UART_TX_DONE. RX channel: the active buffer is full — report it, release
 * it, and swap in the buffer previously staged by uart_rx_buf_rsp() (the
 * DMA engine was already reloaded with it at staging time); if no next
 * buffer was provided, reception is shut down.
 */
static void dma_callback(const struct device *dma_dev, void *callback_arg, uint32_t channel,
			 int dma_status)
{
	struct device *dev = (struct device *)callback_arg;
	const struct mcux_lpuart_config *config = dev->config;
	LPUART_Type *lpuart = config->base;
	struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data;

	LOG_DBG("DMA call back on channel %d", channel);
	struct dma_status status;
	const int get_status_result = dma_get_status(dma_dev, channel, &status);

	if (get_status_result < 0) {
		LOG_ERR("error on status get: %d", get_status_result);
	} else {
		LOG_DBG("DMA Status: b: %d dir: %d len_remain: %d", status.busy, status.dir,
			status.pending_length);
	}

	if (dma_status < 0) {
		LOG_ERR("Got error : %d", dma_status);
	}


	if (channel == config->tx_dma_config.dma_channel) {
		LOG_DBG("TX Channel");
		LPUART_EnableTxDMA(lpuart, false);
		async_evt_tx_done(dev);
	} else if (channel == config->rx_dma_config.dma_channel) {
		LOG_DBG("RX Channel");
		struct mcux_lpuart_rx_dma_params *rx_dma_params = &data->async.rx_dma_params;

		/* The RX Event indicates DMA transfer is complete and full buffer is available. */
		rx_dma_params->counter = rx_dma_params->buf_len;

		LOG_DBG("Current Buf (%x) full, swapping to new buf: %x",
			(uint32_t)rx_dma_params->buf,
			(uint32_t)data->async.next_rx_buffer);
		async_evt_rx_rdy(dev);
		async_evt_rx_buf_release(dev);

		/* Promote the staged next buffer to active */
		rx_dma_params->buf = data->async.next_rx_buffer;
		rx_dma_params->buf_len = data->async.next_rx_buffer_len;
		data->async.next_rx_buffer = NULL;
		data->async.next_rx_buffer_len = 0U;

		/* A new buffer was available (and already loaded into the DMA engine) */
		if (rx_dma_params->buf != NULL &&
		    rx_dma_params->buf_len > 0) {
			/* Request the next buffer */
			async_evt_rx_buf_request(dev);
		} else {
			/* Buffer full without valid next buffer, disable RX DMA */
			LOG_INF("Disabled RX DMA, no valid next buffer ");
			mcux_lpuart_rx_disable(dev);
		}
	} else {
		LOG_ERR("Got unexpected DMA Channel: %d", channel);
	}
}
695
696 static int mcux_lpuart_configure_async(const struct device *dev);
697
/* Register the async API callback. With exclusive callbacks enabled, the
 * call is rejected with -ENOTSUP if the interrupt-driven API is already
 * active; otherwise any irq-driven callback is cleared and the instance is
 * (re)configured for async operation.
 */
static int mcux_lpuart_callback_set(const struct device *dev, uart_callback_t callback,
				    void *user_data)
{
	struct mcux_lpuart_data *data = dev->data;

#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	if (data->api_type == LPUART_IRQ_DRIVEN) {
		LOG_ERR("UART irq and async api are exclusive");
		return -ENOTSUP;
	}
#endif

	data->async.user_callback = callback;
	data->async.user_data = user_data;

#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	/* Exclusive: deregister the irq-driven callback */
	data->callback = NULL;
	data->cb_data = NULL;
	data->api_type = LPUART_ASYNC;
#endif

	return mcux_lpuart_configure_async(dev);
}
721
/* Start an async DMA transmission of len bytes from buf.
 *
 * Fails with -EBUSY if the TX DMA channel is already busy. On success the
 * TX timeout (if any) is armed; completion is reported via dma_callback()
 * as UART_TX_DONE. Runs under irq_lock() to keep the busy-check and DMA
 * setup atomic with respect to the DMA/UART ISRs.
 */
static int mcux_lpuart_tx(const struct device *dev, const uint8_t *buf, size_t len,
			  int32_t timeout_us)
{
	struct mcux_lpuart_data *data = dev->data;
	const struct mcux_lpuart_config *config = dev->config;
	LPUART_Type *lpuart = config->base;

	unsigned int key = irq_lock();

	/* Check for an ongoing transfer and abort if it is pending */
	struct dma_status status;
	const int get_status_result = dma_get_status(config->tx_dma_config.dma_dev,
						     config->tx_dma_config.dma_channel,
						     &status);

	if (get_status_result < 0 || status.busy) {
		irq_unlock(key);
		LOG_ERR("Unable to submit UART DMA Transfer.");
		return get_status_result < 0 ? get_status_result : -EBUSY;
	}

	int ret;

	LPUART_EnableTxDMA(lpuart, false);

	/* Describe the transfer: user buffer -> LPUART data register */
	data->async.tx_dma_params.buf = buf;
	data->async.tx_dma_params.buf_len = len;
	data->async.tx_dma_params.active_dma_block.source_address = (uint32_t)buf;
	data->async.tx_dma_params.active_dma_block.dest_address =
		LPUART_GetDataRegisterAddress(lpuart);
	data->async.tx_dma_params.active_dma_block.block_size = len;
	data->async.tx_dma_params.active_dma_block.next_block = NULL;

	ret = dma_config(config->tx_dma_config.dma_dev,
			 config->tx_dma_config.dma_channel,
			 (struct dma_config *)&config->tx_dma_config.dma_cfg);

	if (ret == 0) {
		LOG_DBG("Starting UART DMA TX Ch %u", config->tx_dma_config.dma_channel);

		ret = dma_start(config->tx_dma_config.dma_dev,
				config->tx_dma_config.dma_channel);
		LPUART_EnableTxDMA(lpuart, true);
		if (ret != 0) {
			LOG_ERR("Failed to start DMA(Tx) Ch %d",
				config->tx_dma_config.dma_channel);
		}
		async_timer_start(&data->async.tx_dma_params.timeout_work, timeout_us);
	} else {
		LOG_ERR("Error configuring UART DMA: %x", ret);
	}
	irq_unlock(key);
	return ret;
}
776
/* Abort an in-flight async transmission.
 *
 * Disables TX DMA requests, cancels the TX timeout, computes how many
 * bytes already left the buffer (0 if the DMA status query failed), stops
 * the channel, and on success reports UART_TX_ABORTED with that count.
 */
static int mcux_lpuart_tx_abort(const struct device *dev)
{
	struct mcux_lpuart_data *data = dev->data;
	const struct mcux_lpuart_config *config = dev->config;
	LPUART_Type *lpuart = config->base;

	LPUART_EnableTxDMA(lpuart, false);
	(void)k_work_cancel_delayable(&data->async.tx_dma_params.timeout_work);
	struct dma_status status;
	const int get_status_result = dma_get_status(config->tx_dma_config.dma_dev,
						     config->tx_dma_config.dma_channel,
						     &status);

	if (get_status_result < 0) {
		LOG_ERR("Error querying TX DMA Status during abort.");
	}

	/* Transmitted = requested length minus what the DMA still holds */
	const size_t bytes_transmitted = (get_status_result == 0) ?
			data->async.tx_dma_params.buf_len - status.pending_length : 0;

	const int ret = dma_stop(config->tx_dma_config.dma_dev, config->tx_dma_config.dma_channel);

	if (ret == 0) {
		struct uart_event tx_aborted_event = {
			.type = UART_TX_ABORTED,
			.data.tx.buf = data->async.tx_dma_params.buf,
			.data.tx.len = bytes_transmitted
		};
		async_user_callback(dev, &tx_aborted_event);
	}
	return ret;
}
809
/* Start async reception into buf (len bytes) with the given idle timeout.
 *
 * Fails with -EBUSY if the RX DMA channel is already busy. Arms the
 * idle-line interrupt (which drives the RX timeout), configures and starts
 * the RX DMA, immediately requests a follow-up buffer from the
 * application, clears stale error flags, and enables the receiver. Runs
 * under irq_lock() to keep setup atomic with respect to the ISRs.
 */
static int mcux_lpuart_rx_enable(const struct device *dev, uint8_t *buf, const size_t len,
				 const int32_t timeout_us)
{
	LOG_DBG("Enabling UART RX DMA");
	struct mcux_lpuart_data *data = dev->data;
	const struct mcux_lpuart_config *config = dev->config;
	LPUART_Type *lpuart = config->base;

	struct mcux_lpuart_rx_dma_params *rx_dma_params = &data->async.rx_dma_params;

	unsigned int key = irq_lock();
	struct dma_status status;
	const int get_status_result = dma_get_status(config->rx_dma_config.dma_dev,
						     config->rx_dma_config.dma_channel,
						     &status);

	if (get_status_result < 0 || status.busy) {
		LOG_ERR("Unable to start receive on UART.");
		irq_unlock(key);
		return get_status_result < 0 ? get_status_result : -EBUSY;
	}

	rx_dma_params->timeout_us = timeout_us;
	rx_dma_params->buf = buf;
	rx_dma_params->buf_len = len;

	/* Idle-line interrupt drives the RX timeout handling in the ISR */
	LPUART_EnableInterrupts(config->base, kLPUART_IdleLineInterruptEnable);
	prepare_rx_dma_block_config(dev);
	const int ret = configure_and_start_rx_dma(config, data, lpuart);

	/* Request the next buffer for when this buffer is full for continuous reception */
	async_evt_rx_buf_request(dev);

	/* Clear these status flags as they can prevent the UART device from receiving data */
	LPUART_ClearStatusFlags(config->base, kLPUART_RxOverrunFlag |
					      kLPUART_ParityErrorFlag |
					      kLPUART_FramingErrorFlag |
					      kLPUART_NoiseErrorFlag);
	LPUART_EnableRx(lpuart, true);
	irq_unlock(key);
	return ret;
}
852
/* Provide the next RX buffer in response to UART_RX_BUF_REQUEST.
 *
 * Stages the buffer and immediately reloads the RX DMA channel with it.
 * Returns 0 on success, or the DMA reload error — the original discarded
 * that result and always reported success.
 */
static int mcux_lpuart_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len)
{
	struct mcux_lpuart_data *data = dev->data;

	/* Only one next buffer may be staged at a time */
	assert(data->async.next_rx_buffer == NULL);
	assert(data->async.next_rx_buffer_len == 0);
	data->async.next_rx_buffer = buf;
	data->async.next_rx_buffer_len = len;

	return uart_mcux_lpuart_dma_replace_rx_buffer(dev);
}
865
/* RX timeout work handler: recover the device pointer from the embedded
 * work item via nested CONTAINER_OF, then flush whatever the DMA has
 * received so far to the application.
 */
static void mcux_lpuart_async_rx_timeout(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct mcux_lpuart_rx_dma_params *rx_params = CONTAINER_OF(dwork,
								   struct mcux_lpuart_rx_dma_params,
								   timeout_work);
	struct mcux_lpuart_async_data *async_data = CONTAINER_OF(rx_params,
								 struct mcux_lpuart_async_data,
								 rx_dma_params);
	const struct device *dev = async_data->uart_dev;

	LOG_DBG("RX timeout");
	mcux_lpuart_async_rx_flush(dev);
}
880
/* TX timeout work handler: recover the device pointer from the embedded
 * work item via nested CONTAINER_OF, then abort the stalled transmission
 * (which reports UART_TX_ABORTED with the partial byte count).
 */
static void mcux_lpuart_async_tx_timeout(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct mcux_lpuart_tx_dma_params *tx_params = CONTAINER_OF(dwork,
								   struct mcux_lpuart_tx_dma_params,
								   timeout_work);
	struct mcux_lpuart_async_data *async_data = CONTAINER_OF(tx_params,
								 struct mcux_lpuart_async_data,
								 tx_dma_params);
	const struct device *dev = async_data->uart_dev;

	LOG_DBG("TX timeout");
	(void)mcux_lpuart_tx_abort(dev);
}
895
896 #endif /* CONFIG_UART_ASYNC_API */
897
898 #if CONFIG_UART_MCUX_LPUART_ISR_SUPPORT
899
900 #ifdef CONFIG_UART_INTERRUPT_DRIVEN
/* Interrupt-driven API portion of the ISR: invoke the user callback (which
 * is expected to drain/refill the FIFOs), then clear a pending RX overrun
 * flag since it would otherwise block further reception.
 */
static inline void mcux_lpuart_irq_driven_isr(const struct device *dev,
					      struct mcux_lpuart_data *data,
					      const struct mcux_lpuart_config *config,
					      const uint32_t status) {
	if (data->callback) {
		data->callback(dev, data->cb_data);
	}

	if (status & kLPUART_RxOverrunFlag) {
		LPUART_ClearStatusFlags(config->base, kLPUART_RxOverrunFlag);
	}
}
913 #endif
914
915 #ifdef CONFIG_UART_ASYNC_API
/* Async API portion of the ISR: an idle line means the RX stream paused,
 * so (re)start the RX timeout timer and clear the idle-line flag.
 */
static inline void mcux_lpuart_async_isr(struct mcux_lpuart_data *data,
					 const struct mcux_lpuart_config *config,
					 const uint32_t status) {
	if (status & kLPUART_IdleLineFlag) {
		async_timer_start(&data->async.rx_dma_params.timeout_work,
				  data->async.rx_dma_params.timeout_us);
		LPUART_ClearStatusFlags(config->base, kLPUART_IdleLineFlag);
	}
}
925 #endif
926
/* Top-level LPUART ISR: handle PM bookkeeping for poll-mode TX completion,
 * then dispatch to the irq-driven and/or async sub-handler depending on
 * which API is compiled in (and, when both are, which one is active).
 */
static void mcux_lpuart_isr(const struct device *dev)
{
	struct mcux_lpuart_data *data = dev->data;
	const struct mcux_lpuart_config *config = dev->config;
	const uint32_t status = LPUART_GetStatusFlags(config->base);

#if CONFIG_PM
	if (status & kLPUART_TransmissionCompleteFlag) {

		if (data->tx_poll_stream_on) {
			/* Poll transmission complete. Allow system to sleep */
			LPUART_DisableInterrupts(config->base,
				kLPUART_TransmissionCompleteInterruptEnable);
			data->tx_poll_stream_on = false;
			mcux_lpuart_pm_policy_state_lock_put(dev);
		}
	}
#endif /* CONFIG_PM */

#if defined(CONFIG_UART_ASYNC_API) && defined(CONFIG_UART_INTERRUPT_DRIVEN)
	/* Both APIs compiled in: route by the currently registered callback */
	if (data->api_type == LPUART_IRQ_DRIVEN) {
		mcux_lpuart_irq_driven_isr(dev, data, config, status);
	} else if (data->api_type == LPUART_ASYNC) {
		mcux_lpuart_async_isr(data, config, status);
	}
#elif defined(CONFIG_UART_INTERRUPT_DRIVEN)
	mcux_lpuart_irq_driven_isr(dev, data, config, status);
#elif defined(CONFIG_UART_ASYNC_API)
	mcux_lpuart_async_isr(data, config, status);
#endif /* API */
}
958 #endif /* CONFIG_UART_MCUX_LPUART_ISR_SUPPORT */
959
/**
 * @brief Translate a generic struct uart_config into the HAL's lpuart_config_t.
 *
 * Fills in parity, data bits, stop bits and flow control where the SDK
 * feature macros indicate hardware support. The receiver is enabled but the
 * transmitter is deliberately left disabled: the caller enables TX only
 * after the RS-485 TX-RTS bits have been programmed.
 *
 * @param dev UART device (unused; kept for signature symmetry).
 * @param cfg Generic UART configuration to translate.
 * @param uart_config HAL configuration structure to fill in.
 *
 * @return 0 on success, -ENOTSUP for an unsupported parity, data-bit,
 *         stop-bit or flow-control option.
 */
static int mcux_lpuart_configure_basic(const struct device *dev, const struct uart_config *cfg,
				       lpuart_config_t *uart_config)
{
	ARG_UNUSED(dev);

	/* Translate UART API enum to LPUART enum from HAL */
	switch (cfg->parity) {
	case UART_CFG_PARITY_NONE:
		uart_config->parityMode = kLPUART_ParityDisabled;
		break;
	case UART_CFG_PARITY_ODD:
		uart_config->parityMode = kLPUART_ParityOdd;
		break;
	case UART_CFG_PARITY_EVEN:
		uart_config->parityMode = kLPUART_ParityEven;
		break;
	default:
		return -ENOTSUP;
	}

	switch (cfg->data_bits) {
#if defined(FSL_FEATURE_LPUART_HAS_7BIT_DATA_SUPPORT) && \
	FSL_FEATURE_LPUART_HAS_7BIT_DATA_SUPPORT
	case UART_CFG_DATA_BITS_7:
		uart_config->dataBitsCount = kLPUART_SevenDataBits;
		break;
#endif
	case UART_CFG_DATA_BITS_8:
		uart_config->dataBitsCount = kLPUART_EightDataBits;
		break;
	default:
		return -ENOTSUP;
	}

	/* NOTE(review): on parts without stop-bit config support the requested
	 * cfg->stop_bits is silently ignored instead of rejected — confirm
	 * this is intentional.
	 */
#if defined(FSL_FEATURE_LPUART_HAS_STOP_BIT_CONFIG_SUPPORT) && \
	FSL_FEATURE_LPUART_HAS_STOP_BIT_CONFIG_SUPPORT
	switch (cfg->stop_bits) {
	case UART_CFG_STOP_BITS_1:
		uart_config->stopBitCount = kLPUART_OneStopBit;
		break;
	case UART_CFG_STOP_BITS_2:
		uart_config->stopBitCount = kLPUART_TwoStopBit;
		break;
	default:
		return -ENOTSUP;
	}
#endif

#if defined(FSL_FEATURE_LPUART_HAS_MODEM_SUPPORT) && \
	FSL_FEATURE_LPUART_HAS_MODEM_SUPPORT
	switch (cfg->flow_ctrl) {
	case UART_CFG_FLOW_CTRL_NONE:
	case UART_CFG_FLOW_CTRL_RS485:
		/* RS-485 direction control is done via TX-RTS, not CTS/RTS */
		uart_config->enableTxCTS = false;
		uart_config->enableRxRTS = false;
		break;
	case UART_CFG_FLOW_CTRL_RTS_CTS:
		uart_config->enableTxCTS = true;
		uart_config->enableRxRTS = true;
		break;
	default:
		return -ENOTSUP;
	}
#endif

	uart_config->baudRate_Bps = cfg->baudrate;
	uart_config->enableRx = true;
	/* Tx will be enabled manually after set tx-rts */
	uart_config->enableTx = false;

	return 0;
}
1030
1031 #ifdef CONFIG_UART_ASYNC_API
mcux_lpuart_configure_async(const struct device * dev)1032 static int mcux_lpuart_configure_async(const struct device *dev)
1033 {
1034 const struct mcux_lpuart_config *config = dev->config;
1035 struct mcux_lpuart_data *data = dev->data;
1036 lpuart_config_t uart_config;
1037 int ret;
1038
1039 LPUART_GetDefaultConfig(&uart_config);
1040
1041 ret = mcux_lpuart_configure_basic(dev, &data->uart_config, &uart_config);
1042 if (ret) {
1043 return ret;
1044 }
1045
1046 uart_config.rxIdleType = kLPUART_IdleTypeStopBit;
1047 uart_config.rxIdleConfig = kLPUART_IdleCharacter1;
1048 data->async.next_rx_buffer = NULL;
1049 data->async.next_rx_buffer_len = 0;
1050 data->async.uart_dev = dev;
1051 k_work_init_delayable(&data->async.rx_dma_params.timeout_work,
1052 mcux_lpuart_async_rx_timeout);
1053 k_work_init_delayable(&data->async.tx_dma_params.timeout_work,
1054 mcux_lpuart_async_tx_timeout);
1055
1056 /* Disable the UART Receiver until the async API provides a buffer to
1057 * receive into with rx_enable
1058 */
1059 uart_config.enableRx = false;
1060 /* Clearing the fifo of any junk received before the async rx enable was called */
1061 while (LPUART_GetRxFifoCount(config->base) > 0) {
1062 LPUART_ReadByte(config->base);
1063 }
1064
1065 return 0;
1066 }
1067 #endif
1068
/**
 * @brief Apply @p cfg to the hardware.
 *
 * Reads the functional clock rate, initializes the LPUART via the HAL,
 * programs RS-485 TX-RTS, loopback / single-wire and signal-inversion
 * options from devicetree, enables the transmitter, and caches @p cfg in
 * the driver data for config_get().
 *
 * @return 0 on success, -ENODEV if the clock controller is not ready,
 *         -EINVAL if the clock rate cannot be read, or a negative value
 *         propagated from mcux_lpuart_configure_basic().
 */
static int mcux_lpuart_configure_init(const struct device *dev, const struct uart_config *cfg)
{
	const struct mcux_lpuart_config *config = dev->config;
	struct mcux_lpuart_data *data = dev->data;
	lpuart_config_t uart_config;
	uint32_t clock_freq;
	int ret;

	if (!device_is_ready(config->clock_dev)) {
		return -ENODEV;
	}

	if (clock_control_get_rate(config->clock_dev, config->clock_subsys,
				   &clock_freq)) {
		return -EINVAL;
	}

	LPUART_GetDefaultConfig(&uart_config);

	ret = mcux_lpuart_configure_basic(dev, cfg, &uart_config);
	if (ret) {
		return ret;
	}

	LPUART_Init(config->base, &uart_config, clock_freq);

	if (cfg->flow_ctrl == UART_CFG_FLOW_CTRL_RS485) {
		/* Set the LPUART into RS485 mode (tx driver enable using RTS) */
		config->base->MODIR |= LPUART_MODIR_TXRTSE(true);
		if (!config->rs485_de_active_low) {
			/* Driver-enable polarity: RTS asserted high */
			config->base->MODIR |= LPUART_MODIR_TXRTSPOL(1);
		}
	}
	/* Now can enable tx */
	config->base->CTRL |= LPUART_CTRL_TE(true);


	if (config->loopback_en) {
		/* Set the LPUART into loopback mode */
		config->base->CTRL |= LPUART_CTRL_LOOPS_MASK;
		config->base->CTRL &= ~LPUART_CTRL_RSRC_MASK;
	} else if (config->single_wire) {
		/* Enable the single wire / half-duplex mode, only possible when
		 * loopback is disabled. We need a critical section to prevent
		 * the UART firing an interrupt during mode switch
		 */
		unsigned int key = irq_lock();

		config->base->CTRL |= (LPUART_CTRL_LOOPS_MASK | LPUART_CTRL_RSRC_MASK);
		irq_unlock(key);
	} else {
#ifdef LPUART_CTRL_TXINV
		/* Only invert TX in full-duplex mode */
		if (config->tx_invert) {
			config->base->CTRL |= LPUART_CTRL_TXINV(1);
		}
#endif
	}

#ifdef LPUART_STAT_RXINV
	/* RX inversion is safe in all modes, so apply it unconditionally */
	if (config->rx_invert) {
		config->base->STAT |= LPUART_STAT_RXINV(1);
	}
#endif

	/* update internal uart_config */
	data->uart_config = *cfg;

	return 0;
}
1139
1140 #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
mcux_lpuart_config_get(const struct device * dev,struct uart_config * cfg)1141 static int mcux_lpuart_config_get(const struct device *dev, struct uart_config *cfg)
1142 {
1143 struct mcux_lpuart_data *data = dev->data;
1144 *cfg = data->uart_config;
1145 return 0;
1146 }
1147
mcux_lpuart_configure(const struct device * dev,const struct uart_config * cfg)1148 static int mcux_lpuart_configure(const struct device *dev,
1149 const struct uart_config *cfg)
1150 {
1151 const struct mcux_lpuart_config *config = dev->config;
1152
1153 /* Make sure that RSRC is de-asserted otherwise deinit will hang. */
1154 config->base->CTRL &= ~LPUART_CTRL_RSRC_MASK;
1155
1156 /* disable LPUART */
1157 LPUART_Deinit(config->base);
1158
1159 int ret = mcux_lpuart_configure_init(dev, cfg);
1160 if (ret) {
1161 return ret;
1162 }
1163
1164 /* wait for hardware init */
1165 k_sleep(K_MSEC(1));
1166
1167 return 0;
1168 }
1169 #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */
1170
mcux_lpuart_init(const struct device * dev)1171 static int mcux_lpuart_init(const struct device *dev)
1172 {
1173 const struct mcux_lpuart_config *config = dev->config;
1174 struct mcux_lpuart_data *data = dev->data;
1175 struct uart_config *uart_api_config = &data->uart_config;
1176 int err;
1177
1178 uart_api_config->baudrate = config->baud_rate;
1179 uart_api_config->parity = config->parity;
1180 uart_api_config->stop_bits = UART_CFG_STOP_BITS_1;
1181 uart_api_config->data_bits = UART_CFG_DATA_BITS_8;
1182 uart_api_config->flow_ctrl = config->flow_ctrl;
1183
1184 /* set initial configuration */
1185 mcux_lpuart_configure_init(dev, uart_api_config);
1186 if (config->flow_ctrl) {
1187 const struct pinctrl_state *state;
1188
1189 err = pinctrl_lookup_state(config->pincfg, PINCTRL_STATE_FLOWCONTROL, &state);
1190 if (err < 0) {
1191 err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
1192 }
1193 } else {
1194 err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
1195 }
1196 if (err < 0) {
1197 return err;
1198 }
1199
1200 #ifdef CONFIG_UART_MCUX_LPUART_ISR_SUPPORT
1201 #if CONFIG_NXP_LP_FLEXCOMM
1202 /* When using LP Flexcomm driver, register the interrupt handler
1203 * so we receive notification from the LP Flexcomm interrupt handler.
1204 */
1205 nxp_lp_flexcomm_setirqhandler(config->parent_dev, dev,
1206 LP_FLEXCOMM_PERIPH_LPUART, mcux_lpuart_isr);
1207 #else
1208 /* Interrupt is managed by this driver */
1209 config->irq_config_func(dev);
1210 #endif
1211 #ifdef CONFIG_UART_EXCLUSIVE_API_CALLBACKS
1212 data->api_type = LPUART_NONE;
1213 #endif
1214 #endif
1215
1216 #ifdef CONFIG_PM
1217 data->pm_state_lock_on = false;
1218 data->tx_poll_stream_on = false;
1219 data->tx_int_stream_on = false;
1220 #endif
1221
1222 return 0;
1223 }
1224
/* UART driver API vtable registered with the Zephyr device model. */
static const struct uart_driver_api mcux_lpuart_driver_api = {
	/* Polling / error API (always available) */
	.poll_in = mcux_lpuart_poll_in,
	.poll_out = mcux_lpuart_poll_out,
	.err_check = mcux_lpuart_err_check,
#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
	.configure = mcux_lpuart_configure,
	.config_get = mcux_lpuart_config_get,
#endif
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	/* Interrupt-driven API */
	.fifo_fill = mcux_lpuart_fifo_fill,
	.fifo_read = mcux_lpuart_fifo_read,
	.irq_tx_enable = mcux_lpuart_irq_tx_enable,
	.irq_tx_disable = mcux_lpuart_irq_tx_disable,
	.irq_tx_complete = mcux_lpuart_irq_tx_complete,
	.irq_tx_ready = mcux_lpuart_irq_tx_ready,
	.irq_rx_enable = mcux_lpuart_irq_rx_enable,
	.irq_rx_disable = mcux_lpuart_irq_rx_disable,
	.irq_rx_ready = mcux_lpuart_irq_rx_full,
	.irq_err_enable = mcux_lpuart_irq_err_enable,
	.irq_err_disable = mcux_lpuart_irq_err_disable,
	.irq_is_pending = mcux_lpuart_irq_is_pending,
	.irq_update = mcux_lpuart_irq_update,
	.irq_callback_set = mcux_lpuart_irq_callback_set,
#endif
#ifdef CONFIG_UART_ASYNC_API
	/* Async (DMA-based) API */
	.callback_set = mcux_lpuart_callback_set,
	.tx = mcux_lpuart_tx,
	.tx_abort = mcux_lpuart_tx_abort,
	.rx_enable = mcux_lpuart_rx_enable,
	.rx_buf_rsp = mcux_lpuart_rx_buf_rsp,
	.rx_disable = mcux_lpuart_rx_disable,
#endif /* CONFIG_UART_ASYNC_API */
};
1258
1259
#ifdef CONFIG_UART_MCUX_LPUART_ISR_SUPPORT
/* Connect and enable interrupt line with index i of instance n. */
#define MCUX_LPUART_IRQ_INSTALL(n, i)					\
	do {								\
		IRQ_CONNECT(DT_INST_IRQN_BY_IDX(n, i),			\
			    DT_INST_IRQ_BY_IDX(n, i, priority),		\
			    mcux_lpuart_isr, DEVICE_DT_INST_GET(n), 0);	\
									\
		irq_enable(DT_INST_IRQ_BY_IDX(n, i, irq));		\
	} while (false)
/* Initializer fragment wiring the per-instance IRQ config function. */
#define MCUX_LPUART_IRQ_INIT(n) .irq_config_func = mcux_lpuart_config_func_##n,
/* Define the per-instance IRQ config function; installs up to two
 * interrupt lines depending on what the devicetree node declares.
 */
#define MCUX_LPUART_IRQ_DEFINE(n)					\
	static void mcux_lpuart_config_func_##n(const struct device *dev) \
	{								\
		IF_ENABLED(DT_INST_IRQ_HAS_IDX(n, 0),			\
			   (MCUX_LPUART_IRQ_INSTALL(n, 0);))		\
									\
		IF_ENABLED(DT_INST_IRQ_HAS_IDX(n, 1),			\
			   (MCUX_LPUART_IRQ_INSTALL(n, 1);))		\
	}
#else
/* No ISR support: both fragments expand to nothing. */
#define MCUX_LPUART_IRQ_INIT(n)
#define MCUX_LPUART_IRQ_DEFINE(n)
#endif /* CONFIG_UART_MCUX_LPUART_ISR_SUPPORT */
1283
#ifdef CONFIG_UART_ASYNC_API
/* Initializer fragment for the TX DMA channel of instance id: single-byte
 * memory-to-peripheral transfers, channel/mux/slot taken from the
 * devicetree "tx" dmas entry.
 */
#define TX_DMA_CONFIG(id)						\
	.tx_dma_config = {						\
		.dma_dev =						\
			DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, tx)), \
		.dma_channel =						\
			DT_INST_DMAS_CELL_BY_NAME(id, tx, mux),		\
		.dma_cfg = {						\
			.source_burst_length = 1,			\
			.dest_burst_length = 1,				\
			.source_data_size = 1,				\
			.dest_data_size = 1,				\
			.complete_callback_en = 1,			\
			.error_callback_dis = 0,			\
			.block_count = 1,				\
			.head_block =					\
				&mcux_lpuart_##id##_data.async.tx_dma_params.active_dma_block, \
			.channel_direction = MEMORY_TO_PERIPHERAL,	\
			.dma_slot = DT_INST_DMAS_CELL_BY_NAME(		\
				id, tx, source),			\
			.dma_callback = dma_callback,			\
			.user_data = (void *)DEVICE_DT_INST_GET(id)	\
		},							\
	},
/* Initializer fragment for the RX DMA channel of instance id: mirror of
 * TX_DMA_CONFIG with peripheral-to-memory direction and the "rx" dmas entry.
 */
#define RX_DMA_CONFIG(id)						\
	.rx_dma_config = {						\
		.dma_dev =						\
			DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, rx)), \
		.dma_channel =						\
			DT_INST_DMAS_CELL_BY_NAME(id, rx, mux),		\
		.dma_cfg = {						\
			.source_burst_length = 1,			\
			.dest_burst_length = 1,				\
			.source_data_size = 1,				\
			.dest_data_size = 1,				\
			.complete_callback_en = 1,			\
			.error_callback_dis = 0,			\
			.block_count = 1,				\
			.head_block =					\
				&mcux_lpuart_##id##_data.async.rx_dma_params.active_dma_block, \
			.channel_direction = PERIPHERAL_TO_MEMORY,	\
			.dma_slot = DT_INST_DMAS_CELL_BY_NAME(		\
				id, rx, source),			\
			.dma_callback = dma_callback,			\
			.user_data = (void *)DEVICE_DT_INST_GET(id)	\
		},							\
	},
#else
/* Async API disabled: DMA config fragments expand to nothing. */
#define RX_DMA_CONFIG(n)
#define TX_DMA_CONFIG(n)
#endif /* CONFIG_UART_ASYNC_API */
1335
/* Pick the flow-control mode for instance n from devicetree:
 * hw-flow-control wins over nxp,rs485-mode, otherwise none.
 */
#define FLOW_CONTROL(n) \
	DT_INST_PROP(n, hw_flow_control)		\
		? UART_CFG_FLOW_CTRL_RTS_CTS		\
		: DT_INST_PROP(n, nxp_rs485_mode)\
			? UART_CFG_FLOW_CTRL_RS485	\
			: UART_CFG_FLOW_CTRL_NONE
#ifdef CONFIG_NXP_LP_FLEXCOMM
/* With LP Flexcomm the parent device dispatches our interrupt. */
#define PARENT_DEV(n) \
	.parent_dev = DEVICE_DT_GET(DT_INST_PARENT(n)),
#else
#define PARENT_DEV(n)
#endif /* CONFIG_NXP_LP_FLEXCOMM */

/* Build the per-instance const config from devicetree properties. */
#define LPUART_MCUX_DECLARE_CFG(n)                                             \
static const struct mcux_lpuart_config mcux_lpuart_##n##_config = {            \
	.base = (LPUART_Type *) DT_INST_REG_ADDR(n),                           \
	PARENT_DEV(n)                                                          \
	.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)),                    \
	.clock_subsys = (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name),  \
	.baud_rate = DT_INST_PROP(n, current_speed),                           \
	.flow_ctrl = FLOW_CONTROL(n),                                          \
	.parity = DT_INST_ENUM_IDX_OR(n, parity, UART_CFG_PARITY_NONE),        \
	.rs485_de_active_low = DT_INST_PROP(n, nxp_rs485_de_active_low),       \
	.loopback_en = DT_INST_PROP(n, nxp_loopback),                          \
	.single_wire = DT_INST_PROP(n, single_wire),                           \
	.rx_invert = DT_INST_PROP(n, rx_invert),                               \
	.tx_invert = DT_INST_PROP(n, tx_invert),                               \
	.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),                           \
	MCUX_LPUART_IRQ_INIT(n)                                                \
	RX_DMA_CONFIG(n)                                                       \
	TX_DMA_CONFIG(n)                                                       \
};

/* Instantiate one driver instance: data, pinctrl, IRQ glue, config and
 * the device object itself.
 */
#define LPUART_MCUX_INIT(n)						\
									\
	static struct mcux_lpuart_data mcux_lpuart_##n##_data;		\
									\
	PINCTRL_DT_INST_DEFINE(n);					\
	MCUX_LPUART_IRQ_DEFINE(n)					\
									\
	LPUART_MCUX_DECLARE_CFG(n)					\
									\
	DEVICE_DT_INST_DEFINE(n,					\
			    mcux_lpuart_init,				\
			    NULL,					\
			    &mcux_lpuart_##n##_data,			\
			    &mcux_lpuart_##n##_config,			\
			    PRE_KERNEL_1,				\
			    CONFIG_SERIAL_INIT_PRIORITY,		\
			    &mcux_lpuart_driver_api);			\

/* Expand for every enabled nxp,kinetis-lpuart devicetree node. */
DT_INST_FOREACH_STATUS_OKAY(LPUART_MCUX_INIT)