1 /*
2 * Copyright 2017,2021,2023-2024 NXP
3 * Copyright (c) 2020 Softube
4 *
5 * SPDX-License-Identifier: Apache-2.0
6 */
7
8 #define DT_DRV_COMPAT nxp_lpuart
9
10 #include <errno.h>
11 #include <zephyr/device.h>
12 #include <zephyr/drivers/uart.h>
13 #include <zephyr/drivers/clock_control.h>
14 #include <zephyr/irq.h>
15 #include <zephyr/kernel.h>
16 #include <zephyr/pm/policy.h>
17 #include <zephyr/drivers/pinctrl.h>
18 #ifdef CONFIG_UART_ASYNC_API
19 #include <zephyr/drivers/dma.h>
20 #endif
21 #include <zephyr/logging/log.h>
22
23 #include <fsl_lpuart.h>
24 #if CONFIG_NXP_LP_FLEXCOMM
25 #include <zephyr/drivers/mfd/nxp_lp_flexcomm.h>
26 #endif
27
28 LOG_MODULE_REGISTER(uart_mcux_lpuart, LOG_LEVEL_ERR);
29
/* Extra pinctrl state applied when hardware flow control is requested */
#define PINCTRL_STATE_FLOWCONTROL PINCTRL_STATE_PRIV_START

#if defined(CONFIG_UART_ASYNC_API) && defined(CONFIG_UART_INTERRUPT_DRIVEN)
/* there are already going to be build errors, but at least this message will
 * be the first error from this driver making the reason clear
 */
BUILD_ASSERT(IS_ENABLED(CONFIG_UART_EXCLUSIVE_API_CALLBACKS), ""
	     "LPUART must use exclusive api callbacks");
#endif
39
#ifdef CONFIG_UART_ASYNC_API
/* DMA binding for one LPUART transfer direction (TX or RX). */
struct lpuart_dma_config {
	const struct device *dma_dev;	/* DMA controller servicing this channel */
	const uint32_t dma_channel;	/* channel number on dma_dev */
	struct dma_config dma_cfg;	/* pre-built channel configuration */
};
#endif /* CONFIG_UART_ASYNC_API */
47
/* Read-only per-instance configuration (built from devicetree). */
struct mcux_lpuart_config {
	LPUART_Type *base;		/* LPUART peripheral register block */
#ifdef CONFIG_NXP_LP_FLEXCOMM
	const struct device *parent_dev;	/* owning LP Flexcomm MFD device */
#endif
	const struct device *clock_dev;	/* clock controller feeding this UART */
	const struct pinctrl_dev_config *pincfg;
	clock_control_subsys_t clock_subsys;
	uint32_t baud_rate;		/* initial baud rate */
	uint8_t flow_ctrl;		/* initial flow-control setting */
	uint8_t parity;			/* initial parity setting */
	bool rs485_de_active_low;	/* RS-485 driver-enable polarity */
	bool loopback_en;		/* internal loopback mode */
	bool single_wire;		/* half-duplex single-wire mode */
	bool tx_invert;			/* invert TX line polarity */
	bool rx_invert;			/* invert RX line polarity */
#ifdef CONFIG_UART_MCUX_LPUART_ISR_SUPPORT
	/* Hook that connects the instance's IRQ(s) at init time */
	void (*irq_config_func)(const struct device *dev);
#endif
#ifdef CONFIG_UART_ASYNC_API
	const struct lpuart_dma_config rx_dma_config;	/* RX direction DMA setup */
	const struct lpuart_dma_config tx_dma_config;	/* TX direction DMA setup */
#endif /* CONFIG_UART_ASYNC_API */
};
72
73 #ifdef CONFIG_UART_ASYNC_API
/* Book-keeping for the active async RX transfer. */
struct mcux_lpuart_rx_dma_params {
	struct dma_block_config active_dma_block;	/* block describing the current buffer */
	uint8_t *buf;		/* application-provided receive buffer */
	size_t buf_len;		/* total size of buf */
	size_t offset;		/* start of data not yet reported via UART_RX_RDY */
	size_t counter;		/* bytes received into buf so far */
	struct k_work_delayable timeout_work;	/* drives the RX inactivity timeout */
	size_t timeout_us;	/* timeout requested in uart_rx_enable() */
};
83
/* Book-keeping for the active async TX transfer. */
struct mcux_lpuart_tx_dma_params {
	struct dma_block_config active_dma_block;	/* block describing the current buffer */
	const uint8_t *buf;	/* application-provided transmit buffer */
	size_t buf_len;		/* number of bytes to transmit */
	struct k_work_delayable timeout_work;	/* drives the TX abort timeout */
	size_t timeout_us;	/* timeout requested in uart_tx() */
};
91
/* Async-API state shared by API calls, DMA callbacks and timeout work items. */
struct mcux_lpuart_async_data {
	const struct device *uart_dev;	/* back-pointer used by the timeout handlers */
	struct mcux_lpuart_tx_dma_params tx_dma_params;
	struct mcux_lpuart_rx_dma_params rx_dma_params;
	uint8_t *next_rx_buffer;	/* buffer queued via uart_rx_buf_rsp() */
	size_t next_rx_buffer_len;
	uart_callback_t user_callback;	/* application event callback */
	void *user_data;		/* opaque pointer passed back to the callback */
};
101 #endif
102
#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
/* Which of the mutually-exclusive callback APIs is currently registered. */
enum mcux_lpuart_api {
	LPUART_NONE,		/* no callback registered yet */
	LPUART_IRQ_DRIVEN,	/* interrupt-driven API in use */
	LPUART_ASYNC		/* async (DMA) API in use */
};
#endif
110
/* Mutable per-instance driver state. */
struct mcux_lpuart_data {
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	uart_irq_callback_user_data_t callback;	/* interrupt-driven API callback */
	void *cb_data;
#endif
#ifdef CONFIG_PM
	bool pm_state_lock_on;	/* suspend-to-idle PM lock currently held */
	bool tx_poll_stream_on;	/* poll_out transmission in flight */
	bool tx_int_stream_on;	/* interrupt-driven transmission in flight */
#endif /* CONFIG_PM */
#ifdef CONFIG_UART_ASYNC_API
	struct mcux_lpuart_async_data async;
#endif
	struct uart_config uart_config;	/* cached UART configuration */
#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	enum mcux_lpuart_api api_type;	/* which exclusive API is active */
#endif
};
129
130 #ifdef CONFIG_PM
mcux_lpuart_pm_policy_state_lock_get(const struct device * dev)131 static void mcux_lpuart_pm_policy_state_lock_get(const struct device *dev)
132 {
133 struct mcux_lpuart_data *data = dev->data;
134
135 if (!data->pm_state_lock_on) {
136 data->pm_state_lock_on = true;
137 pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
138 }
139 }
140
mcux_lpuart_pm_policy_state_lock_put(const struct device * dev)141 static void mcux_lpuart_pm_policy_state_lock_put(const struct device *dev)
142 {
143 struct mcux_lpuart_data *data = dev->data;
144
145 if (data->pm_state_lock_on) {
146 data->pm_state_lock_on = false;
147 pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
148 }
149 }
150 #endif /* CONFIG_PM */
151
mcux_lpuart_poll_in(const struct device * dev,unsigned char * c)152 static int mcux_lpuart_poll_in(const struct device *dev, unsigned char *c)
153 {
154 const struct mcux_lpuart_config *config = dev->config;
155 uint32_t flags = LPUART_GetStatusFlags(config->base);
156 int ret = -1;
157
158 if (flags & kLPUART_RxDataRegFullFlag) {
159 *c = LPUART_ReadByte(config->base);
160 ret = 0;
161 }
162
163 return ret;
164 }
165
/*
 * Poll-mode transmit of a single character. Busy-waits for space in the
 * TX data register, then writes the byte with interrupts locked.
 */
static void mcux_lpuart_poll_out(const struct device *dev, unsigned char c)
{
	const struct mcux_lpuart_config *config = dev->config;
	unsigned int key;
#ifdef CONFIG_PM
	struct mcux_lpuart_data *data = dev->data;
#endif

	/* Spin until TDRE is set (transmit data register empty) */
	while (!(LPUART_GetStatusFlags(config->base)
		& LPUART_STAT_TDRE_MASK)) {
	}
	/* Lock interrupts while we send data */
	key = irq_lock();
#ifdef CONFIG_PM
	/*
	 * We must keep the part from entering lower power mode until the
	 * transmission completes. Set the power constraint, and enable
	 * the transmission complete interrupt so we know when transmission is
	 * completed.
	 */
	if (!data->tx_poll_stream_on && !data->tx_int_stream_on) {
		data->tx_poll_stream_on = true;
		mcux_lpuart_pm_policy_state_lock_get(dev);
		/* Enable TC interrupt */
		LPUART_EnableInterrupts(config->base,
			kLPUART_TransmissionCompleteInterruptEnable);

	}
#endif /* CONFIG_PM */

	LPUART_WriteByte(config->base, c);
	irq_unlock(key);
}
199
/*
 * Translate pending LPUART error flags into Zephyr UART_ERROR_* bits,
 * clearing the hardware flags in the process. Returns 0 when no error.
 */
static int mcux_lpuart_err_check(const struct device *dev)
{
	const struct mcux_lpuart_config *config = dev->config;
	uint32_t flags = LPUART_GetStatusFlags(config->base);
	int err = 0;

	if (flags & kLPUART_RxOverrunFlag) {
		err |= UART_ERROR_OVERRUN;
	}

	if (flags & kLPUART_ParityErrorFlag) {
		err |= UART_ERROR_PARITY;
	}

	if (flags & kLPUART_FramingErrorFlag) {
		err |= UART_ERROR_FRAMING;
	}

	if (flags & kLPUART_NoiseErrorFlag) {
		/* NOTE(review): noise is reported as a parity error here;
		 * confirm whether UART_ERROR_NOISE should be used instead.
		 */
		err |= UART_ERROR_PARITY;
	}

	/* Clear everything we just sampled so the next check starts fresh */
	LPUART_ClearStatusFlags(config->base, kLPUART_RxOverrunFlag |
					      kLPUART_ParityErrorFlag |
					      kLPUART_FramingErrorFlag |
					      kLPUART_NoiseErrorFlag);

	return err;
}
229
230 #ifdef CONFIG_UART_INTERRUPT_DRIVEN
mcux_lpuart_fifo_fill(const struct device * dev,const uint8_t * tx_data,int len)231 static int mcux_lpuart_fifo_fill(const struct device *dev,
232 const uint8_t *tx_data,
233 int len)
234 {
235 const struct mcux_lpuart_config *config = dev->config;
236 int num_tx = 0U;
237
238 while ((len - num_tx > 0) &&
239 (LPUART_GetStatusFlags(config->base)
240 & LPUART_STAT_TDRE_MASK)) {
241
242 LPUART_WriteByte(config->base, tx_data[num_tx++]);
243 }
244 return num_tx;
245 }
246
/* Read up to len received bytes; returns the number actually read. */
static int mcux_lpuart_fifo_read(const struct device *dev, uint8_t *rx_data,
				 const int len)
{
	const struct mcux_lpuart_config *config = dev->config;
	int count = 0;

	while (count < len) {
		/* Stop once the receive data register has nothing pending */
		if ((LPUART_GetStatusFlags(config->base) & kLPUART_RxDataRegFullFlag) == 0U) {
			break;
		}
		rx_data[count] = LPUART_ReadByte(config->base);
		count++;
	}

	return count;
}
262
/*
 * Enable the TX-data-register-empty interrupt (interrupt-driven API).
 * With PM enabled this also takes the sleep lock so the SoC stays awake
 * while interrupt-driven transmission is ongoing.
 */
static void mcux_lpuart_irq_tx_enable(const struct device *dev)
{
	const struct mcux_lpuart_config *config = dev->config;
	uint32_t mask = kLPUART_TxDataRegEmptyInterruptEnable;
#ifdef CONFIG_PM
	struct mcux_lpuart_data *data = dev->data;
	unsigned int key;
#endif

#ifdef CONFIG_PM
	key = irq_lock();
	/* Interrupt-driven stream supersedes any poll_out in progress */
	data->tx_poll_stream_on = false;
	data->tx_int_stream_on = true;
	/* Transmission complete interrupt no longer required */
	LPUART_DisableInterrupts(config->base,
		kLPUART_TransmissionCompleteInterruptEnable);
	/* Do not allow system to sleep while UART tx is ongoing */
	mcux_lpuart_pm_policy_state_lock_get(dev);
#endif
	LPUART_EnableInterrupts(config->base, mask);
#ifdef CONFIG_PM
	irq_unlock(key);
#endif
}
287
/*
 * Disable the TX-data-register-empty interrupt. With PM enabled this
 * releases the sleep lock taken by mcux_lpuart_irq_tx_enable().
 */
static void mcux_lpuart_irq_tx_disable(const struct device *dev)
{
	const struct mcux_lpuart_config *config = dev->config;
	uint32_t mask = kLPUART_TxDataRegEmptyInterruptEnable;
#ifdef CONFIG_PM
	struct mcux_lpuart_data *data = dev->data;
	unsigned int key;

	key = irq_lock();
#endif

	LPUART_DisableInterrupts(config->base, mask);
#ifdef CONFIG_PM
	data->tx_int_stream_on = false;
	/*
	 * If transmission IRQ is no longer enabled,
	 * transmission is complete. Release pm constraint.
	 */
	mcux_lpuart_pm_policy_state_lock_put(dev);
	irq_unlock(key);
#endif
}
310
mcux_lpuart_irq_tx_complete(const struct device * dev)311 static int mcux_lpuart_irq_tx_complete(const struct device *dev)
312 {
313 const struct mcux_lpuart_config *config = dev->config;
314 uint32_t flags = LPUART_GetStatusFlags(config->base);
315
316 return (flags & kLPUART_TransmissionCompleteFlag) != 0U;
317 }
318
mcux_lpuart_irq_tx_ready(const struct device * dev)319 static int mcux_lpuart_irq_tx_ready(const struct device *dev)
320 {
321 const struct mcux_lpuart_config *config = dev->config;
322 uint32_t mask = kLPUART_TxDataRegEmptyInterruptEnable;
323 uint32_t flags = LPUART_GetStatusFlags(config->base);
324
325 return (LPUART_GetEnabledInterrupts(config->base) & mask)
326 && (flags & LPUART_STAT_TDRE_MASK);
327 }
328
mcux_lpuart_irq_rx_enable(const struct device * dev)329 static void mcux_lpuart_irq_rx_enable(const struct device *dev)
330 {
331 const struct mcux_lpuart_config *config = dev->config;
332 uint32_t mask = kLPUART_RxDataRegFullInterruptEnable;
333
334 LPUART_EnableInterrupts(config->base, mask);
335 }
336
mcux_lpuart_irq_rx_disable(const struct device * dev)337 static void mcux_lpuart_irq_rx_disable(const struct device *dev)
338 {
339 const struct mcux_lpuart_config *config = dev->config;
340 uint32_t mask = kLPUART_RxDataRegFullInterruptEnable;
341
342 LPUART_DisableInterrupts(config->base, mask);
343 }
344
mcux_lpuart_irq_rx_full(const struct device * dev)345 static int mcux_lpuart_irq_rx_full(const struct device *dev)
346 {
347 const struct mcux_lpuart_config *config = dev->config;
348 uint32_t flags = LPUART_GetStatusFlags(config->base);
349
350 return (flags & kLPUART_RxDataRegFullFlag) != 0U;
351 }
352
mcux_lpuart_irq_rx_pending(const struct device * dev)353 static int mcux_lpuart_irq_rx_pending(const struct device *dev)
354 {
355 const struct mcux_lpuart_config *config = dev->config;
356 uint32_t mask = kLPUART_RxDataRegFullInterruptEnable;
357
358 return (LPUART_GetEnabledInterrupts(config->base) & mask)
359 && mcux_lpuart_irq_rx_full(dev);
360 }
361
mcux_lpuart_irq_err_enable(const struct device * dev)362 static void mcux_lpuart_irq_err_enable(const struct device *dev)
363 {
364 const struct mcux_lpuart_config *config = dev->config;
365 uint32_t mask = kLPUART_NoiseErrorInterruptEnable |
366 kLPUART_FramingErrorInterruptEnable |
367 kLPUART_ParityErrorInterruptEnable;
368
369 LPUART_EnableInterrupts(config->base, mask);
370 }
371
mcux_lpuart_irq_err_disable(const struct device * dev)372 static void mcux_lpuart_irq_err_disable(const struct device *dev)
373 {
374 const struct mcux_lpuart_config *config = dev->config;
375 uint32_t mask = kLPUART_NoiseErrorInterruptEnable |
376 kLPUART_FramingErrorInterruptEnable |
377 kLPUART_ParityErrorInterruptEnable;
378
379 LPUART_DisableInterrupts(config->base, mask);
380 }
381
/* Anything for the ISR to do, in either direction? */
static int mcux_lpuart_irq_is_pending(const struct device *dev)
{
	if (mcux_lpuart_irq_tx_ready(dev)) {
		return 1;
	}

	return mcux_lpuart_irq_rx_pending(dev);
}
387
/*
 * No cached IRQ state to refresh; the other irq_* calls read the status
 * registers directly, so this is a fixed success per the UART API contract.
 */
static int mcux_lpuart_irq_update(const struct device *dev)
{
	return 1;
}
392
/*
 * Register the interrupt-driven API callback.
 *
 * With exclusive callbacks enabled, activating this API clears any async
 * callback state. Note this function can only log the exclusivity
 * violation (void return per the UART API) and still installs the callback.
 */
static void mcux_lpuart_irq_callback_set(const struct device *dev,
					 uart_irq_callback_user_data_t cb,
					 void *cb_data)
{
	struct mcux_lpuart_data *data = dev->data;

#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	if (data->api_type == LPUART_ASYNC) {
		LOG_ERR("UART irq and async api are exclusive");
	}
#endif

	data->callback = cb;
	data->cb_data = cb_data;

#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	/* Switching APIs: drop async callback state */
	data->async.user_callback = NULL;
	data->async.user_data = NULL;
	data->api_type = LPUART_IRQ_DRIVEN;
#endif
}
414
415 #endif /* CONFIG_UART_INTERRUPT_DRIVEN */
416
417
418 #ifdef CONFIG_UART_ASYNC_API
/*
 * Arm @work to fire after @timeout_us microseconds. A timeout of
 * SYS_FOREVER_US (wait forever) or 0 (disabled) leaves the timer unarmed.
 */
static inline void async_timer_start(struct k_work_delayable *work, size_t timeout_us)
{
	if ((timeout_us != SYS_FOREVER_US) && (timeout_us != 0)) {
		/* %zu: timeout_us is size_t; %d was a format/argument mismatch */
		LOG_DBG("async timer started for %zu us", timeout_us);
		k_work_reschedule(work, K_USEC(timeout_us));
	}
}
426
async_user_callback(const struct device * dev,struct uart_event * evt)427 static void async_user_callback(const struct device *dev, struct uart_event *evt)
428 {
429 const struct mcux_lpuart_data *data = dev->data;
430
431 if (data->async.user_callback) {
432 data->async.user_callback(dev, evt, data->async.user_data);
433 }
434 }
435
async_evt_tx_done(struct device * dev)436 static void async_evt_tx_done(struct device *dev)
437 {
438 struct mcux_lpuart_data *data = dev->data;
439
440 (void)k_work_cancel_delayable(&data->async.tx_dma_params.timeout_work);
441
442 LOG_DBG("TX done: %d", data->async.tx_dma_params.buf_len);
443 struct uart_event event = {
444 .type = UART_TX_DONE,
445 .data.tx.buf = data->async.tx_dma_params.buf,
446 .data.tx.len = data->async.tx_dma_params.buf_len
447 };
448
449 /* Reset TX Buffer */
450 data->async.tx_dma_params.buf = NULL;
451 data->async.tx_dma_params.buf_len = 0U;
452
453 async_user_callback(dev, &event);
454 }
455
async_evt_rx_rdy(const struct device * dev)456 static void async_evt_rx_rdy(const struct device *dev)
457 {
458 struct mcux_lpuart_data *data = dev->data;
459 struct mcux_lpuart_rx_dma_params *dma_params = &data->async.rx_dma_params;
460
461 struct uart_event event = {
462 .type = UART_RX_RDY,
463 .data.rx.buf = dma_params->buf,
464 .data.rx.len = dma_params->counter - dma_params->offset,
465 .data.rx.offset = dma_params->offset
466 };
467
468 LOG_DBG("RX Ready: (len: %d off: %d buf: %x)", event.data.rx.len, event.data.rx.offset,
469 (uint32_t)event.data.rx.buf);
470
471 /* Update the current pos for new data */
472 dma_params->offset = dma_params->counter;
473
474 /* Only send event for new data */
475 if (event.data.rx.len > 0) {
476 async_user_callback(dev, &event);
477 }
478 }
479
async_evt_rx_buf_request(const struct device * dev)480 static void async_evt_rx_buf_request(const struct device *dev)
481 {
482 struct uart_event evt = {
483 .type = UART_RX_BUF_REQUEST,
484 };
485
486 async_user_callback(dev, &evt);
487 }
488
async_evt_rx_buf_release(const struct device * dev)489 static void async_evt_rx_buf_release(const struct device *dev)
490 {
491 struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data;
492 struct uart_event evt = {
493 .type = UART_RX_BUF_RELEASED,
494 .data.rx_buf.buf = data->async.rx_dma_params.buf,
495 };
496
497 async_user_callback(dev, &evt);
498 data->async.rx_dma_params.buf = NULL;
499 data->async.rx_dma_params.buf_len = 0U;
500 data->async.rx_dma_params.offset = 0U;
501 data->async.rx_dma_params.counter = 0U;
502 }
503
/*
 * Report any bytes the RX DMA has written since the last UART_RX_RDY event
 * (used on idle-line timeout and when RX is being disabled).
 */
static void mcux_lpuart_async_rx_flush(const struct device *dev)
{
	struct dma_status status;
	struct mcux_lpuart_data *data = dev->data;
	const struct mcux_lpuart_config *config = dev->config;

	const int get_status_result = dma_get_status(config->rx_dma_config.dma_dev,
						     config->rx_dma_config.dma_channel,
						     &status);

	if (get_status_result == 0) {
		/* Bytes received so far = buffer size minus what DMA still has to fill */
		const size_t rx_rcv_len = data->async.rx_dma_params.buf_len -
					  status.pending_length;

		/* Only emit when there is new data. NOTE(review): the
		 * pending_length check presumably defers a completely full
		 * buffer to the DMA completion callback — confirm.
		 */
		if (rx_rcv_len > data->async.rx_dma_params.counter && status.pending_length) {
			data->async.rx_dma_params.counter = rx_rcv_len;
			async_evt_rx_rdy(dev);
		}
		LPUART_ClearStatusFlags(config->base, kLPUART_RxOverrunFlag);
	} else {
		LOG_ERR("Error getting DMA status");
	}
}
527
/*
 * Stop async reception: disable the receiver, flush bytes the DMA already
 * delivered, release the active (and any queued) buffer, stop the DMA
 * channel and emit UART_RX_DISABLED. Returns the dma_stop() result.
 */
static int mcux_lpuart_rx_disable(const struct device *dev)
{
	LOG_INF("Disabling UART RX DMA");
	const struct mcux_lpuart_config *config = dev->config;
	struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data;
	LPUART_Type *lpuart = config->base;
	const unsigned int key = irq_lock();

	/* Quiesce the receiver and its idle-line interrupt before touching DMA */
	LPUART_EnableRx(lpuart, false);
	(void)k_work_cancel_delayable(&data->async.rx_dma_params.timeout_work);
	LPUART_DisableInterrupts(lpuart, kLPUART_IdleLineInterruptEnable);
	LPUART_ClearStatusFlags(lpuart, kLPUART_IdleLineFlag);
	LPUART_EnableRxDMA(lpuart, false);

	/* No active RX buffer, cannot disable */
	if (!data->async.rx_dma_params.buf) {
		LOG_ERR("No buffers to release from RX DMA!");
	} else {
		/* Report remaining data, then return the buffer(s) to the app */
		mcux_lpuart_async_rx_flush(dev);
		async_evt_rx_buf_release(dev);
		if (data->async.next_rx_buffer != NULL) {
			/* Promote the queued buffer so it can be released too */
			data->async.rx_dma_params.buf = data->async.next_rx_buffer;
			data->async.rx_dma_params.buf_len = data->async.next_rx_buffer_len;
			data->async.next_rx_buffer = NULL;
			data->async.next_rx_buffer_len = 0;
			/* Release the next buffer as well */
			async_evt_rx_buf_release(dev);
		}
	}
	const int ret = dma_stop(config->rx_dma_config.dma_dev,
				 config->rx_dma_config.dma_channel);

	if (ret != 0) {
		LOG_ERR("Error stopping rx DMA. Reason: %x", ret);
	}
	LOG_DBG("RX: Disabled");
	struct uart_event disabled_event = {
		.type = UART_RX_DISABLED
	};

	async_user_callback(dev, &disabled_event);
	irq_unlock(key);
	return ret;
}
572
prepare_rx_dma_block_config(const struct device * dev)573 static void prepare_rx_dma_block_config(const struct device *dev)
574 {
575 struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data;
576 const struct mcux_lpuart_config *config = dev->config;
577 LPUART_Type *lpuart = config->base;
578 struct mcux_lpuart_rx_dma_params *rx_dma_params = &data->async.rx_dma_params;
579
580 assert(rx_dma_params->buf != NULL);
581 assert(rx_dma_params->buf_len > 0);
582
583 struct dma_block_config *head_block_config = &rx_dma_params->active_dma_block;
584
585 head_block_config->dest_address = (uint32_t)rx_dma_params->buf;
586 head_block_config->source_address = LPUART_GetDataRegisterAddress(lpuart);
587 head_block_config->block_size = rx_dma_params->buf_len;
588 head_block_config->dest_scatter_en = true;
589 }
590
/*
 * Configure and start the RX DMA channel, then point the LPUART's RX
 * requests at it. Returns 0 on success or a negative errno.
 *
 * Fix: previously LPUART_EnableRxDMA(true) ran even when dma_start()
 * failed, leaving the UART issuing DMA requests to a stopped channel.
 */
static int configure_and_start_rx_dma(
	const struct mcux_lpuart_config *config, struct mcux_lpuart_data *data,
	LPUART_Type *lpuart)
{
	LOG_DBG("Configuring and Starting UART RX DMA");
	int ret = dma_config(config->rx_dma_config.dma_dev,
			     config->rx_dma_config.dma_channel,
			     (struct dma_config *)&config->rx_dma_config.dma_cfg);

	if (ret != 0) {
		LOG_ERR("Failed to Configure RX DMA: err: %d", ret);
		return ret;
	}

	ret = dma_start(config->rx_dma_config.dma_dev, config->rx_dma_config.dma_channel);
	if (ret < 0) {
		LOG_ERR("Failed to start DMA(Rx) Ch %d(%d)",
			config->rx_dma_config.dma_channel,
			ret);
		/* Do not hand the UART over to a channel that never started */
		return ret;
	}

	LPUART_EnableRxDMA(lpuart, true);
	return ret;
}
613
uart_mcux_lpuart_dma_replace_rx_buffer(const struct device * dev)614 static int uart_mcux_lpuart_dma_replace_rx_buffer(const struct device *dev)
615 {
616 struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data;
617 const struct mcux_lpuart_config *config = dev->config;
618 LPUART_Type *lpuart = config->base;
619
620 LOG_DBG("Replacing RX buffer, new length: %d", data->async.next_rx_buffer_len);
621 /* There must be a buffer to replace this one with */
622 assert(data->async.next_rx_buffer != NULL);
623 assert(data->async.next_rx_buffer_len != 0U);
624
625 const int success =
626 dma_reload(config->rx_dma_config.dma_dev, config->rx_dma_config.dma_channel,
627 LPUART_GetDataRegisterAddress(lpuart),
628 (uint32_t)data->async.next_rx_buffer, data->async.next_rx_buffer_len);
629
630 if (success != 0) {
631 LOG_ERR("Error %d reloading DMA with next RX buffer", success);
632 }
633
634 return success;
635 }
636
/*
 * Completion callback shared by the TX and RX DMA channels.
 *
 * TX channel: the whole buffer went out — disable TX DMA requests and
 * report UART_TX_DONE.
 * RX channel: the active buffer is full — report it, release it, and swap
 * in the buffer previously queued via uart_rx_buf_rsp() (the DMA engine
 * was already reloaded with it); if no next buffer exists, reception is
 * shut down.
 */
static void dma_callback(const struct device *dma_dev, void *callback_arg, uint32_t channel,
			 int dma_status)
{
	struct device *dev = (struct device *)callback_arg;
	const struct mcux_lpuart_config *config = dev->config;
	LPUART_Type *lpuart = config->base;
	struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data;

	LOG_DBG("DMA call back on channel %d", channel);
	struct dma_status status;
	const int get_status_result = dma_get_status(dma_dev, channel, &status);

	if (get_status_result < 0) {
		LOG_ERR("error on status get: %d", get_status_result);
	} else {
		LOG_DBG("DMA Status: b: %d dir: %d len_remain: %d", status.busy, status.dir,
			status.pending_length);
	}

	if (dma_status < 0) {
		LOG_ERR("Got error : %d", dma_status);
	}


	if (channel == config->tx_dma_config.dma_channel) {
		LOG_DBG("TX Channel");
		LPUART_EnableTxDMA(lpuart, false);
		async_evt_tx_done(dev);
	} else if (channel == config->rx_dma_config.dma_channel) {
		LOG_DBG("RX Channel");
		struct mcux_lpuart_rx_dma_params *rx_dma_params = &data->async.rx_dma_params;

		/* The RX Event indicates DMA transfer is complete and full buffer is available. */
		rx_dma_params->counter = rx_dma_params->buf_len;

		LOG_DBG("Current Buf (%x) full, swapping to new buf: %x",
			(uint32_t)rx_dma_params->buf,
			(uint32_t)data->async.next_rx_buffer);
		/* Report remaining bytes, then hand the full buffer back */
		async_evt_rx_rdy(dev);
		async_evt_rx_buf_release(dev);

		/* Remember the buf so it can be released after it is done. */
		rx_dma_params->buf = data->async.next_rx_buffer;
		rx_dma_params->buf_len = data->async.next_rx_buffer_len;
		data->async.next_rx_buffer = NULL;
		data->async.next_rx_buffer_len = 0U;

		/* A new buffer was available (and already loaded into the DMA engine) */
		if (rx_dma_params->buf != NULL && rx_dma_params->buf_len > 0) {
			/* Request the next buffer */
			async_evt_rx_buf_request(dev);
		} else {
			/* Buffer full without valid next buffer, disable RX DMA */
			LOG_INF("Disabled RX DMA, no valid next buffer ");
			mcux_lpuart_rx_disable(dev);
		}
	} else {
		LOG_ERR("Got unexpected DMA Channel: %d", channel);
	}
}
697
698 static int mcux_lpuart_configure_async(const struct device *dev);
699
/*
 * Register the async API event callback.
 *
 * Rejected with -ENOTSUP when the interrupt-driven API is already active
 * (exclusive callbacks). On success the driver is (re)configured for
 * async operation.
 */
static int mcux_lpuart_callback_set(const struct device *dev, uart_callback_t callback,
				    void *user_data)
{
	struct mcux_lpuart_data *data = dev->data;

#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	if (data->api_type == LPUART_IRQ_DRIVEN) {
		LOG_ERR("UART irq and async api are exclusive");
		return -ENOTSUP;
	}
#endif

	data->async.user_callback = callback;
	data->async.user_data = user_data;

#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	/* Switching APIs: drop interrupt-driven callback state */
	data->callback = NULL;
	data->cb_data = NULL;
	data->api_type = LPUART_ASYNC;
#endif

	return mcux_lpuart_configure_async(dev);
}
723
/*
 * Start an async DMA transmission of @len bytes from @buf.
 *
 * Returns -EBUSY if a transfer is already in flight, a negative errno on
 * DMA configuration/start failure, 0 on success. @timeout_us arms the TX
 * abort timer (SYS_FOREVER_US / 0 disables it).
 */
static int mcux_lpuart_tx(const struct device *dev, const uint8_t *buf, size_t len,
			  int32_t timeout_us)
{
	struct mcux_lpuart_data *data = dev->data;
	const struct mcux_lpuart_config *config = dev->config;
	LPUART_Type *lpuart = config->base;

	unsigned int key = irq_lock();

	/* Check for an ongoing transfer and abort if it is pending */
	struct dma_status status;
	const int get_status_result = dma_get_status(config->tx_dma_config.dma_dev,
						     config->tx_dma_config.dma_channel,
						     &status);

	if (get_status_result < 0 || status.busy) {
		irq_unlock(key);
		LOG_ERR("Unable to submit UART DMA Transfer.");
		return get_status_result < 0 ? get_status_result : -EBUSY;
	}

	int ret;

	/* Keep the UART from issuing TX DMA requests while reconfiguring */
	LPUART_EnableTxDMA(lpuart, false);

	/* Describe the transfer: buf -> peripheral data register */
	data->async.tx_dma_params.buf = buf;
	data->async.tx_dma_params.buf_len = len;
	data->async.tx_dma_params.active_dma_block.source_address = (uint32_t)buf;
	data->async.tx_dma_params.active_dma_block.dest_address =
		LPUART_GetDataRegisterAddress(lpuart);
	data->async.tx_dma_params.active_dma_block.block_size = len;
	data->async.tx_dma_params.active_dma_block.next_block = NULL;

	ret = dma_config(config->tx_dma_config.dma_dev,
			 config->tx_dma_config.dma_channel,
			 (struct dma_config *)&config->tx_dma_config.dma_cfg);

	if (ret == 0) {
		LOG_DBG("Starting UART DMA TX Ch %u", config->tx_dma_config.dma_channel);

		ret = dma_start(config->tx_dma_config.dma_dev,
				config->tx_dma_config.dma_channel);
		LPUART_EnableTxDMA(lpuart, true);
		if (ret != 0) {
			LOG_ERR("Failed to start DMA(Tx) Ch %d",
				config->tx_dma_config.dma_channel);
		}
		async_timer_start(&data->async.tx_dma_params.timeout_work, timeout_us);
	} else {
		LOG_ERR("Error configuring UART DMA: %x", ret);
	}
	irq_unlock(key);
	return ret;
}
778
/*
 * Abort an in-flight async transmission. Stops TX DMA requests, cancels
 * the timeout timer, stops the channel and — on successful stop — emits
 * UART_TX_ABORTED with the number of bytes that made it out.
 */
static int mcux_lpuart_tx_abort(const struct device *dev)
{
	struct mcux_lpuart_data *data = dev->data;
	const struct mcux_lpuart_config *config = dev->config;
	LPUART_Type *lpuart = config->base;

	LPUART_EnableTxDMA(lpuart, false);
	(void)k_work_cancel_delayable(&data->async.tx_dma_params.timeout_work);
	struct dma_status status;
	const int get_status_result = dma_get_status(config->tx_dma_config.dma_dev,
						     config->tx_dma_config.dma_channel,
						     &status);

	if (get_status_result < 0) {
		LOG_ERR("Error querying TX DMA Status during abort.");
	}

	/* Bytes sent = requested length minus what DMA had left to move;
	 * report 0 if the status query failed
	 */
	const size_t bytes_transmitted = (get_status_result == 0) ?
		data->async.tx_dma_params.buf_len - status.pending_length : 0;

	const int ret = dma_stop(config->tx_dma_config.dma_dev, config->tx_dma_config.dma_channel);

	if (ret == 0) {
		struct uart_event tx_aborted_event = {
			.type = UART_TX_ABORTED,
			.data.tx.buf = data->async.tx_dma_params.buf,
			.data.tx.len = bytes_transmitted
		};
		async_user_callback(dev, &tx_aborted_event);
	}
	return ret;
}
811
/*
 * Start async reception into @buf (size @len). @timeout_us is the idle
 * timeout applied between the idle-line interrupt and the RX_RDY event.
 * Returns -EBUSY if the RX DMA channel is already busy, a negative errno
 * on DMA failure, 0 on success.
 */
static int mcux_lpuart_rx_enable(const struct device *dev, uint8_t *buf, const size_t len,
				 const int32_t timeout_us)
{
	LOG_DBG("Enabling UART RX DMA");
	struct mcux_lpuart_data *data = dev->data;
	const struct mcux_lpuart_config *config = dev->config;
	LPUART_Type *lpuart = config->base;

	struct mcux_lpuart_rx_dma_params *rx_dma_params = &data->async.rx_dma_params;

	unsigned int key = irq_lock();
	struct dma_status status;
	const int get_status_result = dma_get_status(config->rx_dma_config.dma_dev,
						     config->rx_dma_config.dma_channel,
						     &status);

	if (get_status_result < 0 || status.busy) {
		LOG_ERR("Unable to start receive on UART.");
		irq_unlock(key);
		return get_status_result < 0 ? get_status_result : -EBUSY;
	}

	/* Record the new active buffer; no next buffer queued yet */
	rx_dma_params->timeout_us = timeout_us;
	rx_dma_params->buf = buf;
	rx_dma_params->buf_len = len;
	data->async.next_rx_buffer = NULL;
	data->async.next_rx_buffer_len = 0U;

	/* Idle-line interrupt drives the RX inactivity timeout */
	LPUART_EnableInterrupts(config->base, kLPUART_IdleLineInterruptEnable);
	prepare_rx_dma_block_config(dev);
	const int ret = configure_and_start_rx_dma(config, data, lpuart);

	/* Request the next buffer for when this buffer is full for continuous reception */
	async_evt_rx_buf_request(dev);

	/* Clear these status flags as they can prevent the UART device from receiving data */
	LPUART_ClearStatusFlags(config->base, kLPUART_RxOverrunFlag | kLPUART_ParityErrorFlag |
					      kLPUART_FramingErrorFlag |
					      kLPUART_NoiseErrorFlag);
	LPUART_EnableRx(lpuart, true);
	irq_unlock(key);
	return ret;
}
855
/*
 * Queue @buf (size @len) as the next receive buffer and preload it into
 * the DMA engine.
 *
 * Fix: the result of the DMA reload was previously discarded and the
 * function always returned 0; a reload failure is now propagated so the
 * caller knows the buffer was not actually queued.
 */
static int mcux_lpuart_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len)
{
	struct mcux_lpuart_data *data = dev->data;
	unsigned int key;
	int ret;

	key = irq_lock();
	/* Only one next buffer may be queued at a time */
	assert(data->async.next_rx_buffer == NULL);
	assert(data->async.next_rx_buffer_len == 0);
	data->async.next_rx_buffer = buf;
	data->async.next_rx_buffer_len = len;
	ret = uart_mcux_lpuart_dma_replace_rx_buffer(dev);
	irq_unlock(key);
	return ret;
}
870
mcux_lpuart_async_rx_timeout(struct k_work * work)871 static void mcux_lpuart_async_rx_timeout(struct k_work *work)
872 {
873 struct k_work_delayable *dwork = k_work_delayable_from_work(work);
874 struct mcux_lpuart_rx_dma_params *rx_params = CONTAINER_OF(dwork,
875 struct mcux_lpuart_rx_dma_params,
876 timeout_work);
877 struct mcux_lpuart_async_data *async_data = CONTAINER_OF(rx_params,
878 struct mcux_lpuart_async_data,
879 rx_dma_params);
880 const struct device *dev = async_data->uart_dev;
881
882 LOG_DBG("RX timeout");
883 mcux_lpuart_async_rx_flush(dev);
884 }
885
mcux_lpuart_async_tx_timeout(struct k_work * work)886 static void mcux_lpuart_async_tx_timeout(struct k_work *work)
887 {
888 struct k_work_delayable *dwork = k_work_delayable_from_work(work);
889 struct mcux_lpuart_tx_dma_params *tx_params = CONTAINER_OF(dwork,
890 struct mcux_lpuart_tx_dma_params,
891 timeout_work);
892 struct mcux_lpuart_async_data *async_data = CONTAINER_OF(tx_params,
893 struct mcux_lpuart_async_data,
894 tx_dma_params);
895 const struct device *dev = async_data->uart_dev;
896
897 LOG_DBG("TX timeout");
898 (void)mcux_lpuart_tx_abort(dev);
899 }
900
901 #endif /* CONFIG_UART_ASYNC_API */
902
903 #if CONFIG_UART_MCUX_LPUART_ISR_SUPPORT
904
905 #ifdef CONFIG_UART_INTERRUPT_DRIVEN
mcux_lpuart_irq_driven_isr(const struct device * dev,struct mcux_lpuart_data * data,const struct mcux_lpuart_config * config,const uint32_t status)906 static inline void mcux_lpuart_irq_driven_isr(const struct device *dev,
907 struct mcux_lpuart_data *data,
908 const struct mcux_lpuart_config *config,
909 const uint32_t status) {
910 if (data->callback) {
911 data->callback(dev, data->cb_data);
912 }
913
914 if (status & kLPUART_RxOverrunFlag) {
915 LPUART_ClearStatusFlags(config->base, kLPUART_RxOverrunFlag);
916 }
917 }
918 #endif
919
920 #ifdef CONFIG_UART_ASYNC_API
mcux_lpuart_async_isr(struct mcux_lpuart_data * data,const struct mcux_lpuart_config * config,const uint32_t status)921 static inline void mcux_lpuart_async_isr(struct mcux_lpuart_data *data,
922 const struct mcux_lpuart_config *config,
923 const uint32_t status) {
924 if (status & kLPUART_IdleLineFlag) {
925 async_timer_start(&data->async.rx_dma_params.timeout_work,
926 data->async.rx_dma_params.timeout_us);
927 LPUART_ClearStatusFlags(config->base, kLPUART_IdleLineFlag);
928 }
929
930 if (status & kLPUART_RxOverrunFlag) {
931 LPUART_ClearStatusFlags(config->base, kLPUART_RxOverrunFlag);
932 }
933 }
934 #endif
935
/* Common LPUART interrupt service routine.
 *
 * Reads the status flags once, then dispatches: first the PM hook (release
 * the power-state lock once a poll-mode transmission completes), then either
 * the interrupt-driven or the async handler. When both APIs are compiled in,
 * the runtime selection in data->api_type decides which one runs.
 */
static void mcux_lpuart_isr(const struct device *dev)
{
	struct mcux_lpuart_data *data = dev->data;
	const struct mcux_lpuart_config *config = dev->config;
	const uint32_t status = LPUART_GetStatusFlags(config->base);

#if CONFIG_PM
	if (status & kLPUART_TransmissionCompleteFlag) {

		if (data->tx_poll_stream_on) {
			/* Poll transmission complete. Allow system to sleep */
			LPUART_DisableInterrupts(config->base,
				kLPUART_TransmissionCompleteInterruptEnable);
			data->tx_poll_stream_on = false;
			mcux_lpuart_pm_policy_state_lock_put(dev);
		}
	}
#endif /* CONFIG_PM */

#if defined(CONFIG_UART_ASYNC_API) && defined(CONFIG_UART_INTERRUPT_DRIVEN)
	/* Both APIs built in: route by the API the application registered. */
	if (data->api_type == LPUART_IRQ_DRIVEN) {
		mcux_lpuart_irq_driven_isr(dev, data, config, status);
	} else if (data->api_type == LPUART_ASYNC) {
		mcux_lpuart_async_isr(data, config, status);
	}
#elif defined(CONFIG_UART_INTERRUPT_DRIVEN)
	mcux_lpuart_irq_driven_isr(dev, data, config, status);
#elif defined(CONFIG_UART_ASYNC_API)
	mcux_lpuart_async_isr(data, config, status);
#endif /* API */
}
967 #endif /* CONFIG_UART_MCUX_LPUART_ISR_SUPPORT */
968
/* Translate a Zephyr uart_config into the HAL's lpuart_config_t.
 *
 * Fields whose hardware support is feature-gated (7-bit data, stop-bit
 * configuration, modem/flow-control) are only translated when the SoC
 * advertises them. RX is enabled here; TX stays disabled so the caller can
 * program the RS-485 driver-enable polarity before turning it on.
 *
 * @return 0 on success, -ENOTSUP for any unsupported setting.
 */
static int mcux_lpuart_configure_basic(const struct device *dev, const struct uart_config *cfg,
				       lpuart_config_t *uart_config)
{
	/* Translate UART API enum to LPUART enum from HAL */
	if (cfg->parity == UART_CFG_PARITY_NONE) {
		uart_config->parityMode = kLPUART_ParityDisabled;
	} else if (cfg->parity == UART_CFG_PARITY_ODD) {
		uart_config->parityMode = kLPUART_ParityOdd;
	} else if (cfg->parity == UART_CFG_PARITY_EVEN) {
		uart_config->parityMode = kLPUART_ParityEven;
	} else {
		return -ENOTSUP;
	}

	if (cfg->data_bits == UART_CFG_DATA_BITS_8) {
		uart_config->dataBitsCount = kLPUART_EightDataBits;
#if defined(FSL_FEATURE_LPUART_HAS_7BIT_DATA_SUPPORT) && \
	FSL_FEATURE_LPUART_HAS_7BIT_DATA_SUPPORT
	} else if (cfg->data_bits == UART_CFG_DATA_BITS_7) {
		uart_config->dataBitsCount = kLPUART_SevenDataBits;
#endif
	} else {
		return -ENOTSUP;
	}

#if defined(FSL_FEATURE_LPUART_HAS_STOP_BIT_CONFIG_SUPPORT) && \
	FSL_FEATURE_LPUART_HAS_STOP_BIT_CONFIG_SUPPORT
	if (cfg->stop_bits == UART_CFG_STOP_BITS_1) {
		uart_config->stopBitCount = kLPUART_OneStopBit;
	} else if (cfg->stop_bits == UART_CFG_STOP_BITS_2) {
		uart_config->stopBitCount = kLPUART_TwoStopBit;
	} else {
		return -ENOTSUP;
	}
#endif

#if defined(FSL_FEATURE_LPUART_HAS_MODEM_SUPPORT) && \
	FSL_FEATURE_LPUART_HAS_MODEM_SUPPORT
	/* RS-485 does not use CTS/RTS handshaking, treat it like "none". */
	if (cfg->flow_ctrl == UART_CFG_FLOW_CTRL_NONE ||
	    cfg->flow_ctrl == UART_CFG_FLOW_CTRL_RS485) {
		uart_config->enableTxCTS = false;
		uart_config->enableRxRTS = false;
	} else if (cfg->flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS) {
		uart_config->enableTxCTS = true;
		uart_config->enableRxRTS = true;
	} else {
		return -ENOTSUP;
	}
#endif

	uart_config->baudRate_Bps = cfg->baudrate;
	uart_config->enableRx = true;
	/* Tx will be enabled manually after set tx-rts */
	uart_config->enableTx = false;

	return 0;
}
1039
1040 #ifdef CONFIG_UART_ASYNC_API
/* Prepare driver state for the async (DMA) API after a (re)configuration.
 *
 * Re-derives a HAL config from the stored uart_config, initializes the RX/TX
 * timeout work items and drains stale bytes from the RX FIFO.
 *
 * NOTE(review): the local uart_config (rxIdleType/rxIdleConfig and
 * enableRx = false) is filled in but never passed to LPUART_Init within this
 * function — confirm these idle-line settings actually reach the hardware
 * via a caller or a later init path.
 */
static int mcux_lpuart_configure_async(const struct device *dev)
{
	const struct mcux_lpuart_config *config = dev->config;
	struct mcux_lpuart_data *data = dev->data;
	lpuart_config_t uart_config;
	int ret;

	LPUART_GetDefaultConfig(&uart_config);

	/* Translate the currently stored Zephyr config into HAL form. */
	ret = mcux_lpuart_configure_basic(dev, &data->uart_config, &uart_config);
	if (ret) {
		return ret;
	}

	/* Idle-line detection drives the async RX timeout handling. */
	uart_config.rxIdleType = kLPUART_IdleTypeStopBit;
	uart_config.rxIdleConfig = kLPUART_IdleCharacter1;
	data->async.next_rx_buffer = NULL;
	data->async.next_rx_buffer_len = 0;
	data->async.uart_dev = dev;
	k_work_init_delayable(&data->async.rx_dma_params.timeout_work,
			      mcux_lpuart_async_rx_timeout);
	k_work_init_delayable(&data->async.tx_dma_params.timeout_work,
			      mcux_lpuart_async_tx_timeout);

	/* Disable the UART Receiver until the async API provides a buffer to
	 * receive into with rx_enable
	 */
	uart_config.enableRx = false;
	/* Clearing the fifo of any junk received before the async rx enable was called */
	while (LPUART_GetRxFifoCount(config->base) > 0) {
		LPUART_ReadByte(config->base);
	}

	return 0;
}
1076 #endif
1077
/* Fully (re)initialize the LPUART hardware from a Zephyr uart_config.
 *
 * Resolves the module clock rate, translates @p cfg into a HAL config, runs
 * LPUART_Init, then applies the devicetree options: RS-485 driver-enable via
 * RTS, loopback or single-wire (half-duplex) mode, and TX/RX signal
 * inversion. TX is enabled only after the RS-485 RTS polarity is programmed.
 * On success the applied configuration is cached in data->uart_config.
 *
 * @return 0 on success, -ENODEV if the clock controller is not ready,
 *         -EINVAL if the clock rate cannot be read, or -ENOTSUP for an
 *         unsupported uart_config field.
 */
static int mcux_lpuart_configure_init(const struct device *dev, const struct uart_config *cfg)
{
	const struct mcux_lpuart_config *config = dev->config;
	struct mcux_lpuart_data *data = dev->data;
	lpuart_config_t uart_config;
	uint32_t clock_freq;
	int ret;

	if (!device_is_ready(config->clock_dev)) {
		return -ENODEV;
	}

	if (clock_control_get_rate(config->clock_dev, config->clock_subsys,
				   &clock_freq)) {
		return -EINVAL;
	}

	LPUART_GetDefaultConfig(&uart_config);

	ret = mcux_lpuart_configure_basic(dev, cfg, &uart_config);
	if (ret) {
		return ret;
	}

	LPUART_Init(config->base, &uart_config, clock_freq);

#if defined(FSL_FEATURE_LPUART_HAS_MODEM_SUPPORT) && \
	FSL_FEATURE_LPUART_HAS_MODEM_SUPPORT
	if (cfg->flow_ctrl == UART_CFG_FLOW_CTRL_RS485) {
		/* Set the LPUART into RS485 mode (tx driver enable using RTS) */
		config->base->MODIR |= LPUART_MODIR_TXRTSE(true);
		if (!config->rs485_de_active_low) {
			config->base->MODIR |= LPUART_MODIR_TXRTSPOL(1);
		}
	}
#endif

	/* Now can enable tx */
	config->base->CTRL |= LPUART_CTRL_TE(true);


	if (config->loopback_en) {
		/* Set the LPUART into loopback mode */
		config->base->CTRL |= LPUART_CTRL_LOOPS_MASK;
		config->base->CTRL &= ~LPUART_CTRL_RSRC_MASK;
	} else if (config->single_wire) {
		/* Enable the single wire / half-duplex mode, only possible when
		 * loopback is disabled. We need a critical section to prevent
		 * the UART firing an interrupt during mode switch
		 */
		unsigned int key = irq_lock();

		config->base->CTRL |= (LPUART_CTRL_LOOPS_MASK | LPUART_CTRL_RSRC_MASK);
		irq_unlock(key);
	} else {
#ifdef LPUART_CTRL_TXINV
		/* Only invert TX in full-duplex mode */
		if (config->tx_invert) {
			config->base->CTRL |= LPUART_CTRL_TXINV(1);
		}
#endif
	}

#ifdef LPUART_STAT_RXINV
	/* RX inversion is independent of the duplex mode. */
	if (config->rx_invert) {
		config->base->STAT |= LPUART_STAT_RXINV(1);
	}
#endif

	/* update internal uart_config */
	data->uart_config = *cfg;

	return 0;
}
1152
1153 #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
mcux_lpuart_config_get(const struct device * dev,struct uart_config * cfg)1154 static int mcux_lpuart_config_get(const struct device *dev, struct uart_config *cfg)
1155 {
1156 struct mcux_lpuart_data *data = dev->data;
1157 *cfg = data->uart_config;
1158 return 0;
1159 }
1160
mcux_lpuart_configure(const struct device * dev,const struct uart_config * cfg)1161 static int mcux_lpuart_configure(const struct device *dev,
1162 const struct uart_config *cfg)
1163 {
1164 const struct mcux_lpuart_config *config = dev->config;
1165
1166 /* Make sure that RSRC is de-asserted otherwise deinit will hang. */
1167 config->base->CTRL &= ~LPUART_CTRL_RSRC_MASK;
1168
1169 /* disable LPUART */
1170 LPUART_Deinit(config->base);
1171
1172 int ret = mcux_lpuart_configure_init(dev, cfg);
1173 if (ret) {
1174 return ret;
1175 }
1176
1177 /* wait for hardware init */
1178 k_sleep(K_MSEC(1));
1179
1180 return 0;
1181 }
1182 #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */
1183
mcux_lpuart_init(const struct device * dev)1184 static int mcux_lpuart_init(const struct device *dev)
1185 {
1186 const struct mcux_lpuart_config *config = dev->config;
1187 struct mcux_lpuart_data *data = dev->data;
1188 struct uart_config *uart_api_config = &data->uart_config;
1189 int err;
1190
1191 uart_api_config->baudrate = config->baud_rate;
1192 uart_api_config->parity = config->parity;
1193 uart_api_config->stop_bits = UART_CFG_STOP_BITS_1;
1194 uart_api_config->data_bits = UART_CFG_DATA_BITS_8;
1195 uart_api_config->flow_ctrl = config->flow_ctrl;
1196
1197 /* set initial configuration */
1198 mcux_lpuart_configure_init(dev, uart_api_config);
1199 if (config->flow_ctrl) {
1200 const struct pinctrl_state *state;
1201
1202 err = pinctrl_lookup_state(config->pincfg, PINCTRL_STATE_FLOWCONTROL, &state);
1203 if (err < 0) {
1204 err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
1205 }
1206 } else {
1207 err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
1208 }
1209 if (err < 0) {
1210 return err;
1211 }
1212
1213 #ifdef CONFIG_UART_MCUX_LPUART_ISR_SUPPORT
1214 #if CONFIG_NXP_LP_FLEXCOMM
1215 /* When using LP Flexcomm driver, register the interrupt handler
1216 * so we receive notification from the LP Flexcomm interrupt handler.
1217 */
1218 nxp_lp_flexcomm_setirqhandler(config->parent_dev, dev,
1219 LP_FLEXCOMM_PERIPH_LPUART, mcux_lpuart_isr);
1220 #else
1221 /* Interrupt is managed by this driver */
1222 config->irq_config_func(dev);
1223 #endif
1224 #ifdef CONFIG_UART_EXCLUSIVE_API_CALLBACKS
1225 data->api_type = LPUART_NONE;
1226 #endif
1227 #endif
1228
1229 #ifdef CONFIG_PM
1230 data->pm_state_lock_on = false;
1231 data->tx_poll_stream_on = false;
1232 data->tx_int_stream_on = false;
1233 #endif
1234
1235 return 0;
1236 }
1237
/* UART driver API vtable: polling calls are always available; runtime
 * configure, interrupt-driven and async entries are compiled in per Kconfig.
 */
static DEVICE_API(uart, mcux_lpuart_driver_api) = {
	.poll_in = mcux_lpuart_poll_in,
	.poll_out = mcux_lpuart_poll_out,
	.err_check = mcux_lpuart_err_check,
#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
	.configure = mcux_lpuart_configure,
	.config_get = mcux_lpuart_config_get,
#endif
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	.fifo_fill = mcux_lpuart_fifo_fill,
	.fifo_read = mcux_lpuart_fifo_read,
	.irq_tx_enable = mcux_lpuart_irq_tx_enable,
	.irq_tx_disable = mcux_lpuart_irq_tx_disable,
	.irq_tx_complete = mcux_lpuart_irq_tx_complete,
	.irq_tx_ready = mcux_lpuart_irq_tx_ready,
	.irq_rx_enable = mcux_lpuart_irq_rx_enable,
	.irq_rx_disable = mcux_lpuart_irq_rx_disable,
	.irq_rx_ready = mcux_lpuart_irq_rx_full,
	.irq_err_enable = mcux_lpuart_irq_err_enable,
	.irq_err_disable = mcux_lpuart_irq_err_disable,
	.irq_is_pending = mcux_lpuart_irq_is_pending,
	.irq_update = mcux_lpuart_irq_update,
	.irq_callback_set = mcux_lpuart_irq_callback_set,
#endif
#ifdef CONFIG_UART_ASYNC_API
	.callback_set = mcux_lpuart_callback_set,
	.tx = mcux_lpuart_tx,
	.tx_abort = mcux_lpuart_tx_abort,
	.rx_enable = mcux_lpuart_rx_enable,
	.rx_buf_rsp = mcux_lpuart_rx_buf_rsp,
	.rx_disable = mcux_lpuart_rx_disable,
#endif /* CONFIG_UART_ASYNC_API */
};
1271
1272
#ifdef CONFIG_UART_MCUX_LPUART_ISR_SUPPORT
/* Connect and enable one devicetree interrupt (index i) of instance n. */
#define MCUX_LPUART_IRQ_INSTALL(n, i)					\
	do {								\
		IRQ_CONNECT(DT_INST_IRQN_BY_IDX(n, i),			\
			    DT_INST_IRQ_BY_IDX(n, i, priority),		\
			    mcux_lpuart_isr, DEVICE_DT_INST_GET(n), 0);	\
									\
		irq_enable(DT_INST_IRQ_BY_IDX(n, i, irq));		\
	} while (false)
/* Config-struct initializer hooking in the per-instance IRQ setup. */
#define MCUX_LPUART_IRQ_INIT(n) .irq_config_func = mcux_lpuart_config_func_##n,
/* Per-instance IRQ setup function: installs up to two DT interrupts. */
#define MCUX_LPUART_IRQ_DEFINE(n)					\
	static void mcux_lpuart_config_func_##n(const struct device *dev) \
	{								\
		IF_ENABLED(DT_INST_IRQ_HAS_IDX(n, 0),			\
			   (MCUX_LPUART_IRQ_INSTALL(n, 0);))		\
									\
		IF_ENABLED(DT_INST_IRQ_HAS_IDX(n, 1),			\
			   (MCUX_LPUART_IRQ_INSTALL(n, 1);))		\
	}
#else
/* No ISR support: both macros expand to nothing. */
#define MCUX_LPUART_IRQ_INIT(n)
#define MCUX_LPUART_IRQ_DEFINE(n)
#endif /* CONFIG_UART_MCUX_LPUART_ISR_SUPPORT */
1296
#ifdef CONFIG_UART_ASYNC_API
/* TX DMA config initializer: memory-to-peripheral, single-byte transfers,
 * channel/mux/slot taken from the instance's devicetree "tx" dmas entry.
 */
#define TX_DMA_CONFIG(id)						\
	.tx_dma_config = {						\
		.dma_dev =						\
			DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, tx)), \
		.dma_channel =						\
			DT_INST_DMAS_CELL_BY_NAME(id, tx, mux),		\
		.dma_cfg = {						\
			.source_burst_length = 1,			\
			.dest_burst_length = 1,				\
			.source_data_size = 1,				\
			.dest_data_size = 1,				\
			.complete_callback_en = 1,			\
			.error_callback_dis = 0,			\
			.block_count = 1,				\
			.head_block =					\
				&mcux_lpuart_##id##_data.async.tx_dma_params.active_dma_block, \
			.channel_direction = MEMORY_TO_PERIPHERAL,	\
			.dma_slot = DT_INST_DMAS_CELL_BY_NAME(		\
				id, tx, source),			\
			.dma_callback = dma_callback,			\
			.user_data = (void *)DEVICE_DT_INST_GET(id)	\
		},							\
	},
/* RX DMA config initializer: peripheral-to-memory and, unlike TX, cyclic so
 * reception keeps running across buffer wraps.
 */
#define RX_DMA_CONFIG(id)						\
	.rx_dma_config = {						\
		.dma_dev =						\
			DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, rx)), \
		.dma_channel =						\
			DT_INST_DMAS_CELL_BY_NAME(id, rx, mux),		\
		.dma_cfg = {						\
			.source_burst_length = 1,			\
			.dest_burst_length = 1,				\
			.source_data_size = 1,				\
			.dest_data_size = 1,				\
			.complete_callback_en = 1,			\
			.error_callback_dis = 0,			\
			.block_count = 1,				\
			.head_block =					\
				&mcux_lpuart_##id##_data.async.rx_dma_params.active_dma_block, \
			.channel_direction = PERIPHERAL_TO_MEMORY,	\
			.dma_slot = DT_INST_DMAS_CELL_BY_NAME(		\
				id, rx, source),			\
			.dma_callback = dma_callback,			\
			.user_data = (void *)DEVICE_DT_INST_GET(id),	\
			.cyclic = 1,					\
		},							\
	},
#else
/* Async API disabled: no DMA members are emitted. */
#define RX_DMA_CONFIG(n)
#define TX_DMA_CONFIG(n)
#endif /* CONFIG_UART_ASYNC_API */
1349
/* Map devicetree properties to the flow-control mode: hw-flow-control wins,
 * then nxp,rs485-mode, otherwise none.
 */
#define FLOW_CONTROL(n) \
	DT_INST_PROP(n, hw_flow_control)		\
		? UART_CFG_FLOW_CTRL_RTS_CTS		\
		: DT_INST_PROP(n, nxp_rs485_mode)	\
			? UART_CFG_FLOW_CTRL_RS485	\
			: UART_CFG_FLOW_CTRL_NONE
#ifdef CONFIG_NXP_LP_FLEXCOMM
/* Under LP Flexcomm the parent node is the flexcomm controller device. */
#define PARENT_DEV(n) \
	.parent_dev = DEVICE_DT_GET(DT_INST_PARENT(n)),
#else
#define PARENT_DEV(n)
#endif /* CONFIG_NXP_LP_FLEXCOMM */
1362
/* Emit the const per-instance config struct, populated entirely from the
 * instance's devicetree node (register base, clocks, baud, options, pinctrl,
 * plus the optional IRQ and DMA members).
 */
#define LPUART_MCUX_DECLARE_CFG(n)                                             \
static const struct mcux_lpuart_config mcux_lpuart_##n##_config = {            \
	.base = (LPUART_Type *) DT_INST_REG_ADDR(n),                           \
	PARENT_DEV(n)                                                          \
	.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)),                    \
	.clock_subsys = (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name),  \
	.baud_rate = DT_INST_PROP(n, current_speed),                           \
	.flow_ctrl = FLOW_CONTROL(n),                                          \
	.parity = DT_INST_ENUM_IDX_OR(n, parity, UART_CFG_PARITY_NONE),        \
	.rs485_de_active_low = DT_INST_PROP(n, nxp_rs485_de_active_low),       \
	.loopback_en = DT_INST_PROP(n, nxp_loopback),                          \
	.single_wire = DT_INST_PROP(n, single_wire),                           \
	.rx_invert = DT_INST_PROP(n, rx_invert),                               \
	.tx_invert = DT_INST_PROP(n, tx_invert),                               \
	.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),                           \
	MCUX_LPUART_IRQ_INIT(n)                                                \
	RX_DMA_CONFIG(n)                                                       \
	TX_DMA_CONFIG(n)                                                       \
};
1382
/* Instantiate one driver instance: data struct, pinctrl, IRQ setup function,
 * config struct, and the Zephyr device object (init at PRE_KERNEL_1).
 */
#define LPUART_MCUX_INIT(n)						\
									\
	static struct mcux_lpuart_data mcux_lpuart_##n##_data;		\
									\
	PINCTRL_DT_INST_DEFINE(n);					\
	MCUX_LPUART_IRQ_DEFINE(n)					\
									\
	LPUART_MCUX_DECLARE_CFG(n)					\
									\
	DEVICE_DT_INST_DEFINE(n,					\
			    mcux_lpuart_init,				\
			    NULL,					\
			    &mcux_lpuart_##n##_data,			\
			    &mcux_lpuart_##n##_config,			\
			    PRE_KERNEL_1,				\
			    CONFIG_SERIAL_INIT_PRIORITY,		\
			    &mcux_lpuart_driver_api);			\

/* Expand the above for every enabled nxp,lpuart devicetree node. */
DT_INST_FOREACH_STATUS_OKAY(LPUART_MCUX_INIT)
1402