1 /*
2  * Copyright 2017,2021,2023-2025 NXP
3  * Copyright (c) 2020 Softube
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #define DT_DRV_COMPAT nxp_lpuart
9 
10 #include <errno.h>
11 #include <zephyr/device.h>
12 #include <zephyr/drivers/uart.h>
13 #include <zephyr/drivers/clock_control.h>
14 #include <zephyr/irq.h>
15 #include <zephyr/kernel.h>
16 #include <zephyr/pm/policy.h>
17 #include <zephyr/drivers/pinctrl.h>
18 #ifdef CONFIG_UART_ASYNC_API
19 #include <zephyr/drivers/dma.h>
20 #endif
21 #include <zephyr/logging/log.h>
22 #include <zephyr/sys/util_macro.h>
23 
24 #include <fsl_lpuart.h>
25 #if CONFIG_NXP_LP_FLEXCOMM
26 #include <zephyr/drivers/mfd/nxp_lp_flexcomm.h>
27 #endif
28 
29 LOG_MODULE_REGISTER(uart_mcux_lpuart, LOG_LEVEL_ERR);
30 
31 #define PINCTRL_STATE_FLOWCONTROL PINCTRL_STATE_PRIV_START
32 
33 #if defined(CONFIG_UART_LINE_CTRL) &&  \
34 	defined(FSL_FEATURE_LPUART_HAS_MODEM_SUPPORT) && \
35 	(FSL_FEATURE_LPUART_HAS_MODEM_SUPPORT)
36 #define UART_LINE_CTRL_ENABLE
37 #endif
38 
#if defined(CONFIG_UART_ASYNC_API) && defined(CONFIG_UART_INTERRUPT_DRIVEN)
/* there are already going to be build errors, but at least this message will
 * be the first error from this driver making the reason clear
 */
BUILD_ASSERT(IS_ENABLED(CONFIG_UART_EXCLUSIVE_API_CALLBACKS),
	     "LPUART must use exclusive api callbacks");
#endif
46 
47 #ifdef CONFIG_UART_ASYNC_API
/* DMA resources for one transfer direction (TX or RX) of an LPUART instance. */
struct lpuart_dma_config {
	const struct device *dma_dev;   /* DMA controller servicing this direction */
	const uint32_t dma_channel;     /* channel number on dma_dev */
	struct dma_config dma_cfg;      /* template passed to dma_config() */
};
53 #endif /* CONFIG_UART_ASYNC_API */
54 
/* Static (ROM) per-instance configuration, populated from devicetree. */
struct mcux_lpuart_config {
	LPUART_Type *base;                       /* LPUART peripheral register block */
	const struct device *clock_dev;          /* clock controller feeding the UART */
	const struct pinctrl_dev_config *pincfg; /* pin mux configuration */
	clock_control_subsys_t clock_subsys;
	uint32_t baud_rate;
	uint8_t flow_ctrl;
	uint8_t parity;
	bool rs485_de_active_low;
	bool loopback_en;
	bool single_wire;
	bool tx_invert;
	bool rx_invert;
#ifdef CONFIG_UART_MCUX_LPUART_ISR_SUPPORT
	/* Hooks the instance's IRQ(s) into the vector table at init time. */
	void (*irq_config_func)(const struct device *dev);
#endif
#ifdef CONFIG_UART_ASYNC_API
	const struct lpuart_dma_config rx_dma_config;
	const struct lpuart_dma_config tx_dma_config;
#endif /* CONFIG_UART_ASYNC_API */
};
76 
77 #ifdef CONFIG_UART_ASYNC_API
/* Bookkeeping for an in-flight RX DMA transfer (async API). */
struct mcux_lpuart_rx_dma_params {
	struct dma_block_config active_dma_block;
	uint8_t *buf;                         /* buffer DMA currently writes into */
	size_t buf_len;
	size_t offset;                        /* bytes already reported to the app */
	size_t counter;                       /* bytes received so far */
	struct k_work_delayable timeout_work; /* idle-line/RX timeout worker */
	size_t timeout_us;
};

/* Bookkeeping for an in-flight TX DMA transfer (async API). */
struct mcux_lpuart_tx_dma_params {
	struct dma_block_config active_dma_block;
	const uint8_t *buf;                   /* buffer being transmitted */
	size_t buf_len;
	struct k_work_delayable timeout_work; /* TX watchdog worker */
	size_t timeout_us;
};

/* Per-instance state for the async (DMA-driven) UART API. */
struct mcux_lpuart_async_data {
	const struct device *uart_dev;        /* back-pointer used by work handlers */
	struct mcux_lpuart_tx_dma_params tx_dma_params;
	struct mcux_lpuart_rx_dma_params rx_dma_params;
	uint8_t *next_rx_buffer;              /* buffer queued via uart_rx_buf_rsp() */
	size_t next_rx_buffer_len;
	uart_callback_t user_callback;
	void *user_data;
};
105 #endif
106 
107 #if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
/* Tracks which mutually-exclusive API (irq-driven vs async) claimed the device. */
enum mcux_lpuart_api {
	LPUART_NONE,
	LPUART_IRQ_DRIVEN,
	LPUART_ASYNC
};
113 #endif
114 
/* Mutable per-instance runtime state. */
struct mcux_lpuart_data {
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	uart_irq_callback_user_data_t callback; /* application ISR callback */
	void *cb_data;
#endif
#ifdef CONFIG_PM
	bool pm_state_lock_on;    /* true while this driver holds the PM state lock */
	bool tx_poll_stream_on;   /* poll_out() transmission still completing */
	bool tx_int_stream_on;    /* irq-driven transmission in progress */
#endif /* CONFIG_PM */
#ifdef CONFIG_UART_ASYNC_API
	struct mcux_lpuart_async_data async;
#endif
	struct uart_config uart_config;
#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	enum mcux_lpuart_api api_type;
#endif
};
133 
134 #ifdef CONFIG_PM
mcux_lpuart_pm_policy_state_lock_get(const struct device * dev)135 static void mcux_lpuart_pm_policy_state_lock_get(const struct device *dev)
136 {
137 	struct mcux_lpuart_data *data = dev->data;
138 
139 	if (!data->pm_state_lock_on) {
140 		data->pm_state_lock_on = true;
141 		pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
142 	}
143 }
144 
mcux_lpuart_pm_policy_state_lock_put(const struct device * dev)145 static void mcux_lpuart_pm_policy_state_lock_put(const struct device *dev)
146 {
147 	struct mcux_lpuart_data *data = dev->data;
148 
149 	if (data->pm_state_lock_on) {
150 		data->pm_state_lock_on = false;
151 		pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
152 	}
153 }
154 #endif /* CONFIG_PM */
155 
mcux_lpuart_poll_in(const struct device * dev,unsigned char * c)156 static int mcux_lpuart_poll_in(const struct device *dev, unsigned char *c)
157 {
158 	const struct mcux_lpuart_config *config = dev->config;
159 	uint32_t flags = LPUART_GetStatusFlags(config->base);
160 	int ret = -1;
161 
162 	if (flags & kLPUART_RxDataRegFullFlag) {
163 		*c = LPUART_ReadByte(config->base);
164 		ret = 0;
165 	}
166 
167 	return ret;
168 }
169 
/* Blocking write of one character: spin until the TX data register is empty,
 * then write the byte with interrupts locked.
 */
static void mcux_lpuart_poll_out(const struct device *dev, unsigned char c)
{
	const struct mcux_lpuart_config *config = dev->config;
	unsigned int key;
#ifdef CONFIG_PM
	struct mcux_lpuart_data *data = dev->data;
#endif

	/* Busy-wait for room in the transmit data register. */
	while (!(LPUART_GetStatusFlags(config->base)
		& LPUART_STAT_TDRE_MASK)) {
	}
	/* Lock interrupts while we send data */
	key = irq_lock();
#ifdef CONFIG_PM
	/*
	 * We must keep the part from entering lower power mode until the
	 * transmission completes. Set the power constraint, and enable
	 * the transmission complete interrupt so we know when transmission is
	 * completed.
	 */
	if (!data->tx_poll_stream_on && !data->tx_int_stream_on) {
		data->tx_poll_stream_on = true;
		mcux_lpuart_pm_policy_state_lock_get(dev);
		/* Enable TC interrupt */
		LPUART_EnableInterrupts(config->base,
			kLPUART_TransmissionCompleteInterruptEnable);

	}
#endif /* CONFIG_PM */

	LPUART_WriteByte(config->base, c);
	irq_unlock(key);
}
203 
/* Report and clear receive error conditions.
 *
 * Returns a bitmask of UART_ERROR_* values for any errors latched since the
 * last call; the corresponding hardware flags are cleared so the same error
 * is not reported twice.
 */
static int mcux_lpuart_err_check(const struct device *dev)
{
	const struct mcux_lpuart_config *config = dev->config;
	uint32_t flags = LPUART_GetStatusFlags(config->base);
	int err = 0;

	if (flags & kLPUART_RxOverrunFlag) {
		err |= UART_ERROR_OVERRUN;
	}

	if (flags & kLPUART_ParityErrorFlag) {
		err |= UART_ERROR_PARITY;
	}

	if (flags & kLPUART_FramingErrorFlag) {
		err |= UART_ERROR_FRAMING;
	}

	/* NOTE(review): noise errors are folded into UART_ERROR_PARITY here —
	 * verify whether UART_ERROR_NOISE from uart.h should be used instead.
	 */
	if (flags & kLPUART_NoiseErrorFlag) {
		err |= UART_ERROR_PARITY;
	}

	LPUART_ClearStatusFlags(config->base, kLPUART_RxOverrunFlag |
					      kLPUART_ParityErrorFlag |
					      kLPUART_FramingErrorFlag |
						  kLPUART_NoiseErrorFlag);

	return err;
}
233 
234 #ifdef CONFIG_UART_INTERRUPT_DRIVEN
mcux_lpuart_fifo_fill(const struct device * dev,const uint8_t * tx_data,int len)235 static int mcux_lpuart_fifo_fill(const struct device *dev,
236 				 const uint8_t *tx_data,
237 				 int len)
238 {
239 	const struct mcux_lpuart_config *config = dev->config;
240 	int num_tx = 0U;
241 
242 	while ((len - num_tx > 0) &&
243 	       (LPUART_GetStatusFlags(config->base)
244 		& LPUART_STAT_TDRE_MASK)) {
245 
246 		LPUART_WriteByte(config->base, tx_data[num_tx++]);
247 	}
248 	return num_tx;
249 }
250 
/* Drain available RX bytes into rx_data, up to len; returns the count read. */
static int mcux_lpuart_fifo_read(const struct device *dev, uint8_t *rx_data,
				 const int len)
{
	const struct mcux_lpuart_config *config = dev->config;
	int received = 0;

	while (received < len) {
		if ((LPUART_GetStatusFlags(config->base) & kLPUART_RxDataRegFullFlag) == 0U) {
			break;
		}
		rx_data[received] = LPUART_ReadByte(config->base);
		received++;
	}

	return received;
}
266 
/* Enable the TX-data-register-empty interrupt.
 *
 * With CONFIG_PM, also hands off PM ownership from the poll path to the
 * irq path and takes the power-state lock so the SoC cannot sleep while
 * interrupt-driven transmission is in progress.
 */
static void mcux_lpuart_irq_tx_enable(const struct device *dev)
{
	const struct mcux_lpuart_config *config = dev->config;
	uint32_t mask = kLPUART_TxDataRegEmptyInterruptEnable;
#ifdef CONFIG_PM
	struct mcux_lpuart_data *data = dev->data;
	unsigned int key;
#endif

#ifdef CONFIG_PM
	key = irq_lock();
	data->tx_poll_stream_on = false;
	data->tx_int_stream_on = true;
	/* Transmission complete interrupt no longer required */
	LPUART_DisableInterrupts(config->base,
		kLPUART_TransmissionCompleteInterruptEnable);
	/* Do not allow system to sleep while UART tx is ongoing */
	mcux_lpuart_pm_policy_state_lock_get(dev);
#endif
	LPUART_EnableInterrupts(config->base, mask);
#ifdef CONFIG_PM
	irq_unlock(key);
#endif
}
291 
/* Disable the TX-data-register-empty interrupt.
 *
 * With CONFIG_PM, releases the power-state lock: once the TX interrupt is
 * off, interrupt-driven transmission is considered complete.
 */
static void mcux_lpuart_irq_tx_disable(const struct device *dev)
{
	const struct mcux_lpuart_config *config = dev->config;
	uint32_t mask = kLPUART_TxDataRegEmptyInterruptEnable;
#ifdef CONFIG_PM
	struct mcux_lpuart_data *data = dev->data;
	unsigned int key;

	key = irq_lock();
#endif

	LPUART_DisableInterrupts(config->base, mask);
#ifdef CONFIG_PM
	data->tx_int_stream_on = false;
	/*
	 * If transmission IRQ is no longer enabled,
	 * transmission is complete. Release pm constraint.
	 */
	mcux_lpuart_pm_policy_state_lock_put(dev);
	irq_unlock(key);
#endif
}
314 
mcux_lpuart_irq_tx_complete(const struct device * dev)315 static int mcux_lpuart_irq_tx_complete(const struct device *dev)
316 {
317 	const struct mcux_lpuart_config *config = dev->config;
318 	uint32_t flags = LPUART_GetStatusFlags(config->base);
319 
320 	return (flags & kLPUART_TransmissionCompleteFlag) != 0U;
321 }
322 
mcux_lpuart_irq_tx_ready(const struct device * dev)323 static int mcux_lpuart_irq_tx_ready(const struct device *dev)
324 {
325 	const struct mcux_lpuart_config *config = dev->config;
326 	uint32_t mask = kLPUART_TxDataRegEmptyInterruptEnable;
327 	uint32_t flags = LPUART_GetStatusFlags(config->base);
328 
329 	return (LPUART_GetEnabledInterrupts(config->base) & mask)
330 		&& (flags & LPUART_STAT_TDRE_MASK);
331 }
332 
mcux_lpuart_irq_rx_enable(const struct device * dev)333 static void mcux_lpuart_irq_rx_enable(const struct device *dev)
334 {
335 	const struct mcux_lpuart_config *config = dev->config;
336 	uint32_t mask = kLPUART_RxDataRegFullInterruptEnable;
337 
338 	LPUART_EnableInterrupts(config->base, mask);
339 }
340 
mcux_lpuart_irq_rx_disable(const struct device * dev)341 static void mcux_lpuart_irq_rx_disable(const struct device *dev)
342 {
343 	const struct mcux_lpuart_config *config = dev->config;
344 	uint32_t mask = kLPUART_RxDataRegFullInterruptEnable;
345 
346 	LPUART_DisableInterrupts(config->base, mask);
347 }
348 
mcux_lpuart_irq_rx_full(const struct device * dev)349 static int mcux_lpuart_irq_rx_full(const struct device *dev)
350 {
351 	const struct mcux_lpuart_config *config = dev->config;
352 	uint32_t flags = LPUART_GetStatusFlags(config->base);
353 
354 	return (flags & kLPUART_RxDataRegFullFlag) != 0U;
355 }
356 
mcux_lpuart_irq_rx_pending(const struct device * dev)357 static int mcux_lpuart_irq_rx_pending(const struct device *dev)
358 {
359 	const struct mcux_lpuart_config *config = dev->config;
360 	uint32_t mask = kLPUART_RxDataRegFullInterruptEnable;
361 
362 	return (LPUART_GetEnabledInterrupts(config->base) & mask)
363 		&& mcux_lpuart_irq_rx_full(dev);
364 }
365 
mcux_lpuart_irq_err_enable(const struct device * dev)366 static void mcux_lpuart_irq_err_enable(const struct device *dev)
367 {
368 	const struct mcux_lpuart_config *config = dev->config;
369 	uint32_t mask = kLPUART_NoiseErrorInterruptEnable |
370 			kLPUART_FramingErrorInterruptEnable |
371 			kLPUART_ParityErrorInterruptEnable;
372 
373 	LPUART_EnableInterrupts(config->base, mask);
374 }
375 
mcux_lpuart_irq_err_disable(const struct device * dev)376 static void mcux_lpuart_irq_err_disable(const struct device *dev)
377 {
378 	const struct mcux_lpuart_config *config = dev->config;
379 	uint32_t mask = kLPUART_NoiseErrorInterruptEnable |
380 			kLPUART_FramingErrorInterruptEnable |
381 			kLPUART_ParityErrorInterruptEnable;
382 
383 	LPUART_DisableInterrupts(config->base, mask);
384 }
385 
/* Any interrupt-serviceable condition (TX ready or RX pending)? */
static int mcux_lpuart_irq_is_pending(const struct device *dev)
{
	if (mcux_lpuart_irq_tx_ready(dev)) {
		return 1;
	}

	return mcux_lpuart_irq_rx_pending(dev) ? 1 : 0;
}
391 
/* Nothing to latch: status flags are read live by the other irq_* calls. */
static int mcux_lpuart_irq_update(const struct device *dev)
{
	ARG_UNUSED(dev);

	return 1;
}
396 
/* Register the application's interrupt-driven callback.
 *
 * When exclusive API callbacks are enabled, claiming the irq-driven API
 * clears any async callback and marks the instance as irq-driven. A prior
 * async claim is logged as an error (the API is void, so it cannot fail).
 */
static void mcux_lpuart_irq_callback_set(const struct device *dev,
					 uart_irq_callback_user_data_t cb,
					 void *cb_data)
{
	struct mcux_lpuart_data *data = dev->data;

#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	if (data->api_type == LPUART_ASYNC) {
		LOG_ERR("UART irq and async api are exclusive");
	}
#endif

	data->callback = cb;
	data->cb_data = cb_data;

#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	data->async.user_callback = NULL;
	data->async.user_data = NULL;
	data->api_type = LPUART_IRQ_DRIVEN;
#endif
}
418 
419 #endif /* CONFIG_UART_INTERRUPT_DRIVEN */
420 
421 
422 #ifdef CONFIG_UART_ASYNC_API
/* (Re)arm an async timeout worker.
 *
 * SYS_FOREVER_US means "no timeout" and 0 means "disabled"; in both cases
 * the worker is left untouched.
 */
static inline void async_timer_start(struct k_work_delayable *work, size_t timeout_us)
{
	if ((timeout_us != SYS_FOREVER_US) && (timeout_us != 0)) {
		/* Cast: timeout_us is size_t; %d would mismatch on LP64. */
		LOG_DBG("async timer started for %u us", (uint32_t)timeout_us);
		k_work_reschedule(work, K_USEC(timeout_us));
	}
}
430 
async_user_callback(const struct device * dev,struct uart_event * evt)431 static void async_user_callback(const struct device *dev, struct uart_event *evt)
432 {
433 	const struct mcux_lpuart_data *data = dev->data;
434 
435 	if (data->async.user_callback) {
436 		data->async.user_callback(dev, evt, data->async.user_data);
437 	}
438 }
439 
async_evt_tx_done(struct device * dev)440 static void async_evt_tx_done(struct device *dev)
441 {
442 	struct mcux_lpuart_data *data = dev->data;
443 
444 	(void)k_work_cancel_delayable(&data->async.tx_dma_params.timeout_work);
445 
446 	LOG_DBG("TX done: %d", data->async.tx_dma_params.buf_len);
447 	struct uart_event event = {
448 		.type = UART_TX_DONE,
449 		.data.tx.buf = data->async.tx_dma_params.buf,
450 		.data.tx.len = data->async.tx_dma_params.buf_len
451 	};
452 
453 	/* Reset TX Buffer */
454 	data->async.tx_dma_params.buf = NULL;
455 	data->async.tx_dma_params.buf_len = 0U;
456 
457 	async_user_callback(dev, &event);
458 }
459 
/* Report newly-received bytes to the application.
 *
 * Emits UART_RX_RDY covering the range [offset, counter) of the active RX
 * buffer, then advances offset so the same bytes are not reported again.
 * No event is sent if nothing new arrived.
 */
static void async_evt_rx_rdy(const struct device *dev)
{
	struct mcux_lpuart_data *data = dev->data;
	struct mcux_lpuart_rx_dma_params *dma_params = &data->async.rx_dma_params;

	struct uart_event event = {
		.type = UART_RX_RDY,
		.data.rx.buf = dma_params->buf,
		.data.rx.len = dma_params->counter - dma_params->offset,
		.data.rx.offset = dma_params->offset
	};

	LOG_DBG("RX Ready: (len: %d off: %d buf: %x)", event.data.rx.len, event.data.rx.offset,
		(uint32_t)event.data.rx.buf);

	/* Update the current pos for new data */
	dma_params->offset = dma_params->counter;

	/* Only send event for new data */
	if (event.data.rx.len > 0) {
		async_user_callback(dev, &event);
	}
}
483 
async_evt_rx_buf_request(const struct device * dev)484 static void async_evt_rx_buf_request(const struct device *dev)
485 {
486 	struct uart_event evt = {
487 		.type = UART_RX_BUF_REQUEST,
488 	};
489 
490 	async_user_callback(dev, &evt);
491 }
492 
async_evt_rx_buf_release(const struct device * dev)493 static void async_evt_rx_buf_release(const struct device *dev)
494 {
495 	struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data;
496 	struct uart_event evt = {
497 		.type = UART_RX_BUF_RELEASED,
498 		.data.rx_buf.buf = data->async.rx_dma_params.buf,
499 	};
500 
501 	async_user_callback(dev, &evt);
502 	data->async.rx_dma_params.buf = NULL;
503 	data->async.rx_dma_params.buf_len = 0U;
504 	data->async.rx_dma_params.offset = 0U;
505 	data->async.rx_dma_params.counter = 0U;
506 }
507 
/* Flush bytes the DMA has written but not yet reported.
 *
 * Computes the received length from the DMA's remaining (pending) count;
 * if it has grown past the last reported counter, updates the counter and
 * emits UART_RX_RDY. Also clears the RX overrun flag so reception can
 * continue.
 */
static void mcux_lpuart_async_rx_flush(const struct device *dev)
{
	struct dma_status status;
	struct mcux_lpuart_data *data = dev->data;
	const struct mcux_lpuart_config *config = dev->config;

	const int get_status_result = dma_get_status(config->rx_dma_config.dma_dev,
						     config->rx_dma_config.dma_channel,
						     &status);

	if (get_status_result == 0) {
		const size_t rx_rcv_len = data->async.rx_dma_params.buf_len -
					  status.pending_length;

		/* pending_length == 0 means buffer-complete; the DMA callback
		 * handles that case, so skip reporting here.
		 */
		if (rx_rcv_len > data->async.rx_dma_params.counter && status.pending_length) {
			data->async.rx_dma_params.counter = rx_rcv_len;
			async_evt_rx_rdy(dev);
		}
		LPUART_ClearStatusFlags(config->base, kLPUART_RxOverrunFlag);
	} else {
		LOG_ERR("Error getting DMA status");
	}
}
531 
/* Stop async reception: flush pending data, release buffers, stop the DMA.
 *
 * With interrupts locked, disables the receiver, the idle-line interrupt and
 * RX DMA requests, reports any unflushed bytes, releases the active buffer
 * and (if present) the queued next buffer, stops the DMA channel, and emits
 * UART_RX_DISABLED. Returns the dma_stop() result.
 */
static int mcux_lpuart_rx_disable(const struct device *dev)
{
	LOG_INF("Disabling UART RX DMA");
	const struct mcux_lpuart_config *config = dev->config;
	struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data;
	LPUART_Type *lpuart = config->base;
	const unsigned int key = irq_lock();

	LPUART_EnableRx(lpuart, false);
	(void)k_work_cancel_delayable(&data->async.rx_dma_params.timeout_work);
	LPUART_DisableInterrupts(lpuart, kLPUART_IdleLineInterruptEnable);
	LPUART_ClearStatusFlags(lpuart, kLPUART_IdleLineFlag);
	LPUART_EnableRxDMA(lpuart, false);

	/* No active RX buffer, cannot disable */
	if (!data->async.rx_dma_params.buf) {
		LOG_ERR("No buffers to release from RX DMA!");
	} else {
		mcux_lpuart_async_rx_flush(dev);
		async_evt_rx_buf_release(dev);
		if (data->async.next_rx_buffer != NULL) {
			/* Promote the queued buffer so it can be released too. */
			data->async.rx_dma_params.buf = data->async.next_rx_buffer;
			data->async.rx_dma_params.buf_len = data->async.next_rx_buffer_len;
			data->async.next_rx_buffer = NULL;
			data->async.next_rx_buffer_len = 0;
			/* Release the next buffer as well */
			async_evt_rx_buf_release(dev);
		}
	}
	const int ret = dma_stop(config->rx_dma_config.dma_dev,
				 config->rx_dma_config.dma_channel);

	if (ret != 0) {
		LOG_ERR("Error stopping rx DMA. Reason: %x", ret);
	}
	LOG_DBG("RX: Disabled");
	struct uart_event disabled_event = {
		.type = UART_RX_DISABLED
	};

	async_user_callback(dev, &disabled_event);
	irq_unlock(key);
	return ret;
}
576 
/* Fill the RX DMA block descriptor: peripheral data register -> RX buffer.
 *
 * Requires an active RX buffer (asserted). Scatter is enabled on the
 * destination so the engine can chain to a reloaded buffer.
 */
static void prepare_rx_dma_block_config(const struct device *dev)
{
	struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data;
	const struct mcux_lpuart_config *config = dev->config;
	LPUART_Type *lpuart = config->base;
	struct mcux_lpuart_rx_dma_params *rx_dma_params = &data->async.rx_dma_params;

	assert(rx_dma_params->buf != NULL);
	assert(rx_dma_params->buf_len > 0);

	struct dma_block_config *head_block_config = &rx_dma_params->active_dma_block;

	head_block_config->dest_address = (uint32_t)rx_dma_params->buf;
	head_block_config->source_address = LPUART_GetDataRegisterAddress(lpuart);
	head_block_config->block_size = rx_dma_params->buf_len;
	head_block_config->dest_scatter_en = true;
}
594 
/* Configure and start the RX DMA channel, then enable LPUART RX DMA requests.
 *
 * Returns 0 on success or a negative error code. On any DMA failure the
 * UART's RX DMA request line is left disabled so the peripheral is never
 * pointed at a channel that is not running (previously it was enabled
 * unconditionally).
 */
static int configure_and_start_rx_dma(
	const struct mcux_lpuart_config *config, struct mcux_lpuart_data *data,
	LPUART_Type *lpuart)
{
	LOG_DBG("Configuring and Starting UART RX DMA");
	int ret = dma_config(config->rx_dma_config.dma_dev,
			     config->rx_dma_config.dma_channel,
			     (struct dma_config *)&config->rx_dma_config.dma_cfg);

	if (ret != 0) {
		LOG_ERR("Failed to Configure RX DMA: err: %d", ret);
		return ret;
	}

	ret = dma_start(config->rx_dma_config.dma_dev, config->rx_dma_config.dma_channel);
	if (ret < 0) {
		LOG_ERR("Failed to start DMA(Rx) Ch %d(%d)",
			config->rx_dma_config.dma_channel,
			ret);
		return ret;
	}

	LPUART_EnableRxDMA(lpuart, true);
	return ret;
}
617 
uart_mcux_lpuart_dma_replace_rx_buffer(const struct device * dev)618 static int uart_mcux_lpuart_dma_replace_rx_buffer(const struct device *dev)
619 {
620 	struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data;
621 	const struct mcux_lpuart_config *config = dev->config;
622 	LPUART_Type *lpuart = config->base;
623 
624 	LOG_DBG("Replacing RX buffer, new length: %d", data->async.next_rx_buffer_len);
625 	/* There must be a buffer to replace this one with */
626 	assert(data->async.next_rx_buffer != NULL);
627 	assert(data->async.next_rx_buffer_len != 0U);
628 
629 	const int success =
630 		dma_reload(config->rx_dma_config.dma_dev, config->rx_dma_config.dma_channel,
631 			   LPUART_GetDataRegisterAddress(lpuart),
632 			   (uint32_t)data->async.next_rx_buffer, data->async.next_rx_buffer_len);
633 
634 	if (success != 0) {
635 		LOG_ERR("Error %d reloading DMA with next RX buffer", success);
636 	}
637 
638 	return success;
639 }
640 
/* Shared DMA completion callback for both TX and RX channels.
 *
 * callback_arg is the UART device. On TX completion the TX DMA request is
 * disabled and UART_TX_DONE is reported. On RX completion the full buffer
 * is reported and released, the queued next buffer (already loaded into the
 * DMA engine by rx_buf_rsp) becomes the active one; if no next buffer was
 * supplied, reception is disabled.
 */
static void dma_callback(const struct device *dma_dev, void *callback_arg, uint32_t channel,
			 int dma_status)
{
	struct device *dev = (struct device *)callback_arg;
	const struct mcux_lpuart_config *config = dev->config;
	LPUART_Type *lpuart = config->base;
	struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data;

	LOG_DBG("DMA call back on channel %d", channel);
	struct dma_status status;
	const int get_status_result = dma_get_status(dma_dev, channel, &status);

	if (get_status_result < 0) {
		LOG_ERR("error on status get: %d", get_status_result);
	} else {
		LOG_DBG("DMA Status: b: %d dir: %d len_remain: %d", status.busy, status.dir,
			status.pending_length);
	}

	if (dma_status < 0) {
		LOG_ERR("Got error : %d", dma_status);
	}


	if (channel == config->tx_dma_config.dma_channel) {
		LOG_DBG("TX Channel");
		LPUART_EnableTxDMA(lpuart, false);
		async_evt_tx_done(dev);
	} else if (channel == config->rx_dma_config.dma_channel) {
		LOG_DBG("RX Channel");
		struct mcux_lpuart_rx_dma_params *rx_dma_params = &data->async.rx_dma_params;

		/* The RX Event indicates DMA transfer is complete and full buffer is available. */
		rx_dma_params->counter = rx_dma_params->buf_len;

		LOG_DBG("Current Buf (%x) full, swapping to new buf: %x",
			(uint32_t)rx_dma_params->buf,
			(uint32_t)data->async.next_rx_buffer);
		async_evt_rx_rdy(dev);
		async_evt_rx_buf_release(dev);

		/* Remember the buf so it can be released after it is done. */
		rx_dma_params->buf = data->async.next_rx_buffer;
		rx_dma_params->buf_len = data->async.next_rx_buffer_len;
		data->async.next_rx_buffer = NULL;
		data->async.next_rx_buffer_len = 0U;

		/* A new buffer was available (and already loaded into the DMA engine) */
		if (rx_dma_params->buf != NULL && rx_dma_params->buf_len > 0) {
			/* Request the next buffer */
			async_evt_rx_buf_request(dev);
		} else {
			/* Buffer full without valid next buffer, disable RX DMA */
			LOG_INF("Disabled RX DMA, no valid next buffer ");
			mcux_lpuart_rx_disable(dev);
		}
	} else {
		LOG_ERR("Got unexpected DMA Channel: %d", channel);
	}
}
701 
702 static int mcux_lpuart_configure_async(const struct device *dev);
703 
/* Register the application's async event callback.
 *
 * With exclusive API callbacks enabled, returns -ENOTSUP if the irq-driven
 * API already owns the device; otherwise clears the irq-driven callback and
 * claims the async API. Finishes by (re)configuring the async machinery.
 */
static int mcux_lpuart_callback_set(const struct device *dev, uart_callback_t callback,
				    void *user_data)
{
	struct mcux_lpuart_data *data = dev->data;

#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	if (data->api_type == LPUART_IRQ_DRIVEN) {
		LOG_ERR("UART irq and async api are exclusive");
		return -ENOTSUP;
	}
#endif

	data->async.user_callback = callback;
	data->async.user_data = user_data;

#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	data->callback = NULL;
	data->cb_data = NULL;
	data->api_type = LPUART_ASYNC;
#endif

	return mcux_lpuart_configure_async(dev);
}
727 
/* Start an async DMA transmission of len bytes from buf.
 *
 * Fails with -EBUSY if a transfer is already in flight. On success the TX
 * watchdog is armed with timeout_us (SYS_FOREVER_US disables it) and
 * completion is reported via UART_TX_DONE from the DMA callback.
 */
static int mcux_lpuart_tx(const struct device *dev, const uint8_t *buf, size_t len,
			  int32_t timeout_us)
{
	struct mcux_lpuart_data *data = dev->data;
	const struct mcux_lpuart_config *config = dev->config;
	LPUART_Type *lpuart = config->base;

	unsigned int key = irq_lock();

	/* Check for an ongoing transfer and abort if it is pending */
	struct dma_status status;
	const int get_status_result = dma_get_status(config->tx_dma_config.dma_dev,
						     config->tx_dma_config.dma_channel,
						     &status);

	if (get_status_result < 0 || status.busy) {
		irq_unlock(key);
		LOG_ERR("Unable to submit UART DMA Transfer.");
		return get_status_result < 0 ? get_status_result : -EBUSY;
	}

	int ret;

	LPUART_EnableTxDMA(lpuart, false);

	/* Describe the transfer: buf -> LPUART data register. */
	data->async.tx_dma_params.buf = buf;
	data->async.tx_dma_params.buf_len = len;
	data->async.tx_dma_params.active_dma_block.source_address = (uint32_t)buf;
	data->async.tx_dma_params.active_dma_block.dest_address =
		LPUART_GetDataRegisterAddress(lpuart);
	data->async.tx_dma_params.active_dma_block.block_size = len;
	data->async.tx_dma_params.active_dma_block.next_block = NULL;

	ret = dma_config(config->tx_dma_config.dma_dev,
			 config->tx_dma_config.dma_channel,
			 (struct dma_config *)&config->tx_dma_config.dma_cfg);

	if (ret == 0) {
		LOG_DBG("Starting UART DMA TX Ch %u", config->tx_dma_config.dma_channel);

		ret = dma_start(config->tx_dma_config.dma_dev,
				config->tx_dma_config.dma_channel);
		LPUART_EnableTxDMA(lpuart, true);
		if (ret != 0) {
			LOG_ERR("Failed to start DMA(Tx) Ch %d",
				config->tx_dma_config.dma_channel);
		}
		async_timer_start(&data->async.tx_dma_params.timeout_work, timeout_us);
	} else {
		LOG_ERR("Error configuring UART DMA: %x", ret);
	}
	irq_unlock(key);
	return ret;
}
782 
/* Abort the in-flight async transmission.
 *
 * Disables the TX DMA request, cancels the watchdog, computes how many
 * bytes were already sent from the DMA's pending count, stops the channel,
 * and on success reports UART_TX_ABORTED with that byte count.
 */
static int mcux_lpuart_tx_abort(const struct device *dev)
{
	struct mcux_lpuart_data *data = dev->data;
	const struct mcux_lpuart_config *config = dev->config;
	LPUART_Type *lpuart = config->base;

	LPUART_EnableTxDMA(lpuart, false);
	(void)k_work_cancel_delayable(&data->async.tx_dma_params.timeout_work);
	struct dma_status status;
	const int get_status_result = dma_get_status(config->tx_dma_config.dma_dev,
						     config->tx_dma_config.dma_channel,
						     &status);

	if (get_status_result < 0) {
		LOG_ERR("Error querying TX DMA Status during abort.");
	}

	/* If status is unavailable, conservatively report 0 bytes sent. */
	const size_t bytes_transmitted = (get_status_result == 0) ?
			 data->async.tx_dma_params.buf_len - status.pending_length : 0;

	const int ret = dma_stop(config->tx_dma_config.dma_dev, config->tx_dma_config.dma_channel);

	if (ret == 0) {
		struct uart_event tx_aborted_event = {
			.type = UART_TX_ABORTED,
			.data.tx.buf = data->async.tx_dma_params.buf,
			.data.tx.len = bytes_transmitted
		};
		async_user_callback(dev, &tx_aborted_event);
	}
	return ret;
}
815 
/* Begin async reception into buf (len bytes).
 *
 * Fails with -EBUSY if the RX DMA channel is already busy. Arms the
 * idle-line interrupt (which drives the timeout_us flush timer), starts
 * the RX DMA, requests the next buffer from the application, clears any
 * stale error flags that would block reception, and enables the receiver.
 */
static int mcux_lpuart_rx_enable(const struct device *dev, uint8_t *buf, const size_t len,
				 const int32_t timeout_us)
{
	LOG_DBG("Enabling UART RX DMA");
	struct mcux_lpuart_data *data = dev->data;
	const struct mcux_lpuart_config *config = dev->config;
	LPUART_Type *lpuart = config->base;

	struct mcux_lpuart_rx_dma_params *rx_dma_params = &data->async.rx_dma_params;

	unsigned int key = irq_lock();
	struct dma_status status;
	const int get_status_result = dma_get_status(config->rx_dma_config.dma_dev,
						     config->rx_dma_config.dma_channel,
						     &status);

	if (get_status_result < 0 || status.busy) {
		LOG_ERR("Unable to start receive on UART.");
		irq_unlock(key);
		return get_status_result < 0 ? get_status_result : -EBUSY;
	}

	rx_dma_params->timeout_us = timeout_us;
	rx_dma_params->buf = buf;
	rx_dma_params->buf_len = len;
	data->async.next_rx_buffer = NULL;
	data->async.next_rx_buffer_len = 0U;

	LPUART_EnableInterrupts(config->base, kLPUART_IdleLineInterruptEnable);
	prepare_rx_dma_block_config(dev);
	const int ret = configure_and_start_rx_dma(config, data, lpuart);

	/* Request the next buffer for when this buffer is full for continuous reception */
	async_evt_rx_buf_request(dev);

	/* Clear these status flags as they can prevent the UART device from receiving data */
	LPUART_ClearStatusFlags(config->base, kLPUART_RxOverrunFlag | kLPUART_ParityErrorFlag |
							kLPUART_FramingErrorFlag |
							kLPUART_NoiseErrorFlag);
	LPUART_EnableRx(lpuart, true);
	irq_unlock(key);
	return ret;
}
859 
/* Accept the application's next RX buffer in response to UART_RX_BUF_REQUEST.
 *
 * Queues the buffer and immediately loads it into the DMA engine so the
 * hardware can chain to it when the active buffer fills. Returns the DMA
 * reload result (previously the error was silently discarded and 0 was
 * always returned).
 */
static int mcux_lpuart_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len)
{
	struct mcux_lpuart_data *data = dev->data;
	unsigned int key;
	int ret;

	key = irq_lock();
	assert(data->async.next_rx_buffer == NULL);
	assert(data->async.next_rx_buffer_len == 0);
	data->async.next_rx_buffer = buf;
	data->async.next_rx_buffer_len = len;
	ret = uart_mcux_lpuart_dma_replace_rx_buffer(dev);
	irq_unlock(key);
	return ret;
}
874 
mcux_lpuart_async_rx_timeout(struct k_work * work)875 static void mcux_lpuart_async_rx_timeout(struct k_work *work)
876 {
877 	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
878 	struct mcux_lpuart_rx_dma_params *rx_params = CONTAINER_OF(dwork,
879 								   struct mcux_lpuart_rx_dma_params,
880 								   timeout_work);
881 	struct mcux_lpuart_async_data *async_data = CONTAINER_OF(rx_params,
882 								 struct mcux_lpuart_async_data,
883 								 rx_dma_params);
884 	const struct device *dev = async_data->uart_dev;
885 
886 	LOG_DBG("RX timeout");
887 	mcux_lpuart_async_rx_flush(dev);
888 }
889 
mcux_lpuart_async_tx_timeout(struct k_work * work)890 static void mcux_lpuart_async_tx_timeout(struct k_work *work)
891 {
892 	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
893 	struct mcux_lpuart_tx_dma_params *tx_params = CONTAINER_OF(dwork,
894 								   struct mcux_lpuart_tx_dma_params,
895 								   timeout_work);
896 	struct mcux_lpuart_async_data *async_data = CONTAINER_OF(tx_params,
897 								 struct mcux_lpuart_async_data,
898 								 tx_dma_params);
899 	const struct device *dev = async_data->uart_dev;
900 
901 	LOG_DBG("TX timeout");
902 	(void)mcux_lpuart_tx_abort(dev);
903 }
904 
905 #endif /* CONFIG_UART_ASYNC_API */
906 
907 #if CONFIG_UART_MCUX_LPUART_ISR_SUPPORT
908 
909 #ifdef CONFIG_UART_INTERRUPT_DRIVEN
mcux_lpuart_irq_driven_isr(const struct device * dev,struct mcux_lpuart_data * data,const struct mcux_lpuart_config * config,const uint32_t status)910 static inline void mcux_lpuart_irq_driven_isr(const struct device *dev,
911 					      struct mcux_lpuart_data *data,
912 					      const struct mcux_lpuart_config *config,
913 					      const uint32_t status) {
914 	if (data->callback) {
915 		data->callback(dev, data->cb_data);
916 	}
917 
918 	if (status & kLPUART_RxOverrunFlag) {
919 		LPUART_ClearStatusFlags(config->base, kLPUART_RxOverrunFlag);
920 	}
921 }
922 #endif
923 
924 #ifdef CONFIG_UART_ASYNC_API
mcux_lpuart_async_isr(struct mcux_lpuart_data * data,const struct mcux_lpuart_config * config,const uint32_t status)925 static inline void mcux_lpuart_async_isr(struct mcux_lpuart_data *data,
926 					      const struct mcux_lpuart_config *config,
927 					      const uint32_t status) {
928 	if (status & kLPUART_IdleLineFlag) {
929 		async_timer_start(&data->async.rx_dma_params.timeout_work,
930 				  data->async.rx_dma_params.timeout_us);
931 		LPUART_ClearStatusFlags(config->base, kLPUART_IdleLineFlag);
932 	}
933 
934 	if (status & kLPUART_RxOverrunFlag) {
935 		LPUART_ClearStatusFlags(config->base, kLPUART_RxOverrunFlag);
936 	}
937 }
938 #endif
939 
/* Top-level LPUART interrupt handler.
 *
 * Reads the status flags once, performs PM bookkeeping for poll-mode TX
 * completion, then dispatches to the interrupt-driven and/or async
 * sub-handler depending on which UART APIs are compiled in (and, when both
 * are, which API the application selected at runtime).
 */
static void mcux_lpuart_isr(const struct device *dev)
{
	struct mcux_lpuart_data *data = dev->data;
	const struct mcux_lpuart_config *config = dev->config;
	const uint32_t status = LPUART_GetStatusFlags(config->base);

#if CONFIG_PM
	if (status & kLPUART_TransmissionCompleteFlag) {

		if (data->tx_poll_stream_on) {
			/* Poll transmission complete. Allow system to sleep */
			LPUART_DisableInterrupts(config->base,
				kLPUART_TransmissionCompleteInterruptEnable);
			data->tx_poll_stream_on = false;
			mcux_lpuart_pm_policy_state_lock_put(dev);
		}
	}
#endif /* CONFIG_PM */

#if defined(CONFIG_UART_ASYNC_API) && defined(CONFIG_UART_INTERRUPT_DRIVEN)
	/* Both APIs built in: route by the runtime-selected API (exclusive
	 * callbacks are enforced by the BUILD_ASSERT at the top of the file).
	 */
	if (data->api_type == LPUART_IRQ_DRIVEN) {
		mcux_lpuart_irq_driven_isr(dev, data, config, status);
	} else if (data->api_type == LPUART_ASYNC) {
		mcux_lpuart_async_isr(data, config, status);
	}
#elif defined(CONFIG_UART_INTERRUPT_DRIVEN)
	mcux_lpuart_irq_driven_isr(dev, data, config, status);
#elif defined(CONFIG_UART_ASYNC_API)
	mcux_lpuart_async_isr(data, config, status);
#endif /* API */
}
971 #endif /* CONFIG_UART_MCUX_LPUART_ISR_SUPPORT */
972 
/* Translate a Zephyr uart_config into the HAL's lpuart_config_t.
 *
 * Fills @p uart_config from @p cfg (parity, data bits, stop bits and flow
 * control where the silicon supports them, plus baud rate). RX is enabled;
 * TX is left disabled and is turned on later, after TX-RTS (RS-485) setup.
 *
 * Returns 0 on success, -ENOTSUP for any setting the hardware cannot do.
 */
static int mcux_lpuart_configure_basic(const struct device *dev, const struct uart_config *cfg,
					lpuart_config_t *uart_config)
{
	if (cfg->parity == UART_CFG_PARITY_NONE) {
		uart_config->parityMode = kLPUART_ParityDisabled;
	} else if (cfg->parity == UART_CFG_PARITY_ODD) {
		uart_config->parityMode = kLPUART_ParityOdd;
	} else if (cfg->parity == UART_CFG_PARITY_EVEN) {
		uart_config->parityMode = kLPUART_ParityEven;
	} else {
		return -ENOTSUP;
	}

	if (cfg->data_bits == UART_CFG_DATA_BITS_8) {
		uart_config->dataBitsCount = kLPUART_EightDataBits;
#if defined(FSL_FEATURE_LPUART_HAS_7BIT_DATA_SUPPORT) && \
	FSL_FEATURE_LPUART_HAS_7BIT_DATA_SUPPORT
	} else if (cfg->data_bits == UART_CFG_DATA_BITS_7) {
		uart_config->dataBitsCount = kLPUART_SevenDataBits;
#endif
	} else {
		return -ENOTSUP;
	}

#if defined(FSL_FEATURE_LPUART_HAS_STOP_BIT_CONFIG_SUPPORT) && \
	FSL_FEATURE_LPUART_HAS_STOP_BIT_CONFIG_SUPPORT
	if (cfg->stop_bits == UART_CFG_STOP_BITS_1) {
		uart_config->stopBitCount = kLPUART_OneStopBit;
	} else if (cfg->stop_bits == UART_CFG_STOP_BITS_2) {
		uart_config->stopBitCount = kLPUART_TwoStopBit;
	} else {
		return -ENOTSUP;
	}
#endif

#if defined(FSL_FEATURE_LPUART_HAS_MODEM_SUPPORT) && \
	FSL_FEATURE_LPUART_HAS_MODEM_SUPPORT
	/* RS-485 direction control is configured separately via MODIR, so it
	 * uses the same CTS/RTS settings as "none" here.
	 */
	if (cfg->flow_ctrl == UART_CFG_FLOW_CTRL_NONE ||
	    cfg->flow_ctrl == UART_CFG_FLOW_CTRL_RS485) {
		uart_config->enableTxCTS = false;
		uart_config->enableRxRTS = false;
	} else if (cfg->flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS) {
		uart_config->enableTxCTS = true;
		uart_config->enableRxRTS = true;
	} else {
		return -ENOTSUP;
	}
#endif

	uart_config->baudRate_Bps = cfg->baudrate;
	uart_config->enableRx = true;
	/* Tx will be enabled manually after set tx-rts */
	uart_config->enableTx = false;

	return 0;
}
1043 
1044 #ifdef CONFIG_UART_ASYNC_API
/* Prepare driver state for the async (DMA-based) API: validate the stored
 * uart_config against hardware capabilities, reset the next-RX-buffer
 * bookkeeping, initialize the RX/TX timeout work items, and drain any stale
 * bytes left in the RX FIFO.
 *
 * Returns 0 on success, or a negative errno from the basic-config
 * translation.
 */
static int mcux_lpuart_configure_async(const struct device *dev)
{
	const struct mcux_lpuart_config *config = dev->config;
	struct mcux_lpuart_data *data = dev->data;
	lpuart_config_t uart_config;
	int ret;

	LPUART_GetDefaultConfig(&uart_config);

	/* Re-translate the currently stored configuration; acts as a
	 * capability check before enabling the async path.
	 */
	ret = mcux_lpuart_configure_basic(dev, &data->uart_config, &uart_config);
	if (ret) {
		return ret;
	}

	uart_config.rxIdleType = kLPUART_IdleTypeStopBit;
	uart_config.rxIdleConfig = kLPUART_IdleCharacter1;
	data->async.next_rx_buffer = NULL;
	data->async.next_rx_buffer_len = 0;
	data->async.uart_dev = dev;
	k_work_init_delayable(&data->async.rx_dma_params.timeout_work,
			      mcux_lpuart_async_rx_timeout);
	k_work_init_delayable(&data->async.tx_dma_params.timeout_work,
			      mcux_lpuart_async_tx_timeout);

	/* Disable the UART Receiver until the async API provides a buffer to
	 * receive into with rx_enable
	 */
	uart_config.enableRx = false;
	/* NOTE(review): uart_config is populated above but never passed to
	 * LPUART_Init here; the hardware appears to rely on the earlier
	 * mcux_lpuart_configure_init call. Confirm the idle-line and enableRx
	 * settings in this local are intentionally not applied.
	 */
	/* Clearing the fifo of any junk received before the async rx enable was called */
	while (LPUART_GetRxFifoCount(config->base) > 0) {
		LPUART_ReadByte(config->base);
	}

	return 0;
}
1080 #endif
1081 
/* Apply @p cfg to the LPUART hardware and record it as the active config.
 *
 * Resolves the module clock rate, initializes the peripheral through the HAL,
 * then programs the devicetree-driven extras: RS-485 direction control,
 * loopback, single-wire (half-duplex) and TX/RX signal inversion.
 *
 * Returns 0 on success, -ENODEV if the clock device is not ready, -EINVAL if
 * the clock rate cannot be read, or -ENOTSUP from the basic translation.
 */
static int mcux_lpuart_configure_init(const struct device *dev, const struct uart_config *cfg)
{
	const struct mcux_lpuart_config *config = dev->config;
	struct mcux_lpuart_data *data = dev->data;
	lpuart_config_t uart_config;
	uint32_t clock_freq;
	int ret;

	if (!device_is_ready(config->clock_dev)) {
		return -ENODEV;
	}

	if (clock_control_get_rate(config->clock_dev, config->clock_subsys,
				   &clock_freq)) {
		return -EINVAL;
	}

	LPUART_GetDefaultConfig(&uart_config);

	ret = mcux_lpuart_configure_basic(dev, cfg, &uart_config);
	if (ret) {
		return ret;
	}

	LPUART_Init(config->base, &uart_config, clock_freq);

#if defined(FSL_FEATURE_LPUART_HAS_MODEM_SUPPORT) && \
	FSL_FEATURE_LPUART_HAS_MODEM_SUPPORT
	if (cfg->flow_ctrl == UART_CFG_FLOW_CTRL_RS485) {
		/* Set the LPUART into RS485 mode (tx driver enable using RTS) */
		config->base->MODIR |= LPUART_MODIR_TXRTSE(true);
		if (!config->rs485_de_active_low) {
			/* Driver-enable is active-high: invert TXRTS polarity. */
			config->base->MODIR |= LPUART_MODIR_TXRTSPOL(1);
		}
	}
#endif

	/* Now can enable tx */
	config->base->CTRL |= LPUART_CTRL_TE(true);


	if (config->loopback_en) {
		/* Set the LPUART into loopback mode */
		config->base->CTRL |= LPUART_CTRL_LOOPS_MASK;
		config->base->CTRL &= ~LPUART_CTRL_RSRC_MASK;
	} else if (config->single_wire) {
		/* Enable the single wire / half-duplex mode, only possible when
		 * loopback is disabled. We need a critical section to prevent
		 * the UART firing an interrupt during mode switch
		 */
		unsigned int key = irq_lock();

		config->base->CTRL |= (LPUART_CTRL_LOOPS_MASK | LPUART_CTRL_RSRC_MASK);
		irq_unlock(key);
	} else {
#ifdef LPUART_CTRL_TXINV
		/* Only invert TX in full-duplex mode */
		if (config->tx_invert) {
			config->base->CTRL |= LPUART_CTRL_TXINV(1);
		}
#endif
	}

#ifdef LPUART_STAT_RXINV
	if (config->rx_invert) {
		config->base->STAT |= LPUART_STAT_RXINV(1);
	}
#endif

	/* update internal uart_config */
	data->uart_config = *cfg;

	return 0;
}
1156 
1157 #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
mcux_lpuart_config_get(const struct device * dev,struct uart_config * cfg)1158 static int mcux_lpuart_config_get(const struct device *dev, struct uart_config *cfg)
1159 {
1160 	struct mcux_lpuart_data *data = dev->data;
1161 	*cfg = data->uart_config;
1162 	return 0;
1163 }
1164 
mcux_lpuart_configure(const struct device * dev,const struct uart_config * cfg)1165 static int mcux_lpuart_configure(const struct device *dev,
1166 				 const struct uart_config *cfg)
1167 {
1168 	const struct mcux_lpuart_config *config = dev->config;
1169 
1170 	/* Make sure that RSRC is de-asserted otherwise deinit will hang. */
1171 	config->base->CTRL &= ~LPUART_CTRL_RSRC_MASK;
1172 
1173 	/* disable LPUART */
1174 	LPUART_Deinit(config->base);
1175 
1176 	int ret = mcux_lpuart_configure_init(dev, cfg);
1177 	if (ret) {
1178 		return ret;
1179 	}
1180 
1181 	/* wait for hardware init */
1182 	k_sleep(K_MSEC(1));
1183 
1184 	return 0;
1185 }
1186 #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */
1187 
1188 #ifdef UART_LINE_CTRL_ENABLE
mcux_lpuart_line_ctrl_set_rts(const struct mcux_lpuart_config * config,uint32_t val)1189 static void mcux_lpuart_line_ctrl_set_rts(const struct mcux_lpuart_config *config,
1190 		uint32_t val)
1191 {
1192 	if (val >= 1U) {
1193 		/* Reset TXRTS to set RXRTSE bit, this provides high-level on RTS line */
1194 		config->base->MODIR &= ~(LPUART_MODIR_TXRTSPOL_MASK | LPUART_MODIR_TXRTSE_MASK);
1195 		config->base->MODIR |= LPUART_MODIR_RXRTSE_MASK;
1196 	} else {
1197 		/* Set TXRTSE to reset RXRTSE bit,this provide low-level on RTS line*/
1198 		config->base->MODIR &= ~(LPUART_MODIR_RXRTSE_MASK);
1199 		config->base->MODIR |= (LPUART_MODIR_TXRTSPOL_MASK | LPUART_MODIR_TXRTSE_MASK);
1200 	}
1201 }
1202 
mcux_lpuart_line_ctrl_set(const struct device * dev,uint32_t ctrl,uint32_t val)1203 static int mcux_lpuart_line_ctrl_set(const struct device *dev,
1204 		uint32_t ctrl, uint32_t val)
1205 {
1206 	const struct mcux_lpuart_config *config = dev->config;
1207 	int ret = 0;
1208 
1209 	switch (ctrl) {
1210 	case UART_LINE_CTRL_RTS:
1211 		/* Disable Transmitter and Receiver */
1212 		config->base->CTRL &= ~(LPUART_CTRL_TE_MASK | LPUART_CTRL_RE_MASK);
1213 
1214 		mcux_lpuart_line_ctrl_set_rts(config, val);
1215 
1216 		break;
1217 
1218 	default:
1219 		ret = -ENOTSUP;
1220 	}
1221 
1222 	return ret;
1223 }
1224 #endif /* UART_LINE_CTRL_ENABLE */
1225 
mcux_lpuart_init(const struct device * dev)1226 static int mcux_lpuart_init(const struct device *dev)
1227 {
1228 	const struct mcux_lpuart_config *config = dev->config;
1229 	struct mcux_lpuart_data *data = dev->data;
1230 	struct uart_config *uart_api_config = &data->uart_config;
1231 	int err;
1232 
1233 	uart_api_config->baudrate = config->baud_rate;
1234 	uart_api_config->parity = config->parity;
1235 	uart_api_config->stop_bits = UART_CFG_STOP_BITS_1;
1236 	uart_api_config->data_bits = UART_CFG_DATA_BITS_8;
1237 	uart_api_config->flow_ctrl = config->flow_ctrl;
1238 
1239 	/* set initial configuration */
1240 	mcux_lpuart_configure_init(dev, uart_api_config);
1241 	if (config->flow_ctrl) {
1242 		const struct pinctrl_state *state;
1243 
1244 		err = pinctrl_lookup_state(config->pincfg, PINCTRL_STATE_FLOWCONTROL, &state);
1245 		if (err < 0) {
1246 			err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
1247 		}
1248 	} else {
1249 		err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
1250 	}
1251 	if (err < 0) {
1252 		return err;
1253 	}
1254 
1255 #ifdef CONFIG_UART_MCUX_LPUART_ISR_SUPPORT
1256 	config->irq_config_func(dev);
1257 #endif
1258 
1259 #ifdef CONFIG_UART_EXCLUSIVE_API_CALLBACKS
1260 	data->api_type = LPUART_NONE;
1261 #endif
1262 
1263 #ifdef CONFIG_PM
1264 	data->pm_state_lock_on = false;
1265 	data->tx_poll_stream_on = false;
1266 	data->tx_int_stream_on = false;
1267 #endif
1268 
1269 	return 0;
1270 }
1271 
/* UART driver API vtable. Polling and error-check entries are always
 * present; runtime (re)configure, interrupt-driven, async and line-control
 * groups are compiled in per Kconfig/feature macros.
 */
static DEVICE_API(uart, mcux_lpuart_driver_api) = {
	.poll_in = mcux_lpuart_poll_in,
	.poll_out = mcux_lpuart_poll_out,
	.err_check = mcux_lpuart_err_check,
#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
	.configure = mcux_lpuart_configure,
	.config_get = mcux_lpuart_config_get,
#endif
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	.fifo_fill = mcux_lpuart_fifo_fill,
	.fifo_read = mcux_lpuart_fifo_read,
	.irq_tx_enable = mcux_lpuart_irq_tx_enable,
	.irq_tx_disable = mcux_lpuart_irq_tx_disable,
	.irq_tx_complete = mcux_lpuart_irq_tx_complete,
	.irq_tx_ready = mcux_lpuart_irq_tx_ready,
	.irq_rx_enable = mcux_lpuart_irq_rx_enable,
	.irq_rx_disable = mcux_lpuart_irq_rx_disable,
	.irq_rx_ready = mcux_lpuart_irq_rx_full,
	.irq_err_enable = mcux_lpuart_irq_err_enable,
	.irq_err_disable = mcux_lpuart_irq_err_disable,
	.irq_is_pending = mcux_lpuart_irq_is_pending,
	.irq_update = mcux_lpuart_irq_update,
	.irq_callback_set = mcux_lpuart_irq_callback_set,
#endif
#ifdef CONFIG_UART_ASYNC_API
	.callback_set = mcux_lpuart_callback_set,
	.tx = mcux_lpuart_tx,
	.tx_abort = mcux_lpuart_tx_abort,
	.rx_enable = mcux_lpuart_rx_enable,
	.rx_buf_rsp = mcux_lpuart_rx_buf_rsp,
	.rx_disable = mcux_lpuart_rx_disable,
#endif /* CONFIG_UART_ASYNC_API */
#ifdef UART_LINE_CTRL_ENABLE
	.line_ctrl_set = mcux_lpuart_line_ctrl_set,
#endif  /* UART_LINE_CTRL_ENABLE */
};
1308 
1309 
#ifdef CONFIG_UART_MCUX_LPUART_ISR_SUPPORT
/* Connect and enable the devicetree interrupt at index i for instance n. */
#define MCUX_LPUART_IRQ_INSTALL(n, i)					\
	do {								\
		IRQ_CONNECT(DT_INST_IRQN_BY_IDX(n, i),			\
			    DT_INST_IRQ_BY_IDX(n, i, priority),		\
			    mcux_lpuart_isr, DEVICE_DT_INST_GET(n), 0);	\
									\
		irq_enable(DT_INST_IRQ_BY_IDX(n, i, irq));		\
	} while (false)
/* Install up to two interrupts (indices 0 and 1), whichever exist in DT. */
#define MCUX_LPUART_IRQS_INSTALL(n)					\
		IF_ENABLED(DT_INST_IRQ_HAS_IDX(n, 0),			\
			   (MCUX_LPUART_IRQ_INSTALL(n, 0);))		\
		IF_ENABLED(DT_INST_IRQ_HAS_IDX(n, 1),			\
			   (MCUX_LPUART_IRQ_INSTALL(n, 1);))
/* When using LP Flexcomm driver, register the interrupt handler
 * so we receive notification from the LP Flexcomm interrupt handler.
 */
#define MCUX_LPUART_LPFLEXCOMM_IRQ_CONFIG(n)				\
	nxp_lp_flexcomm_setirqhandler(DEVICE_DT_GET(DT_INST_PARENT(n)),	\
					DEVICE_DT_INST_GET(n),		\
					LP_FLEXCOMM_PERIPH_LPUART,	\
					mcux_lpuart_isr)
/* Config-struct initializer fragment wiring in the per-instance IRQ setup. */
#define MCUX_LPUART_IRQ_INIT(n) .irq_config_func = mcux_lpuart_config_func_##n,
/* Define the per-instance IRQ configuration function; dispatches between the
 * LP Flexcomm parent path and direct IRQ installation based on the DT parent.
 */
#define MCUX_LPUART_IRQ_DEFINE(n)						\
	static void mcux_lpuart_config_func_##n(const struct device *dev)	\
	{									\
		COND_CODE_1(DT_NODE_HAS_COMPAT(DT_INST_PARENT(n), nxp_lp_flexcomm), \
			    (MCUX_LPUART_LPFLEXCOMM_IRQ_CONFIG(n)),		\
			    (MCUX_LPUART_IRQS_INSTALL(n)));			\
	}
#else
/* No ISR support: both hooks expand to nothing. */
#define MCUX_LPUART_IRQ_INIT(n)
#define MCUX_LPUART_IRQ_DEFINE(n)
#endif /* CONFIG_UART_MCUX_LPUART_ISR_SUPPORT */
1344 
#ifdef CONFIG_UART_ASYNC_API
/* Config-struct initializer fragment for the TX DMA channel of instance id:
 * memory-to-peripheral, single-byte transfers, channel/mux/slot taken from
 * the instance's "tx" dmas devicetree entry.
 */
#define TX_DMA_CONFIG(id)								       \
	.tx_dma_config = {								       \
		.dma_dev =								       \
			DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, tx)),		       \
		.dma_channel =								       \
			DT_INST_DMAS_CELL_BY_NAME(id, tx, mux),				       \
		.dma_cfg = {								       \
			.source_burst_length = 1,					       \
			.dest_burst_length = 1,						       \
			.source_data_size = 1,						       \
			.dest_data_size = 1,						       \
			.complete_callback_en = 1,					       \
			.error_callback_dis = 0,					       \
			.block_count = 1,						       \
			.head_block =							       \
				&mcux_lpuart_##id##_data.async.tx_dma_params.active_dma_block, \
			.channel_direction = MEMORY_TO_PERIPHERAL,			       \
			.dma_slot = DT_INST_DMAS_CELL_BY_NAME(				       \
				id, tx, source),					       \
			.dma_callback = dma_callback,					       \
			.user_data = (void *)DEVICE_DT_INST_GET(id)			       \
		},									       \
	},
/* Config-struct initializer fragment for the RX DMA channel of instance id:
 * peripheral-to-memory, single-byte transfers, cyclic mode, wired to the
 * instance's "rx" dmas devicetree entry.
 */
#define RX_DMA_CONFIG(id)								       \
	.rx_dma_config = {								       \
		.dma_dev =								       \
			DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, rx)),		       \
		.dma_channel =								       \
			DT_INST_DMAS_CELL_BY_NAME(id, rx, mux),				       \
		.dma_cfg = {								       \
			.source_burst_length = 1,					       \
			.dest_burst_length = 1,						       \
			.source_data_size = 1,						       \
			.dest_data_size = 1,						       \
			.complete_callback_en = 1,					       \
			.error_callback_dis = 0,					       \
			.block_count = 1,						       \
			.head_block =							       \
				&mcux_lpuart_##id##_data.async.rx_dma_params.active_dma_block, \
			.channel_direction = PERIPHERAL_TO_MEMORY,			       \
			.dma_slot = DT_INST_DMAS_CELL_BY_NAME(				       \
				id, rx, source),					       \
			.dma_callback = dma_callback,					       \
			.user_data = (void *)DEVICE_DT_INST_GET(id),			       \
			.cyclic = 1,							       \
		},									       \
	},
#else
/* Async API disabled: no DMA configuration is emitted. */
#define RX_DMA_CONFIG(n)
#define TX_DMA_CONFIG(n)
#endif /* CONFIG_UART_ASYNC_API */
1397 
/* Resolve the initial flow-control mode for instance n from devicetree:
 * hw-flow-control selects RTS/CTS and takes precedence over nxp,rs485-mode;
 * neither property yields "none". (Nested ternary is right-associative.)
 */
#define FLOW_CONTROL(n) \
	DT_INST_PROP(n, hw_flow_control)   \
		? UART_CFG_FLOW_CTRL_RTS_CTS     \
		: DT_INST_PROP(n, nxp_rs485_mode)\
				? UART_CFG_FLOW_CTRL_RS485   \
				: UART_CFG_FLOW_CTRL_NONE
1404 
/* Emit the static per-instance mcux_lpuart_config, populated entirely from
 * devicetree plus the optional IRQ and DMA initializer fragments.
 */
#define LPUART_MCUX_DECLARE_CFG(n)                                      \
static const struct mcux_lpuart_config mcux_lpuart_##n##_config = {     \
	.base = (LPUART_Type *) DT_INST_REG_ADDR(n),                          \
	.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)),                   \
	.clock_subsys = (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name),	\
	.baud_rate = DT_INST_PROP(n, current_speed),                          \
	.flow_ctrl = FLOW_CONTROL(n),                                         \
	.parity = DT_INST_ENUM_IDX(n, parity),                                \
	.rs485_de_active_low = DT_INST_PROP(n, nxp_rs485_de_active_low),      \
	.loopback_en = DT_INST_PROP(n, nxp_loopback),                         \
	.single_wire = DT_INST_PROP(n, single_wire),	                      \
	.rx_invert = DT_INST_PROP(n, rx_invert),	                      \
	.tx_invert = DT_INST_PROP(n, tx_invert),	                      \
	.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),                          \
	MCUX_LPUART_IRQ_INIT(n) \
	RX_DMA_CONFIG(n)        \
	TX_DMA_CONFIG(n)        \
};
1423 
/* Per-instance instantiation: data struct, pinctrl, IRQ config function,
 * config struct and the Zephyr device definition (PRE_KERNEL_1).
 */
#define LPUART_MCUX_INIT(n)						\
									\
	static struct mcux_lpuart_data mcux_lpuart_##n##_data;		\
									\
	PINCTRL_DT_INST_DEFINE(n);					\
	MCUX_LPUART_IRQ_DEFINE(n)					\
									\
	LPUART_MCUX_DECLARE_CFG(n)					\
									\
	DEVICE_DT_INST_DEFINE(n,					\
			    mcux_lpuart_init,				\
			    NULL,					\
			    &mcux_lpuart_##n##_data,			\
			    &mcux_lpuart_##n##_config,			\
			    PRE_KERNEL_1,				\
			    CONFIG_SERIAL_INIT_PRIORITY,		\
			    &mcux_lpuart_driver_api);			\

/* Instantiate the driver for every enabled nxp,lpuart devicetree node. */
DT_INST_FOREACH_STATUS_OKAY(LPUART_MCUX_INIT)
1443