/*
 * Copyright (c) 2020 Nuvoton Technology Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT nuvoton_npcx_uart

#include <zephyr/sys/__assert.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/kernel.h>
#include <zephyr/pm/device.h>
#include <zephyr/pm/policy.h>
#include <soc.h>
#include "soc_miwu.h"
#include "soc_power.h"

#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(uart_npcx, CONFIG_UART_LOG_LEVEL);

/* Driver config */
struct uart_npcx_config {
	struct uart_reg *inst;
#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
	uart_irq_config_func_t irq_config_func;
#endif
	/* clock configuration */
	struct npcx_clk_cfg clk_cfg;
	/* int-mux configuration */
	const struct npcx_wui uart_rx_wui;
	/* pinmux configuration */
	const struct pinctrl_dev_config *pcfg;
#ifdef CONFIG_UART_ASYNC_API
	struct npcx_clk_cfg mdma_clk_cfg;
	struct mdma_reg *mdma_reg_base;
#endif
};

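/*
 * PM policy flags: while a TX or RX transfer is in flight, the matching flag
 * below holds a PM state lock so the system cannot drop into suspend-to-idle
 * and stall the UART mid-transfer.
 */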
enum uart_pm_policy_state_flag {
	UART_PM_POLICY_STATE_TX_FLAG,
	UART_PM_POLICY_STATE_RX_FLAG,

	UART_PM_POLICY_STATE_FLAG_COUNT,
};

#ifdef CONFIG_UART_ASYNC_API
struct uart_npcx_rx_dma_params {
	uint8_t *buf;
	size_t buf_len;
	size_t offset;
	size_t counter;
	size_t timeout_us;
	struct k_work_delayable timeout_work;
	bool enabled;
};

struct uart_npcx_tx_dma_params {
	const uint8_t *buf;
	size_t buf_len;
	struct k_work_delayable timeout_work;
	size_t timeout_us;
};

struct uart_npcx_async_data {
	const struct device *uart_dev;
	uart_callback_t user_callback;
	void *user_data;
	struct uart_npcx_rx_dma_params rx_dma_params;
	struct uart_npcx_tx_dma_params tx_dma_params;
	uint8_t *next_rx_buffer;
	size_t next_rx_buffer_len;
	bool tx_in_progress;
};
#endif

/* Driver data */
struct uart_npcx_data {
	/* Baud rate */
	uint32_t baud_rate;
	struct miwu_callback uart_rx_cb;
	struct k_spinlock lock;
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	uart_irq_callback_user_data_t user_cb;
	void *user_data;
#endif
#ifdef CONFIG_PM
	ATOMIC_DEFINE(pm_policy_state_flag, UART_PM_POLICY_STATE_FLAG_COUNT);
#ifdef CONFIG_UART_CONSOLE_INPUT_EXPIRED
	struct k_work_delayable rx_refresh_timeout_work;
#endif
#endif
#ifdef CONFIG_UART_ASYNC_API
	struct uart_npcx_async_data async;
#endif
};

#ifdef CONFIG_PM
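/*
 * The atomic test-and-set/test-and-clear below makes each get/put pair
 * idempotent per flag, so the underlying PM lock reference count stays
 * balanced even if the same flag is requested or released twice in a row.
 */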
static void uart_npcx_pm_policy_state_lock_get(struct uart_npcx_data *data,
					       enum uart_pm_policy_state_flag flag)
{
	if (atomic_test_and_set_bit(data->pm_policy_state_flag, flag) == 0) {
		pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
	}
}

static void uart_npcx_pm_policy_state_lock_put(struct uart_npcx_data *data,
					       enum uart_pm_policy_state_flag flag)
{
	if (atomic_test_and_clear_bit(data->pm_policy_state_flag, flag) == 1) {
		pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
	}
}
#endif

/* UART local functions */
static int uart_set_npcx_baud_rate(struct uart_reg *const inst, int baud_rate, int src_clk)
{
	/*
	 * Only two baud rate settings are supported so far:
	 *   -  115200
	 *   - 3000000
	 */
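	/*
	 * The UPSR (prescaler select) and UBAUD (divisor) pairs below are
	 * pre-computed per source clock; see the NPCX datasheet for the
	 * baud-rate equation these register values satisfy.
	 */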
	if (baud_rate == 115200) {
		if (src_clk == MHZ(15)) {
			inst->UPSR = 0x38;
			inst->UBAUD = 0x01;
		} else if (src_clk == MHZ(20)) {
			inst->UPSR = 0x08;
			inst->UBAUD = 0x0a;
		} else if (src_clk == MHZ(25)) {
			inst->UPSR = 0x10;
			inst->UBAUD = 0x08;
		} else if (src_clk == MHZ(30)) {
			inst->UPSR = 0x10;
			inst->UBAUD = 0x0a;
		} else if (src_clk == MHZ(48)) {
			inst->UPSR = 0x08;
			inst->UBAUD = 0x19;
		} else if (src_clk == MHZ(50)) {
			inst->UPSR = 0x08;
			inst->UBAUD = 0x1a;
		} else {
			return -EINVAL;
		}
	} else if (baud_rate == MHZ(3)) {
		if (src_clk == MHZ(48)) {
			inst->UPSR = 0x08;
			inst->UBAUD = 0x0;
		} else {
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}

	return 0;
}

#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
static int uart_npcx_rx_fifo_available(const struct device *dev)
{
	const struct uart_npcx_config *const config = dev->config;
	struct uart_reg *const inst = config->inst;

	/* True if at least one byte is in the Rx FIFO */
	return IS_BIT_SET(inst->UFRSTS, NPCX_UFRSTS_RFIFO_NEMPTY_STS);
}

static void uart_npcx_dis_all_tx_interrupts(const struct device *dev)
{
	const struct uart_npcx_config *const config = dev->config;
	struct uart_reg *const inst = config->inst;

	/* Disable all Tx interrupts */
	inst->UFTCTL &= ~(BIT(NPCX_UFTCTL_TEMPTY_LVL_EN) | BIT(NPCX_UFTCTL_TEMPTY_EN) |
			  BIT(NPCX_UFTCTL_NXMIP_EN));
}

static void uart_npcx_clear_rx_fifo(const struct device *dev)
{
	const struct uart_npcx_config *const config = dev->config;
	struct uart_reg *const inst = config->inst;
	uint8_t scratch;

	/* Read all dummy bytes out from the Rx FIFO */
	while (uart_npcx_rx_fifo_available(dev)) {
		scratch = inst->URBUF;
	}
}

#endif

#ifdef CONFIG_UART_INTERRUPT_DRIVEN
static int uart_npcx_tx_fifo_ready(const struct device *dev)
{
	const struct uart_npcx_config *const config = dev->config;
	struct uart_reg *const inst = config->inst;

	/* True if the Tx FIFO is not completely full */
	return !(GET_FIELD(inst->UFTSTS, NPCX_UFTSTS_TEMPTY_LVL) == 0);
}

static int uart_npcx_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size)
{
	const struct uart_npcx_config *const config = dev->config;
	struct uart_reg *const inst = config->inst;
	struct uart_npcx_data *data = dev->data;
	uint8_t tx_bytes = 0U;
	k_spinlock_key_t key = k_spin_lock(&data->lock);

	/* While the Tx FIFO is still ready to accept data */
	while ((size - tx_bytes > 0) && uart_npcx_tx_fifo_ready(dev)) {
		/* Put a character into the Tx FIFO */
		inst->UTBUF = tx_data[tx_bytes++];
	}
#ifdef CONFIG_PM
	uart_npcx_pm_policy_state_lock_get(data, UART_PM_POLICY_STATE_TX_FLAG);
	/* Enable the NXMIP interrupt in case the EC enters deep sleep early */
	inst->UFTCTL |= BIT(NPCX_UFTCTL_NXMIP_EN);
#endif /* CONFIG_PM */
	k_spin_unlock(&data->lock, key);

	return tx_bytes;
}
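
/*
 * Illustrative application-side use of the FIFO routines above through the
 * public Zephyr API (tx_buf, tx_len, and sent are hypothetical): from a
 * uart_irq_callback_user_data_t handler one would typically do
 *
 *	if (uart_irq_tx_ready(dev)) {
 *		sent += uart_fifo_fill(dev, tx_buf + sent, tx_len - sent);
 *	}
 *
 * and mirror it with uart_fifo_read() when uart_irq_rx_ready() reports data.
 */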

static int uart_npcx_fifo_read(const struct device *dev, uint8_t *rx_data, const int size)
{
	const struct uart_npcx_config *const config = dev->config;
	struct uart_reg *const inst = config->inst;
	unsigned int rx_bytes = 0U;

	/* While at least one byte is in the Rx FIFO */
	while ((size - rx_bytes > 0) && uart_npcx_rx_fifo_available(dev)) {
		/* Receive one byte from the Rx FIFO */
		rx_data[rx_bytes++] = inst->URBUF;
	}

	return rx_bytes;
}
static void uart_npcx_irq_tx_enable(const struct device *dev)
{
	const struct uart_npcx_config *const config = dev->config;
	struct uart_reg *const inst = config->inst;
	struct uart_npcx_data *data = dev->data;
	k_spinlock_key_t key = k_spin_lock(&data->lock);

	inst->UFTCTL |= BIT(NPCX_UFTCTL_TEMPTY_EN);
	k_spin_unlock(&data->lock, key);
}

static void uart_npcx_irq_tx_disable(const struct device *dev)
{
	const struct uart_npcx_config *const config = dev->config;
	struct uart_reg *const inst = config->inst;
	struct uart_npcx_data *data = dev->data;
	k_spinlock_key_t key = k_spin_lock(&data->lock);

	inst->UFTCTL &= ~(BIT(NPCX_UFTCTL_TEMPTY_EN));
	k_spin_unlock(&data->lock, key);
}

static bool uart_npcx_irq_tx_is_enabled(const struct device *dev)
{
	const struct uart_npcx_config *const config = dev->config;
	struct uart_reg *const inst = config->inst;

	return IS_BIT_SET(inst->UFTCTL, NPCX_UFTCTL_TEMPTY_EN);
}

static int uart_npcx_irq_tx_ready(const struct device *dev)
{
	return uart_npcx_tx_fifo_ready(dev) && uart_npcx_irq_tx_is_enabled(dev);
}

static int uart_npcx_irq_tx_complete(const struct device *dev)
{
	const struct uart_npcx_config *const config = dev->config;
	struct uart_reg *const inst = config->inst;

	/* Tx FIFO is empty or the last byte is being sent */
	return IS_BIT_SET(inst->UFTSTS, NPCX_UFTSTS_NXMIP);
}

static void uart_npcx_irq_rx_enable(const struct device *dev)
{
	const struct uart_npcx_config *const config = dev->config;
	struct uart_reg *const inst = config->inst;

	inst->UFRCTL |= BIT(NPCX_UFRCTL_RNEMPTY_EN);
}

static void uart_npcx_irq_rx_disable(const struct device *dev)
{
	const struct uart_npcx_config *const config = dev->config;
	struct uart_reg *const inst = config->inst;

	inst->UFRCTL &= ~(BIT(NPCX_UFRCTL_RNEMPTY_EN));
}

static bool uart_npcx_irq_rx_is_enabled(const struct device *dev)
{
	const struct uart_npcx_config *const config = dev->config;
	struct uart_reg *const inst = config->inst;

	return IS_BIT_SET(inst->UFRCTL, NPCX_UFRCTL_RNEMPTY_EN);
}

static int uart_npcx_irq_rx_ready(const struct device *dev)
{
	return uart_npcx_rx_fifo_available(dev);
}

static void uart_npcx_irq_err_enable(const struct device *dev)
{
	const struct uart_npcx_config *const config = dev->config;
	struct uart_reg *const inst = config->inst;

	inst->UICTRL |= BIT(NPCX_UICTRL_EEI);
}

static void uart_npcx_irq_err_disable(const struct device *dev)
{
	const struct uart_npcx_config *const config = dev->config;
	struct uart_reg *const inst = config->inst;

	inst->UICTRL &= ~(BIT(NPCX_UICTRL_EEI));
}

static int uart_npcx_irq_is_pending(const struct device *dev)
{
	return uart_npcx_irq_tx_ready(dev) ||
	       (uart_npcx_irq_rx_ready(dev) && uart_npcx_irq_rx_is_enabled(dev));
}

static int uart_npcx_irq_update(const struct device *dev)
{
	ARG_UNUSED(dev);

	return 1;
}

static void uart_npcx_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb,
				       void *cb_data)
{
	struct uart_npcx_data *data = dev->data;

	data->user_cb = cb;
	data->user_data = cb_data;

#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	data->async.user_callback = NULL;
	data->async.user_data = NULL;
#endif
}

/*
 * Poll-in implementation for the interrupt-driven config; forwards the call
 * to uart_npcx_fifo_read().
 */
static int uart_npcx_poll_in(const struct device *dev, unsigned char *c)
{
	return uart_npcx_fifo_read(dev, c, 1) ? 0 : -1;
}

/*
 * Poll-out implementation for the interrupt-driven config; forwards the call
 * to uart_npcx_fifo_fill().
 */
static void uart_npcx_poll_out(const struct device *dev, unsigned char c)
{
	while (!uart_npcx_fifo_fill(dev, &c, 1)) {
		continue;
	}
}

#else  /* !CONFIG_UART_INTERRUPT_DRIVEN */

/*
 * Poll-in implementation for the byte-mode config; reads a byte from URBUF
 * if one is available.
 */
static int uart_npcx_poll_in(const struct device *dev, unsigned char *c)
{
	const struct uart_npcx_config *const config = dev->config;
	struct uart_reg *const inst = config->inst;

	/* No data in the Rx single-byte buffer */
	if (!IS_BIT_SET(inst->UICTRL, NPCX_UICTRL_RBF)) {
		return -1;
	}

	*c = inst->URBUF;
	return 0;
}

/*
 * Poll-out implementation for the byte-mode config; writes the byte to UTBUF
 * once it is empty.
 */
static void uart_npcx_poll_out(const struct device *dev, unsigned char c)
{
	const struct uart_npcx_config *const config = dev->config;
	struct uart_reg *const inst = config->inst;

	/* Wait until the Tx single-byte buffer is empty */
	while (!IS_BIT_SET(inst->UICTRL, NPCX_UICTRL_TBE)) {
		continue;
	}

	inst->UTBUF = c;
}
#endif /* !CONFIG_UART_INTERRUPT_DRIVEN */
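
/*
 * Either poll path above backs the standard polling API; a minimal echo loop
 * on the application side would look like (illustrative only):
 *
 *	unsigned char c;
 *
 *	while (uart_poll_in(dev, &c) == 0) {
 *		uart_poll_out(dev, c);
 *	}
 */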

#ifdef CONFIG_UART_ASYNC_API
static void async_user_callback(const struct device *dev, struct uart_event *evt)
{
	const struct uart_npcx_data *data = dev->data;

	if (data->async.user_callback) {
		data->async.user_callback(dev, evt, data->async.user_data);
	}
}

static void async_evt_rx_rdy(const struct device *dev)
{
	struct uart_npcx_data *data = dev->data;
	struct uart_npcx_rx_dma_params *rx_dma_params = &data->async.rx_dma_params;

	struct uart_event event = {.type = UART_RX_RDY,
				   .data.rx.buf = rx_dma_params->buf,
				   .data.rx.len = rx_dma_params->counter - rx_dma_params->offset,
				   .data.rx.offset = rx_dma_params->offset};

	LOG_DBG("RX Ready: (len: %d off: %d buf: %x)", event.data.rx.len, event.data.rx.offset,
		(uint32_t)event.data.rx.buf);

	/* Update the current pos for new data */
	rx_dma_params->offset = rx_dma_params->counter;

	/* Only send event for new data */
	if (event.data.rx.len > 0) {
		async_user_callback(dev, &event);
	}
}

static void async_evt_tx_done(const struct device *dev)
{
	struct uart_npcx_data *data = dev->data;

	(void)k_work_cancel_delayable(&data->async.tx_dma_params.timeout_work);

	LOG_DBG("TX done: %d", data->async.tx_dma_params.buf_len);

	struct uart_event event = {.type = UART_TX_DONE,
				   .data.tx.buf = data->async.tx_dma_params.buf,
				   .data.tx.len = data->async.tx_dma_params.buf_len};

	/* Reset TX Buffer */
	data->async.tx_dma_params.buf = NULL;
	data->async.tx_dma_params.buf_len = 0U;
	async_user_callback(dev, &event);
}

static void uart_npcx_async_rx_dma_get_status(const struct device *dev, size_t *pending_length)
{
	const struct uart_npcx_config *const config = dev->config;
	struct mdma_reg *const mdma_reg_base = config->mdma_reg_base;

	if (IS_BIT_SET(mdma_reg_base->MDMA_CTL0, NPCX_MDMA_CTL_MDMAEN)) {
		*pending_length = mdma_reg_base->MDMA_CTCNT0;
	} else {
		*pending_length = 0;
	}
}
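
/*
 * Note: the driver reads MDMA_CTCNT0 as the number of bytes still pending in
 * the current transfer, so "received so far" is buf_len minus this value;
 * uart_npcx_async_rx_flush() below relies on that relation.
 */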

static void uart_npcx_async_rx_flush(const struct device *dev)
{
	struct uart_npcx_data *data = dev->data;
	struct uart_npcx_rx_dma_params *rx_dma_params = &data->async.rx_dma_params;
	size_t curr_rcv_len, dma_pending_len;

	uart_npcx_async_rx_dma_get_status(dev, &dma_pending_len);
	curr_rcv_len = rx_dma_params->buf_len - dma_pending_len;

	if (curr_rcv_len > rx_dma_params->offset) {
		rx_dma_params->counter = curr_rcv_len;
		async_evt_rx_rdy(dev);
#ifdef CONFIG_UART_CONSOLE_INPUT_EXPIRED
		k_timeout_t delay = K_MSEC(CONFIG_UART_CONSOLE_INPUT_EXPIRED_TIMEOUT);

		uart_npcx_pm_policy_state_lock_get(data, UART_PM_POLICY_STATE_RX_FLAG);
		k_work_reschedule(&data->rx_refresh_timeout_work, delay);
#endif
	}
}

static void async_evt_rx_buf_request(const struct device *dev)
{
	struct uart_event evt = {
		.type = UART_RX_BUF_REQUEST,
	};

	async_user_callback(dev, &evt);
}

static int uart_npcx_async_callback_set(const struct device *dev, uart_callback_t callback,
					void *user_data)
{
	struct uart_npcx_data *data = dev->data;

	data->async.user_callback = callback;
	data->async.user_data = user_data;

#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	data->user_cb = NULL;
	data->user_data = NULL;
#endif

	return 0;
}

static inline void async_timer_start(struct k_work_delayable *work, uint32_t timeout_us)
{
	if ((timeout_us != SYS_FOREVER_US) && (timeout_us != 0)) {
		LOG_DBG("async timer started for %d us", timeout_us);
		k_work_reschedule(work, K_USEC(timeout_us));
	}
}

static int uart_npcx_async_tx_dma_get_status(const struct device *dev, size_t *pending_length)
{
	const struct uart_npcx_config *const config = dev->config;
	struct mdma_reg *const mdma_reg_base = config->mdma_reg_base;

	if (IS_BIT_SET(mdma_reg_base->MDMA_CTL1, NPCX_MDMA_CTL_MDMAEN)) {
		*pending_length = mdma_reg_base->MDMA_CTCNT1;
	} else {
		*pending_length = 0;
		return -EBUSY;
	}

	return 0;
}

static int uart_npcx_async_tx(const struct device *dev, const uint8_t *buf, size_t len,
			      int32_t timeout)
{
	const struct uart_npcx_config *const config = dev->config;
	struct uart_reg *const inst = config->inst;
	struct mdma_reg *const mdma_reg_base = config->mdma_reg_base;
	struct uart_npcx_data *data = dev->data;
	struct uart_npcx_tx_dma_params *tx_dma_params = &data->async.tx_dma_params;
	int key = irq_lock();

	if (buf == NULL || len == 0) {
		irq_unlock(key);
		return -EINVAL;
	}

	if (tx_dma_params->buf) {
		irq_unlock(key);
		return -EBUSY;
	}

	data->async.tx_in_progress = true;

	data->async.tx_dma_params.buf = buf;
	data->async.tx_dma_params.buf_len = len;
	data->async.tx_dma_params.timeout_us = timeout;

	mdma_reg_base->MDMA_SRCB1 = (uint32_t)buf;
	mdma_reg_base->MDMA_TCNT1 = len;

	async_timer_start(&data->async.tx_dma_params.timeout_work, timeout);
	mdma_reg_base->MDMA_CTL1 |= BIT(NPCX_MDMA_CTL_MDMAEN) | BIT(NPCX_MDMA_CTL_SIEN);

	inst->UMDSL |= BIT(NPCX_UMDSL_ETD);

#ifdef CONFIG_PM
	/* Do not allow system to suspend until transmission has completed */
	uart_npcx_pm_policy_state_lock_get(data, UART_PM_POLICY_STATE_TX_FLAG);
#endif
	irq_unlock(key);

	return 0;
}
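
/*
 * Typical application-side sequence driving the async TX path above
 * (illustrative; my_cb and buf are hypothetical):
 *
 *	uart_callback_set(dev, my_cb, NULL);
 *	uart_tx(dev, buf, sizeof(buf), 100 * USEC_PER_MSEC);
 *
 * my_cb() then receives UART_TX_DONE, or UART_TX_ABORTED if the soft timeout
 * armed by async_timer_start() fires first.
 */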

static int uart_npcx_async_tx_abort(const struct device *dev)
{
	const struct uart_npcx_config *const config = dev->config;
	struct uart_npcx_data *data = dev->data;
	struct mdma_reg *const mdma_reg_base = config->mdma_reg_base;
	size_t dma_pending_len, bytes_transmitted;
	int ret;

	k_work_cancel_delayable(&data->async.tx_dma_params.timeout_work);

	mdma_reg_base->MDMA_CTL1 &= ~BIT(NPCX_MDMA_CTL_MDMAEN);

	ret = uart_npcx_async_tx_dma_get_status(dev, &dma_pending_len);
	if (ret != 0) {
		bytes_transmitted = 0;
	} else {
		bytes_transmitted = data->async.tx_dma_params.buf_len - dma_pending_len;
	}

	struct uart_event tx_aborted_event = {
		.type = UART_TX_ABORTED,
		.data.tx.buf = data->async.tx_dma_params.buf,
		.data.tx.len = bytes_transmitted,
	};
	async_user_callback(dev, &tx_aborted_event);

	return ret;
}

static void uart_npcx_async_tx_timeout(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct uart_npcx_tx_dma_params *tx_params =
		CONTAINER_OF(dwork, struct uart_npcx_tx_dma_params, timeout_work);
	struct uart_npcx_async_data *async_data =
		CONTAINER_OF(tx_params, struct uart_npcx_async_data, tx_dma_params);
	const struct device *dev = async_data->uart_dev;

	LOG_ERR("Async Tx Timeout");
	uart_npcx_async_tx_abort(dev);
}

static int uart_npcx_async_rx_enable(const struct device *dev, uint8_t *buf, const size_t len,
				     const int32_t timeout_us)
{
	const struct uart_npcx_config *const config = dev->config;
	struct uart_reg *const inst = config->inst;
	struct mdma_reg *const mdma_reg_base = config->mdma_reg_base;
	struct uart_npcx_data *data = dev->data;
	struct uart_npcx_rx_dma_params *rx_dma_params = &data->async.rx_dma_params;
	unsigned int key;

	LOG_DBG("Enable RX DMA, len:%d", len);

	key = irq_lock();

	__ASSERT_NO_MSG(buf != NULL);
	__ASSERT_NO_MSG(len > 0);

	rx_dma_params->timeout_us = timeout_us;
	rx_dma_params->buf = buf;
	rx_dma_params->buf_len = len;

	rx_dma_params->offset = 0;
	rx_dma_params->counter = 0;

	SET_FIELD(inst->UFRCTL, NPCX_UFRCTL_RFULL_LVL_SEL, 1);

	mdma_reg_base->MDMA_DSTB0 = (uint32_t)buf;
	mdma_reg_base->MDMA_TCNT0 = len;
	mdma_reg_base->MDMA_CTL0 |= BIT(NPCX_MDMA_CTL_MDMAEN) | BIT(NPCX_MDMA_CTL_SIEN);

	inst->UMDSL |= BIT(NPCX_UMDSL_ERD);

	rx_dma_params->enabled = true;

	async_evt_rx_buf_request(dev);

	inst->UFRCTL |= BIT(NPCX_UFRCTL_RNEMPTY_EN);

	irq_unlock(key);

	return 0;
}
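
/*
 * Expected event flow after uart_rx_enable() (as implemented above): the
 * driver raises UART_RX_BUF_REQUEST right away, and the application should
 * answer with uart_rx_buf_rsp() so a reload buffer is already staged when
 * the current DMA transfer completes or RX is disabled.
 */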

static void async_evt_rx_buf_release(const struct device *dev)
{
	struct uart_npcx_data *data = dev->data;
	struct uart_event evt = {
		.type = UART_RX_BUF_RELEASED,
		.data.rx_buf.buf = data->async.rx_dma_params.buf,
	};

	async_user_callback(dev, &evt);
	data->async.rx_dma_params.buf = NULL;
	data->async.rx_dma_params.buf_len = 0U;
	data->async.rx_dma_params.offset = 0U;
	data->async.rx_dma_params.counter = 0U;
}

static int uart_npcx_async_rx_disable(const struct device *dev)
{
	const struct uart_npcx_config *const config = dev->config;
	struct uart_reg *const inst = config->inst;
	struct uart_npcx_data *data = dev->data;
	struct mdma_reg *const mdma_reg_base = config->mdma_reg_base;
	struct uart_npcx_rx_dma_params *rx_dma_params = &data->async.rx_dma_params;
	unsigned int key;

	LOG_DBG("Async RX Disable");

	key = irq_lock();
	inst->UFRCTL &= ~(BIT(NPCX_UFRCTL_RNEMPTY_EN));

	k_work_cancel_delayable(&rx_dma_params->timeout_work);

	if (rx_dma_params->buf == NULL) {
		LOG_DBG("No buffers to release from RX DMA!");
	} else {
		uart_npcx_async_rx_flush(dev);
		async_evt_rx_buf_release(dev);
	}

	rx_dma_params->enabled = false;

	if (data->async.next_rx_buffer != NULL) {
		rx_dma_params->buf = data->async.next_rx_buffer;
		rx_dma_params->buf_len = data->async.next_rx_buffer_len;
		data->async.next_rx_buffer = NULL;
		data->async.next_rx_buffer_len = 0;
		/* Release the next buffer as well */
		async_evt_rx_buf_release(dev);
	}

	mdma_reg_base->MDMA_CTL0 &= ~BIT(NPCX_MDMA_CTL_MDMAEN);

	struct uart_event disabled_event = {.type = UART_RX_DISABLED};

	async_user_callback(dev, &disabled_event);

	irq_unlock(key);

	return 0;
}

static int uart_npcx_async_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len)
{
	struct uart_npcx_data *data = dev->data;

	if (data->async.next_rx_buffer != NULL) {
		return -EBUSY;
	} else if (data->async.rx_dma_params.enabled == false) {
		return -EACCES;
	}

	data->async.next_rx_buffer = buf;
	data->async.next_rx_buffer_len = len;

	LOG_DBG("Next RX buf rsp, new: %d", len);

	return 0;
}

static void uart_npcx_async_rx_timeout(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct uart_npcx_rx_dma_params *rx_params =
		CONTAINER_OF(dwork, struct uart_npcx_rx_dma_params, timeout_work);
	struct uart_npcx_async_data *async_data =
		CONTAINER_OF(rx_params, struct uart_npcx_async_data, rx_dma_params);
	const struct device *dev = async_data->uart_dev;

	LOG_DBG("Async RX timeout");
	uart_npcx_async_rx_flush(dev);
}

static void uart_npcx_async_dma_load_new_rx_buf(const struct device *dev)
{
	const struct uart_npcx_config *const config = dev->config;
	struct uart_reg *const inst = config->inst;
	struct mdma_reg *const mdma_reg_base = config->mdma_reg_base;
	struct uart_npcx_data *data = dev->data;
	struct uart_npcx_rx_dma_params *rx_dma_params = &data->async.rx_dma_params;

	rx_dma_params->offset = 0;
	rx_dma_params->counter = 0;

	rx_dma_params->buf = data->async.next_rx_buffer;
	rx_dma_params->buf_len = data->async.next_rx_buffer_len;
	data->async.next_rx_buffer = NULL;
	data->async.next_rx_buffer_len = 0;

	mdma_reg_base->MDMA_DSTB0 = (uint32_t)rx_dma_params->buf;
	mdma_reg_base->MDMA_TCNT0 = rx_dma_params->buf_len;
	mdma_reg_base->MDMA_CTL0 |= BIT(NPCX_MDMA_CTL_MDMAEN) | BIT(NPCX_MDMA_CTL_SIEN);
	inst->UMDSL |= BIT(NPCX_UMDSL_ERD);
}

/* DMA rx has reached its terminal count */
static void uart_npcx_async_dma_rx_complete(const struct device *dev)
{
	struct uart_npcx_data *data = dev->data;
	struct uart_npcx_rx_dma_params *rx_dma_params = &data->async.rx_dma_params;

	rx_dma_params->counter = rx_dma_params->buf_len;

	async_evt_rx_rdy(dev);

	/* A new buffer is available */
	if (data->async.next_rx_buffer != NULL) {
		async_evt_rx_buf_release(dev);
		uart_npcx_async_dma_load_new_rx_buf(dev);
		/* Request the next buffer */
		async_evt_rx_buf_request(dev);
		async_timer_start(&rx_dma_params->timeout_work, rx_dma_params->timeout_us);
	} else {
		/* Buffer full without a valid next buffer; disable RX DMA */
		LOG_DBG("Disabled RX DMA, no valid next buffer");
		uart_npcx_async_rx_disable(dev);
	}
}
#endif

#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
static void uart_npcx_isr(const struct device *dev)
{
	struct uart_npcx_data *data = dev->data;
#if defined(CONFIG_PM) || defined(CONFIG_UART_ASYNC_API)
	const struct uart_npcx_config *const config = dev->config;
	struct uart_reg *const inst = config->inst;
#endif

	/*
	 * Set the pm constraint to prevent the system from entering suspend
	 * within the CONFIG_UART_CONSOLE_INPUT_EXPIRED_TIMEOUT period.
	 */
#ifdef CONFIG_UART_CONSOLE_INPUT_EXPIRED
	if (uart_npcx_irq_rx_ready(dev)) {
		k_timeout_t delay = K_MSEC(CONFIG_UART_CONSOLE_INPUT_EXPIRED_TIMEOUT);

		uart_npcx_pm_policy_state_lock_get(data, UART_PM_POLICY_STATE_RX_FLAG);
		k_work_reschedule(&data->rx_refresh_timeout_work, delay);
	}
#endif

#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	if (data->user_cb) {
		data->user_cb(dev, data->user_data);
	}
#endif

#ifdef CONFIG_UART_ASYNC_API
	if (data->async.user_callback) {
		struct mdma_reg *const mdma_reg_base = config->mdma_reg_base;

		/*
		 * Always check rx here because RFIFO_NEMPTY_STS is not valid
		 * when MDMA mode is used. This matters when the rx timeout_us
		 * is zero; when timeout_us is nonzero, the rx flush is done in
		 * the timeout_work callback instead.
		 */
		if (data->async.rx_dma_params.timeout_us == 0) {
			uart_npcx_async_rx_flush(dev);
		} else if (IS_BIT_SET(inst->UFRCTL, NPCX_UFRCTL_RNEMPTY_EN)) {
			async_timer_start(&data->async.rx_dma_params.timeout_work,
					  data->async.rx_dma_params.timeout_us);
		}

		/* MDMA rx end interrupt */
		if (IS_BIT_SET(mdma_reg_base->MDMA_CTL0, NPCX_MDMA_CTL_TC) &&
		    IS_BIT_SET(mdma_reg_base->MDMA_CTL0, NPCX_MDMA_CTL_SIEN)) {
			mdma_reg_base->MDMA_CTL0 &= ~BIT(NPCX_MDMA_CTL_SIEN);
			/* TC is write-0-clear bit */
			mdma_reg_base->MDMA_CTL0 &= ~BIT(NPCX_MDMA_CTL_TC);
			inst->UMDSL &= ~BIT(NPCX_UMDSL_ERD);
			uart_npcx_async_dma_rx_complete(dev);
			LOG_DBG("DMA Rx TC");
		}

		/* MDMA tx done interrupt */
		if (IS_BIT_SET(mdma_reg_base->MDMA_CTL1, NPCX_MDMA_CTL_TC) &&
		    IS_BIT_SET(mdma_reg_base->MDMA_CTL1, NPCX_MDMA_CTL_SIEN)) {
			mdma_reg_base->MDMA_CTL1 &= ~BIT(NPCX_MDMA_CTL_SIEN);
			/* TC is write-0-clear bit */
			mdma_reg_base->MDMA_CTL1 &= ~BIT(NPCX_MDMA_CTL_TC);

			/*
			 * MDMA tx is done (i.e. all data in memory has been
			 * moved to the UART tx FIFO), but the tx FIFO has not
			 * yet fully drained to the bus.
			 */
			if (!IS_BIT_SET(inst->UFTSTS, NPCX_UFTSTS_NXMIP)) {
				k_spinlock_key_t key = k_spin_lock(&data->lock);

				inst->UFTCTL |= BIT(NPCX_UFTCTL_NXMIP_EN);
				k_spin_unlock(&data->lock, key);
			} else {
				data->async.tx_in_progress = false;
#ifdef CONFIG_PM
				uart_npcx_pm_policy_state_lock_put(data,
								   UART_PM_POLICY_STATE_TX_FLAG);
#endif /* CONFIG_PM */
				async_evt_tx_done(dev);
			}
		}
	}
#endif

#if defined(CONFIG_PM) || defined(CONFIG_UART_ASYNC_API)
	if (IS_BIT_SET(inst->UFTCTL, NPCX_UFTCTL_NXMIP_EN) &&
	    IS_BIT_SET(inst->UFTSTS, NPCX_UFTSTS_NXMIP)) {
		k_spinlock_key_t key = k_spin_lock(&data->lock);

		/* Disable NXMIP interrupt */
		inst->UFTCTL &= ~BIT(NPCX_UFTCTL_NXMIP_EN);
		k_spin_unlock(&data->lock, key);
#ifdef CONFIG_PM
		uart_npcx_pm_policy_state_lock_put(data, UART_PM_POLICY_STATE_TX_FLAG);
#endif
#ifdef CONFIG_UART_ASYNC_API
		if (data->async.tx_in_progress) {
			data->async.tx_in_progress = false;
			async_evt_tx_done(dev);
			LOG_DBG("Tx wait-empty done");
		}
#endif
	}
#endif
}
#endif

/* UART api functions */
static int uart_npcx_err_check(const struct device *dev)
{
	const struct uart_npcx_config *const config = dev->config;
	struct uart_reg *const inst = config->inst;
	uint32_t err = 0U;
	uint8_t stat = inst->USTAT;

	if (IS_BIT_SET(stat, NPCX_USTAT_DOE)) {
		err |= UART_ERROR_OVERRUN;
	}

	if (IS_BIT_SET(stat, NPCX_USTAT_PE)) {
		err |= UART_ERROR_PARITY;
	}

	if (IS_BIT_SET(stat, NPCX_USTAT_FE)) {
		err |= UART_ERROR_FRAMING;
	}

	return err;
}

static __unused void uart_npcx_rx_wk_isr(const struct device *dev, struct npcx_wui *wui)
{
	/*
	 * Set the pm constraint to prevent the system from entering suspend
	 * within the CONFIG_UART_CONSOLE_INPUT_EXPIRED_TIMEOUT period.
	 */
	LOG_DBG("-->%s", dev->name);
#ifdef CONFIG_UART_CONSOLE_INPUT_EXPIRED
	struct uart_npcx_data *data = dev->data;
	k_timeout_t delay = K_MSEC(CONFIG_UART_CONSOLE_INPUT_EXPIRED_TIMEOUT);

	uart_npcx_pm_policy_state_lock_get(data, UART_PM_POLICY_STATE_RX_FLAG);
	k_work_reschedule(&data->rx_refresh_timeout_work, delay);
#endif

	/*
	 * Disable the MIWU CR_SIN interrupt to avoid redundant interrupts
	 * after the EC wakes up.
	 */
	npcx_uart_disable_access_interrupt();
}

#ifdef CONFIG_UART_CONSOLE_INPUT_EXPIRED
static void uart_npcx_rx_refresh_timeout(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct uart_npcx_data *data =
		CONTAINER_OF(dwork, struct uart_npcx_data, rx_refresh_timeout_work);

	uart_npcx_pm_policy_state_lock_put(data, UART_PM_POLICY_STATE_RX_FLAG);
}
#endif

/* UART driver registration */
static DEVICE_API(uart, uart_npcx_driver_api) = {
	.poll_in = uart_npcx_poll_in,
	.poll_out = uart_npcx_poll_out,
	.err_check = uart_npcx_err_check,
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	.fifo_fill = uart_npcx_fifo_fill,
	.fifo_read = uart_npcx_fifo_read,
	.irq_tx_enable = uart_npcx_irq_tx_enable,
	.irq_tx_disable = uart_npcx_irq_tx_disable,
	.irq_tx_ready = uart_npcx_irq_tx_ready,
	.irq_tx_complete = uart_npcx_irq_tx_complete,
	.irq_rx_enable = uart_npcx_irq_rx_enable,
	.irq_rx_disable = uart_npcx_irq_rx_disable,
	.irq_rx_ready = uart_npcx_irq_rx_ready,
	.irq_err_enable = uart_npcx_irq_err_enable,
	.irq_err_disable = uart_npcx_irq_err_disable,
	.irq_is_pending = uart_npcx_irq_is_pending,
	.irq_update = uart_npcx_irq_update,
	.irq_callback_set = uart_npcx_irq_callback_set,
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
#ifdef CONFIG_UART_ASYNC_API
	.callback_set = uart_npcx_async_callback_set,
	.tx = uart_npcx_async_tx,
	.tx_abort = uart_npcx_async_tx_abort,
	.rx_enable = uart_npcx_async_rx_enable,
	.rx_buf_rsp = uart_npcx_async_rx_buf_rsp,
	.rx_disable = uart_npcx_async_rx_disable,
#endif /* CONFIG_UART_ASYNC_API */
};

static int uart_npcx_init(const struct device *dev)
{
	const struct uart_npcx_config *const config = dev->config;
	struct uart_npcx_data *const data = dev->data;
	const struct device *const clk_dev = DEVICE_DT_GET(NPCX_CLK_CTRL_NODE);
	struct uart_reg *const inst = config->inst;
	uint32_t uart_rate;
	int ret;

	if (!device_is_ready(clk_dev)) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}

	/* Turn on device clock first and get source clock freq. */
	ret = clock_control_on(clk_dev, (clock_control_subsys_t)&config->clk_cfg);
	if (ret < 0) {
		LOG_ERR("Turn on UART clock fail %d", ret);
		return ret;
	}

#ifdef CONFIG_UART_ASYNC_API
	ret = clock_control_on(clk_dev, (clock_control_subsys_t)&config->mdma_clk_cfg);
	if (ret < 0) {
		LOG_ERR("Turn on UART MDMA clock fail %d", ret);
		return ret;
	}
#endif

	/*
	 * If the APB2 clock is not 15 MHz, other optimized values of UPSR and
	 * UBAUD are needed for baud rate 115200.
	 */
	ret = clock_control_get_rate(clk_dev, (clock_control_subsys_t)&config->clk_cfg, &uart_rate);
	if (ret < 0) {
		LOG_ERR("Get UART clock rate error %d", ret);
		return ret;
	}

	/* Configure baud rate */
	ret = uart_set_npcx_baud_rate(inst, data->baud_rate, uart_rate);
	if (ret < 0) {
		LOG_ERR("Set baud rate %d with unsupported apb clock %d failed", data->baud_rate,
			uart_rate);
		return ret;
	}

	/*
	 * 8-N-1, FIFO enabled. Must be done after setting
	 * the divisor for the new divisor to take effect.
	 */
	inst->UFRS = 0x00;

	/* Initialize the UART FIFO if the mode is interrupt-driven or async */
#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
	/* Enable the UART FIFO mode */
	inst->UMDSL |= BIT(NPCX_UMDSL_FIFO_MD);

	/* Disable all UART tx FIFO interrupts */
	uart_npcx_dis_all_tx_interrupts(dev);

	/* Clear UART rx FIFO */
	uart_npcx_clear_rx_fifo(dev);

	/* Configure UART interrupts */
	config->irq_config_func(dev);
#endif

#ifdef CONFIG_UART_ASYNC_API
	data->async.next_rx_buffer = NULL;
	data->async.next_rx_buffer_len = 0;
	data->async.uart_dev = dev;
	k_work_init_delayable(&data->async.rx_dma_params.timeout_work, uart_npcx_async_rx_timeout);
	k_work_init_delayable(&data->async.tx_dma_params.timeout_work, uart_npcx_async_tx_timeout);
#endif

	if (IS_ENABLED(CONFIG_PM)) {
		/* Initialize a miwu device input and its callback function */
		npcx_miwu_init_dev_callback(&data->uart_rx_cb, &config->uart_rx_wui,
					    uart_npcx_rx_wk_isr, dev);
		npcx_miwu_manage_callback(&data->uart_rx_cb, true);
		/*
		 * Configure the UART wake-up event triggered from a falling
		 * edge on CR_SIN pin. No need for callback function.
		 */
		npcx_miwu_interrupt_configure(&config->uart_rx_wui, NPCX_MIWU_MODE_EDGE,
					      NPCX_MIWU_TRIG_LOW);

#ifdef CONFIG_UART_CONSOLE_INPUT_EXPIRED
		k_work_init_delayable(&data->rx_refresh_timeout_work, uart_npcx_rx_refresh_timeout);
#endif
	}

	/* Configure pin-mux for uart device */
	ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret < 0) {
		LOG_ERR("UART pinctrl setup failed (%d)", ret);
		return ret;
	}

	return 0;
}

#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
#define NPCX_UART_IRQ_CONFIG_FUNC_DECL(inst)                                                       \
	static void uart_npcx_irq_config_##inst(const struct device *dev)
#define NPCX_UART_IRQ_CONFIG_FUNC_INIT(inst) .irq_config_func = uart_npcx_irq_config_##inst,
#define NPCX_UART_IRQ_CONFIG_FUNC(inst)                                                            \
	static void uart_npcx_irq_config_##inst(const struct device *dev)                          \
	{                                                                                          \
		IRQ_CONNECT(DT_INST_IRQN(inst), DT_INST_IRQ(inst, priority), uart_npcx_isr,        \
			    DEVICE_DT_INST_GET(inst), 0);                                          \
		irq_enable(DT_INST_IRQN(inst));                                                    \
	}
#else
#define NPCX_UART_IRQ_CONFIG_FUNC_DECL(inst)
#define NPCX_UART_IRQ_CONFIG_FUNC_INIT(inst)
#define NPCX_UART_IRQ_CONFIG_FUNC(inst)
#endif
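
/*
 * For reference, the shape of a devicetree node this driver binds to
 * (illustrative only; the unit address, interrupt numbers, and phandle
 * values below are hypothetical and normally come from the SoC .dtsi):
 *
 *	uart1: serial@400c4000 {
 *		compatible = "nuvoton,npcx-uart";
 *		reg = <0x400c4000 0x2000>;
 *		interrupts = <33 3>;
 *		clocks = <&pcc NPCX_CLOCK_BUS_APB2 NPCX_PWDWN_CTL1 1>;
 *		uart-rx = <&wui_cr_sin1>;
 *		current-speed = <115200>;
 *	};
 */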

#define NPCX_UART_INIT(i)                                                                          \
	NPCX_UART_IRQ_CONFIG_FUNC_DECL(i);                                                         \
	                                                                                           \
	PINCTRL_DT_INST_DEFINE(i);                                                                 \
	                                                                                           \
	static const struct uart_npcx_config uart_npcx_cfg_##i = {                                 \
		.inst = (struct uart_reg *)DT_INST_REG_ADDR(i),                                    \
		.clk_cfg = NPCX_DT_CLK_CFG_ITEM(i),                                                \
		.uart_rx_wui = NPCX_DT_WUI_ITEM_BY_NAME(i, uart_rx),                               \
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(i),                                         \
		NPCX_UART_IRQ_CONFIG_FUNC_INIT(i)                                                  \
	                                                                                           \
		IF_ENABLED(CONFIG_UART_ASYNC_API, (                                                \
			.mdma_clk_cfg = NPCX_DT_CLK_CFG_ITEM_BY_IDX(i, 1),                         \
			.mdma_reg_base = (struct mdma_reg *)DT_INST_REG_ADDR_BY_IDX(i, 1),         \
		))                                                                                 \
	};                                                                                         \
	                                                                                           \
	static struct uart_npcx_data uart_npcx_data_##i = {                                        \
		.baud_rate = DT_INST_PROP(i, current_speed),                                       \
	};                                                                                         \
	                                                                                           \
	DEVICE_DT_INST_DEFINE(i, uart_npcx_init, NULL, &uart_npcx_data_##i, &uart_npcx_cfg_##i,    \
			      PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, &uart_npcx_driver_api);   \
												   \
	NPCX_UART_IRQ_CONFIG_FUNC(i)

DT_INST_FOREACH_STATUS_OKAY(NPCX_UART_INIT)

#define ENABLE_MIWU_CRIN_IRQ(i)                                                                    \
	npcx_miwu_irq_get_and_clear_pending(&uart_npcx_cfg_##i.uart_rx_wui);                       \
	npcx_miwu_irq_enable(&uart_npcx_cfg_##i.uart_rx_wui);

#define DISABLE_MIWU_CRIN_IRQ(i) npcx_miwu_irq_disable(&uart_npcx_cfg_##i.uart_rx_wui);

void npcx_uart_enable_access_interrupt(void)
{
	DT_INST_FOREACH_STATUS_OKAY(ENABLE_MIWU_CRIN_IRQ)
}

void npcx_uart_disable_access_interrupt(void)
{
	DT_INST_FOREACH_STATUS_OKAY(DISABLE_MIWU_CRIN_IRQ)
}