1 /*
2 * Copyright (c) 2023-2024 Analog Devices, Inc.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #ifdef CONFIG_UART_ASYNC_API
8 #include <zephyr/drivers/dma.h>
9 #include <wrap_max32_dma.h>
10 #endif
11 #include <zephyr/drivers/pinctrl.h>
12 #include <zephyr/drivers/uart.h>
13 #include <zephyr/kernel.h>
14 #include <zephyr/logging/log.h>
15 #include <zephyr/pm/device.h>
16 #include <zephyr/drivers/clock_control/adi_max32_clock_control.h>
17
18 #include <wrap_max32_uart.h>
19
20 #define DT_DRV_COMPAT adi_max32_uart
21
22 LOG_MODULE_REGISTER(uart_max32, CONFIG_UART_LOG_LEVEL);
23
24 #ifdef CONFIG_UART_ASYNC_API
/* DMA controller binding for one UART transfer direction (TX or RX). */
struct max32_uart_dma_config {
	const struct device *dev; /* DMA controller device */
	const uint32_t channel;   /* DMA channel index (0xFF when not configured in DT) */
	const uint32_t slot;      /* Peripheral DMA request slot for this UART */
};
30 #endif /* CONFIG_UART_ASYNC_API */
31
/* Constant per-instance configuration, populated from devicetree. */
struct max32_uart_config {
	mxc_uart_regs_t *regs;                  /* UART peripheral register base */
	const struct pinctrl_dev_config *pctrl; /* Pin control configuration */
	const struct device *clock;             /* Clock controller device */
	struct max32_perclk perclk;             /* Peripheral clock bus/bit/source */
	struct uart_config uart_conf;           /* Initial UART settings from devicetree */
#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
	uart_irq_config_func_t irq_config_func; /* Connects and enables the UART IRQ */
#endif /* CONFIG_UART_INTERRUPT_DRIVEN || CONFIG_UART_ASYNC_API */
#ifdef CONFIG_UART_ASYNC_API
	const struct max32_uart_dma_config tx_dma; /* TX direction DMA binding */
	const struct max32_uart_dma_config rx_dma; /* RX direction DMA binding */
#endif /* CONFIG_UART_ASYNC_API */
};
46
47 #ifdef CONFIG_UART_ASYNC_API
/* Number of ping-pong bounce buffers used when the TX source is outside SRAM. */
#define MAX32_UART_TX_CACHE_NUM 2
/* Bookkeeping for an active asynchronous (DMA) transmission. */
struct max32_uart_async_tx {
	const uint8_t *buf; /* User buffer passed to uart_tx() */
	const uint8_t *src; /* Next byte of buf still to be staged/sent */
	size_t len;         /* Remaining bytes to transmit */
	/* SRAM bounce buffers for DMA when buf is not DMA-accessible (e.g. flash) */
	uint8_t cache[MAX32_UART_TX_CACHE_NUM][CONFIG_UART_TX_CACHE_LEN];
	uint8_t cache_id;   /* Index of the cache buffer currently owned by DMA */
	struct dma_block_config dma_blk; /* Reusable DMA block descriptor */
	int32_t timeout;    /* TX timeout in microseconds (SYS_FOREVER_US/0 = disabled) */
	struct k_work_delayable timeout_work; /* Aborts the transfer on timeout */
};
59
/* Bookkeeping for an active asynchronous (DMA) reception. */
struct max32_uart_async_rx {
	uint8_t *buf;      /* Current receive buffer */
	size_t len;        /* Size of buf */
	size_t offset;     /* Start of not-yet-reported data within buf */
	size_t counter;    /* Byte count of the last UART_RX_RDY report */
	uint8_t *next_buf; /* Buffer queued via uart_rx_buf_rsp(), if any */
	size_t next_len;   /* Size of next_buf */
	int32_t timeout;   /* RX inactivity timeout in microseconds */
	struct k_work_delayable timeout_work; /* Flushes received data on timeout */
};
70
/* Asynchronous API state shared by the TX and RX paths. */
struct max32_uart_async_data {
	const struct device *uart_dev; /* Back-pointer to the owning UART device */
	struct max32_uart_async_tx tx;
	struct max32_uart_async_rx rx;
	uart_callback_t cb; /* User event callback */
	void *user_data;    /* User argument passed to cb */
};
78 #endif
79
/* Mutable per-instance driver state. */
struct max32_uart_data {
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	uart_irq_callback_user_data_t cb; /* Interrupt callback */
	void *cb_data;                    /* Interrupt callback arg */
	uint32_t flags;                   /* Cached interrupt flags */
	uint32_t status;                  /* Cached status flags */
	struct k_timer timer;             /* Software timer used to run the ISR for TX */
#endif
#ifdef CONFIG_UART_ASYNC_API
	struct max32_uart_async_data async; /* Async API state */
#endif
	struct uart_config conf; /* baudrate, stopbits, ... */
};
93
94 #ifdef CONFIG_UART_INTERRUPT_DRIVEN
95 static void uart_max32_isr(const struct device *dev);
96 static void uart_max32_soft_isr(struct k_timer *timer);
97 #endif
98
99 #ifdef CONFIG_UART_ASYNC_API
100 static int uart_max32_tx_dma_load(const struct device *dev, uint8_t *buf, size_t len);
101 #endif
102
api_poll_out(const struct device * dev,unsigned char c)103 static void api_poll_out(const struct device *dev, unsigned char c)
104 {
105 const struct max32_uart_config *cfg = dev->config;
106
107 MXC_UART_WriteCharacter(cfg->regs, c);
108 }
109
api_poll_in(const struct device * dev,unsigned char * c)110 static int api_poll_in(const struct device *dev, unsigned char *c)
111 {
112 int val;
113 const struct max32_uart_config *cfg = dev->config;
114
115 val = MXC_UART_ReadCharacterRaw(cfg->regs);
116 if (val >= 0) {
117 *c = (unsigned char)val;
118 } else {
119 return -1;
120 }
121
122 return 0;
123 }
124
api_err_check(const struct device * dev)125 static int api_err_check(const struct device *dev)
126 {
127 int err = 0;
128 uint32_t flags;
129 const struct max32_uart_config *cfg = dev->config;
130
131 flags = MXC_UART_GetFlags(cfg->regs);
132
133 if (flags & ADI_MAX32_UART_ERROR_FRAMING) {
134 err |= UART_ERROR_FRAMING;
135 }
136
137 if (flags & ADI_MAX32_UART_ERROR_PARITY) {
138 err |= UART_ERROR_PARITY;
139 }
140
141 if (flags & ADI_MAX32_UART_ERROR_OVERRUN) {
142 err |= UART_ERROR_OVERRUN;
143 }
144
145 return err;
146 }
147
/*
 * Apply a new UART configuration (runtime uart_configure() and the init path).
 * Each setting is written to hardware only when it differs from the cached
 * configuration, and the cache is updated only after the hardware accepts the
 * value, so a failed call leaves the previous settings intact.
 * Returns 0 on success, -EINVAL for an unknown parity value, and -ENOTSUP for
 * settings the hardware (or this driver) does not support.
 */
static int api_configure(const struct device *dev, const struct uart_config *uart_cfg)
{
	int err;
	const struct max32_uart_config *const cfg = dev->config;
	mxc_uart_regs_t *regs = cfg->regs;
	struct max32_uart_data *data = dev->data;

	/*
	 * Set parity
	 */
	if (data->conf.parity != uart_cfg->parity) {
		mxc_uart_parity_t mxc_parity;

		switch (uart_cfg->parity) {
		case UART_CFG_PARITY_NONE:
			mxc_parity = ADI_MAX32_UART_CFG_PARITY_NONE;
			break;
		case UART_CFG_PARITY_ODD:
			mxc_parity = ADI_MAX32_UART_CFG_PARITY_ODD;
			break;
		case UART_CFG_PARITY_EVEN:
			mxc_parity = ADI_MAX32_UART_CFG_PARITY_EVEN;
			break;
		/* Mark/space parity only exists on some MAX32 parts */
		case UART_CFG_PARITY_MARK:
#if defined(ADI_MAX32_UART_CFG_PARITY_MARK)
			mxc_parity = ADI_MAX32_UART_CFG_PARITY_MARK;
			break;
#else
			return -ENOTSUP;
#endif
		case UART_CFG_PARITY_SPACE:
#if defined(ADI_MAX32_UART_CFG_PARITY_SPACE)
			mxc_parity = ADI_MAX32_UART_CFG_PARITY_SPACE;
			break;
#else
			return -ENOTSUP;
#endif
		default:
			return -EINVAL;
		}

		err = MXC_UART_SetParity(regs, mxc_parity);
		if (err < 0) {
			return -ENOTSUP;
		}
		/* In case of success keep configuration */
		data->conf.parity = uart_cfg->parity;
	}

	/*
	 * Set stop bit
	 */
	if (data->conf.stop_bits != uart_cfg->stop_bits) {
		if (uart_cfg->stop_bits == UART_CFG_STOP_BITS_1) {
			err = MXC_UART_SetStopBits(regs, MXC_UART_STOP_1);
		} else if (uart_cfg->stop_bits == UART_CFG_STOP_BITS_2) {
			err = MXC_UART_SetStopBits(regs, MXC_UART_STOP_2);
		} else {
			/* 0.5/1.5 stop bits are not supported */
			return -ENOTSUP;
		}
		if (err < 0) {
			return -ENOTSUP;
		}
		/* In case of success keep configuration */
		data->conf.stop_bits = uart_cfg->stop_bits;
	}

	/*
	 * Set data bit
	 * Valid data for MAX32 is 5-6-7-8
	 * Valid data for Zephyr is 0-1-2-3
	 * Added +5 to index match.
	 */
	if (data->conf.data_bits != uart_cfg->data_bits) {
		err = MXC_UART_SetDataSize(regs, (5 + uart_cfg->data_bits));
		if (err < 0) {
			return -ENOTSUP;
		}
		/* In case of success keep configuration */
		data->conf.data_bits = uart_cfg->data_bits;
	}

	/*
	 * Set flow control
	 * Flow control not implemented yet so that only support no flow mode
	 */
	if (data->conf.flow_ctrl != uart_cfg->flow_ctrl) {
		if (uart_cfg->flow_ctrl != UART_CFG_FLOW_CTRL_NONE) {
			return -ENOTSUP;
		}
		data->conf.flow_ctrl = uart_cfg->flow_ctrl;
	}

	/*
	 * Set baudrate
	 */
	if (data->conf.baudrate != uart_cfg->baudrate) {
		err = Wrap_MXC_UART_SetFrequency(regs, uart_cfg->baudrate, cfg->perclk.clk_src);
		if (err < 0) {
			return -ENOTSUP;
		}
		/* In case of success keep configuration */
		data->conf.baudrate = uart_cfg->baudrate;
	}
	return 0;
}
254
255 #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
256
api_config_get(const struct device * dev,struct uart_config * uart_cfg)257 static int api_config_get(const struct device *dev, struct uart_config *uart_cfg)
258 {
259 struct max32_uart_data *data = dev->data;
260
261 /* copy configs from global setting */
262 *uart_cfg = data->conf;
263
264 return 0;
265 }
266
267 #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */
268
269 #ifdef CONFIG_UART_ASYNC_API
270 static void uart_max32_async_tx_timeout(struct k_work *work);
271 static void uart_max32_async_rx_timeout(struct k_work *work);
272 #endif /* CONFIG_UART_ASYNC_API */
273
/*
 * One-time driver initialization: resets the peripheral, enables and selects
 * its clock, applies pinctrl, programs the devicetree UART settings, and wires
 * up the IRQ / async / soft-timer machinery as configured by Kconfig.
 */
static int uart_max32_init(const struct device *dev)
{
	int ret;
	const struct max32_uart_config *const cfg = dev->config;
	mxc_uart_regs_t *regs = cfg->regs;
#if defined(CONFIG_UART_ASYNC_API) || defined(CONFIG_UART_INTERRUPT_DRIVEN)
	struct max32_uart_data *data = dev->data;
#endif

	if (!device_is_ready(cfg->clock)) {
		LOG_ERR("Clock control device not ready");
		return -ENODEV;
	}

	/* Put the peripheral in a known state before reconfiguring it */
	ret = MXC_UART_Shutdown(regs);
	if (ret) {
		return ret;
	}

	ret = clock_control_on(cfg->clock, (clock_control_subsys_t)&cfg->perclk);
	if (ret != 0) {
		LOG_ERR("Cannot enable UART clock");
		return ret;
	}

	ret = Wrap_MXC_UART_SetClockSource(regs, cfg->perclk.clk_src);
	if (ret != 0) {
		LOG_ERR("Cannot set UART clock source");
		return ret;
	}

	ret = pinctrl_apply_state(cfg->pctrl, PINCTRL_STATE_DEFAULT);
	if (ret) {
		return ret;
	}

	/* Program baudrate/parity/stop/data bits from the devicetree defaults */
	ret = api_configure(dev, &cfg->uart_conf);
	if (ret) {
		return ret;
	}

	ret = Wrap_MXC_UART_Init(regs);
	if (ret) {
		return ret;
	}

#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
	/* Clear any pending UART RX/TX interrupts */
	MXC_UART_ClearFlags(regs, (ADI_MAX32_UART_INT_RX | ADI_MAX32_UART_INT_TX));
	/* Connect and enable the instance IRQ (generated per-instance macro) */
	cfg->irq_config_func(dev);
#endif

#ifdef CONFIG_UART_ASYNC_API
	data->async.uart_dev = dev;
	k_work_init_delayable(&data->async.tx.timeout_work, uart_max32_async_tx_timeout);
	k_work_init_delayable(&data->async.rx.timeout_work, uart_max32_async_rx_timeout);
	data->async.rx.len = 0;
	data->async.rx.offset = 0;
#endif

#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	/* Soft timer lets the driver invoke the ISR from thread context */
	k_timer_init(&data->timer, &uart_max32_soft_isr, NULL);
	k_timer_user_data_set(&data->timer, (void *)dev);
#endif

	return ret;
}
341
342 #ifdef CONFIG_UART_INTERRUPT_DRIVEN
343
api_fifo_fill(const struct device * dev,const uint8_t * tx_data,int size)344 static int api_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size)
345 {
346 unsigned int num_tx = 0;
347 const struct max32_uart_config *cfg = dev->config;
348 #ifdef CONFIG_UART_MAX32_TX_AE_WORKAROUND
349 struct max32_uart_data *const data = dev->data;
350 #endif
351
352 num_tx = MXC_UART_WriteTXFIFO(cfg->regs, (unsigned char *)tx_data, size);
353
354 #ifdef CONFIG_UART_MAX32_TX_AE_WORKAROUND
355 /* AE doesn't always trigger when small payloads are sent, so trigger timer ISR */
356 if (size <= 2) {
357 k_timer_start(&data->timer, K_NO_WAIT, K_NO_WAIT);
358 }
359 #endif
360
361 return (int)num_tx;
362 }
363
/* Drain up to size bytes from the RX FIFO; returns the number read. */
static int api_fifo_read(const struct device *dev, uint8_t *rx_data, const int size)
{
	const struct max32_uart_config *config = dev->config;
	unsigned int count;

	count = MXC_UART_ReadRXFIFO(config->regs, (unsigned char *)rx_data, size);

	return count;
}
373
api_irq_tx_enable(const struct device * dev)374 static void api_irq_tx_enable(const struct device *dev)
375 {
376 const struct max32_uart_config *cfg = dev->config;
377 struct max32_uart_data *const data = dev->data;
378
379 MXC_UART_EnableInt(cfg->regs, ADI_MAX32_UART_INT_TX | ADI_MAX32_UART_INT_TX_OEM);
380
381 /* Fire timer interrupt to run TX callbacks for ISR context */
382 k_timer_start(&data->timer, K_NO_WAIT, K_NO_WAIT);
383 }
384
api_irq_tx_disable(const struct device * dev)385 static void api_irq_tx_disable(const struct device *dev)
386 {
387 const struct max32_uart_config *cfg = dev->config;
388
389 MXC_UART_DisableInt(cfg->regs, ADI_MAX32_UART_INT_TX | ADI_MAX32_UART_INT_TX_OEM);
390 }
391
api_irq_tx_ready(const struct device * dev)392 static int api_irq_tx_ready(const struct device *dev)
393 {
394 struct max32_uart_data *const data = dev->data;
395 const struct max32_uart_config *cfg = dev->config;
396 uint32_t inten = Wrap_MXC_UART_GetRegINTEN(cfg->regs);
397
398 return ((inten & (ADI_MAX32_UART_INT_TX | ADI_MAX32_UART_INT_TX_OEM)) &&
399 !(data->status & ADI_MAX32_UART_STATUS_TX_FULL));
400 }
401
api_irq_tx_complete(const struct device * dev)402 static int api_irq_tx_complete(const struct device *dev)
403 {
404 const struct max32_uart_config *cfg = dev->config;
405
406 if (MXC_UART_GetActive(cfg->regs) == E_BUSY) {
407 return 0;
408 } else {
409 return 1; /* transmission completed */
410 }
411 }
412
api_irq_rx_ready(const struct device * dev)413 static int api_irq_rx_ready(const struct device *dev)
414 {
415 struct max32_uart_data *const data = dev->data;
416 const struct max32_uart_config *cfg = dev->config;
417 uint32_t inten = Wrap_MXC_UART_GetRegINTEN(cfg->regs);
418
419 return ((inten & ADI_MAX32_UART_INT_RX) && !(data->status & ADI_MAX32_UART_RX_EMPTY));
420 }
421
api_irq_err_enable(const struct device * dev)422 static void api_irq_err_enable(const struct device *dev)
423 {
424 const struct max32_uart_config *cfg = dev->config;
425
426 MXC_UART_EnableInt(cfg->regs, ADI_MAX32_UART_ERROR_INTERRUPTS);
427 }
428
api_irq_err_disable(const struct device * dev)429 static void api_irq_err_disable(const struct device *dev)
430 {
431 const struct max32_uart_config *cfg = dev->config;
432
433 MXC_UART_DisableInt(cfg->regs, ADI_MAX32_UART_ERROR_INTERRUPTS);
434 }
435
/* An interrupt is pending when either direction is ready for service. */
static int api_irq_is_pending(const struct device *dev)
{
	if (api_irq_rx_ready(dev)) {
		return 1;
	}

	return api_irq_tx_ready(dev);
}
440
api_irq_update(const struct device * dev)441 static int api_irq_update(const struct device *dev)
442 {
443 struct max32_uart_data *const data = dev->data;
444 const struct max32_uart_config *const cfg = dev->config;
445
446 data->flags = MXC_UART_GetFlags(cfg->regs);
447 data->status = MXC_UART_GetStatus(cfg->regs);
448
449 return 1;
450 }
451
/* Register the interrupt-driven API user callback and its argument. */
static void api_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb,
				 void *cb_data)
{
	struct max32_uart_data *const data = dev->data;

	data->cb_data = cb_data;
	data->cb = cb;
}
460
/* k_timer expiry handler: invoke the UART ISR from software. */
static void uart_max32_soft_isr(struct k_timer *timer)
{
	const struct device *uart_dev = k_timer_user_data_get(timer);

	uart_max32_isr(uart_dev);
}
467
468 #endif /* CONFIG_UART_INTERRUPT_DRIVEN */
469
470 #if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
api_irq_rx_enable(const struct device * dev)471 static void api_irq_rx_enable(const struct device *dev)
472 {
473 const struct max32_uart_config *cfg = dev->config;
474
475 MXC_UART_EnableInt(cfg->regs, ADI_MAX32_UART_INT_RX);
476 }
477
api_irq_rx_disable(const struct device * dev)478 static void api_irq_rx_disable(const struct device *dev)
479 {
480 const struct max32_uart_config *cfg = dev->config;
481
482 MXC_UART_DisableInt(cfg->regs, ADI_MAX32_UART_INT_RX);
483 }
484
/*
 * Shared UART interrupt handler (also reached via the soft-timer path).
 * Runs the interrupt-driven user callback, and for async RX restarts the
 * inactivity timeout whenever an RX interrupt flag is observed. All latched
 * flags are cleared after the callback so it can inspect them first.
 */
static void uart_max32_isr(const struct device *dev)
{
	struct max32_uart_data *data = dev->data;
	const struct max32_uart_config *cfg = dev->config;
	uint32_t intfl;

	intfl = MXC_UART_GetFlags(cfg->regs);

#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	if (data->cb) {
		data->cb(dev, data->cb_data);
	}
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */

#ifdef CONFIG_UART_ASYNC_API
	/* RX traffic observed: push the inactivity timeout further out */
	if (data->async.rx.timeout != SYS_FOREVER_US && data->async.rx.timeout != 0 &&
	    (intfl & ADI_MAX32_UART_INT_RX)) {
		k_work_reschedule(&data->async.rx.timeout_work, K_USEC(data->async.rx.timeout));
	}
#endif /* CONFIG_UART_ASYNC_API */

	/* Clear RX/TX interrupts flag after cb is called */
	MXC_UART_ClearFlags(cfg->regs, intfl);
}
509 #endif /* CONFIG_UART_INTERRUPT_DRIVEN || CONFIG_UART_ASYNC_API */
510
511 #if defined(CONFIG_UART_ASYNC_API)
512
/* Schedule the timeout work unless the timeout is disabled (0 or forever). */
static inline void async_timer_start(struct k_work_delayable *work, int32_t timeout)
{
	if (timeout == 0 || timeout == SYS_FOREVER_US) {
		return;
	}

	k_work_reschedule(work, K_USEC(timeout));
}
519
async_user_callback(const struct device * dev,struct uart_event * evt)520 static void async_user_callback(const struct device *dev, struct uart_event *evt)
521 {
522 const struct max32_uart_data *data = dev->data;
523
524 if (data->async.cb) {
525 data->async.cb(dev, evt, data->async.user_data);
526 }
527 }
528
load_tx_cache(const uint8_t * src,size_t len,uint8_t * dest)529 static uint32_t load_tx_cache(const uint8_t *src, size_t len, uint8_t *dest)
530 {
531 memcpy(dest, src, MIN(len, CONFIG_UART_TX_CACHE_LEN));
532
533 return MIN(len, CONFIG_UART_TX_CACHE_LEN);
534 }
535
/*
 * TX DMA completion callback. Either reloads the DMA with the next cached
 * chunk (when staging through the SRAM ping-pong cache) or reports that the
 * transmission finished.
 */
static void uart_max32_async_tx_callback(const struct device *dma_dev, void *user_data,
					 uint32_t channel, int status)
{
	const struct device *dev = user_data;
	const struct max32_uart_config *config = dev->config;
	struct max32_uart_data *data = dev->data;
	struct max32_uart_async_tx *tx = &data->async.tx;
	struct dma_status dma_stat;
	int ret;

	unsigned int key = irq_lock();

	dma_get_status(config->tx_dma.dev, config->tx_dma.channel, &dma_stat);
	/* Skip callback if channel is still busy */
	if (dma_stat.busy) {
		irq_unlock(key);
		return;
	}

	k_work_cancel_delayable(&tx->timeout_work);
	Wrap_MXC_UART_DisableTxDMA(config->regs);

	irq_unlock(key);

	/* Account for the chunk that just completed */
	tx->len -= tx->dma_blk.block_size;
	if (tx->len > 0) {
		/* More data pending: flip to the other cache buffer and resend.
		 * NOTE(review): this path assumes the cache staging mode, and it
		 * reads tx->cache[cache_id] before the "load next chunk" below has
		 * ever filled that buffer on the first refill — verify that chunk 2
		 * of a transfer longer than CONFIG_UART_TX_CACHE_LEN is staged
		 * correctly.
		 */
		tx->cache_id = !(tx->cache_id);
		ret = uart_max32_tx_dma_load(dev, tx->cache[tx->cache_id],
					     MIN(tx->len, CONFIG_UART_TX_CACHE_LEN));
		if (ret < 0) {
			LOG_ERR("Error configuring Tx DMA (%d)", ret);
			return;
		}

		ret = dma_start(config->tx_dma.dev, config->tx_dma.channel);
		if (ret < 0) {
			LOG_ERR("Error starting Tx DMA (%d)", ret);
			return;
		}

		async_timer_start(&tx->timeout_work, tx->timeout);

		Wrap_MXC_UART_SetTxDMALevel(config->regs, 2);
		Wrap_MXC_UART_EnableTxDMA(config->regs);

		/* Load next chunk as well */
		if (tx->len > CONFIG_UART_TX_CACHE_LEN) {
			tx->src += load_tx_cache(tx->src, tx->len - CONFIG_UART_TX_CACHE_LEN,
						 tx->cache[!(tx->cache_id)]);
		}
	} else {
		/* NOTE(review): tx->len is 0 at this point, so the event reports
		 * len 0 rather than the number of bytes actually transmitted —
		 * confirm against the uart_event tx.len contract.
		 */
		struct uart_event tx_done = {
			.type = status == 0 ? UART_TX_DONE : UART_TX_ABORTED,
			.data.tx.buf = tx->buf,
			.data.tx.len = tx->len,
		};
		async_user_callback(dev, &tx_done);
	}
}
595
/* Configure (but do not start) the TX DMA channel to send len bytes of buf. */
static int uart_max32_tx_dma_load(const struct device *dev, uint8_t *buf, size_t len)
{
	const struct max32_uart_config *config = dev->config;
	struct max32_uart_data *data = dev->data;
	struct dma_block_config *blk = &data->async.tx.dma_blk;
	struct dma_config dma_cfg = {
		.channel_direction = MEMORY_TO_PERIPHERAL,
		.dma_callback = uart_max32_async_tx_callback,
		.user_data = (void *)dev,
		.dma_slot = config->tx_dma.slot,
		.block_count = 1,
		.source_data_size = 1U,
		.source_burst_length = 1U,
		.dest_data_size = 1U,
		.head_block = blk,
	};
	int ret;

	/* The block descriptor lives in driver data so it survives this call */
	blk->block_size = len;
	blk->source_address = (uint32_t)buf;

	ret = dma_config(config->tx_dma.dev, config->tx_dma.channel, &dma_cfg);
	if (ret < 0) {
		return ret;
	}

	return 0;
}
623
/* Register the async API event callback and its user argument. */
static int api_callback_set(const struct device *dev, uart_callback_t callback, void *user_data)
{
	struct max32_uart_data *data = dev->data;

	data->async.user_data = user_data;
	data->async.cb = callback;

	return 0;
}
633
/*
 * Start an asynchronous DMA transmission of len bytes from buf.
 *
 * Buffers outside SRAM (e.g. in flash) are not DMA-accessible, so they are
 * staged chunk-by-chunk through the SRAM ping-pong cache.
 * Returns -ENOTSUP when no TX DMA channel is configured, -EBUSY when a
 * transfer is already in flight, or a negative DMA error code.
 */
static int api_tx(const struct device *dev, const uint8_t *buf, size_t len, int32_t timeout)
{
	struct max32_uart_data *data = dev->data;
	const struct max32_uart_config *config = dev->config;
	struct dma_status dma_stat;
	int ret;
	bool use_cache = false;
	unsigned int key = irq_lock();

	if (config->tx_dma.channel == 0xFF) {
		LOG_ERR("Tx DMA channel is not configured");
		ret = -ENOTSUP;
		goto unlock;
	}

	ret = dma_get_status(config->tx_dma.dev, config->tx_dma.channel, &dma_stat);
	if (ret < 0 || dma_stat.busy) {
		LOG_ERR("DMA Tx %s", ret < 0 ? "error" : "busy");
		/* Direct return; equivalent to goto unlock with this ret value */
		irq_unlock(key);
		return ret < 0 ? ret : -EBUSY;
	}

	data->async.tx.buf = buf;
	data->async.tx.len = len;
	data->async.tx.src = data->async.tx.buf;

	/* Stage through the SRAM cache when buf is not inside SRAM */
	if (((uint32_t)buf < MXC_SRAM_MEM_BASE) ||
	    (((uint32_t)buf + len) > (MXC_SRAM_MEM_BASE + MXC_SRAM_MEM_SIZE))) {
		use_cache = true;
		/* len becomes the size of the first chunk actually staged.
		 * NOTE(review): only cache[0] is primed here; the completion
		 * callback flips to cache[1] before refilling it — verify the
		 * second chunk of a long transfer is staged correctly.
		 */
		len = load_tx_cache(data->async.tx.src, MIN(len, CONFIG_UART_TX_CACHE_LEN),
				    data->async.tx.cache[0]);
		data->async.tx.src += len;
		data->async.tx.cache_id = 0;
	}

	ret = uart_max32_tx_dma_load(dev, use_cache ? data->async.tx.cache[0] : ((uint8_t *)buf),
				     len);
	if (ret < 0) {
		LOG_ERR("Error configuring Tx DMA (%d)", ret);
		goto unlock;
	}

	ret = dma_start(config->tx_dma.dev, config->tx_dma.channel);
	if (ret < 0) {
		LOG_ERR("Error starting Tx DMA (%d)", ret);
		goto unlock;
	}

	data->async.tx.timeout = timeout;
	async_timer_start(&data->async.tx.timeout_work, timeout);

	/* Set the TX FIFO DMA request threshold and enable the request */
	Wrap_MXC_UART_SetTxDMALevel(config->regs, 2);
	Wrap_MXC_UART_EnableTxDMA(config->regs);

unlock:
	irq_unlock(key);

	return ret;
}
693
/*
 * Abort an in-progress asynchronous transmission.
 * Cancels the timeout work, disables the UART TX DMA request, stops the DMA
 * channel, and reports UART_TX_ABORTED with the number of bytes already sent.
 * Returns 0 even when nothing was in flight.
 */
static int api_tx_abort(const struct device *dev)
{
	int ret;
	struct max32_uart_data *data = dev->data;
	const struct max32_uart_config *config = dev->config;
	struct dma_status dma_stat;
	size_t bytes_sent;

	unsigned int key = irq_lock();

	k_work_cancel_delayable(&data->async.tx.timeout_work);

	Wrap_MXC_UART_DisableTxDMA(config->regs);

	/* NOTE(review): dma_stat.busy is read below even when dma_get_status()
	 * failed (ret < 0), in which case dma_stat may be stale — confirm the
	 * DMA driver always populates it.
	 */
	ret = dma_get_status(config->tx_dma.dev, config->tx_dma.channel, &dma_stat);
	if (!dma_stat.busy) {
		/* Nothing in flight: nothing to abort */
		irq_unlock(key);
		return 0;
	}

	/* pending_length is what DMA has not transferred yet */
	bytes_sent = (ret == 0) ? (data->async.tx.len - dma_stat.pending_length) : 0;

	ret = dma_stop(config->tx_dma.dev, config->tx_dma.channel);

	irq_unlock(key);

	if (ret == 0) {
		struct uart_event tx_aborted = {
			.type = UART_TX_ABORTED,
			.data.tx.buf = data->async.tx.buf,
			.data.tx.len = bytes_sent,
		};
		async_user_callback(dev, &tx_aborted);
	}

	return 0;
}
731
uart_max32_async_tx_timeout(struct k_work * work)732 static void uart_max32_async_tx_timeout(struct k_work *work)
733 {
734 struct k_work_delayable *dwork = k_work_delayable_from_work(work);
735 struct max32_uart_async_tx *tx =
736 CONTAINER_OF(dwork, struct max32_uart_async_tx, timeout_work);
737 struct max32_uart_async_data *async = CONTAINER_OF(tx, struct max32_uart_async_data, tx);
738 struct max32_uart_data *data = CONTAINER_OF(async, struct max32_uart_data, async);
739
740 api_tx_abort(data->async.uart_dev);
741 }
742
/*
 * Stop asynchronous reception: cancel the inactivity timeout, disable the
 * UART RX DMA request and RX interrupt, stop the DMA channel, then emit
 * UART_RX_BUF_RELEASED for the active buffer (and any queued next buffer)
 * followed by UART_RX_DISABLED.
 */
static int api_rx_disable(const struct device *dev)
{
	struct max32_uart_data *data = dev->data;
	const struct max32_uart_config *config = dev->config;
	int ret;
	unsigned int key = irq_lock();

	k_work_cancel_delayable(&data->async.rx.timeout_work);

	Wrap_MXC_UART_DisableRxDMA(config->regs);

	ret = dma_stop(config->rx_dma.dev, config->rx_dma.channel);
	if (ret) {
		LOG_ERR("Error stopping Rx DMA (%d)", ret);
		irq_unlock(key);
		return ret;
	}

	api_irq_rx_disable(dev);

	irq_unlock(key);

	/* Release current buffer event */
	struct uart_event rel_event = {
		.type = UART_RX_BUF_RELEASED,
		.data.rx_buf.buf = data->async.rx.buf,
	};
	async_user_callback(dev, &rel_event);

	/* Disable RX event */
	struct uart_event rx_disabled = {.type = UART_RX_DISABLED};

	async_user_callback(dev, &rx_disabled);

	/* Reset RX bookkeeping for the next uart_rx_enable() */
	data->async.rx.buf = NULL;
	data->async.rx.len = 0;
	data->async.rx.counter = 0;
	data->async.rx.offset = 0;

	if (data->async.rx.next_buf) {
		/* Release next buffer event */
		struct uart_event next_rel_event = {
			.type = UART_RX_BUF_RELEASED,
			.data.rx_buf.buf = data->async.rx.next_buf,
		};
		async_user_callback(dev, &next_rel_event);
		data->async.rx.next_buf = NULL;
		data->async.rx.next_len = 0;
	}

	return 0;
}
795
/*
 * RX DMA completion callback: the current buffer is full. Reports the bytes
 * received since the last report, then either swaps in the queued next buffer
 * and restarts the DMA, or disables reception when no buffer was provided.
 */
static void uart_max32_async_rx_callback(const struct device *dma_dev, void *user_data,
					 uint32_t channel, int status)
{
	const struct device *dev = user_data;
	const struct max32_uart_config *config = dev->config;
	struct max32_uart_data *data = dev->data;
	struct max32_uart_async_data *async = &data->async;
	struct dma_status dma_stat;
	size_t total_rx;

	unsigned int key = irq_lock();

	/* NOTE(review): the dma_get_status() return value is not checked */
	dma_get_status(config->rx_dma.dev, config->rx_dma.channel, &dma_stat);

	/* Transfer not actually complete yet; ignore this callback */
	if (dma_stat.pending_length > 0) {
		irq_unlock(key);
		return;
	}

	total_rx = async->rx.len - dma_stat.pending_length;

	api_irq_rx_disable(dev);

	irq_unlock(key);

	/* Report data received since the last UART_RX_RDY (offset) */
	if (total_rx > async->rx.offset) {
		async->rx.counter = total_rx - async->rx.offset;
		struct uart_event rdy_event = {
			.type = UART_RX_RDY,
			.data.rx.buf = async->rx.buf,
			.data.rx.len = async->rx.counter,
			.data.rx.offset = async->rx.offset,
		};
		async_user_callback(dev, &rdy_event);
	}

	if (async->rx.next_buf) {
		async->rx.offset = 0;
		async->rx.counter = 0;

		/* Hand the exhausted buffer back to the application */
		struct uart_event rel_event = {
			.type = UART_RX_BUF_RELEASED,
			.data.rx_buf.buf = async->rx.buf,
		};
		async_user_callback(dev, &rel_event);

		/* Promote the queued buffer and ask for a new spare */
		async->rx.buf = async->rx.next_buf;
		async->rx.len = async->rx.next_len;

		async->rx.next_buf = NULL;
		async->rx.next_len = 0;
		struct uart_event req_event = {
			.type = UART_RX_BUF_REQUEST,
		};
		async_user_callback(dev, &req_event);

		dma_reload(config->rx_dma.dev, config->rx_dma.channel, config->rx_dma.slot,
			   (uint32_t)async->rx.buf, async->rx.len);
		dma_start(config->rx_dma.dev, config->rx_dma.channel);

		api_irq_rx_enable(dev);
		async_timer_start(&async->rx.timeout_work, async->rx.timeout);
	} else {
		/* No spare buffer: reception stops here */
		api_rx_disable(dev);
	}
}
862
/*
 * Start asynchronous DMA reception of up to len bytes into buf.
 *
 * Configures and starts the RX DMA channel, enables the UART RX DMA request
 * and the RX interrupt (used to drive the inactivity timeout), and emits
 * UART_RX_BUF_REQUEST so the application can queue a spare buffer.
 * Returns -ENOTSUP when no RX DMA channel is configured, -EBUSY when the
 * channel is already active, or a negative DMA error code.
 */
static int api_rx_enable(const struct device *dev, uint8_t *buf, size_t len, int32_t timeout)
{
	struct max32_uart_data *data = dev->data;
	const struct max32_uart_config *config = dev->config;
	struct dma_status dma_stat;
	struct dma_config dma_cfg = {0};
	struct dma_block_config dma_blk = {0};
	int ret;

	unsigned int key = irq_lock();

	if (config->rx_dma.channel == 0xFF) {
		LOG_ERR("Rx DMA channel is not configured");
		irq_unlock(key);
		return -ENOTSUP;
	}

	ret = dma_get_status(config->rx_dma.dev, config->rx_dma.channel, &dma_stat);
	if (ret < 0 || dma_stat.busy) {
		LOG_ERR("DMA Rx %s", ret < 0 ? "error" : "busy");
		irq_unlock(key);
		return ret < 0 ? ret : -EBUSY;
	}

	data->async.rx.buf = buf;
	data->async.rx.len = len;

	/*
	 * RX moves data from the UART FIFO into memory, so the channel must be
	 * PERIPHERAL_TO_MEMORY (the previous MEMORY_TO_PERIPHERAL direction
	 * was a copy of the TX path and contradicted dest_address below).
	 */
	dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY;
	dma_cfg.dma_callback = uart_max32_async_rx_callback;
	dma_cfg.user_data = (void *)dev;
	dma_cfg.dma_slot = config->rx_dma.slot;
	dma_cfg.block_count = 1;
	dma_cfg.source_data_size = 1U;
	dma_cfg.source_burst_length = 1U;
	dma_cfg.dest_data_size = 1U;
	dma_cfg.head_block = &dma_blk;
	dma_blk.block_size = len;
	dma_blk.dest_address = (uint32_t)buf;

	ret = dma_config(config->rx_dma.dev, config->rx_dma.channel, &dma_cfg);
	if (ret < 0) {
		LOG_ERR("Error configuring Rx DMA (%d)", ret);
		irq_unlock(key);
		return ret;
	}

	ret = dma_start(config->rx_dma.dev, config->rx_dma.channel);
	if (ret < 0) {
		LOG_ERR("Error starting Rx DMA (%d)", ret);
		irq_unlock(key);
		return ret;
	}

	data->async.rx.timeout = timeout;

	/* Request DMA service as soon as one byte is available in the FIFO */
	Wrap_MXC_UART_SetRxDMALevel(config->regs, 1);
	Wrap_MXC_UART_EnableRxDMA(config->regs);

	struct uart_event buf_req = {
		.type = UART_RX_BUF_REQUEST,
	};

	async_user_callback(dev, &buf_req);

	/* RX interrupt drives the inactivity-timeout rescheduling in the ISR */
	api_irq_rx_enable(dev);
	async_timer_start(&data->async.rx.timeout_work, timeout);

	irq_unlock(key);
	return ret;
}
933
/* Queue the next RX buffer in response to UART_RX_BUF_REQUEST. */
static int api_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len)
{
	struct max32_uart_data *data = dev->data;

	data->async.rx.next_len = len;
	data->async.rx.next_buf = buf;

	return 0;
}
943
/*
 * RX inactivity timeout: report whatever the DMA has written so far as a
 * UART_RX_RDY event without stopping the transfer. The RX interrupt is
 * re-enabled at the end; the ISR reschedules this work on the next RX
 * interrupt, so the timer restarts as soon as traffic resumes.
 */
static void uart_max32_async_rx_timeout(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct max32_uart_async_rx *rx =
		CONTAINER_OF(dwork, struct max32_uart_async_rx, timeout_work);
	struct max32_uart_async_data *async = CONTAINER_OF(rx, struct max32_uart_async_data, rx);
	struct max32_uart_data *data = CONTAINER_OF(async, struct max32_uart_data, async);
	const struct max32_uart_config *config = data->async.uart_dev->config;
	struct dma_status dma_stat;
	uint32_t total_rx;

	unsigned int key = irq_lock();

	/* NOTE(review): the dma_get_status() return value is not checked */
	dma_get_status(config->rx_dma.dev, config->rx_dma.channel, &dma_stat);

	api_irq_rx_disable(data->async.uart_dev);
	k_work_cancel_delayable(&data->async.rx.timeout_work);

	irq_unlock(key);

	/* Bytes the DMA has delivered into the buffer so far */
	total_rx = async->rx.len - dma_stat.pending_length;

	if (total_rx > async->rx.offset) {
		async->rx.counter = total_rx - async->rx.offset;
		struct uart_event rdy_event = {
			.type = UART_RX_RDY,
			.data.rx.buf = async->rx.buf,
			.data.rx.len = async->rx.counter,
			.data.rx.offset = async->rx.offset,
		};
		async_user_callback(async->uart_dev, &rdy_event);
	}
	/* Subsequent reports start after the data just delivered */
	async->rx.offset += async->rx.counter;
	async->rx.counter = 0;

	api_irq_rx_enable(data->async.uart_dev);
}
981
982 #endif
983
984 #ifdef CONFIG_PM_DEVICE
uart_max32_pm_action(const struct device * dev,enum pm_device_action action)985 static int uart_max32_pm_action(const struct device *dev, enum pm_device_action action)
986 {
987 int ret;
988 const struct max32_uart_config *const cfg = dev->config;
989
990 switch (action) {
991 case PM_DEVICE_ACTION_RESUME:
992
993 /* Enable clock */
994 ret = clock_control_on(cfg->clock, (clock_control_subsys_t)&cfg->perclk);
995 if (ret != 0) {
996 LOG_ERR("cannot enable UART clock");
997 return ret;
998 }
999
1000 /* Set pins to active state */
1001 ret = pinctrl_apply_state(cfg->pctrl, PINCTRL_STATE_DEFAULT);
1002 if (ret) {
1003 return ret;
1004 }
1005
1006 break;
1007 case PM_DEVICE_ACTION_SUSPEND:
1008 /* Flush uart before sleep */
1009 while (MXC_UART_ReadyForSleep(cfg->regs) != E_NO_ERROR) {
1010 }
1011
1012 /* Move pins to sleep state */
1013 ret = pinctrl_apply_state(cfg->pctrl, PINCTRL_STATE_SLEEP);
1014 if ((ret < 0) && (ret != -ENOENT)) {
1015 /*
1016 * If returning -ENOENT, no pins where defined for sleep mode :
1017 * Do not output on console (might sleep already) when going to sleep,
1018 * "(LP)UART pinctrl sleep state not available"
1019 * and don't block PM suspend.
1020 * Else return the error.
1021 */
1022 return ret;
1023 }
1024
1025 /* Disable clock */
1026 ret = clock_control_off(cfg->clock, (clock_control_subsys_t)&cfg->perclk);
1027 if (ret != 0) {
1028 LOG_ERR("cannot disable UART clock");
1029 return ret;
1030 }
1031
1032 break;
1033 default:
1034 return -ENOTSUP;
1035 }
1036
1037 return 0;
1038 }
1039 #endif /* CONFIG_PM_DEVICE */
1040
/* UART driver API vtable; optional groups are compiled in per Kconfig. */
static DEVICE_API(uart, uart_max32_driver_api) = {
	.poll_in = api_poll_in,
	.poll_out = api_poll_out,
	.err_check = api_err_check,
#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
	.configure = api_configure,
	.config_get = api_config_get,
#endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	.fifo_fill = api_fifo_fill,
	.fifo_read = api_fifo_read,
	.irq_tx_enable = api_irq_tx_enable,
	.irq_tx_disable = api_irq_tx_disable,
	.irq_tx_ready = api_irq_tx_ready,
	.irq_rx_enable = api_irq_rx_enable,
	.irq_rx_disable = api_irq_rx_disable,
	.irq_tx_complete = api_irq_tx_complete,
	.irq_rx_ready = api_irq_rx_ready,
	.irq_err_enable = api_irq_err_enable,
	.irq_err_disable = api_irq_err_disable,
	.irq_is_pending = api_irq_is_pending,
	.irq_update = api_irq_update,
	.irq_callback_set = api_irq_callback_set,
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
#ifdef CONFIG_UART_ASYNC_API
	.callback_set = api_callback_set,
	.tx = api_tx,
	.tx_abort = api_tx_abort,
	.rx_enable = api_rx_enable,
	.rx_buf_rsp = api_rx_buf_rsp,
	.rx_disable = api_rx_disable,
#endif /* CONFIG_UART_ASYNC_API */
};
1074
1075 #ifdef CONFIG_UART_ASYNC_API
/* Resolve the DMA controller device for direction `name`, or NULL when the
 * instance has no `dmas` devicetree property.
 */
#define MAX32_DT_INST_DMA_CTLR(n, name)                                                            \
	COND_CODE_1(DT_INST_NODE_HAS_PROP(n, dmas),                                                \
		    (DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, name))), (NULL))

/* Read DMA cell `cell` for direction `name`; 0xff marks "not configured". */
#define MAX32_DT_INST_DMA_CELL(n, name, cell)                                                      \
	COND_CODE_1(DT_INST_NODE_HAS_PROP(n, dmas), (DT_INST_DMAS_CELL_BY_NAME(n, name, cell)),    \
		    (0xff))

/* Initializers for the tx_dma/rx_dma members of max32_uart_config. */
#define MAX32_UART_DMA_INIT(n)                                                                     \
	.tx_dma.dev = MAX32_DT_INST_DMA_CTLR(n, tx),                                               \
	.tx_dma.channel = MAX32_DT_INST_DMA_CELL(n, tx, channel),                                  \
	.tx_dma.slot = MAX32_DT_INST_DMA_CELL(n, tx, slot),                                        \
	.rx_dma.dev = MAX32_DT_INST_DMA_CTLR(n, rx),                                               \
	.rx_dma.channel = MAX32_DT_INST_DMA_CELL(n, rx, channel),                                  \
	.rx_dma.slot = MAX32_DT_INST_DMA_CELL(n, rx, slot),
#else
#define MAX32_UART_DMA_INIT(n)
#endif

/* 1 when either IRQ-based API is enabled (both need the ISR and IRQ hookup). */
#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
#define MAX32_UART_USE_IRQ 1
#else
#define MAX32_UART_USE_IRQ 0
#endif
1100
/*
 * Per-instance device definition (pinctrl, config, data, PM, device).
 * Fix: the hw-flow-control lookup must use the macro's own instance parameter
 * `_num`; the previous `DT_INST_PROP_OR(index, ...)` referenced a nonexistent
 * node, which devicetree macros silently resolve to the default value, so the
 * hw-flow-control property was always ignored.
 */
#define MAX32_UART_INIT(_num)                                                                      \
	PINCTRL_DT_INST_DEFINE(_num);                                                              \
	IF_ENABLED(MAX32_UART_USE_IRQ,                                                             \
		   (static void uart_max32_irq_init_##_num(const struct device *dev)               \
		   {                                                                               \
			   IRQ_CONNECT(DT_INST_IRQN(_num), DT_INST_IRQ(_num, priority),            \
				       uart_max32_isr, DEVICE_DT_INST_GET(_num), 0);               \
			   irq_enable(DT_INST_IRQN(_num));                                         \
		   }));                                                                            \
	static const struct max32_uart_config max32_uart_config_##_num = {                         \
		.regs = (mxc_uart_regs_t *)DT_INST_REG_ADDR(_num),                                 \
		.pctrl = PINCTRL_DT_INST_DEV_CONFIG_GET(_num),                                     \
		.clock = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(_num)),                                 \
		.perclk.bus = DT_INST_CLOCKS_CELL(_num, offset),                                   \
		.perclk.bit = DT_INST_CLOCKS_CELL(_num, bit),                                      \
		.perclk.clk_src =                                                                  \
			DT_INST_PROP_OR(_num, clock_source, ADI_MAX32_PRPH_CLK_SRC_PCLK),          \
		.uart_conf.baudrate = DT_INST_PROP(_num, current_speed),                           \
		.uart_conf.parity = DT_INST_ENUM_IDX(_num, parity),                                \
		.uart_conf.data_bits = DT_INST_ENUM_IDX(_num, data_bits),                          \
		.uart_conf.stop_bits = DT_INST_ENUM_IDX(_num, stop_bits),                          \
		.uart_conf.flow_ctrl =                                                             \
			DT_INST_PROP_OR(_num, hw_flow_control, UART_CFG_FLOW_CTRL_NONE),           \
		MAX32_UART_DMA_INIT(_num) IF_ENABLED(                                              \
			MAX32_UART_USE_IRQ, (.irq_config_func = uart_max32_irq_init_##_num,))};    \
	static struct max32_uart_data max32_uart_data##_num = {                                    \
		IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN, (.cb = NULL,))};                          \
	PM_DEVICE_DT_INST_DEFINE(_num, uart_max32_pm_action);                                      \
	DEVICE_DT_INST_DEFINE(_num, uart_max32_init, PM_DEVICE_DT_INST_GET(_num),                  \
			      &max32_uart_data##_num, &max32_uart_config_##_num, PRE_KERNEL_1,     \
			      CONFIG_SERIAL_INIT_PRIORITY, (void *)&uart_max32_driver_api);
1132
1133 DT_INST_FOREACH_STATUS_OKAY(MAX32_UART_INIT)
1134