/*
 * Copyright (c) 2020 Linumiz
 * Author: Parthiban Nallathambi <parthiban@linumiz.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT	infineon_xmc4xxx_uart

#include <xmc_uart.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/sys/util.h>
#include <zephyr/irq.h>

#define MAX_FIFO_SIZE 64
#define USIC_IRQ_MIN  84
#define USIC_IRQ_MAX  101
#define IRQS_PER_USIC 6

#define CURRENT_BUFFER 0
#define NEXT_BUFFER 1

struct uart_xmc4xxx_config {
	XMC_USIC_CH_t *uart;
	const struct pinctrl_dev_config *pcfg;
	uint8_t input_src;
#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
	uart_irq_config_func_t irq_config_func;
	uint8_t irq_num_tx;
	uint8_t irq_num_rx;
#endif
	uint8_t fifo_start_offset;
	uint8_t fifo_tx_size;
	uint8_t fifo_rx_size;
};

#ifdef CONFIG_UART_ASYNC_API
struct uart_dma_stream {
	const struct device *dma_dev;
	uint32_t dma_channel;
	struct dma_config dma_cfg;
	struct dma_block_config blk_cfg;
	uint8_t *buffer;
	size_t buffer_len;
	size_t offset;
	size_t counter;
	int32_t timeout;
	struct k_work_delayable timeout_work;
};
#endif

struct uart_xmc4xxx_data {
	XMC_UART_CH_CONFIG_t config;
#if defined(CONFIG_UART_INTERRUPT_DRIVEN)
	uart_irq_callback_user_data_t user_cb;
	void *user_data;
#endif
#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
	uint8_t service_request_tx;
	uint8_t service_request_rx;
#endif
#if defined(CONFIG_UART_ASYNC_API)
	const struct device *dev;
	uart_callback_t async_cb;
	void *async_user_data;
	struct uart_dma_stream dma_rx;
	struct uart_dma_stream dma_tx;
	uint8_t *rx_next_buffer;
	size_t rx_next_buffer_len;
#endif
};

static int uart_xmc4xxx_poll_in(const struct device *dev, unsigned char *c)
{
	const struct uart_xmc4xxx_config *config = dev->config;
	bool fifo_empty;

	if (config->fifo_rx_size > 0) {
		fifo_empty = XMC_USIC_CH_RXFIFO_IsEmpty(config->uart);
	} else {
		fifo_empty = !XMC_USIC_CH_GetReceiveBufferStatus(config->uart);
	}
	if (fifo_empty) {
		return -1;
	}

	*c = (unsigned char)XMC_UART_CH_GetReceivedData(config->uart);

	return 0;
}

static void uart_xmc4xxx_poll_out(const struct device *dev, unsigned char c)
{
	const struct uart_xmc4xxx_config *config = dev->config;

	/* XMC_UART_CH_Transmit() only blocks for UART to finish transmitting */
	/* when fifo is not used */
	while (config->fifo_tx_size > 0 && XMC_USIC_CH_TXFIFO_IsFull(config->uart)) {
	}
	XMC_UART_CH_Transmit(config->uart, c);
}

#if defined(CONFIG_UART_ASYNC_API)
static inline void async_timer_start(struct k_work_delayable *work, int32_t timeout)
{
	if ((timeout != SYS_FOREVER_US) && (timeout != 0)) {
		k_work_reschedule(work, K_USEC(timeout));
	}
}

static void disable_tx_events(const struct uart_xmc4xxx_config *config)
{
	if (config->fifo_tx_size > 0) {
		XMC_USIC_CH_TXFIFO_DisableEvent(config->uart,
					       XMC_USIC_CH_TXFIFO_EVENT_CONF_STANDARD);
	} else {
		XMC_USIC_CH_DisableEvent(config->uart, XMC_USIC_CH_EVENT_TRANSMIT_SHIFT);
	}
}
#endif

#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
static void enable_tx_events(const struct uart_xmc4xxx_config *config)
{
	if (config->fifo_tx_size > 0) {
		/* wait till the fifo has at least 1 byte free */
		while (XMC_USIC_CH_TXFIFO_IsFull(config->uart)) {
		}
		XMC_USIC_CH_TXFIFO_EnableEvent(config->uart,
					       XMC_USIC_CH_TXFIFO_EVENT_CONF_STANDARD);
	} else {
		XMC_USIC_CH_EnableEvent(config->uart, XMC_USIC_CH_EVENT_TRANSMIT_SHIFT);
	}
}

#define NVIC_ICPR_BASE 0xe000e280u
static void clear_pending_interrupt(int irq_num)
{
	uint32_t *clearpend = (uint32_t *)(NVIC_ICPR_BASE) + irq_num / 32;

	irq_num = irq_num & 0x1f;
	/* writing zero has no effect, i.e. we only clear irq_num */
	*clearpend = BIT(irq_num);
}

static void uart_xmc4xxx_isr(void *arg)
{
	const struct device *dev = arg;
	struct uart_xmc4xxx_data *data = dev->data;

#if defined(CONFIG_UART_INTERRUPT_DRIVEN)
	if (data->user_cb) {
		data->user_cb(dev, data->user_data);
	}
#endif

#if defined(CONFIG_UART_ASYNC_API)
	const struct uart_xmc4xxx_config *config = dev->config;
	unsigned int key = irq_lock();

	if (data->dma_rx.buffer_len) {
		/* We only need to trigger this irq once to start timer */
		/* event. Everything else is handled by the timer callback and dma_rx_callback. */
		/* Note that we can't simply disable the event that triggers this irq, since the */
		/* same service_request gets routed to the dma. Thus we disable the nvic irq */
		/* below. Any pending irq must be cleared before irq_enable() is called. */
		irq_disable(config->irq_num_rx);

		async_timer_start(&data->dma_rx.timeout_work, data->dma_rx.timeout);
	}
	irq_unlock(key);
#endif
}

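/* Map the TX and RX events of this USIC channel to the service request lines */
/* that correspond to the tx/rx interrupt numbers assigned in the devicetree. */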
static void uart_xmc4xxx_configure_service_requests(const struct device *dev)
{
	struct uart_xmc4xxx_data *data = dev->data;
	const struct uart_xmc4xxx_config *config = dev->config;

	__ASSERT(config->irq_num_tx >= USIC_IRQ_MIN && config->irq_num_tx <= USIC_IRQ_MAX,
		 "Invalid irq number\n");
	data->service_request_tx = (config->irq_num_tx - USIC_IRQ_MIN) % IRQS_PER_USIC;

	if (config->fifo_tx_size > 0) {
		XMC_USIC_CH_TXFIFO_SetInterruptNodePointer(
			config->uart, XMC_USIC_CH_TXFIFO_INTERRUPT_NODE_POINTER_STANDARD,
			data->service_request_tx);
	} else {
		XMC_USIC_CH_SetInterruptNodePointer(
			config->uart, XMC_USIC_CH_INTERRUPT_NODE_POINTER_TRANSMIT_SHIFT,
			data->service_request_tx);
	}

	__ASSERT(config->irq_num_rx >= USIC_IRQ_MIN && config->irq_num_rx <= USIC_IRQ_MAX,
		 "Invalid irq number\n");
	data->service_request_rx = (config->irq_num_rx - USIC_IRQ_MIN) % IRQS_PER_USIC;

	if (config->fifo_rx_size > 0) {
		XMC_USIC_CH_RXFIFO_SetInterruptNodePointer(
			config->uart, XMC_USIC_CH_RXFIFO_INTERRUPT_NODE_POINTER_STANDARD,
			data->service_request_rx);
		XMC_USIC_CH_RXFIFO_SetInterruptNodePointer(
			config->uart, XMC_USIC_CH_RXFIFO_INTERRUPT_NODE_POINTER_ALTERNATE,
			data->service_request_rx);
	} else {
		XMC_USIC_CH_SetInterruptNodePointer(config->uart,
						    XMC_USIC_CH_INTERRUPT_NODE_POINTER_RECEIVE,
						    data->service_request_rx);
		XMC_USIC_CH_SetInterruptNodePointer(
			config->uart, XMC_USIC_CH_INTERRUPT_NODE_POINTER_ALTERNATE_RECEIVE,
			data->service_request_rx);
	}
}

static int uart_xmc4xxx_irq_tx_ready(const struct device *dev)
{
	const struct uart_xmc4xxx_config *config = dev->config;

	if (config->fifo_tx_size > 0) {
		return !XMC_USIC_CH_TXFIFO_IsFull(config->uart);
	} else {
		return XMC_USIC_CH_GetTransmitBufferStatus(config->uart) ==
			XMC_USIC_CH_TBUF_STATUS_IDLE;
	}
}

static void uart_xmc4xxx_irq_rx_disable(const struct device *dev)
{
	const struct uart_xmc4xxx_config *config = dev->config;

	if (config->fifo_rx_size > 0) {
		XMC_USIC_CH_RXFIFO_DisableEvent(config->uart,
						XMC_USIC_CH_RXFIFO_EVENT_CONF_STANDARD |
						XMC_USIC_CH_RXFIFO_EVENT_CONF_ALTERNATE);
	} else {
		XMC_USIC_CH_DisableEvent(config->uart, XMC_USIC_CH_EVENT_STANDARD_RECEIVE |
						       XMC_USIC_CH_EVENT_ALTERNATIVE_RECEIVE);
	}
}

static void uart_xmc4xxx_irq_rx_enable(const struct device *dev)
{
	const struct uart_xmc4xxx_config *config = dev->config;
	uint32_t recv_status;

	/* re-enable the IRQ as it may have been disabled during async_rx */
	clear_pending_interrupt(config->irq_num_rx);
	irq_enable(config->irq_num_rx);

	if (config->fifo_rx_size > 0) {
		XMC_USIC_CH_RXFIFO_Flush(config->uart);
		XMC_USIC_CH_RXFIFO_SetSizeTriggerLimit(config->uart, config->fifo_rx_size, 0);
#if CONFIG_UART_XMC4XXX_RX_FIFO_INT_TRIGGER
		config->uart->RBCTR |= BIT(USIC_CH_RBCTR_SRBTEN_Pos);
#endif
		XMC_USIC_CH_RXFIFO_EnableEvent(config->uart,
					       XMC_USIC_CH_RXFIFO_EVENT_CONF_STANDARD |
					       XMC_USIC_CH_RXFIFO_EVENT_CONF_ALTERNATE);
	} else {
		/* flush out any received bytes while the uart rx irq was disabled */
		recv_status = XMC_USIC_CH_GetReceiveBufferStatus(config->uart);
		if (recv_status & USIC_CH_RBUFSR_RDV0_Msk) {
			XMC_UART_CH_GetReceivedData(config->uart);
		}
		if (recv_status & USIC_CH_RBUFSR_RDV1_Msk) {
			XMC_UART_CH_GetReceivedData(config->uart);
		}

		XMC_USIC_CH_EnableEvent(config->uart, XMC_USIC_CH_EVENT_STANDARD_RECEIVE |
						      XMC_USIC_CH_EVENT_ALTERNATIVE_RECEIVE);
	}
}
#endif

#if defined(CONFIG_UART_INTERRUPT_DRIVEN)

static int uart_xmc4xxx_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len)
{
	const struct uart_xmc4xxx_config *config = dev->config;
	int i = 0;

	for (i = 0; i < len; i++) {
		bool fifo_full;

		XMC_UART_CH_Transmit(config->uart, tx_data[i]);
		if (config->fifo_tx_size == 0) {
			return 1;
		}

		fifo_full = XMC_USIC_CH_TXFIFO_IsFull(config->uart);
		if (fifo_full) {
			return i + 1;
		}
	}
	return i;
}

static int uart_xmc4xxx_fifo_read(const struct device *dev, uint8_t *rx_data, const int size)
{
	const struct uart_xmc4xxx_config *config = dev->config;
	int i;

	for (i = 0; i < size; i++) {
		bool fifo_empty;

		if (config->fifo_rx_size > 0) {
			fifo_empty = XMC_USIC_CH_RXFIFO_IsEmpty(config->uart);
		} else {
			fifo_empty = !XMC_USIC_CH_GetReceiveBufferStatus(config->uart);
		}
		if (fifo_empty) {
			break;
		}
		rx_data[i] = XMC_UART_CH_GetReceivedData(config->uart);
	}
	return i;
}

static void uart_xmc4xxx_irq_tx_enable(const struct device *dev)
{
	const struct uart_xmc4xxx_config *config = dev->config;
	const struct uart_xmc4xxx_data *data = dev->data;

	clear_pending_interrupt(config->irq_num_tx);
	irq_enable(config->irq_num_tx);

	enable_tx_events(config);

	XMC_USIC_CH_TriggerServiceRequest(config->uart, data->service_request_tx);
}

static void uart_xmc4xxx_irq_tx_disable(const struct device *dev)
{
	const struct uart_xmc4xxx_config *config = dev->config;

	if (config->fifo_tx_size > 0) {
		XMC_USIC_CH_TXFIFO_DisableEvent(config->uart,
						XMC_USIC_CH_TXFIFO_EVENT_CONF_STANDARD);
	} else {
		XMC_USIC_CH_DisableEvent(config->uart, XMC_USIC_CH_EVENT_TRANSMIT_SHIFT);
	}
}

static int uart_xmc4xxx_irq_rx_ready(const struct device *dev)
{
	const struct uart_xmc4xxx_config *config = dev->config;

	if (config->fifo_rx_size > 0) {
		return !XMC_USIC_CH_RXFIFO_IsEmpty(config->uart);
	} else {
		return XMC_USIC_CH_GetReceiveBufferStatus(config->uart);
	}
}

static void uart_xmc4xxx_irq_callback_set(const struct device *dev,
					  uart_irq_callback_user_data_t cb, void *user_data)
{
	struct uart_xmc4xxx_data *data = dev->data;

	data->user_cb = cb;
	data->user_data = user_data;

#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	data->async_cb = NULL;
	data->async_user_data = NULL;
#endif
}

#define NVIC_ISPR_BASE 0xe000e200u
static int uart_xmc4xxx_irq_is_pending(const struct device *dev)
{
	const struct uart_xmc4xxx_config *config = dev->config;
	uint32_t irq_num_tx = config->irq_num_tx;
	uint32_t irq_num_rx = config->irq_num_rx;
	bool tx_pending;
	bool rx_pending;
	uint32_t setpend;

	/* the registers at NVIC_ISPR_BASE store which interrupts are pending */
	/* bit 0 -> irq 0, bit 1 -> irq 1, ... */
	setpend = *((uint32_t *)(NVIC_ISPR_BASE) + irq_num_tx / 32);
	irq_num_tx = irq_num_tx & 0x1f; /* take modulo 32 */
	tx_pending = setpend & BIT(irq_num_tx);

	setpend = *((uint32_t *)(NVIC_ISPR_BASE) + irq_num_rx / 32);
	irq_num_rx = irq_num_rx & 0x1f; /* take modulo 32 */
	rx_pending = setpend & BIT(irq_num_rx);

	return tx_pending || rx_pending;
}
#endif

#if defined(CONFIG_UART_ASYNC_API)
static inline void async_evt_rx_buf_request(struct uart_xmc4xxx_data *data)
{
	struct uart_event evt = {.type = UART_RX_BUF_REQUEST};

	if (data->async_cb) {
		data->async_cb(data->dev, &evt, data->async_user_data);
	}
}

static inline void async_evt_rx_release_buffer(struct uart_xmc4xxx_data *data, int buffer_type)
{
	struct uart_event event = {.type = UART_RX_BUF_RELEASED};

	if (buffer_type == NEXT_BUFFER && !data->rx_next_buffer) {
		return;
	}

	if (buffer_type == CURRENT_BUFFER && !data->dma_rx.buffer) {
		return;
	}

	if (buffer_type == NEXT_BUFFER) {
		event.data.rx_buf.buf = data->rx_next_buffer;
		data->rx_next_buffer = NULL;
		data->rx_next_buffer_len = 0;
	} else {
		event.data.rx_buf.buf = data->dma_rx.buffer;
		data->dma_rx.buffer = NULL;
		data->dma_rx.buffer_len = 0;
	}

	if (data->async_cb) {
		data->async_cb(data->dev, &event, data->async_user_data);
	}
}

static inline void async_evt_rx_stopped(struct uart_xmc4xxx_data *data,
					enum uart_rx_stop_reason reason)
{
	struct uart_event event = {.type = UART_RX_STOPPED, .data.rx_stop.reason = reason};
	struct uart_event_rx *rx = &event.data.rx_stop.data;
	struct dma_status stat;

	if (data->dma_rx.buffer_len == 0 || data->async_cb == NULL) {
		return;
	}

	rx->buf = data->dma_rx.buffer;
	if (dma_get_status(data->dma_rx.dma_dev, data->dma_rx.dma_channel, &stat) == 0) {
		data->dma_rx.counter = data->dma_rx.buffer_len - stat.pending_length;
	}

	rx->len = data->dma_rx.counter - data->dma_rx.offset;
	rx->offset = data->dma_rx.counter;

	data->async_cb(data->dev, &event, data->async_user_data);
}

static inline void async_evt_rx_disabled(struct uart_xmc4xxx_data *data)
{
	struct uart_event event = {.type = UART_RX_DISABLED};

	data->dma_rx.buffer = NULL;
	data->dma_rx.buffer_len = 0;
	data->dma_rx.offset = 0;
	data->dma_rx.counter = 0;

	if (data->async_cb) {
		data->async_cb(data->dev, &event, data->async_user_data);
	}
}

static inline void async_evt_rx_rdy(struct uart_xmc4xxx_data *data)
{
	struct uart_event event = {.type = UART_RX_RDY,
				   .data.rx.buf = (uint8_t *)data->dma_rx.buffer,
				   .data.rx.len = data->dma_rx.counter - data->dma_rx.offset,
				   .data.rx.offset = data->dma_rx.offset};

	data->dma_rx.offset = data->dma_rx.counter;

	if (event.data.rx.len > 0 && data->async_cb) {
		data->async_cb(data->dev, &event, data->async_user_data);
	}
}

static inline void async_evt_tx_done(struct uart_xmc4xxx_data *data)
{
	struct uart_event event = {.type = UART_TX_DONE,
				   .data.tx.buf = data->dma_tx.buffer,
				   .data.tx.len = data->dma_tx.counter};

	data->dma_tx.buffer = NULL;
	data->dma_tx.buffer_len = 0;
	data->dma_tx.counter = 0;

	if (data->async_cb) {
		data->async_cb(data->dev, &event, data->async_user_data);
	}
}

static inline void async_evt_tx_abort(struct uart_xmc4xxx_data *data)
{
	struct uart_event event = {.type = UART_TX_ABORTED,
				   .data.tx.buf = data->dma_tx.buffer,
				   .data.tx.len = data->dma_tx.counter};

	data->dma_tx.buffer = NULL;
	data->dma_tx.buffer_len = 0;
	data->dma_tx.counter = 0;

	if (data->async_cb) {
		data->async_cb(data->dev, &event, data->async_user_data);
	}
}

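/* Periodic timeout handler for the async rx path: report any bytes the dma has */
/* written to the buffer since the last UART_RX_RDY event and re-arm the timer. */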
static void uart_xmc4xxx_async_rx_timeout(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct uart_dma_stream *rx_stream =
		CONTAINER_OF(dwork, struct uart_dma_stream, timeout_work);
	struct uart_xmc4xxx_data *data = CONTAINER_OF(rx_stream, struct uart_xmc4xxx_data, dma_rx);
	struct dma_status stat;
	unsigned int key = irq_lock();

	if (data->dma_rx.buffer_len == 0) {
		irq_unlock(key);
		return;
	}

	if (dma_get_status(data->dma_rx.dma_dev, data->dma_rx.dma_channel, &stat) == 0) {
		size_t rx_rcv_len = data->dma_rx.buffer_len - stat.pending_length;

		if (rx_rcv_len > data->dma_rx.offset) {
			data->dma_rx.counter = rx_rcv_len;
			async_evt_rx_rdy(data);
		}
	}
	irq_unlock(key);
	async_timer_start(&data->dma_rx.timeout_work, data->dma_rx.timeout);
}

static int uart_xmc4xxx_async_tx_abort(const struct device *dev)
{
	struct uart_xmc4xxx_data *data = dev->data;
	struct dma_status stat;
	size_t tx_buffer_len;
	unsigned int key = irq_lock();

	k_work_cancel_delayable(&data->dma_tx.timeout_work);
	tx_buffer_len = data->dma_tx.buffer_len;

	if (tx_buffer_len == 0) {
		irq_unlock(key);
		return -EINVAL;
	}

	if (!dma_get_status(data->dma_tx.dma_dev, data->dma_tx.dma_channel, &stat)) {
		data->dma_tx.counter = tx_buffer_len - stat.pending_length;
	}

	dma_stop(data->dma_tx.dma_dev, data->dma_tx.dma_channel);
	disable_tx_events(dev->config);
	async_evt_tx_abort(data);

	irq_unlock(key);

	return 0;
}

static void uart_xmc4xxx_async_tx_timeout(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct uart_dma_stream *tx_stream =
		CONTAINER_OF(dwork, struct uart_dma_stream, timeout_work);
	struct uart_xmc4xxx_data *data = CONTAINER_OF(tx_stream, struct uart_xmc4xxx_data, dma_tx);

	uart_xmc4xxx_async_tx_abort(data->dev);
}

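/* One-time setup for async mode: check that the dma controllers are ready and */
/* fill in the static parts of the rx/tx dma block configs (peripheral address */
/* and address adjustment). */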
static int uart_xmc4xxx_async_init(const struct device *dev)
{
	const struct uart_xmc4xxx_config *config = dev->config;
	struct uart_xmc4xxx_data *data = dev->data;

	data->dev = dev;

	if (data->dma_rx.dma_dev != NULL) {
		if (!device_is_ready(data->dma_rx.dma_dev)) {
			return -ENODEV;
		}

		k_work_init_delayable(&data->dma_rx.timeout_work, uart_xmc4xxx_async_rx_timeout);
		if (config->fifo_rx_size > 0) {
			data->dma_rx.blk_cfg.source_address = (uint32_t)&config->uart->OUTR;
		} else {
			data->dma_rx.blk_cfg.source_address = (uint32_t)&config->uart->RBUF;
		}

		data->dma_rx.blk_cfg.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		data->dma_rx.blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
		data->dma_rx.dma_cfg.head_block = &data->dma_rx.blk_cfg;
		data->dma_rx.dma_cfg.user_data = (void *)dev;
	}

	if (data->dma_tx.dma_dev != NULL) {
		if (!device_is_ready(data->dma_tx.dma_dev)) {
			return -ENODEV;
		}

		k_work_init_delayable(&data->dma_tx.timeout_work, uart_xmc4xxx_async_tx_timeout);

		if (config->fifo_tx_size > 0) {
			data->dma_tx.blk_cfg.dest_address = (uint32_t)&config->uart->IN[0];
		} else {
			data->dma_tx.blk_cfg.dest_address = (uint32_t)&config->uart->TBUF[0];
		}

		data->dma_tx.blk_cfg.source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
		data->dma_tx.blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		data->dma_tx.dma_cfg.head_block = &data->dma_tx.blk_cfg;
		data->dma_tx.dma_cfg.user_data = (void *)dev;
	}

	return 0;
}

static int uart_xmc4xxx_async_callback_set(const struct device *dev, uart_callback_t callback,
					   void *user_data)
{
	struct uart_xmc4xxx_data *data = dev->data;

	data->async_cb = callback;
	data->async_user_data = user_data;

#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	data->user_cb = NULL;
	data->user_data = NULL;
#endif

	return 0;
}

static int uart_xmc4xxx_async_tx(const struct device *dev, const uint8_t *tx_data, size_t buf_size,
				 int32_t timeout)
{
	struct uart_xmc4xxx_data *data = dev->data;
	const struct uart_xmc4xxx_config *config = dev->config;
	int ret;

	/* Assume threads are pre-emptive so this call cannot be interrupted */
	/* by uart_xmc4xxx_async_tx_abort */
	if (data->dma_tx.dma_dev == NULL) {
		return -ENODEV;
	}

	if (tx_data == NULL || buf_size == 0) {
		return -EINVAL;
	}

	/* No need to lock irq. Isr uart_xmc4xxx_dma_tx_cb() will only trigger if */
	/* dma_tx.buffer_len != 0 */
	if (data->dma_tx.buffer_len != 0) {
		return -EBUSY;
	}

	data->dma_tx.buffer = (uint8_t *)tx_data;
	data->dma_tx.buffer_len = buf_size;
	data->dma_tx.timeout = timeout;

	/* set source address */
	data->dma_tx.blk_cfg.source_address = (uint32_t)data->dma_tx.buffer;
	data->dma_tx.blk_cfg.block_size = data->dma_tx.buffer_len;

	ret = dma_config(data->dma_tx.dma_dev, data->dma_tx.dma_channel, &data->dma_tx.dma_cfg);
	if (ret < 0) {
		return ret;
	}

	/* make sure the tx is not transmitting */
	while (!uart_xmc4xxx_irq_tx_ready(dev)) {
	};

	/* Tx irq is not used in async mode so disable it */
	irq_disable(config->irq_num_tx);
	enable_tx_events(config);
	XMC_USIC_CH_TriggerServiceRequest(config->uart, data->service_request_tx);

	async_timer_start(&data->dma_tx.timeout_work, data->dma_tx.timeout);

	return dma_start(data->dma_tx.dma_dev, data->dma_tx.dma_channel);
}

static int uart_xmc4xxx_async_rx_enable(const struct device *dev, uint8_t *buf, size_t len,
					int32_t timeout)
{
	struct uart_xmc4xxx_data *data = dev->data;
	int ret;

	if (data->dma_rx.dma_dev == NULL) {
		return -ENODEV;
	}

	if (data->dma_rx.buffer_len != 0) {
		return -EBUSY;
	}

	uart_xmc4xxx_irq_rx_disable(dev);

	data->dma_rx.buffer = buf;
	data->dma_rx.buffer_len = len;
	data->dma_rx.timeout = timeout;

	data->dma_rx.blk_cfg.dest_address = (uint32_t)data->dma_rx.buffer;
	data->dma_rx.blk_cfg.block_size = data->dma_rx.buffer_len;

	ret = dma_config(data->dma_rx.dma_dev, data->dma_rx.dma_channel, &data->dma_rx.dma_cfg);
	if (ret < 0) {
		return ret;
	}

	/* Request buffers before enabling rx. It's unlikely, but we may not */
	/* request a new buffer in time (for example if receive buffer size is one byte). */
	async_evt_rx_buf_request(data);
	uart_xmc4xxx_irq_rx_enable(dev);

	return dma_start(data->dma_rx.dma_dev, data->dma_rx.dma_channel);
}

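/* Called by the dma driver when the current rx buffer has been filled (or on */
/* error). Swaps in the next buffer, if one was provided, and restarts the dma. */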
static void uart_xmc4xxx_dma_rx_cb(const struct device *dma_dev, void *user_data, uint32_t channel,
				   int status)
{
	const struct device *dev_uart = user_data;
	struct uart_xmc4xxx_data *data = dev_uart->data;
	unsigned int key;
	int ret;

	__ASSERT_NO_MSG(channel == data->dma_rx.dma_channel);
	key = irq_lock();
	k_work_cancel_delayable(&data->dma_rx.timeout_work);

	if (status < 0) {
		async_evt_rx_stopped(data, UART_ERROR_OVERRUN);
		uart_xmc4xxx_irq_rx_disable(dev_uart);
		dma_stop(data->dma_rx.dma_dev, data->dma_rx.dma_channel);
		async_evt_rx_release_buffer(data, CURRENT_BUFFER);
		async_evt_rx_release_buffer(data, NEXT_BUFFER);
		async_evt_rx_disabled(data);
		goto done;
	}

	if (data->dma_rx.buffer_len == 0) {
		goto done;
	}

	data->dma_rx.counter = data->dma_rx.buffer_len;
	async_evt_rx_rdy(data);

	async_evt_rx_release_buffer(data, CURRENT_BUFFER);

	if (!data->rx_next_buffer) {
		uart_xmc4xxx_irq_rx_disable(dev_uart);
		dma_stop(data->dma_rx.dma_dev, data->dma_rx.dma_channel);
		async_evt_rx_disabled(data);
		goto done;
	}

	data->dma_rx.buffer = data->rx_next_buffer;
	data->dma_rx.buffer_len = data->rx_next_buffer_len;
	data->dma_rx.offset = 0;
	data->dma_rx.counter = 0;
	data->rx_next_buffer = NULL;
	data->rx_next_buffer_len = 0;

	ret = dma_reload(data->dma_rx.dma_dev, data->dma_rx.dma_channel,
			 data->dma_rx.blk_cfg.source_address, (uint32_t)data->dma_rx.buffer,
			 data->dma_rx.buffer_len);

	if (ret < 0) {
		uart_xmc4xxx_irq_rx_disable(dev_uart);
		dma_stop(data->dma_rx.dma_dev, data->dma_rx.dma_channel);
		async_evt_rx_release_buffer(data, CURRENT_BUFFER);
		async_evt_rx_disabled(data);
		goto done;
	}

	dma_start(data->dma_rx.dma_dev, data->dma_rx.dma_channel);

	async_evt_rx_buf_request(data);
	async_timer_start(&data->dma_rx.timeout_work, data->dma_rx.timeout);
done:
	irq_unlock(key);
}

static int uart_xmc4xxx_async_rx_disable(const struct device *dev)
{
	struct uart_xmc4xxx_data *data = dev->data;
	struct dma_status stat;
	unsigned int key;

	k_work_cancel_delayable(&data->dma_rx.timeout_work);

	key = irq_lock();

	if (data->dma_rx.buffer_len == 0) {
		__ASSERT_NO_MSG(data->dma_rx.buffer == NULL);
		irq_unlock(key);
		return -EINVAL;
	}

	dma_stop(data->dma_rx.dma_dev, data->dma_rx.dma_channel);
	uart_xmc4xxx_irq_rx_disable(dev);

	if (dma_get_status(data->dma_rx.dma_dev, data->dma_rx.dma_channel, &stat) == 0) {
		size_t rx_rcv_len = data->dma_rx.buffer_len - stat.pending_length;

		if (rx_rcv_len > data->dma_rx.offset) {
			data->dma_rx.counter = rx_rcv_len;
			async_evt_rx_rdy(data);
		}
	}

	async_evt_rx_release_buffer(data, CURRENT_BUFFER);
	async_evt_rx_release_buffer(data, NEXT_BUFFER);
	async_evt_rx_disabled(data);

	irq_unlock(key);

	return 0;
}

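/* Called by the dma driver when the tx transfer completes. Reports UART_TX_DONE */
/* and stops the dma unless the callback queued another transmission. */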
static void uart_xmc4xxx_dma_tx_cb(const struct device *dma_dev, void *user_data, uint32_t channel,
				   int status)
{
	const struct device *dev_uart = user_data;
	struct uart_xmc4xxx_data *data = dev_uart->data;
	size_t tx_buffer_len = data->dma_tx.buffer_len;
	struct dma_status stat;

	if (status != 0) {
		return;
	}

	__ASSERT_NO_MSG(channel == data->dma_tx.dma_channel);

	k_work_cancel_delayable(&data->dma_tx.timeout_work);

	if (tx_buffer_len == 0) {
		return;
	}

	if (!dma_get_status(data->dma_tx.dma_dev, channel, &stat)) {
		data->dma_tx.counter = tx_buffer_len - stat.pending_length;
	}

	async_evt_tx_done(data);
	/* if the callback doesn't do a chained uart_tx write, then stop the dma */
	if (data->dma_tx.buffer == NULL) {
		dma_stop(data->dma_tx.dma_dev, data->dma_tx.dma_channel);
		disable_tx_events(dev_uart->config);
	}
}

static int uart_xmc4xxx_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len)
{
	struct uart_xmc4xxx_data *data = dev->data;
	unsigned int key;
	int ret = 0;

	key = irq_lock();

	if (data->dma_rx.buffer_len == 0U) {
		ret = -EACCES;
		goto done;
	}

	if (data->rx_next_buffer_len != 0U) {
		ret = -EBUSY;
		goto done;
	}

	data->rx_next_buffer = buf;
	data->rx_next_buffer_len = len;

done:
	irq_unlock(key);
	return ret;
}

#endif

static int uart_xmc4xxx_init(const struct device *dev)
{
	int ret;
	const struct uart_xmc4xxx_config *config = dev->config;
	struct uart_xmc4xxx_data *data = dev->data;
	uint8_t fifo_offset = config->fifo_start_offset;

	data->config.data_bits = 8U;
	data->config.stop_bits = 1U;

	XMC_UART_CH_Init(config->uart, &(data->config));

	if (config->fifo_tx_size > 0) {
		/* fifos need to be aligned on fifo size */
		fifo_offset = ROUND_UP(fifo_offset, BIT(config->fifo_tx_size));
		XMC_USIC_CH_TXFIFO_Configure(config->uart, fifo_offset, config->fifo_tx_size, 1);
		fifo_offset += BIT(config->fifo_tx_size);
	}

	if (config->fifo_rx_size > 0) {
		/* fifos need to be aligned on fifo size */
		fifo_offset = ROUND_UP(fifo_offset, BIT(config->fifo_rx_size));
		XMC_USIC_CH_RXFIFO_Configure(config->uart, fifo_offset, config->fifo_rx_size, 0);
		fifo_offset += BIT(config->fifo_rx_size);
	}

	if (fifo_offset > MAX_FIFO_SIZE) {
		return -EINVAL;
	}

	/* Connect UART RX to logical 1. It is connected to the proper pin after pinctrl is applied */
	XMC_UART_CH_SetInputSource(config->uart, XMC_UART_CH_INPUT_RXD, 0x7);

	/* Start the UART before pinctrl, because the USIC drives the TX line */
	/* low in the off state */
	XMC_UART_CH_Start(config->uart);

	ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret < 0) {
		return ret;
	}
	/* Connect UART RX to the target pin */
	XMC_UART_CH_SetInputSource(config->uart, XMC_UART_CH_INPUT_RXD,
				   config->input_src);

#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
	config->irq_config_func(dev);
	uart_xmc4xxx_configure_service_requests(dev);
#endif

#if defined(CONFIG_UART_ASYNC_API)
	ret = uart_xmc4xxx_async_init(dev);
#endif

	return ret;
}

static DEVICE_API(uart, uart_xmc4xxx_driver_api) = {
	.poll_in = uart_xmc4xxx_poll_in,
	.poll_out = uart_xmc4xxx_poll_out,
#if defined(CONFIG_UART_INTERRUPT_DRIVEN)
	.fifo_fill = uart_xmc4xxx_fifo_fill,
	.fifo_read = uart_xmc4xxx_fifo_read,
	.irq_tx_enable = uart_xmc4xxx_irq_tx_enable,
	.irq_tx_disable = uart_xmc4xxx_irq_tx_disable,
	.irq_tx_ready = uart_xmc4xxx_irq_tx_ready,
	.irq_rx_enable = uart_xmc4xxx_irq_rx_enable,
	.irq_rx_disable = uart_xmc4xxx_irq_rx_disable,
	.irq_rx_ready = uart_xmc4xxx_irq_rx_ready,
	.irq_callback_set = uart_xmc4xxx_irq_callback_set,
	.irq_is_pending = uart_xmc4xxx_irq_is_pending,
#endif
#if defined(CONFIG_UART_ASYNC_API)
	.callback_set = uart_xmc4xxx_async_callback_set,
	.tx = uart_xmc4xxx_async_tx,
	.tx_abort = uart_xmc4xxx_async_tx_abort,
	.rx_enable = uart_xmc4xxx_async_rx_enable,
	.rx_buf_rsp = uart_xmc4xxx_rx_buf_rsp,
	.rx_disable = uart_xmc4xxx_async_rx_disable,
#endif
};

#ifdef CONFIG_UART_ASYNC_API
#define UART_DMA_CHANNEL_INIT(index, dir, ch_dir, src_burst, dst_burst)                            \
	.dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(index, dir)),                           \
	.dma_channel = DT_INST_DMAS_CELL_BY_NAME(index, dir, channel),                             \
	.dma_cfg = {                                                                               \
		.dma_slot = DT_INST_DMAS_CELL_BY_NAME(index, dir, config),                         \
		.channel_direction = ch_dir,                                                       \
		.channel_priority = DT_INST_DMAS_CELL_BY_NAME(index, dir, priority),               \
		.source_data_size = 1,                                                             \
		.dest_data_size = 1,                                                               \
		.source_burst_length = src_burst,                                                  \
		.dest_burst_length = dst_burst,                                                    \
		.block_count = 1,                                                                  \
		.dma_callback = uart_xmc4xxx_dma_##dir##_cb,                                       \
	},

#define UART_DMA_CHANNEL(index, dir, ch_dir, src_burst, dst_burst)                                 \
	.dma_##dir = {COND_CODE_1(                                                                 \
		DT_INST_DMAS_HAS_NAME(index, dir),                                                 \
		(UART_DMA_CHANNEL_INIT(index, dir, ch_dir, src_burst, dst_burst)), (NULL))},
#else
#define UART_DMA_CHANNEL(index, dir, ch_dir, src_burst, dst_burst)
#endif

#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
#define XMC4XXX_IRQ_HANDLER(index)                                                         \
static void uart_xmc4xxx_irq_setup_##index(const struct device *dev)                       \
{                                                                                          \
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(index, tx, irq),                                   \
		    DT_INST_IRQ_BY_NAME(index, tx, priority), uart_xmc4xxx_isr,            \
		    DEVICE_DT_INST_GET(index), 0);                                         \
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(index, rx, irq),                                   \
		    DT_INST_IRQ_BY_NAME(index, rx, priority), uart_xmc4xxx_isr,            \
		    DEVICE_DT_INST_GET(index), 0);                                         \
	irq_enable(DT_INST_IRQ_BY_NAME(index, tx, irq));                                   \
	irq_enable(DT_INST_IRQ_BY_NAME(index, rx, irq));                                   \
}

#define XMC4XXX_IRQ_STRUCT_INIT(index)                                                     \
	.irq_config_func = uart_xmc4xxx_irq_setup_##index,                                 \
	.irq_num_tx = DT_INST_IRQ_BY_NAME(index, tx, irq),                                 \
	.irq_num_rx = DT_INST_IRQ_BY_NAME(index, rx, irq),

#else
#define XMC4XXX_IRQ_HANDLER(index)
#define XMC4XXX_IRQ_STRUCT_INIT(index)
#endif

#define XMC4XXX_INIT(index)						\
PINCTRL_DT_INST_DEFINE(index);						\
XMC4XXX_IRQ_HANDLER(index)						\
static struct uart_xmc4xxx_data xmc4xxx_data_##index = {		\
	.config.baudrate = DT_INST_PROP(index, current_speed),		\
	UART_DMA_CHANNEL(index, tx, MEMORY_TO_PERIPHERAL, 8, 1)         \
	UART_DMA_CHANNEL(index, rx, PERIPHERAL_TO_MEMORY, 1, 8)         \
};									\
									\
static const struct uart_xmc4xxx_config xmc4xxx_config_##index = {	\
	.uart = (XMC_USIC_CH_t *)DT_INST_REG_ADDR(index),		\
	.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(index),			\
	.input_src = DT_INST_ENUM_IDX(index, input_src),		\
XMC4XXX_IRQ_STRUCT_INIT(index)						\
	.fifo_start_offset = DT_INST_PROP(index, fifo_start_offset),    \
	.fifo_tx_size = DT_INST_ENUM_IDX(index, fifo_tx_size),          \
	.fifo_rx_size = DT_INST_ENUM_IDX(index, fifo_rx_size),          \
};									\
									\
	DEVICE_DT_INST_DEFINE(index, uart_xmc4xxx_init,			\
			    NULL,					\
			    &xmc4xxx_data_##index,			\
			    &xmc4xxx_config_##index, PRE_KERNEL_1,	\
			    CONFIG_SERIAL_INIT_PRIORITY,		\
			    &uart_xmc4xxx_driver_api);

DT_INST_FOREACH_STATUS_OKAY(XMC4XXX_INIT)