1 /*
2  * Copyright (c) 2020 Henrik Brix Andersen <henrik@brixandersen.dk>
3  *
4  * Based on uart_mcux_lpuart.c, which is:
5  * Copyright (c) 2017, NXP
6  *
7  * SPDX-License-Identifier: Apache-2.0
8  */
9 
10 #define DT_DRV_COMPAT xlnx_xps_uartlite_1_00_a
11 
12 #include <device.h>
13 #include <drivers/uart.h>
14 #include <sys/sys_io.h>
15 
/* AXI UART Lite v2 registers offsets (See Xilinx PG142 for details) */
#define RX_FIFO_OFFSET  0x00
#define TX_FIFO_OFFSET  0x04
#define STAT_REG_OFFSET 0x08
#define CTRL_REG_OFFSET 0x0c

/* STAT_REG bit definitions */
#define STAT_REG_RX_FIFO_VALID_DATA BIT(0)
#define STAT_REG_RX_FIFO_FULL       BIT(1)
#define STAT_REG_TX_FIFO_EMPTY      BIT(2)
#define STAT_REG_TX_FIFO_FULL       BIT(3)
#define STAT_REG_INTR_ENABLED       BIT(4)
#define STAT_REG_OVERRUN_ERROR      BIT(5)
#define STAT_REG_FRAME_ERROR        BIT(6)
#define STAT_REG_PARITY_ERROR       BIT(7)

/* STAT_REG bit masks (covers the overrun/frame/parity error bits 7..5) */
#define STAT_REG_ERROR_MASK GENMASK(7, 5)

/* CTRL_REG bit definitions */
#define CTRL_REG_RST_TX_FIFO BIT(0)
#define CTRL_REG_RST_RX_FIFO BIT(1)
#define CTRL_REG_ENABLE_INTR BIT(4)
39 
/* Per-instance constant configuration (from devicetree) */
struct xlnx_uartlite_config {
	/* Base address of the memory-mapped UART Lite register block */
	mm_reg_t base;
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	/* Hook that connects and enables the instance IRQ during init */
	void (*irq_config_func)(const struct device *dev);
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
};
46 
/* Per-instance mutable runtime state */
struct xlnx_uartlite_data {
	/* Error bits (STAT_REG bits 7..5) cached across destructive
	 * STAT_REG reads; cleared by xlnx_uartlite_clear_status()
	 */
	uint32_t errors;
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	/* Back-pointer for use from the soft-ISR timer context */
	const struct device *dev;
	/* One-shot timer used to re-deliver the TX-empty edge event */
	struct k_timer timer;
	/* User IRQ callback and its opaque argument */
	uart_irq_callback_user_data_t callback;
	void *callback_data;

	/* Software-side TX/RX IRQ enable state; the hardware interrupt
	 * enable is shared between both directions
	 */
	volatile uint8_t tx_irq_enabled : 1;
	volatile uint8_t rx_irq_enabled : 1;
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
};
59 
xlnx_uartlite_read_status(const struct device * dev)60 static inline uint32_t xlnx_uartlite_read_status(const struct device *dev)
61 {
62 	const struct xlnx_uartlite_config *config = dev->config;
63 	struct xlnx_uartlite_data *data = dev->data;
64 	uint32_t status;
65 
66 	/* Cache errors as they are cleared by reading the STAT_REG */
67 	status = sys_read32(config->base + STAT_REG_OFFSET);
68 	data->errors &= (status & STAT_REG_ERROR_MASK);
69 
70 	/* Return current status and previously cached errors */
71 	return status | data->errors;
72 }
73 
xlnx_uartlite_clear_status(const struct device * dev)74 static inline void xlnx_uartlite_clear_status(const struct device *dev)
75 {
76 	struct xlnx_uartlite_data *data = dev->data;
77 
78 	/* Clear cached errors */
79 	data->errors = 0;
80 }
81 
xlnx_uartlite_read_rx_fifo(const struct device * dev)82 static inline unsigned char xlnx_uartlite_read_rx_fifo(const struct device *dev)
83 {
84 	const struct xlnx_uartlite_config *config = dev->config;
85 
86 	return (sys_read32(config->base + RX_FIFO_OFFSET) & BIT_MASK(8));
87 }
88 
xlnx_uartlite_write_tx_fifo(const struct device * dev,unsigned char c)89 static inline void xlnx_uartlite_write_tx_fifo(const struct device *dev,
90 					       unsigned char c)
91 {
92 	const struct xlnx_uartlite_config *config = dev->config;
93 
94 	sys_write32((uint32_t)c, config->base + TX_FIFO_OFFSET);
95 }
96 
xlnx_uartlite_poll_in(const struct device * dev,unsigned char * c)97 static int xlnx_uartlite_poll_in(const struct device *dev, unsigned char *c)
98 {
99 	if (xlnx_uartlite_read_status(dev) & STAT_REG_RX_FIFO_VALID_DATA) {
100 		*c = xlnx_uartlite_read_rx_fifo(dev);
101 		return 0;
102 	}
103 
104 	return -1;
105 }
106 
xlnx_uartlite_poll_out(const struct device * dev,unsigned char c)107 static void xlnx_uartlite_poll_out(const struct device *dev, unsigned char c)
108 {
109 	while (xlnx_uartlite_read_status(dev) & STAT_REG_TX_FIFO_FULL) {
110 	}
111 
112 	xlnx_uartlite_write_tx_fifo(dev, c);
113 }
114 
xlnx_uartlite_err_check(const struct device * dev)115 static int xlnx_uartlite_err_check(const struct device *dev)
116 {
117 	uint32_t status = xlnx_uartlite_read_status(dev);
118 	int err = 0;
119 
120 	if (status & STAT_REG_OVERRUN_ERROR) {
121 		err |= UART_ERROR_OVERRUN;
122 	}
123 
124 	if (status & STAT_REG_PARITY_ERROR) {
125 		err |= UART_ERROR_PARITY;
126 	}
127 
128 	if (status & STAT_REG_FRAME_ERROR) {
129 		err |= UART_ERROR_FRAMING;
130 	}
131 
132 	xlnx_uartlite_clear_status(dev);
133 
134 	return err;
135 }
136 
137 #ifdef CONFIG_UART_INTERRUPT_DRIVEN
xlnx_uartlite_irq_enable(const struct device * dev)138 static inline void xlnx_uartlite_irq_enable(const struct device *dev)
139 {
140 	const struct xlnx_uartlite_config *config = dev->config;
141 
142 	sys_write32(CTRL_REG_ENABLE_INTR, config->base + CTRL_REG_OFFSET);
143 }
144 
xlnx_uartlite_irq_cond_disable(const struct device * dev)145 static inline void xlnx_uartlite_irq_cond_disable(const struct device *dev)
146 {
147 	const struct xlnx_uartlite_config *config = dev->config;
148 	struct xlnx_uartlite_data *data = dev->data;
149 
150 	/* TX and RX IRQs are shared. Only disable if both are disabled. */
151 	if (!data->tx_irq_enabled && !data->rx_irq_enabled) {
152 		sys_write32(0, config->base + CTRL_REG_OFFSET);
153 	}
154 }
155 
xlnx_uartlite_fifo_fill(const struct device * dev,const uint8_t * tx_data,int len)156 static int xlnx_uartlite_fifo_fill(const struct device *dev,
157 				   const uint8_t *tx_data,
158 				   int len)
159 {
160 	uint32_t status = xlnx_uartlite_read_status(dev);
161 	int count = 0U;
162 
163 	while ((len - count > 0) && (status & STAT_REG_TX_FIFO_FULL) == 0U) {
164 		xlnx_uartlite_write_tx_fifo(dev, tx_data[count++]);
165 		status = xlnx_uartlite_read_status(dev);
166 	}
167 
168 	return count;
169 }
170 
/* Drain up to len bytes from the RX FIFO; returns the number read */
static int xlnx_uartlite_fifo_read(const struct device *dev, uint8_t *rx_data,
				   const int len)
{
	int nread = 0;
	uint32_t stat = xlnx_uartlite_read_status(dev);

	while ((nread < len) && (stat & STAT_REG_RX_FIFO_VALID_DATA) != 0U) {
		rx_data[nread] = xlnx_uartlite_read_rx_fifo(dev);
		nread++;
		/* Re-sample status after each byte */
		stat = xlnx_uartlite_read_status(dev);
	}

	return nread;
}
184 
xlnx_uartlite_tx_soft_isr(struct k_timer * timer)185 static void xlnx_uartlite_tx_soft_isr(struct k_timer *timer)
186 {
187 	struct xlnx_uartlite_data *data =
188 		CONTAINER_OF(timer, struct xlnx_uartlite_data, timer);
189 
190 	if (data->callback) {
191 		data->callback(data->dev, data->callback_data);
192 	}
193 }
194 
xlnx_uartlite_irq_tx_enable(const struct device * dev)195 static void xlnx_uartlite_irq_tx_enable(const struct device *dev)
196 {
197 	struct xlnx_uartlite_data *data = dev->data;
198 	uint32_t status;
199 
200 	data->tx_irq_enabled = true;
201 	status = xlnx_uartlite_read_status(dev);
202 	xlnx_uartlite_irq_enable(dev);
203 
204 	if ((status & STAT_REG_TX_FIFO_EMPTY) && data->callback) {
205 		/*
206 		 * TX_FIFO_EMPTY event already generated an edge
207 		 * interrupt. Generate a soft interrupt and have it call the
208 		 * callback function in timer isr context.
209 		 */
210 		k_timer_start(&data->timer, K_NO_WAIT, K_NO_WAIT);
211 	}
212 }
213 
xlnx_uartlite_irq_tx_disable(const struct device * dev)214 static void xlnx_uartlite_irq_tx_disable(const struct device *dev)
215 {
216 	struct xlnx_uartlite_data *data = dev->data;
217 
218 	data->tx_irq_enabled = false;
219 	xlnx_uartlite_irq_cond_disable(dev);
220 }
221 
xlnx_uartlite_irq_tx_ready(const struct device * dev)222 static int xlnx_uartlite_irq_tx_ready(const struct device *dev)
223 {
224 	struct xlnx_uartlite_data *data = dev->data;
225 	uint32_t status = xlnx_uartlite_read_status(dev);
226 
227 	return (((status & STAT_REG_TX_FIFO_FULL) == 0U) &&
228 		data->tx_irq_enabled);
229 }
230 
xlnx_uartlite_irq_tx_complete(const struct device * dev)231 static int xlnx_uartlite_irq_tx_complete(const struct device *dev)
232 {
233 	uint32_t status = xlnx_uartlite_read_status(dev);
234 
235 	return (status & STAT_REG_TX_FIFO_EMPTY);
236 }
237 
xlnx_uartlite_irq_rx_enable(const struct device * dev)238 static void xlnx_uartlite_irq_rx_enable(const struct device *dev)
239 {
240 	struct xlnx_uartlite_data *data = dev->data;
241 
242 	data->rx_irq_enabled = true;
243 	/* RX_FIFO_VALID_DATA generates a level interrupt */
244 	xlnx_uartlite_irq_enable(dev);
245 }
246 
xlnx_uartlite_irq_rx_disable(const struct device * dev)247 static void xlnx_uartlite_irq_rx_disable(const struct device *dev)
248 {
249 	struct xlnx_uartlite_data *data = dev->data;
250 
251 	data->rx_irq_enabled = false;
252 	xlnx_uartlite_irq_cond_disable(dev);
253 }
254 
xlnx_uartlite_irq_rx_ready(const struct device * dev)255 static int xlnx_uartlite_irq_rx_ready(const struct device *dev)
256 {
257 	struct xlnx_uartlite_data *data = dev->data;
258 	uint32_t status = xlnx_uartlite_read_status(dev);
259 
260 	return ((status & STAT_REG_RX_FIFO_VALID_DATA) &&
261 		data->rx_irq_enabled);
262 }
263 
/* Non-zero if either the TX or the RX side has a pending condition */
static int xlnx_uartlite_irq_is_pending(const struct device *dev)
{
	if (xlnx_uartlite_irq_tx_ready(dev)) {
		return 1;
	}

	return xlnx_uartlite_irq_rx_ready(dev);
}
269 
/* Nothing to latch: all status is read on demand from STAT_REG */
static int xlnx_uartlite_irq_update(const struct device *dev)
{
	return 1;
}
274 
/* Register the user callback shared by the TX and RX interrupt paths */
static void xlnx_uartlite_irq_callback_set(const struct device *dev,
					   uart_irq_callback_user_data_t cb,
					   void *user_data)
{
	struct xlnx_uartlite_data *priv = dev->data;

	priv->callback_data = user_data;
	priv->callback = cb;
}
284 
xlnx_uartlite_isr(const struct device * dev)285 static __unused void xlnx_uartlite_isr(const struct device *dev)
286 {
287 	struct xlnx_uartlite_data *data = dev->data;
288 
289 	if (data->callback) {
290 		data->callback(dev, data->callback_data);
291 	}
292 }
293 
294 #endif /* CONFIG_UART_INTERRUPT_DRIVEN */
295 
xlnx_uartlite_init(const struct device * dev)296 static int xlnx_uartlite_init(const struct device *dev)
297 {
298 	const struct xlnx_uartlite_config *config = dev->config;
299 #ifdef CONFIG_UART_INTERRUPT_DRIVEN
300 	struct xlnx_uartlite_data *data = dev->data;
301 
302 	data->dev = dev;
303 	k_timer_init(&data->timer, &xlnx_uartlite_tx_soft_isr, NULL);
304 #endif /* CONFIG_UART_INTERRUPT_DRIVEN */
305 
306 	/* Reset FIFOs and disable interrupts */
307 	sys_write32(CTRL_REG_RST_RX_FIFO | CTRL_REG_RST_TX_FIFO,
308 		    config->base + CTRL_REG_OFFSET);
309 
310 #ifdef CONFIG_UART_INTERRUPT_DRIVEN
311 	config->irq_config_func(dev);
312 #endif /* CONFIG_UART_INTERRUPT_DRIVEN */
313 
314 	return 0;
315 }
316 
/* Zephyr UART driver API vtable for this driver */
static const struct uart_driver_api xlnx_uartlite_driver_api = {
	.poll_in = xlnx_uartlite_poll_in,
	.poll_out = xlnx_uartlite_poll_out,
	.err_check = xlnx_uartlite_err_check,
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	.fifo_fill = xlnx_uartlite_fifo_fill,
	.fifo_read = xlnx_uartlite_fifo_read,
	.irq_tx_enable = xlnx_uartlite_irq_tx_enable,
	.irq_tx_disable = xlnx_uartlite_irq_tx_disable,
	.irq_tx_ready = xlnx_uartlite_irq_tx_ready,
	.irq_tx_complete = xlnx_uartlite_irq_tx_complete,
	.irq_rx_enable = xlnx_uartlite_irq_rx_enable,
	.irq_rx_disable = xlnx_uartlite_irq_rx_disable,
	.irq_rx_ready = xlnx_uartlite_irq_rx_ready,
	.irq_is_pending = xlnx_uartlite_irq_is_pending,
	.irq_update = xlnx_uartlite_irq_update,
	.irq_callback_set = xlnx_uartlite_irq_callback_set,
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
};
336 
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
/* Connect and enable IRQ index i of instance n */
#define XLNX_UARTLITE_IRQ_INIT(n, i)					\
	do {								\
		IRQ_CONNECT(DT_INST_IRQ_BY_IDX(n, i, irq),		\
			    DT_INST_IRQ_BY_IDX(n, i, priority),		\
			    xlnx_uartlite_isr,				\
			    DEVICE_DT_INST_GET(n), 0);			\
									\
		irq_enable(DT_INST_IRQ_BY_IDX(n, i, irq));		\
	} while (0)
/* Per-instance irq_config_func definition */
#define XLNX_UARTLITE_CONFIG_FUNC(n)					\
	static void xlnx_uartlite_config_func_##n(const struct device *dev) \
	{								\
		/* IRQ line not always present on all instances */	\
		IF_ENABLED(DT_INST_IRQ_HAS_IDX(n, 0),			\
			   (XLNX_UARTLITE_IRQ_INIT(n, 0);))		\
	}
/* Config-struct initializer fragment pointing at the function above */
#define XLNX_UARTLITE_IRQ_CFG_FUNC_INIT(n)				\
	.irq_config_func = xlnx_uartlite_config_func_##n
#define XLNX_UARTLITE_INIT_CFG(n)					\
	XLNX_UARTLITE_DECLARE_CFG(n, XLNX_UARTLITE_IRQ_CFG_FUNC_INIT(n))
#else
/* Interrupt support disabled: the IRQ hooks collapse to nothing */
#define XLNX_UARTLITE_CONFIG_FUNC(n)
#define XLNX_UARTLITE_IRQ_CFG_FUNC_INIT
#define XLNX_UARTLITE_INIT_CFG(n)					\
	XLNX_UARTLITE_DECLARE_CFG(n, XLNX_UARTLITE_IRQ_CFG_FUNC_INIT)
#endif
364 
/* Define the const config struct for instance n; IRQ_FUNC_INIT supplies
 * the (optional) .irq_config_func member initializer.
 */
#define XLNX_UARTLITE_DECLARE_CFG(n, IRQ_FUNC_INIT)			\
static const struct xlnx_uartlite_config xlnx_uartlite_##n##_config = {	\
	.base = DT_INST_REG_ADDR(n),					\
	IRQ_FUNC_INIT							\
}
370 
/* Instantiate data, forward-declare config, and register the device for
 * devicetree instance n. The config is only declared here; its definition
 * comes from XLNX_UARTLITE_INIT_CFG after the config func exists.
 */
#define XLNX_UARTLITE_INIT(n)						\
	static struct xlnx_uartlite_data xlnx_uartlite_##n##_data;	\
									\
	static const struct xlnx_uartlite_config xlnx_uartlite_##n##_config;\
									\
	DEVICE_DT_INST_DEFINE(n,					\
			    &xlnx_uartlite_init,			\
			    NULL,					\
			    &xlnx_uartlite_##n##_data,			\
			    &xlnx_uartlite_##n##_config,		\
			    PRE_KERNEL_1,				\
			    CONFIG_SERIAL_INIT_PRIORITY,		\
			    &xlnx_uartlite_driver_api);			\
									\
	XLNX_UARTLITE_CONFIG_FUNC(n)					\
									\
	XLNX_UARTLITE_INIT_CFG(n);

/* Expand the above for every enabled devicetree instance */
DT_INST_FOREACH_STATUS_OKAY(XLNX_UARTLITE_INIT)
390