/*
 * Copyright (c) 2020 Henrik Brix Andersen <henrik@brixandersen.dk>
 *
 * Based on uart_mcux_lpuart.c, which is:
 * Copyright (c) 2017, NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT xlnx_xps_uartlite_1_00_a

#include <zephyr/device.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/irq.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/sys_io.h>

/* AXI UART Lite v2 register offsets (see Xilinx PG142 for details) */
#define RX_FIFO_OFFSET  0x00
#define TX_FIFO_OFFSET  0x04
#define STAT_REG_OFFSET 0x08
#define CTRL_REG_OFFSET 0x0c
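
/* All registers are 32 bits wide and accessed using word reads/writes */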

/* STAT_REG bit definitions */
#define STAT_REG_RX_FIFO_VALID_DATA BIT(0)
#define STAT_REG_RX_FIFO_FULL       BIT(1)
#define STAT_REG_TX_FIFO_EMPTY      BIT(2)
#define STAT_REG_TX_FIFO_FULL       BIT(3)
#define STAT_REG_INTR_ENABLED       BIT(4)
#define STAT_REG_OVERRUN_ERROR      BIT(5)
#define STAT_REG_FRAME_ERROR        BIT(6)
#define STAT_REG_PARITY_ERROR       BIT(7)

/* STAT_REG bit masks */
#define STAT_REG_ERROR_MASK         GENMASK(7, 5)

/* CTRL_REG bit definitions */
#define CTRL_REG_RST_TX_FIFO BIT(0)
#define CTRL_REG_RST_RX_FIFO BIT(1)
#define CTRL_REG_ENABLE_INTR BIT(4)

struct xlnx_uartlite_config {
	mm_reg_t base;
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	void (*irq_config_func)(const struct device *dev);
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
};

struct xlnx_uartlite_data {
	uint32_t errors;

	/* Spinlocks guarding RX and TX FIFO access to prevent bus errors */
	struct k_spinlock rx_lock;
	struct k_spinlock tx_lock;

#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	const struct device *dev;
	struct k_timer timer;
	uart_irq_callback_user_data_t callback;
	void *callback_data;

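	/*
	 * The controller has a single interrupt enable bit shared by RX
	 * and TX, so the logical per-direction enable state is tracked
	 * here in software.
	 */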
	volatile uint8_t tx_irq_enabled : 1;
	volatile uint8_t rx_irq_enabled : 1;
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
};

static inline uint32_t xlnx_uartlite_read_status(const struct device *dev)
{
	const struct xlnx_uartlite_config *config = dev->config;
	struct xlnx_uartlite_data *data = dev->data;
	uint32_t status;

	/* Cache errors as they are cleared by reading the STAT_REG */
	status = sys_read32(config->base + STAT_REG_OFFSET);
	data->errors |= (status & STAT_REG_ERROR_MASK);

	/* Return current status and previously cached errors */
	return status | data->errors;
}

static inline void xlnx_uartlite_clear_status(const struct device *dev)
{
	struct xlnx_uartlite_data *data = dev->data;

	/* Clear cached errors */
	data->errors = 0;
}

static inline unsigned char xlnx_uartlite_read_rx_fifo(const struct device *dev)
{
	const struct xlnx_uartlite_config *config = dev->config;

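	/* The received character occupies bits [7:0] of the RX FIFO register */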
	return (sys_read32(config->base + RX_FIFO_OFFSET) & BIT_MASK(8));
}

static inline void xlnx_uartlite_write_tx_fifo(const struct device *dev,
					       unsigned char c)
{
	const struct xlnx_uartlite_config *config = dev->config;

	sys_write32((uint32_t)c, config->base + TX_FIFO_OFFSET);
}

static int xlnx_uartlite_poll_in(const struct device *dev, unsigned char *c)
{
	uint32_t status;
	k_spinlock_key_t key;
	struct xlnx_uartlite_data *data = dev->data;
	int ret = -1;

	key = k_spin_lock(&data->rx_lock);
	status = xlnx_uartlite_read_status(dev);
	if ((status & STAT_REG_RX_FIFO_VALID_DATA) != 0) {
		*c = xlnx_uartlite_read_rx_fifo(dev);
		ret = 0;
	}
	k_spin_unlock(&data->rx_lock, key);

	return ret;
}

static void xlnx_uartlite_poll_out(const struct device *dev, unsigned char c)
{
	uint32_t status;
	k_spinlock_key_t key;
	struct xlnx_uartlite_data *data = dev->data;
	bool done = false;

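	/* Busy-wait until there is room in the TX FIFO */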
	while (!done) {
		key = k_spin_lock(&data->tx_lock);
		status = xlnx_uartlite_read_status(dev);
		if ((status & STAT_REG_TX_FIFO_FULL) == 0) {
			xlnx_uartlite_write_tx_fifo(dev, c);
			done = true;
		}
		k_spin_unlock(&data->tx_lock, key);
	}
}

static int xlnx_uartlite_err_check(const struct device *dev)
{
	uint32_t status = xlnx_uartlite_read_status(dev);
	int err = 0;

	if (status & STAT_REG_OVERRUN_ERROR) {
		err |= UART_ERROR_OVERRUN;
	}

	if (status & STAT_REG_PARITY_ERROR) {
		err |= UART_ERROR_PARITY;
	}

	if (status & STAT_REG_FRAME_ERROR) {
		err |= UART_ERROR_FRAMING;
	}

	xlnx_uartlite_clear_status(dev);

	return err;
}

#ifdef CONFIG_UART_INTERRUPT_DRIVEN
static inline void xlnx_uartlite_irq_enable(const struct device *dev)
{
	const struct xlnx_uartlite_config *config = dev->config;

	sys_write32(CTRL_REG_ENABLE_INTR, config->base + CTRL_REG_OFFSET);
}

static inline void xlnx_uartlite_irq_cond_disable(const struct device *dev)
{
	const struct xlnx_uartlite_config *config = dev->config;
	struct xlnx_uartlite_data *data = dev->data;

	/* TX and RX IRQs are shared. Only disable if both are disabled. */
	if (!data->tx_irq_enabled && !data->rx_irq_enabled) {
		sys_write32(0, config->base + CTRL_REG_OFFSET);
	}
}

static int xlnx_uartlite_fifo_fill(const struct device *dev,
				   const uint8_t *tx_data,
				   int len)
{
	uint32_t status;
	k_spinlock_key_t key;
	struct xlnx_uartlite_data *data = dev->data;
	int count = 0U;

	while ((len - count) > 0) {
		key = k_spin_lock(&data->tx_lock);
		status = xlnx_uartlite_read_status(dev);
		if ((status & STAT_REG_TX_FIFO_FULL) == 0U) {
			xlnx_uartlite_write_tx_fifo(dev, tx_data[count++]);
		}
		k_spin_unlock(&data->tx_lock, key);
		if ((status & STAT_REG_TX_FIFO_FULL) != 0U) {
			/* Do not busy-wait for FIFO space; report bytes written */
			break;
		}
	}

	return count;
}

static int xlnx_uartlite_fifo_read(const struct device *dev, uint8_t *rx_data,
				   const int len)
{
	uint32_t status;
	k_spinlock_key_t key;
	struct xlnx_uartlite_data *data = dev->data;
	int count = 0U;

	while ((len - count) > 0) {
		key = k_spin_lock(&data->rx_lock);
		status = xlnx_uartlite_read_status(dev);
		if ((status & STAT_REG_RX_FIFO_VALID_DATA) != 0) {
			rx_data[count++] = xlnx_uartlite_read_rx_fifo(dev);
		}
		k_spin_unlock(&data->rx_lock, key);
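		/* Stop once the RX FIFO has been drained */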
		if (!(status & STAT_REG_RX_FIFO_VALID_DATA)) {
			break;
		}
	}

	return count;
}

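/*
 * Soft ISR run from timer context, used to invoke the user callback when
 * TX interrupts are enabled while the TX FIFO is already empty (see
 * xlnx_uartlite_irq_tx_enable() below).
 */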
static void xlnx_uartlite_tx_soft_isr(struct k_timer *timer)
{
	struct xlnx_uartlite_data *data =
		CONTAINER_OF(timer, struct xlnx_uartlite_data, timer);

	if (data->callback) {
		data->callback(data->dev, data->callback_data);
	}
}

static void xlnx_uartlite_irq_tx_enable(const struct device *dev)
{
	struct xlnx_uartlite_data *data = dev->data;
	uint32_t status;

	data->tx_irq_enabled = true;
	status = xlnx_uartlite_read_status(dev);
	xlnx_uartlite_irq_enable(dev);

	if ((status & STAT_REG_TX_FIFO_EMPTY) && data->callback) {
		/*
		 * The TX_FIFO_EMPTY event already generated an edge
		 * interrupt. Generate a soft interrupt and have it call the
		 * callback function in timer ISR context.
		 */
		k_timer_start(&data->timer, K_NO_WAIT, K_NO_WAIT);
	}
}

static void xlnx_uartlite_irq_tx_disable(const struct device *dev)
{
	struct xlnx_uartlite_data *data = dev->data;

	data->tx_irq_enabled = false;
	xlnx_uartlite_irq_cond_disable(dev);
}

static int xlnx_uartlite_irq_tx_ready(const struct device *dev)
{
	struct xlnx_uartlite_data *data = dev->data;
	uint32_t status = xlnx_uartlite_read_status(dev);

	return (((status & STAT_REG_TX_FIFO_FULL) == 0U) &&
		data->tx_irq_enabled);
}

static int xlnx_uartlite_irq_tx_complete(const struct device *dev)
{
	uint32_t status = xlnx_uartlite_read_status(dev);

	return (status & STAT_REG_TX_FIFO_EMPTY);
}

static void xlnx_uartlite_irq_rx_enable(const struct device *dev)
{
	struct xlnx_uartlite_data *data = dev->data;

	data->rx_irq_enabled = true;
	/* RX_FIFO_VALID_DATA generates a level interrupt */
	xlnx_uartlite_irq_enable(dev);
}

static void xlnx_uartlite_irq_rx_disable(const struct device *dev)
{
	struct xlnx_uartlite_data *data = dev->data;

	data->rx_irq_enabled = false;
	xlnx_uartlite_irq_cond_disable(dev);
}

static int xlnx_uartlite_irq_rx_ready(const struct device *dev)
{
	struct xlnx_uartlite_data *data = dev->data;
	uint32_t status = xlnx_uartlite_read_status(dev);

	return ((status & STAT_REG_RX_FIFO_VALID_DATA) &&
		data->rx_irq_enabled);
}

static int xlnx_uartlite_irq_is_pending(const struct device *dev)
{
	return (xlnx_uartlite_irq_tx_ready(dev) ||
		xlnx_uartlite_irq_rx_ready(dev));
}

static int xlnx_uartlite_irq_update(const struct device *dev)
{
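	/* No cached interrupt status to update; always report ready */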
	return 1;
}

static void xlnx_uartlite_irq_callback_set(const struct device *dev,
					   uart_irq_callback_user_data_t cb,
					   void *user_data)
{
	struct xlnx_uartlite_data *data = dev->data;

	data->callback = cb;
	data->callback_data = user_data;
}

static __unused void xlnx_uartlite_isr(const struct device *dev)
{
	struct xlnx_uartlite_data *data = dev->data;

	if (data->callback) {
		data->callback(dev, data->callback_data);
	}
}

#endif /* CONFIG_UART_INTERRUPT_DRIVEN */

static int xlnx_uartlite_init(const struct device *dev)
{
	const struct xlnx_uartlite_config *config = dev->config;
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	struct xlnx_uartlite_data *data = dev->data;

	data->dev = dev;
	k_timer_init(&data->timer, &xlnx_uartlite_tx_soft_isr, NULL);
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */

	/* Reset FIFOs and disable interrupts */
	sys_write32(CTRL_REG_RST_RX_FIFO | CTRL_REG_RST_TX_FIFO,
		    config->base + CTRL_REG_OFFSET);

#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	config->irq_config_func(dev);
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */

	return 0;
}

static const struct uart_driver_api xlnx_uartlite_driver_api = {
	.poll_in = xlnx_uartlite_poll_in,
	.poll_out = xlnx_uartlite_poll_out,
	.err_check = xlnx_uartlite_err_check,
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	.fifo_fill = xlnx_uartlite_fifo_fill,
	.fifo_read = xlnx_uartlite_fifo_read,
	.irq_tx_enable = xlnx_uartlite_irq_tx_enable,
	.irq_tx_disable = xlnx_uartlite_irq_tx_disable,
	.irq_tx_ready = xlnx_uartlite_irq_tx_ready,
	.irq_tx_complete = xlnx_uartlite_irq_tx_complete,
	.irq_rx_enable = xlnx_uartlite_irq_rx_enable,
	.irq_rx_disable = xlnx_uartlite_irq_rx_disable,
	.irq_rx_ready = xlnx_uartlite_irq_rx_ready,
	.irq_is_pending = xlnx_uartlite_irq_is_pending,
	.irq_update = xlnx_uartlite_irq_update,
	.irq_callback_set = xlnx_uartlite_irq_callback_set,
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
};

#ifdef CONFIG_UART_INTERRUPT_DRIVEN
#define XLNX_UARTLITE_IRQ_INIT(n, i)					\
	do {								\
		IRQ_CONNECT(DT_INST_IRQ_BY_IDX(n, i, irq),		\
			    DT_INST_IRQ_BY_IDX(n, i, priority),		\
			    xlnx_uartlite_isr,				\
			    DEVICE_DT_INST_GET(n), 0);			\
									\
		irq_enable(DT_INST_IRQ_BY_IDX(n, i, irq));		\
	} while (false)
#define XLNX_UARTLITE_CONFIG_FUNC(n)					\
	static void xlnx_uartlite_config_func_##n(const struct device *dev) \
	{								\
		/* IRQ line not always present on all instances */	\
		IF_ENABLED(DT_INST_IRQ_HAS_IDX(n, 0),			\
			   (XLNX_UARTLITE_IRQ_INIT(n, 0);))		\
	}
#define XLNX_UARTLITE_IRQ_CFG_FUNC_INIT(n)				\
	.irq_config_func = xlnx_uartlite_config_func_##n
#define XLNX_UARTLITE_INIT_CFG(n)					\
	XLNX_UARTLITE_DECLARE_CFG(n, XLNX_UARTLITE_IRQ_CFG_FUNC_INIT(n))
#else
#define XLNX_UARTLITE_CONFIG_FUNC(n)
#define XLNX_UARTLITE_IRQ_CFG_FUNC_INIT
#define XLNX_UARTLITE_INIT_CFG(n)					\
	XLNX_UARTLITE_DECLARE_CFG(n, XLNX_UARTLITE_IRQ_CFG_FUNC_INIT)
#endif

#define XLNX_UARTLITE_DECLARE_CFG(n, IRQ_FUNC_INIT)			\
	static const struct xlnx_uartlite_config xlnx_uartlite_##n##_config = { \
		.base = DT_INST_REG_ADDR(n),				\
		IRQ_FUNC_INIT						\
	}

#define XLNX_UARTLITE_INIT(n)						\
	static struct xlnx_uartlite_data xlnx_uartlite_##n##_data;	\
									\
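	/* Forward declaration only: the full config initializer is	\
	 * emitted last via XLNX_UARTLITE_INIT_CFG(), since it		\
	 * references the IRQ config function defined after the	\
	 * device object below.					\
	 */								\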
	static const struct xlnx_uartlite_config xlnx_uartlite_##n##_config;\
									\
	DEVICE_DT_INST_DEFINE(n,					\
			      &xlnx_uartlite_init,			\
			      NULL,					\
			      &xlnx_uartlite_##n##_data,		\
			      &xlnx_uartlite_##n##_config,		\
			      PRE_KERNEL_1,				\
			      CONFIG_SERIAL_INIT_PRIORITY,		\
			      &xlnx_uartlite_driver_api);		\
									\
	XLNX_UARTLITE_CONFIG_FUNC(n)					\
									\
	XLNX_UARTLITE_INIT_CFG(n);

DT_INST_FOREACH_STATUS_OKAY(XLNX_UARTLITE_INIT)