1 /*
2  * Copyright (c) 2016, Xilinx Inc. and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 /**
8  * @file	linux/irq.c
9  * @brief	Linux libmetal irq operations
10  */
11 
#include <pthread.h>
#include <sched.h>
#include <metal/device.h>
#include <metal/irq.h>
#include <metal/irq_controller.h>
#include <metal/sys.h>
#include <metal/mutex.h>
#include <metal/list.h>
#include <metal/utilities.h>
#include <metal/alloc.h>
#include <sys/time.h>
#include <sys/eventfd.h>
#include <sched.h>
#include <errno.h>
#include <poll.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
30 
31 #define MAX_IRQS	(FD_SETSIZE - 1)  /**< maximum number of irqs */
32 
33 static struct metal_device *irqs_devs[MAX_IRQS]; /**< Linux devices for IRQs */
34 static int irq_notify_fd; /**< irq handling state change notification file
35 			    *   descriptor
36 			    */
37 static metal_mutex_t irq_lock; /**< irq handling lock */
38 
39 static bool irq_handling_stop; /**< stop interrupts handling */
40 
41 static pthread_t irq_pthread; /**< irq handling thread id */
42 
/** Indicate which IRQ is enabled */
44 static unsigned long
45 irqs_enabled[metal_div_round_up(MAX_IRQS, METAL_BITS_PER_ULONG)];
46 
47 static struct metal_irq irqs[MAX_IRQS]; /**< Linux IRQs array */
48 
49 /* Static functions */
50 static void metal_linux_irq_set_enable(struct metal_irq_controller *irq_cntr,
51 				       int irq, unsigned int state);
52 
/** Linux IRQ controller */
54 static METAL_IRQ_CONTROLLER_DECLARE(linux_irq_cntr,
55 				    0, MAX_IRQS,
56 				    NULL,
57 				    metal_linux_irq_set_enable, NULL,
58 				    irqs);
59 
metal_irq_save_disable(void)60 unsigned int metal_irq_save_disable(void)
61 {
62 	/* This is to avoid deadlock if it is called in ISR */
63 	if (pthread_self() == irq_pthread)
64 		return 0;
65 	metal_mutex_acquire(&irq_lock);
66 	return 0;
67 }
68 
metal_irq_restore_enable(unsigned int flags)69 void metal_irq_restore_enable(unsigned int flags)
70 {
71 	(void)flags;
72 	if (pthread_self() != irq_pthread)
73 		metal_mutex_release(&irq_lock);
74 }
75 
metal_linux_irq_notify(void)76 static int metal_linux_irq_notify(void)
77 {
78 	uint64_t val = 1;
79 	int ret;
80 
81 	ret = write(irq_notify_fd, &val, sizeof(val));
82 	if (ret < 0) {
83 		metal_log(METAL_LOG_ERROR, "%s failed\n", __func__);
84 	}
85 	return ret;
86 }
87 
metal_linux_irq_set_enable(struct metal_irq_controller * irq_cntr,int irq,unsigned int state)88 static void metal_linux_irq_set_enable(struct metal_irq_controller *irq_cntr,
89 				       int irq, unsigned int state)
90 {
91 	int offset, ret;
92 
93 	if (irq < irq_cntr->irq_base ||
94 	    irq >= irq_cntr->irq_base + irq_cntr->irq_num) {
95 		metal_log(METAL_LOG_ERROR, "%s: invalid irq %d\n",
96 			  __func__, irq);
97 		return;
98 	}
99 	offset = irq - linux_irq_cntr.irq_base;
100 	metal_mutex_acquire(&irq_lock);
101 	if (state == METAL_IRQ_ENABLE)
102 		metal_bitmap_set_bit(irqs_enabled, offset);
103 	else
104 		metal_bitmap_clear_bit(irqs_enabled, offset);
105 	metal_mutex_release(&irq_lock);
106 	/* Notify IRQ thread that IRQ state has changed */
107 	ret = metal_linux_irq_notify();
108 	if (ret < 0) {
109 		metal_log(METAL_LOG_ERROR,
110 			  "%s: failed to notify set %d enable\n",
111 			  __func__, irq);
112 	}
113 }
114 
115 /**
116  * @brief       IRQ handler
117  * @param[in]   args  not used. required for pthread.
118  */
metal_linux_irq_handling(void * args)119 static void *metal_linux_irq_handling(void *args)
120 {
121 	struct sched_param param;
122 	uint64_t val;
123 	int ret;
124 	int i, j, pfds_total;
125 	struct pollfd *pfds;
126 
127 	(void)args;
128 
129 	pfds = (struct pollfd *)malloc(FD_SETSIZE * sizeof(struct pollfd));
130 	if (!pfds) {
131 		metal_log(METAL_LOG_ERROR,
132 			  "%s: failed to allocate irq fds mem.\n", __func__);
133 		return NULL;
134 	}
135 
136 	param.sched_priority = sched_get_priority_max(SCHED_FIFO);
137 	/* Ignore the set scheduler error */
138 	ret = sched_setscheduler(0, SCHED_FIFO, &param);
139 	if (ret) {
140 		metal_log(METAL_LOG_WARNING,
141 			  "%s: Failed to set scheduler: %s.\n", __func__,
142 			  strerror(ret));
143 	}
144 
145 	while (1) {
146 		metal_mutex_acquire(&irq_lock);
147 		if (irq_handling_stop) {
148 			/* Killing this IRQ handling thread */
149 			metal_mutex_release(&irq_lock);
150 			break;
151 		}
152 
153 		/* Get the fdset */
154 		memset(pfds, 0, MAX_IRQS * sizeof(struct pollfd));
155 		pfds[0].fd = irq_notify_fd;
156 		pfds[0].events = POLLIN;
157 		j = 1;
158 		metal_bitmap_for_each_set_bit(irqs_enabled, i,
159 					      linux_irq_cntr.irq_num) {
160 			pfds[j].fd = i;
161 			pfds[j].events = POLLIN;
162 			j++;
163 		}
164 		metal_mutex_release(&irq_lock);
165 		/* Wait for interrupt */
166 		ret = poll(pfds, j, -1);
167 		if (ret < 0) {
168 			metal_log(METAL_LOG_ERROR, "%s: poll() failed: %s.\n",
169 				  __func__, strerror(errno));
170 			break;
171 		}
172 		/* Waken up from interrupt */
173 		pfds_total = j;
174 		for (i = 0; i < pfds_total; i++) {
175 			if ((pfds[i].fd == irq_notify_fd) &&
176 			    (pfds[i].revents & (POLLIN | POLLRDNORM))) {
177 				/* IRQ registration change notification */
178 				if (read(pfds[i].fd,
179 					 (void *)&val, sizeof(uint64_t)) < 0)
180 					metal_log(METAL_LOG_ERROR,
181 						  "%s, read irq fd %d failed\n",
182 						  __func__, pfds[i].fd);
183 			} else if ((pfds[i].revents & (POLLIN | POLLRDNORM))) {
184 				struct metal_device *dev = NULL;
185 				int irq_handled = 0;
186 				int fd;
187 
188 				fd = pfds[i].fd;
189 				dev = irqs_devs[fd];
190 				metal_mutex_acquire(&irq_lock);
191 				if (metal_irq_handle(&irqs[fd], fd)
192 				    == METAL_IRQ_HANDLED)
193 					irq_handled = 1;
194 				if (irq_handled) {
195 					if (dev && dev->bus->ops.dev_irq_ack)
196 						dev->bus->ops.dev_irq_ack(
197 							dev->bus, dev, fd);
198 				}
199 				metal_mutex_release(&irq_lock);
200 			} else if (pfds[i].revents) {
201 				metal_log(METAL_LOG_DEBUG,
202 					  "%s: poll unexpected. fd %d: %d\n",
203 					  __func__,
204 					  pfds[i].fd, pfds[i].revents);
205 			}
206 		}
207 	}
208 	free(pfds);
209 	return NULL;
210 }
211 
212 /**
213  * @brief irq handling initialization
214  * @return 0 on success, non-zero on failure
215  */
metal_linux_irq_init(void)216 int metal_linux_irq_init(void)
217 {
218 	int ret;
219 
220 	memset(&irqs, 0, sizeof(irqs));
221 
222 	irq_notify_fd = eventfd(0, EFD_CLOEXEC);
223 	if (irq_notify_fd < 0) {
224 		metal_log(METAL_LOG_ERROR,
225 			  "Failed to create eventfd for IRQ handling.\n");
226 		return  -EAGAIN;
227 	}
228 
229 	metal_mutex_init(&irq_lock);
230 	irq_handling_stop = false;
231 	ret = metal_irq_register_controller(&linux_irq_cntr);
232 	if (ret < 0) {
233 		metal_log(METAL_LOG_ERROR,
234 			  "Linux IRQ controller failed to register.\n");
235 		return -EINVAL;
236 	}
237 	ret = pthread_create(&irq_pthread, NULL,
238 			     metal_linux_irq_handling, NULL);
239 	if (ret != 0) {
240 		metal_log(METAL_LOG_ERROR, "Failed to create IRQ thread: %d.\n",
241 			  ret);
242 		return -EAGAIN;
243 	}
244 
245 	return 0;
246 }
247 
248 /**
249  * @brief irq handling shutdown
250  */
metal_linux_irq_shutdown(void)251 void metal_linux_irq_shutdown(void)
252 {
253 	int ret;
254 
255 	metal_log(METAL_LOG_DEBUG, "%s\n", __func__);
256 	irq_handling_stop = true;
257 	metal_linux_irq_notify();
258 	ret = pthread_join(irq_pthread, NULL);
259 	if (ret) {
260 		metal_log(METAL_LOG_ERROR, "Failed to join IRQ thread: %d.\n",
261 			  ret);
262 	}
263 	close(irq_notify_fd);
264 	metal_mutex_deinit(&irq_lock);
265 }
266 
metal_linux_irq_register_dev(struct metal_device * dev,int irq)267 void metal_linux_irq_register_dev(struct metal_device *dev, int irq)
268 {
269 	if (irq > MAX_IRQS) {
270 		metal_log(METAL_LOG_ERROR,
271 			  "Failed to register device to irq %d\n", irq);
272 		return;
273 	}
274 	irqs_devs[irq] = dev;
275 }
276