/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 */
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <linux/irq.h>

#include <asm/i8259.h>
#include <asm/io.h>

/*
 * This is the 'legacy' 8259A Programmable Interrupt Controller,
 * present in the majority of PC/AT boxes, plus some generic x86
 * specific things, if generic specifics make any sense at all.
 * This file should become arch/i386/kernel/irq.c when the old irq.c
 * moves to arch-independent land.
 */

static int i8259A_auto_eoi = -1;
DEFINE_RAW_SPINLOCK(i8259A_lock);
static void disable_8259A_irq(struct irq_data *d);
static void enable_8259A_irq(struct irq_data *d);
static void mask_and_ack_8259A(struct irq_data *d);
static void init_8259A(int auto_eoi);
static int (*i8259_poll)(void) = i8259_irq;

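/*
 * irq_chip callbacks used for every interrupt line handled by the two
 * cascaded 8259As.
 */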
static struct irq_chip i8259A_chip = {
	.name = "XT-PIC",
	.irq_mask = disable_8259A_irq,
	.irq_disable = disable_8259A_irq,
	.irq_unmask = enable_8259A_irq,
	.irq_mask_ack = mask_and_ack_8259A,
};

/*
 * 8259A PIC functions to handle ISA devices:
 */

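/*
 * Let platform code override the routine used to poll the PIC for the
 * pending interrupt; the default is i8259_irq().
 */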
void i8259_set_poll(int (*poll)(void))
{
	i8259_poll = poll;
}

/*
 * This contains the irq mask for both 8259A irq controllers.
 */
static unsigned int cached_irq_mask = 0xffff;

#define cached_master_mask (cached_irq_mask)
#define cached_slave_mask (cached_irq_mask >> 8)

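/*
 * Mask an IRQ: set its bit in the cached mask and write the result to
 * the IMR of the PIC (master or slave) the line belongs to.
 */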
static void disable_8259A_irq(struct irq_data *d)
{
	unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
	unsigned long flags;

	mask = 1 << irq;
	raw_spin_lock_irqsave(&i8259A_lock, flags);
	cached_irq_mask |= mask;
	if (irq & 8)
		outb(cached_slave_mask, PIC_SLAVE_IMR);
	else
		outb(cached_master_mask, PIC_MASTER_IMR);
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

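/*
 * Unmask an IRQ: clear its bit in the cached mask and write the result
 * back to the appropriate IMR.
 */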
static void enable_8259A_irq(struct irq_data *d)
{
	unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
	unsigned long flags;

	mask = ~(1 << irq);
	raw_spin_lock_irqsave(&i8259A_lock, flags);
	cached_irq_mask &= mask;
	if (irq & 8)
		outb(cached_slave_mask, PIC_SLAVE_IMR);
	else
		outb(cached_master_mask, PIC_MASTER_IMR);
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

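/*
 * Rebind an IRQ to the 8259A chip and the level-triggered flow handler.
 */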
void make_8259A_irq(unsigned int irq)
{
	disable_irq_nosync(irq);
	irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
	enable_irq(irq);
}

/*
 * This function is expected to be called rarely, since switching
 * between 8259A registers is slow.
 * The caller must hold the irq controller spinlock.
 */
static inline int i8259A_irq_real(unsigned int irq)
{
	int value;
	int irqmask = 1 << irq;

	if (irq < 8) {
		outb(0x0B, PIC_MASTER_CMD);	/* ISR register */
		value = inb(PIC_MASTER_CMD) & irqmask;
		outb(0x0A, PIC_MASTER_CMD);	/* back to the IRR register */
		return value;
	}
	outb(0x0B, PIC_SLAVE_CMD);	/* ISR register */
	value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
	outb(0x0A, PIC_SLAVE_CMD);	/* back to the IRR register */
	return value;
}

/*
 * Careful! The 8259A is a fragile beast, it pretty
 * much _has_ to be done exactly like this (mask it
 * first, _then_ send the EOI, and the order of EOI
 * to the two 8259s is important!)
 */
static void mask_and_ack_8259A(struct irq_data *d)
{
	unsigned int irqmask, irq = d->irq - I8259A_IRQ_BASE;
	unsigned long flags;

	irqmask = 1 << irq;
	raw_spin_lock_irqsave(&i8259A_lock, flags);
	/*
	 * Lightweight spurious IRQ detection. We do not want
	 * to overdo spurious IRQ handling - it's usually a sign
	 * of hardware problems, so we only do the checks we can
	 * do without slowing down good hardware unnecessarily.
	 *
	 * Note that IRQ7 and IRQ15 (the two spurious IRQs
	 * usually resulting from the 8259A-1|2 PICs) occur
	 * even if the IRQ is masked in the 8259A. Thus we
	 * can check spurious 8259A IRQs without doing the
	 * quite slow i8259A_irq_real() call for every IRQ.
	 * This does not cover 100% of spurious interrupts,
	 * but should be enough to warn the user that there
	 * is something bad going on ...
	 */
	if (cached_irq_mask & irqmask)
		goto spurious_8259A_irq;
	cached_irq_mask |= irqmask;

handle_real_irq:
	if (irq & 8) {
		inb(PIC_SLAVE_IMR);	/* DUMMY - (do we need this?) */
		outb(cached_slave_mask, PIC_SLAVE_IMR);
		outb(0x60+(irq&7), PIC_SLAVE_CMD);	/* 'Specific EOI' to slave */
		outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
	} else {
		inb(PIC_MASTER_IMR);	/* DUMMY - (do we need this?) */
		outb(cached_master_mask, PIC_MASTER_IMR);
		outb(0x60+irq, PIC_MASTER_CMD);	/* 'Specific EOI' to master */
	}
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
	return;

spurious_8259A_irq:
	/*
	 * This is the slow path - it should happen rarely.
	 */
	if (i8259A_irq_real(irq))
		/*
		 * oops, the IRQ _is_ in service according to the
		 * 8259A - not spurious, go handle it.
		 */
		goto handle_real_irq;

	{
		static int spurious_irq_mask;
		/*
		 * At this point we can be sure the IRQ is spurious,
		 * let's ACK and report it. [once per IRQ]
		 */
		if (!(spurious_irq_mask & irqmask)) {
			printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
			spurious_irq_mask |= irqmask;
		}
		atomic_inc(&irq_err_count);
		/*
		 * Theoretically we do not have to handle this IRQ,
		 * but in Linux this does not cause problems and is
		 * simpler for us.
		 */
		goto handle_real_irq;
	}
}

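/*
 * On resume, reprogram the PICs with the EOI mode that was last set up
 * by init_8259A().
 */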
static void i8259A_resume(void)
{
	if (i8259A_auto_eoi >= 0)
		init_8259A(i8259A_auto_eoi);
}

static void i8259A_shutdown(void)
{
	/* Put the i8259A into a quiescent state that
	 * the kernel initialization code can get it
	 * out of.
	 */
	if (i8259A_auto_eoi >= 0) {
		outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
		outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
	}
}

static struct syscore_ops i8259_syscore_ops = {
	.resume = i8259A_resume,
	.shutdown = i8259A_shutdown,
};

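/*
 * Program both PICs through the standard ICW1-ICW4 initialization
 * sequence, then restore the cached interrupt masks.
 */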
static void init_8259A(int auto_eoi)
{
	unsigned long flags;

	i8259A_auto_eoi = auto_eoi;

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */

	/*
	 * outb_p - this has to work on a wide range of PC hardware.
	 */
	outb_p(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
	outb_p(I8259A_IRQ_BASE + 0, PIC_MASTER_IMR);	/* ICW2: 8259A-1 IR0 mapped to I8259A_IRQ_BASE + 0x00 */
	outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);	/* 8259A-1 (the master) has a slave on IR2 */
	if (auto_eoi)	/* master does Auto EOI */
		outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
	else		/* master expects normal EOI */
		outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);

	outb_p(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */
	outb_p(I8259A_IRQ_BASE + 8, PIC_SLAVE_IMR);	/* ICW2: 8259A-2 IR0 mapped to I8259A_IRQ_BASE + 0x08 */
	outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR);	/* 8259A-2 is a slave on master's IR2 */
	outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
	if (auto_eoi)
		/*
		 * In AEOI mode we just have to mask the interrupt
		 * when acking.
		 */
		i8259A_chip.irq_mask_ack = disable_8259A_irq;
	else
		i8259A_chip.irq_mask_ack = mask_and_ack_8259A;

	udelay(100);	/* wait for 8259A to initialize */

	outb(cached_master_mask, PIC_MASTER_IMR);	/* restore master IRQ mask */
	outb(cached_slave_mask, PIC_SLAVE_IMR);		/* restore slave IRQ mask */

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

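/*
 * I/O port ranges occupied by the master and slave PICs, reserved as
 * busy in the ioport resource tree.
 */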
static struct resource pic1_io_resource = {
	.name = "pic1",
	.start = PIC_MASTER_CMD,
	.end = PIC_MASTER_IMR,
	.flags = IORESOURCE_IO | IORESOURCE_BUSY
};

static struct resource pic2_io_resource = {
	.name = "pic2",
	.start = PIC_SLAVE_CMD,
	.end = PIC_SLAVE_IMR,
	.flags = IORESOURCE_IO | IORESOURCE_BUSY
};

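/*
 * irq_domain map callback: bind each virtual IRQ to the 8259A chip and
 * the level-triggered flow handler, and make it available for probing.
 */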
static int i8259A_irq_domain_map(struct irq_domain *d, unsigned int virq,
				 irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &i8259A_chip, handle_level_irq);
	irq_set_probe(virq);
	return 0;
}

static const struct irq_domain_ops i8259A_ops = {
	.map = i8259A_irq_domain_map,
	.xlate = irq_domain_xlate_onecell,
};

/*
 * On systems with i8259-style interrupt controllers we assume, for
 * driver compatibility reasons, that interrupts 0 - 15 are the i8259
 * interrupts even if the hardware uses a different interrupt numbering.
 */
struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
{
	/*
	 * PIC_CASCADE_IR is the cascade interrupt to the second
	 * interrupt controller.
	 */
	int irq = I8259A_IRQ_BASE + PIC_CASCADE_IR;
	struct irq_domain *domain;

	insert_resource(&ioport_resource, &pic1_io_resource);
	insert_resource(&ioport_resource, &pic2_io_resource);

	init_8259A(0);

	domain = irq_domain_add_legacy(node, 16, I8259A_IRQ_BASE, 0,
				       &i8259A_ops, NULL);
	if (!domain)
		panic("Failed to add i8259 IRQ domain");

	if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade", NULL))
		pr_err("Failed to register cascade interrupt\n");
	register_syscore_ops(&i8259_syscore_ops);
	return domain;
}

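/*
 * Legacy (non-devicetree) entry point: set up the PICs without an
 * associated OF node.
 */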
void __init init_i8259_irqs(void)
{
	__init_i8259_irqs(NULL);
}

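/*
 * Chained handler for the parent (cascade) interrupt: poll the PIC for
 * the pending line and dispatch the corresponding Linux IRQ.
 */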
static void i8259_irq_dispatch(struct irq_desc *desc)
{
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	int hwirq = i8259_poll();
	unsigned int irq;

	if (hwirq < 0)
		return;

	irq = irq_linear_revmap(domain, hwirq);
	generic_handle_irq(irq);
}

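/*
 * Devicetree init: program the PICs, then install i8259_irq_dispatch()
 * as the chained handler for the parent interrupt.
 */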
int __init i8259_of_init(struct device_node *node, struct device_node *parent)
{
	struct irq_domain *domain;
	unsigned int parent_irq;

	domain = __init_i8259_irqs(node);

	parent_irq = irq_of_parse_and_map(node, 0);
	if (!parent_irq) {
		pr_err("Failed to map i8259 parent IRQ\n");
		irq_domain_remove(domain);
		return -ENODEV;
	}

	irq_set_chained_handler_and_data(parent_irq, i8259_irq_dispatch,
					 domain);
	return 0;
}
IRQCHIP_DECLARE(i8259, "intel,i8259", i8259_of_init);