// SPDX-License-Identifier: GPL-2.0-only
/*
 * Local APIC related interfaces to support IOAPIC, MSI, etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 * Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 * Enable support of hierarchical irqdomains
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>

#include <asm/trace/irq_vectors.h>

struct apic_chip_data {
	struct irq_cfg		hw_irq_cfg;
	unsigned int		vector;
	unsigned int		prev_vector;
	unsigned int		cpu;
	unsigned int		prev_cpu;
	unsigned int		irq;
	struct hlist_node	clist;
	unsigned int		move_in_progress	: 1,
				is_managed		: 1,
				can_reserve		: 1,
				has_reserved		: 1;
};

struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_searchmask;
static struct irq_chip lapic_controller;
static struct irq_matrix *vector_matrix;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct hlist_head, cleanup_list);
#endif

void lock_vector_lock(void)
{
	/*
	 * Used to ensure that the online set of CPUs does not change
	 * during assign_irq_vector().
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}

void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
	if (src)
		*dst = *src;
	else
		memset(dst, 0, sizeof(*dst));
}

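/*
 * Walk the irq_data chain to the root of the hierarchical irqdomain
 * stack, where the vector domain lives, and return its chip data.
 */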
static struct apic_chip_data *apic_chip_data(struct irq_data *irqd)
{
	if (!irqd)
		return NULL;

	while (irqd->parent_data)
		irqd = irqd->parent_data;

	return irqd->chip_data;
}

struct irq_cfg *irqd_cfg(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	return apicd ? &apicd->hw_irq_cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}

static struct apic_chip_data *alloc_apic_chip_data(int node)
{
	struct apic_chip_data *apicd;

	apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node);
	if (apicd)
		INIT_HLIST_NODE(&apicd->clist);
	return apicd;
}

static void free_apic_chip_data(struct apic_chip_data *apicd)
{
	kfree(apicd);
}

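/*
 * Commit a new vector/target CPU pair to the hardware-facing
 * configuration and update the effective affinity accordingly.
 * Must be called with vector_lock held.
 */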
static void apic_update_irq_cfg(struct irq_data *irqd, unsigned int vector,
				unsigned int cpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	lockdep_assert_held(&vector_lock);

	apicd->hw_irq_cfg.vector = vector;
	apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu);
	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
	trace_vector_config(irqd->irq, vector, cpu,
			    apicd->hw_irq_cfg.dest_apicid);
}

static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
			       unsigned int newcpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	struct irq_desc *desc = irq_data_to_desc(irqd);
	bool managed = irqd_affinity_is_managed(irqd);

	lockdep_assert_held(&vector_lock);

	trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector,
			    apicd->cpu);

	/*
	 * If no vector is associated, or if the associated vector is
	 * the special shutdown vector (installed to make PCI/MSI
	 * shutdown mode work), then there is nothing to release. Clear
	 * out prev_vector for this and the offlined-target case.
	 */
	apicd->prev_vector = 0;
	if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR)
		goto setnew;
	/*
	 * If the target CPU of the previous vector is online, then mark
	 * the vector as move in progress and store it for cleanup when the
	 * first interrupt on the new vector arrives. If the target CPU is
	 * offline then the regular release mechanism via the cleanup
	 * vector is not possible and the vector can be immediately freed
	 * in the underlying matrix allocator.
	 */
	if (cpu_online(apicd->cpu)) {
		apicd->move_in_progress = true;
		apicd->prev_vector = apicd->vector;
		apicd->prev_cpu = apicd->cpu;
		WARN_ON_ONCE(apicd->cpu == newcpu);
	} else {
		irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
				managed);
	}

setnew:
	apicd->vector = newvec;
	apicd->cpu = newcpu;
	BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec]));
	per_cpu(vector_irq, newcpu)[newvec] = desc;
}

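/*
 * Park the interrupt on the special managed shutdown vector, targeted
 * at the first online CPU. This is what makes PCI/MSI shutdown mode
 * work (see the comment in apic_update_vector()).
 */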
static void vector_assign_managed_shutdown(struct irq_data *irqd)
{
	unsigned int cpu = cpumask_first(cpu_online_mask);

	apic_update_irq_cfg(irqd, MANAGED_IRQ_SHUTDOWN_VECTOR, cpu);
}

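/*
 * Reserve, but do not yet assign, a vector for a managed interrupt on
 * each CPU in its affinity mask. The real vector is handed out at
 * activation time via activate_managed().
 */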
static int reserve_managed_vector(struct irq_data *irqd)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apicd->is_managed = true;
	ret = irq_matrix_reserve_managed(vector_matrix, affmsk);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	trace_vector_reserve_managed(irqd->irq, ret);
	return ret;
}

static void reserve_irq_vector_locked(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	irq_matrix_reserve(vector_matrix);
	apicd->can_reserve = true;
	apicd->has_reserved = true;
	irqd_set_can_reserve(irqd);
	trace_vector_reserve(irqd->irq, 0);
	vector_assign_managed_shutdown(irqd);
}

static int reserve_irq_vector(struct irq_data *irqd)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	reserve_irq_vector_locked(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return 0;
}

static int
assign_vector_locked(struct irq_data *irqd, const struct cpumask *dest)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool resvd = apicd->has_reserved;
	unsigned int cpu = apicd->cpu;
	int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	/*
	 * If the current target CPU is online and in the new requested
	 * affinity mask, there is no point in moving the interrupt from
	 * one CPU to another.
	 */
	if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
		return 0;

	/*
	 * Careful here. @apicd might either have move_in_progress set or
	 * be enqueued for cleanup. Assigning a new vector would either
	 * leave a stale vector on some CPU around or in case of a pending
	 * cleanup corrupt the hlist.
	 */
	if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist))
		return -EBUSY;

	vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
	trace_vector_alloc(irqd->irq, vector, resvd, vector);
	if (vector < 0)
		return vector;
	apic_update_vector(irqd, vector, cpu);
	apic_update_irq_cfg(irqd, vector, cpu);

	return 0;
}

static int assign_irq_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	ret = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

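/*
 * Search for a target CPU with progressively relaxed constraints:
 * first the intersection of the affinity mask and the home node, then
 * the full affinity mask, then the home node alone, and finally any
 * online CPU.
 */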
static int assign_irq_vector_any_locked(struct irq_data *irqd)
{
	/* Get the affinity mask - either irq_default_affinity or (user) set */
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	int node = irq_data_get_node(irqd);

	if (node != NUMA_NO_NODE) {
		/* Try the intersection of @affmsk and node mask */
		cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
		if (!assign_vector_locked(irqd, vector_searchmask))
			return 0;
	}

	/* Try the full affinity mask */
	cpumask_and(vector_searchmask, affmsk, cpu_online_mask);
	if (!assign_vector_locked(irqd, vector_searchmask))
		return 0;

	if (node != NUMA_NO_NODE) {
		/* Try the node mask */
		if (!assign_vector_locked(irqd, cpumask_of_node(node)))
			return 0;
	}

	/* Try the full online mask */
	return assign_vector_locked(irqd, cpu_online_mask);
}

static int
assign_irq_vector_policy(struct irq_data *irqd, struct irq_alloc_info *info)
{
	if (irqd_affinity_is_managed(irqd))
		return reserve_managed_vector(irqd);
	if (info->mask)
		return assign_irq_vector(irqd, info->mask);
	/*
	 * Make only a global reservation with no guarantee. A real vector
	 * is associated at activation time.
	 */
	return reserve_irq_vector(irqd);
}

static int
assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int vector, cpu;

	cpumask_and(vector_searchmask, dest, affmsk);

	/* set_affinity might call here even though nothing has changed */
	if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
		return 0;
	vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask,
					  &cpu);
	trace_vector_alloc_managed(irqd->irq, vector, vector);
	if (vector < 0)
		return vector;
	apic_update_vector(irqd, vector, cpu);
	apic_update_irq_cfg(irqd, vector, cpu);
	return 0;
}

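/*
 * Release the assigned vector, and a pending previous vector if there
 * is one, back to the matrix allocator and mark the corresponding
 * per-CPU vector entries as shut down. Must be called with
 * vector_lock held.
 */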
static void clear_irq_vector(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool managed = irqd_affinity_is_managed(irqd);
	unsigned int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	if (!vector)
		return;

	trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
			   apicd->prev_cpu);

	per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN;
	irq_matrix_free(vector_matrix, apicd->cpu, vector, managed);
	apicd->vector = 0;

	/* Clean up move in progress */
	vector = apicd->prev_vector;
	if (!vector)
		return;

	per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN;
	irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
	hlist_del_init(&apicd->clist);
}

static void x86_vector_deactivate(struct irq_domain *dom, struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	trace_vector_deactivate(irqd->irq, apicd->is_managed,
				apicd->can_reserve, false);

	/* Regular fixed assigned interrupt */
	if (!apicd->is_managed && !apicd->can_reserve)
		return;
	/* If the interrupt has a global reservation, nothing to do */
	if (apicd->has_reserved)
		return;

	raw_spin_lock_irqsave(&vector_lock, flags);
	clear_irq_vector(irqd);
	if (apicd->can_reserve)
		reserve_irq_vector_locked(irqd);
	else
		vector_assign_managed_shutdown(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
}

static int activate_reserved(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int ret;

	ret = assign_irq_vector_any_locked(irqd);
	if (!ret) {
		apicd->has_reserved = false;
		/*
		 * Core might have disabled reservation mode after
		 * allocating the irq descriptor. Ideally this should
		 * happen before allocation time, but that would require
		 * completely convoluted ways of transporting that
		 * information.
		 */
		if (!irqd_can_reserve(irqd))
			apicd->can_reserve = false;
	}

	/*
	 * Check to ensure that the effective affinity mask is a subset
	 * of the user-supplied affinity mask, and warn the user if it
	 * is not.
	 */
	if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd),
			    irq_data_get_affinity_mask(irqd))) {
		pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n",
			irqd->irq);
	}

	return ret;
}

static int activate_managed(struct irq_data *irqd)
{
	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
	int ret;

	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
		/* Something in the core code broke! Survive gracefully */
		pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
		return -EINVAL;
	}

	ret = assign_managed_vector(irqd, vector_searchmask);
	/*
	 * This should not happen. The vector reservation got buggered. Handle
	 * it gracefully.
	 */
	if (WARN_ON_ONCE(ret < 0)) {
		pr_err("Managed startup irq %u, no vector available\n",
		       irqd->irq);
	}
	return ret;
}

static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
			       bool reserve)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret = 0;

	trace_vector_activate(irqd->irq, apicd->is_managed,
			      apicd->can_reserve, reserve);

	raw_spin_lock_irqsave(&vector_lock, flags);
	if (!apicd->can_reserve && !apicd->is_managed)
		assign_irq_vector_any_locked(irqd);
	else if (reserve || irqd_is_managed_and_shutdown(irqd))
		vector_assign_managed_shutdown(irqd);
	else if (apicd->is_managed)
		ret = activate_managed(irqd);
	else if (apicd->has_reserved)
		ret = activate_reserved(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

static void vector_free_reserved_and_managed(struct irq_data *irqd)
{
	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	trace_vector_teardown(irqd->irq, apicd->is_managed,
			      apicd->has_reserved);

	if (apicd->has_reserved)
		irq_matrix_remove_reserved(vector_matrix);
	if (apicd->is_managed)
		irq_matrix_remove_managed(vector_matrix, dest);
}

static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irqd && irqd->chip_data) {
			raw_spin_lock_irqsave(&vector_lock, flags);
			clear_irq_vector(irqd);
			vector_free_reserved_and_managed(irqd);
			apicd = irqd->chip_data;
			irq_domain_reset_irq_data(irqd);
			raw_spin_unlock_irqrestore(&vector_lock, flags);
			free_apic_chip_data(apicd);
		}
	}
}

static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd,
				    struct apic_chip_data *apicd)
{
	unsigned long flags;
	bool realloc = false;

	apicd->vector = ISA_IRQ_VECTOR(virq);
	apicd->cpu = 0;

	raw_spin_lock_irqsave(&vector_lock, flags);
	/*
	 * If the interrupt is activated, then it must stay at this vector
	 * position. That's usually the timer interrupt (0).
	 */
	if (irqd_is_activated(irqd)) {
		trace_vector_setup(virq, true, 0);
		apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
	} else {
		/* Release the vector */
		apicd->can_reserve = true;
		irqd_set_can_reserve(irqd);
		clear_irq_vector(irqd);
		realloc = true;
	}
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return realloc;
}

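/*
 * irqdomain ->alloc() callback: allocate the per-interrupt chip data
 * and assign an initial vector (or reservation) for each interrupt in
 * the requested range.
 */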
static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	int i, err, node;

	if (disable_apic)
		return -ENXIO;

	/* Currently the vector allocator can't guarantee contiguous allocations */
	if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
		return -ENOSYS;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irqd);
		node = irq_data_get_node(irqd);
		WARN_ON_ONCE(irqd->chip_data);
		apicd = alloc_apic_chip_data(node);
		if (!apicd) {
			err = -ENOMEM;
			goto error;
		}

		apicd->irq = virq + i;
		irqd->chip = &lapic_controller;
		irqd->chip_data = apicd;
		irqd->hwirq = virq + i;
		irqd_set_single_target(irqd);
		/*
		 * Prevent any of these interrupts from being invoked in
		 * non-interrupt context via e.g. generic_handle_irq()
		 * as that can corrupt the affinity move state.
		 */
		irqd_set_handle_enforce_irqctx(irqd);

		/* Don't invoke affinity setter on deactivated interrupts */
		irqd_set_affinity_on_activate(irqd);

		/*
		 * Legacy vectors are already assigned when the IOAPIC
		 * takes them over. They stay on the same vector. This is
		 * required for check_timer() to work correctly as it might
		 * switch back to legacy mode. Only update the hardware
		 * config.
		 */
		if (info->flags & X86_IRQ_ALLOC_LEGACY) {
			if (!vector_configure_legacy(virq + i, irqd, apicd))
				continue;
		}

		err = assign_irq_vector_policy(irqd, info);
		trace_vector_setup(virq + i, false, err);
		if (err) {
			irqd->chip_data = NULL;
			free_apic_chip_data(apicd);
			goto error;
		}
	}

	return 0;

error:
	x86_vector_free_irqs(domain, virq, i);
	return err;
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
				  struct irq_data *irqd, int ind)
{
	struct apic_chip_data apicd;
	unsigned long flags;
	int irq;

	if (!irqd) {
		irq_matrix_debug_show(m, vector_matrix, ind);
		return;
	}

	irq = irqd->irq;
	if (irq < nr_legacy_irqs() && !test_bit(irq, &io_apic_irqs)) {
		seq_printf(m, "%*sVector: %5d\n", ind, "", ISA_IRQ_VECTOR(irq));
		seq_printf(m, "%*sTarget: Legacy PIC all CPUs\n", ind, "");
		return;
	}

	if (!irqd->chip_data) {
		seq_printf(m, "%*sVector: Not assigned\n", ind, "");
		return;
	}

	raw_spin_lock_irqsave(&vector_lock, flags);
	memcpy(&apicd, irqd->chip_data, sizeof(apicd));
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	seq_printf(m, "%*sVector: %5u\n", ind, "", apicd.vector);
	seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu);
	if (apicd.prev_vector) {
		seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", apicd.prev_vector);
		seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu);
	}
	seq_printf(m, "%*smove_in_progress: %u\n", ind, "", apicd.move_in_progress ? 1 : 0);
	seq_printf(m, "%*sis_managed: %u\n", ind, "", apicd.is_managed ? 1 : 0);
	seq_printf(m, "%*scan_reserve: %u\n", ind, "", apicd.can_reserve ? 1 : 0);
	seq_printf(m, "%*shas_reserved: %u\n", ind, "", apicd.has_reserved ? 1 : 0);
	seq_printf(m, "%*scleanup_pending: %u\n", ind, "", !hlist_unhashed(&apicd.clist));
}
#endif

static const struct irq_domain_ops x86_vector_domain_ops = {
	.alloc		= x86_vector_alloc_irqs,
	.free		= x86_vector_free_irqs,
	.activate	= x86_vector_activate,
	.deactivate	= x86_vector_deactivate,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
	.debug_show	= x86_vector_debug_show,
#endif
};

int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI)
	/* For MSI and HT dynamic irqs */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr += 8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	/*
	 * We don't know whether a PIC is present at this point, so we
	 * need probe() to get the right number of legacy IRQs.
	 */
	return legacy_pic->probe();
}

void lapic_assign_legacy_vector(unsigned int irq, bool replace)
{
	/*
	 * Use assign_system here so that the vector won't get accounted
	 * as allocated and movable in the CPU hotplug check, and so that
	 * managed irq reservation can't touch it.
	 */
	irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
}

void __init lapic_assign_system_vectors(void)
{
	unsigned int i, vector = 0;

	for_each_set_bit_from(vector, system_vectors, NR_VECTORS)
		irq_matrix_assign_system(vector_matrix, vector, false);

	if (nr_legacy_irqs() > 1)
		lapic_assign_legacy_vector(PIC_CASCADE_IR, false);

	/* System vectors are reserved, so bring the matrix online */
	irq_matrix_online(vector_matrix);

	/* Mark the preallocated legacy interrupts */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		if (i != PIC_CASCADE_IR)
			irq_matrix_assign(vector_matrix, ISA_IRQ_VECTOR(i));
	}
}

int __init arch_early_irq_init(void)
{
	struct fwnode_handle *fn;

	fn = irq_domain_alloc_named_fwnode("VECTOR");
	BUG_ON(!fn);
	x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
						   NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_set_default_host(x86_vector_domain);

	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));

	/*
	 * Allocate the vector matrix allocator data structure and limit the
	 * search area.
	 */
	vector_matrix = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR,
					 FIRST_SYSTEM_VECTOR);
	BUG_ON(!vector_matrix);

	return arch_early_ioapic_init();
}

#ifdef CONFIG_SMP

static struct irq_desc *__setup_vector_irq(int vector)
{
	int isairq = vector - ISA_IRQ_VECTOR(0);

	/* Check whether the irq is in the legacy space */
	if (isairq < 0 || isairq >= nr_legacy_irqs())
		return VECTOR_UNUSED;
	/* Check whether the irq is handled by the IOAPIC */
	if (test_bit(isairq, &io_apic_irqs))
		return VECTOR_UNUSED;
	return irq_to_desc(isairq);
}

/* Online the local APIC infrastructure and initialize the vectors */
void lapic_online(void)
{
	unsigned int vector;

	lockdep_assert_held(&vector_lock);

	/* Online the vector matrix array for this CPU */
	irq_matrix_online(vector_matrix);

	/*
	 * The interrupt affinity logic never targets interrupts to offline
	 * CPUs. The exceptions are the legacy PIC interrupts. In general
	 * they are only targeted to CPU0, but depending on the platform
	 * they can be distributed to any online CPU in hardware. The
	 * kernel has no influence on that. So all active legacy vectors
	 * must be installed on all CPUs. All non-legacy interrupts can be
	 * cleared.
	 */
	for (vector = 0; vector < NR_VECTORS; vector++)
		this_cpu_write(vector_irq[vector], __setup_vector_irq(vector));
}

void lapic_offline(void)
{
	lock_vector_lock();
	irq_matrix_offline(vector_matrix);
	unlock_vector_lock();
}

static int apic_set_affinity(struct irq_data *irqd,
			     const struct cpumask *dest, bool force)
{
	int err;

	if (WARN_ON_ONCE(!irqd_is_activated(irqd)))
		return -EIO;

	raw_spin_lock(&vector_lock);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (irqd_affinity_is_managed(irqd))
		err = assign_managed_vector(irqd, vector_searchmask);
	else
		err = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock(&vector_lock);
	return err ? err : IRQ_SET_MASK_OK;
}

#else
# define apic_set_affinity	NULL
#endif

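/*
 * Retrigger the interrupt by sending an IPI with the assigned vector
 * to its current target CPU.
 */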
static int apic_retrigger_irq(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apic->send_IPI(apicd->cpu, apicd->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

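/*
 * Perform a pending interrupt migration, if any, and acknowledge the
 * interrupt at the local APIC.
 */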
void apic_ack_irq(struct irq_data *irqd)
{
	irq_move_irq(irqd);
	ack_APIC_irq();
}

void apic_ack_edge(struct irq_data *irqd)
{
	irq_complete_move(irqd_cfg(irqd));
	apic_ack_irq(irqd);
}

static struct irq_chip lapic_controller = {
	.name			= "APIC",
	.irq_ack		= apic_ack_edge,
	.irq_set_affinity	= apic_set_affinity,
	.irq_compose_msi_msg	= x86_vector_msi_compose_msg,
	.irq_retrigger		= apic_retrigger_irq,
};

#ifdef CONFIG_SMP

static void free_moved_vector(struct apic_chip_data *apicd)
{
	unsigned int vector = apicd->prev_vector;
	unsigned int cpu = apicd->prev_cpu;
	bool managed = apicd->is_managed;

	/*
	 * Managed interrupts are usually not migrated away
	 * from an online CPU, but CPU isolation 'managed_irq'
	 * can make that happen.
	 * 1) Activation does not take the isolation into account
	 *    to keep the code simple
	 * 2) Migration away from an isolated CPU can happen when
	 *    a non-isolated CPU which is in the calculated
	 *    affinity mask comes online.
	 */
	trace_vector_free_moved(apicd->irq, cpu, vector, managed);
	irq_matrix_free(vector_matrix, cpu, vector, managed);
	per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
	hlist_del_init(&apicd->clist);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
}

DEFINE_IDTENTRY_SYSVEC(sysvec_irq_move_cleanup)
{
	struct hlist_head *clhead = this_cpu_ptr(&cleanup_list);
	struct apic_chip_data *apicd;
	struct hlist_node *tmp;

	ack_APIC_irq();
	/* Prevent vectors from vanishing under us */
	raw_spin_lock(&vector_lock);

	hlist_for_each_entry_safe(apicd, tmp, clhead, clist) {
		unsigned int irr, vector = apicd->prev_vector;

		/*
		 * Paranoia: Check if the vector that needs to be cleaned
		 * up is registered at the APIC's IRR. If so, then this is
		 * not the best time to clean it up. Clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to this CPU. IRQ_MOVE_CLEANUP_VECTOR is the lowest
		 * priority external vector, so on return from this
		 * interrupt the device interrupt will happen first.
		 */
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1U << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			continue;
		}
		free_moved_vector(apicd);
	}

	raw_spin_unlock(&vector_lock);
}

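/*
 * Queue the previous vector on the cleanup list of its old target CPU
 * and kick that CPU with the cleanup IPI. If the CPU went offline in
 * the meantime there is nothing to clean up and prev_vector is simply
 * cleared.
 */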
static void __send_cleanup_vector(struct apic_chip_data *apicd)
{
	unsigned int cpu;

	raw_spin_lock(&vector_lock);
	apicd->move_in_progress = 0;
	cpu = apicd->prev_cpu;
	if (cpu_online(cpu)) {
		hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu));
		apic->send_IPI(cpu, IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		apicd->prev_vector = 0;
	}
	raw_spin_unlock(&vector_lock);
}

void send_cleanup_vector(struct irq_cfg *cfg)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (apicd->move_in_progress)
		__send_cleanup_vector(apicd);
}

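/*
 * Called on interrupt entry via apic_ack_edge() to finalize a pending
 * vector move once an interrupt has arrived on the new target CPU.
 */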
void irq_complete_move(struct irq_cfg *cfg)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (likely(!apicd->move_in_progress))
		return;

	/*
	 * If the interrupt arrived on the new target CPU, cleanup the
	 * vector on the old target CPU. A vector check is not required
	 * because an interrupt can never move from one vector to another
	 * on the same CPU.
	 */
	if (apicd->cpu == smp_processor_id())
		__send_cleanup_vector(apicd);
}

/*
 * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
 */
void irq_force_complete_move(struct irq_desc *desc)
{
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	unsigned int vector;

	/*
	 * The function is called for all descriptors regardless of which
	 * irqdomain they belong to. For example if an IRQ is provided by
	 * an irq_chip as part of a GPIO driver, the chip data for that
	 * descriptor is specific to the irq_chip in question.
	 *
	 * Check first that the chip_data is what we expect
	 * (apic_chip_data) before touching it any further.
	 */
	irqd = irq_domain_get_irq_data(x86_vector_domain,
				       irq_desc_get_irq(desc));
	if (!irqd)
		return;

	raw_spin_lock(&vector_lock);
	apicd = apic_chip_data(irqd);
	if (!apicd)
		goto unlock;

	/*
	 * If prev_vector is empty, no action required.
	 */
	vector = apicd->prev_vector;
	if (!vector)
		goto unlock;

	/*
	 * This is tricky. If the cleanup of the old vector has not been
	 * done yet, then the following set_affinity call will fail with
	 * -EBUSY. This can leave the interrupt in a stale state.
	 *
	 * All CPUs are stuck in stop machine with interrupts disabled so
	 * calling __irq_complete_move() would be completely pointless.
	 *
	 * 1) The interrupt is in move_in_progress state. That means that we
	 *    have not seen an interrupt since the io_apic was reprogrammed to
	 *    the new vector.
	 *
	 * 2) The interrupt has fired on the new vector, but the cleanup IPIs
	 *    have not been processed yet.
	 */
	if (apicd->move_in_progress) {
		/*
		 * In theory there is a race:
		 *
		 * set_ioapic(new_vector) <-- Interrupt is raised before update
		 *			      is effective, i.e. it's raised on
		 *			      the old vector.
		 *
		 * So if the target CPU cannot handle that interrupt before
		 * the old vector is cleaned up, we get a spurious interrupt
		 * and in the worst case the ioapic irq line becomes stale.
		 *
		 * But in case of CPU hotplug this should be a non-issue
		 * because if the affinity update happens right before all
		 * CPUs rendezvous in stop machine, there is no way that the
		 * interrupt can be blocked on the target CPU because all CPUs
		 * loop first with interrupts enabled in stop machine, so the
		 * old vector is not yet cleaned up when the interrupt fires.
		 *
		 * So the only way to run into this issue is if the delivery
		 * of the interrupt on the apic/system bus would be delayed
		 * beyond the point where the target CPU disables interrupts
		 * in stop machine. I doubt that it can happen, but at least
		 * there is a theoretical chance. Virtualization might be
		 * able to expose this, but AFAICT the IOAPIC emulation is not
		 * as stupid as the real hardware.
		 *
		 * Anyway, there is nothing we can do about that at this point
		 * w/o refactoring the whole fixup_irq() business completely.
		 * We print at least the irq number and the old vector number,
		 * so we have the necessary information when a problem in that
		 * area arises.
		 */
		pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
			irqd->irq, vector);
	}
	free_moved_vector(apicd);
unlock:
	raw_spin_unlock(&vector_lock);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Note, this is not accurate accounting, but at least good enough to
 * prevent the actual interrupt move from running out of vectors.
 */
int lapic_can_unplug_cpu(void)
{
	unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
	int ret = 0;

	raw_spin_lock(&vector_lock);
	tomove = irq_matrix_allocated(vector_matrix);
	avl = irq_matrix_available(vector_matrix, true);
	if (avl < tomove) {
		pr_warn("CPU %u has %u vectors, %u available. Cannot disable CPU\n",
			cpu, tomove, avl);
		ret = -ENOSPC;
		goto out;
	}
	rsvd = irq_matrix_reserved(vector_matrix);
	if (avl < rsvd) {
		pr_warn("Reserved vectors %u > available %u. IRQ request may fail\n",
			rsvd, avl);
	}
out:
	raw_spin_unlock(&vector_lock);
	return ret;
}
#endif /* HOTPLUG_CPU */
#endif /* SMP */

static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i*0x10));

	pr_cont("\n");
}

static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}

static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC IRR: %04x\n", v);

	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	pr_debug("... PIC ELCR: %04x\n", v);
}

static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);