Lines Matching +full:interrupt +full:- +full:affinity
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
4 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
6 * This file contains the interrupt descriptor management code. Detailed
7 * information is available in Documentation/core-api/genericirq.rst
13 #include <linux/interrupt.h>
15 #include <linux/radix-tree.h>
23 * lockdep: we want to handle all irq_desc locks as a single lock-class:
57 if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity, in alloc_masks()
59 return -ENOMEM; in alloc_masks()
62 if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity, in alloc_masks()
64 free_cpumask_var(desc->irq_common_data.affinity); in alloc_masks()
65 return -ENOMEM; in alloc_masks()
70 if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) { in alloc_masks()
72 free_cpumask_var(desc->irq_common_data.effective_affinity); in alloc_masks()
74 free_cpumask_var(desc->irq_common_data.affinity); in alloc_masks()
75 return -ENOMEM; in alloc_masks()
82 const struct cpumask *affinity) in desc_smp_init() argument
84 if (!affinity) in desc_smp_init()
85 affinity = irq_default_affinity; in desc_smp_init()
86 cpumask_copy(desc->irq_common_data.affinity, affinity); in desc_smp_init()
89 cpumask_clear(desc->pending_mask); in desc_smp_init()
92 desc->irq_common_data.node = node; in desc_smp_init()
100 desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { } in desc_smp_init() argument
104 const struct cpumask *affinity, struct module *owner) in desc_set_defaults() argument
108 desc->irq_common_data.handler_data = NULL; in desc_set_defaults()
109 desc->irq_common_data.msi_desc = NULL; in desc_set_defaults()
111 desc->irq_data.common = &desc->irq_common_data; in desc_set_defaults()
112 desc->irq_data.irq = irq; in desc_set_defaults()
113 desc->irq_data.chip = &no_irq_chip; in desc_set_defaults()
114 desc->irq_data.chip_data = NULL; in desc_set_defaults()
116 irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); in desc_set_defaults()
117 irqd_set(&desc->irq_data, IRQD_IRQ_MASKED); in desc_set_defaults()
118 desc->handle_irq = handle_bad_irq; in desc_set_defaults()
119 desc->depth = 1; in desc_set_defaults()
120 desc->irq_count = 0; in desc_set_defaults()
121 desc->irqs_unhandled = 0; in desc_set_defaults()
122 desc->tot_count = 0; in desc_set_defaults()
123 desc->name = NULL; in desc_set_defaults()
124 desc->owner = owner; in desc_set_defaults()
126 *per_cpu_ptr(desc->kstat_irqs, cpu) = 0; in desc_set_defaults()
127 desc_smp_init(desc, node, affinity); in desc_set_defaults()
157 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c); in per_cpu_count_show()
161 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); in per_cpu_count_show()
172 raw_spin_lock_irq(&desc->lock); in chip_name_show()
173 if (desc->irq_data.chip && desc->irq_data.chip->name) { in chip_name_show()
175 desc->irq_data.chip->name); in chip_name_show()
177 raw_spin_unlock_irq(&desc->lock); in chip_name_show()
189 raw_spin_lock_irq(&desc->lock); in hwirq_show()
190 if (desc->irq_data.domain) in hwirq_show()
191 ret = sprintf(buf, "%lu\n", desc->irq_data.hwirq); in hwirq_show()
192 raw_spin_unlock_irq(&desc->lock); in hwirq_show()
204 raw_spin_lock_irq(&desc->lock); in type_show()
206 irqd_is_level_type(&desc->irq_data) ? "level" : "edge"); in type_show()
207 raw_spin_unlock_irq(&desc->lock); in type_show()
220 raw_spin_lock_irq(&desc->lock); in wakeup_show()
222 irqd_is_wakeup_set(&desc->irq_data) ? "enabled" : "disabled"); in wakeup_show()
223 raw_spin_unlock_irq(&desc->lock); in wakeup_show()
236 raw_spin_lock_irq(&desc->lock); in name_show()
237 if (desc->name) in name_show()
238 ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name); in name_show()
239 raw_spin_unlock_irq(&desc->lock); in name_show()
253 raw_spin_lock_irq(&desc->lock); in actions_show()
255 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s", in actions_show()
256 p, action->name); in actions_show()
259 raw_spin_unlock_irq(&desc->lock); in actions_show()
262 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); in actions_show()
293 if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq)) in irq_sysfs_add()
307 kobject_del(&desc->kobj); in irq_sysfs_del()
321 return -ENOMEM; in irq_sysfs_init()
368 free_cpumask_var(desc->pending_mask); in free_masks()
370 free_cpumask_var(desc->irq_common_data.affinity); in free_masks()
372 free_cpumask_var(desc->irq_common_data.effective_affinity); in free_masks()
390 const struct cpumask *affinity, in alloc_desc() argument
399 desc->kstat_irqs = alloc_percpu(unsigned int); in alloc_desc()
400 if (!desc->kstat_irqs) in alloc_desc()
406 raw_spin_lock_init(&desc->lock); in alloc_desc()
407 lockdep_set_class(&desc->lock, &irq_desc_lock_class); in alloc_desc()
408 mutex_init(&desc->request_mutex); in alloc_desc()
409 init_rcu_head(&desc->rcu); in alloc_desc()
410 init_waitqueue_head(&desc->wait_for_threads); in alloc_desc()
412 desc_set_defaults(irq, desc, node, affinity, owner); in alloc_desc()
413 irqd_set(&desc->irq_data, flags); in alloc_desc()
414 kobject_init(&desc->kobj, &irq_kobj_type); in alloc_desc()
419 free_percpu(desc->kstat_irqs); in alloc_desc()
430 free_percpu(desc->kstat_irqs); in irq_kobj_release()
438 kobject_put(&desc->kobj); in delayed_free_desc()
466 call_rcu(&desc->rcu, delayed_free_desc); in free_desc()
470 const struct irq_affinity_desc *affinity, in alloc_descs() argument
476 /* Validate affinity mask(s) */ in alloc_descs()
477 if (affinity) { in alloc_descs()
479 if (cpumask_empty(&affinity[i].mask)) in alloc_descs()
480 return -EINVAL; in alloc_descs()
488 if (affinity) { in alloc_descs()
489 if (affinity->is_managed) { in alloc_descs()
493 mask = &affinity->mask; in alloc_descs()
495 affinity++; in alloc_descs()
509 for (i--; i >= 0; i--) in alloc_descs()
511 return -ENOMEM; in alloc_descs()
517 return -ENOMEM; in irq_expand_nr_irqs()
554 [0 ... NR_IRQS-1] = {
557 .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
596 raw_spin_lock_irqsave(&desc->lock, flags); in free_desc()
598 raw_spin_unlock_irqrestore(&desc->lock, flags); in free_desc()
602 const struct irq_affinity_desc *affinity, in alloc_descs() argument
610 desc->owner = owner; in alloc_descs()
618 return -ENOMEM; in irq_expand_nr_irqs()
642 return -EINVAL; in handle_irq_desc()
646 return -EPERM; in handle_irq_desc()
653 * generic_handle_irq - Invoke the handler for a particular irq
656 * Returns: 0 on success, or -EINVAL if conversion has failed
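A minimal usage sketch (editorial addition, not a matched line; child_virq and the handler are hypothetical, assuming the mapping was created elsewhere, e.g. with irq_create_mapping()):

#include <linux/interrupt.h>
#include <linux/irq.h>

static unsigned int child_virq;	/* hypothetical, mapped elsewhere */

static irqreturn_t demo_demux_handler(int irq, void *dev_id)
{
	/*
	 * Look up the irq_desc for child_virq and run its flow handler;
	 * fails with -EINVAL if no descriptor exists, or -EPERM if the
	 * irq enforces IRQ context and we are not in one. From process
	 * context, call generic_handle_irq_safe() instead.
	 */
	if (generic_handle_irq(child_virq))
		return IRQ_NONE;
	return IRQ_HANDLED;
}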
668 * generic_handle_irq_safe - Invoke the handler for a particular irq from any
676 * marked to enforce IRQ-context only.
692 * generic_handle_domain_irq - Invoke the handler for a HW irq belonging
697 * Returns: 0 on success, or -EINVAL if conversion has failed
709 * generic_handle_domain_irq_safe - Invoke the handler for a HW irq belonging
717 * context). If the interrupt is marked as 'enforce IRQ-context only' then
718 * the function must be invoked from hard interrupt context.
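A sketch of the domain variants (editorial addition; demo_domain and the status read are hypothetical). generic_handle_domain_irq() resolves the hwirq through the domain before handling; the _safe variant additionally disables local interrupts so it may be called from any context:

#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>

static struct irq_domain *demo_domain;	/* hypothetical, created by the driver */

static void demo_chained_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long pending = 0;	/* hypothetical: read the pending register here */
	unsigned int hwirq;

	chained_irq_enter(chip, desc);
	for_each_set_bit(hwirq, &pending, BITS_PER_LONG)
		generic_handle_domain_irq(demo_domain, hwirq);
	chained_irq_exit(chip, desc);
}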
733 * generic_handle_domain_nmi - Invoke the handler for a HW nmi belonging
738 * Returns: 0 on success, or -EINVAL if conversion has failed
750 /* Dynamic interrupt handling */
753 * irq_free_descs - free irq descriptors
774 * __irq_alloc_descs - allocate and initialize a range of irq descriptors
780 * @affinity: Optional pointer to an affinity mask array of size @cnt which
788 struct module *owner, const struct irq_affinity_desc *affinity) in __irq_alloc_descs() argument
793 return -EINVAL; in __irq_alloc_descs()
797 return -EINVAL; in __irq_alloc_descs()
812 ret = -EEXIST; in __irq_alloc_descs()
821 ret = alloc_descs(start, cnt, node, affinity, owner); in __irq_alloc_descs()
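Callers normally reach __irq_alloc_descs() through the irq_alloc_descs()/irq_alloc_desc() macros in <linux/irq.h>, which pass THIS_MODULE and a NULL affinity array. A minimal sketch (editorial addition; the function is hypothetical):

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/numa.h>

static int demo_alloc_range(void)
{
	/* irq == -1: let the core pick a free range of 4 at or above 0. */
	int first = irq_alloc_descs(-1, 0, 4, NUMA_NO_NODE);

	if (first < 0)
		return first;	/* -EINVAL, -EEXIST or -ENOMEM, as above */

	/* ... install chips and handlers for first .. first + 3 ... */

	irq_free_descs(first, 4);
	return 0;
}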
829 * irq_get_next_irq - get next allocated irq number
858 raw_spin_lock_irqsave(&desc->lock, *flags); in __irq_get_desc_lock()
864 __releases(&desc->lock) in __irq_put_desc_unlock()
866 raw_spin_unlock_irqrestore(&desc->lock, flags); in __irq_put_desc_unlock()
872 const struct cpumask *affinity) in irq_set_percpu_devid_partition() argument
877 return -EINVAL; in irq_set_percpu_devid_partition()
879 if (desc->percpu_enabled) in irq_set_percpu_devid_partition()
880 return -EINVAL; in irq_set_percpu_devid_partition()
882 desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL); in irq_set_percpu_devid_partition()
884 if (!desc->percpu_enabled) in irq_set_percpu_devid_partition()
885 return -ENOMEM; in irq_set_percpu_devid_partition()
887 if (affinity) in irq_set_percpu_devid_partition()
888 desc->percpu_affinity = affinity; in irq_set_percpu_devid_partition()
890 desc->percpu_affinity = cpu_possible_mask; in irq_set_percpu_devid_partition()
901 int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity) in irq_get_percpu_devid_partition() argument
905 if (!desc || !desc->percpu_enabled) in irq_get_percpu_devid_partition()
906 return -EINVAL; in irq_get_percpu_devid_partition()
908 if (affinity) in irq_get_percpu_devid_partition()
909 cpumask_copy(affinity, desc->percpu_affinity); in irq_get_percpu_devid_partition()
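A usage sketch for the per-CPU devid pair (editorial addition; demo_* names are hypothetical). Passing a NULL partition defaults the recorded affinity to cpu_possible_mask:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/irq.h>

static int demo_percpu_partition(unsigned int virq, const struct cpumask *part)
{
	cpumask_var_t mask;
	int err;

	err = irq_set_percpu_devid_partition(virq, part);
	if (err)
		return err;	/* -EINVAL or -ENOMEM, as above */

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* Reads back @part, or cpu_possible_mask if none was given. */
	err = irq_get_percpu_devid_partition(virq, mask);
	free_cpumask_var(mask);
	return err;
}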
921 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
922 * @irq: The interrupt number
925 * Returns the sum of interrupt counts on @cpu since boot for
926 * @irq. The caller must ensure that the interrupt is not removed
933 return desc && desc->kstat_irqs ? in kstat_irqs_cpu()
934 *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; in kstat_irqs_cpu()
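A sketch of the per-CPU walk /proc/interrupts performs for each row (editorial addition; the helper is hypothetical). kstat_irqs_cpu() returns 0 when the descriptor or its counters are absent:

#include <linux/cpumask.h>
#include <linux/kernel_stat.h>

static unsigned int demo_irq_total(unsigned int irq)
{
	unsigned int cpu, sum = 0;

	for_each_online_cpu(cpu)
		sum += kstat_irqs_cpu(irq, cpu);
	return sum;
}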
939 return desc->istate & IRQS_NMI; in irq_is_nmi()
948 if (!desc || !desc->kstat_irqs) in kstat_irqs()
953 return data_race(desc->tot_count); in kstat_irqs()
956 sum += data_race(*per_cpu_ptr(desc->kstat_irqs, cpu)); in kstat_irqs()
961 * kstat_irqs_usr - Get the statistics for an interrupt from thread context
962 * @irq: The interrupt number
964 * Returns the sum of interrupt counts on all cpus since boot for @irq.
967 * interrupt descriptor is observing an rcu grace period before
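A sketch of a thread-context reader (editorial addition; the seq_file routine is hypothetical). kstat_irqs_usr() wraps the lookup in rcu_read_lock(), so the caller needs no descriptor lock:

#include <linux/kernel_stat.h>
#include <linux/seq_file.h>

static int demo_irq_total_show(struct seq_file *m, void *v)
{
	unsigned int irq = (unsigned long)m->private;

	seq_printf(m, "irq %u: total %u\n", irq, kstat_irqs_usr(irq));
	return 0;
}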
987 lockdep_set_class(&desc->lock, lock_class); in __irq_set_lockdep_class()
988 lockdep_set_class(&desc->request_mutex, request_class); in __irq_set_lockdep_class()
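Drivers reach this through the irq_set_lockdep_class() inline in <linux/irqdesc.h>, which compiles away without CONFIG_LOCKDEP. A sketch (editorial addition; demo_* names are hypothetical), giving demultiplexed child irqs their own classes so nesting under the parent's desc->lock is not flagged as a false positive:

#include <linux/irqdesc.h>

static struct lock_class_key demo_lock_key;
static struct lock_class_key demo_request_key;

static void demo_set_child_keys(unsigned int child_irq)
{
	irq_set_lockdep_class(child_irq, &demo_lock_key, &demo_request_key);
}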