Lines matching full:desc (symbol cross-reference for the identifier desc, a struct irq_desc pointer; the matched functions all belong to kernel/irq/irqdesc.c). Each entry reads: <source line number> <matching source line> in <enclosing function>() <role: argument or local>. Only source lines containing the identifier are listed, so loop headers, #ifdef guards, and bare braces between matches are elided.
55 static int alloc_masks(struct irq_desc *desc, int node) in alloc_masks() argument
57 if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity, in alloc_masks()
62 if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity, in alloc_masks()
64 free_cpumask_var(desc->irq_common_data.affinity); in alloc_masks()
70 if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) { in alloc_masks()
72 free_cpumask_var(desc->irq_common_data.effective_affinity); in alloc_masks()
74 free_cpumask_var(desc->irq_common_data.affinity); in alloc_masks()
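
The matches at 55-74 are alloc_masks(), which allocates up to three cpumasks for a descriptor and rolls the earlier allocations back when a later one fails. A sketch of that shape, with the #ifdef guards the listing elides restored as comments; the -ENOMEM returns are assumed from the usual kernel convention, since the bare return lines do not contain "desc" and are not shown:

    static int alloc_masks(struct irq_desc *desc, int node)
    {
        /* base affinity mask: mandatory */
        if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
                                     GFP_KERNEL, node))
            return -ENOMEM;

        /* under CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK: unwind the first mask */
        if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
                                     GFP_KERNEL, node)) {
            free_cpumask_var(desc->irq_common_data.affinity);
            return -ENOMEM;
        }

        /* under CONFIG_GENERIC_PENDING_IRQ: unwind both earlier masks */
        if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
            free_cpumask_var(desc->irq_common_data.effective_affinity);
            free_cpumask_var(desc->irq_common_data.affinity);
            return -ENOMEM;
        }
        return 0;
    }
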
81 static void desc_smp_init(struct irq_desc *desc, int node, in desc_smp_init() argument
86 cpumask_copy(desc->irq_common_data.affinity, affinity); in desc_smp_init()
89 cpumask_clear(desc->pending_mask); in desc_smp_init()
92 desc->irq_common_data.node = node; in desc_smp_init()
98 alloc_masks(struct irq_desc *desc, int node) { return 0; } in alloc_masks() argument
100 desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { } in desc_smp_init() argument
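
Lines 98 and 100 are the !CONFIG_SMP stubs: on UP builds mask allocation trivially succeeds and the SMP init is a no-op, so callers such as alloc_desc() and desc_set_defaults() need no #ifdefs of their own. Reconstructed (the static inline qualifiers presumably sit on the preceding, unmatched lines):

    #else /* !CONFIG_SMP */
    static inline int
    alloc_masks(struct irq_desc *desc, int node) { return 0; }
    static inline void
    desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
    #endif
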
103 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node, in desc_set_defaults() argument
108 desc->irq_common_data.handler_data = NULL; in desc_set_defaults()
109 desc->irq_common_data.msi_desc = NULL; in desc_set_defaults()
111 desc->irq_data.common = &desc->irq_common_data; in desc_set_defaults()
112 desc->irq_data.irq = irq; in desc_set_defaults()
113 desc->irq_data.chip = &no_irq_chip; in desc_set_defaults()
114 desc->irq_data.chip_data = NULL; in desc_set_defaults()
115 irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS); in desc_set_defaults()
116 irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); in desc_set_defaults()
117 irqd_set(&desc->irq_data, IRQD_IRQ_MASKED); in desc_set_defaults()
118 desc->handle_irq = handle_bad_irq; in desc_set_defaults()
119 desc->depth = 1; in desc_set_defaults()
120 desc->irq_count = 0; in desc_set_defaults()
121 desc->irqs_unhandled = 0; in desc_set_defaults()
122 desc->tot_count = 0; in desc_set_defaults()
123 desc->name = NULL; in desc_set_defaults()
124 desc->owner = owner; in desc_set_defaults()
126 *per_cpu_ptr(desc->kstat_irqs, cpu) = 0; in desc_set_defaults()
127 desc_smp_init(desc, node, affinity); in desc_set_defaults()
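
desc_set_defaults() (103-127) resets a descriptor to a safe baseline: no chip (&no_irq_chip), handle_bad_irq as the flow handler so a stray interrupt gets reported instead of dispatched, depth 1 (disabled), the IRQD_IRQ_DISABLED and IRQD_IRQ_MASKED state bits, and cleared counters. The statistics reset at 126 appears above without its loop header because that header does not contain "desc"; the tail of the function presumably reads:

    int cpu;
    ...
    for_each_possible_cpu(cpu)
        *per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
    desc_smp_init(desc, node, affinity);
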
149 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); in per_cpu_count_show() local
155 unsigned int c = irq_desc_kstat_cpu(desc, cpu); in per_cpu_count_show()
169 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); in chip_name_show() local
172 raw_spin_lock_irq(&desc->lock); in chip_name_show()
173 if (desc->irq_data.chip && desc->irq_data.chip->name) { in chip_name_show()
175 desc->irq_data.chip->name); in chip_name_show()
177 raw_spin_unlock_irq(&desc->lock); in chip_name_show()
186 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); in hwirq_show() local
189 raw_spin_lock_irq(&desc->lock); in hwirq_show()
190 if (desc->irq_data.domain) in hwirq_show()
191 ret = sprintf(buf, "%lu\n", desc->irq_data.hwirq); in hwirq_show()
192 raw_spin_unlock_irq(&desc->lock); in hwirq_show()
201 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); in type_show() local
204 raw_spin_lock_irq(&desc->lock); in type_show()
206 irqd_is_level_type(&desc->irq_data) ? "level" : "edge"); in type_show()
207 raw_spin_unlock_irq(&desc->lock); in type_show()
217 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); in wakeup_show() local
220 raw_spin_lock_irq(&desc->lock); in wakeup_show()
222 irqd_is_wakeup_set(&desc->irq_data) ? "enabled" : "disabled"); in wakeup_show()
223 raw_spin_unlock_irq(&desc->lock); in wakeup_show()
233 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); in name_show() local
236 raw_spin_lock_irq(&desc->lock); in name_show()
237 if (desc->name) in name_show()
238 ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name); in name_show()
239 raw_spin_unlock_irq(&desc->lock); in name_show()
248 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); in actions_show() local
253 raw_spin_lock_irq(&desc->lock); in actions_show()
254 for (action = desc->action; action != NULL; action = action->next) { in actions_show()
259 raw_spin_unlock_irq(&desc->lock); in actions_show()
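
The matches from 149 through 259 are the sysfs attribute show functions (per_cpu_count, chip_name, hwirq, type, wakeup, name, actions). They all share one shape: recover the irq_desc from the embedded kobject via container_of(), take desc->lock with interrupts disabled, format into the page buffer, unlock. A hypothetical attribute in the same style (example_show is not in the file; it only makes the shared pattern explicit):

    static ssize_t example_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf)
    {
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);   /* stabilize the desc fields we read */
        if (desc->name)
            ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
        raw_spin_unlock_irq(&desc->lock);
        return ret;
    }
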
286 static void irq_sysfs_add(int irq, struct irq_desc *desc) in irq_sysfs_add() argument
293 if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq)) in irq_sysfs_add()
298 static void irq_sysfs_del(struct irq_desc *desc) in irq_sysfs_del() argument
307 kobject_del(&desc->kobj); in irq_sysfs_del()
312 struct irq_desc *desc; in irq_sysfs_init() local
325 for_each_irq_desc(irq, desc) in irq_sysfs_init()
326 irq_sysfs_add(irq, desc); in irq_sysfs_init()
339 static void irq_sysfs_add(int irq, struct irq_desc *desc) {} in irq_sysfs_add() argument
340 static void irq_sysfs_del(struct irq_desc *desc) {} in irq_sysfs_del() argument
346 static void irq_insert_desc(unsigned int irq, struct irq_desc *desc) in irq_insert_desc() argument
348 radix_tree_insert(&irq_desc_tree, irq, desc); in irq_insert_desc()
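
With CONFIG_SPARSE_IRQ, descriptors live in a radix tree keyed by the Linux irq number; irq_insert_desc() at 346-348 is the store side. Its counterpart in the same file (not matched above, reproduced from the kernel source) is the one-liner behind every irq_to_desc() call:

    struct irq_desc *irq_to_desc(unsigned int irq)
    {
        return radix_tree_lookup(&irq_desc_tree, irq);
    }
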
365 static void free_masks(struct irq_desc *desc) in free_masks() argument
368 free_cpumask_var(desc->pending_mask); in free_masks()
370 free_cpumask_var(desc->irq_common_data.affinity); in free_masks()
372 free_cpumask_var(desc->irq_common_data.effective_affinity); in free_masks()
376 static inline void free_masks(struct irq_desc *desc) { } in free_masks() argument
393 struct irq_desc *desc; in alloc_desc() local
395 desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node); in alloc_desc()
396 if (!desc) in alloc_desc()
399 desc->kstat_irqs = alloc_percpu(unsigned int); in alloc_desc()
400 if (!desc->kstat_irqs) in alloc_desc()
403 if (alloc_masks(desc, node)) in alloc_desc()
406 raw_spin_lock_init(&desc->lock); in alloc_desc()
407 lockdep_set_class(&desc->lock, &irq_desc_lock_class); in alloc_desc()
408 mutex_init(&desc->request_mutex); in alloc_desc()
409 init_rcu_head(&desc->rcu); in alloc_desc()
411 desc_set_defaults(irq, desc, node, affinity, owner); in alloc_desc()
412 irqd_set(&desc->irq_data, flags); in alloc_desc()
413 kobject_init(&desc->kobj, &irq_kobj_type); in alloc_desc()
415 return desc; in alloc_desc()
418 free_percpu(desc->kstat_irqs); in alloc_desc()
420 kfree(desc); in alloc_desc()
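
alloc_desc() (393-420) builds a descriptor in a fixed order: the node-local struct, the per-CPU counters, the cpumasks, then lock/lockdep/mutex/RCU setup, defaults, and kobject init. The error-path matches at 418 and 420 imply the usual goto unwind; a sketch with label names assumed to be consistent with those two lines:

    desc->kstat_irqs = alloc_percpu(unsigned int);
    if (!desc->kstat_irqs)
        goto err_desc;

    if (alloc_masks(desc, node))
        goto err_kstat;
    ...
    err_kstat:
        free_percpu(desc->kstat_irqs);
    err_desc:
        kfree(desc);
        return NULL;
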
426 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); in irq_kobj_release() local
428 free_masks(desc); in irq_kobj_release()
429 free_percpu(desc->kstat_irqs); in irq_kobj_release()
430 kfree(desc); in irq_kobj_release()
435 struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu); in delayed_free_desc() local
437 kobject_put(&desc->kobj); in delayed_free_desc()
442 struct irq_desc *desc = irq_to_desc(irq); in free_desc() local
444 irq_remove_debugfs_entry(desc); in free_desc()
445 unregister_irq_proc(irq, desc); in free_desc()
456 irq_sysfs_del(desc); in free_desc()
465 call_rcu(&desc->rcu, delayed_free_desc); in free_desc()
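
Lines 426-465 spell out the teardown chain. Because a descriptor may still be reachable by concurrent lookups, free_desc() cannot kfree() it directly; it defers through an RCU grace period and the kobject reference count:

    /*
     * free_desc(irq)
     *   irq_remove_debugfs_entry(), unregister_irq_proc(), irq_sysfs_del()
     *   call_rcu(&desc->rcu, delayed_free_desc)
     *     ... grace period elapses ...
     *     delayed_free_desc() -> kobject_put(&desc->kobj)
     *       irq_kobj_release()          (last reference dropped)
     *         free_masks(desc); free_percpu(desc->kstat_irqs); kfree(desc);
     */
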
472 struct irq_desc *desc; in alloc_descs() local
497 desc = alloc_desc(start + i, node, flags, mask, owner); in alloc_descs()
498 if (!desc) in alloc_descs()
500 irq_insert_desc(start + i, desc); in alloc_descs()
501 irq_sysfs_add(start + i, desc); in alloc_descs()
502 irq_add_debugfs_entry(start + i, desc); in alloc_descs()
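
alloc_descs() (472-502) populates a contiguous range: for each irq in [start, start + cnt) it builds a descriptor, inserts it into the tree, and registers the sysfs and debugfs entries. A sketch of that loop; the affinity-selection code at the top of the loop body and the final bookkeeping are elided, and the unwind loop is an assumption consistent with free_desc() above:

    for (i = 0; i < cnt; i++) {
        /* ... derive node/flags/mask from the optional affinity descriptor ... */
        desc = alloc_desc(start + i, node, flags, mask, owner);
        if (!desc)
            goto err;
        irq_insert_desc(start + i, desc);
        irq_sysfs_add(start + i, desc);
        irq_add_debugfs_entry(start + i, desc);
    }
    return start;

    err:
        for (i--; i >= 0; i--)
            free_desc(start + i);
        return -ENOMEM;
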
524 struct irq_desc *desc; in early_irq_init() local
543 desc = alloc_desc(i, node, 0, NULL, NULL); in early_irq_init()
545 irq_insert_desc(i, desc); in early_irq_init()
563 struct irq_desc *desc; in early_irq_init() local
569 desc = irq_desc; in early_irq_init()
573 desc[i].kstat_irqs = alloc_percpu(unsigned int); in early_irq_init()
574 alloc_masks(&desc[i], node); in early_irq_init()
575 raw_spin_lock_init(&desc[i].lock); in early_irq_init()
576 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); in early_irq_init()
577 mutex_init(&desc[i].request_mutex); in early_irq_init()
578 desc_set_defaults(i, &desc[i], node, NULL, NULL); in early_irq_init()
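
There are two early_irq_init() variants. Under CONFIG_SPARSE_IRQ (524-545) the boot-time descriptors are allocated dynamically and inserted into the radix tree; without it (563-578) the static irq_desc[] array is initialized in place, and the non-sparse free_desc()/alloc_descs() just below (591-608) accordingly never free memory, they only reset entries to defaults or claim ownership. The non-sparse init loop, reassembled with its header (the bound is ARRAY_SIZE(irq_desc) in the kernel source):

    desc = irq_desc;
    count = ARRAY_SIZE(irq_desc);

    for (i = 0; i < count; i++) {
        desc[i].kstat_irqs = alloc_percpu(unsigned int);
        alloc_masks(&desc[i], node);
        raw_spin_lock_init(&desc[i].lock);
        lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
        mutex_init(&desc[i].request_mutex);
        desc_set_defaults(i, &desc[i], node, NULL, NULL);
    }
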
591 struct irq_desc *desc = irq_to_desc(irq); in free_desc() local
594 raw_spin_lock_irqsave(&desc->lock, flags); in free_desc()
595 desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL); in free_desc()
596 raw_spin_unlock_irqrestore(&desc->lock, flags); in free_desc()
606 struct irq_desc *desc = irq_to_desc(start + i); in alloc_descs() local
608 desc->owner = owner; in alloc_descs()
635 int handle_irq_desc(struct irq_desc *desc) in handle_irq_desc() argument
639 if (!desc) in handle_irq_desc()
642 data = irq_desc_get_irq_data(desc); in handle_irq_desc()
646 generic_handle_irq_desc(desc); in handle_irq_desc()
693 struct irq_desc *desc; in handle_domain_irq() local
699 desc = irq_resolve_mapping(domain, hwirq); in handle_domain_irq()
700 if (likely(desc)) in handle_domain_irq()
701 handle_irq_desc(desc); in handle_domain_irq()
724 struct irq_desc *desc; in handle_domain_nmi() local
732 desc = irq_resolve_mapping(domain, hwirq); in handle_domain_nmi()
738 if (likely(desc)) in handle_domain_nmi()
739 handle_irq_desc(desc); in handle_domain_nmi()
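
handle_irq_desc() (635-646) is the common dispatch point, and both handle_domain_irq() (693-701) and handle_domain_nmi() (724-739) reach it the same way: resolve hwirq to a descriptor with irq_resolve_mapping(), then dispatch. A reconstruction of the dispatcher; the context-validity check is reduced to a comment because its exact predicate does not contain "desc" and is not in the matches:

    int handle_irq_desc(struct irq_desc *desc)
    {
        struct irq_data *data;

        if (!desc)
            return -EINVAL;

        data = irq_desc_get_irq_data(desc);
        /* the kernel rejects dispatch from an invalid calling context
           here, based on 'data'; elided in this sketch */

        generic_handle_irq_desc(desc);
        return 0;
    }
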
842 struct irq_desc *desc = irq_to_desc(irq); in __irq_get_desc_lock() local
844 if (desc) { in __irq_get_desc_lock()
847 !irq_settings_is_per_cpu_devid(desc)) in __irq_get_desc_lock()
851 irq_settings_is_per_cpu_devid(desc)) in __irq_get_desc_lock()
856 chip_bus_lock(desc); in __irq_get_desc_lock()
857 raw_spin_lock_irqsave(&desc->lock, *flags); in __irq_get_desc_lock()
859 return desc; in __irq_get_desc_lock()
862 void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus) in __irq_put_desc_unlock() argument
863 __releases(&desc->lock) in __irq_put_desc_unlock()
865 raw_spin_unlock_irqrestore(&desc->lock, flags); in __irq_put_desc_unlock()
867 chip_bus_sync_unlock(desc); in __irq_put_desc_unlock()
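
__irq_get_desc_lock() (842-859) and __irq_put_desc_unlock() (862-867) form a matched pair: look up the descriptor, reject it if the per-cpu-devid check fails (847/851), optionally take the irq chip's bus lock, then take desc->lock with interrupts saved; the unlock side releases in reverse order. A hypothetical caller, using the check constant defined in kernel/irq/internals.h:

    unsigned long flags;
    struct irq_desc *desc;

    desc = __irq_get_desc_lock(irq, &flags, true, IRQ_GET_DESC_CHECK_GLOBAL);
    if (!desc)
        return -EINVAL;
    /* ... modify the descriptor under desc->lock ... */
    __irq_put_desc_unlock(desc, flags, true);
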
873 struct irq_desc *desc = irq_to_desc(irq); in irq_set_percpu_devid_partition() local
875 if (!desc) in irq_set_percpu_devid_partition()
878 if (desc->percpu_enabled) in irq_set_percpu_devid_partition()
881 desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL); in irq_set_percpu_devid_partition()
883 if (!desc->percpu_enabled) in irq_set_percpu_devid_partition()
887 desc->percpu_affinity = affinity; in irq_set_percpu_devid_partition()
889 desc->percpu_affinity = cpu_possible_mask; in irq_set_percpu_devid_partition()
902 struct irq_desc *desc = irq_to_desc(irq); in irq_get_percpu_devid_partition() local
904 if (!desc || !desc->percpu_enabled) in irq_get_percpu_devid_partition()
908 cpumask_copy(affinity, desc->percpu_affinity); in irq_get_percpu_devid_partition()
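
irq_set_percpu_devid_partition() (873-889) marks an interrupt as per-CPU-devid: it allocates the percpu_enabled tracking bitmap and records either the supplied partition affinity or cpu_possible_mask; irq_get_percpu_devid_partition() (902-908) is the read side. Drivers normally go through the wrapper in the same file that passes a NULL affinity:

    int irq_set_percpu_devid(unsigned int irq)
    {
        return irq_set_percpu_devid_partition(irq, NULL);
    }
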
930 struct irq_desc *desc = irq_to_desc(irq); in kstat_irqs_cpu() local
932 return desc && desc->kstat_irqs ? in kstat_irqs_cpu()
933 *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; in kstat_irqs_cpu()
936 static bool irq_is_nmi(struct irq_desc *desc) in irq_is_nmi() argument
938 return desc->istate & IRQS_NMI; in irq_is_nmi()
943 struct irq_desc *desc = irq_to_desc(irq); in kstat_irqs() local
947 if (!desc || !desc->kstat_irqs) in kstat_irqs()
949 if (!irq_settings_is_per_cpu_devid(desc) && in kstat_irqs()
950 !irq_settings_is_per_cpu(desc) && in kstat_irqs()
951 !irq_is_nmi(desc)) in kstat_irqs()
952 return data_race(desc->tot_count); in kstat_irqs()
955 sum += data_race(*per_cpu_ptr(desc->kstat_irqs, cpu)); in kstat_irqs()
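
kstat_irqs() (943-955) has a fast path and a slow path: ordinary interrupts are counted in the aggregate desc->tot_count by the common handling code, so that field can be returned directly; per-CPU and NMI interrupts bypass the aggregate, so their per-CPU counters must be summed. The tail of the function, reassembled with the loop header the match filter dropped:

    if (!irq_settings_is_per_cpu_devid(desc) &&
        !irq_settings_is_per_cpu(desc) &&
        !irq_is_nmi(desc))
        return data_race(desc->tot_count);

    for_each_possible_cpu(cpu)
        sum += data_race(*per_cpu_ptr(desc->kstat_irqs, cpu));
    return sum;
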
983 struct irq_desc *desc = irq_to_desc(irq); in __irq_set_lockdep_class() local
985 if (desc) { in __irq_set_lockdep_class()
986 lockdep_set_class(&desc->lock, lock_class); in __irq_set_lockdep_class()
987 lockdep_set_class(&desc->request_mutex, request_class); in __irq_set_lockdep_class()
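
__irq_set_lockdep_class() (983-987) gives a descriptor's lock and request_mutex dedicated lockdep classes so that legitimately nested usage (demultiplexing irq chips, for instance) does not trigger false lockdep reports. Callers normally use the irq_set_lockdep_class() inline from include/linux/irqdesc.h, which compiles away without CONFIG_LOCKDEP:

    static struct lock_class_key my_irq_lock_class;     /* hypothetical driver keys */
    static struct lock_class_key my_irq_request_class;
    ...
    irq_set_lockdep_class(irq, &my_irq_lock_class, &my_irq_request_class);
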