Lines Matching full:desc (kernel/irq/manage.c)

38 static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)  in __synchronize_hardirq()  argument
40 struct irq_data *irqd = irq_desc_get_irq_data(desc); in __synchronize_hardirq()
50 while (irqd_irq_inprogress(&desc->irq_data)) in __synchronize_hardirq()
54 raw_spin_lock_irqsave(&desc->lock, flags); in __synchronize_hardirq()
55 inprogress = irqd_irq_inprogress(&desc->irq_data); in __synchronize_hardirq()
70 raw_spin_unlock_irqrestore(&desc->lock, flags); in __synchronize_hardirq()
100 struct irq_desc *desc = irq_to_desc(irq); in synchronize_hardirq() local
102 if (desc) { in synchronize_hardirq()
103 __synchronize_hardirq(desc, false); in synchronize_hardirq()
104 return !atomic_read(&desc->threads_active); in synchronize_hardirq()
128 struct irq_desc *desc = irq_to_desc(irq); in synchronize_irq() local
130 if (desc) { in synchronize_irq()
131 __synchronize_hardirq(desc, true); in synchronize_irq()
137 wait_event(desc->wait_for_threads, in synchronize_irq()
138 !atomic_read(&desc->threads_active)); in synchronize_irq()
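A minimal teardown sketch (struct mydev and its fields are hypothetical): synchronize_hardirq() above only waits out the hard-IRQ part and reports via threads_active whether threaded handlers are still running, while synchronize_irq() also sleeps on wait_for_threads until threads_active drops to zero, so it is the one to use before freeing handler state:

    #include <linux/interrupt.h>

    struct mydev { unsigned int irq; };     /* hypothetical */

    static void mydev_stop(struct mydev *md)
    {
            disable_irq_nosync(md->irq);    /* no new invocations */
            synchronize_irq(md->irq);       /* wait out hard handlers and IRQ threads */
            /* now safe to free data the handlers dereference */
    }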
146 static bool __irq_can_set_affinity(struct irq_desc *desc) in __irq_can_set_affinity() argument
148 if (!desc || !irqd_can_balance(&desc->irq_data) || in __irq_can_set_affinity()
149 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) in __irq_can_set_affinity()
173 struct irq_desc *desc = irq_to_desc(irq); in irq_can_set_affinity_usr() local
175 return __irq_can_set_affinity(desc) && in irq_can_set_affinity_usr()
176 !irqd_affinity_is_managed(&desc->irq_data); in irq_can_set_affinity_usr()
181 * @desc: irq descriptor which has affinity changed
185 * set_cpus_allowed_ptr() here as we hold desc->lock and this
188 void irq_set_thread_affinity(struct irq_desc *desc) in irq_set_thread_affinity() argument
192 for_each_action_of_desc(desc, action) in irq_set_thread_affinity()
223 struct irq_desc *desc = irq_data_to_desc(data); in irq_do_set_affinity() local
272 cpumask_copy(desc->irq_common_data.affinity, mask); in irq_do_set_affinity()
276 irq_set_thread_affinity(desc); in irq_do_set_affinity()
287 struct irq_desc *desc = irq_data_to_desc(data); in irq_set_affinity_pending() local
290 irq_copy_pending(desc, dest); in irq_set_affinity_pending()
319 struct irq_desc *desc = irq_data_to_desc(data); in irq_set_affinity_deactivated() local
334 cpumask_copy(desc->irq_common_data.affinity, mask); in irq_set_affinity_deactivated()
344 struct irq_desc *desc = irq_data_to_desc(data); in irq_set_affinity_locked() local
357 irq_copy_pending(desc, mask); in irq_set_affinity_locked()
360 if (desc->affinity_notify) { in irq_set_affinity_locked()
361 kref_get(&desc->affinity_notify->kref); in irq_set_affinity_locked()
362 if (!schedule_work(&desc->affinity_notify->work)) { in irq_set_affinity_locked()
364 kref_put(&desc->affinity_notify->kref, in irq_set_affinity_locked()
365 desc->affinity_notify->release); in irq_set_affinity_locked()
391 struct irq_desc *desc; in irq_update_affinity_desc() local
403 desc = irq_get_desc_buslock(irq, &flags, 0); in irq_update_affinity_desc()
404 if (!desc) in irq_update_affinity_desc()
408 if (irqd_is_started(&desc->irq_data)) { in irq_update_affinity_desc()
414 if (irqd_affinity_is_managed(&desc->irq_data)) { in irq_update_affinity_desc()
423 activated = irqd_is_activated(&desc->irq_data); in irq_update_affinity_desc()
425 irq_domain_deactivate_irq(&desc->irq_data); in irq_update_affinity_desc()
428 irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED); in irq_update_affinity_desc()
429 irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN); in irq_update_affinity_desc()
432 cpumask_copy(desc->irq_common_data.affinity, &affinity->mask); in irq_update_affinity_desc()
436 irq_domain_activate_irq(&desc->irq_data, false); in irq_update_affinity_desc()
439 irq_put_desc_busunlock(desc, flags); in irq_update_affinity_desc()
446 struct irq_desc *desc = irq_to_desc(irq); in __irq_set_affinity() local
450 if (!desc) in __irq_set_affinity()
453 raw_spin_lock_irqsave(&desc->lock, flags); in __irq_set_affinity()
454 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force); in __irq_set_affinity()
455 raw_spin_unlock_irqrestore(&desc->lock, flags); in __irq_set_affinity()
492 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in irq_set_affinity_hint() local
494 if (!desc) in irq_set_affinity_hint()
496 desc->affinity_hint = m; in irq_set_affinity_hint()
497 irq_put_desc_unlock(desc, flags); in irq_set_affinity_hint()
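Sketch of the usual driver-side pairing (irq and cpu are assumptions): irq_set_affinity() is the wrapper that lands in __irq_set_affinity() above, and irq_set_affinity_hint() only publishes an advisory mask via /proc/irq/<irq>/affinity_hint:

    #include <linux/interrupt.h>
    #include <linux/cpumask.h>

    static int mydev_pin_irq(unsigned int irq, unsigned int cpu)
    {
            int ret;

            /* fails if the chip cannot set affinity or the mask is invalid */
            ret = irq_set_affinity(irq, cpumask_of(cpu));
            if (ret)
                    return ret;
            /* advisory only; userspace tools such as irqbalance may read it */
            return irq_set_affinity_hint(irq, cpumask_of(cpu));
    }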
509 struct irq_desc *desc = irq_to_desc(notify->irq); in irq_affinity_notify() local
513 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) in irq_affinity_notify()
516 raw_spin_lock_irqsave(&desc->lock, flags); in irq_affinity_notify()
517 if (irq_move_pending(&desc->irq_data)) in irq_affinity_notify()
518 irq_get_pending(cpumask, desc); in irq_affinity_notify()
520 cpumask_copy(cpumask, desc->irq_common_data.affinity); in irq_affinity_notify()
521 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_affinity_notify()
544 struct irq_desc *desc = irq_to_desc(irq); in irq_set_affinity_notifier() local
551 if (!desc || desc->istate & IRQS_NMI) in irq_set_affinity_notifier()
561 raw_spin_lock_irqsave(&desc->lock, flags); in irq_set_affinity_notifier()
562 old_notify = desc->affinity_notify; in irq_set_affinity_notifier()
563 desc->affinity_notify = notify; in irq_set_affinity_notifier()
564 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_set_affinity_notifier()
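The kref_get()/kref_put() at lines 361-365 pairs with the notifier's release() callback, so a registration sketch looks like this (all mydev_* names are hypothetical):

    #include <linux/interrupt.h>
    #include <linux/kernel.h>

    struct mydev {
            struct irq_affinity_notify notify;      /* embedded, kref-managed */
    };

    static void mydev_affinity_notify(struct irq_affinity_notify *notify,
                                      const cpumask_t *mask)
    {
            struct mydev *md = container_of(notify, struct mydev, notify);

            pr_info("mydev %p: irq affinity now %*pbl\n",
                    md, cpumask_pr_args(mask));
    }

    static void mydev_affinity_release(struct kref *ref)
    {
            /* last reference gone; nothing dynamically allocated here */
    }

    static int mydev_watch_affinity(struct mydev *md, unsigned int irq)
    {
            md->notify.notify = mydev_affinity_notify;
            md->notify.release = mydev_affinity_release;
            return irq_set_affinity_notifier(irq, &md->notify);
    }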
582 int irq_setup_affinity(struct irq_desc *desc) in irq_setup_affinity() argument
585 int ret, node = irq_desc_get_node(desc); in irq_setup_affinity()
590 if (!__irq_can_set_affinity(desc)) in irq_setup_affinity()
598 if (irqd_affinity_is_managed(&desc->irq_data) || in irq_setup_affinity()
599 irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { in irq_setup_affinity()
600 if (cpumask_intersects(desc->irq_common_data.affinity, in irq_setup_affinity()
602 set = desc->irq_common_data.affinity; in irq_setup_affinity()
604 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); in irq_setup_affinity()
618 ret = irq_do_set_affinity(&desc->irq_data, &mask, false); in irq_setup_affinity()
624 int irq_setup_affinity(struct irq_desc *desc) in irq_setup_affinity() argument
626 return irq_select_affinity(irq_desc_get_irq(desc)); in irq_setup_affinity()
646 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in irq_set_vcpu_affinity() local
651 if (!desc) in irq_set_vcpu_affinity()
654 data = irq_desc_get_irq_data(desc); in irq_set_vcpu_affinity()
668 irq_put_desc_unlock(desc, flags); in irq_set_vcpu_affinity()
674 void __disable_irq(struct irq_desc *desc) in __disable_irq() argument
676 if (!desc->depth++) in __disable_irq()
677 irq_disable(desc); in __disable_irq()
683 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in __disable_irq_nosync() local
685 if (!desc) in __disable_irq_nosync()
687 __disable_irq(desc); in __disable_irq_nosync()
688 irq_put_desc_busunlock(desc, flags); in __disable_irq_nosync()
769 void __enable_irq(struct irq_desc *desc) in __enable_irq() argument
771 switch (desc->depth) { in __enable_irq()
775 irq_desc_get_irq(desc)); in __enable_irq()
778 if (desc->istate & IRQS_SUSPENDED) in __enable_irq()
781 irq_settings_set_noprobe(desc); in __enable_irq()
789 irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE); in __enable_irq()
793 desc->depth--; in __enable_irq()
806 * desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock are NULL !
811 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in enable_irq() local
813 if (!desc) in enable_irq()
815 if (WARN(!desc->irq_data.chip, in enable_irq()
819 __enable_irq(desc); in enable_irq()
821 irq_put_desc_busunlock(desc, flags); in enable_irq()
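__disable_irq()/__enable_irq() maintain a depth count, so disable/enable calls nest and must stay balanced; the depth == 0 branch (line 775 above) WARNs on an unmatched enable_irq(). Illustration:

    #include <linux/interrupt.h>

    static void mydev_quiesce(unsigned int irq)
    {
            disable_irq(irq);       /* depth 0 -> 1: masks, waits for handlers */
            disable_irq(irq);       /* depth 1 -> 2: only the count changes */

            /* ... safely touch state the handler also uses ... */

            enable_irq(irq);        /* depth 2 -> 1: still masked */
            enable_irq(irq);        /* depth 1 -> 0: irq_startup()/unmask */
    }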
841 struct irq_desc *desc = irq_to_desc(irq); in set_irq_wake_real() local
844 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE) in set_irq_wake_real()
847 if (desc->irq_data.chip->irq_set_wake) in set_irq_wake_real()
848 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); in set_irq_wake_real()
875 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in irq_set_irq_wake() local
878 if (!desc) in irq_set_irq_wake()
882 if (desc->istate & IRQS_NMI) { in irq_set_irq_wake()
891 if (desc->wake_depth++ == 0) { in irq_set_irq_wake()
894 desc->wake_depth = 0; in irq_set_irq_wake()
896 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); in irq_set_irq_wake()
899 if (desc->wake_depth == 0) { in irq_set_irq_wake()
901 } else if (--desc->wake_depth == 0) { in irq_set_irq_wake()
904 desc->wake_depth = 1; in irq_set_irq_wake()
906 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); in irq_set_irq_wake()
911 irq_put_desc_busunlock(desc, flags); in irq_set_irq_wake()
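irq_set_irq_wake() nests the same way via wake_depth; drivers normally reach it through the enable_irq_wake()/disable_irq_wake() wrappers in suspend/resume paths. A hedged sketch (mydev_* names hypothetical):

    #include <linux/device.h>
    #include <linux/interrupt.h>
    #include <linux/pm_wakeup.h>

    struct mydev { unsigned int irq; };     /* hypothetical */

    static int mydev_suspend(struct device *dev)
    {
            struct mydev *md = dev_get_drvdata(dev);

            if (device_may_wakeup(dev))
                    enable_irq_wake(md->irq);       /* irq_set_irq_wake(irq, 1) */
            return 0;
    }

    static int mydev_resume(struct device *dev)
    {
            struct mydev *md = dev_get_drvdata(dev);

            if (device_may_wakeup(dev))
                    disable_irq_wake(md->irq);      /* irq_set_irq_wake(irq, 0) */
            return 0;
    }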
924 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in can_request_irq() local
927 if (!desc) in can_request_irq()
930 if (irq_settings_can_request(desc)) { in can_request_irq()
931 if (!desc->action || in can_request_irq()
932 irqflags & desc->action->flags & IRQF_SHARED) in can_request_irq()
935 irq_put_desc_unlock(desc, flags); in can_request_irq()
939 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags) in __irq_set_trigger() argument
941 struct irq_chip *chip = desc->irq_data.chip; in __irq_set_trigger()
950 irq_desc_get_irq(desc), in __irq_set_trigger()
956 if (!irqd_irq_masked(&desc->irq_data)) in __irq_set_trigger()
957 mask_irq(desc); in __irq_set_trigger()
958 if (!irqd_irq_disabled(&desc->irq_data)) in __irq_set_trigger()
964 ret = chip->irq_set_type(&desc->irq_data, flags); in __irq_set_trigger()
969 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); in __irq_set_trigger()
970 irqd_set(&desc->irq_data, flags); in __irq_set_trigger()
974 flags = irqd_get_trigger_type(&desc->irq_data); in __irq_set_trigger()
975 irq_settings_set_trigger_mask(desc, flags); in __irq_set_trigger()
976 irqd_clear(&desc->irq_data, IRQD_LEVEL); in __irq_set_trigger()
977 irq_settings_clr_level(desc); in __irq_set_trigger()
979 irq_settings_set_level(desc); in __irq_set_trigger()
980 irqd_set(&desc->irq_data, IRQD_LEVEL); in __irq_set_trigger()
987 flags, irq_desc_get_irq(desc), chip->irq_set_type); in __irq_set_trigger()
990 unmask_irq(desc); in __irq_set_trigger()
998 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in irq_set_parent() local
1000 if (!desc) in irq_set_parent()
1003 desc->parent_irq = parent_irq; in irq_set_parent()
1005 irq_put_desc_unlock(desc, flags); in irq_set_parent()
1067 static void irq_finalize_oneshot(struct irq_desc *desc, in irq_finalize_oneshot() argument
1070 if (!(desc->istate & IRQS_ONESHOT) || in irq_finalize_oneshot()
1074 chip_bus_lock(desc); in irq_finalize_oneshot()
1075 raw_spin_lock_irq(&desc->lock); in irq_finalize_oneshot()
1087 * versus "desc->threads_oneshot |= action->thread_mask;" in in irq_finalize_oneshot()
1091 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { in irq_finalize_oneshot()
1092 raw_spin_unlock_irq(&desc->lock); in irq_finalize_oneshot()
1093 chip_bus_sync_unlock(desc); in irq_finalize_oneshot()
1106 desc->threads_oneshot &= ~action->thread_mask; in irq_finalize_oneshot()
1108 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && in irq_finalize_oneshot()
1109 irqd_irq_masked(&desc->irq_data)) in irq_finalize_oneshot()
1110 unmask_threaded_irq(desc); in irq_finalize_oneshot()
1113 raw_spin_unlock_irq(&desc->lock); in irq_finalize_oneshot()
1114 chip_bus_sync_unlock(desc); in irq_finalize_oneshot()
1122 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) in irq_thread_check_affinity() argument
1139 raw_spin_lock_irq(&desc->lock); in irq_thread_check_affinity()
1144 if (cpumask_available(desc->irq_common_data.affinity)) { in irq_thread_check_affinity()
1147 m = irq_data_get_effective_affinity_mask(&desc->irq_data); in irq_thread_check_affinity()
1152 raw_spin_unlock_irq(&desc->lock); in irq_thread_check_affinity()
1160 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } in irq_thread_check_affinity() argument
1170 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) in irq_forced_thread_fn() argument
1179 atomic_inc(&desc->threads_handled); in irq_forced_thread_fn()
1181 irq_finalize_oneshot(desc, action); in irq_forced_thread_fn()
1193 static irqreturn_t irq_thread_fn(struct irq_desc *desc, in irq_thread_fn() argument
1200 atomic_inc(&desc->threads_handled); in irq_thread_fn()
1202 irq_finalize_oneshot(desc, action); in irq_thread_fn()
1206 static void wake_threads_waitq(struct irq_desc *desc) in wake_threads_waitq() argument
1208 if (atomic_dec_and_test(&desc->threads_active)) in wake_threads_waitq()
1209 wake_up(&desc->wait_for_threads); in wake_threads_waitq()
1215 struct irq_desc *desc; in irq_thread_dtor() local
1227 desc = irq_to_desc(action->irq); in irq_thread_dtor()
1230 * desc->threads_active and wake possible waiters. in irq_thread_dtor()
1233 wake_threads_waitq(desc); in irq_thread_dtor()
1235 /* Prevent a stale desc->threads_oneshot */ in irq_thread_dtor()
1236 irq_finalize_oneshot(desc, action); in irq_thread_dtor()
1239 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action) in irq_wake_secondary() argument
1246 raw_spin_lock_irq(&desc->lock); in irq_wake_secondary()
1247 __irq_wake_thread(desc, secondary); in irq_wake_secondary()
1248 raw_spin_unlock_irq(&desc->lock); in irq_wake_secondary()
1258 struct irq_desc *desc = irq_to_desc(action->irq); in irq_thread() local
1259 irqreturn_t (*handler_fn)(struct irq_desc *desc, in irq_thread()
1271 irq_thread_check_affinity(desc, action); in irq_thread()
1276 irq_thread_check_affinity(desc, action); in irq_thread()
1278 action_ret = handler_fn(desc, action); in irq_thread()
1280 irq_wake_secondary(desc, action); in irq_thread()
1282 wake_threads_waitq(desc); in irq_thread()
1303 struct irq_desc *desc = irq_to_desc(irq); in irq_wake_thread() local
1307 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) in irq_wake_thread()
1310 raw_spin_lock_irqsave(&desc->lock, flags); in irq_wake_thread()
1311 for_each_action_of_desc(desc, action) { in irq_wake_thread()
1314 __irq_wake_thread(desc, action); in irq_wake_thread()
1318 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_wake_thread()
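irq_wake_thread() lets a driver kick the threaded handler from outside the hard handler, as if the primary handler had returned IRQ_WAKE_THREAD; dev_id selects the matching action on a shared line. Sketch (names hypothetical):

    #include <linux/interrupt.h>

    struct mydev { unsigned int irq; };     /* hypothetical */

    /* e.g. from a watchdog/timeout path */
    static void mydev_kick_thread(struct mydev *md)
    {
            irq_wake_thread(md->irq, md);   /* md is the dev_id used at request time */
    }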
1361 static int irq_request_resources(struct irq_desc *desc) in irq_request_resources() argument
1363 struct irq_data *d = &desc->irq_data; in irq_request_resources()
1369 static void irq_release_resources(struct irq_desc *desc) in irq_release_resources() argument
1371 struct irq_data *d = &desc->irq_data; in irq_release_resources()
1378 static bool irq_supports_nmi(struct irq_desc *desc) in irq_supports_nmi() argument
1380 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_supports_nmi()
1394 static int irq_nmi_setup(struct irq_desc *desc) in irq_nmi_setup() argument
1396 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_nmi_setup()
1402 static void irq_nmi_teardown(struct irq_desc *desc) in irq_nmi_teardown() argument
1404 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_nmi_teardown()
1454 * desc->request_mutex Provides serialization against a concurrent free_irq()
1456 * desc->lock Provides serialization against hard interrupts
1458 * chip_bus_lock and desc->lock are sufficient for all other management and
1459 * interrupt related functions. desc->request_mutex solely serializes
1463 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) in __setup_irq() argument
1469 if (!desc) in __setup_irq()
1472 if (desc->irq_data.chip == &no_irq_chip) in __setup_irq()
1474 if (!try_module_get(desc->owner)) in __setup_irq()
1484 new->flags |= irqd_get_trigger_type(&desc->irq_data); in __setup_irq()
1490 nested = irq_settings_is_nested_thread(desc); in __setup_irq()
1503 if (irq_settings_can_thread(desc)) { in __setup_irq()
1535 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) in __setup_irq()
1541 * chip bus lock and desc->lock. Also protects against handing out in __setup_irq()
1545 mutex_lock(&desc->request_mutex); in __setup_irq()
1552 chip_bus_lock(desc); in __setup_irq()
1555 if (!desc->action) { in __setup_irq()
1556 ret = irq_request_resources(desc); in __setup_irq()
1559 new->name, irq, desc->irq_data.chip->name); in __setup_irq()
1568 * desc->request_mutex or the optional bus lock. in __setup_irq()
1570 raw_spin_lock_irqsave(&desc->lock, flags); in __setup_irq()
1571 old_ptr = &desc->action; in __setup_irq()
1584 if (desc->istate & IRQS_NMI) { in __setup_irq()
1586 new->name, irq, desc->irq_data.chip->name); in __setup_irq()
1595 if (irqd_trigger_type_was_set(&desc->irq_data)) { in __setup_irq()
1596 oldtype = irqd_get_trigger_type(&desc->irq_data); in __setup_irq()
1599 irqd_set_trigger_type(&desc->irq_data, oldtype); in __setup_irq()
1642 * desc->threads_active to indicate that the in __setup_irq()
1646 * line have completed desc->threads_active becomes in __setup_irq()
1651 * interrupt handlers, then desc->threads_active is in __setup_irq()
1663 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { in __setup_irq()
1686 init_waitqueue_head(&desc->wait_for_threads); in __setup_irq()
1690 ret = __irq_set_trigger(desc, in __setup_irq()
1708 ret = irq_activate(desc); in __setup_irq()
1712 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ in __setup_irq()
1714 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); in __setup_irq()
1717 irqd_set(&desc->irq_data, IRQD_PER_CPU); in __setup_irq()
1718 irq_settings_set_per_cpu(desc); in __setup_irq()
1720 irq_settings_set_no_debug(desc); in __setup_irq()
1724 irq_settings_set_no_debug(desc); in __setup_irq()
1727 desc->istate |= IRQS_ONESHOT; in __setup_irq()
1731 irq_settings_set_no_balancing(desc); in __setup_irq()
1732 irqd_set(&desc->irq_data, IRQD_NO_BALANCING); in __setup_irq()
1736 irq_settings_can_autoenable(desc)) { in __setup_irq()
1737 irq_startup(desc, IRQ_RESEND, IRQ_START_COND); in __setup_irq()
1747 desc->depth = 1; in __setup_irq()
1752 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data); in __setup_irq()
1762 irq_pm_install_action(desc, new); in __setup_irq()
1765 desc->irq_count = 0; in __setup_irq()
1766 desc->irqs_unhandled = 0; in __setup_irq()
1772 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { in __setup_irq()
1773 desc->istate &= ~IRQS_SPURIOUS_DISABLED; in __setup_irq()
1774 __enable_irq(desc); in __setup_irq()
1777 raw_spin_unlock_irqrestore(&desc->lock, flags); in __setup_irq()
1778 chip_bus_sync_unlock(desc); in __setup_irq()
1779 mutex_unlock(&desc->request_mutex); in __setup_irq()
1781 irq_setup_timings(desc, new); in __setup_irq()
1792 register_irq_proc(irq, desc); in __setup_irq()
1808 raw_spin_unlock_irqrestore(&desc->lock, flags); in __setup_irq()
1810 if (!desc->action) in __setup_irq()
1811 irq_release_resources(desc); in __setup_irq()
1813 chip_bus_sync_unlock(desc); in __setup_irq()
1814 mutex_unlock(&desc->request_mutex); in __setup_irq()
1832 module_put(desc->owner); in __setup_irq()
1840 static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id) in __free_irq() argument
1842 unsigned irq = desc->irq_data.irq; in __free_irq()
1848 mutex_lock(&desc->request_mutex); in __free_irq()
1849 chip_bus_lock(desc); in __free_irq()
1850 raw_spin_lock_irqsave(&desc->lock, flags); in __free_irq()
1856 action_ptr = &desc->action; in __free_irq()
1862 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_irq()
1863 chip_bus_sync_unlock(desc); in __free_irq()
1864 mutex_unlock(&desc->request_mutex); in __free_irq()
1876 irq_pm_remove_action(desc, action); in __free_irq()
1879 if (!desc->action) { in __free_irq()
1880 irq_settings_clr_disable_unlazy(desc); in __free_irq()
1882 irq_shutdown(desc); in __free_irq()
1887 if (WARN_ON_ONCE(desc->affinity_hint)) in __free_irq()
1888 desc->affinity_hint = NULL; in __free_irq()
1891 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_irq()
1902 * The still held desc->request_mutex() protects against a in __free_irq()
1906 chip_bus_sync_unlock(desc); in __free_irq()
1915 __synchronize_hardirq(desc, true); in __free_irq()
1949 if (!desc->action) { in __free_irq()
1954 chip_bus_lock(desc); in __free_irq()
1959 raw_spin_lock_irqsave(&desc->lock, flags); in __free_irq()
1960 irq_domain_deactivate_irq(&desc->irq_data); in __free_irq()
1961 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_irq()
1963 irq_release_resources(desc); in __free_irq()
1964 chip_bus_sync_unlock(desc); in __free_irq()
1965 irq_remove_timings(desc); in __free_irq()
1968 mutex_unlock(&desc->request_mutex); in __free_irq()
1970 irq_chip_pm_put(&desc->irq_data); in __free_irq()
1971 module_put(desc->owner); in __free_irq()
1994 struct irq_desc *desc = irq_to_desc(irq); in free_irq() local
1998 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) in free_irq()
2002 if (WARN_ON(desc->affinity_notify)) in free_irq()
2003 desc->affinity_notify = NULL; in free_irq()
2006 action = __free_irq(desc, dev_id); in free_irq()
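Teardown sketch matching the WARN_ON at line 2002: any affinity notifier must be gone before free_irq() (mydev_* is hypothetical):

    #include <linux/interrupt.h>

    struct mydev { unsigned int irq; };     /* hypothetical */

    static void mydev_teardown(struct mydev *md)
    {
            irq_set_affinity_notifier(md->irq, NULL);       /* clear notifier first */
            free_irq(md->irq, md);  /* returns the devname passed at request time */
    }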
2017 /* This function must be called with desc->lock held */
2018 static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc) in __cleanup_nmi() argument
2022 desc->istate &= ~IRQS_NMI; in __cleanup_nmi()
2024 if (!WARN_ON(desc->action == NULL)) { in __cleanup_nmi()
2025 irq_pm_remove_action(desc, desc->action); in __cleanup_nmi()
2026 devname = desc->action->name; in __cleanup_nmi()
2027 unregister_handler_proc(irq, desc->action); in __cleanup_nmi()
2029 kfree(desc->action); in __cleanup_nmi()
2030 desc->action = NULL; in __cleanup_nmi()
2033 irq_settings_clr_disable_unlazy(desc); in __cleanup_nmi()
2034 irq_shutdown_and_deactivate(desc); in __cleanup_nmi()
2036 irq_release_resources(desc); in __cleanup_nmi()
2038 irq_chip_pm_put(&desc->irq_data); in __cleanup_nmi()
2039 module_put(desc->owner); in __cleanup_nmi()
2046 struct irq_desc *desc = irq_to_desc(irq); in free_nmi() local
2050 if (!desc || WARN_ON(!(desc->istate & IRQS_NMI))) in free_nmi()
2053 if (WARN_ON(irq_settings_is_per_cpu_devid(desc))) in free_nmi()
2057 if (WARN_ON(desc->depth == 0)) in free_nmi()
2060 raw_spin_lock_irqsave(&desc->lock, flags); in free_nmi()
2062 irq_nmi_teardown(desc); in free_nmi()
2063 devname = __cleanup_nmi(irq, desc); in free_nmi()
2065 raw_spin_unlock_irqrestore(&desc->lock, flags); in free_nmi()
2117 struct irq_desc *desc; in request_threaded_irq() local
2142 desc = irq_to_desc(irq); in request_threaded_irq()
2143 if (!desc) in request_threaded_irq()
2146 if (!irq_settings_can_request(desc) || in request_threaded_irq()
2147 WARN_ON(irq_settings_is_per_cpu_devid(desc))) in request_threaded_irq()
2166 retval = irq_chip_pm_get(&desc->irq_data); in request_threaded_irq()
2172 retval = __setup_irq(irq, desc, action); in request_threaded_irq()
2175 irq_chip_pm_put(&desc->irq_data); in request_threaded_irq()
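The canonical caller sketch (mydev_* names hypothetical): a minimal hard handler that defers to a sleeping thread, with IRQF_ONESHOT keeping the line masked until irq_finalize_oneshot() unmasks it:

    #include <linux/interrupt.h>

    struct mydev { unsigned int irq; };     /* hypothetical */

    static irqreturn_t mydev_hardirq(int irq, void *dev_id)
    {
            /* IRQ context: check the device raised this interrupt, then defer */
            return IRQ_WAKE_THREAD;
    }

    static irqreturn_t mydev_thread(int irq, void *dev_id)
    {
            /* may sleep, e.g. I/O over a slow bus */
            return IRQ_HANDLED;
    }

    static int mydev_setup(struct mydev *md)
    {
            return request_threaded_irq(md->irq, mydev_hardirq, mydev_thread,
                                        IRQF_ONESHOT, "mydev", md);
    }

Passing NULL as the primary handler installs a default one, but then __setup_irq() insists on IRQF_ONESHOT unless the chip is IRQCHIP_ONESHOT_SAFE (line 1663).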
2223 struct irq_desc *desc; in request_any_context_irq() local
2229 desc = irq_to_desc(irq); in request_any_context_irq()
2230 if (!desc) in request_any_context_irq()
2233 if (irq_settings_is_nested_thread(desc)) { in request_any_context_irq()
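request_any_context_irq() returns IRQC_IS_HARDIRQ or IRQC_IS_NESTED on success rather than 0, depending on the nested-thread check at line 2233. Caller sketch (names hypothetical):

    #include <linux/interrupt.h>

    static irqreturn_t mydev_handler(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    static int mydev_request(unsigned int irq, void *dev_id)
    {
            int ret = request_any_context_irq(irq, mydev_handler, 0,
                                              "mydev", dev_id);

            if (ret < 0)
                    return ret;
            /* ret == IRQC_IS_HARDIRQ or IRQC_IS_NESTED */
            return 0;
    }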
2274 struct irq_desc *desc; in request_nmi() local
2291 desc = irq_to_desc(irq); in request_nmi()
2293 if (!desc || (irq_settings_can_autoenable(desc) && in request_nmi()
2295 !irq_settings_can_request(desc) || in request_nmi()
2296 WARN_ON(irq_settings_is_per_cpu_devid(desc)) || in request_nmi()
2297 !irq_supports_nmi(desc)) in request_nmi()
2309 retval = irq_chip_pm_get(&desc->irq_data); in request_nmi()
2313 retval = __setup_irq(irq, desc, action); in request_nmi()
2317 raw_spin_lock_irqsave(&desc->lock, flags); in request_nmi()
2320 desc->istate |= IRQS_NMI; in request_nmi()
2321 retval = irq_nmi_setup(desc); in request_nmi()
2323 __cleanup_nmi(irq, desc); in request_nmi()
2324 raw_spin_unlock_irqrestore(&desc->lock, flags); in request_nmi()
2328 raw_spin_unlock_irqrestore(&desc->lock, flags); in request_nmi()
2333 irq_chip_pm_put(&desc->irq_data); in request_nmi()
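A heavily hedged sketch of the request_nmi() contract implied by the checks above: the line must be exclusive, the irqchip must support NMIs (irq_supports_nmi()), and auto-enable must be off, so the caller enables the line itself afterwards. This assumes a kernel where IRQF_NO_AUTOEN satisfies the autoenable check at line 2293; all names are hypothetical:

    #include <linux/interrupt.h>

    static irqreturn_t mydev_nmi_handler(int irq, void *dev_id)
    {
            /* NMI context: no locks, no sleeping */
            return IRQ_HANDLED;
    }

    static int mydev_setup_nmi(unsigned int irq, void *dev_id)
    {
            int ret = request_nmi(irq, mydev_nmi_handler, IRQF_NO_AUTOEN,
                                  "mydev-nmi", dev_id);

            if (ret)
                    return ret;
            enable_irq(irq);
            return 0;
    }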
2344 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); in enable_percpu_irq() local
2346 if (!desc) in enable_percpu_irq()
2355 type = irqd_get_trigger_type(&desc->irq_data); in enable_percpu_irq()
2360 ret = __irq_set_trigger(desc, type); in enable_percpu_irq()
2368 irq_percpu_enable(desc, cpu); in enable_percpu_irq()
2370 irq_put_desc_unlock(desc, flags); in enable_percpu_irq()
2389 struct irq_desc *desc; in irq_percpu_is_enabled() local
2393 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); in irq_percpu_is_enabled()
2394 if (!desc) in irq_percpu_is_enabled()
2397 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled); in irq_percpu_is_enabled()
2398 irq_put_desc_unlock(desc, flags); in irq_percpu_is_enabled()
2408 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); in disable_percpu_irq() local
2410 if (!desc) in disable_percpu_irq()
2413 irq_percpu_disable(desc, cpu); in disable_percpu_irq()
2414 irq_put_desc_unlock(desc, flags); in disable_percpu_irq()
2428 struct irq_desc *desc = irq_to_desc(irq); in __free_percpu_irq() local
2434 if (!desc) in __free_percpu_irq()
2437 raw_spin_lock_irqsave(&desc->lock, flags); in __free_percpu_irq()
2439 action = desc->action; in __free_percpu_irq()
2445 if (!cpumask_empty(desc->percpu_enabled)) { in __free_percpu_irq()
2447 irq, cpumask_first(desc->percpu_enabled)); in __free_percpu_irq()
2452 desc->action = NULL; in __free_percpu_irq()
2454 desc->istate &= ~IRQS_NMI; in __free_percpu_irq()
2456 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_percpu_irq()
2460 irq_chip_pm_put(&desc->irq_data); in __free_percpu_irq()
2461 module_put(desc->owner); in __free_percpu_irq()
2465 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_percpu_irq()
2478 struct irq_desc *desc = irq_to_desc(irq); in remove_percpu_irq() local
2480 if (desc && irq_settings_is_per_cpu_devid(desc)) in remove_percpu_irq()
2498 struct irq_desc *desc = irq_to_desc(irq); in free_percpu_irq() local
2500 if (!desc || !irq_settings_is_per_cpu_devid(desc)) in free_percpu_irq()
2503 chip_bus_lock(desc); in free_percpu_irq()
2505 chip_bus_sync_unlock(desc); in free_percpu_irq()
2511 struct irq_desc *desc = irq_to_desc(irq); in free_percpu_nmi() local
2513 if (!desc || !irq_settings_is_per_cpu_devid(desc)) in free_percpu_nmi()
2516 if (WARN_ON(!(desc->istate & IRQS_NMI))) in free_percpu_nmi()
2531 struct irq_desc *desc = irq_to_desc(irq); in setup_percpu_irq() local
2534 if (!desc || !irq_settings_is_per_cpu_devid(desc)) in setup_percpu_irq()
2537 retval = irq_chip_pm_get(&desc->irq_data); in setup_percpu_irq()
2541 retval = __setup_irq(irq, desc, act); in setup_percpu_irq()
2544 irq_chip_pm_put(&desc->irq_data); in setup_percpu_irq()
2571 struct irq_desc *desc; in __request_percpu_irq() local
2577 desc = irq_to_desc(irq); in __request_percpu_irq()
2578 if (!desc || !irq_settings_can_request(desc) || in __request_percpu_irq()
2579 !irq_settings_is_per_cpu_devid(desc)) in __request_percpu_irq()
2594 retval = irq_chip_pm_get(&desc->irq_data); in __request_percpu_irq()
2600 retval = __setup_irq(irq, desc, action); in __request_percpu_irq()
2603 irq_chip_pm_put(&desc->irq_data); in __request_percpu_irq()
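Per-CPU interrupts are requested once with a percpu dev_id and then enabled on each CPU individually (enable_percpu_irq() above affects only the calling CPU). Sketch, assuming a hypothetical PPI-style line:

    #include <linux/interrupt.h>
    #include <linux/irq.h>
    #include <linux/percpu.h>

    struct mydev_pcpu { int counter; };     /* hypothetical per-CPU state */
    static DEFINE_PER_CPU(struct mydev_pcpu, mydev_pcpu);

    static irqreturn_t mydev_percpu_handler(int irq, void *dev_id)
    {
            struct mydev_pcpu *p = dev_id;  /* this CPU's instance */

            p->counter++;
            return IRQ_HANDLED;
    }

    static int mydev_percpu_init(unsigned int irq)
    {
            int ret = request_percpu_irq(irq, mydev_percpu_handler,
                                         "mydev-percpu", &mydev_pcpu);

            if (ret)
                    return ret;
            /* typically run on each CPU from a hotplug online callback */
            enable_percpu_irq(irq, IRQ_TYPE_NONE);
            return 0;
    }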
2636 struct irq_desc *desc; in request_percpu_nmi() local
2643 desc = irq_to_desc(irq); in request_percpu_nmi()
2645 if (!desc || !irq_settings_can_request(desc) || in request_percpu_nmi()
2646 !irq_settings_is_per_cpu_devid(desc) || in request_percpu_nmi()
2647 irq_settings_can_autoenable(desc) || in request_percpu_nmi()
2648 !irq_supports_nmi(desc)) in request_percpu_nmi()
2652 if (desc->istate & IRQS_NMI) in request_percpu_nmi()
2665 retval = irq_chip_pm_get(&desc->irq_data); in request_percpu_nmi()
2669 retval = __setup_irq(irq, desc, action); in request_percpu_nmi()
2673 raw_spin_lock_irqsave(&desc->lock, flags); in request_percpu_nmi()
2674 desc->istate |= IRQS_NMI; in request_percpu_nmi()
2675 raw_spin_unlock_irqrestore(&desc->lock, flags); in request_percpu_nmi()
2680 irq_chip_pm_put(&desc->irq_data); in request_percpu_nmi()
2703 struct irq_desc *desc; in prepare_percpu_nmi() local
2708 desc = irq_get_desc_lock(irq, &flags, in prepare_percpu_nmi()
2710 if (!desc) in prepare_percpu_nmi()
2713 if (WARN(!(desc->istate & IRQS_NMI), in prepare_percpu_nmi()
2720 ret = irq_nmi_setup(desc); in prepare_percpu_nmi()
2727 irq_put_desc_unlock(desc, flags); in prepare_percpu_nmi()
2746 struct irq_desc *desc; in teardown_percpu_nmi() local
2750 desc = irq_get_desc_lock(irq, &flags, in teardown_percpu_nmi()
2752 if (!desc) in teardown_percpu_nmi()
2755 if (WARN_ON(!(desc->istate & IRQS_NMI))) in teardown_percpu_nmi()
2758 irq_nmi_teardown(desc); in teardown_percpu_nmi()
2760 irq_put_desc_unlock(desc, flags); in teardown_percpu_nmi()
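Per-CPU NMIs follow the same two-level pattern: request_percpu_nmi() once, then prepare_percpu_nmi()/enable_percpu_nmi() on each CPU (and the reverse on teardown); prepare/teardown must run on the target CPU, which CPU-hotplug online/offline callbacks guarantee. Hedged sketch, all names hypothetical:

    #include <linux/interrupt.h>
    #include <linux/irq.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(int, mydev_nmi_cookie);
    static unsigned int mydev_nmi_irq;      /* assumption: discovered at probe */

    static irqreturn_t mydev_nmi_handler(int irq, void *dev_id)
    {
            /* NMI context: no locks, no sleeping */
            return IRQ_HANDLED;
    }

    static int mydev_nmi_request(void)
    {
            return request_percpu_nmi(mydev_nmi_irq, mydev_nmi_handler,
                                      "mydev-nmi", &mydev_nmi_cookie);
    }

    static int mydev_cpu_online(unsigned int cpu)
    {
            int ret = prepare_percpu_nmi(mydev_nmi_irq);

            if (ret)
                    return ret;
            enable_percpu_nmi(mydev_nmi_irq, IRQ_TYPE_NONE);
            return 0;
    }

    static int mydev_cpu_offline(unsigned int cpu)
    {
            disable_percpu_nmi(mydev_nmi_irq);
            teardown_percpu_nmi(mydev_nmi_irq);
            return 0;
    }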
2803 struct irq_desc *desc; in irq_get_irqchip_state() local
2808 desc = irq_get_desc_buslock(irq, &flags, 0); in irq_get_irqchip_state()
2809 if (!desc) in irq_get_irqchip_state()
2812 data = irq_desc_get_irq_data(desc); in irq_get_irqchip_state()
2816 irq_put_desc_busunlock(desc, flags); in irq_get_irqchip_state()
2836 struct irq_desc *desc; in irq_set_irqchip_state() local
2842 desc = irq_get_desc_buslock(irq, &flags, 0); in irq_set_irqchip_state()
2843 if (!desc) in irq_set_irqchip_state()
2846 data = irq_desc_get_irq_data(desc); in irq_set_irqchip_state()
2867 irq_put_desc_busunlock(desc, flags); in irq_set_irqchip_state()
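These two are the driver-visible way to peek/poke machine-level line state (pending, active, masked) through the irqchip. Sketch (irq is an assumption):

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    static bool mydev_irq_pending(unsigned int irq)
    {
            bool pending = false;

            /* returns 0 on success; pending stays false on failure here */
            irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
            return pending;
    }

    static void mydev_clear_pending(unsigned int irq)
    {
            irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
    }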
2898 struct irq_desc *desc; in irq_check_status_bit() local
2902 desc = irq_to_desc(irq); in irq_check_status_bit()
2903 if (desc) in irq_check_status_bit()
2904 res = !!(desc->status_use_accessors & bitmask); in irq_check_status_bit()
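irq_check_status_bit() gives a lockless snapshot of the status bits for callers (mostly arch code) that cannot take desc->lock. Sketch:

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    /* e.g. skip per-CPU lines when walking interrupts */
    static bool mydev_is_per_cpu(unsigned int irq)
    {
            return irq_check_status_bit(irq, IRQ_PER_CPU);
    }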