Lines matching refs: irq_data
40 struct irq_data *irqd = irq_desc_get_irq_data(desc); in __synchronize_hardirq()
50 while (irqd_irq_inprogress(&desc->irq_data)) in __synchronize_hardirq()
55 inprogress = irqd_irq_inprogress(&desc->irq_data); in __synchronize_hardirq()
148 if (!desc || !irqd_can_balance(&desc->irq_data) || in __irq_can_set_affinity()
149 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) in __irq_can_set_affinity()
176 !irqd_affinity_is_managed(&desc->irq_data); in irq_can_set_affinity_usr()
197 static void irq_validate_effective_affinity(struct irq_data *data) in irq_validate_effective_affinity()
210 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, in irq_do_set_affinity()
236 static inline int irq_set_affinity_pending(struct irq_data *data, in irq_set_affinity_pending()
246 static inline int irq_set_affinity_pending(struct irq_data *data, in irq_set_affinity_pending()
253 static int irq_try_set_affinity(struct irq_data *data, in irq_try_set_affinity()
268 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, in irq_set_affinity_locked()
337 if (irq_move_pending(&desc->irq_data)) in irq_affinity_notify()
415 if (irqd_affinity_is_managed(&desc->irq_data) || in irq_setup_affinity()
416 irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { in irq_setup_affinity()
421 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); in irq_setup_affinity()
435 ret = irq_do_set_affinity(&desc->irq_data, &mask, false); in irq_setup_affinity()
478 struct irq_data *data; in irq_set_vcpu_affinity()
646 if (WARN(!desc->irq_data.chip, in enable_irq()
678 if (desc->irq_data.chip->irq_set_wake) in set_irq_wake_real()
679 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); in set_irq_wake_real()
720 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); in irq_set_irq_wake()
730 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); in irq_set_irq_wake()
765 struct irq_chip *chip = desc->irq_data.chip; in __irq_set_trigger()
780 if (!irqd_irq_masked(&desc->irq_data)) in __irq_set_trigger()
782 if (!irqd_irq_disabled(&desc->irq_data)) in __irq_set_trigger()
788 ret = chip->irq_set_type(&desc->irq_data, flags); in __irq_set_trigger()
793 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); in __irq_set_trigger()
794 irqd_set(&desc->irq_data, flags); in __irq_set_trigger()
798 flags = irqd_get_trigger_type(&desc->irq_data); in __irq_set_trigger()
800 irqd_clear(&desc->irq_data, IRQD_LEVEL); in __irq_set_trigger()
804 irqd_set(&desc->irq_data, IRQD_LEVEL); in __irq_set_trigger()
915 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { in irq_finalize_oneshot()
932 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && in irq_finalize_oneshot()
933 irqd_irq_masked(&desc->irq_data)) in irq_finalize_oneshot()
971 m = irq_data_get_effective_affinity_mask(&desc->irq_data); in irq_thread_check_affinity()
1183 struct irq_data *d = &desc->irq_data; in irq_request_resources()
1191 struct irq_data *d = &desc->irq_data; in irq_release_resources()
1200 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_supports_nmi()
1216 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_nmi_setup()
1224 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_nmi_teardown()
1296 if (desc->irq_data.chip == &no_irq_chip) in __setup_irq()
1308 new->flags |= irqd_get_trigger_type(&desc->irq_data); in __setup_irq()
1359 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) in __setup_irq()
1383 new->name, irq, desc->irq_data.chip->name); in __setup_irq()
1410 new->name, irq, desc->irq_data.chip->name); in __setup_irq()
1419 if (irqd_trigger_type_was_set(&desc->irq_data)) { in __setup_irq()
1420 oldtype = irqd_get_trigger_type(&desc->irq_data); in __setup_irq()
1423 irqd_set_trigger_type(&desc->irq_data, oldtype); in __setup_irq()
1487 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { in __setup_irq()
1538 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); in __setup_irq()
1541 irqd_set(&desc->irq_data, IRQD_PER_CPU); in __setup_irq()
1551 irqd_set(&desc->irq_data, IRQD_NO_BALANCING); in __setup_irq()
1570 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data); in __setup_irq()
1669 retval = irq_chip_pm_get(&desc->irq_data); in setup_irq()
1676 irq_chip_pm_put(&desc->irq_data); in setup_irq()
1688 unsigned irq = desc->irq_data.irq; in __free_irq()
1806 irq_domain_deactivate_irq(&desc->irq_data); in __free_irq()
1816 irq_chip_pm_put(&desc->irq_data); in __free_irq()
1900 irq_chip_pm_put(&desc->irq_data); in __cleanup_nmi()
2023 retval = irq_chip_pm_get(&desc->irq_data); in request_threaded_irq()
2032 irq_chip_pm_put(&desc->irq_data); in request_threaded_irq()
2165 retval = irq_chip_pm_get(&desc->irq_data); in request_nmi()
2189 irq_chip_pm_put(&desc->irq_data); in request_nmi()
2211 type = irqd_get_trigger_type(&desc->irq_data); in enable_percpu_irq()
2316 irq_chip_pm_put(&desc->irq_data); in __free_percpu_irq()
2393 retval = irq_chip_pm_get(&desc->irq_data); in setup_percpu_irq()
2400 irq_chip_pm_put(&desc->irq_data); in setup_percpu_irq()
2450 retval = irq_chip_pm_get(&desc->irq_data); in __request_percpu_irq()
2459 irq_chip_pm_put(&desc->irq_data); in __request_percpu_irq()
2521 retval = irq_chip_pm_get(&desc->irq_data); in request_percpu_nmi()
2536 irq_chip_pm_put(&desc->irq_data); in request_percpu_nmi()
2619 int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which, in __irq_get_irqchip_state()
2658 struct irq_data *data; in irq_get_irqchip_state()
2691 struct irq_data *data; in irq_set_irqchip_state()
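The listing above shows one recurring pattern: the core code obtains the per-interrupt `struct irq_data` either via `irq_desc_get_irq_data(desc)` or directly as `&desc->irq_data`, queries or updates its state only through the `irqd_*()` accessors, and reaches the irqchip callbacks through `irq_data.chip`. The sketch below illustrates that pattern in isolation; it is a minimal, hedged example (the helper name `my_irq_core_helper` is hypothetical, and it ignores the descriptor locking and bus-lock handling that functions such as `__irq_set_trigger()` perform), not code from the file itself.

```c
#include <linux/irq.h>
#include <linux/irqdesc.h>

/*
 * Hypothetical helper illustrating the irq_data access pattern seen in
 * the listing: accessor-based state checks plus a chip callback invoked
 * through irq_data. Real core code holds desc->lock while doing this.
 */
static void my_irq_core_helper(struct irq_desc *desc)
{
	struct irq_data *irqd = irq_desc_get_irq_data(desc);
	unsigned int type = irqd_get_trigger_type(irqd);

	/* State is read through accessors, never by poking raw flags. */
	if (irqd_irq_inprogress(irqd) || irqd_irq_disabled(irqd))
		return;

	/* Chip operations take the irq_data, not the descriptor. */
	if (irqd->chip && irqd->chip->irq_set_type)
		irqd->chip->irq_set_type(irqd, type);
}
```

This mirrors, in simplified form, what `__irq_set_trigger()` does at lines 765 to 804 of the listing: fetch the trigger type from the `irq_data`, check the masked/disabled state, and hand the request to `chip->irq_set_type()`.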