Lines matching references to irq in vgic.c (KVM's GIC emulation core)

61 	struct vgic_irq *irq = NULL;  in vgic_get_lpi()  local
66 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { in vgic_get_lpi()
67 if (irq->intid != intid) in vgic_get_lpi()
74 vgic_get_irq_kref(irq); in vgic_get_lpi()
77 irq = NULL; in vgic_get_lpi()
82 return irq; in vgic_get_lpi()
124 void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq) in __vgic_put_lpi_locked() argument
128 if (!kref_put(&irq->refcount, vgic_irq_release)) in __vgic_put_lpi_locked()
131 list_del(&irq->lpi_list); in __vgic_put_lpi_locked()
134 kfree(irq); in __vgic_put_lpi_locked()
137 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq) in vgic_put_irq() argument
142 if (irq->intid < VGIC_MIN_LPI) in vgic_put_irq()
146 __vgic_put_lpi_locked(kvm, irq); in vgic_put_irq()
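The three functions above form the reference-counting lifecycle of an LPI: vgic_get_lpi() scans dist->lpi_list_head and takes a reference (vgic_get_irq_kref()) on a match, __vgic_put_lpi_locked() drops a reference and unlinks and frees the descriptor on the last put, and vgic_put_irq() short-circuits for the statically allocated SGIs/PPIs/SPIs (intid < VGIC_MIN_LPI). A minimal userspace sketch of the same lookup/get/put pattern, using a pthread mutex and a plain counter instead of the kernel's lpi_list_lock and kref (struct virq and the helper names are illustrative, not kernel APIs):

#include <pthread.h>
#include <stdlib.h>

struct virq {
        unsigned int intid;
        unsigned int refcount;          /* protected by list_lock in this sketch */
        struct virq *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct virq *virq_list;

/* Lookup-then-get: mirrors vgic_get_lpi() + vgic_get_irq_kref(). */
static struct virq *virq_get(unsigned int intid)
{
        struct virq *v, *found = NULL;

        pthread_mutex_lock(&list_lock);
        for (v = virq_list; v; v = v->next) {
                if (v->intid != intid)
                        continue;
                v->refcount++;
                found = v;
                break;
        }
        pthread_mutex_unlock(&list_lock);
        return found;                   /* NULL when no LPI matches */
}

/* Drop a reference; unlink and free on the last put, like vgic_put_irq(). */
static void virq_put(struct virq *virq)
{
        struct virq **pp;

        pthread_mutex_lock(&list_lock);
        if (--virq->refcount == 0) {
                for (pp = &virq_list; *pp; pp = &(*pp)->next) {
                        if (*pp == virq) {
                                *pp = virq->next;
                                break;
                        }
                }
                free(virq);
        }
        pthread_mutex_unlock(&list_lock);
}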
153 struct vgic_irq *irq, *tmp; in vgic_flush_pending_lpis() local
158 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { in vgic_flush_pending_lpis()
159 if (irq->intid >= VGIC_MIN_LPI) { in vgic_flush_pending_lpis()
160 raw_spin_lock(&irq->irq_lock); in vgic_flush_pending_lpis()
161 list_del(&irq->ap_list); in vgic_flush_pending_lpis()
162 irq->vcpu = NULL; in vgic_flush_pending_lpis()
163 raw_spin_unlock(&irq->irq_lock); in vgic_flush_pending_lpis()
164 vgic_put_irq(vcpu->kvm, irq); in vgic_flush_pending_lpis()
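vgic_flush_pending_lpis() walks the vcpu's ap_list with list_for_each_entry_safe() so entries can be unlinked during the walk; each LPI (intid >= VGIC_MIN_LPI) is detached under its per-IRQ lock, its vcpu pointer cleared, and the reference the ap_list held on it dropped. A sketch of the same remove-while-iterating idiom on a singly linked list (toy types, not the kernel's list_head):

#define VIRQ_MIN_LPI    8192            /* stand-in for VGIC_MIN_LPI */

struct virq {
        unsigned int intid;
        struct virq *next;
};

/*
 * Unlink every LPI from the list; the ** cursor plays the role of
 * list_for_each_entry_safe(): removing an entry never invalidates it.
 */
static void flush_pending_lpis(struct virq **head)
{
        struct virq **pp = head;

        while (*pp) {
                struct virq *v = *pp;

                if (v->intid >= VIRQ_MIN_LPI) {
                        *pp = v->next;  /* detach, like list_del(&irq->ap_list) */
                        v->next = NULL;
                        /* the kernel also clears irq->vcpu and calls vgic_put_irq() here */
                } else {
                        pp = &v->next;
                }
        }
}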
171 void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending) in vgic_irq_set_phys_pending() argument
173 WARN_ON(irq_set_irqchip_state(irq->host_irq, in vgic_irq_set_phys_pending()
178 bool vgic_get_phys_line_level(struct vgic_irq *irq) in vgic_get_phys_line_level() argument
182 BUG_ON(!irq->hw); in vgic_get_phys_line_level()
184 if (irq->ops && irq->ops->get_input_level) in vgic_get_phys_line_level()
185 return irq->ops->get_input_level(irq->intid); in vgic_get_phys_line_level()
187 WARN_ON(irq_get_irqchip_state(irq->host_irq, in vgic_get_phys_line_level()
194 void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active) in vgic_irq_set_phys_active() argument
197 BUG_ON(!irq->hw); in vgic_irq_set_phys_active()
198 WARN_ON(irq_set_irqchip_state(irq->host_irq, in vgic_irq_set_phys_active()
214 static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq) in vgic_target_oracle() argument
216 lockdep_assert_held(&irq->irq_lock); in vgic_target_oracle()
219 if (irq->active) in vgic_target_oracle()
220 return irq->vcpu ? : irq->target_vcpu; in vgic_target_oracle()
228 if (irq->enabled && irq_is_pending(irq)) { in vgic_target_oracle()
229 if (unlikely(irq->target_vcpu && in vgic_target_oracle()
230 !irq->target_vcpu->kvm->arch.vgic.enabled)) in vgic_target_oracle()
233 return irq->target_vcpu; in vgic_target_oracle()
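vgic_target_oracle() is the single place that decides where an interrupt should live: an active interrupt stays on the vcpu it is already queued on (falling back to its target), a pending and enabled interrupt goes to its target vcpu unless that vcpu's distributor is disabled, and everything else belongs on no ap_list at all. The rule written out as a small pure function (the struct fields below are simplified stand-ins for the real vgic_irq):

#include <stdbool.h>
#include <stddef.h>

struct vcpu { bool vgic_enabled; };

struct virq {
        bool active, enabled, pending;
        struct vcpu *vcpu;              /* vcpu the irq is currently queued on */
        struct vcpu *target_vcpu;       /* vcpu it is routed to */
};

/* Must be called with the per-IRQ lock held, as the lockdep assert insists. */
static struct vcpu *target_oracle(const struct virq *irq)
{
        if (irq->active)                /* keep active irqs where they are */
                return irq->vcpu ? irq->vcpu : irq->target_vcpu;

        if (irq->enabled && irq->pending) {
                if (irq->target_vcpu && !irq->target_vcpu->vgic_enabled)
                        return NULL;    /* target's distributor is off */
                return irq->target_vcpu;
        }

        return NULL;                    /* not pending: belongs on no ap_list */
}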
311 static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner) in vgic_validate_injection() argument
313 if (irq->owner != owner) in vgic_validate_injection()
316 switch (irq->config) { in vgic_validate_injection()
318 return irq->line_level != level; in vgic_validate_injection()
334 bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq, in vgic_queue_irq_unlock() argument
339 lockdep_assert_held(&irq->irq_lock); in vgic_queue_irq_unlock()
342 vcpu = vgic_target_oracle(irq); in vgic_queue_irq_unlock()
343 if (irq->vcpu || !vcpu) { in vgic_queue_irq_unlock()
353 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_queue_irq_unlock()
375 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_queue_irq_unlock()
380 raw_spin_lock(&irq->irq_lock); in vgic_queue_irq_unlock()
394 if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) { in vgic_queue_irq_unlock()
395 raw_spin_unlock(&irq->irq_lock); in vgic_queue_irq_unlock()
399 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_queue_irq_unlock()
407 vgic_get_irq_kref(irq); in vgic_queue_irq_unlock()
408 list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head); in vgic_queue_irq_unlock()
409 irq->vcpu = vcpu; in vgic_queue_irq_unlock()
411 raw_spin_unlock(&irq->irq_lock); in vgic_queue_irq_unlock()
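vgic_queue_irq_unlock() is entered with only the per-IRQ lock held, but queueing needs the target vcpu's ap_list lock, which the vgic's locking order says must be taken first. Hence the dance visible above: drop the irq lock, take ap_list lock then irq lock, re-run the oracle, and if anything changed in the window, unwind and retry. A compact pthread sketch of that drop-retake-revalidate loop (two mutexes stand in for ap_list_lock and irq_lock; the names are illustrative, and irqsave/refcount handling is omitted):

#include <pthread.h>
#include <stddef.h>

struct vcpu {
        pthread_mutex_t ap_list_lock;
        /* ... ap_list head ... */
};

struct virq {
        pthread_mutex_t irq_lock;
        struct vcpu *vcpu;              /* non-NULL once queued on an ap_list */
        struct vcpu *target;            /* stand-in for the oracle's answer */
};

/* Called with irq->irq_lock held; returns with it released. */
static void queue_irq_unlock(struct virq *irq)
{
        struct vcpu *vcpu;

retry:
        vcpu = irq->target;
        if (irq->vcpu || !vcpu) {       /* already queued, or nowhere to go */
                pthread_mutex_unlock(&irq->irq_lock);
                return;
        }

        /* Lock order is ap_list_lock -> irq_lock, so drop and re-take. */
        pthread_mutex_unlock(&irq->irq_lock);
        pthread_mutex_lock(&vcpu->ap_list_lock);
        pthread_mutex_lock(&irq->irq_lock);

        /* Revalidate: the world may have changed while no lock was held. */
        if (irq->vcpu || vcpu != irq->target) {
                pthread_mutex_unlock(&irq->irq_lock);
                pthread_mutex_unlock(&vcpu->ap_list_lock);
                pthread_mutex_lock(&irq->irq_lock);
                goto retry;
        }

        /* Safe to queue: the kernel takes a reference and links ap_list here. */
        irq->vcpu = vcpu;

        pthread_mutex_unlock(&irq->irq_lock);
        pthread_mutex_unlock(&vcpu->ap_list_lock);
}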
441 struct vgic_irq *irq; in kvm_vgic_inject_irq() local
455 irq = vgic_get_irq(kvm, vcpu, intid); in kvm_vgic_inject_irq()
456 if (!irq) in kvm_vgic_inject_irq()
459 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_inject_irq()
461 if (!vgic_validate_injection(irq, level, owner)) { in kvm_vgic_inject_irq()
463 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_inject_irq()
464 vgic_put_irq(kvm, irq); in kvm_vgic_inject_irq()
468 if (irq->config == VGIC_CONFIG_LEVEL) in kvm_vgic_inject_irq()
469 irq->line_level = level; in kvm_vgic_inject_irq()
471 irq->pending_latch = true; in kvm_vgic_inject_irq()
473 vgic_queue_irq_unlock(kvm, irq, flags); in kvm_vgic_inject_irq()
474 vgic_put_irq(kvm, irq); in kvm_vgic_inject_irq()
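kvm_vgic_inject_irq() stitches the pieces together: look up and reference the IRQ, check with vgic_validate_injection() that the request is a real state change coming from the registered owner, record it as line_level (level-triggered) or pending_latch (edge-triggered) under the per-IRQ lock, then hand off to vgic_queue_irq_unlock() and drop the reference. A condensed sketch of the validation and state update, with the locking and the get/put bracket left out for brevity (enum and field names are simplified):

#include <stdbool.h>
#include <stddef.h>

enum virq_config { VIRQ_CONFIG_EDGE, VIRQ_CONFIG_LEVEL };

struct virq {
        enum virq_config config;
        bool line_level;                /* level-triggered input as last seen */
        bool pending_latch;             /* latched edge / MSI pending state */
        void *owner;                    /* e.g. the arch timer claims its PPIs */
};

/* Only a real state change is worth queueing, and only from the owner. */
static bool validate_injection(struct virq *irq, bool level, void *owner)
{
        if (irq->owner != owner)
                return false;
        if (irq->config == VIRQ_CONFIG_LEVEL)
                return irq->line_level != level;        /* level must actually change */
        return level;                                   /* edges only latch on assert */
}

/* Injection path: update state, then the kernel queues via vgic_queue_irq_unlock(). */
static int inject_irq(struct virq *irq, bool level, void *owner)
{
        if (!validate_injection(irq, level, owner))
                return 0;               /* nothing to do */

        if (irq->config == VIRQ_CONFIG_LEVEL)
                irq->line_level = level;
        else
                irq->pending_latch = true;

        return 0;
}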
480 static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq, in kvm_vgic_map_irq() argument
499 irq->hw = true; in kvm_vgic_map_irq()
500 irq->host_irq = host_irq; in kvm_vgic_map_irq()
501 irq->hwintid = data->hwirq; in kvm_vgic_map_irq()
502 irq->ops = ops; in kvm_vgic_map_irq()
507 static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq) in kvm_vgic_unmap_irq() argument
509 irq->hw = false; in kvm_vgic_unmap_irq()
510 irq->hwintid = 0; in kvm_vgic_unmap_irq()
511 irq->ops = NULL; in kvm_vgic_unmap_irq()
517 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_map_phys_irq() local
521 BUG_ON(!irq); in kvm_vgic_map_phys_irq()
523 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_map_phys_irq()
524 ret = kvm_vgic_map_irq(vcpu, irq, host_irq, ops); in kvm_vgic_map_phys_irq()
525 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_map_phys_irq()
526 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_map_phys_irq()
542 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_reset_mapped_irq() local
545 if (!irq->hw) in kvm_vgic_reset_mapped_irq()
548 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_reset_mapped_irq()
549 irq->active = false; in kvm_vgic_reset_mapped_irq()
550 irq->pending_latch = false; in kvm_vgic_reset_mapped_irq()
551 irq->line_level = false; in kvm_vgic_reset_mapped_irq()
552 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_reset_mapped_irq()
554 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_reset_mapped_irq()
559 struct vgic_irq *irq; in kvm_vgic_unmap_phys_irq() local
565 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_unmap_phys_irq()
566 BUG_ON(!irq); in kvm_vgic_unmap_phys_irq()
568 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_unmap_phys_irq()
569 kvm_vgic_unmap_irq(irq); in kvm_vgic_unmap_phys_irq()
570 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_unmap_phys_irq()
571 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_unmap_phys_irq()
588 struct vgic_irq *irq; in kvm_vgic_set_owner() local
599 irq = vgic_get_irq(vcpu->kvm, vcpu, intid); in kvm_vgic_set_owner()
600 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_set_owner()
601 if (irq->owner && irq->owner != owner) in kvm_vgic_set_owner()
604 irq->owner = owner; in kvm_vgic_set_owner()
605 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_set_owner()
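kvm_vgic_map_phys_irq(), kvm_vgic_reset_mapped_irq(), kvm_vgic_unmap_phys_irq() and kvm_vgic_set_owner() all share one shape: look up and reference the IRQ, take its lock with interrupts disabled, update a handful of fields (hw/host_irq/hwintid/ops, the pending/active/line state, or owner), unlock, and drop the reference. A userspace rendering of the owner variant, with the get/put bracket noted rather than repeated (struct virq here is illustrative):

#include <pthread.h>
#include <errno.h>

struct virq {
        pthread_mutex_t irq_lock;
        void *owner;
};

/*
 * Claim an interrupt for an in-kernel user, mirroring kvm_vgic_set_owner();
 * in the kernel this is bracketed by vgic_get_irq()/vgic_put_irq().
 */
static int virq_set_owner(struct virq *irq, void *owner)
{
        int ret = 0;

        pthread_mutex_lock(&irq->irq_lock);
        if (irq->owner && irq->owner != owner)
                ret = -EEXIST;          /* already claimed by someone else */
        else
                irq->owner = owner;
        pthread_mutex_unlock(&irq->irq_lock);

        return ret;
}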
621 struct vgic_irq *irq, *tmp; in vgic_prune_ap_list() local
628 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { in vgic_prune_ap_list()
632 raw_spin_lock(&irq->irq_lock); in vgic_prune_ap_list()
634 BUG_ON(vcpu != irq->vcpu); in vgic_prune_ap_list()
636 target_vcpu = vgic_target_oracle(irq); in vgic_prune_ap_list()
643 list_del(&irq->ap_list); in vgic_prune_ap_list()
644 irq->vcpu = NULL; in vgic_prune_ap_list()
645 raw_spin_unlock(&irq->irq_lock); in vgic_prune_ap_list()
654 vgic_put_irq(vcpu->kvm, irq); in vgic_prune_ap_list()
660 raw_spin_unlock(&irq->irq_lock); in vgic_prune_ap_list()
666 raw_spin_unlock(&irq->irq_lock); in vgic_prune_ap_list()
684 raw_spin_lock(&irq->irq_lock); in vgic_prune_ap_list()
695 if (target_vcpu == vgic_target_oracle(irq)) { in vgic_prune_ap_list()
698 list_del(&irq->ap_list); in vgic_prune_ap_list()
699 irq->vcpu = target_vcpu; in vgic_prune_ap_list()
700 list_add_tail(&irq->ap_list, &new_cpu->ap_list_head); in vgic_prune_ap_list()
704 raw_spin_unlock(&irq->irq_lock); in vgic_prune_ap_list()
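vgic_prune_ap_list() evicts interrupts whose oracle no longer points at this vcpu; when the target merely moved, the interrupt is re-queued on the other vcpu's ap_list, which requires both ap_list locks taken in a fixed order (lowest vcpu ID first in the kernel) plus a final oracle re-check, since the state may have changed while the locks were being reshuffled. A sketch of the two-list migration with a deterministic lock order (pthread mutexes and singly linked lists; all names are illustrative):

#include <pthread.h>

struct vcpu {
        int vcpu_id;
        pthread_mutex_t ap_list_lock;
        struct virq *ap_list;           /* singly linked for the sketch */
};

struct virq {
        pthread_mutex_t irq_lock;
        struct vcpu *vcpu;
        struct virq *next;
};

/* Take both ap_list locks in a stable order to avoid AB/BA deadlocks. */
static void lock_both(struct vcpu *a, struct vcpu *b)
{
        struct vcpu *first = a->vcpu_id < b->vcpu_id ? a : b;
        struct vcpu *second = a->vcpu_id < b->vcpu_id ? b : a;

        pthread_mutex_lock(&first->ap_list_lock);
        pthread_mutex_lock(&second->ap_list_lock);
}

static void unlock_both(struct vcpu *a, struct vcpu *b)
{
        pthread_mutex_unlock(&a->ap_list_lock);
        pthread_mutex_unlock(&b->ap_list_lock);
}

/* Move one irq from old_vcpu's ap_list onto new_vcpu's ap_list. */
static void migrate_irq(struct virq *irq, struct vcpu *old_vcpu,
                        struct vcpu *new_vcpu)
{
        struct virq **pp;

        lock_both(old_vcpu, new_vcpu);
        pthread_mutex_lock(&irq->irq_lock);

        /* the kernel re-runs vgic_target_oracle() here before committing */
        for (pp = &old_vcpu->ap_list; *pp; pp = &(*pp)->next) {
                if (*pp == irq) {
                        *pp = irq->next;                /* list_del(&irq->ap_list) */
                        break;
                }
        }
        irq->next = new_vcpu->ap_list;                  /* list_add_tail() in the kernel */
        new_vcpu->ap_list = irq;
        irq->vcpu = new_vcpu;

        pthread_mutex_unlock(&irq->irq_lock);
        unlock_both(old_vcpu, new_vcpu);
}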
729 struct vgic_irq *irq, int lr) in vgic_populate_lr() argument
731 lockdep_assert_held(&irq->irq_lock); in vgic_populate_lr()
734 vgic_v2_populate_lr(vcpu, irq, lr); in vgic_populate_lr()
736 vgic_v3_populate_lr(vcpu, irq, lr); in vgic_populate_lr()
760 struct vgic_irq *irq; in compute_ap_list_depth() local
767 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { in compute_ap_list_depth()
770 raw_spin_lock(&irq->irq_lock); in compute_ap_list_depth()
772 w = vgic_irq_get_lr_count(irq); in compute_ap_list_depth()
773 raw_spin_unlock(&irq->irq_lock); in compute_ap_list_depth()
785 struct vgic_irq *irq; in vgic_flush_lr_state() local
799 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { in vgic_flush_lr_state()
800 raw_spin_lock(&irq->irq_lock); in vgic_flush_lr_state()
809 if (multi_sgi && irq->priority > prio) { in vgic_flush_lr_state()
810 _raw_spin_unlock(&irq->irq_lock); in vgic_flush_lr_state()
814 if (likely(vgic_target_oracle(irq) == vcpu)) { in vgic_flush_lr_state()
815 vgic_populate_lr(vcpu, irq, count++); in vgic_flush_lr_state()
817 if (irq->source) in vgic_flush_lr_state()
818 prio = irq->priority; in vgic_flush_lr_state()
821 raw_spin_unlock(&irq->irq_lock); in vgic_flush_lr_state()
824 if (!list_is_last(&irq->ap_list, in vgic_flush_lr_state()
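vgic_flush_lr_state() copies as many queued interrupts as fit into the hardware list registers, walking the ap_list in priority order. The one subtlety visible above is the multi-SGI case: once an SGI still has pending sources, nothing of lower priority may be added behind it, so the loop remembers that SGI's priority and bails out when it would be exceeded. A stripped-down sketch of that bounded packing loop (NR_LR, the struct and populate_lr() are placeholders; per-IRQ locking and the oracle check are omitted):

#include <stdbool.h>

#define NR_LR   4                       /* hardware list registers available */

struct virq {
        unsigned char priority;         /* lower value = higher priority */
        unsigned char source;           /* remaining SGI source bitmap */
        struct virq *next;              /* ap_list, sorted by priority */
};

static void populate_lr(struct virq *irq, int lr)
{
        /* write the interrupt into list register 'lr' (hardware-specific) */
}

static void flush_lr_state(struct virq *ap_list, bool multi_sgi)
{
        unsigned char prio = 0xff;      /* lowest possible priority */
        int count = 0;
        struct virq *irq;

        for (irq = ap_list; irq && count < NR_LR; irq = irq->next) {
                /*
                 * With multi-source SGIs pending, nothing of lower priority
                 * may overtake the SGI's remaining sources.
                 */
                if (multi_sgi && irq->priority > prio)
                        break;

                populate_lr(irq, count++);

                /* an SGI with sources left pins the priority ceiling */
                if (irq->source)
                        prio = irq->priority;
        }
        /* the kernel clears the remaining LRs and records used_lrs here */
}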
959 struct vgic_irq *irq; in kvm_vgic_vcpu_pending_irq() local
974 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { in kvm_vgic_vcpu_pending_irq()
975 raw_spin_lock(&irq->irq_lock); in kvm_vgic_vcpu_pending_irq()
976 pending = irq_is_pending(irq) && irq->enabled && in kvm_vgic_vcpu_pending_irq()
977 !irq->active && in kvm_vgic_vcpu_pending_irq()
978 irq->priority < vmcr.pmr; in kvm_vgic_vcpu_pending_irq()
979 raw_spin_unlock(&irq->irq_lock); in kvm_vgic_vcpu_pending_irq()
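kvm_vgic_vcpu_pending_irq() answers "should this vcpu wake up?" by scanning the ap_list for an interrupt that is pending, enabled, not already active, and of higher priority than the current priority mask; on the GIC a lower numeric priority is more urgent, hence the priority < vmcr.pmr comparison. The predicate on its own (simplified fields, no locking):

#include <stdbool.h>

struct virq {
        bool pending, enabled, active;
        unsigned char priority;
        struct virq *next;
};

/* True if any queued interrupt can actually be signalled past the PMR. */
static bool vcpu_has_pending_irq(const struct virq *ap_list, unsigned char pmr)
{
        const struct virq *irq;

        for (irq = ap_list; irq; irq = irq->next) {
                if (irq->pending && irq->enabled && !irq->active &&
                    irq->priority < pmr)
                        return true;
        }
        return false;
}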
1009 struct vgic_irq *irq; in kvm_vgic_map_is_active() local
1016 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_map_is_active()
1017 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_map_is_active()
1018 map_is_active = irq->hw && irq->active; in kvm_vgic_map_is_active()
1019 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_map_is_active()
1020 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_map_is_active()
1045 void vgic_irq_handle_resampling(struct vgic_irq *irq, in vgic_irq_handle_resampling() argument
1048 if (vgic_irq_is_mapped_level(irq)) { in vgic_irq_handle_resampling()
1051 if (unlikely(vgic_irq_needs_resampling(irq))) { in vgic_irq_handle_resampling()
1052 resample = !(irq->active || irq->pending_latch); in vgic_irq_handle_resampling()
1053 } else if (lr_pending || (lr_deactivated && irq->line_level)) { in vgic_irq_handle_resampling()
1054 irq->line_level = vgic_get_phys_line_level(irq); in vgic_irq_handle_resampling()
1055 resample = !irq->line_level; in vgic_irq_handle_resampling()
1059 vgic_irq_set_phys_active(irq, false); in vgic_irq_handle_resampling()
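vgic_irq_handle_resampling() handles level-triggered interrupts mapped to a physical line: when the guest has deactivated one, the physical line level must be re-read (resampled) to decide whether the interrupt is really finished, and only then is the physical active state cleared so the host can take the line again; forwarded interrupts resampled elsewhere (vgic_irq_needs_resampling()) instead go by the virtual active/pending state. A condensed sketch of that decision (get_phys_line_level() and set_phys_active() stand in for the irq_get/set_irqchip_state() accessors):

#include <stdbool.h>

struct virq {
        bool mapped_level;              /* level-triggered and mapped to a host irq */
        bool needs_resampling;          /* forwarded irq resampled by its consumer */
        bool active, pending_latch, line_level;
};

static bool get_phys_line_level(struct virq *irq)
{
        return false;                   /* would query the host irqchip */
}

static void set_phys_active(struct virq *irq, bool active)
{
        /* would update the host irqchip's active state */
}

static void handle_resampling(struct virq *irq, bool lr_deactivated, bool lr_pending)
{
        bool resample = false;

        if (!irq->mapped_level)
                return;

        if (irq->needs_resampling) {
                /* consumer-resampled irqs are done once neither active nor pending */
                resample = !(irq->active || irq->pending_latch);
        } else if (lr_pending || (lr_deactivated && irq->line_level)) {
                /* re-read the physical line and finish only if it dropped */
                irq->line_level = get_phys_line_level(irq);
                resample = !irq->line_level;
        }

        if (resample)
                set_phys_active(irq, false);
}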