Lines matching refs:irq — each entry gives the source line number, the matched line, and the enclosing function, with a "local" or "argument" qualifier where the match is the declaration of a local variable or parameter.

72 struct vgic_irq *irq = NULL; in vgic_get_lpi() local
77 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { in vgic_get_lpi()
78 if (irq->intid != intid) in vgic_get_lpi()
85 vgic_get_irq_kref(irq); in vgic_get_lpi()
88 irq = NULL; in vgic_get_lpi()
93 return irq; in vgic_get_lpi()
133 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq) in vgic_put_irq() argument
138 if (irq->intid < VGIC_MIN_LPI) in vgic_put_irq()
142 if (!kref_put(&irq->refcount, vgic_irq_release)) { in vgic_put_irq()
147 list_del(&irq->lpi_list); in vgic_put_irq()
151 kfree(irq); in vgic_put_irq()
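
The two groups above, vgic_get_lpi() and vgic_put_irq(), show the lifetime rule for LPIs: a lookup walks dist->lpi_list_head under the list lock and takes a kref on the matching entry, and the final vgic_put_irq() unlinks the entry and frees it (non-LPI interrupts are statically allocated, which is why vgic_put_irq() bails out early for intid < VGIC_MIN_LPI). Below is a minimal user-space sketch of that get/put pattern; the my_irq type, the plain mutex and the integer refcount are stand-ins for the kernel's vgic_irq, spinlock and kref, not the real definitions.

    #include <pthread.h>
    #include <stdlib.h>

    struct my_irq {
        struct my_irq *next;        /* models the lpi_list linkage */
        unsigned int   intid;
        int            refcount;    /* models irq->refcount (a kref in the kernel) */
    };

    struct my_irq  *lpi_list;                                  /* models dist->lpi_list_head */
    pthread_mutex_t lpi_list_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Look up an LPI by intid and take a reference, as vgic_get_lpi() does. */
    struct my_irq *lpi_get(unsigned int intid)
    {
        struct my_irq *irq, *found = NULL;

        pthread_mutex_lock(&lpi_list_lock);
        for (irq = lpi_list; irq; irq = irq->next) {
            if (irq->intid != intid)
                continue;
            irq->refcount++;        /* vgic_get_irq_kref() */
            found = irq;
            break;
        }
        pthread_mutex_unlock(&lpi_list_lock);
        return found;               /* NULL if no such LPI exists */
    }

    /* Drop a reference; the last put unlinks and frees, as vgic_put_irq() does. */
    void lpi_put(struct my_irq *irq)
    {
        struct my_irq **pp;

        pthread_mutex_lock(&lpi_list_lock);
        if (--irq->refcount) {
            pthread_mutex_unlock(&lpi_list_lock);
            return;
        }
        for (pp = &lpi_list; *pp; pp = &(*pp)->next) {
            if (*pp == irq) {
                *pp = irq->next;    /* list_del(&irq->lpi_list) */
                break;
            }
        }
        pthread_mutex_unlock(&lpi_list_lock);
        free(irq);
    }
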
154 void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending) in vgic_irq_set_phys_pending() argument
156 WARN_ON(irq_set_irqchip_state(irq->host_irq, in vgic_irq_set_phys_pending()
161 bool vgic_get_phys_line_level(struct vgic_irq *irq) in vgic_get_phys_line_level() argument
165 BUG_ON(!irq->hw); in vgic_get_phys_line_level()
167 if (irq->get_input_level) in vgic_get_phys_line_level()
168 return irq->get_input_level(irq->intid); in vgic_get_phys_line_level()
170 WARN_ON(irq_get_irqchip_state(irq->host_irq, in vgic_get_phys_line_level()
177 void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active) in vgic_irq_set_phys_active() argument
180 BUG_ON(!irq->hw); in vgic_irq_set_phys_active()
181 WARN_ON(irq_set_irqchip_state(irq->host_irq, in vgic_irq_set_phys_active()
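
For HW-mapped interrupts these three helpers bridge to the host interrupt controller through irq_set_irqchip_state()/irq_get_irqchip_state(). The one decision worth spelling out is in vgic_get_phys_line_level(): when a per-IRQ get_input_level callback is registered it takes precedence over querying the host irqchip. A hedged sketch of that callback-or-fallback shape follows; read_host_pending() and the hw_irq type are invented stand-ins for the kernel API.

    #include <stdbool.h>
    #include <stddef.h>

    struct hw_irq {
        bool          hw;                            /* mapped to a physical IRQ */
        unsigned int  host_irq;
        unsigned int  intid;
        bool        (*get_input_level)(int intid);   /* optional override */
    };

    /* Stand-in for irq_get_irqchip_state(..., IRQCHIP_STATE_PENDING, ...). */
    bool read_host_pending(unsigned int host_irq)
    {
        (void)host_irq;
        return false;               /* assumption: no real hardware behind this sketch */
    }

    /* Mirrors the decision in vgic_get_phys_line_level(). */
    bool phys_line_level(const struct hw_irq *irq)
    {
        if (irq->get_input_level)
            return irq->get_input_level(irq->intid);
        return read_host_pending(irq->host_irq);
    }
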
197 static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq) in vgic_target_oracle() argument
199 DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock)); in vgic_target_oracle()
202 if (irq->active) in vgic_target_oracle()
203 return irq->vcpu ? : irq->target_vcpu; in vgic_target_oracle()
211 if (irq->enabled && irq_is_pending(irq)) { in vgic_target_oracle()
212 if (unlikely(irq->target_vcpu && in vgic_target_oracle()
213 !irq->target_vcpu->kvm->arch.vgic.enabled)) in vgic_target_oracle()
216 return irq->target_vcpu; in vgic_target_oracle()
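
vgic_target_oracle() answers one question under irq_lock: which vCPU, if any, should this interrupt be queued to? The matched lines contain the whole policy: an active interrupt stays with the vCPU it is already on (falling back to target_vcpu), a pending-and-enabled interrupt goes to target_vcpu unless the distributor is disabled, and everything else goes nowhere. A compact model of that decision follows; the types are simplified and dist_enabled stands in for kvm->arch.vgic.enabled.

    #include <stdbool.h>
    #include <stddef.h>

    struct model_vcpu { bool dist_enabled; };

    struct model_irq {
        bool active;
        bool enabled;
        bool pending;                   /* stands in for irq_is_pending(irq) */
        struct model_vcpu *vcpu;        /* vCPU currently holding the IRQ, if queued */
        struct model_vcpu *target_vcpu; /* routing target */
    };

    /* Mirrors the decision order in vgic_target_oracle(); caller holds irq_lock. */
    struct model_vcpu *target_oracle(const struct model_irq *irq)
    {
        /* An active IRQ must stay where it is until deactivated. */
        if (irq->active)
            return irq->vcpu ? irq->vcpu : irq->target_vcpu;

        /* Pending and enabled: deliver to the target, unless the distributor is off. */
        if (irq->enabled && irq->pending) {
            if (irq->target_vcpu && !irq->target_vcpu->dist_enabled)
                return NULL;
            return irq->target_vcpu;
        }

        /* Neither active nor pending: no reason to queue it anywhere. */
        return NULL;
    }
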
293 static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner) in vgic_validate_injection() argument
295 if (irq->owner != owner) in vgic_validate_injection()
298 switch (irq->config) { in vgic_validate_injection()
300 return irq->line_level != level; in vgic_validate_injection()
316 bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq, in vgic_queue_irq_unlock() argument
321 DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock)); in vgic_queue_irq_unlock()
324 vcpu = vgic_target_oracle(irq); in vgic_queue_irq_unlock()
325 if (irq->vcpu || !vcpu) { in vgic_queue_irq_unlock()
335 spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_queue_irq_unlock()
357 spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_queue_irq_unlock()
362 spin_lock(&irq->irq_lock); in vgic_queue_irq_unlock()
376 if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) { in vgic_queue_irq_unlock()
377 spin_unlock(&irq->irq_lock); in vgic_queue_irq_unlock()
380 spin_lock_irqsave(&irq->irq_lock, flags); in vgic_queue_irq_unlock()
388 vgic_get_irq_kref(irq); in vgic_queue_irq_unlock()
389 list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head); in vgic_queue_irq_unlock()
390 irq->vcpu = vcpu; in vgic_queue_irq_unlock()
392 spin_unlock(&irq->irq_lock); in vgic_queue_irq_unlock()
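
vgic_queue_irq_unlock() is entered with irq_lock held but must also take the target vCPU's ap_list lock, and the documented lock order is ap_list lock before irq_lock. The matched lines show the standard workaround: drop irq_lock, take both locks in the right order, then re-run vgic_target_oracle() and re-check irq->vcpu, because the world may have changed while no lock was held. Below is a minimal user-space sketch of that drop, re-order, re-validate pattern; the mutexes, the sketch_* names and the target field (standing in for the oracle result) are illustrative assumptions.

    #include <pthread.h>
    #include <stddef.h>

    struct sketch_vcpu {
        pthread_mutex_t ap_list_lock;
        /* ... per-vCPU list of queued IRQs elided ... */
    };

    struct sketch_irq {
        pthread_mutex_t irq_lock;
        struct sketch_vcpu *vcpu;       /* non-NULL once queued on an ap_list */
        struct sketch_vcpu *target;     /* stand-in for vgic_target_oracle() */
    };

    /* Called with irq->irq_lock held; returns with it released, like
     * vgic_queue_irq_unlock(). */
    void queue_irq_unlock(struct sketch_irq *irq)
    {
        struct sketch_vcpu *vcpu;

    retry:
        if (irq->vcpu || !irq->target) {
            /* Already queued, or nowhere to go: nothing to do. */
            pthread_mutex_unlock(&irq->irq_lock);
            return;
        }

        /* Lock order is ap_list_lock -> irq_lock, so drop and re-take. */
        vcpu = irq->target;
        pthread_mutex_unlock(&irq->irq_lock);

        pthread_mutex_lock(&vcpu->ap_list_lock);
        pthread_mutex_lock(&irq->irq_lock);

        /* Re-validate: the IRQ may have been queued elsewhere, or its target
         * may have changed, while no lock was held. */
        if (irq->vcpu || vcpu != irq->target) {
            pthread_mutex_unlock(&irq->irq_lock);
            pthread_mutex_unlock(&vcpu->ap_list_lock);
            pthread_mutex_lock(&irq->irq_lock);
            goto retry;
        }

        /* Safe to queue: link onto the vCPU's ap_list (elided) and record it. */
        irq->vcpu = vcpu;

        pthread_mutex_unlock(&irq->irq_lock);
        pthread_mutex_unlock(&vcpu->ap_list_lock);
        /* The kernel also kicks the vCPU here so it notices the new interrupt. */
    }
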
422 struct vgic_irq *irq; in kvm_vgic_inject_irq() local
436 irq = vgic_get_irq(kvm, vcpu, intid); in kvm_vgic_inject_irq()
437 if (!irq) in kvm_vgic_inject_irq()
440 spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_inject_irq()
442 if (!vgic_validate_injection(irq, level, owner)) { in kvm_vgic_inject_irq()
444 spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_inject_irq()
445 vgic_put_irq(kvm, irq); in kvm_vgic_inject_irq()
449 if (irq->config == VGIC_CONFIG_LEVEL) in kvm_vgic_inject_irq()
450 irq->line_level = level; in kvm_vgic_inject_irq()
452 irq->pending_latch = true; in kvm_vgic_inject_irq()
454 vgic_queue_irq_unlock(kvm, irq, flags); in kvm_vgic_inject_irq()
455 vgic_put_irq(kvm, irq); in kvm_vgic_inject_irq()
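
kvm_vgic_inject_irq() ties the pieces together: look the IRQ up, validate the injection against its owner and trigger type, update the pending state under irq_lock, then hand off to vgic_queue_irq_unlock(). The trigger handling from vgic_validate_injection() and lines 449-452 above is the part worth spelling out: for a level-triggered interrupt only a change of line level counts and the level itself is stored, while for an edge-triggered interrupt only a rising injection does anything and it simply sets pending_latch. A small model of just that state update, with simplified types:

    #include <stdbool.h>

    enum trigger { CFG_EDGE, CFG_LEVEL };

    struct inj_irq {
        enum trigger config;
        bool line_level;        /* last injected level for level-triggered IRQs */
        bool pending_latch;     /* latched pending state for edge-triggered IRQs */
        void *owner;            /* only the registered owner may inject */
    };

    /* Mirrors vgic_validate_injection(): would this injection change anything? */
    bool injection_is_new(const struct inj_irq *irq, bool level, void *owner)
    {
        if (irq->owner != owner)
            return false;                       /* wrong owner: ignore */
        if (irq->config == CFG_LEVEL)
            return irq->line_level != level;    /* level IRQ: only a change of line level matters */
        return level;                           /* edge IRQ: only a rising injection latches */
    }

    /* Mirrors the update done under irq_lock in kvm_vgic_inject_irq(). */
    void inject(struct inj_irq *irq, bool level, void *owner)
    {
        if (!injection_is_new(irq, level, owner))
            return;
        if (irq->config == CFG_LEVEL)
            irq->line_level = level;
        else
            irq->pending_latch = true;
        /* ...then vgic_queue_irq_unlock() decides whether a vCPU must be poked. */
    }
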
461 static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq, in kvm_vgic_map_irq() argument
480 irq->hw = true; in kvm_vgic_map_irq()
481 irq->host_irq = host_irq; in kvm_vgic_map_irq()
482 irq->hwintid = data->hwirq; in kvm_vgic_map_irq()
483 irq->get_input_level = get_input_level; in kvm_vgic_map_irq()
488 static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq) in kvm_vgic_unmap_irq() argument
490 irq->hw = false; in kvm_vgic_unmap_irq()
491 irq->hwintid = 0; in kvm_vgic_unmap_irq()
492 irq->get_input_level = NULL; in kvm_vgic_unmap_irq()
498 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_map_phys_irq() local
502 BUG_ON(!irq); in kvm_vgic_map_phys_irq()
504 spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_map_phys_irq()
505 ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level); in kvm_vgic_map_phys_irq()
506 spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_map_phys_irq()
507 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_map_phys_irq()
523 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_reset_mapped_irq() local
526 if (!irq->hw) in kvm_vgic_reset_mapped_irq()
529 spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_reset_mapped_irq()
530 irq->active = false; in kvm_vgic_reset_mapped_irq()
531 irq->pending_latch = false; in kvm_vgic_reset_mapped_irq()
532 irq->line_level = false; in kvm_vgic_reset_mapped_irq()
533 spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_reset_mapped_irq()
535 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_reset_mapped_irq()
540 struct vgic_irq *irq; in kvm_vgic_unmap_phys_irq() local
546 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_unmap_phys_irq()
547 BUG_ON(!irq); in kvm_vgic_unmap_phys_irq()
549 spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_unmap_phys_irq()
550 kvm_vgic_unmap_irq(irq); in kvm_vgic_unmap_phys_irq()
551 spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_unmap_phys_irq()
552 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_unmap_phys_irq()
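
The map/reset/unmap trio above manages the binding between a virtual INTID and a physical host IRQ: kvm_vgic_map_irq() records hw, host_irq, hwintid and an optional get_input_level callback, kvm_vgic_reset_mapped_irq() clears the virtual state so a restarted guest does not inherit stale active/pending bits, and kvm_vgic_unmap_irq() tears the binding down. The sketch below condenses the three into plain functions; the field names follow the matched lines, but the mapped_irq type is simplified and hwintid is passed in directly, whereas the kernel derives it from the host IRQ's irq_data (data->hwirq) and validates that the host IRQ has an irqchip.

    #include <stdbool.h>
    #include <stddef.h>

    struct mapped_irq {
        /* virtual state */
        bool active, pending_latch, line_level;
        /* physical binding */
        bool          hw;
        unsigned int  host_irq;     /* Linux IRQ number on the host */
        unsigned int  hwintid;      /* physical INTID written into the LR */
        bool        (*get_input_level)(int intid);
    };

    /* kvm_vgic_map_irq(): record the physical side of the binding. */
    void map_irq(struct mapped_irq *irq, unsigned int host_irq,
                 unsigned int hwintid, bool (*get_input_level)(int))
    {
        irq->hw = true;
        irq->host_irq = host_irq;
        irq->hwintid = hwintid;
        irq->get_input_level = get_input_level;
    }

    /* kvm_vgic_reset_mapped_irq(): wipe the virtual state; only meaningful
     * for HW-mapped IRQs. */
    void reset_mapped_irq(struct mapped_irq *irq)
    {
        if (!irq->hw)
            return;
        irq->active = false;
        irq->pending_latch = false;
        irq->line_level = false;
    }

    /* kvm_vgic_unmap_irq(): drop the binding. */
    void unmap_irq(struct mapped_irq *irq)
    {
        irq->hw = false;
        irq->hwintid = 0;
        irq->get_input_level = NULL;
    }
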
569 struct vgic_irq *irq; in kvm_vgic_set_owner() local
580 irq = vgic_get_irq(vcpu->kvm, vcpu, intid); in kvm_vgic_set_owner()
581 spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_set_owner()
582 if (irq->owner && irq->owner != owner) in kvm_vgic_set_owner()
585 irq->owner = owner; in kvm_vgic_set_owner()
586 spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_set_owner()
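
kvm_vgic_set_owner() is a simple compare-and-claim under irq_lock: a second owner cannot steal an interrupt that another in-kernel user (for example the arch timer) has already registered for. A tiny sketch of that check, with an opaque owner token and a mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stddef.h>

    struct owned_irq {
        pthread_mutex_t irq_lock;
        void *owner;                /* opaque token identifying the in-kernel user */
    };

    /* Mirrors kvm_vgic_set_owner(): claim the IRQ unless someone else has it. */
    int set_owner(struct owned_irq *irq, void *owner)
    {
        int ret = 0;

        pthread_mutex_lock(&irq->irq_lock);
        if (irq->owner && irq->owner != owner)
            ret = -1;               /* the kernel returns -EEXIST here */
        else
            irq->owner = owner;
        pthread_mutex_unlock(&irq->irq_lock);
        return ret;
    }
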
602 struct vgic_irq *irq, *tmp; in vgic_prune_ap_list() local
609 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { in vgic_prune_ap_list()
613 spin_lock(&irq->irq_lock); in vgic_prune_ap_list()
615 BUG_ON(vcpu != irq->vcpu); in vgic_prune_ap_list()
617 target_vcpu = vgic_target_oracle(irq); in vgic_prune_ap_list()
624 list_del(&irq->ap_list); in vgic_prune_ap_list()
625 irq->vcpu = NULL; in vgic_prune_ap_list()
626 spin_unlock(&irq->irq_lock); in vgic_prune_ap_list()
635 vgic_put_irq(vcpu->kvm, irq); in vgic_prune_ap_list()
641 spin_unlock(&irq->irq_lock); in vgic_prune_ap_list()
647 spin_unlock(&irq->irq_lock); in vgic_prune_ap_list()
665 spin_lock(&irq->irq_lock); in vgic_prune_ap_list()
676 if (target_vcpu == vgic_target_oracle(irq)) { in vgic_prune_ap_list()
679 list_del(&irq->ap_list); in vgic_prune_ap_list()
680 irq->vcpu = target_vcpu; in vgic_prune_ap_list()
681 list_add_tail(&irq->ap_list, &new_cpu->ap_list_head); in vgic_prune_ap_list()
685 spin_unlock(&irq->irq_lock); in vgic_prune_ap_list()
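
vgic_prune_ap_list() walks a vCPU's ap_list and sorts each entry into one of three cases: the oracle says the IRQ no longer needs to be queued (unlink it and drop the reference), it should stay on this vCPU (nothing to do), or it now targets another vCPU and must migrate. Migration is the tricky case because it needs both vCPUs' ap_list locks plus the irq_lock, so the kernel drops everything and re-takes the two ap_list locks in a fixed order (lower vCPU index first) before re-checking the oracle. The sketch below models only the migration step; the pl_* names, the mutexes and the target field (standing in for the oracle) are illustrative assumptions, and the list manipulation itself is elided.

    #include <pthread.h>
    #include <stddef.h>

    struct pl_vcpu {
        int idx;                            /* vCPU index, defines the lock order */
        pthread_mutex_t ap_list_lock;
        /* ... ap_list head elided ... */
    };

    struct pl_irq {
        pthread_mutex_t irq_lock;
        struct pl_vcpu *vcpu;               /* vCPU whose ap_list currently holds us */
        struct pl_vcpu *target;             /* stand-in for vgic_target_oracle() */
    };

    /* Move one IRQ from its current vCPU to its new target, the way
     * vgic_prune_ap_list() does: both ap_list locks are taken in vCPU-index
     * order to avoid an AB-BA deadlock, and the target is re-checked after
     * the locks are finally held. */
    void migrate_irq(struct pl_irq *irq, struct pl_vcpu *cur, struct pl_vcpu *new_vcpu)
    {
        struct pl_vcpu *a = cur->idx < new_vcpu->idx ? cur : new_vcpu;
        struct pl_vcpu *b = cur->idx < new_vcpu->idx ? new_vcpu : cur;

        pthread_mutex_lock(&a->ap_list_lock);
        pthread_mutex_lock(&b->ap_list_lock);
        pthread_mutex_lock(&irq->irq_lock);

        /* The decision was made without these locks held, so verify it still
         * stands before moving the entry between lists. */
        if (irq->target == new_vcpu) {
            /* list_del() from cur's ap_list, list_add_tail() to new_vcpu's (elided) */
            irq->vcpu = new_vcpu;
        }

        pthread_mutex_unlock(&irq->irq_lock);
        pthread_mutex_unlock(&b->ap_list_lock);
        pthread_mutex_unlock(&a->ap_list_lock);
    }
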
710 struct vgic_irq *irq, int lr) in vgic_populate_lr() argument
712 DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock)); in vgic_populate_lr()
715 vgic_v2_populate_lr(vcpu, irq, lr); in vgic_populate_lr()
717 vgic_v3_populate_lr(vcpu, irq, lr); in vgic_populate_lr()
741 struct vgic_irq *irq; in compute_ap_list_depth() local
748 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { in compute_ap_list_depth()
751 spin_lock(&irq->irq_lock); in compute_ap_list_depth()
753 w = vgic_irq_get_lr_count(irq); in compute_ap_list_depth()
754 spin_unlock(&irq->irq_lock); in compute_ap_list_depth()
766 struct vgic_irq *irq; in vgic_flush_lr_state() local
779 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { in vgic_flush_lr_state()
780 spin_lock(&irq->irq_lock); in vgic_flush_lr_state()
789 if (multi_sgi && irq->priority > prio) { in vgic_flush_lr_state()
790 spin_unlock(&irq->irq_lock); in vgic_flush_lr_state()
794 if (likely(vgic_target_oracle(irq) == vcpu)) { in vgic_flush_lr_state()
795 vgic_populate_lr(vcpu, irq, count++); in vgic_flush_lr_state()
797 if (irq->source) in vgic_flush_lr_state()
798 prio = irq->priority; in vgic_flush_lr_state()
801 spin_unlock(&irq->irq_lock); in vgic_flush_lr_state()
804 if (!list_is_last(&irq->ap_list, in vgic_flush_lr_state()
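
vgic_flush_lr_state() copies as many queued interrupts as fit into the hardware list registers. compute_ap_list_depth() first counts how many LR slots the ap_list would need (a multi-source SGI can need more than one), and when multi-source SGIs are in the mix the flush loop refuses to present anything of lower priority than the last such SGI it populated, so all of its sources are seen before lower-priority interrupts; if queued work is left behind, an underflow interrupt is requested. A reduced model of that selection loop over a priority-sorted list follows; arrays replace the ap_list and list registers, and the lr_irq fields are invented stand-ins.

    #include <stdbool.h>
    #include <stddef.h>

    struct lr_irq {
        unsigned char priority;     /* GIC convention: lower value = higher priority */
        bool is_sgi_with_sources;   /* stand-in for irq->source != 0 */
        bool targets_this_vcpu;     /* stand-in for vgic_target_oracle(irq) == vcpu */
    };

    /* Fill at most nr_lr list registers from a priority-sorted candidate list,
     * mirroring the loop in vgic_flush_lr_state(). Returns the number populated;
     * *underflow is set when queued work had to be left behind. */
    int flush_lrs(const struct lr_irq *sorted, int n, int nr_lr,
                  bool multi_sgi, bool *underflow)
    {
        int count = 0;
        unsigned char prio = 0xff;  /* no multi-source SGI seen yet: accept anything */

        *underflow = false;
        for (int i = 0; i < n; i++) {
            /* With multi-source SGIs pending, nothing of lower priority may be
             * presented before all of their sources have been flushed. */
            if (multi_sgi && sorted[i].priority > prio)
                break;

            if (sorted[i].targets_this_vcpu) {
                /* vgic_populate_lr(vcpu, irq, count++) */
                count++;
                if (sorted[i].is_sgi_with_sources)
                    prio = sorted[i].priority;
            }

            if (count == nr_lr) {
                if (i != n - 1)
                    *underflow = true;  /* vgic_set_underflow(): ask for a maintenance exit */
                break;
            }
        }
        return count;
    }
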
926 struct vgic_irq *irq; in kvm_vgic_vcpu_pending_irq() local
938 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { in kvm_vgic_vcpu_pending_irq()
939 spin_lock(&irq->irq_lock); in kvm_vgic_vcpu_pending_irq()
940 pending = irq_is_pending(irq) && irq->enabled; in kvm_vgic_vcpu_pending_irq()
941 spin_unlock(&irq->irq_lock); in kvm_vgic_vcpu_pending_irq()
971 struct vgic_irq *irq; in kvm_vgic_map_is_active() local
978 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_map_is_active()
979 spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_map_is_active()
980 map_is_active = irq->hw && irq->active; in kvm_vgic_map_is_active()
981 spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_map_is_active()
982 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_map_is_active()