bool active; /* not used for LPIs */
bool enabled;
bool hw; /* Tied to HW IRQ */
+ struct kref refcount; /* Used for LPIs */
u32 hwintid; /* HW INTID number */
union {
u8 targets; /* GICv2 target VCPUs mask */
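
The new refcount field follows the kernel's standard kref pattern. A minimal sketch of that lifecycle on a hypothetical object (struct foo and foo_release are illustrative names, not part of this patch):

	#include <linux/kernel.h>
	#include <linux/kref.h>
	#include <linux/slab.h>

	struct foo {
		struct kref refcount;
		/* payload ... */
	};

	static void foo_release(struct kref *ref)
	{
		struct foo *f = container_of(ref, struct foo, refcount);

		kfree(f);
	}

	/*
	 * kref_init() starts the count at 1. Every additional user pairs
	 * kref_get() with kref_put(&..., foo_release); the release callback
	 * runs exactly once, when the last reference is dropped.
	 */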
spin_lock_init(&irq->irq_lock);
irq->vcpu = NULL;
irq->target_vcpu = vcpu0;
+ kref_init(&irq->refcount);
if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
irq->targets = 0;
else
irq->mpidr = 0;
irq->vcpu = NULL;
irq->target_vcpu = vcpu;
irq->targets = 1U << vcpu->vcpu_id;
+ kref_init(&irq->refcount);
if (vgic_irq_is_sgi(i)) {
/* SGIs */
irq->enabled = 1;
irq->source |= 1U << source_vcpu->vcpu_id;
vgic_queue_irq_unlock(source_vcpu->kvm, irq);
+ vgic_put_irq(source_vcpu->kvm, irq);
}
}
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
val |= (u64)irq->targets << (i * 8);
+
+ vgic_put_irq(vcpu->kvm, irq);
}
return val;
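
Every MMIO read handler converted here follows the same shape: look the interrupt up, read its state, drop the reference before moving on. A condensed sketch of that shape (vgic_mmio_read_example is a made-up name; the real handlers differ only in which field they read):

	static unsigned long vgic_mmio_read_example(struct kvm_vcpu *vcpu,
						    gpa_t addr, unsigned int len)
	{
		u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
		u64 val = 0;
		int i;

		for (i = 0; i < len; i++) {
			struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

			val |= (u64)irq->targets << (i * 8);

			/* every successful vgic_get_irq() is paired with a put */
			vgic_put_irq(vcpu->kvm, irq);
		}

		return val;
	}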
irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);
spin_unlock(&irq->irq_lock);
+ vgic_put_irq(vcpu->kvm, irq);
}
}
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
val |= (u64)irq->source << (i * 8);
+
+ vgic_put_irq(vcpu->kvm, irq);
}
return val;
}
irq->pending = false;
spin_unlock(&irq->irq_lock);
+ vgic_put_irq(vcpu->kvm, irq);
}
}
} else {
spin_unlock(&irq->irq_lock);
}
+ vgic_put_irq(vcpu->kvm, irq);
}
}
{
int intid = VGIC_ADDR_TO_INTID(addr, 64);
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
+ unsigned long ret = 0;
if (!irq)
return 0;
/* The upper word is RAZ for us. */
- if (addr & 4)
- return 0;
-
- return extract_bytes(READ_ONCE(irq->mpidr), addr & 7, len);
+ if (!(addr & 4))
+ ret = extract_bytes(READ_ONCE(irq->mpidr), addr & 7, len);
+
+ vgic_put_irq(vcpu->kvm, irq);
+ return ret;
}
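
Applied, the read side becomes a single-exit function: the early return is only taken when the lookup itself failed (so there is nothing to put), and every other path funnels through the one vgic_put_irq(). Reconstructed from the hunk above:

	static unsigned long vgic_mmio_read_irouter(struct kvm_vcpu *vcpu,
						    gpa_t addr, unsigned int len)
	{
		int intid = VGIC_ADDR_TO_INTID(addr, 64);
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
		unsigned long ret = 0;

		if (!irq)
			return 0;

		/* The upper word is RAZ for us. */
		if (!(addr & 4))
			ret = extract_bytes(READ_ONCE(irq->mpidr), addr & 7, len);

		vgic_put_irq(vcpu->kvm, irq);
		return ret;
	}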
static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val)
{
int intid = VGIC_ADDR_TO_INTID(addr, 64);
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
-
- if (!irq)
- return;
+ struct vgic_irq *irq;
/* The upper word is WI for us since we don't implement Aff3. */
if (addr & 4)
return;
+ irq = vgic_get_irq(vcpu->kvm, NULL, intid);
+
+ if (!irq)
+ return;
+
spin_lock(&irq->irq_lock);
/* We only care about and preserve Aff0, Aff1 and Aff2. */
irq->mpidr = val & GENMASK(23, 0);
irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);
spin_unlock(&irq->irq_lock);
+ vgic_put_irq(vcpu->kvm, irq);
}
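
Moving the vgic_get_irq() call below the Aff3 check is not just cosmetic: had the lookup stayed first, the write-ignore early return would leak a reference on LPIs. In outline:

	/*
	 * Before (would leak a reference under this scheme):
	 *
	 *	irq = vgic_get_irq(vcpu->kvm, NULL, intid);	// +1 on LPIs
	 *	if (addr & 4)
	 *		return;					// never put back
	 *
	 * After (this hunk): the WI check runs first, so the early
	 * return happens before any reference is taken.
	 */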
static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
irq->pending = true;
vgic_queue_irq_unlock(vcpu->kvm, irq);
+ vgic_put_irq(vcpu->kvm, irq);
}
}
if (irq->enabled)
value |= (1U << i);
+
+ vgic_put_irq(vcpu->kvm, irq);
}
return value;
spin_lock(&irq->irq_lock);
irq->enabled = true;
vgic_queue_irq_unlock(vcpu->kvm, irq);
+
+ vgic_put_irq(vcpu->kvm, irq);
}
}
irq->enabled = false;
spin_unlock(&irq->irq_lock);
+ vgic_put_irq(vcpu->kvm, irq);
}
}
if (irq->pending)
value |= (1U << i);
+
+ vgic_put_irq(vcpu->kvm, irq);
}
return value;
irq->soft_pending = true;
vgic_queue_irq_unlock(vcpu->kvm, irq);
+ vgic_put_irq(vcpu->kvm, irq);
}
}
}
spin_unlock(&irq->irq_lock);
+ vgic_put_irq(vcpu->kvm, irq);
}
}
if (irq->active)
value |= (1U << i);
+
+ vgic_put_irq(vcpu->kvm, irq);
}
return value;
for_each_set_bit(i, &val, len * 8) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vgic_mmio_change_active(vcpu, irq, false);
+ vgic_put_irq(vcpu->kvm, irq);
}
vgic_change_active_finish(vcpu, intid);
}
for_each_set_bit(i, &val, len * 8) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vgic_mmio_change_active(vcpu, irq, true);
+ vgic_put_irq(vcpu->kvm, irq);
}
vgic_change_active_finish(vcpu, intid);
}
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
val |= (u64)irq->priority << (i * 8);
+
+ vgic_put_irq(vcpu->kvm, irq);
}
return val;
/* Narrow the priority range to what we actually support */
irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
spin_unlock(&irq->irq_lock);
+
+ vgic_put_irq(vcpu->kvm, irq);
}
}
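
The narrowing on the priority write side is worth unpacking. Assuming VGIC_PRI_BITS is 5 (the value the vgic uses), the mask keeps only the top five priority bits:

	/*
	 * GENMASK(7, 8 - VGIC_PRI_BITS) == GENMASK(7, 3) == 0xf8
	 *
	 * A guest write of 0xad is therefore stored as:
	 *
	 *	0xad & 0xf8 == 0xa8
	 *
	 * i.e. the low three bits read back as zero, as they would on
	 * hardware implementing 32 priority levels.
	 */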
if (irq->config == VGIC_CONFIG_EDGE)
value |= (2U << (i * 2));
+
+ vgic_put_irq(vcpu->kvm, irq);
}
return value;
int i;
for (i = 0; i < len * 4; i++) {
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ struct vgic_irq *irq;
/*
* The configuration cannot be changed for SGIs in general,
if (intid + i < VGIC_NR_PRIVATE_IRQS)
continue;
+ irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
spin_lock(&irq->irq_lock);
+
if (test_bit(i * 2 + 1, &val)) {
irq->config = VGIC_CONFIG_EDGE;
} else {
irq->config = VGIC_CONFIG_LEVEL;
irq->pending = irq->line_level | irq->soft_pending;
}
+
spin_unlock(&irq->irq_lock);
+ vgic_put_irq(vcpu->kvm, irq);
}
}
}
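
The loop bound of len * 4 follows from ICFGR packing two configuration bits per interrupt, of which only the upper (odd-positioned) bit carries the edge/level selection. A quick decode example:

	/*
	 * A 4-byte access covers len * 4 == 16 interrupts. Interrupt i
	 * within the access owns bits [i*2+1 : i*2] of val:
	 *
	 *	val = 0x00000008;		// only bit 3 set
	 *	test_bit(1 * 2 + 1, &val)	// bit 3 set -> VGIC_CONFIG_EDGE
	 *	test_bit(0 * 2 + 1, &val)	// bit 1 clear -> VGIC_CONFIG_LEVEL
	 */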
spin_unlock(&irq->irq_lock);
+ vgic_put_irq(vcpu->kvm, irq);
}
}
}
spin_unlock(&irq->irq_lock);
+ vgic_put_irq(vcpu->kvm, irq);
}
}
return NULL;
}
+static void vgic_get_irq_kref(struct vgic_irq *irq)
+{
+ if (irq->intid < VGIC_MIN_LPI)
+ return;
+
+ kref_get(&irq->refcount);
+}
+
+/* The refcount should never drop to 0 at the moment. */
+static void vgic_irq_release(struct kref *ref)
+{
+ WARN_ON(1);
+}
+
+void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
+{
+ if (irq->intid < VGIC_MIN_LPI)
+ return;
+
+ kref_put(&irq->refcount, vgic_irq_release);
+}
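
For anything below VGIC_MIN_LPI both helpers return immediately, so the SGI/PPI/SPI paths pay nothing for the new discipline. The contract the rest of the patch enforces, sketched from a caller's point of view:

	struct vgic_irq *irq = vgic_get_irq(kvm, vcpu, intid);

	if (!irq)
		return;			/* lookup failed: no reference taken */

	spin_lock(&irq->irq_lock);
	/* ... read or modify the interrupt state ... */
	spin_unlock(&irq->irq_lock);

	vgic_put_irq(kvm, irq);		/* no-op for intid < VGIC_MIN_LPI */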
+
/**
* kvm_vgic_target_oracle - compute the target vcpu for an irq
*
goto retry;
}
+ /*
+ * Grab a reference to the irq to reflect the fact that it is
+ * now in the ap_list.
+ */
+ vgic_get_irq_kref(irq);
list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
irq->vcpu = vcpu;
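
The reference is taken together with the list insertion, while the locks are held, so there is no window in which the ap_list points at an interrupt whose count could reach zero. In outline:

	/*
	 * Inside vgic_queue_irq_unlock(), with the locks held:
	 *
	 *	vgic_get_irq_kref(irq);		// the ap_list's own reference
	 *	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
	 *	irq->vcpu = vcpu;
	 *
	 * After the locks are dropped, the caller may vgic_put_irq() its
	 * own reference; a queued LPI stays alive through the list's.
	 */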
if (!irq)
return -EINVAL;
- if (irq->hw != mapped_irq)
+ if (irq->hw != mapped_irq) {
+ vgic_put_irq(kvm, irq);
return -EINVAL;
+ }
spin_lock(&irq->irq_lock);
if (!vgic_validate_injection(irq, level)) {
/* Nothing to see here, move along... */
spin_unlock(&irq->irq_lock);
+ vgic_put_irq(kvm, irq);
return 0;
}
}
vgic_queue_irq_unlock(kvm, irq);
+ vgic_put_irq(kvm, irq);
return 0;
}
irq->hwintid = phys_irq;
spin_unlock(&irq->irq_lock);
+ vgic_put_irq(vcpu->kvm, irq);
return 0;
}
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
{
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
-
- BUG_ON(!irq);
+ struct vgic_irq *irq;
if (!vgic_initialized(vcpu->kvm))
return -EAGAIN;
+ irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+ BUG_ON(!irq);
+
spin_lock(&irq->irq_lock);
irq->hw = false;
irq->hwintid = 0;
spin_unlock(&irq->irq_lock);
+ vgic_put_irq(vcpu->kvm, irq);
return 0;
}
list_del(&irq->ap_list);
irq->vcpu = NULL;
spin_unlock(&irq->irq_lock);
+
+ /*
+ * This vgic_put_irq call matches the
+ * vgic_get_irq_kref in vgic_queue_irq_unlock,
+ * where we added the LPI to the ap_list. As
+ * we remove the irq from the list, we also
+ * drop the refcount.
+ */
+ vgic_put_irq(vcpu->kvm, irq);
continue;
}
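
Putting the two halves together, the count on an LPI evolves as follows while it transits the ap_list (assuming a baseline of 1 from kref_init()):

	/*
	 *	vgic_get_irq()           1 -> 2   caller's reference
	 *	vgic_queue_irq_unlock()
	 *	  vgic_get_irq_kref()    2 -> 3   ap_list's reference
	 *	vgic_put_irq()           3 -> 2   caller done
	 *	...
	 *	vgic_prune_ap_list()
	 *	  list_del() + put       2 -> 1   dequeued, back to baseline
	 */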
spin_lock(&irq->irq_lock);
map_is_active = irq->hw && irq->active;
spin_unlock(&irq->irq_lock);
+ vgic_put_irq(vcpu->kvm, irq);
return map_is_active;
}
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
u32 intid);
+void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq);
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq);
void vgic_kick_vcpus(struct kvm *kvm);