Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 69b61ab..39f3358 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -33,10 +33,17 @@ struct vgic_global __section(.hyp.text) kvm_vgic_global_state;
 
 /*
  * Locking order is always:
- *   vgic_cpu->ap_list_lock
- *     vgic_irq->irq_lock
+ * its->cmd_lock (mutex)
+ *   its->its_lock (mutex)
+ *     vgic_cpu->ap_list_lock
+ *       kvm->lpi_list_lock
+ *         vgic_irq->irq_lock
  *
- * (that is, always take the ap_list_lock before the struct vgic_irq lock).
+ * If you need to take multiple locks, always take the upper lock first,
+ * then the lower ones, e.g. first take the its_lock, then the irq_lock.
+ * If you are already holding a lock and need to take a higher one, you
+ * have to drop the lower-ranking lock first and re-acquire it after having
+ * taken the upper one.
  *
  * When taking more than one ap_list_lock at the same time, always take the
  * lowest numbered VCPU's ap_list_lock first, so:
@@ -45,6 +52,41 @@ struct vgic_global __section(.hyp.text) kvm_vgic_global_state;
  *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
  */
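
To make the drop-and-re-acquire rule above concrete, here is a minimal
sketch (illustration only, not part of this patch; the helper name is
made up) of needing the lpi_list_lock while an irq_lock is already held:

	/* Hypothetical helper, for illustration only. */
	static void vgic_reorder_locks_sketch(struct vgic_dist *dist,
					      struct vgic_irq *irq)
	{
		spin_lock(&irq->irq_lock);

		/*
		 * Suppose we now need the lpi_list_lock, which ranks
		 * above irq_lock: drop the lower-ranking lock first ...
		 */
		spin_unlock(&irq->irq_lock);

		/* ... take the upper lock, then re-acquire the lower one. */
		spin_lock(&dist->lpi_list_lock);
		spin_lock(&irq->irq_lock);

		/* ... critical section under both locks ... */

		spin_unlock(&irq->irq_lock);
		spin_unlock(&dist->lpi_list_lock);
	}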
 
+/*
+ * Iterate over the VM's list of mapped LPIs to find the one with a
+ * matching interrupt ID and return a reference to the IRQ structure.
+ */
+static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
+{
+       struct vgic_dist *dist = &kvm->arch.vgic;
+       struct vgic_irq *irq = NULL;
+
+       spin_lock(&dist->lpi_list_lock);
+
+       list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
+               if (irq->intid != intid)
+                       continue;
+
+               /*
+                * This increases the refcount, the caller is expected to
+                * call vgic_put_irq() later once it's finished with the IRQ.
+                */
+               vgic_get_irq_kref(irq);
+               goto out_unlock;
+       }
+       irq = NULL;
+
+out_unlock:
+       spin_unlock(&dist->lpi_list_lock);
+
+       return irq;
+}
+
+/*
+ * This looks up the virtual interrupt ID to get the corresponding
+ * struct vgic_irq. It also increases the refcount, so any caller is expected
+ * to call vgic_put_irq() once it's finished with this IRQ.
+ */
 struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
                              u32 intid)
 {
@@ -56,14 +98,43 @@ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
        if (intid <= VGIC_MAX_SPI)
                return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
 
-       /* LPIs are not yet covered */
+       /* LPIs */
        if (intid >= VGIC_MIN_LPI)
-               return NULL;
+               return vgic_get_lpi(kvm, intid);
 
        WARN(1, "Looking up struct vgic_irq for reserved INTID");
        return NULL;
 }
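
As a usage note on the refcounting contract described above, a hedged
sketch of a typical caller (the helper is hypothetical; irq_lock and
irq->hw are fields used elsewhere in this file):

	/* Hypothetical caller, for illustration only. */
	static bool vgic_irq_is_hw_sketch(struct kvm *kvm,
					  struct kvm_vcpu *vcpu, u32 intid)
	{
		struct vgic_irq *irq = vgic_get_irq(kvm, vcpu, intid);
		bool hw;

		if (!irq)
			return false;

		spin_lock(&irq->irq_lock);
		hw = irq->hw;
		spin_unlock(&irq->irq_lock);

		/* Balance the reference taken by vgic_get_irq(). */
		vgic_put_irq(kvm, irq);

		return hw;
	}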
 
+/*
+ * We can't do anything in here, because we lack the kvm pointer to
+ * lock and remove the item from the lpi_list. So we keep this function
+ * empty and use the return value of kref_put() to trigger the freeing.
+ */
+static void vgic_irq_release(struct kref *ref)
+{
+}
+
+void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
+{
+       struct vgic_dist *dist;
+
+       if (irq->intid < VGIC_MIN_LPI)
+               return;
+
+       if (!kref_put(&irq->refcount, vgic_irq_release))
+               return;
+
+       dist = &kvm->arch.vgic;
+
+       spin_lock(&dist->lpi_list_lock);
+       list_del(&irq->lpi_list);
+       dist->lpi_list_count--;
+       spin_unlock(&dist->lpi_list_lock);
+
+       kfree(irq);
+}
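
The empty-release idiom above generalizes. A self-contained sketch
(hypothetical object type, not from this patch) of keying teardown off
kref_put()'s return value, which is 1 once the refcount hits zero:

	#include <linux/kref.h>
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct obj {
		struct kref refcount;
		struct list_head node;
	};

	static void obj_release_noop(struct kref *ref)
	{
		/* Freeing needs the owning list's lock, so it happens in the caller. */
	}

	static void obj_put(struct obj *o, spinlock_t *list_lock)
	{
		if (!kref_put(&o->refcount, obj_release_noop))
			return;		/* other references remain */

		spin_lock(list_lock);
		list_del(&o->node);
		spin_unlock(list_lock);
		kfree(o);
	}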
+
 /**
  * kvm_vgic_target_oracle - compute the target vcpu for an irq
  *
@@ -236,6 +307,11 @@ retry:
                goto retry;
        }
 
+       /*
+        * Grab a reference to the irq to reflect the fact that it is
+        * now in the ap_list.
+        */
+       vgic_get_irq_kref(irq);
        list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
        irq->vcpu = vcpu;
 
@@ -269,14 +345,17 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
        if (!irq)
                return -EINVAL;
 
-       if (irq->hw != mapped_irq)
+       if (irq->hw != mapped_irq) {
+               vgic_put_irq(kvm, irq);
                return -EINVAL;
+       }
 
        spin_lock(&irq->irq_lock);
 
        if (!vgic_validate_injection(irq, level)) {
                /* Nothing to see here, move along... */
                spin_unlock(&irq->irq_lock);
+               vgic_put_irq(kvm, irq);
                return 0;
        }
 
@@ -288,6 +367,7 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
        }
 
        vgic_queue_irq_unlock(kvm, irq);
+       vgic_put_irq(kvm, irq);
 
        return 0;
 }
@@ -330,25 +410,28 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq)
        irq->hwintid = phys_irq;
 
        spin_unlock(&irq->irq_lock);
+       vgic_put_irq(vcpu->kvm, irq);
 
        return 0;
 }
 
 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
 {
-       struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
-
-       BUG_ON(!irq);
+       struct vgic_irq *irq;
 
        if (!vgic_initialized(vcpu->kvm))
                return -EAGAIN;
 
+       irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+       BUG_ON(!irq);
+
        spin_lock(&irq->irq_lock);
 
        irq->hw = false;
        irq->hwintid = 0;
 
        spin_unlock(&irq->irq_lock);
+       vgic_put_irq(vcpu->kvm, irq);
 
        return 0;
 }
@@ -386,6 +469,15 @@ retry:
                        list_del(&irq->ap_list);
                        irq->vcpu = NULL;
                        spin_unlock(&irq->irq_lock);
+
+                       /*
+                        * This vgic_put_irq call matches the
+                        * vgic_get_irq_kref in vgic_queue_irq_unlock,
+                        * where we added the LPI to the ap_list. As
+                        * we remove the irq from the list, we also
+                        * drop the refcount.
+                        */
+                       vgic_put_irq(vcpu->kvm, irq);
                        continue;
                }
 
@@ -614,6 +706,15 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
        spin_lock(&irq->irq_lock);
        map_is_active = irq->hw && irq->active;
        spin_unlock(&irq->irq_lock);
+       vgic_put_irq(vcpu->kvm, irq);
 
        return map_is_active;
 }
+
+int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
+{
+       if (vgic_has_its(kvm))
+               return vgic_its_inject_msi(kvm, msi);
+       else
+               return -ENODEV;
+}
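
kvm_send_userspace_msi() is reached from the KVM_SIGNAL_MSI vm ioctl,
so from userspace an MSI injection through the emulated ITS looks
roughly like the sketch below (the doorbell address, device ID and
event ID are invented for the example; it assumes a kernel exposing
KVM_MSI_VALID_DEVID):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static int inject_msi_sketch(int vm_fd)
	{
		struct kvm_msi msi = {
			.address_lo = 0x08080040,	/* hypothetical GITS_TRANSLATER address */
			.address_hi = 0,
			.data       = 42,		/* hypothetical event ID */
			.flags      = KVM_MSI_VALID_DEVID,
			.devid      = 0x0010,		/* hypothetical device ID */
		};

		/* Returns -ENODEV when the VM has no ITS, mirroring the code above. */
		return ioctl(vm_fd, KVM_SIGNAL_MSI, &msi);
	}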