KVM: Add kvm_inject_realmode_interrupt() wrapper
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4196fc7..7d28805 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -284,6 +284,8 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
        u32 prev_nr;
        int class1, class2;
 
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
+
        if (!vcpu->arch.exception.pending) {
        queue:
                vcpu->arch.exception.pending = true;
@@ -338,8 +340,19 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu)
        kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
 }
 
+void kvm_propagate_fault(struct kvm_vcpu *vcpu)
+{
+       if (mmu_is_nested(vcpu) && !vcpu->arch.fault.nested)
+               vcpu->arch.nested_mmu.inject_page_fault(vcpu);
+       else
+               vcpu->arch.mmu.inject_page_fault(vcpu);
+
+       vcpu->arch.fault.nested = false;
+}
+
 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
 {
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
        vcpu->arch.nmi_pending = 1;
 }
 EXPORT_SYMBOL_GPL(kvm_inject_nmi);
@@ -369,19 +382,50 @@ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
 }
 EXPORT_SYMBOL_GPL(kvm_require_cpl);
 
+/*
+ * This function is used to read from the physical memory of the currently
+ * running guest. Unlike kvm_read_guest_page, it can read either from guest
+ * physical memory or from the guest's guest-physical memory (nested guests).
+ */
+int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+                           gfn_t ngfn, void *data, int offset, int len,
+                           u32 access)
+{
+       gfn_t real_gfn;
+       gpa_t ngpa;
+
+       ngpa     = gfn_to_gpa(ngfn);
+       real_gfn = mmu->translate_gpa(vcpu, ngpa, access);
+       if (real_gfn == UNMAPPED_GVA)
+               return -EFAULT;
+
+       real_gfn = gpa_to_gfn(real_gfn);
+
+       return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len);
+}
+EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
+
+int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
+                              void *data, int offset, int len, u32 access)
+{
+       return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
+                                      data, offset, len, access);
+}
+
 /*
  * Load the pae pdptrs.  Return true if they are all valid.
  */
-int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
+int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
 {
        gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
        unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
        int i;
        int ret;
-       u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
+       u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
 
-       ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
-                                 offset * sizeof(u64), sizeof(pdpte));
+       ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
+                                     offset * sizeof(u64), sizeof(pdpte),
+                                     PFERR_USER_MASK|PFERR_WRITE_MASK);
        if (ret < 0) {
                ret = 0;
                goto out;
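
For reference, the offset computation in load_pdptrs() above packs the PDPT's
position within its page into u64 slots; a worked example with an illustrative
CR3 value (PAE mode, the PDPT is 32-byte aligned):

    cr3      = 0x12345020
    pdpt_gfn = cr3 >> PAGE_SHIFT                   = 0x12345
    offset   = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2   = (0x20 >> 5) << 2 = 4
    byte offset of the read = offset * sizeof(u64) = 32 = 0x20

so the four 8-byte PDPTEs at guest-physical 0x12345020..0x1234503f are read,
now via kvm_read_guest_page_mmu() so that a nested guest's CR3 is interpreted
in that guest's own guest-physical address space.
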
@@ -395,7 +439,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
        }
        ret = 1;
 
-       memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
+       memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
        __set_bit(VCPU_EXREG_PDPTR,
                  (unsigned long *)&vcpu->arch.regs_avail);
        __set_bit(VCPU_EXREG_PDPTR,
@@ -408,8 +452,10 @@ EXPORT_SYMBOL_GPL(load_pdptrs);
 
 static bool pdptrs_changed(struct kvm_vcpu *vcpu)
 {
-       u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
+       u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
        bool changed = true;
+       int offset;
+       gfn_t gfn;
        int r;
 
        if (is_long_mode(vcpu) || !is_pae(vcpu))
@@ -419,10 +465,13 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
                      (unsigned long *)&vcpu->arch.regs_avail))
                return true;
 
-       r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
+       gfn = (vcpu->arch.cr3 & ~31u) >> PAGE_SHIFT;
+       offset = (vcpu->arch.cr3 & ~31u) & (PAGE_SIZE - 1);
+       r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
+                                      PFERR_USER_MASK | PFERR_WRITE_MASK);
        if (r < 0)
                goto out;
-       changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
+       changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
 out:
 
        return changed;
@@ -461,7 +510,8 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                                return 1;
                } else
 #endif
-               if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3))
+               if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
+                                                vcpu->arch.cr3))
                        return 1;
        }
 
@@ -550,7 +600,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                        return 1;
        } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
                   && ((cr4 ^ old_cr4) & pdptr_bits)
-                  && !load_pdptrs(vcpu, vcpu->arch.cr3))
+                  && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3))
                return 1;
 
        if (cr4 & X86_CR4_VMXE)
@@ -583,7 +633,8 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
                if (is_pae(vcpu)) {
                        if (cr3 & CR3_PAE_RESERVED_BITS)
                                return 1;
-                       if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3))
+                       if (is_paging(vcpu) &&
+                           !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
                                return 1;
                }
                /*
@@ -1044,6 +1095,7 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
        vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
        vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
        vcpu->last_kernel_ns = kernel_ns;
+       vcpu->last_guest_tsc = tsc_timestamp;
        vcpu->hv_clock.flags = 0;
 
        /*
@@ -2155,7 +2207,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX);
        /* cpuid 0x80000001.ecx */
        const u32 kvm_supported_word6_x86_features =
-               F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
+               F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
                F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
                F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
                0 /* SKINIT */ | 0 /* WDT */;
@@ -2364,6 +2416,7 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
                return -ENXIO;
 
        kvm_queue_interrupt(vcpu, irq->irq, false);
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
 
        return 0;
 }
@@ -2517,6 +2570,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
        if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
                vcpu->arch.sipi_vector = events->sipi_vector;
 
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
+
        return 0;
 }
 
@@ -2956,18 +3011,18 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
        r = 0;
        switch (chip->chip_id) {
        case KVM_IRQCHIP_PIC_MASTER:
-               raw_spin_lock(&pic_irqchip(kvm)->lock);
+               spin_lock(&pic_irqchip(kvm)->lock);
                memcpy(&pic_irqchip(kvm)->pics[0],
                        &chip->chip.pic,
                        sizeof(struct kvm_pic_state));
-               raw_spin_unlock(&pic_irqchip(kvm)->lock);
+               spin_unlock(&pic_irqchip(kvm)->lock);
                break;
        case KVM_IRQCHIP_PIC_SLAVE:
-               raw_spin_lock(&pic_irqchip(kvm)->lock);
+               spin_lock(&pic_irqchip(kvm)->lock);
                memcpy(&pic_irqchip(kvm)->pics[1],
                        &chip->chip.pic,
                        sizeof(struct kvm_pic_state));
-               raw_spin_unlock(&pic_irqchip(kvm)->lock);
+               spin_unlock(&pic_irqchip(kvm)->lock);
                break;
        case KVM_IRQCHIP_IOAPIC:
                r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
@@ -3453,6 +3508,22 @@ static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
        return gpa;
 }
 
+static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
+{
+       gpa_t t_gpa;
+       u32 error;
+
+       BUG_ON(!mmu_is_nested(vcpu));
+
+       /* NPT walks are always user-walks */
+       access |= PFERR_USER_MASK;
+       t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &error);
+       if (t_gpa == UNMAPPED_GVA)
+               vcpu->arch.fault.nested = true;
+
+       return t_gpa;
+}
+
 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
 {
        u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
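
The two translate_gpa() implementations above are what kvm_read_guest_page_mmu()
(earlier in this patch) dispatches through: the plain translate_gpa() simply
returns its argument, while translate_nested_gpa() resolves an L2 guest-physical
address through the L1 paging structures and flags a nested fault on failure.
A minimal sketch of the resulting behaviour, assuming walk_mmu is pointed at the
nested MMU while a nested guest runs (that wiring belongs to later patches);
example_read() is a hypothetical helper, not upstream code:

    static int example_read(struct kvm_vcpu *vcpu, gfn_t gfn, void *buf, int len)
    {
            /*
             * Ordinary guest: walk_mmu == &vcpu->arch.mmu and translate_gpa()
             * is the identity above, so this behaves like kvm_read_guest_page().
             * Nested guest: walk_mmu would use translate_nested_gpa(), adding
             * the extra L2->L1 translation step (and setting fault.nested on
             * failure).
             */
            return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
                                           buf, 0, len, PFERR_USER_MASK);
    }
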
@@ -4088,7 +4159,7 @@ static void inject_emulated_exception(struct kvm_vcpu *vcpu)
 {
        struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
        if (ctxt->exception == PF_VECTOR)
-               kvm_inject_page_fault(vcpu);
+               kvm_propagate_fault(vcpu);
        else if (ctxt->error_code_valid)
                kvm_queue_exception_e(vcpu, ctxt->exception, ctxt->error_code);
        else
@@ -4117,6 +4188,35 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
        memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
 }
 
+int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq)
+{
+       struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
+       int ret;
+
+       init_emulate_ctxt(vcpu);
+
+       vcpu->arch.emulate_ctxt.decode.op_bytes = 2;
+       vcpu->arch.emulate_ctxt.decode.ad_bytes = 2;
+       vcpu->arch.emulate_ctxt.decode.eip = vcpu->arch.emulate_ctxt.eip;
+       ret = emulate_int_real(&vcpu->arch.emulate_ctxt, &emulate_ops, irq);
+
+       if (ret != X86EMUL_CONTINUE)
+               return EMULATE_FAIL;
+
+       vcpu->arch.emulate_ctxt.eip = c->eip;
+       memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
+       kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
+       kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+
+       if (irq == NMI_VECTOR)
+               vcpu->arch.nmi_pending = false;
+       else
+               vcpu->arch.interrupt.pending = false;
+
+       return EMULATE_DONE;
+}
+EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
+
 static int handle_emulation_failure(struct kvm_vcpu *vcpu)
 {
        ++vcpu->stat.insn_emulation_fail;
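
kvm_inject_realmode_interrupt() exists so that vendor code which cannot use
hardware event injection while the vcpu is in (emulated) real mode can fall
back to emulating the interrupt dispatch through the real-mode IVT. A hedged
sketch of such a caller; the mode check and the example_* helpers are
illustrative assumptions, not upstream code:

    static void example_inject_irq(struct kvm_vcpu *vcpu, int irq)
    {
            if (example_vcpu_in_emulated_real_mode(vcpu)) {
                    /* Emulate the INT dispatch; on failure give up and
                     * request a triple fault rather than limp along. */
                    if (kvm_inject_realmode_interrupt(vcpu, irq) != EMULATE_DONE)
                            kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
                    return;
            }
            example_hardware_inject_irq(vcpu, irq);
    }
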
@@ -4179,6 +4279,9 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
                vcpu->arch.emulate_ctxt.perm_ok = false;
 
                r = x86_decode_insn(&vcpu->arch.emulate_ctxt);
+               if (r == X86EMUL_PROPAGATE_FAULT)
+                       goto done;
+
                trace_kvm_emulate_insn_start(vcpu);
 
                /* Only allow emulation of specific instructions on #UD
@@ -4237,6 +4340,7 @@ restart:
                return handle_emulation_failure(vcpu);
        }
 
+done:
        if (vcpu->arch.emulate_ctxt.exception >= 0) {
                inject_emulated_exception(vcpu);
                r = EMULATE_DONE;
@@ -4255,6 +4359,7 @@ restart:
 
        toggle_interruptibility(vcpu, vcpu->arch.emulate_ctxt.interruptibility);
        kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
        memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
        kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
 
@@ -4959,6 +5064,21 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        if (unlikely(r))
                goto out;
 
+       if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
+               inject_pending_event(vcpu);
+
+               /* enable NMI/IRQ window open exits if needed */
+               if (vcpu->arch.nmi_pending)
+                       kvm_x86_ops->enable_nmi_window(vcpu);
+               else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+                       kvm_x86_ops->enable_irq_window(vcpu);
+
+               if (kvm_lapic_enabled(vcpu)) {
+                       update_cr8_intercept(vcpu);
+                       kvm_lapic_sync_to_vapic(vcpu);
+               }
+       }
+
        preempt_disable();
 
        kvm_x86_ops->prepare_guest_switch(vcpu);
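
The kvm_check_request() gate above pairs with the kvm_make_request(KVM_REQ_EVENT,
...) calls sprinkled through this patch: producers set a bit in vcpu->requests
whenever exception, NMI, interrupt or APIC state changes, and vcpu_enter_guest()
consumes that bit so that inject_pending_event() and the window-enabling logic
only run when something actually changed (or an interrupt window was explicitly
requested). A minimal sketch of that pairing, assuming the generic helpers behave
as shown; the example_* names are not the upstream definitions:

    static inline void example_make_request(int req, struct kvm_vcpu *vcpu)
    {
            set_bit(req, &vcpu->requests);           /* producer side */
    }

    static inline bool example_check_request(int req, struct kvm_vcpu *vcpu)
    {
            if (test_bit(req, &vcpu->requests)) {
                    clear_bit(req, &vcpu->requests); /* consume before entry */
                    return true;
            }
            return false;
    }

If guest entry is aborted after this point, the new kvm_x86_ops->cancel_injection()
callback in the next hunk gives the vendor backend a chance to take back an event
it has already programmed for injection.
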
@@ -4977,23 +5097,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                smp_wmb();
                local_irq_enable();
                preempt_enable();
+               kvm_x86_ops->cancel_injection(vcpu);
                r = 1;
                goto out;
        }
 
-       inject_pending_event(vcpu);
-
-       /* enable NMI/IRQ window open exits if needed */
-       if (vcpu->arch.nmi_pending)
-               kvm_x86_ops->enable_nmi_window(vcpu);
-       else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
-               kvm_x86_ops->enable_irq_window(vcpu);
-
-       if (kvm_lapic_enabled(vcpu)) {
-               update_cr8_intercept(vcpu);
-               kvm_lapic_sync_to_vapic(vcpu);
-       }
-
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 
        kvm_guest_enter();
@@ -5231,6 +5339,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 
        vcpu->arch.exception.pending = false;
 
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
+
        return 0;
 }
 
@@ -5294,6 +5404,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
 {
        vcpu->arch.mp_state = mp_state->mp_state;
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
        return 0;
 }
 
@@ -5315,6 +5426,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
        memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
        kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
        kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
        return EMULATE_DONE;
 }
 EXPORT_SYMBOL_GPL(kvm_task_switch);
@@ -5350,7 +5462,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
        kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
        if (!is_long_mode(vcpu) && is_pae(vcpu)) {
-               load_pdptrs(vcpu, vcpu->arch.cr3);
+               load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
                mmu_reset_needed = 1;
        }
 
@@ -5385,6 +5497,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
            !is_protmode(vcpu))
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
+
        return 0;
 }
 
@@ -5617,6 +5731,8 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
        vcpu->arch.dr6 = DR6_FIXED_1;
        vcpu->arch.dr7 = DR7_FIXED_1;
 
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
+
        return kvm_x86_ops->vcpu_reset(vcpu);
 }
 
@@ -5668,6 +5784,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        vcpu->arch.walk_mmu = &vcpu->arch.mmu;
        vcpu->arch.mmu.root_hpa = INVALID_PAGE;
        vcpu->arch.mmu.translate_gpa = translate_gpa;
+       vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
        if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
        else
@@ -5926,6 +6043,7 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
            kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
                rflags |= X86_EFLAGS_TF;
        kvm_x86_ops->set_rflags(vcpu, rflags);
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_set_rflags);