KVM: x86: use guest_exit_irqoff
author    Paolo Bonzini <pbonzini@redhat.com>
          Wed, 15 Jun 2016 13:23:11 +0000 (15:23 +0200)
committer Paolo Bonzini <pbonzini@redhat.com>
          Fri, 1 Jul 2016 09:03:38 +0000 (11:03 +0200)
This gains a few clock cycles per vmexit.  On Intel there is no longer
any need to enable interrupts in vmx_handle_external_intr, since we are
using the "acknowledge interrupt on exit" feature.  AMD still needs to
enable interrupts briefly, and must be careful that the window is not
swallowed by the STI interrupt shadow.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
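
In outline, the change moves the guest-time accounting to run before
interrupts are re-enabled on the exit path.  A rough before/after sketch
of the affected stretch of vcpu_enter_guest (simplified for illustration;
the placement of handle_external_intr is assumed from the surrounding
code, not shown in the hunks below):

    /* Before: interrupts came back on inside handle_external_intr(). */
    kvm_x86_ops->handle_external_intr(vcpu);  /* returned with IRQs on */
    ++vcpu->stat.exits;                       /* breaks the STI shadow */
    barrier();
    guest_exit();                             /* saved/restored flags itself */

    /* After: everything runs with IRQs off; they are enabled once, at the end. */
    kvm_x86_ops->handle_external_intr(vcpu);  /* returns with IRQs off */
    ++vcpu->stat.exits;
    guest_exit_irqoff();                      /* accounting, no flags juggling */
    local_irq_enable();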
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 5ff2927..5bfdbbf 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4935,6 +4935,12 @@ out:
 static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
 {
        local_irq_enable();
+       /*
+        * We must have an instruction with interrupts enabled, so
+        * the timer interrupt isn't delayed by the interrupt shadow.
+        */
+       asm("nop");
+       local_irq_disable();
 }
 
 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
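
For context on the new svm.c comment: x86's sti has a one-instruction
interrupt shadow, so maskable interrupts are delivered only after the
instruction following sti retires.  A bare local_irq_enable() immediately
followed by local_irq_disable() could therefore open no interrupt window
at all; the nop guarantees exactly one instruction during which a pending
host interrupt (such as the timer tick that guest time accounting relies
on) can be taken.  Stripped of the irqflags-tracing and paravirt plumbing,
the sequence boils down to (illustrative sketch, not the compiler's
actual output):

    /*
     * sti    IF=1, but delivery is shadowed for one more instruction
     * nop    shadow over; a pending interrupt can fire here
     * cli    IF=0 again
     */
    asm volatile("sti; nop; cli" ::: "memory");

The real code goes through local_irq_enable()/local_irq_disable() rather
than raw sti/cli so that irqflags tracing and paravirt hooks still see
the transitions.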
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1b413a5..c1d655c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8574,7 +8574,6 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
                        "push %[sp]\n\t"
 #endif
                        "pushf\n\t"
-                       "orl $0x200, (%%" _ASM_SP ")\n\t"
                        __ASM_SIZE(push) " $%c[cs]\n\t"
                        "call *%[entry]\n\t"
                        :
@@ -8587,8 +8586,7 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
                        [ss]"i"(__KERNEL_DS),
                        [cs]"i"(__KERNEL_CS)
                        );
-       } else
-               local_irq_enable();
+       }
 }
 
 static bool vmx_has_high_real_mode_segbase(void)
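
Two things happen in the vmx.c hunks.  The deleted orl forced bit 9
(X86_EFLAGS_IF, 0x200) on in the FLAGS image that pushf saved, so the
iret at the end of the invoked interrupt handler used to return with
interrupts enabled; without it the handler returns with interrupts still
off.  And the deleted else branch no longer re-enables interrupts when
the exit was not caused by an external interrupt, since with "acknowledge
interrupt on exit" the pending vector is already latched at vmexit and
dispatched by this manual call.  The inline asm hand-builds the interrupt
frame that the handler's iret will unwind; roughly, for the x86-64 case
(simplified sketch based on the context lines above):

    push  $__KERNEL_DS    /* 64-bit only: SS slot of the iret frame */
    push  %[sp]           /* saved stack pointer */
    pushf                 /* FLAGS image; IF no longer forced to 1 */
    push  $__KERNEL_CS    /* CS slot */
    call  *%[entry]       /* pushes return RIP, enters the IDT handler */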
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 618463a..0cc6cf8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6709,16 +6709,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
        ++vcpu->stat.exits;
 
-       /*
-        * We must have an instruction between local_irq_enable() and
-        * kvm_guest_exit(), so the timer interrupt isn't delayed by
-        * the interrupt shadow.  The stat.exits increment will do nicely.
-        * But we need to prevent reordering, hence this barrier():
-        */
-       barrier();
-
-       guest_exit();
+       guest_exit_irqoff();
 
+       local_irq_enable();
        preempt_enable();
 
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
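
The deleted comment and barrier() are obsolete because nothing here
executes between an sti and the accounting anymore; that interrupt-shadow
dance now lives in svm_handle_external_intr above.  The cycle saving
falls out of guest_exit()'s definition, which is just an irq-safe wrapper
around guest_exit_irqoff(), roughly like this (paraphrased from
include/linux/context_tracking.h of this era; details may differ):

    static inline void guest_exit(void)
    {
            unsigned long flags;

            local_irq_save(flags);    /* pushf/pop plus cli */
            guest_exit_irqoff();      /* vtime accounting + context tracking exit */
            local_irq_restore(flags);
    }

vcpu_enter_guest reaches this point with interrupts already disabled, so
calling the _irqoff variant directly drops the redundant flags
save/restore, and the new local_irq_enable() turns interrupts back on
exactly once; those are the few clock cycles per vmexit the changelog
mentions.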