Merge branch 'work.splice_read' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 5a84b45..83037cd 100644
  */
 
 #include <linux/types.h>
+#include <linux/jump_label.h>
+
 #include <asm/kvm_asm.h>
+#include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
 
 static bool __hyp_text __fpsimd_enabled_nvhe(void)
@@ -109,6 +112,15 @@ static hyp_alternate_select(__deactivate_traps_arch,
 
 static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
 {
+       /*
+        * If we pended a virtual abort, preserve it until it gets
+        * cleared. See D1.14.3 (Virtual Interrupts) for details, but
+        * the crucial bit is "On taking a vSError interrupt,
+        * HCR_EL2.VSE is cleared to 0."
+        */
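+       /*
+        * Reading HCR_EL2 back (rather than keeping the cached value)
+        * means a VSE bit the CPU has already consumed stays clear, so
+        * the same abort is not re-pended on the next guest entry.
+        */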
+       if (vcpu->arch.hcr_el2 & HCR_VSE)
+               vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);
+
        __deactivate_traps_arch()();
        write_sysreg(0, hstr_el2);
        write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
@@ -126,17 +138,13 @@ static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
        write_sysreg(0, vttbr_el2);
 }
 
-static hyp_alternate_select(__vgic_call_save_state,
-                           __vgic_v2_save_state, __vgic_v3_save_state,
-                           ARM64_HAS_SYSREG_GIC_CPUIF);
-
-static hyp_alternate_select(__vgic_call_restore_state,
-                           __vgic_v2_restore_state, __vgic_v3_restore_state,
-                           ARM64_HAS_SYSREG_GIC_CPUIF);
-
 static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
 {
-       __vgic_call_save_state()(vcpu);
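+       /*
+        * A static key now selects the GIC backend at run time,
+        * replacing the hyp_alternate_select() indirection removed
+        * above.
+        */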
+       if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+               __vgic_v3_save_state(vcpu);
+       else
+               __vgic_v2_save_state(vcpu);
+
        write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2);
 }
 
@@ -149,7 +157,10 @@ static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
        val |= vcpu->arch.irq_lines;
        write_sysreg(val, hcr_el2);
 
-       __vgic_call_restore_state()(vcpu);
+       if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+               __vgic_v3_restore_state(vcpu);
+       else
+               __vgic_v2_restore_state(vcpu);
 }
 
 static bool __hyp_text __true_value(void)
@@ -232,7 +243,22 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
        return true;
 }
 
-static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
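+/*
+ * Advance the guest PC past the instruction that trapped. AArch32
+ * guests go through kvm_skip_instr32(), which accounts for the 16/32
+ * bit instruction width and updates the IT state bits in SPSR; for
+ * AArch64 the step is always 4 bytes.
+ */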
+static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
+{
+       *vcpu_pc(vcpu) = read_sysreg_el2(elr);
+
+       if (vcpu_mode_is_32bit(vcpu)) {
+               vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
+               kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+               write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
+       } else {
+               *vcpu_pc(vcpu) += 4;
+       }
+
+       write_sysreg_el2(*vcpu_pc(vcpu), elr);
+}
+
+int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpu_context *host_ctxt;
        struct kvm_cpu_context *guest_ctxt;
@@ -267,9 +293,43 @@ again:
        exit_code = __guest_enter(vcpu, host_ctxt);
        /* And we're baaack! */
 
+       /*
+        * We're using the raw exception code in order to only process
+        * the trap if no SError is pending. We will come back to the
+        * same PC once the SError has been injected, and replay the
+        * trapping instruction.
+        */
        if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
                goto again;
 
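+       /*
+        * vgic_v2_cpuif_trap is enabled when the GICV region cannot be
+        * safely mapped into the guest (e.g. it is not page-aligned);
+        * GICV accesses then fault to EL2 and are emulated below.
+        */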
+       if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
+           exit_code == ARM_EXCEPTION_TRAP) {
+               bool valid;
+
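+               /*
+                * Only emulate data aborts from a lower EL that are
+                * stage-2 translation faults with valid syndrome info
+                * (ISV set), and that are neither external aborts nor
+                * stage-1 page table walks. Whether the access actually
+                * hits the GICV range is checked by
+                * __vgic_v2_perform_cpuif_access() itself.
+                */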
+               valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
+                       kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
+                       kvm_vcpu_dabt_isvalid(vcpu) &&
+                       !kvm_vcpu_dabt_isextabt(vcpu) &&
+                       !kvm_vcpu_dabt_iss1tw(vcpu);
+
+               if (valid) {
+                       int ret = __vgic_v2_perform_cpuif_access(vcpu);
+
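+                       /* 1: access emulated; skip it and re-enter the guest */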
+                       if (ret == 1) {
+                               __skip_instr(vcpu);
+                               goto again;
+                       }
+
+                       if (ret == -1) {
+                               /* Promote an illegal access to an SError */
+                               __skip_instr(vcpu);
+                               exit_code = ARM_EXCEPTION_EL1_SERROR;
+                       }
+
+                       /* 0 falls through to be handled out of EL2 */
+               }
+       }
+
        fp_enabled = __fpsimd_enabled();
 
        __sysreg_save_guest_state(guest_ctxt);
@@ -293,8 +353,6 @@ again:
        return exit_code;
 }
 
-__alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
-
 static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
 
 static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)