/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>

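/*
 * World-switch code shared between the VHE and non-VHE configurations.
 * Everything marked __hyp_text is placed in the hypervisor text section
 * and runs at EL2, so it may only dereference pointers that have been
 * converted with kern_hyp_va().
 */
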
static bool __hyp_text __fpsimd_enabled_nvhe(void)
{
        return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
}

static bool __hyp_text __fpsimd_enabled_vhe(void)
{
        return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN);
}

static hyp_alternate_select(__fpsimd_is_enabled,
                            __fpsimd_enabled_nvhe, __fpsimd_enabled_vhe,
                            ARM64_HAS_VIRT_HOST_EXTN);

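/*
 * hyp_alternate_select() (asm/kvm_hyp.h) emits a tiny helper that returns
 * one of the two function pointers above; the choice is patched in at boot
 * by the alternatives framework, based on the given CPU capability (here
 * ARM64_HAS_VIRT_HOST_EXTN, i.e. VHE). Hence the double-call idiom below:
 * the first () selects the implementation, the second () invokes it.
 */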
bool __hyp_text __fpsimd_enabled(void)
{
        return __fpsimd_is_enabled()();
}

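/*
 * Trap activation differs between the two configurations: with VHE the
 * host kernel itself runs at EL2, so FP/SIMD and trace traps are driven
 * through CPACR_EL1, while the non-VHE path programs CPTR_EL2 directly.
 */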
static void __hyp_text __activate_traps_vhe(void)
{
        u64 val;

        val = read_sysreg(cpacr_el1);
        val |= CPACR_EL1_TTA;
        val &= ~CPACR_EL1_FPEN;
        write_sysreg(val, cpacr_el1);

        write_sysreg(__kvm_hyp_vector, vbar_el1);
}

static void __hyp_text __activate_traps_nvhe(void)
{
        u64 val;

        val = CPTR_EL2_DEFAULT;
        val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
        write_sysreg(val, cptr_el2);
}

static hyp_alternate_select(__activate_traps_arch,
                            __activate_traps_nvhe, __activate_traps_vhe,
                            ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
        u64 val;

        /*
         * We are about to set CPTR_EL2.TFP to trap all floating point
         * register accesses to EL2, however, the ARM ARM clearly states that
         * traps are only taken to EL2 if the operation would not otherwise
         * trap to EL1. Therefore, always make sure that for 32-bit guests,
         * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
         */
        val = vcpu->arch.hcr_el2;
        if (!(val & HCR_RW)) {
                /* AArch32 guest: set FPEXC32_EL2.EN (bit 30) */
                write_sysreg(1 << 30, fpexc32_el2);
                isb();
        }
        write_sysreg(val, hcr_el2);
        /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
        write_sysreg(1 << 15, hstr_el2);
        /* Make sure we trap PMU access from EL0 to EL2 */
        write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
        write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
        __activate_traps_arch()();
}

static void __hyp_text __deactivate_traps_vhe(void)
{
        extern char vectors[];  /* kernel exception vectors */

        write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
        write_sysreg(CPACR_EL1_FPEN, cpacr_el1);
        write_sysreg(vectors, vbar_el1);
}

static void __hyp_text __deactivate_traps_nvhe(void)
{
        write_sysreg(HCR_RW, hcr_el2);
        write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

static hyp_alternate_select(__deactivate_traps_arch,
                            __deactivate_traps_nvhe, __deactivate_traps_vhe,
                            ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
        __deactivate_traps_arch()();
        write_sysreg(0, hstr_el2);
        write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
        write_sysreg(0, pmuserenr_el0);
}

static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = kern_hyp_va(vcpu->kvm);

        write_sysreg(kvm->arch.vttbr, vttbr_el2);
}

static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
        write_sysreg(0, vttbr_el2);
}

static hyp_alternate_select(__vgic_call_save_state,
                            __vgic_v2_save_state, __vgic_v3_save_state,
                            ARM64_HAS_SYSREG_GIC_CPUIF);

static hyp_alternate_select(__vgic_call_restore_state,
                            __vgic_v2_restore_state, __vgic_v3_restore_state,
                            ARM64_HAS_SYSREG_GIC_CPUIF);

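/*
 * While the guest runs, HCR_INT_OVERRIDE (the IMO/FMO routing bits),
 * together with any virtual lines pending in vcpu->arch.irq_lines, keeps
 * physical interrupts routed to EL2; the override is dropped again when
 * the vgic state is saved on exit.
 */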
static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
{
        __vgic_call_save_state()(vcpu);
        write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2);
}

static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
{
        u64 val;

        val = read_sysreg(hcr_el2);
        val |= HCR_INT_OVERRIDE;
        val |= vcpu->arch.irq_lines;
        write_sysreg(val, hcr_el2);

        __vgic_call_restore_state()(vcpu);
}

static bool __hyp_text __true_value(void)
{
        return true;
}

static bool __hyp_text __false_value(void)
{
        return false;
}

static hyp_alternate_select(__check_arm_834220,
                            __false_value, __true_value,
                            ARM64_WORKAROUND_834220);

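/*
 * Cortex-A57 erratum 834220: a stage 2 fault can be misreported in the
 * presence of a pending stage 1 fault, so HPFAR_EL2 cannot always be
 * trusted. On affected cores __check_arm_834220()() patches to
 * __true_value and the IPA is resolved with an AT instruction instead.
 */
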
static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
        u64 par, tmp;

        /*
         * Resolve the IPA the hard way using the guest VA.
         *
         * Stage-1 translation already validated the memory access
         * rights. As such, we can use the EL1 translation regime, and
         * don't have to distinguish between EL0 and EL1 access.
         *
         * We do need to save/restore PAR_EL1 though, as we haven't
         * saved the guest context yet, and we may return early...
         */
        par = read_sysreg(par_el1);
        asm volatile("at s1e1r, %0" : : "r" (far));
        isb();

        tmp = read_sysreg(par_el1);
        write_sysreg(par, par_el1);

        if (unlikely(tmp & 1))
                return false; /* Translation failed, back to guest */

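        /*
         * PAR_EL1 reports the output address in PA[47:12], i.e. bits
         * [47:12]; HPFAR_EL2 wants the same page number in its FIPA
         * field at bits [39:4]. The mask keeps the 36 page-number bits,
         * and the shifts move them from bit 12 down to bit 4.
         */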
        /* Convert PAR to HPFAR format */
        *hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
        return true;
}

static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
        u64 esr = read_sysreg_el2(esr);
        u8 ec = ESR_ELx_EC(esr);
        u64 hpfar, far;

        vcpu->arch.fault.esr_el2 = esr;

        /* Only aborts need FAR/HPFAR; anything else is described by ESR */
        if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
                return true;

        far = read_sysreg_el2(far);

        /*
         * The HPFAR can be invalid if the stage 2 fault did not
         * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
         * bit is clear) and one of the two following cases are true:
         *   1. The fault was due to a permission fault
         *   2. The processor carries errata 834220
         *
         * Therefore, for all non S1PTW faults where we either have a
         * permission fault or the errata workaround is enabled, we
         * resolve the IPA using the AT instruction.
         */
        if (!(esr & ESR_ELx_S1PTW) &&
            (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
                if (!__translate_far_to_hpfar(far, &hpfar))
                        return false;
        } else {
                hpfar = read_sysreg(hpfar_el2);
        }

        vcpu->arch.fault.far_el2 = far;
        vcpu->arch.fault.hpfar_el2 = hpfar;
        return true;
}

static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
{
        struct kvm_cpu_context *host_ctxt;
        struct kvm_cpu_context *guest_ctxt;
        bool fp_enabled;
        u64 exit_code;

        vcpu = kern_hyp_va(vcpu);
        /* Stash the vcpu pointer where __hyp_panic() can find it */
        write_sysreg(vcpu, tpidr_el2);

        host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
        guest_ctxt = &vcpu->arch.ctxt;

        __sysreg_save_host_state(host_ctxt);
        __debug_cond_save_host_state(vcpu);

        __activate_traps(vcpu);
        __activate_vm(vcpu);

        __vgic_restore_state(vcpu);
        __timer_restore_state(vcpu);

        /*
         * We must restore the 32-bit state before the sysregs, thanks
         * to Cortex-A57 erratum #852523.
         */
        __sysreg32_restore_state(vcpu);
        __sysreg_restore_guest_state(guest_ctxt);
        __debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);

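        /*
         * __guest_enter() (hyp/entry.S) saves the host's callee-saved
         * registers into host_ctxt, loads the guest GP registers and
         * ERETs into the guest; it returns with the exit code once the
         * guest traps back to EL2.
         */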
        /* Jump in the fire! */
again:
        exit_code = __guest_enter(vcpu, host_ctxt);
        /* And we're baaack! */

        /*
         * If the trap information could not be populated (the AT-based
         * IPA resolution failed), go straight back into the guest and
         * let it retry the access.
         */
        if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
                goto again;

        fp_enabled = __fpsimd_enabled();

        __sysreg_save_guest_state(guest_ctxt);
        __sysreg32_save_state(vcpu);
        __timer_save_state(vcpu);
        __vgic_save_state(vcpu);

        __deactivate_traps(vcpu);
        __deactivate_vm(vcpu);

        __sysreg_restore_host_state(host_ctxt);

        /* The guest ran with FP/SIMD enabled: swap its state with the host's */
        if (fp_enabled) {
                __fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
                __fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
        }

        __debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
        __debug_cond_restore_host_state(vcpu);

        return exit_code;
}

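/*
 * __kvm_vcpu_run is the name the rest of KVM calls through the HYP stub;
 * aliasing it to __guest_run keeps the implementation static while still
 * exporting a kernel-visible symbol.
 */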
__alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

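/*
 * Two panic flavours: without VHE we must hand over to the host via
 * __hyp_do_panic, since code running at EL2 cannot call panic()
 * directly, whereas with VHE the kernel is already running at EL2 and
 * can panic() in place.
 */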
static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
{
        unsigned long str_va;

        /*
         * Force the panic string to be loaded from the literal pool,
         * making sure it is a kernel address and not a PC-relative
         * address.
         */
        asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));

        __hyp_do_panic(str_va,
                       spsr, elr,
                       read_sysreg(esr_el2), read_sysreg_el2(far),
                       read_sysreg(hpfar_el2), par,
                       (void *)read_sysreg(tpidr_el2));
}

static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par)
{
        panic(__hyp_panic_string,
              spsr, elr,
              read_sysreg_el2(esr), read_sysreg_el2(far),
              read_sysreg(hpfar_el2), par,
              (void *)read_sysreg(tpidr_el2));
}

static hyp_alternate_select(__hyp_call_panic,
                            __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
                            ARM64_HAS_VIRT_HOST_EXTN);

void __hyp_text __noreturn __hyp_panic(void)
{
        u64 spsr = read_sysreg_el2(spsr);
        u64 elr = read_sysreg_el2(elr);
        u64 par = read_sysreg(par_el1);

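        /*
         * A non-zero VTTBR_EL2 means we crashed with a guest context
         * loaded; restore the host state first so the panic path runs
         * in a sane environment.
         */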
        if (read_sysreg(vttbr_el2)) {
                struct kvm_vcpu *vcpu;
                struct kvm_cpu_context *host_ctxt;

                vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
                host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
                __deactivate_traps(vcpu);
                __deactivate_vm(vcpu);
                __sysreg_restore_host_state(host_ctxt);
        }

        /* Call panic for real */
        __hyp_call_panic()(spsr, elr, par);

        unreachable();
}