/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/asm-offsets.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
#define CPU_SPSR_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
#define CPU_SYSREG_OFFSET(x)	(CPU_SYSREGS + 8*x)
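// Note on the layout these offsets assume (member names here are
// illustrative, the real values come from asm-offsets): each GP register,
// SPSR and system register slot is 8 bytes, so CPU_XREG_OFFSET(19) is the
// byte offset of regs[19] and CPU_SYSREG_OFFSET(MPIDR_EL1) the offset of
// sys_regs[MPIDR_EL1] within the CPU context that x2 points to below.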
	.pushsection	.hyp.text, "ax"

.macro save_common_regs
	// x2: base address for cpu context

	add	x3, x2, #CPU_XREG_OFFSET(19)
	stp	x21, x22, [x3, #16]
	stp	x23, x24, [x3, #32]
	stp	x25, x26, [x3, #48]
	stp	x27, x28, [x3, #64]
	stp	x29, lr, [x3, #80]

	mrs	x20, elr_el2		// EL1 PC
	mrs	x21, spsr_el2		// EL1 pstate

	stp	x19, x20, [x3, #96]

	str	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	str	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	str	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]

.macro restore_common_regs
	// x2: base address for cpu context

	ldr	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	ldr	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	ldr	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]

	add	x3, x2, #CPU_XREG_OFFSET(31)	// SP_EL0

	msr	elr_el2, x20		// EL1 PC
	msr	spsr_el2, x21		// EL1 pstate

	add	x3, x2, #CPU_XREG_OFFSET(19)
	ldp	x21, x22, [x3, #16]
	ldp	x23, x24, [x3, #32]
	ldp	x25, x26, [x3, #48]
	ldp	x27, x28, [x3, #64]
	ldp	x29, lr, [x3, #80]
.macro restore_host_regs

.macro save_fpsimd
	// x2: cpu context address
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)

.macro restore_fpsimd
	// x2: cpu context address
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
.macro save_guest_regs
	// x0 is the vcpu address
	// x1 is the return code, do not corrupt!
	// x2 is the cpu context
	// x3 is a tmp register
	// Guest's x0-x3 are on the stack
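	// (x0-x3 are pushed there by the exception entry paths, el1_sync and
	// el1_irq below, before those registers get clobbered; this macro
	// moves them from the stack into the context further down)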
	// Compute base to save registers
	add	x3, x2, #CPU_XREG_OFFSET(4)
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]

	add	x3, x2, #CPU_XREG_OFFSET(0)
	stp	x6, x7, [x3, #16]

.macro restore_guest_regs
	// x0 is the vcpu address.
	// x2 is the cpu context
	// x3 is a tmp register

	// Prepare x0-x3 for later restore
	add	x3, x2, #CPU_XREG_OFFSET(0)
	ldp	x6, x7, [x3, #16]
	push	x4, x5		// Push x0-x3 on the stack

	ldp	x4, x5, [x3, #32]
	ldp	x6, x7, [x3, #48]
	ldp	x8, x9, [x3, #64]
	ldp	x10, x11, [x3, #80]
	ldp	x12, x13, [x3, #96]
	ldp	x14, x15, [x3, #112]
	ldp	x16, x17, [x3, #128]

	// x19-x29, lr, sp*, elr*, spsr*

	// Last bits of the 64bit state

	// Do not touch any register after this!
/*
 * Macros to perform system register save/restore.
 *
 * Ordering here is absolutely critical, and must be kept consistent
 * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
 * and in kvm_asm.h.
 *
 * In other words, don't touch any of these unless you know what
 * you are doing.
 */
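/*
 * Put differently (an illustration; the authoritative numbering lives in
 * kvm_asm.h): the index a system register has in that header fixes both its
 * position in the mrs/msr sequences below and its 8-byte slot in the context,
 * so the value read for, say, MPIDR_EL1 must be stored at
 * CPU_SYSREG_OFFSET(MPIDR_EL1) and restored from that same slot.
 */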
.macro save_sysregs
	// x2: base address for cpu context

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	mrs	x18, contextidr_el1

	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	stp	x18, x19, [x3, #112]
	stp	x20, x21, [x3, #128]
	stp	x22, x23, [x3, #144]
.macro restore_sysregs
	// x2: base address for cpu context

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	ldp	x6, x7, [x3, #16]
	ldp	x8, x9, [x3, #32]
	ldp	x10, x11, [x3, #48]
	ldp	x12, x13, [x3, #64]
	ldp	x14, x15, [x3, #80]
	ldp	x16, x17, [x3, #96]
	ldp	x18, x19, [x3, #112]
	ldp	x20, x21, [x3, #128]
	ldp	x22, x23, [x3, #144]

	msr	contextidr_el1, x18
.macro skip_32bit_state tmp, target
	// Skip 32bit state if not needed
	mrs	\tmp, hcr_el2
	tbnz	\tmp, #HCR_RW_SHIFT, \target
.macro skip_tee_state tmp, target
	// Skip ThumbEE state if not needed
	mrs	\tmp, id_pfr0_el1
	tbz	\tmp, #12, \target
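	// ID_PFR0_EL1[15:12] is the ThumbEE (T32EE) field; with the values
	// currently defined (0 or 1), testing bit 12 is enough to decide
	// whether there is any ThumbEE state to handle.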
.macro save_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	stp	x6, x7, [x3, #16]

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	stp	x6, x7, [x3, #16]

	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)

.macro restore_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	ldp	x6, x7, [x3, #16]

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	ldp	x6, x7, [x3, #16]

	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)

.macro activate_traps
	ldr	x2, [x0, #VCPU_HCR_EL2]

	ldr	x2, =(CPTR_EL2_TTA)

	ldr	x2, =(1 << 15)	// Trap CP15 Cr=15

	and	x2, x2, #MDCR_EL2_HPMN_MASK
	orr	x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
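	// (the MDCR_EL2 value built here preserves the existing HPMN counter
	// partition while TPM/TPMCR make guest PMU accesses trap to EL2)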
.macro deactivate_traps

	and	x2, x2, #MDCR_EL2_HPMN_MASK

.macro activate_vm
	ldr	x1, [x0, #VCPU_KVM]
	ldr	x2, [x1, #KVM_VTTBR]
/*
 * Call into the vgic backend for state saving
 */
.macro save_vgic_state
	adr	x24, __vgic_sr_vectors
	ldr	x24, [x24, VGIC_SAVE_FN]

	mov	x25, #HCR_INT_OVERRIDE

/*
 * Call into the vgic backend for state restoring
 */
.macro restore_vgic_state
	ldr	x25, [x0, #VCPU_IRQ_LINES]
	orr	x24, x24, #HCR_INT_OVERRIDE

	adr	x24, __vgic_sr_vectors
	ldr	x24, [x24, #VGIC_RESTORE_FN]

.macro save_timer_state
	ldr	x2, [x0, #VCPU_KVM]

	ldr	w3, [x2, #KVM_TIMER_ENABLED]

	str	w3, [x0, #VCPU_TIMER_CNTV_CTL]
	bic	x3, x3, #1		// Clear Enable

	mrs	x3, cntv_cval_el0
	str	x3, [x0, #VCPU_TIMER_CNTV_CVAL]

	// Allow physical timer/counter access for the host

	// Clear cntvoff for the host
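	// (a sketch of the intent behind the two comments above: set the
	// CNTHCTL_EL2 EL1PCTEN/EL1PCEN bits again so EL1 regains physical
	// timer/counter access, and write zero to CNTVOFF_EL2)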
.macro restore_timer_state
	// Disallow physical timer access for the guest
	// Physical counter access is allowed
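	// (in CNTHCTL_EL2 terms this usually means keeping EL1PCTEN set, so
	// the guest can still read the physical counter, while clearing
	// EL1PCEN so it cannot program the physical timer)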
	ldr	x2, [x0, #VCPU_KVM]

	ldr	w3, [x2, #KVM_TIMER_ENABLED]

	ldr	x3, [x2, #KVM_TIMER_CNTVOFF]

	ldr	x2, [x0, #VCPU_TIMER_CNTV_CVAL]
	msr	cntv_cval_el0, x2

	ldr	w2, [x0, #VCPU_TIMER_CNTV_CTL]

/*
 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 *
 * This is the world switch. The first half of the function
 * deals with entering the guest, and anything from __kvm_vcpu_return
 * to the end of the function deals with reentering the host.
 * On the enter path, only x0 (vcpu pointer) must be preserved until
 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
 * code) must both be preserved until the epilogue.
 *
 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
 */
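/*
 * Host-side usage sketch (based on the shared arch/arm/kvm/arm.c run loop;
 * the variable name is illustrative): the host enters the guest with
 *
 *	ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *
 * and then dispatches on the returned exception code (ARM_EXCEPTION_TRAP,
 * ARM_EXCEPTION_IRQ, ...).
 */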
ENTRY(__kvm_vcpu_run)

	msr	tpidr_el2, x0	// Save the vcpu register

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]

	add	x2, x0, #VCPU_CONTEXT

	restore_guest_32bit_state
	// That's it, no more messing around.

__kvm_vcpu_return:
	// Assume x0 is the vcpu pointer, x1 the return code
	// Guest's x0-x3 are on the stack

	add	x2, x0, #VCPU_CONTEXT

	save_guest_32bit_state
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]

// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
ENTRY(__kvm_tlb_flush_vmid_ipa)

	ldr	x2, [x0, #KVM_VTTBR]

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
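	/*
	 * A sketch of the invalidation these two comments describe
	 * (illustrative only; x1 is assumed to hold the IPA shifted right
	 * by 12):
	 *
	 *	tlbi	ipas2e1is, x1	// Stage-2, by IPA, inner shareable
	 *	dsb	ish		// S2 invalidation must complete first
	 *	tlbi	vmalle1is	// then drop all Stage-1 entries
	 *	dsb	ish
	 *	isb
	 */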
ENDPROC(__kvm_tlb_flush_vmid_ipa)

ENTRY(__kvm_flush_vm_context)

ENDPROC(__kvm_flush_vm_context)
// struct vgic_sr_vectors __vgic_sr_vectors;
ENTRY(__vgic_sr_vectors)
	.skip	VGIC_SR_VECTOR_SZ
ENDPROC(__vgic_sr_vectors)
ENTRY(__kvm_hyp_panic)
	// Guess the context by looking at VTTBR:
	// If zero, then we're already a host.
	// Otherwise restore a minimal host context before panicking.

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]

1:	adr	x0, __hyp_panic_str

	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)

2:	.quad HYP_PAGE_OFFSET

ENDPROC(__kvm_hyp_panic)

__hyp_panic_str:
	.ascii	"HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"
/*
 * u64 kvm_call_hyp(void *hypfn, ...);
 *
 * This is not really a variadic function in the classic C-way and care must
 * be taken when calling this to ensure parameters are passed in registers
 * only, since the stack will change between the caller and the callee.
 *
 * Call the function with the first argument containing a pointer to the
 * function you wish to call in Hyp mode, and subsequent arguments will be
 * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
 * function pointer can be passed).  The function being called must be mapped
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  The return value
 * is passed back in x0.
 *
 * A function pointer with a value of 0 has a special meaning, and is
 * used to implement __hyp_get_vectors in the same way as in
 * arch/arm64/kernel/hyp_stub.S.
 */
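/*
 * Typical call sites elsewhere in KVM look like this (an illustrative
 * sketch; the exact spellings live in the C code):
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 *	kvm_call_hyp(__kvm_flush_vm_context);
 */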
ENDPROC(kvm_call_hyp)

.macro invalid_vector	label, target

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2t_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_error_invalid, __kvm_hyp_panic
	invalid_vector	el2h_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2h_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_error_invalid, __kvm_hyp_panic
	invalid_vector	el1_sync_invalid, __kvm_hyp_panic
	invalid_vector	el1_irq_invalid, __kvm_hyp_panic
	invalid_vector	el1_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el1_error_invalid, __kvm_hyp_panic

el1_sync:					// Guest trapped into EL2

	lsr	x2, x1, #ESR_EL2_EC_SHIFT

	cmp	x2, #ESR_EL2_EC_HVC64

	mrs	x3, vttbr_el2			// If vttbr is valid, the 64bit guest
	cbnz	x3, el1_trap			// called HVC

	/* Here, we're pretty sure the host called HVC. */

	/* Check for __hyp_get_vectors */

	/*
	 * Compute the function address in EL2, and shuffle the parameters.
	 */
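	// (i.e. translate the function pointer in x0 to its HYP-mode alias,
	// then shift x1/x2/x3 down into x0/x1/x2 before branching, so the
	// callee sees a normal argument list, matching the kvm_call_hyp
	// documentation above)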
	cmp	x2, #ESR_EL2_EC_DABT
	mov	x0, #ESR_EL2_EC_IABT

	b.ne	1f		// Not an abort we care about

	/* This is an abort. Check for permission fault */
	and	x2, x1, #ESR_EL2_FSC_TYPE

	b.ne	1f		// Not a permission fault

	/*
	 * Check for Stage-1 page table walk, which is guaranteed
	 * to give a valid HPFAR_EL2.
	 */
	tbnz	x1, #7, 1f	// S1PTW is set

	/* Preserve PAR_EL1 */

	/*
	 * Permission fault, HPFAR_EL2 is invalid.
	 * Resolve the IPA the hard way using the guest VA.
	 * Stage-1 translation already validated the memory access rights.
	 * As such, we can use the EL1 translation regime, and don't have
	 * to distinguish between EL0 and EL1 access.
	 */
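	/*
	 * Sketch of the lookup this comment describes (illustrative, not a
	 * verbatim copy of the surrounding code):
	 *
	 *	mrs	x2, far_el2	// guest VA that faulted
	 *	at	s1e1r, x2	// Stage-1 walk in the EL1 regime
	 *	isb
	 *	mrs	x3, par_el1	// translation result (PA or fault)
	 */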
	pop	x0, xzr			// Restore PAR_EL1 from the stack

	tbnz	x3, #0, 3f		// Bail out if we failed the translation
	ubfx	x3, x3, #12, #36	// Extract IPA
	lsl	x3, x3, #4		// and present it like HPFAR
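	// (PAR_EL1 holds the PA in bits [47:12]; HPFAR_EL2 expects that same
	// page number in bits [39:4], hence extracting 36 bits starting at
	// bit 12 and shifting the result left by 4)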
	str	w1, [x0, #VCPU_ESR_EL2]
	str	x2, [x0, #VCPU_FAR_EL2]
	str	x3, [x0, #VCPU_HPFAR_EL2]

	mov	x1, #ARM_EXCEPTION_TRAP
	/*
	 * Translation failed. Just return to the guest and
	 * let it fault again. Another CPU is probably playing
	 * behind our back.
	 */
	mov	x1, #ARM_EXCEPTION_IRQ

ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid		// Synchronous EL2t
	ventry	el2t_irq_invalid		// IRQ EL2t
	ventry	el2t_fiq_invalid		// FIQ EL2t
	ventry	el2t_error_invalid		// Error EL2t

	ventry	el2h_sync_invalid		// Synchronous EL2h
	ventry	el2h_irq_invalid		// IRQ EL2h
	ventry	el2h_fiq_invalid		// FIQ EL2h
	ventry	el2h_error_invalid		// Error EL2h

	ventry	el1_sync			// Synchronous 64-bit EL1
	ventry	el1_irq				// IRQ 64-bit EL1
	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
	ventry	el1_error_invalid		// Error 64-bit EL1

	ventry	el1_sync			// Synchronous 32-bit EL1
	ventry	el1_irq				// IRQ 32-bit EL1
	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
	ventry	el1_error_invalid		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)