/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __virt_to_phys(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  //idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * This of course assumes that the trampoline page exists within the
 * VA_BITS range. If it doesn't, then it means we're in the odd case
 * where the kernel idmap (as well as HYP) uses more levels than the
 * kernel runtime page tables (as seen when the kernel is configured
 * for 4k pages, 39bit VA, and memory lives just above that limit,
 * forcing the idmap to use 4 levels of page tables while the kernel
 * itself only uses 3). In this particular case, it doesn't matter
 * which side of VA_BITS we use, as we're guaranteed not to conflict
 * with anything.
 *
 * When using VHE, there are no separate hyp mappings: all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, so none of this applies in that case.
 */
#define HYP_PAGE_OFFSET_SHIFT	VA_BITS
#define HYP_PAGE_OFFSET_MASK	((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
#define HYP_PAGE_OFFSET		(PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)

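/*
 * Worked example, assuming a 4k/39bit VA configuration where
 * PAGE_OFFSET is 0xffffffc000000000:
 *
 *	HYP_PAGE_OFFSET_MASK = (1UL << 39) - 1  = 0x0000007fffffffff
 *	HYP_PAGE_OFFSET      = 0xffffffc000000000 & 0x0000007fffffffff
 *			     = 0x0000004000000000
 *
 * i.e. the HYP linear map shadows the kernel's in the top half of the
 * 39bit range, as bit (VA_BITS - 1) is set.
 */
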
/*
 * Our virtual mapping for the idmap-ed MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the last
 * possible page, where no kernel mapping will ever exist.
 */
#define TRAMPOLINE_VA		(HYP_PAGE_OFFSET_MASK & PAGE_MASK)

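/*
 * In the same 4k/39bit example, TRAMPOLINE_VA evaluates to
 * 0x0000007fffffffff & ~0xfff = 0x0000007ffffff000, the last possible
 * page of the HYP VA range.
 */
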
#ifdef __ASSEMBLY__

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 */
.macro kern_hyp_va	reg
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	and	\reg, \reg, #HYP_PAGE_OFFSET_MASK
alternative_else
	nop
alternative_endif
.endm

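/*
 * Typical use, converting a kernel pointer held in x0 before handing
 * it to EL2 (patched to a nop when running with VHE):
 *
 *	kern_hyp_va	x0
 */
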
#else

#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

#define KERN_TO_HYP(kva)	((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)

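/*
 * Since HYP_PAGE_OFFSET == PAGE_OFFSET & HYP_PAGE_OFFSET_MASK, this is
 * the C equivalent of the kern_hyp_va assembly macro above: for a
 * linear map address, subtracting PAGE_OFFSET and adding back its
 * masked value is the same as masking off the top bits.
 */
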
/*
 * We currently only support a 40bit IPA.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)

#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
phys_addr_t kvm_get_idmap_start(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
#define kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)

static inline void kvm_clean_pgd(pgd_t *pgd) {}
static inline void kvm_clean_pmd(pmd_t *pmd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
static inline void kvm_clean_pte_entry(pte_t *pte) {}

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_S2_RDWR;
	return pmd;
}

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pteval_t pteval;
	unsigned long tmp;

	asm volatile("//	kvm_set_s2pte_readonly\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	and	%0, %0, %3		// clear PTE_S2_RDWR\n"
	"	orr	%0, %0, %4		// set PTE_S2_RDONLY\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*pte))
	: "L" (~PTE_S2_RDWR), "L" (PTE_S2_RDONLY));
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	kvm_set_s2pte_readonly((pte_t *)pmd);
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return kvm_s2pte_readonly((pte_t *)pmd);
}

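/*
 * Page-table pages are refcounted: the table code takes a reference on
 * a table page for every entry installed in it, so a page_count() of
 * exactly 1 (the allocator's own reference) means the table is empty
 * and may be freed.
 */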
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define hyp_pmd_table_empty(pmdp) (0)
#else
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define hyp_pud_table_empty(pudp) (0)
#else
#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

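/*
 * In SCTLR_EL1, bit 0 (M) enables the stage 1 MMU and bit 2 (C)
 * enables data caching, hence the 0b101 mask: the guest only counts
 * as having its caches enabled when both bits are set.
 */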
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

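/*
 * Make a page coherent before it is handed to a guest: clean the data
 * cache to the PoC if the guest may be accessing memory with its
 * caches disabled, then invalidate the icache as its type requires
 * (by range for PIPT, entirely for any VIPT flavour; ASID-tagged VIVT
 * icaches are left alone here and dealt with separately).
 */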
static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	void *va = page_address(pfn_to_page(pfn));

	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
		kvm_flush_dcache_to_poc(va, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range((unsigned long)va,
				   (unsigned long)va + size);
	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

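/*
 * Flush a whole block mapping (page, PMD or PUD sized) to the PoC.
 * Used when tearing down stage 2 mappings, as the guest may have
 * dirtied the data cache through any of its aliases.
 */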
static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);
	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	struct page *page = pud_page(pud);
	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}

#define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))

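/*
 * Support for guest cache maintenance by set/way, which cannot be
 * meaningfully virtualized and is instead turned into VM-wide
 * flushing, and for trapping the guest toggling its caches.
 */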
void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap();
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}

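/*
 * ID_AA64MMFR1_EL1.VMIDBits encodes the supported VMID width: 0 means
 * 8 bits, 2 means the ARMv8.1 VMID16 extension (16 bit VMIDs) is
 * implemented.
 */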
static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);

	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */