2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 #include <linux/mman.h>
20 #include <linux/kvm_host.h>
22 #include <linux/hugetlb.h>
23 #include <trace/events/kvm.h>
24 #include <asm/pgalloc.h>
25 #include <asm/cacheflush.h>
26 #include <asm/kvm_arm.h>
27 #include <asm/kvm_mmu.h>
28 #include <asm/kvm_mmio.h>
29 #include <asm/kvm_asm.h>
30 #include <asm/kvm_emulate.h>
34 extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
36 static pgd_t *boot_hyp_pgd;
37 static pgd_t *hyp_pgd;
38 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
40 static void *init_bounce_page;
41 static unsigned long hyp_idmap_start;
42 static unsigned long hyp_idmap_end;
43 static phys_addr_t hyp_idmap_vector;
45 #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
47 #define kvm_pmd_huge(_x) (pmd_huge(_x) || pmd_trans_huge(_x))
49 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
52 * This function also gets called when dealing with HYP page
53 * tables. As HYP doesn't have an associated struct kvm (and
54 * the HYP page tables are fairly static), we don't do
58 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
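/*
 * Fill @cache with page-table pages ahead of time so that the stage-2
 * mapping code can allocate from it without sleeping under mmu_lock.
 */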
61 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
66 BUG_ON(max > KVM_NR_MEM_OBJS);
67 if (cache->nobjs >= min)
69 while (cache->nobjs < max) {
70 page = (void *)__get_free_page(PGALLOC_GFP);
73 cache->objects[cache->nobjs++] = page;
78 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
81 free_page((unsigned long)mc->objects[--mc->nobjs]);
84 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
88 BUG_ON(!mc || !mc->nobjs);
89 p = mc->objects[--mc->nobjs];
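/*
 * The clear_*_entry() helpers below tear down one table entry: flush the
 * TLB for the covered IPA, free the now-unreferenced lower-level table
 * and drop the refcount on the page holding the entry.
 */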
93 static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
95 pud_t *pud_table __maybe_unused = pud_offset(pgd, 0);
97 kvm_tlb_flush_vmid_ipa(kvm, addr);
98 pud_free(NULL, pud_table);
99 put_page(virt_to_page(pgd));
102 static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
104 pmd_t *pmd_table = pmd_offset(pud, 0);
105 VM_BUG_ON(pud_huge(*pud));
107 kvm_tlb_flush_vmid_ipa(kvm, addr);
108 pmd_free(NULL, pmd_table);
109 put_page(virt_to_page(pud));
112 static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
114 pte_t *pte_table = pte_offset_kernel(pmd, 0);
115 VM_BUG_ON(kvm_pmd_huge(*pmd));
117 kvm_tlb_flush_vmid_ipa(kvm, addr);
118 pte_free_kernel(NULL, pte_table);
119 put_page(virt_to_page(pmd));
122 static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
123 phys_addr_t addr, phys_addr_t end)
125 phys_addr_t start_addr = addr;
126 pte_t *pte, *start_pte;
128 start_pte = pte = pte_offset_kernel(pmd, addr);
130 if (!pte_none(*pte)) {
131 kvm_set_pte(pte, __pte(0));
132 put_page(virt_to_page(pte));
133 kvm_tlb_flush_vmid_ipa(kvm, addr);
135 } while (pte++, addr += PAGE_SIZE, addr != end);
137 if (kvm_pte_table_empty(kvm, start_pte))
138 clear_pmd_entry(kvm, pmd, start_addr);
141 static void unmap_pmds(struct kvm *kvm, pud_t *pud,
142 phys_addr_t addr, phys_addr_t end)
144 phys_addr_t next, start_addr = addr;
145 pmd_t *pmd, *start_pmd;
147 start_pmd = pmd = pmd_offset(pud, addr);
149 next = kvm_pmd_addr_end(addr, end);
150 if (!pmd_none(*pmd)) {
151 if (kvm_pmd_huge(*pmd)) {
153 kvm_tlb_flush_vmid_ipa(kvm, addr);
154 put_page(virt_to_page(pmd));
156 unmap_ptes(kvm, pmd, addr, next);
159 } while (pmd++, addr = next, addr != end);
161 if (kvm_pmd_table_empty(kvm, start_pmd))
162 clear_pud_entry(kvm, pud, start_addr);
165 static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
166 phys_addr_t addr, phys_addr_t end)
168 phys_addr_t next, start_addr = addr;
169 pud_t *pud, *start_pud;
171 start_pud = pud = pud_offset(pgd, addr);
173 next = kvm_pud_addr_end(addr, end);
174 if (!pud_none(*pud)) {
175 if (pud_huge(*pud)) {
177 kvm_tlb_flush_vmid_ipa(kvm, addr);
178 put_page(virt_to_page(pud));
180 unmap_pmds(kvm, pud, addr, next);
183 } while (pud++, addr = next, addr != end);
185 if (kvm_pud_table_empty(kvm, start_pud))
186 clear_pgd_entry(kvm, pgd, start_addr);
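/*
 * Unmap a range of IPAs (or HYP VAs when @kvm is NULL), walking the page
 * tables from the pgd down and freeing any table that becomes empty.
 */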
190 static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
191 phys_addr_t start, u64 size)
194 phys_addr_t addr = start, end = start + size;
197 pgd = pgdp + pgd_index(addr);
199 next = kvm_pgd_addr_end(addr, end);
200 unmap_puds(kvm, pgd, addr, next);
201 } while (pgd++, addr = next, addr != end);
204 static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
205 phys_addr_t addr, phys_addr_t end)
209 pte = pte_offset_kernel(pmd, addr);
211 if (!pte_none(*pte)) {
212 hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
213 kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
215 } while (pte++, addr += PAGE_SIZE, addr != end);
218 static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
219 phys_addr_t addr, phys_addr_t end)
224 pmd = pmd_offset(pud, addr);
226 next = kvm_pmd_addr_end(addr, end);
227 if (!pmd_none(*pmd)) {
228 if (kvm_pmd_huge(*pmd)) {
229 hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
230 kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
232 stage2_flush_ptes(kvm, pmd, addr, next);
235 } while (pmd++, addr = next, addr != end);
238 static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
239 phys_addr_t addr, phys_addr_t end)
244 pud = pud_offset(pgd, addr);
246 next = kvm_pud_addr_end(addr, end);
247 if (!pud_none(*pud)) {
248 if (pud_huge(*pud)) {
249 hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
250 kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
252 stage2_flush_pmds(kvm, pud, addr, next);
255 } while (pud++, addr = next, addr != end);
258 static void stage2_flush_memslot(struct kvm *kvm,
259 struct kvm_memory_slot *memslot)
261 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
262 phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
266 pgd = kvm->arch.pgd + pgd_index(addr);
268 next = kvm_pgd_addr_end(addr, end);
269 stage2_flush_puds(kvm, pgd, addr, next);
270 } while (pgd++, addr = next, addr != end);
274 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
275 * @kvm: The struct kvm pointer
277 * Go through the stage 2 page tables and invalidate any cache lines
278 * backing memory already mapped to the VM.
280 void stage2_flush_vm(struct kvm *kvm)
282 struct kvm_memslots *slots;
283 struct kvm_memory_slot *memslot;
286 idx = srcu_read_lock(&kvm->srcu);
287 spin_lock(&kvm->mmu_lock);
289 slots = kvm_memslots(kvm);
290 kvm_for_each_memslot(memslot, slots)
291 stage2_flush_memslot(kvm, memslot);
293 spin_unlock(&kvm->mmu_lock);
294 srcu_read_unlock(&kvm->srcu, idx);
298 * free_boot_hyp_pgd - free HYP boot page tables
300 * Free the HYP boot page tables. The bounce page is also freed.
302 void free_boot_hyp_pgd(void)
304 mutex_lock(&kvm_hyp_pgd_mutex);
307 unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
308 unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
309 free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
314 unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
316 free_page((unsigned long)init_bounce_page);
317 init_bounce_page = NULL;
319 mutex_unlock(&kvm_hyp_pgd_mutex);
323 * free_hyp_pgds - free Hyp-mode page tables
325 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
326 * therefore contains either mappings in the kernel memory area (above
327 * PAGE_OFFSET), or device mappings in the vmalloc range (from
328 * VMALLOC_START to VMALLOC_END).
330 * boot_hyp_pgd should only map two pages for the init code.
332 void free_hyp_pgds(void)
338 mutex_lock(&kvm_hyp_pgd_mutex);
341 for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
342 unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
343 for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
344 unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
346 free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
350 mutex_unlock(&kvm_hyp_pgd_mutex);
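/*
 * The create_hyp_*_mappings() helpers below populate the HYP page tables
 * one level at a time, allocating intermediate tables as needed and
 * cleaning updated entries to the PoC so they are visible to HYP mode.
 */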
353 static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
354 unsigned long end, unsigned long pfn,
362 pte = pte_offset_kernel(pmd, addr);
363 kvm_set_pte(pte, pfn_pte(pfn, prot));
364 get_page(virt_to_page(pte));
365 kvm_flush_dcache_to_poc(pte, sizeof(*pte));
367 } while (addr += PAGE_SIZE, addr != end);
370 static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
371 unsigned long end, unsigned long pfn,
376 unsigned long addr, next;
380 pmd = pmd_offset(pud, addr);
382 BUG_ON(pmd_sect(*pmd));
384 if (pmd_none(*pmd)) {
385 pte = pte_alloc_one_kernel(NULL, addr);
387 kvm_err("Cannot allocate Hyp pte\n");
390 pmd_populate_kernel(NULL, pmd, pte);
391 get_page(virt_to_page(pmd));
392 kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
395 next = pmd_addr_end(addr, end);
397 create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
398 pfn += (next - addr) >> PAGE_SHIFT;
399 } while (addr = next, addr != end);
404 static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
405 unsigned long end, unsigned long pfn,
410 unsigned long addr, next;
415 pud = pud_offset(pgd, addr);
417 if (pud_none_or_clear_bad(pud)) {
418 pmd = pmd_alloc_one(NULL, addr);
420 kvm_err("Cannot allocate Hyp pmd\n");
423 pud_populate(NULL, pud, pmd);
424 get_page(virt_to_page(pud));
425 kvm_flush_dcache_to_poc(pud, sizeof(*pud));
428 next = pud_addr_end(addr, end);
429 ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
432 pfn += (next - addr) >> PAGE_SHIFT;
433 } while (addr = next, addr != end);
438 static int __create_hyp_mappings(pgd_t *pgdp,
439 unsigned long start, unsigned long end,
440 unsigned long pfn, pgprot_t prot)
444 unsigned long addr, next;
447 mutex_lock(&kvm_hyp_pgd_mutex);
448 addr = start & PAGE_MASK;
449 end = PAGE_ALIGN(end);
451 pgd = pgdp + pgd_index(addr);
453 if (pgd_none(*pgd)) {
454 pud = pud_alloc_one(NULL, addr);
456 kvm_err("Cannot allocate Hyp pud\n");
460 pgd_populate(NULL, pgd, pud);
461 get_page(virt_to_page(pgd));
462 kvm_flush_dcache_to_poc(pgd, sizeof(*pgd));
465 next = pgd_addr_end(addr, end);
466 err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
469 pfn += (next - addr) >> PAGE_SHIFT;
470 } while (addr = next, addr != end);
472 mutex_unlock(&kvm_hyp_pgd_mutex);
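/*
 * Translate a kernel virtual address (lowmem or vmalloc) into a physical
 * address so it can be mapped into HYP.
 */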
476 static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
478 if (!is_vmalloc_addr(kaddr)) {
479 BUG_ON(!virt_addr_valid(kaddr));
482 return page_to_phys(vmalloc_to_page(kaddr)) +
483 offset_in_page(kaddr);
488 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
489 * @from: The virtual kernel start address of the range
490 * @to: The virtual kernel end address of the range (exclusive)
492 * The same virtual address as the kernel virtual address is also used
493 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
496 int create_hyp_mappings(void *from, void *to)
498 phys_addr_t phys_addr;
499 unsigned long virt_addr;
500 unsigned long start = KERN_TO_HYP((unsigned long)from);
501 unsigned long end = KERN_TO_HYP((unsigned long)to);
503 start = start & PAGE_MASK;
504 end = PAGE_ALIGN(end);
506 for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
509 phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
510 err = __create_hyp_mappings(hyp_pgd, virt_addr,
511 virt_addr + PAGE_SIZE,
512 __phys_to_pfn(phys_addr),
522 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
523 * @from: The kernel start VA of the range
524 * @to: The kernel end VA of the range (exclusive)
525 * @phys_addr: The physical start address which gets mapped
527 * The resulting HYP VA is the same as the kernel VA, modulo
530 int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
532 unsigned long start = KERN_TO_HYP((unsigned long)from);
533 unsigned long end = KERN_TO_HYP((unsigned long)to);
535 /* Check for a valid kernel IO mapping */
536 if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
539 return __create_hyp_mappings(hyp_pgd, start, end,
540 __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
544 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
545 * @kvm: The KVM struct pointer for the VM.
547 * Allocates only the 1st level table, whose size is defined by S2_PGD_ORDER
548 * (it can support either full 40-bit input addresses or be limited to 32-bit
549 * input addresses). Clears the allocated pages.
551 * Note we don't need locking here as this is only called when the VM is
552 * created, which can only be done once.
554 int kvm_alloc_stage2_pgd(struct kvm *kvm)
559 if (kvm->arch.pgd != NULL) {
560 kvm_err("kvm_arch already initialized?\n");
564 if (KVM_PREALLOC_LEVEL > 0) {
566 * Allocate fake pgd for the page table manipulation macros to
567 * work. This is not used by the hardware and we have no
568 * alignment requirement for this allocation.
570 pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
571 GFP_KERNEL | __GFP_ZERO);
574 * Allocate actual first-level Stage-2 page table used by the
575 * hardware for Stage-2 page table walks.
577 pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER);
583 ret = kvm_prealloc_hwpgd(kvm, pgd);
591 if (KVM_PREALLOC_LEVEL > 0)
594 free_pages((unsigned long)pgd, S2_PGD_ORDER);
599 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
600 * @kvm: The VM pointer
601 * @start: The intermediate physical base address of the range to unmap
602 * @size: The size of the area to unmap
604 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
605 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
606 * destroying the VM), otherwise another faulting VCPU may come in and mess
607 * with things behind our backs.
609 static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
611 unmap_range(kvm, kvm->arch.pgd, start, size);
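/*
 * Unmap the stage-2 mappings backing a memslot, skipping VM_PFNMAP
 * (device) VMAs, whose mappings are left in place.
 */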
614 static void stage2_unmap_memslot(struct kvm *kvm,
615 struct kvm_memory_slot *memslot)
617 hva_t hva = memslot->userspace_addr;
618 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
619 phys_addr_t size = PAGE_SIZE * memslot->npages;
620 hva_t reg_end = hva + size;
623 * A memory region could potentially cover multiple VMAs, and any holes
624 * between them, so iterate over all of them to find out if we should
627 * +--------------------------------------------+
628 * +---------------+----------------+ +----------------+
629 * | : VMA 1 | VMA 2 | | VMA 3 : |
630 * +---------------+----------------+ +----------------+
632 * +--------------------------------------------+
635 struct vm_area_struct *vma = find_vma(current->mm, hva);
636 hva_t vm_start, vm_end;
638 if (!vma || vma->vm_start >= reg_end)
642 * Take the intersection of this VMA with the memory region
644 vm_start = max(hva, vma->vm_start);
645 vm_end = min(reg_end, vma->vm_end);
647 if (!(vma->vm_flags & VM_PFNMAP)) {
648 gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
649 unmap_stage2_range(kvm, gpa, vm_end - vm_start);
652 } while (hva < reg_end);
656 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
657 * @kvm: The struct kvm pointer
659 * Go through the memregions and unmap any regular RAM
660 * backing memory already mapped to the VM.
662 void stage2_unmap_vm(struct kvm *kvm)
664 struct kvm_memslots *slots;
665 struct kvm_memory_slot *memslot;
668 idx = srcu_read_lock(&kvm->srcu);
669 spin_lock(&kvm->mmu_lock);
671 slots = kvm_memslots(kvm);
672 kvm_for_each_memslot(memslot, slots)
673 stage2_unmap_memslot(kvm, memslot);
675 spin_unlock(&kvm->mmu_lock);
676 srcu_read_unlock(&kvm->srcu, idx);
680 * kvm_free_stage2_pgd - free all stage-2 tables
681 * @kvm: The KVM struct pointer for the VM.
683 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
684 * underlying level-2 and level-3 tables before freeing the actual level-1 table
685 * and setting the struct pointer to NULL.
687 * Note we don't need locking here as this is only called when the VM is
688 * destroyed, which can only be done once.
690 void kvm_free_stage2_pgd(struct kvm *kvm)
692 if (kvm->arch.pgd == NULL)
695 unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
697 if (KVM_PREALLOC_LEVEL > 0)
698 kfree(kvm->arch.pgd);
700 free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
701 kvm->arch.pgd = NULL;
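/*
 * stage2_get_pud()/stage2_get_pmd() walk the stage-2 tables for @addr,
 * allocating any missing intermediate table from @cache on the way down.
 */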
704 static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
710 pgd = kvm->arch.pgd + pgd_index(addr);
711 if (WARN_ON(pgd_none(*pgd))) {
714 pud = mmu_memory_cache_alloc(cache);
715 pgd_populate(NULL, pgd, pud);
716 get_page(virt_to_page(pgd));
719 return pud_offset(pgd, addr);
722 static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
728 pud = stage2_get_pud(kvm, cache, addr);
729 if (pud_none(*pud)) {
732 pmd = mmu_memory_cache_alloc(cache);
733 pud_populate(NULL, pud, pmd);
734 get_page(virt_to_page(pud));
737 return pmd_offset(pud, addr);
740 static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
741 *cache, phys_addr_t addr, const pmd_t *new_pmd)
745 pmd = stage2_get_pmd(kvm, cache, addr);
749 * Mapping in huge pages should only happen through a fault. If a
750 * page is merged into a transparent huge page, the individual
751 * subpages of that huge page should be unmapped through MMU
752 * notifiers before we get here.
754 * Merging of CompoundPages is not supported; they should be split
755 * first, unmapped, merged, and mapped back in on-demand.
757 VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
760 kvm_set_pmd(pmd, *new_pmd);
761 if (pmd_present(old_pmd))
762 kvm_tlb_flush_vmid_ipa(kvm, addr);
764 get_page(virt_to_page(pmd));
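/*
 * Install a PTE-level stage-2 mapping at @addr, allocating a PTE table
 * from @cache if needed. With @iomap set, refuse to replace a PTE that
 * is already present.
 */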
768 static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
769 phys_addr_t addr, const pte_t *new_pte, bool iomap)
774 /* Create stage-2 page table mapping - Levels 0 and 1 */
775 pmd = stage2_get_pmd(kvm, cache, addr);
778 * Ignore calls from kvm_set_spte_hva for unallocated
784 /* Create stage-2 page mappings - Level 2 */
785 if (pmd_none(*pmd)) {
787 return 0; /* ignore calls from kvm_set_spte_hva */
788 pte = mmu_memory_cache_alloc(cache);
790 pmd_populate_kernel(NULL, pmd, pte);
791 get_page(virt_to_page(pmd));
794 pte = pte_offset_kernel(pmd, addr);
796 if (iomap && pte_present(*pte))
799 /* Create 2nd stage page table mapping - Level 3 */
801 kvm_set_pte(pte, *new_pte);
802 if (pte_present(old_pte))
803 kvm_tlb_flush_vmid_ipa(kvm, addr);
805 get_page(virt_to_page(pte));
811 * kvm_phys_addr_ioremap - map a device range to guest IPA
813 * @kvm: The KVM pointer
814 * @guest_ipa: The IPA at which to insert the mapping
815 * @pa: The physical address of the device
816 * @size: The size of the mapping
818 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
819 phys_addr_t pa, unsigned long size, bool writable)
821 phys_addr_t addr, end;
824 struct kvm_mmu_memory_cache cache = { 0, };
826 end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
827 pfn = __phys_to_pfn(pa);
829 for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
830 pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
833 kvm_set_s2pte_writable(&pte);
835 ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
839 spin_lock(&kvm->mmu_lock);
840 ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
841 spin_unlock(&kvm->mmu_lock);
849 mmu_free_memory_cache(&cache);
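/*
 * If the faulting page is part of a transparent huge page, move *pfnp
 * and *ipap back to the start of the huge page so the fault can be
 * mapped with a PMD-level block descriptor.
 */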
853 static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
856 gfn_t gfn = *ipap >> PAGE_SHIFT;
858 if (PageTransCompound(pfn_to_page(pfn))) {
861 * The address we faulted on is backed by a transparent huge
862 * page. However, because we map the compound huge page and
863 * not the individual tail page, we need to transfer the
864 * refcount to the head page. We have to be careful that the
865 * THP doesn't start to split while we are adjusting the
868 * We are sure this doesn't happen, because mmu_notifier_retry
869 * was successful and we are holding the mmu_lock, so if this
870 * THP is trying to split, it will be blocked in the mmu
871 * notifier before touching any of the pages, specifically
872 * before being able to call __split_huge_page_refcount().
874 * We can therefore safely transfer the refcount from PG_tail
875 * to PG_head and switch the pfn from a tail page to the head
878 mask = PTRS_PER_PMD - 1;
879 VM_BUG_ON((gfn & mask) != (pfn & mask));
882 kvm_release_pfn_clean(pfn);
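/* Was the fault caused by a write access? Instruction aborts never are. */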
894 static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
896 if (kvm_vcpu_trap_is_iabt(vcpu))
899 return kvm_vcpu_dabt_iswrite(vcpu);
902 static bool kvm_is_device_pfn(unsigned long pfn)
904 return !pfn_valid(pfn);
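/*
 * Handle a stage-2 fault on memory backed by a memslot: translate the
 * faulting gfn to a host page, pick a PMD or PTE mapping granularity,
 * perform the required cache maintenance and install the new mapping.
 */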
907 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
908 struct kvm_memory_slot *memslot, unsigned long hva,
909 unsigned long fault_status)
912 bool write_fault, writable, hugetlb = false, force_pte = false;
913 unsigned long mmu_seq;
914 gfn_t gfn = fault_ipa >> PAGE_SHIFT;
915 struct kvm *kvm = vcpu->kvm;
916 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
917 struct vm_area_struct *vma;
919 pgprot_t mem_type = PAGE_S2;
920 bool fault_ipa_uncached;
922 write_fault = kvm_is_write_fault(vcpu);
923 if (fault_status == FSC_PERM && !write_fault) {
924 kvm_err("Unexpected L2 read permission error\n");
928 /* Let's check if we will get back a huge page backed by hugetlbfs */
929 down_read(&current->mm->mmap_sem);
930 vma = find_vma_intersection(current->mm, hva, hva + 1);
931 if (unlikely(!vma)) {
932 kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
933 up_read(&current->mm->mmap_sem);
937 if (is_vm_hugetlb_page(vma)) {
939 gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
942 * Pages belonging to memslots that don't have the same
943 * alignment for userspace and IPA cannot be mapped using
944 * block descriptors even if the pages belong to a THP for
945 * the process, because the stage-2 block descriptor will
946 * cover more than a single THP and we lose atomicity for
947 * unmapping, updates, and splits of the THP or other pages
948 * in the stage-2 block range.
950 if ((memslot->userspace_addr & ~PMD_MASK) !=
951 ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
954 up_read(&current->mm->mmap_sem);
956 /* We need minimum second+third level pages */
957 ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
962 mmu_seq = vcpu->kvm->mmu_notifier_seq;
964 * Ensure the read of mmu_notifier_seq happens before we call
965 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
966 * the page we just got a reference to getting unmapped before we have a
967 * chance to grab the mmu_lock, which ensures that if the page gets
968 * unmapped afterwards, the call to kvm_unmap_hva will take it away
969 * from us again properly. This smp_rmb() interacts with the smp_wmb()
970 * in kvm_mmu_notifier_invalidate_<page|range_end>.
974 pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
975 if (is_error_pfn(pfn))
978 if (kvm_is_device_pfn(pfn))
979 mem_type = PAGE_S2_DEVICE;
981 spin_lock(&kvm->mmu_lock);
982 if (mmu_notifier_retry(kvm, mmu_seq))
984 if (!hugetlb && !force_pte)
985 hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
987 fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT;
990 pmd_t new_pmd = pfn_pmd(pfn, mem_type);
991 new_pmd = pmd_mkhuge(new_pmd);
993 kvm_set_s2pmd_writable(&new_pmd);
994 kvm_set_pfn_dirty(pfn);
996 coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
998 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
1000 pte_t new_pte = pfn_pte(pfn, mem_type);
1002 kvm_set_s2pte_writable(&new_pte);
1003 kvm_set_pfn_dirty(pfn);
1005 coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
1006 fault_ipa_uncached);
1007 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
1008 pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
1013 spin_unlock(&kvm->mmu_lock);
1014 kvm_release_pfn_clean(pfn);
1019 * kvm_handle_guest_abort - handles all 2nd stage aborts
1020 * @vcpu: the VCPU pointer
1021 * @run: the kvm_run structure
1023 * Any abort that gets to the host is almost guaranteed to be caused by a
1024 * missing second stage translation table entry, which can mean one of two
1025 * things: either the guest simply needs more memory and we must allocate an
1026 * appropriate page, or the guest tried to access I/O memory, which is
1027 * emulated by user space. The distinction is based on the faulting IPA and whether this
1028 * memory region has been registered as standard RAM by user space.
1030 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
1032 unsigned long fault_status;
1033 phys_addr_t fault_ipa;
1034 struct kvm_memory_slot *memslot;
1036 bool is_iabt, write_fault, writable;
1040 is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
1041 fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
1043 trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
1044 kvm_vcpu_get_hfar(vcpu), fault_ipa);
1046 /* Check that the stage-2 fault is a translation fault or a permission fault */
1047 fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
1048 if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
1049 kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
1050 kvm_vcpu_trap_get_class(vcpu),
1051 (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
1052 (unsigned long)kvm_vcpu_get_hsr(vcpu));
1056 idx = srcu_read_lock(&vcpu->kvm->srcu);
1058 gfn = fault_ipa >> PAGE_SHIFT;
1059 memslot = gfn_to_memslot(vcpu->kvm, gfn);
1060 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
1061 write_fault = kvm_is_write_fault(vcpu);
1062 if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
1064 /* Prefetch Abort on I/O address */
1065 kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
1071 * The IPA is reported as [MAX:12], so we need to
1072 * complement it with the bottom 12 bits from the
1073 * faulting VA. This is always 12 bits, irrespective
1076 fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
1077 ret = io_mem_abort(vcpu, run, fault_ipa);
1081 /* Userspace should not be able to register out-of-bounds IPAs */
1082 VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
1084 ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
1088 srcu_read_unlock(&vcpu->kvm->srcu, idx);
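/*
 * Apply @handler to every guest physical address backed by a host
 * virtual address in [start, end), iterating over all memslots that
 * intersect the range.
 */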
1092 static void handle_hva_to_gpa(struct kvm *kvm,
1093 unsigned long start,
1095 void (*handler)(struct kvm *kvm,
1096 gpa_t gpa, void *data),
1099 struct kvm_memslots *slots;
1100 struct kvm_memory_slot *memslot;
1102 slots = kvm_memslots(kvm);
1104 /* we only care about the pages that the guest sees */
1105 kvm_for_each_memslot(memslot, slots) {
1106 unsigned long hva_start, hva_end;
1109 hva_start = max(start, memslot->userspace_addr);
1110 hva_end = min(end, memslot->userspace_addr +
1111 (memslot->npages << PAGE_SHIFT));
1112 if (hva_start >= hva_end)
1116 * {gfn(page) | page intersects with [hva_start, hva_end)} =
1117 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
1119 gfn = hva_to_gfn_memslot(hva_start, memslot);
1120 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
1122 for (; gfn < gfn_end; ++gfn) {
1123 gpa_t gpa = gfn << PAGE_SHIFT;
1124 handler(kvm, gpa, data);
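/*
 * MMU notifier callbacks: the host is about to unmap or change a page,
 * so tear down or update the corresponding stage-2 mappings.
 */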
1129 static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
1131 unmap_stage2_range(kvm, gpa, PAGE_SIZE);
1134 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
1136 unsigned long end = hva + PAGE_SIZE;
1141 trace_kvm_unmap_hva(hva);
1142 handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
1146 int kvm_unmap_hva_range(struct kvm *kvm,
1147 unsigned long start, unsigned long end)
1152 trace_kvm_unmap_hva_range(start, end);
1153 handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
1157 static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
1159 pte_t *pte = (pte_t *)data;
1161 stage2_set_pte(kvm, NULL, gpa, pte, false);
1165 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
1167 unsigned long end = hva + PAGE_SIZE;
1173 trace_kvm_set_spte_hva(hva);
1174 stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
1175 handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
1178 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
1180 mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
1183 phys_addr_t kvm_mmu_get_httbr(void)
1185 return virt_to_phys(hyp_pgd);
1188 phys_addr_t kvm_mmu_get_boot_httbr(void)
1190 return virt_to_phys(boot_hyp_pgd);
1193 phys_addr_t kvm_get_idmap_vector(void)
1195 return hyp_idmap_vector;
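/*
 * Set up the boot and runtime HYP page tables: identity-map the HYP init
 * code (via a bounce page if it crosses a page boundary) and map the
 * trampoline page into both sets of tables.
 */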
1198 int kvm_mmu_init(void)
1202 hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
1203 hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
1204 hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
1206 if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
1208 * Our init code is crossing a page boundary. Allocate
1209 * a bounce page, copy the code over and use that.
1211 size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
1212 phys_addr_t phys_base;
1214 init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
1215 if (!init_bounce_page) {
1216 kvm_err("Couldn't allocate HYP init bounce page\n");
1221 memcpy(init_bounce_page, __hyp_idmap_text_start, len);
1223 * Warning: the code we just copied to the bounce page
1224 * must be flushed to the point of coherency.
1225 * Otherwise, the data may be sitting in L2, and HYP
1226 * mode won't be able to observe it as it runs with
1227 * caches off at that point.
1229 kvm_flush_dcache_to_poc(init_bounce_page, len);
1231 phys_base = kvm_virt_to_phys(init_bounce_page);
1232 hyp_idmap_vector += phys_base - hyp_idmap_start;
1233 hyp_idmap_start = phys_base;
1234 hyp_idmap_end = phys_base + len;
1236 kvm_info("Using HYP init bounce page @%lx\n",
1237 (unsigned long)phys_base);
1240 hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
1241 boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
1243 if (!hyp_pgd || !boot_hyp_pgd) {
1244 kvm_err("Hyp mode PGD not allocated\n");
1249 /* Create the idmap in the boot page tables */
1250 err = __create_hyp_mappings(boot_hyp_pgd,
1251 hyp_idmap_start, hyp_idmap_end,
1252 __phys_to_pfn(hyp_idmap_start),
1256 kvm_err("Failed to idmap %lx-%lx\n",
1257 hyp_idmap_start, hyp_idmap_end);
1261 /* Map the very same page at the trampoline VA */
1262 err = __create_hyp_mappings(boot_hyp_pgd,
1263 TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
1264 __phys_to_pfn(hyp_idmap_start),
1267 kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n",
1272 /* Map the same page again into the runtime page tables */
1273 err = __create_hyp_mappings(hyp_pgd,
1274 TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
1275 __phys_to_pfn(hyp_idmap_start),
1278 kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n",
1289 void kvm_arch_commit_memory_region(struct kvm *kvm,
1290 struct kvm_userspace_memory_region *mem,
1291 const struct kvm_memory_slot *old,
1292 enum kvm_mr_change change)
1296 int kvm_arch_prepare_memory_region(struct kvm *kvm,
1297 struct kvm_memory_slot *memslot,
1298 struct kvm_userspace_memory_region *mem,
1299 enum kvm_mr_change change)
1301 hva_t hva = mem->userspace_addr;
1302 hva_t reg_end = hva + mem->memory_size;
1303 bool writable = !(mem->flags & KVM_MEM_READONLY);
1306 if (change != KVM_MR_CREATE && change != KVM_MR_MOVE)
1310 * Prevent userspace from creating a memory region outside of the IPA
1311 * space addressable by the guest.
1313 if (memslot->base_gfn + memslot->npages >=
1314 (KVM_PHYS_SIZE >> PAGE_SHIFT))
1318 * A memory region could potentially cover multiple VMAs, and any holes
1319 * between them, so iterate over all of them to find out if we can map
1320 * any of them right now.
1322 * +--------------------------------------------+
1323 * +---------------+----------------+ +----------------+
1324 * | : VMA 1 | VMA 2 | | VMA 3 : |
1325 * +---------------+----------------+ +----------------+
1327 * +--------------------------------------------+
1330 struct vm_area_struct *vma = find_vma(current->mm, hva);
1331 hva_t vm_start, vm_end;
1333 if (!vma || vma->vm_start >= reg_end)
1337 * Mapping a read-only VMA is only allowed if the
1338 * memory region is configured as read-only.
1340 if (writable && !(vma->vm_flags & VM_WRITE)) {
1346 * Take the intersection of this VMA with the memory region
1348 vm_start = max(hva, vma->vm_start);
1349 vm_end = min(reg_end, vma->vm_end);
1351 if (vma->vm_flags & VM_PFNMAP) {
1352 gpa_t gpa = mem->guest_phys_addr +
1353 (vm_start - mem->userspace_addr);
1354 phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
1355 vm_start - vma->vm_start;
1357 ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
1364 } while (hva < reg_end);
1366 spin_lock(&kvm->mmu_lock);
1368 unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
1370 stage2_flush_memslot(kvm, memslot);
1371 spin_unlock(&kvm->mmu_lock);
1375 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
1376 struct kvm_memory_slot *dont)
1380 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
1381 unsigned long npages)
1384 * Readonly memslots are not incoherent with the caches by definition,
1385 * but in practice, they are used mostly to emulate ROMs or NOR flashes
1386 * that the guest may consider devices and hence map as uncached.
1387 * To prevent incoherency issues in these cases, tag all readonly
1388 * regions as incoherent.
1390 if (slot->flags & KVM_MEM_READONLY)
1391 slot->flags |= KVM_MEMSLOT_INCOHERENT;
1395 void kvm_arch_memslots_updated(struct kvm *kvm)
1399 void kvm_arch_flush_shadow_all(struct kvm *kvm)
1403 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
1404 struct kvm_memory_slot *slot)
1406 gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
1407 phys_addr_t size = slot->npages << PAGE_SHIFT;
1409 spin_lock(&kvm->mmu_lock);
1410 unmap_stage2_range(kvm, gpa, size);
1411 spin_unlock(&kvm->mmu_lock);