arch/arm/kvm/mmu.c
1 /*
2  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License, version 2, as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
17  */
18
19 #include <linux/mman.h>
20 #include <linux/kvm_host.h>
21 #include <linux/io.h>
22 #include <linux/hugetlb.h>
23 #include <trace/events/kvm.h>
24 #include <asm/pgalloc.h>
25 #include <asm/cacheflush.h>
26 #include <asm/kvm_arm.h>
27 #include <asm/kvm_mmu.h>
28 #include <asm/kvm_mmio.h>
29 #include <asm/kvm_asm.h>
30 #include <asm/kvm_emulate.h>
31
32 #include "trace.h"
33
34 extern char  __hyp_idmap_text_start[], __hyp_idmap_text_end[];
35
36 static pgd_t *boot_hyp_pgd;
37 static pgd_t *hyp_pgd;
38 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
39
40 static void *init_bounce_page;
41 static unsigned long hyp_idmap_start;
42 static unsigned long hyp_idmap_end;
43 static phys_addr_t hyp_idmap_vector;
44
45 #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
46
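/* True for hugetlbfs section mappings as well as transparent huge pages. */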
47 #define kvm_pmd_huge(_x)        (pmd_huge(_x) || pmd_trans_huge(_x))
48
49 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
50 {
51         /*
52          * This function also gets called when dealing with HYP page
53          * tables. As HYP doesn't have an associated struct kvm (and
54          * the HYP page tables are fairly static), we don't do
55          * anything there.
56          */
57         if (kvm)
58                 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
59 }
60
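/*
 * Pre-fill @cache with free pages.  The entries are consumed by the
 * stage-2 table code, which runs under the mmu_lock spinlock and
 * therefore cannot allocate memory itself.
 */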
61 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
62                                   int min, int max)
63 {
64         void *page;
65
66         BUG_ON(max > KVM_NR_MEM_OBJS);
67         if (cache->nobjs >= min)
68                 return 0;
69         while (cache->nobjs < max) {
70                 page = (void *)__get_free_page(PGALLOC_GFP);
71                 if (!page)
72                         return -ENOMEM;
73                 cache->objects[cache->nobjs++] = page;
74         }
75         return 0;
76 }
77
78 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
79 {
80         while (mc->nobjs)
81                 free_page((unsigned long)mc->objects[--mc->nobjs]);
82 }
83
84 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
85 {
86         void *p;
87
88         BUG_ON(!mc || !mc->nobjs);
89         p = mc->objects[--mc->nobjs];
90         return p;
91 }
92
93 static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
94 {
95         pud_t *pud_table __maybe_unused = pud_offset(pgd, 0);
96         pgd_clear(pgd);
97         kvm_tlb_flush_vmid_ipa(kvm, addr);
98         pud_free(NULL, pud_table);
99         put_page(virt_to_page(pgd));
100 }
101
102 static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
103 {
104         pmd_t *pmd_table = pmd_offset(pud, 0);
105         VM_BUG_ON(pud_huge(*pud));
106         pud_clear(pud);
107         kvm_tlb_flush_vmid_ipa(kvm, addr);
108         pmd_free(NULL, pmd_table);
109         put_page(virt_to_page(pud));
110 }
111
112 static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
113 {
114         pte_t *pte_table = pte_offset_kernel(pmd, 0);
115         VM_BUG_ON(kvm_pmd_huge(*pmd));
116         pmd_clear(pmd);
117         kvm_tlb_flush_vmid_ipa(kvm, addr);
118         pte_free_kernel(NULL, pte_table);
119         put_page(virt_to_page(pmd));
120 }
121
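/*
 * Clear every present pte in [addr, end), dropping the page refcount and
 * flushing the TLB for each entry, and free the pte table itself once it
 * is empty.
 */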
122 static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
123                        phys_addr_t addr, phys_addr_t end)
124 {
125         phys_addr_t start_addr = addr;
126         pte_t *pte, *start_pte;
127
128         start_pte = pte = pte_offset_kernel(pmd, addr);
129         do {
130                 if (!pte_none(*pte)) {
131                         kvm_set_pte(pte, __pte(0));
132                         put_page(virt_to_page(pte));
133                         kvm_tlb_flush_vmid_ipa(kvm, addr);
134                 }
135         } while (pte++, addr += PAGE_SIZE, addr != end);
136
137         if (kvm_pte_table_empty(kvm, start_pte))
138                 clear_pmd_entry(kvm, pmd, start_addr);
139 }
140
141 static void unmap_pmds(struct kvm *kvm, pud_t *pud,
142                        phys_addr_t addr, phys_addr_t end)
143 {
144         phys_addr_t next, start_addr = addr;
145         pmd_t *pmd, *start_pmd;
146
147         start_pmd = pmd = pmd_offset(pud, addr);
148         do {
149                 next = kvm_pmd_addr_end(addr, end);
150                 if (!pmd_none(*pmd)) {
151                         if (kvm_pmd_huge(*pmd)) {
152                                 pmd_clear(pmd);
153                                 kvm_tlb_flush_vmid_ipa(kvm, addr);
154                                 put_page(virt_to_page(pmd));
155                         } else {
156                                 unmap_ptes(kvm, pmd, addr, next);
157                         }
158                 }
159         } while (pmd++, addr = next, addr != end);
160
161         if (kvm_pmd_table_empty(kvm, start_pmd))
162                 clear_pud_entry(kvm, pud, start_addr);
163 }
164
165 static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
166                        phys_addr_t addr, phys_addr_t end)
167 {
168         phys_addr_t next, start_addr = addr;
169         pud_t *pud, *start_pud;
170
171         start_pud = pud = pud_offset(pgd, addr);
172         do {
173                 next = kvm_pud_addr_end(addr, end);
174                 if (!pud_none(*pud)) {
175                         if (pud_huge(*pud)) {
176                                 pud_clear(pud);
177                                 kvm_tlb_flush_vmid_ipa(kvm, addr);
178                                 put_page(virt_to_page(pud));
179                         } else {
180                                 unmap_pmds(kvm, pud, addr, next);
181                         }
182                 }
183         } while (pud++, addr = next, addr != end);
184
185         if (kvm_pud_table_empty(kvm, start_pud))
186                 clear_pgd_entry(kvm, pgd, start_addr);
187 }
188
189
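/*
 * Tear down all the mappings covering [start, start + size).  This is used
 * both for the HYP page tables (kvm == NULL) and for the stage-2 tables of
 * a guest.
 */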
190 static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
191                         phys_addr_t start, u64 size)
192 {
193         pgd_t *pgd;
194         phys_addr_t addr = start, end = start + size;
195         phys_addr_t next;
196
197         pgd = pgdp + pgd_index(addr);
198         do {
199                 next = kvm_pgd_addr_end(addr, end);
200                 if (!pgd_none(*pgd))
201                         unmap_puds(kvm, pgd, addr, next);
202         } while (pgd++, addr = next, addr != end);
203 }
204
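/*
 * Clean the data cache to the point of coherency for every guest page
 * mapped in [addr, end), using the corresponding userspace alias.
 */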
205 static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
206                               phys_addr_t addr, phys_addr_t end)
207 {
208         pte_t *pte;
209
210         pte = pte_offset_kernel(pmd, addr);
211         do {
212                 if (!pte_none(*pte)) {
213                         hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
214                         kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
215                 }
216         } while (pte++, addr += PAGE_SIZE, addr != end);
217 }
218
219 static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
220                               phys_addr_t addr, phys_addr_t end)
221 {
222         pmd_t *pmd;
223         phys_addr_t next;
224
225         pmd = pmd_offset(pud, addr);
226         do {
227                 next = kvm_pmd_addr_end(addr, end);
228                 if (!pmd_none(*pmd)) {
229                         if (kvm_pmd_huge(*pmd)) {
230                                 hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
231                                 kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
232                         } else {
233                                 stage2_flush_ptes(kvm, pmd, addr, next);
234                         }
235                 }
236         } while (pmd++, addr = next, addr != end);
237 }
238
239 static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
240                               phys_addr_t addr, phys_addr_t end)
241 {
242         pud_t *pud;
243         phys_addr_t next;
244
245         pud = pud_offset(pgd, addr);
246         do {
247                 next = kvm_pud_addr_end(addr, end);
248                 if (!pud_none(*pud)) {
249                         if (pud_huge(*pud)) {
250                                 hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
251                                 kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
252                         } else {
253                                 stage2_flush_pmds(kvm, pud, addr, next);
254                         }
255                 }
256         } while (pud++, addr = next, addr != end);
257 }
258
259 static void stage2_flush_memslot(struct kvm *kvm,
260                                  struct kvm_memory_slot *memslot)
261 {
262         phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
263         phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
264         phys_addr_t next;
265         pgd_t *pgd;
266
267         pgd = kvm->arch.pgd + pgd_index(addr);
268         do {
269                 next = kvm_pgd_addr_end(addr, end);
270                 stage2_flush_puds(kvm, pgd, addr, next);
271         } while (pgd++, addr = next, addr != end);
272 }
273
274 /**
275  * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
276  * @kvm: The struct kvm pointer
277  *
278  * Go through the stage 2 page tables and invalidate any cache lines
279  * backing memory already mapped to the VM.
280  */
281 void stage2_flush_vm(struct kvm *kvm)
282 {
283         struct kvm_memslots *slots;
284         struct kvm_memory_slot *memslot;
285         int idx;
286
287         idx = srcu_read_lock(&kvm->srcu);
288         spin_lock(&kvm->mmu_lock);
289
290         slots = kvm_memslots(kvm);
291         kvm_for_each_memslot(memslot, slots)
292                 stage2_flush_memslot(kvm, memslot);
293
294         spin_unlock(&kvm->mmu_lock);
295         srcu_read_unlock(&kvm->srcu, idx);
296 }
297
298 /**
299  * free_boot_hyp_pgd - free HYP boot page tables
300  *
301  * Free the HYP boot page tables. The bounce page is also freed.
302  */
303 void free_boot_hyp_pgd(void)
304 {
305         mutex_lock(&kvm_hyp_pgd_mutex);
306
307         if (boot_hyp_pgd) {
308                 unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
309                 unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
310                 free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
311                 boot_hyp_pgd = NULL;
312         }
313
314         if (hyp_pgd)
315                 unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
316
317         free_page((unsigned long)init_bounce_page);
318         init_bounce_page = NULL;
319
320         mutex_unlock(&kvm_hyp_pgd_mutex);
321 }
322
323 /**
324  * free_hyp_pgds - free Hyp-mode page tables
325  *
326  * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
327  * therefore contains either mappings in the kernel memory area (above
328  * PAGE_OFFSET), or device mappings in the vmalloc range (from
329  * VMALLOC_START to VMALLOC_END).
330  *
331  * boot_hyp_pgd should only map two pages for the init code.
332  */
333 void free_hyp_pgds(void)
334 {
335         unsigned long addr;
336
337         free_boot_hyp_pgd();
338
339         mutex_lock(&kvm_hyp_pgd_mutex);
340
341         if (hyp_pgd) {
342                 for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
343                         unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
344                 for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
345                         unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
346
347                 free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
348                 hyp_pgd = NULL;
349         }
350
351         mutex_unlock(&kvm_hyp_pgd_mutex);
352 }
353
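/*
 * Install HYP ptes mapping [start, end) to consecutive pages starting at
 * @pfn, taking a reference on the pte table for each entry and cleaning
 * the updated entries to the point of coherency.
 */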
354 static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
355                                     unsigned long end, unsigned long pfn,
356                                     pgprot_t prot)
357 {
358         pte_t *pte;
359         unsigned long addr;
360
361         addr = start;
362         do {
363                 pte = pte_offset_kernel(pmd, addr);
364                 kvm_set_pte(pte, pfn_pte(pfn, prot));
365                 get_page(virt_to_page(pte));
366                 kvm_flush_dcache_to_poc(pte, sizeof(*pte));
367                 pfn++;
368         } while (addr += PAGE_SIZE, addr != end);
369 }
370
371 static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
372                                    unsigned long end, unsigned long pfn,
373                                    pgprot_t prot)
374 {
375         pmd_t *pmd;
376         pte_t *pte;
377         unsigned long addr, next;
378
379         addr = start;
380         do {
381                 pmd = pmd_offset(pud, addr);
382
383                 BUG_ON(pmd_sect(*pmd));
384
385                 if (pmd_none(*pmd)) {
386                         pte = pte_alloc_one_kernel(NULL, addr);
387                         if (!pte) {
388                                 kvm_err("Cannot allocate Hyp pte\n");
389                                 return -ENOMEM;
390                         }
391                         pmd_populate_kernel(NULL, pmd, pte);
392                         get_page(virt_to_page(pmd));
393                         kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
394                 }
395
396                 next = pmd_addr_end(addr, end);
397
398                 create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
399                 pfn += (next - addr) >> PAGE_SHIFT;
400         } while (addr = next, addr != end);
401
402         return 0;
403 }
404
405 static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
406                                    unsigned long end, unsigned long pfn,
407                                    pgprot_t prot)
408 {
409         pud_t *pud;
410         pmd_t *pmd;
411         unsigned long addr, next;
412         int ret;
413
414         addr = start;
415         do {
416                 pud = pud_offset(pgd, addr);
417
418                 if (pud_none_or_clear_bad(pud)) {
419                         pmd = pmd_alloc_one(NULL, addr);
420                         if (!pmd) {
421                                 kvm_err("Cannot allocate Hyp pmd\n");
422                                 return -ENOMEM;
423                         }
424                         pud_populate(NULL, pud, pmd);
425                         get_page(virt_to_page(pud));
426                         kvm_flush_dcache_to_poc(pud, sizeof(*pud));
427                 }
428
429                 next = pud_addr_end(addr, end);
430                 ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
431                 if (ret)
432                         return ret;
433                 pfn += (next - addr) >> PAGE_SHIFT;
434         } while (addr = next, addr != end);
435
436         return 0;
437 }
438
439 static int __create_hyp_mappings(pgd_t *pgdp,
440                                  unsigned long start, unsigned long end,
441                                  unsigned long pfn, pgprot_t prot)
442 {
443         pgd_t *pgd;
444         pud_t *pud;
445         unsigned long addr, next;
446         int err = 0;
447
448         mutex_lock(&kvm_hyp_pgd_mutex);
449         addr = start & PAGE_MASK;
450         end = PAGE_ALIGN(end);
451         do {
452                 pgd = pgdp + pgd_index(addr);
453
454                 if (pgd_none(*pgd)) {
455                         pud = pud_alloc_one(NULL, addr);
456                         if (!pud) {
457                                 kvm_err("Cannot allocate Hyp pud\n");
458                                 err = -ENOMEM;
459                                 goto out;
460                         }
461                         pgd_populate(NULL, pgd, pud);
462                         get_page(virt_to_page(pgd));
463                         kvm_flush_dcache_to_poc(pgd, sizeof(*pgd));
464                 }
465
466                 next = pgd_addr_end(addr, end);
467                 err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
468                 if (err)
469                         goto out;
470                 pfn += (next - addr) >> PAGE_SHIFT;
471         } while (addr = next, addr != end);
472 out:
473         mutex_unlock(&kvm_hyp_pgd_mutex);
474         return err;
475 }
476
477 static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
478 {
479         if (!is_vmalloc_addr(kaddr)) {
480                 BUG_ON(!virt_addr_valid(kaddr));
481                 return __pa(kaddr);
482         } else {
483                 return page_to_phys(vmalloc_to_page(kaddr)) +
484                        offset_in_page(kaddr);
485         }
486 }
487
488 /**
489  * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
490  * @from:       The virtual kernel start address of the range
491  * @to:         The virtual kernel end address of the range (exclusive)
492  *
493  * The range is mapped in Hyp-mode at the same virtual address as in the
494  * kernel (modulo HYP_PAGE_OFFSET), backed by the same underlying physical
495  * pages.
496  */
497 int create_hyp_mappings(void *from, void *to)
498 {
499         phys_addr_t phys_addr;
500         unsigned long virt_addr;
501         unsigned long start = KERN_TO_HYP((unsigned long)from);
502         unsigned long end = KERN_TO_HYP((unsigned long)to);
503
504         start = start & PAGE_MASK;
505         end = PAGE_ALIGN(end);
506
507         for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
508                 int err;
509
510                 phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
511                 err = __create_hyp_mappings(hyp_pgd, virt_addr,
512                                             virt_addr + PAGE_SIZE,
513                                             __phys_to_pfn(phys_addr),
514                                             PAGE_HYP);
515                 if (err)
516                         return err;
517         }
518
519         return 0;
520 }
521
522 /**
523  * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
524  * @from:       The kernel start VA of the range
525  * @to:         The kernel end VA of the range (exclusive)
526  * @phys_addr:  The physical start address which gets mapped
527  *
528  * The resulting HYP VA is the same as the kernel VA, modulo
529  * HYP_PAGE_OFFSET.
530  */
531 int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
532 {
533         unsigned long start = KERN_TO_HYP((unsigned long)from);
534         unsigned long end = KERN_TO_HYP((unsigned long)to);
535
536         /* Check for a valid kernel IO mapping */
537         if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
538                 return -EINVAL;
539
540         return __create_hyp_mappings(hyp_pgd, start, end,
541                                      __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
542 }
543
544 /**
545  * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
546  * @kvm:        The KVM struct pointer for the VM.
547  *
548  * Allocates only the 1st level table, of the size defined by S2_PGD_ORDER
549  * (it can cover either the full 40-bit input address space or be limited
550  * to 32-bit input addresses). Clears the allocated pages.
551  *
552  * Note we don't need locking here as this is only called when the VM is
553  * created, which can only be done once.
554  */
555 int kvm_alloc_stage2_pgd(struct kvm *kvm)
556 {
557         int ret;
558         pgd_t *pgd;
559
560         if (kvm->arch.pgd != NULL) {
561                 kvm_err("kvm_arch already initialized?\n");
562                 return -EINVAL;
563         }
564
565         if (KVM_PREALLOC_LEVEL > 0) {
566                 /*
567                  * Allocate fake pgd for the page table manipulation macros to
568                  * work.  This is not used by the hardware and we have no
569                  * alignment requirement for this allocation.
570                  */
571                 pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
572                                        GFP_KERNEL | __GFP_ZERO);
573         } else {
574                 /*
575                  * Allocate actual first-level Stage-2 page table used by the
576                  * hardware for Stage-2 page table walks.
577                  */
578                 pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER);
579         }
580
581         if (!pgd)
582                 return -ENOMEM;
583
584         ret = kvm_prealloc_hwpgd(kvm, pgd);
585         if (ret)
586                 goto out_err;
587
588         kvm_clean_pgd(pgd);
589         kvm->arch.pgd = pgd;
590         return 0;
591 out_err:
592         if (KVM_PREALLOC_LEVEL > 0)
593                 kfree(pgd);
594         else
595                 free_pages((unsigned long)pgd, S2_PGD_ORDER);
596         return ret;
597 }
598
599 /**
600  * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
601  * @kvm:   The VM pointer
602  * @start: The intermediate physical base address of the range to unmap
603  * @size:  The size of the area to unmap
604  *
605  * Clear a range of stage-2 mappings, lowering the various ref-counts.  Must
606  * be called while holding mmu_lock (except when freeing the stage2 pgd before
607  * destroying the VM), otherwise another faulting VCPU may come in and mess
608  * with things behind our backs.
609  */
610 static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
611 {
612         unmap_range(kvm, kvm->arch.pgd, start, size);
613 }
614
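/*
 * Remove the stage-2 mappings backing the RAM portions of a memslot;
 * VM_PFNMAP (device) VMAs are skipped and left mapped.
 */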
615 static void stage2_unmap_memslot(struct kvm *kvm,
616                                  struct kvm_memory_slot *memslot)
617 {
618         hva_t hva = memslot->userspace_addr;
619         phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
620         phys_addr_t size = PAGE_SIZE * memslot->npages;
621         hva_t reg_end = hva + size;
622
623         /*
624          * A memory region could potentially cover multiple VMAs, and any holes
625          * between them, so iterate over all of them to find out if we should
626          * unmap any of them.
627          *
628          *     +--------------------------------------------+
629          * +---------------+----------------+   +----------------+
630          * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
631          * +---------------+----------------+   +----------------+
632          *     |               memory region                |
633          *     +--------------------------------------------+
634          */
635         do {
636                 struct vm_area_struct *vma = find_vma(current->mm, hva);
637                 hva_t vm_start, vm_end;
638
639                 if (!vma || vma->vm_start >= reg_end)
640                         break;
641
642                 /*
643                  * Take the intersection of this VMA with the memory region
644                  */
645                 vm_start = max(hva, vma->vm_start);
646                 vm_end = min(reg_end, vma->vm_end);
647
648                 if (!(vma->vm_flags & VM_PFNMAP)) {
649                         gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
650                         unmap_stage2_range(kvm, gpa, vm_end - vm_start);
651                 }
652                 hva = vm_end;
653         } while (hva < reg_end);
654 }
655
656 /**
657  * stage2_unmap_vm - Unmap Stage-2 RAM mappings
658  * @kvm: The struct kvm pointer
659  *
660  * Go through the memregions and unmap any regular RAM
661  * backing memory already mapped to the VM.
662  */
663 void stage2_unmap_vm(struct kvm *kvm)
664 {
665         struct kvm_memslots *slots;
666         struct kvm_memory_slot *memslot;
667         int idx;
668
669         idx = srcu_read_lock(&kvm->srcu);
670         spin_lock(&kvm->mmu_lock);
671
672         slots = kvm_memslots(kvm);
673         kvm_for_each_memslot(memslot, slots)
674                 stage2_unmap_memslot(kvm, memslot);
675
676         spin_unlock(&kvm->mmu_lock);
677         srcu_read_unlock(&kvm->srcu, idx);
678 }
679
680 /**
681  * kvm_free_stage2_pgd - free all stage-2 tables
682  * @kvm:        The KVM struct pointer for the VM.
683  *
684  * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
685  * underlying level-2 and level-3 tables before freeing the actual level-1 table
686  * and setting the struct pointer to NULL.
687  *
688  * Note we don't need locking here as this is only called when the VM is
689  * destroyed, which can only be done once.
690  */
691 void kvm_free_stage2_pgd(struct kvm *kvm)
692 {
693         if (kvm->arch.pgd == NULL)
694                 return;
695
696         unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
697         kvm_free_hwpgd(kvm);
698         if (KVM_PREALLOC_LEVEL > 0)
699                 kfree(kvm->arch.pgd);
700         else
701                 free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
702         kvm->arch.pgd = NULL;
703 }
704
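/*
 * The two walkers below return the pud/pmd entry covering @addr,
 * allocating any missing intermediate table from @cache and taking a
 * reference on the parent table page.  Without a cache they return NULL
 * instead of allocating.
 */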
705 static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
706                              phys_addr_t addr)
707 {
708         pgd_t *pgd;
709         pud_t *pud;
710
711         pgd = kvm->arch.pgd + pgd_index(addr);
712         if (WARN_ON(pgd_none(*pgd))) {
713                 if (!cache)
714                         return NULL;
715                 pud = mmu_memory_cache_alloc(cache);
716                 pgd_populate(NULL, pgd, pud);
717                 get_page(virt_to_page(pgd));
718         }
719
720         return pud_offset(pgd, addr);
721 }
722
723 static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
724                              phys_addr_t addr)
725 {
726         pud_t *pud;
727         pmd_t *pmd;
728
729         pud = stage2_get_pud(kvm, cache, addr);
730         if (pud_none(*pud)) {
731                 if (!cache)
732                         return NULL;
733                 pmd = mmu_memory_cache_alloc(cache);
734                 pud_populate(NULL, pud, pmd);
735                 get_page(virt_to_page(pud));
736         }
737
738         return pmd_offset(pud, addr);
739 }
740
741 static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
742                                *cache, phys_addr_t addr, const pmd_t *new_pmd)
743 {
744         pmd_t *pmd, old_pmd;
745
746         pmd = stage2_get_pmd(kvm, cache, addr);
747         VM_BUG_ON(!pmd);
748
749         /*
750          * Mapping in huge pages should only happen through a fault.  If a
751          * page is merged into a transparent huge page, the individual
752          * subpages of that huge page should be unmapped through MMU
753          * notifiers before we get here.
754          *
755          * Merging of CompoundPages is not supported; they should instead
756          * be split first, unmapped, merged, and mapped back in on demand.
757          */
758         VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
759
760         old_pmd = *pmd;
761         kvm_set_pmd(pmd, *new_pmd);
762         if (pmd_present(old_pmd))
763                 kvm_tlb_flush_vmid_ipa(kvm, addr);
764         else
765                 get_page(virt_to_page(pmd));
766         return 0;
767 }
768
769 static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
770                           phys_addr_t addr, const pte_t *new_pte, bool iomap)
771 {
772         pmd_t *pmd;
773         pte_t *pte, old_pte;
774
775         /* Create stage-2 page table mapping - Levels 0 and 1 */
776         pmd = stage2_get_pmd(kvm, cache, addr);
777         if (!pmd) {
778                 /*
779                  * Ignore calls from kvm_set_spte_hva for unallocated
780                  * address ranges.
781                  */
782                 return 0;
783         }
784
785         /* Create stage-2 page mappings - Level 2 */
786         if (pmd_none(*pmd)) {
787                 if (!cache)
788                         return 0; /* ignore calls from kvm_set_spte_hva */
789                 pte = mmu_memory_cache_alloc(cache);
790                 kvm_clean_pte(pte);
791                 pmd_populate_kernel(NULL, pmd, pte);
792                 get_page(virt_to_page(pmd));
793         }
794
795         pte = pte_offset_kernel(pmd, addr);
796
797         if (iomap && pte_present(*pte))
798                 return -EFAULT;
799
800         /* Create 2nd stage page table mapping - Level 3 */
801         old_pte = *pte;
802         kvm_set_pte(pte, *new_pte);
803         if (pte_present(old_pte))
804                 kvm_tlb_flush_vmid_ipa(kvm, addr);
805         else
806                 get_page(virt_to_page(pte));
807
808         return 0;
809 }
810
811 /**
812  * kvm_phys_addr_ioremap - map a device range to guest IPA
813  *
814  * @kvm:        The KVM pointer
815  * @guest_ipa:  The IPA at which to insert the mapping
816  * @pa:         The physical address of the device
817  * @size:       The size of the mapping
818  */
819 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
820                           phys_addr_t pa, unsigned long size, bool writable)
821 {
822         phys_addr_t addr, end;
823         int ret = 0;
824         unsigned long pfn;
825         struct kvm_mmu_memory_cache cache = { 0, };
826
827         end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
828         pfn = __phys_to_pfn(pa);
829
830         for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
831                 pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
832
833                 if (writable)
834                         kvm_set_s2pte_writable(&pte);
835
836                 ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
837                                                 KVM_NR_MEM_OBJS);
838                 if (ret)
839                         goto out;
840                 spin_lock(&kvm->mmu_lock);
841                 ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
842                 spin_unlock(&kvm->mmu_lock);
843                 if (ret)
844                         goto out;
845
846                 pfn++;
847         }
848
849 out:
850         mmu_free_memory_cache(&cache);
851         return ret;
852 }
853
854 static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
855 {
856         pfn_t pfn = *pfnp;
857         gfn_t gfn = *ipap >> PAGE_SHIFT;
858
859         if (PageTransCompound(pfn_to_page(pfn))) {
860                 unsigned long mask;
861                 /*
862                  * The address we faulted on is backed by a transparent huge
863                  * page.  However, because we map the compound huge page and
864                  * not the individual tail page, we need to transfer the
865                  * refcount to the head page.  We have to be careful that the
866                  * THP doesn't start to split while we are adjusting the
867                  * refcounts.
868                  *
869                  * We are sure this doesn't happen, because mmu_notifier_retry
870                  * was successful and we are holding the mmu_lock, so if this
871                  * THP is trying to split, it will be blocked in the mmu
872                  * notifier before touching any of the pages, specifically
873                  * before being able to call __split_huge_page_refcount().
874                  *
875                  * We can therefore safely transfer the refcount from PG_tail
876                  * to PG_head and switch the pfn from a tail page to the head
877                  * page accordingly.
878                  */
879                 mask = PTRS_PER_PMD - 1;
880                 VM_BUG_ON((gfn & mask) != (pfn & mask));
881                 if (pfn & mask) {
882                         *ipap &= PMD_MASK;
883                         kvm_release_pfn_clean(pfn);
884                         pfn &= ~mask;
885                         kvm_get_pfn(pfn);
886                         *pfnp = pfn;
887                 }
888
889                 return true;
890         }
891
892         return false;
893 }
894
895 static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
896 {
897         if (kvm_vcpu_trap_is_iabt(vcpu))
898                 return false;
899
900         return kvm_vcpu_dabt_iswrite(vcpu);
901 }
902
903 static bool kvm_is_device_pfn(unsigned long pfn)
904 {
905         return !pfn_valid(pfn);
906 }
907
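/*
 * Resolve a stage-2 fault on memory backed by a memslot: pin the page with
 * gfn_to_pfn_prot() and install a stage-2 mapping for it, using a huge pmd
 * when the fault is backed by hugetlbfs or a transparent huge page (and
 * the memslot alignment allows it), and a regular pte otherwise.
 */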
908 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
909                           struct kvm_memory_slot *memslot, unsigned long hva,
910                           unsigned long fault_status)
911 {
912         int ret;
913         bool write_fault, writable, hugetlb = false, force_pte = false;
914         unsigned long mmu_seq;
915         gfn_t gfn = fault_ipa >> PAGE_SHIFT;
916         struct kvm *kvm = vcpu->kvm;
917         struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
918         struct vm_area_struct *vma;
919         pfn_t pfn;
920         pgprot_t mem_type = PAGE_S2;
921         bool fault_ipa_uncached;
922
923         write_fault = kvm_is_write_fault(vcpu);
924         if (fault_status == FSC_PERM && !write_fault) {
925                 kvm_err("Unexpected L2 read permission error\n");
926                 return -EFAULT;
927         }
928
929         /* Let's check if we will get back a huge page backed by hugetlbfs */
930         down_read(&current->mm->mmap_sem);
931         vma = find_vma_intersection(current->mm, hva, hva + 1);
932         if (unlikely(!vma)) {
933                 kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
934                 up_read(&current->mm->mmap_sem);
935                 return -EFAULT;
936         }
937
938         if (is_vm_hugetlb_page(vma)) {
939                 hugetlb = true;
940                 gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
941         } else {
942                 /*
943                  * Pages belonging to memslots that don't have the same
944                  * alignment for userspace and IPA cannot be mapped using
945                  * block descriptors even if the pages belong to a THP for
946                  * the process, because the stage-2 block descriptor will
947                  * cover more than a single THP and we lose atomicity for
948                  * unmapping, updates, and splits of the THP or other pages
949                  * in the stage-2 block range.
950                  */
951                 if ((memslot->userspace_addr & ~PMD_MASK) !=
952                     ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
953                         force_pte = true;
954         }
955         up_read(&current->mm->mmap_sem);
956
957         /* We need minimum second+third level pages */
958         ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
959                                      KVM_NR_MEM_OBJS);
960         if (ret)
961                 return ret;
962
963         mmu_seq = vcpu->kvm->mmu_notifier_seq;
964         /*
965          * Ensure the read of mmu_notifier_seq happens before we call
966          * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
967          * the page we just got a reference to getting unmapped before we have
968          * a chance to grab the mmu_lock, which ensures that if the page gets
969          * unmapped afterwards, the call to kvm_unmap_hva will take it away
970          * from us again properly. This smp_rmb() interacts with the smp_wmb()
971          * in kvm_mmu_notifier_invalidate_<page|range_end>.
972          */
973         smp_rmb();
974
975         pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
976         if (is_error_pfn(pfn))
977                 return -EFAULT;
978
979         if (kvm_is_device_pfn(pfn))
980                 mem_type = PAGE_S2_DEVICE;
981
982         spin_lock(&kvm->mmu_lock);
983         if (mmu_notifier_retry(kvm, mmu_seq))
984                 goto out_unlock;
985         if (!hugetlb && !force_pte)
986                 hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
987
988         fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT;
989
990         if (hugetlb) {
991                 pmd_t new_pmd = pfn_pmd(pfn, mem_type);
992                 new_pmd = pmd_mkhuge(new_pmd);
993                 if (writable) {
994                         kvm_set_s2pmd_writable(&new_pmd);
995                         kvm_set_pfn_dirty(pfn);
996                 }
997                 coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
998                                           fault_ipa_uncached);
999                 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
1000         } else {
1001                 pte_t new_pte = pfn_pte(pfn, mem_type);
1002                 if (writable) {
1003                         kvm_set_s2pte_writable(&new_pte);
1004                         kvm_set_pfn_dirty(pfn);
1005                 }
1006                 coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
1007                                           fault_ipa_uncached);
1008                 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
1009                         pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
1010         }
1011
1012
1013 out_unlock:
1014         spin_unlock(&kvm->mmu_lock);
1015         kvm_release_pfn_clean(pfn);
1016         return ret;
1017 }
1018
1019 /**
1020  * kvm_handle_guest_abort - handles all 2nd stage aborts
1021  * @vcpu:       the VCPU pointer
1022  * @run:        the kvm_run structure
1023  *
1024  * Any abort that gets to the host is almost guaranteed to be caused by a
1025  * missing second stage translation table entry, which means that either the
1026  * guest simply needs more memory and we must allocate an appropriate page, or
1027  * the guest tried to access I/O memory, which is emulated by user space. The
1028  * distinction is based on the IPA causing the fault and whether this memory
1029  * region has been registered as standard RAM by user space.
1030  */
1031 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
1032 {
1033         unsigned long fault_status;
1034         phys_addr_t fault_ipa;
1035         struct kvm_memory_slot *memslot;
1036         unsigned long hva;
1037         bool is_iabt, write_fault, writable;
1038         gfn_t gfn;
1039         int ret, idx;
1040
1041         is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
1042         fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
1043
1044         trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
1045                               kvm_vcpu_get_hfar(vcpu), fault_ipa);
1046
1047         /* Check that the stage-2 fault is a translation fault or a permission fault */
1048         fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
1049         if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
1050                 kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
1051                         kvm_vcpu_trap_get_class(vcpu),
1052                         (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
1053                         (unsigned long)kvm_vcpu_get_hsr(vcpu));
1054                 return -EFAULT;
1055         }
1056
1057         idx = srcu_read_lock(&vcpu->kvm->srcu);
1058
1059         gfn = fault_ipa >> PAGE_SHIFT;
1060         memslot = gfn_to_memslot(vcpu->kvm, gfn);
1061         hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
1062         write_fault = kvm_is_write_fault(vcpu);
1063         if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
1064                 if (is_iabt) {
1065                         /* Prefetch Abort on I/O address */
1066                         kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
1067                         ret = 1;
1068                         goto out_unlock;
1069                 }
1070
1071                 /*
1072                  * The IPA is reported as [MAX:12], so we need to
1073                  * complement it with the bottom 12 bits from the
1074                  * faulting VA. This is always 12 bits, irrespective
1075                  * of the page size.
1076                  */
1077                 fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
1078                 ret = io_mem_abort(vcpu, run, fault_ipa);
1079                 goto out_unlock;
1080         }
1081
1082         /* Userspace should not be able to register out-of-bounds IPAs */
1083         VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
1084
1085         ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
1086         if (ret == 0)
1087                 ret = 1;
1088 out_unlock:
1089         srcu_read_unlock(&vcpu->kvm->srcu, idx);
1090         return ret;
1091 }
1092
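/*
 * Iterate over every memslot intersecting [start, end) and invoke the
 * handler on each guest physical page covered by the intersection.
 */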
1093 static void handle_hva_to_gpa(struct kvm *kvm,
1094                               unsigned long start,
1095                               unsigned long end,
1096                               void (*handler)(struct kvm *kvm,
1097                                               gpa_t gpa, void *data),
1098                               void *data)
1099 {
1100         struct kvm_memslots *slots;
1101         struct kvm_memory_slot *memslot;
1102
1103         slots = kvm_memslots(kvm);
1104
1105         /* we only care about the pages that the guest sees */
1106         kvm_for_each_memslot(memslot, slots) {
1107                 unsigned long hva_start, hva_end;
1108                 gfn_t gfn, gfn_end;
1109
1110                 hva_start = max(start, memslot->userspace_addr);
1111                 hva_end = min(end, memslot->userspace_addr +
1112                                         (memslot->npages << PAGE_SHIFT));
1113                 if (hva_start >= hva_end)
1114                         continue;
1115
1116                 /*
1117                  * {gfn(page) | page intersects with [hva_start, hva_end)} =
1118                  * {gfn_start, gfn_start+1, ..., gfn_end-1}.
1119                  */
1120                 gfn = hva_to_gfn_memslot(hva_start, memslot);
1121                 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
1122
1123                 for (; gfn < gfn_end; ++gfn) {
1124                         gpa_t gpa = gfn << PAGE_SHIFT;
1125                         handler(kvm, gpa, data);
1126                 }
1127         }
1128 }
1129
1130 static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
1131 {
1132         unmap_stage2_range(kvm, gpa, PAGE_SIZE);
1133 }
1134
1135 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
1136 {
1137         unsigned long end = hva + PAGE_SIZE;
1138
1139         if (!kvm->arch.pgd)
1140                 return 0;
1141
1142         trace_kvm_unmap_hva(hva);
1143         handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
1144         return 0;
1145 }
1146
1147 int kvm_unmap_hva_range(struct kvm *kvm,
1148                         unsigned long start, unsigned long end)
1149 {
1150         if (!kvm->arch.pgd)
1151                 return 0;
1152
1153         trace_kvm_unmap_hva_range(start, end);
1154         handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
1155         return 0;
1156 }
1157
1158 static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
1159 {
1160         pte_t *pte = (pte_t *)data;
1161
1162         stage2_set_pte(kvm, NULL, gpa, pte, false);
1163 }
1164
1165
1166 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
1167 {
1168         unsigned long end = hva + PAGE_SIZE;
1169         pte_t stage2_pte;
1170
1171         if (!kvm->arch.pgd)
1172                 return;
1173
1174         trace_kvm_set_spte_hva(hva);
1175         stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
1176         handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
1177 }
1178
1179 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
1180 {
1181         mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
1182 }
1183
1184 phys_addr_t kvm_mmu_get_httbr(void)
1185 {
1186         return virt_to_phys(hyp_pgd);
1187 }
1188
1189 phys_addr_t kvm_mmu_get_boot_httbr(void)
1190 {
1191         return virt_to_phys(boot_hyp_pgd);
1192 }
1193
1194 phys_addr_t kvm_get_idmap_vector(void)
1195 {
1196         return hyp_idmap_vector;
1197 }
1198
1199 int kvm_mmu_init(void)
1200 {
1201         int err;
1202
1203         hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
1204         hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
1205         hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
1206
1207         if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
1208                 /*
1209                  * Our init code is crossing a page boundary. Allocate
1210                  * a bounce page, copy the code over and use that.
1211                  */
1212                 size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
1213                 phys_addr_t phys_base;
1214
1215                 init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
1216                 if (!init_bounce_page) {
1217                         kvm_err("Couldn't allocate HYP init bounce page\n");
1218                         err = -ENOMEM;
1219                         goto out;
1220                 }
1221
1222                 memcpy(init_bounce_page, __hyp_idmap_text_start, len);
1223                 /*
1224                  * Warning: the code we just copied to the bounce page
1225                  * must be flushed to the point of coherency.
1226                  * Otherwise, the data may be sitting in L2, and HYP
1227                  * mode won't be able to observe it as it runs with
1228                  * caches off at that point.
1229                  */
1230                 kvm_flush_dcache_to_poc(init_bounce_page, len);
1231
1232                 phys_base = kvm_virt_to_phys(init_bounce_page);
1233                 hyp_idmap_vector += phys_base - hyp_idmap_start;
1234                 hyp_idmap_start = phys_base;
1235                 hyp_idmap_end = phys_base + len;
1236
1237                 kvm_info("Using HYP init bounce page @%lx\n",
1238                          (unsigned long)phys_base);
1239         }
1240
1241         hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
1242         boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
1243
1244         if (!hyp_pgd || !boot_hyp_pgd) {
1245                 kvm_err("Hyp mode PGD not allocated\n");
1246                 err = -ENOMEM;
1247                 goto out;
1248         }
1249
1250         /* Create the idmap in the boot page tables */
1251         err =   __create_hyp_mappings(boot_hyp_pgd,
1252                                       hyp_idmap_start, hyp_idmap_end,
1253                                       __phys_to_pfn(hyp_idmap_start),
1254                                       PAGE_HYP);
1255
1256         if (err) {
1257                 kvm_err("Failed to idmap %lx-%lx\n",
1258                         hyp_idmap_start, hyp_idmap_end);
1259                 goto out;
1260         }
1261
1262         /* Map the very same page at the trampoline VA */
1263         err =   __create_hyp_mappings(boot_hyp_pgd,
1264                                       TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
1265                                       __phys_to_pfn(hyp_idmap_start),
1266                                       PAGE_HYP);
1267         if (err) {
1268                 kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n",
1269                         TRAMPOLINE_VA);
1270                 goto out;
1271         }
1272
1273         /* Map the same page again into the runtime page tables */
1274         err =   __create_hyp_mappings(hyp_pgd,
1275                                       TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
1276                                       __phys_to_pfn(hyp_idmap_start),
1277                                       PAGE_HYP);
1278         if (err) {
1279                 kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n",
1280                         TRAMPOLINE_VA);
1281                 goto out;
1282         }
1283
1284         return 0;
1285 out:
1286         free_hyp_pgds();
1287         return err;
1288 }
1289
1290 void kvm_arch_commit_memory_region(struct kvm *kvm,
1291                                    struct kvm_userspace_memory_region *mem,
1292                                    const struct kvm_memory_slot *old,
1293                                    enum kvm_mr_change change)
1294 {
1295 }
1296
1297 int kvm_arch_prepare_memory_region(struct kvm *kvm,
1298                                    struct kvm_memory_slot *memslot,
1299                                    struct kvm_userspace_memory_region *mem,
1300                                    enum kvm_mr_change change)
1301 {
1302         hva_t hva = mem->userspace_addr;
1303         hva_t reg_end = hva + mem->memory_size;
1304         bool writable = !(mem->flags & KVM_MEM_READONLY);
1305         int ret = 0;
1306
1307         if (change != KVM_MR_CREATE && change != KVM_MR_MOVE)
1308                 return 0;
1309
1310         /*
1311          * Prevent userspace from creating a memory region outside of the IPA
1312          * space addressable by the KVM guest.
1313          */
1314         if (memslot->base_gfn + memslot->npages >=
1315             (KVM_PHYS_SIZE >> PAGE_SHIFT))
1316                 return -EFAULT;
1317
1318         /*
1319          * A memory region could potentially cover multiple VMAs, and any holes
1320          * between them, so iterate over all of them to find out if we can map
1321          * any of them right now.
1322          *
1323          *     +--------------------------------------------+
1324          * +---------------+----------------+   +----------------+
1325          * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
1326          * +---------------+----------------+   +----------------+
1327          *     |               memory region                |
1328          *     +--------------------------------------------+
1329          */
1330         do {
1331                 struct vm_area_struct *vma = find_vma(current->mm, hva);
1332                 hva_t vm_start, vm_end;
1333
1334                 if (!vma || vma->vm_start >= reg_end)
1335                         break;
1336
1337                 /*
1338                  * Mapping a read-only VMA is only allowed if the
1339                  * memory region is configured as read-only.
1340                  */
1341                 if (writable && !(vma->vm_flags & VM_WRITE)) {
1342                         ret = -EPERM;
1343                         break;
1344                 }
1345
1346                 /*
1347                  * Take the intersection of this VMA with the memory region
1348                  */
1349                 vm_start = max(hva, vma->vm_start);
1350                 vm_end = min(reg_end, vma->vm_end);
1351
1352                 if (vma->vm_flags & VM_PFNMAP) {
1353                         gpa_t gpa = mem->guest_phys_addr +
1354                                     (vm_start - mem->userspace_addr);
1355                         phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
1356                                          vm_start - vma->vm_start;
1357
1358                         ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
1359                                                     vm_end - vm_start,
1360                                                     writable);
1361                         if (ret)
1362                                 break;
1363                 }
1364                 hva = vm_end;
1365         } while (hva < reg_end);
1366
1367         spin_lock(&kvm->mmu_lock);
1368         if (ret)
1369                 unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
1370         else
1371                 stage2_flush_memslot(kvm, memslot);
1372         spin_unlock(&kvm->mmu_lock);
1373         return ret;
1374 }
1375
1376 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
1377                            struct kvm_memory_slot *dont)
1378 {
1379 }
1380
1381 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
1382                             unsigned long npages)
1383 {
1384         /*
1385          * Readonly memslots are not incoherent with the caches by definition,
1386          * but in practice, they are used mostly to emulate ROMs or NOR flashes
1387          * that the guest may consider devices and hence map as uncached.
1388          * To prevent incoherency issues in these cases, tag all readonly
1389          * regions as incoherent.
1390          */
1391         if (slot->flags & KVM_MEM_READONLY)
1392                 slot->flags |= KVM_MEMSLOT_INCOHERENT;
1393         return 0;
1394 }
1395
1396 void kvm_arch_memslots_updated(struct kvm *kvm)
1397 {
1398 }
1399
1400 void kvm_arch_flush_shadow_all(struct kvm *kvm)
1401 {
1402 }
1403
1404 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
1405                                    struct kvm_memory_slot *slot)
1406 {
1407         gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
1408         phys_addr_t size = slot->npages << PAGE_SHIFT;
1409
1410         spin_lock(&kvm->mmu_lock);
1411         unmap_stage2_range(kvm, gpa, size);
1412         spin_unlock(&kvm->mmu_lock);
1413 }