/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;
	if (limit < (1UL << 31)) {
		limit = (1UL << 31) - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < (1UL << 42)) {
		limit = (1UL << 42) - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < (1UL << 53)) {
		limit = (1UL << 53) - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	atomic_set(&gmap->ref_count, 1);
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		goto out_free;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.gmap_lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	spin_unlock(&mm->context.gmap_lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
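
/*
 * Editorial sketch (not part of the original file): the typical
 * lifecycle as a hypervisor like KVM would use it. The 4TB limit is
 * an arbitrary example value.
 */
#if 0
static int example_gmap_lifecycle(struct mm_struct *mm)
{
	struct gmap *gmap;

	gmap = gmap_create(mm, (1UL << 42) - 1);
	if (!gmap)
		return -ENOMEM;
	/* ... map memory, resolve faults, run the guest ... */
	gmap_remove(gmap);	/* unlinks and drops the initial reference */
	return 0;
}
#endif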
static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_asce(gmap->mm, gmap->asce);
	else
		__tlb_flush_global();
}
static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}
static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}
/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb of all gmaps (if not already done for shadows) */
	if (!(gmap_is_shadow(gmap) && gmap->removed))
		gmap_flush_tlb(gmap);
	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, 2);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	/* Free additional data for a shadow gmap */
	if (gmap_is_shadow(gmap)) {
		/* Free all page tables. */
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		/* Release reference to the parent */
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}
/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	atomic_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);
/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	if (atomic_dec_return(&gmap->ref_count) == 0)
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);
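
/*
 * Editorial note: gmap_get()/gmap_put() follow the usual kernel
 * get/put pattern; e.g. a shadow gmap takes a reference on its parent
 * (gmap_get(parent) in gmap_shadow()) and releases it again in
 * gmap_free() via gmap_put(gmap->parent).
 */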
/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;

	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}
	/* Remove gmap from the per-mm list */
	spin_lock(&gmap->mm->context.gmap_lock);
	list_del_rcu(&gmap->list);
	spin_unlock(&gmap->mm->context.gmap_lock);
	synchronize_rcu();
	/* Put reference */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);
/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);
/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, 2);
	return 0;
}
/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}
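
/*
 * Worked example (editorial): with 2048 eight-byte entries per segment
 * table and PMD_SIZE = 1MB, a segment-table entry whose kernel address
 * ends in 0x840 yields offset = ((0x840 / 8) & 2047) * 1MB = 264MB;
 * adding page->index (the guest address stored for the table page by
 * gmap_alloc_table()) recovers the guest address of that segment.
 */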
/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_INVALID);
		*entry = _SEGMENT_ENTRY_INVALID;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}
/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}
/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);
/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
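
/*
 * Illustrative sketch (editorial, not from the original file): both
 * addresses and the length must be 1MB (PMD_SIZE) aligned; vm_start
 * is a hypothetical, already populated host address.
 */
#if 0
	rc = gmap_map_segment(gmap, vm_start, 0x10000000UL, 16UL << 20);
	if (!rc)
		rc = gmap_unmap_segment(gmap, 0x10000000UL, 16UL << 20);
#endif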
/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	/* Note: guest_to_host is empty for a shadow gmap */
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);
/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
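
/*
 * Editorial note: a failed lookup is signalled in-band, so callers
 * must test the returned value with IS_ERR_VALUE(), e.g.:
 *
 *	vmaddr = gmap_translate(gmap, gaddr);
 *	if (IS_ERR_VALUE(vmaddr))
 *		return vmaddr;
 */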
/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}
/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr >> 53) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & 0xffe0000000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr >> 42) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & 0xfffffc0000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr >> 31) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & 0xffffffff80000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr >> 20) & 0x7ff;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* large pmds cannot yet be handled */
	if (pmd_large(*pmd))
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_INVALID) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc)
			*table = pmd_val(*pmd);
	} else
		rc = 0;
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}
/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	down_read(&gmap->mm->mmap_sem);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * In the case that fixup_user_fault unlocked the mmap_sem during
	 * faulting, redo __gmap_translate to not race with a map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
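
/*
 * Illustrative sketch (editorial): how a guest-exit handler might
 * resolve a guest page fault; FAULT_FLAG_WRITE requests a writable
 * host mapping before the gmap segment is linked.
 */
#if 0
	rc = gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE);
	if (rc == -EFAULT) {
		/* reflect an addressing exception to the guest */
	}
#endif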
/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;
		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (ptep)
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
		pte_unmap_unlock(ptep, ptl);
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);
void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size, NULL);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);
static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);
/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
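
/*
 * Illustrative sketch (editorial): a minimal pte-invalidation
 * consumer. The names my_notifier_call/my_gmap_notifier are
 * hypothetical; the callback signature follows gmap_call_notifier().
 */
#if 0
static void my_notifier_call(struct gmap *gmap, unsigned long start,
			     unsigned long end)
{
	/* a guest range [start, end] lost its notification-armed pte */
}

static struct gmap_notifier my_gmap_notifier = {
	.notifier_call = my_notifier_call,
};

	gmap_register_pte_notifier(&my_gmap_notifier);
	/* ... */
	gmap_unregister_pte_notifier(&my_gmap_notifier);
#endif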
/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}
/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	unsigned long *table;

	if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
		return NULL;
	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;
	if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2) * 11)))
		return NULL;
	table = (unsigned long *)(gmap->asce & _ASCE_ORIGIN);
	switch (gmap->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr >> 53) & 0x7ff;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (gaddr >> 42) & 0x7ff;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (gaddr >> 31) & 0x7ff;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr >> 20) & 0x7ff;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr >> 12) & 0xff;
	}
	return table;
}
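
/*
 * Worked example (editorial): each region/segment table has 2048
 * entries and a page table has 256, so the indices of gaddr are
 * (gaddr >> 53) & 0x7ff, (gaddr >> 42) & 0x7ff, (gaddr >> 31) & 0x7ff,
 * (gaddr >> 20) & 0x7ff and (gaddr >> 12) & 0xff, covering
 * 11 + 11 + 11 + 11 + 8 + 12 = 64 address bits in total.
 */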
/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *		      and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 *
 * Note: Can also be called for shadow gmaps.
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	if (gmap_is_shadow(gmap))
		spin_lock(&gmap->guest_table_lock);
	/* Walk the gmap page table, lock and get pte pointer */
	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
	if (!table || *table & _SEGMENT_ENTRY_INVALID) {
		if (gmap_is_shadow(gmap))
			spin_unlock(&gmap->guest_table_lock);
		return NULL;
	}
	if (gmap_is_shadow(gmap)) {
		*ptl = &gmap->guest_table_lock;
		return pte_offset_map((pmd_t *) table, gaddr);
	}
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}
/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr)
{
	struct mm_struct *mm = gmap->mm;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	if (fixup_user_fault(current, mm, vmaddr, FAULT_FLAG_WRITE, &unlocked))
		return -EFAULT;
	if (unlocked)
		/* lost mmap_sem, caller has to retry __gmap_translate */
		return 0;
	/* Connect the page tables */
	return __gmap_link(gmap, gaddr, vmaddr);
}
/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
	spin_unlock(ptl);
}
/**
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_sem in read.
 *
 * Note: Can also be called for shadow gmaps.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	while (len) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
			gmap_pte_op_end(ptl);
		}
		if (rc) {
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr);
			if (rc)
				return rc;
			continue;
		}
		gaddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}
/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *			  call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
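
/*
 * Illustrative sketch (editorial): arm a notification on one guest
 * page so the registered notifiers fire on the next pte change:
 *
 *	rc = gmap_mprotect_notify(gmap, gaddr & PAGE_MASK,
 *				  PAGE_SIZE, PROT_READ);
 */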
/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *		     absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *) address;
				pte_val(*ptep) |= _PAGE_YOUNG;
				/* Do *NOT* clear the _PAGE_INVALID bit! */
				rc = 0;
			}
			gmap_pte_op_end(ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
				    struct gmap_rmap *rmap)
{
	void **slot;

	BUG_ON(!gmap_is_shadow(sg));
	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	if (slot) {
		rmap->next = radix_tree_deref_slot_protected(slot,
							&sg->guest_table_lock);
		radix_tree_replace_slot(slot, rmap);
	} else {
		rmap->next = NULL;
		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
				  rmap);
	}
}
/**
 * gmap_protect_rmap - modify access rights to memory and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 * @prot: indicates access rights: none, read-only or read-write
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
			     unsigned long paddr, unsigned long len, int prot)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	while (len) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
		if (!rmap)
			return -ENOMEM;
		rmap->raddr = raddr;
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc) {
			kfree(rmap);
			return rc;
		}
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (ptep) {
			spin_lock(&sg->guest_table_lock);
			rc = ptep_force_prot(parent->mm, paddr, ptep, prot,
					     PGSTE_VSIE_BIT);
			if (!rc)
				gmap_insert_rmap(sg, vmaddr, rmap);
			spin_unlock(&sg->guest_table_lock);
			gmap_pte_op_end(ptl);
		}
		radix_tree_preload_end();
		if (rc) {
			kfree(rmap);
			rc = gmap_pte_op_fixup(parent, paddr, vmaddr);
			if (rc)
				return rc;
			continue;
		}
		paddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}
#define _SHADOW_RMAP_MASK	0x7
#define _SHADOW_RMAP_REGION1	0x5
#define _SHADOW_RMAP_REGION2	0x4
#define _SHADOW_RMAP_REGION3	0x3
#define _SHADOW_RMAP_SEGMENT	0x2
#define _SHADOW_RMAP_PGTABLE	0x1
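
/*
 * Editorial note: these tags live in the low bits of gmap_rmap->raddr,
 * e.g. gmap_shadow_pgt() stores (saddr & 0xfffffffffff00000UL) |
 * _SHADOW_RMAP_SEGMENT; gmap_shadow_notify() recovers them with
 * bits = rmap->raddr & _SHADOW_RMAP_MASK and raddr = rmap->raddr ^ bits.
 */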
/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
	asm volatile(
		"	.insn	rrf,0xb98e0000,%0,%1,0,0"
		: : "a" (asce), "a" (vaddr) : "cc", "memory");
}
/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
	if (!table || *table & _PAGE_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 12) - 1);
	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}
/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
				unsigned long *pgt)
{
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < 256; i++, raddr += 1UL << 12)
		pgt[i] = _PAGE_INVALID;
}
/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long sto, *ste, *pgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
	if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 20) - 1);
	sto = (unsigned long) (ste - ((raddr >> 20) & 0x7ff));
	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
	pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
	*ste = _SEGMENT_ENTRY_EMPTY;
	__gmap_unshadow_pgt(sg, raddr, pgt);
	/* Free page table */
	page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	page_table_free_pgste(page);
}
/**
 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @sgt: pointer to the start of a shadow segment table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
				unsigned long *sgt)
{
	unsigned long asce, *pgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) sgt | _ASCE_TYPE_SEGMENT;
	for (i = 0; i < 2048; i++, raddr += 1UL << 20) {
		if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
			continue;
		pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
		sgt[i] = _SEGMENT_ENTRY_EMPTY;
		__gmap_unshadow_pgt(sg, raddr, pgt);
		/* Free page table */
		page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		page_table_free_pgste(page);
	}
}
/**
 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long r3o, *r3e, *sgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
	if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 31) - 1);
	r3o = (unsigned long) (r3e - ((raddr >> 31) & 0x7ff));
	gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
	sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
	*r3e = _REGION3_ENTRY_EMPTY;
	__gmap_unshadow_sgt(sg, raddr, sgt);
	/* Free segment table */
	page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}
/**
 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 * @r3t: pointer to the start of a shadow region-3 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
				unsigned long *r3t)
{
	unsigned long asce, *sgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r3t | _ASCE_TYPE_REGION3;
	for (i = 0; i < 2048; i++, raddr += 1UL << 31) {
		if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
		r3t[i] = _REGION3_ENTRY_EMPTY;
		__gmap_unshadow_sgt(sg, raddr, sgt);
		/* Free segment table */
		page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}
/**
 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r2o, *r2e, *r3t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
	if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 42) - 1);
	r2o = (unsigned long) (r2e - ((raddr >> 42) & 0x7ff));
	gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
	r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
	*r2e = _REGION2_ENTRY_EMPTY;
	__gmap_unshadow_r3t(sg, raddr, r3t);
	/* Free region 3 table */
	page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}
/**
 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r2t: pointer to the start of a shadow region-2 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
				unsigned long *r2t)
{
	unsigned long asce, *r3t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r2t | _ASCE_TYPE_REGION2;
	for (i = 0; i < 2048; i++, raddr += 1UL << 42) {
		if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
		r2t[i] = _REGION2_ENTRY_EMPTY;
		__gmap_unshadow_r3t(sg, raddr, r3t);
		/* Free region 3 table */
		page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}
/**
 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r1o, *r1e, *r2t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
	if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 53) - 1);
	r1o = (unsigned long) (r1e - ((raddr >> 53) & 0x7ff));
	gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
	r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
	*r1e = _REGION1_ENTRY_EMPTY;
	__gmap_unshadow_r2t(sg, raddr, r2t);
	/* Free region 2 table */
	page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}
/**
 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r1t: pointer to the start of a shadow region-1 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
				unsigned long *r1t)
{
	unsigned long asce, *r2t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
	for (i = 0; i < 2048; i++, raddr += 1UL << 53) {
		if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
		__gmap_unshadow_r2t(sg, raddr, r2t);
		/* Clear entry and flush translation r1t -> r2t */
		gmap_idte_one(asce, raddr);
		r1t[i] = _REGION1_ENTRY_EMPTY;
		/* Free region 2 table */
		page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}
/**
 * gmap_unshadow - remove a shadow page table completely
 * @sg: pointer to the shadow guest address space structure
 *
 * Called with sg->guest_table_lock
 */
static void gmap_unshadow(struct gmap *sg)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	if (sg->removed)
		return;
	sg->removed = 1;
	gmap_call_notifier(sg, 0, -1UL);
	gmap_flush_tlb(sg);
	table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
	switch (sg->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		__gmap_unshadow_r1t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION2:
		__gmap_unshadow_r2t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION3:
		__gmap_unshadow_r3t(sg, 0, table);
		break;
	case _ASCE_TYPE_SEGMENT:
		__gmap_unshadow_sgt(sg, 0, table);
		break;
	}
}
/**
 * gmap_find_shadow - find a specific asce in the list of shadow tables
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns the pointer to a gmap if a shadow table with the given asce is
 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
 * otherwise NULL
 */
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
				     int edat_level)
{
	struct gmap *sg;

	list_for_each_entry(sg, &parent->children, list) {
		if (sg->orig_asce != asce || sg->edat_level != edat_level ||
		    sg->removed)
			continue;
		if (!sg->initialized)
			return ERR_PTR(-EAGAIN);
		atomic_inc(&sg->ref_count);
		return sg;
	}
	return NULL;
}
/**
 * gmap_shadow - create/find a shadow guest address space
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * The pages of the top level page table referred by the asce parameter
 * will be set to read-only and marked in the PGSTEs of the kvm process.
 * The shadow table will be removed automatically on any change to the
 * PTE mapping for the source table.
 *
 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
 * parent gmap table could not be protected.
 */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
			 int edat_level)
{
	struct gmap *sg, *new;
	unsigned long limit;
	int rc;

	BUG_ON(gmap_is_shadow(parent));
	spin_lock(&parent->shadow_lock);
	sg = gmap_find_shadow(parent, asce, edat_level);
	spin_unlock(&parent->shadow_lock);
	if (sg)
		return sg;
	/* Create a new shadow gmap */
	limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
	new = gmap_alloc(limit);
	if (!new)
		return ERR_PTR(-ENOMEM);
	new->mm = parent->mm;
	new->parent = gmap_get(parent);
	new->orig_asce = asce;
	new->edat_level = edat_level;
	new->initialized = false;
	spin_lock(&parent->shadow_lock);
	/* Recheck if another CPU created the same shadow */
	sg = gmap_find_shadow(parent, asce, edat_level);
	if (sg) {
		spin_unlock(&parent->shadow_lock);
		gmap_free(new);
		return sg;
	}
	atomic_set(&new->ref_count, 2);
	list_add(&new->list, &parent->children);
	spin_unlock(&parent->shadow_lock);
	/* protect after insertion, so it will get properly invalidated */
	down_read(&parent->mm->mmap_sem);
	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
				((asce & _ASCE_TABLE_LENGTH) + 1) * 4096,
				PROT_READ, PGSTE_VSIE_BIT);
	up_read(&parent->mm->mmap_sem);
	spin_lock(&parent->shadow_lock);
	new->initialized = true;
	if (rc) {
		list_del(&new->list);
		gmap_free(new);
		new = ERR_PTR(rc);
	}
	spin_unlock(&parent->shadow_lock);
	return new;
}
EXPORT_SYMBOL_GPL(gmap_shadow);
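
/*
 * Illustrative sketch (editorial): VSIE-style usage. -EAGAIN asks the
 * caller to retry because another CPU is initializing the same shadow.
 */
#if 0
	struct gmap *sg;

	sg = gmap_shadow(parent, guest_asce, edat_level);
	if (IS_ERR(sg))
		return PTR_ERR(sg);	/* -ENOMEM, -EAGAIN or -EFAULT */
	/* ... populate via gmap_shadow_r2t()/..._pgt()/..._page() ... */
	gmap_put(sg);			/* drop the reference again */
#endif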
/**
 * gmap_shadow_r2t - create an empty shadow region 2 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r2t: parent gmap address of the region 2 table to get shadowed
 *
 * The r2t parameter specifies the address of the source table. The
 * four pages of the source table are made read-only in the parent gmap
 * address space. A write to the source table area @r2t will automatically
 * remove the shadow r2 table and all of its descendants.
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r2t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region second table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = r2t & _REGION_ENTRY_ORIGIN;
	s_r2t = (unsigned long *) page_to_phys(page);
	/* Install shadow region second table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
	list_add(&page->lru, &sg->crst_list);
	spin_unlock(&sg->guest_table_lock);
	/* Make r2t read-only in parent gmap page table */
	raddr = (saddr & 0xffe0000000000000UL) | _SHADOW_RMAP_REGION1;
	origin = r2t & _REGION_ENTRY_ORIGIN;
	offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 4);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
		    (unsigned long) s_r2t)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r2t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
/**
 * gmap_shadow_r3t - create a shadow region 3 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r3t: parent gmap address of the region 3 table to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r3t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region third table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = r3t & _REGION_ENTRY_ORIGIN;
	s_r3t = (unsigned long *) page_to_phys(page);
	/* Install shadow region third table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
	list_add(&page->lru, &sg->crst_list);
	spin_unlock(&sg->guest_table_lock);
	/* Make r3t read-only in parent gmap page table */
	raddr = (saddr & 0xfffffc0000000000UL) | _SHADOW_RMAP_REGION2;
	origin = r3t & _REGION_ENTRY_ORIGIN;
	offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 3);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
		    (unsigned long) s_r3t)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r3t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
/**
 * gmap_shadow_sgt - create a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @sgt: parent gmap address of the segment table to get shadowed
 *
 * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_sgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow segment table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = sgt & _REGION_ENTRY_ORIGIN;
	s_sgt = (unsigned long *) page_to_phys(page);
	/* Install shadow segment table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
	list_add(&page->lru, &sg->crst_list);
	spin_unlock(&sg->guest_table_lock);
	/* Make sgt read-only in parent gmap page table */
	raddr = (saddr & 0xffffffff80000000UL) | _SHADOW_RMAP_REGION3;
	origin = sgt & _REGION_ENTRY_ORIGIN;
	offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 2);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
		    (unsigned long) s_sgt)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_sgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
/**
 * gmap_shadow_pgt_lookup - find a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: the address in the shadow guest address space
 * @pgt: parent gmap address of the page table to get shadowed
 * @dat_protection: if the pgtable is marked as protected by dat
 *
 * Returns 0 if the shadow page table was found and -EAGAIN if the page
 * table was not found.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
			   unsigned long *pgt, int *dat_protection)
{
	unsigned long *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
		/* Shadow page tables are full pages (pte+pgste) */
		page = pfn_to_page(*table >> PAGE_SHIFT);
		*pgt = page->index;
		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
		rc = 0;
	} else {
		rc = -EAGAIN;
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
/**
 * gmap_shadow_pgt - instantiate a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: parent gmap address of the page table to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with gmap->mm->mmap_sem in read
 */
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt)
{
	unsigned long raddr, origin;
	unsigned long *s_pgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow page table */
	page = page_table_alloc_pgste(sg->mm);
	if (!page)
		return -ENOMEM;
	page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
	s_pgt = (unsigned long *) page_to_phys(page);
	/* Install shadow page table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _SEGMENT_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _SEGMENT_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
		 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
	list_add(&page->lru, &sg->pt_list);
	spin_unlock(&sg->guest_table_lock);
	/* Make pgt read-only in parent gmap page table (not the pgste) */
	raddr = (saddr & 0xfffffffffff00000UL) | _SHADOW_RMAP_SEGMENT;
	origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
	rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 1);
		if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
		    (unsigned long) s_pgt)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_SEGMENT_ENTRY_INVALID;
	} else {
		gmap_unshadow_pgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	page_table_free_pgste(page);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
/**
 * gmap_shadow_page - create a shadow page mapping
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pte: pte in parent gmap address space to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr, paddr;
	spinlock_t *ptl;
	pte_t *sptep, *tptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;

	rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
	if (!rmap)
		return -ENOMEM;
	rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;

	while (1) {
		paddr = pte_val(pte) & PAGE_MASK;
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc)
			break;
		rc = -EAGAIN;
		sptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (sptep) {
			spin_lock(&sg->guest_table_lock);
			/* Get page table pointer */
			tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
			if (!tptep) {
				spin_unlock(&sg->guest_table_lock);
				gmap_pte_op_end(ptl);
				radix_tree_preload_end();
				break;
			}
			rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
			if (rc > 0) {
				/* Success and a new mapping */
				gmap_insert_rmap(sg, vmaddr, rmap);
				rmap = NULL;
				rc = 0;
			}
			gmap_pte_op_end(ptl);
			spin_unlock(&sg->guest_table_lock);
		}
		radix_tree_preload_end();
		if (!rc)
			break;
		rc = gmap_pte_op_fixup(parent, paddr, vmaddr);
		if (rc)
			break;
	}
	kfree(rmap);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_page);
/**
 * gmap_shadow_notify - handle notifications for shadow gmap
 *
 * Called with sg->parent->shadow_lock.
 */
static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
			       unsigned long offset, pte_t *pte)
{
	struct gmap_rmap *rmap, *rnext, *head;
	unsigned long gaddr, start, end, bits, raddr;
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->parent->guest_table_lock);
	table = radix_tree_lookup(&sg->parent->host_to_guest,
				  vmaddr >> PMD_SHIFT);
	gaddr = table ? __gmap_segment_gaddr(table) + offset : 0;
	spin_unlock(&sg->parent->guest_table_lock);
	if (!table)
		return;

	spin_lock(&sg->guest_table_lock);
	if (sg->removed) {
		spin_unlock(&sg->guest_table_lock);
		return;
	}
	/* Check for top level table */
	start = sg->orig_asce & _ASCE_ORIGIN;
	end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * 4096;
	if (gaddr >= start && gaddr < end) {
		/* The complete shadow table has to go */
		gmap_unshadow(sg);
		spin_unlock(&sg->guest_table_lock);
		list_del(&sg->list);
		gmap_put(sg);
		return;
	}
	/* Remove the page table tree from one specific entry */
	head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> 12);
	gmap_for_each_rmap_safe(rmap, rnext, head) {
		bits = rmap->raddr & _SHADOW_RMAP_MASK;
		raddr = rmap->raddr ^ bits;
		switch (bits) {
		case _SHADOW_RMAP_REGION1:
			gmap_unshadow_r2t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION2:
			gmap_unshadow_r3t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION3:
			gmap_unshadow_sgt(sg, raddr);
			break;
		case _SHADOW_RMAP_SEGMENT:
			gmap_unshadow_pgt(sg, raddr);
			break;
		case _SHADOW_RMAP_PGTABLE:
			gmap_unshadow_page(sg, raddr);
			break;
		}
		kfree(rmap);
	}
	spin_unlock(&sg->guest_table_lock);
}
/**
 * ptep_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 * @bits: bits from the pgste that caused the notify call
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
		 pte_t *pte, unsigned long bits)
{
	unsigned long offset, gaddr;
	unsigned long *table;
	struct gmap *gmap, *sg, *next;

	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	offset = offset * (4096 / sizeof(pte_t));
	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
			spin_lock(&gmap->shadow_lock);
			list_for_each_entry_safe(sg, next,
						 &gmap->children, list)
				gmap_shadow_notify(sg, vmaddr, offset, pte);
			spin_unlock(&gmap->shadow_lock);
		}
		if (!(bits & PGSTE_IN_BIT))
			continue;
		spin_lock(&gmap->guest_table_lock);
		table = radix_tree_lookup(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (table)
			gaddr = __gmap_segment_gaddr(table) + offset;
		spin_unlock(&gmap->guest_table_lock);
		if (!table)
			continue;
		gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ptep_notify);
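
/*
 * Worked example (editorial): a pte at byte offset 0x7f0 within its 2K
 * page-table fragment gives offset = 0x7f0 & (255 * 8) = 2032, scaled
 * by 4096 / 8 = 512 to 254 * 4K, i.e. the byte offset of page 254
 * within the 1MB segment found via __gmap_segment_gaddr().
 */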
static inline void thp_split_mm(struct mm_struct *mm)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct vm_area_struct *vma;
	unsigned long addr;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE)
			follow_page(vma, addr, FOLL_SPLIT);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
#endif
}
/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct mm_struct *mm = current->mm;

	/* Do we have pgstes? If yes, we are done */
	if (mm_has_pgste(mm))
		return 0;
	/* Fail if the page tables are 2K */
	if (!mm_alloc_pgste(mm))
		return -EINVAL;
	down_write(&mm->mmap_sem);
	mm->context.has_pgste = 1;
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	up_write(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	/*
	 * Remove all zero page mappings; after establishing a policy to
	 * forbid zero page mappings, subsequent faults for such pages will
	 * get fresh anonymous pages.
	 */
	if (is_zero_pfn(pte_pfn(*pte)))
		ptep_xchg_direct(walk->mm, addr, pte, __pte(_PAGE_INVALID));
	/* Clear storage key */
	ptep_zap_key(walk->mm, addr, pte);
	return 0;
}
int s390_enable_skey(void)
{
	struct mm_walk walk = { .pte_entry = __s390_enable_skey };
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc = 0;

	down_write(&mm->mmap_sem);
	if (mm_use_skey(mm))
		goto out_up;

	mm->context.use_skey = 1;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
				MADV_UNMERGEABLE, &vma->vm_flags)) {
			mm->context.use_skey = 0;
			rc = -ENOMEM;
			goto out_up;
		}
	}
	mm->def_flags &= ~VM_MERGEABLE;

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);
/*
 * Reset CMMA state, make all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	ptep_zap_unused(walk->mm, addr, pte, 1);
	return 0;
}

void s390_reset_cmma(struct mm_struct *mm)
{
	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };

	down_write(&mm->mmap_sem);
	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
	up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);