Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 0b18585..be99357 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -18,6 +18,8 @@
 #include <linux/rcupdate.h>
 #include <linux/slab.h>
 #include <linux/swapops.h>
+#include <linux/ksm.h>
+#include <linux/mman.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -750,8 +752,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
                        break;
                /* Walk the process page table, lock and get pte pointer */
                ptep = get_locked_pte(gmap->mm, addr, &ptl);
-               if (unlikely(!ptep))
-                       continue;
+               VM_BUG_ON(!ptep);
                /* Set notification bit in the pgste of the pte */
                entry = *ptep;
                if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
@@ -761,7 +762,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
                        gaddr += PAGE_SIZE;
                        len -= PAGE_SIZE;
                }
-               spin_unlock(ptl);
+               pte_unmap_unlock(ptep, ptl);
        }
        up_read(&gmap->mm->mmap_sem);
        return rc;
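
The unlock change above is about API pairing: get_locked_pte() returns
a pte that is both mapped and locked, so the matching teardown is
pte_unmap_unlock(), not a bare spin_unlock(). On s390 pte_unmap() is a
no-op, but the pairing keeps the code correct on any architecture. A
minimal sketch of the pattern (the helper name is illustrative, not
part of this patch):

	static bool pte_is_present(struct mm_struct *mm, unsigned long addr)
	{
		spinlock_t *ptl;
		pte_t *ptep;
		bool present = false;

		/* returns the pte mapped and locked, or NULL */
		ptep = get_locked_pte(mm, addr, &ptl);
		if (ptep) {
			present = pte_present(*ptep);
			/* undo both the mapping and the lock */
			pte_unmap_unlock(ptep, ptl);
		}
		return present;
	}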
@@ -834,99 +835,6 @@ static inline void page_table_free_pgste(unsigned long *table)
        __free_page(page);
 }
 
-static inline unsigned long page_table_reset_pte(struct mm_struct *mm, pmd_t *pmd,
-                       unsigned long addr, unsigned long end, bool init_skey)
-{
-       pte_t *start_pte, *pte;
-       spinlock_t *ptl;
-       pgste_t pgste;
-
-       start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
-       pte = start_pte;
-       do {
-               pgste = pgste_get_lock(pte);
-               pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
-               if (init_skey) {
-                       unsigned long address;
-
-                       pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
-                                             PGSTE_GR_BIT | PGSTE_GC_BIT);
-
-                       /* skip invalid and not writable pages */
-                       if (pte_val(*pte) & _PAGE_INVALID ||
-                           !(pte_val(*pte) & _PAGE_WRITE)) {
-                               pgste_set_unlock(pte, pgste);
-                               continue;
-                       }
-
-                       address = pte_val(*pte) & PAGE_MASK;
-                       page_set_storage_key(address, PAGE_DEFAULT_KEY, 1);
-               }
-               pgste_set_unlock(pte, pgste);
-       } while (pte++, addr += PAGE_SIZE, addr != end);
-       pte_unmap_unlock(start_pte, ptl);
-
-       return addr;
-}
-
-static inline unsigned long page_table_reset_pmd(struct mm_struct *mm, pud_t *pud,
-                       unsigned long addr, unsigned long end, bool init_skey)
-{
-       unsigned long next;
-       pmd_t *pmd;
-
-       pmd = pmd_offset(pud, addr);
-       do {
-               next = pmd_addr_end(addr, end);
-               if (pmd_none_or_clear_bad(pmd))
-                       continue;
-               next = page_table_reset_pte(mm, pmd, addr, next, init_skey);
-       } while (pmd++, addr = next, addr != end);
-
-       return addr;
-}
-
-static inline unsigned long page_table_reset_pud(struct mm_struct *mm, pgd_t *pgd,
-                       unsigned long addr, unsigned long end, bool init_skey)
-{
-       unsigned long next;
-       pud_t *pud;
-
-       pud = pud_offset(pgd, addr);
-       do {
-               next = pud_addr_end(addr, end);
-               if (pud_none_or_clear_bad(pud))
-                       continue;
-               next = page_table_reset_pmd(mm, pud, addr, next, init_skey);
-       } while (pud++, addr = next, addr != end);
-
-       return addr;
-}
-
-void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
-                           unsigned long end, bool init_skey)
-{
-       unsigned long addr, next;
-       pgd_t *pgd;
-
-       down_write(&mm->mmap_sem);
-       if (init_skey && mm_use_skey(mm))
-               goto out_up;
-       addr = start;
-       pgd = pgd_offset(mm, addr);
-       do {
-               next = pgd_addr_end(addr, end);
-               if (pgd_none_or_clear_bad(pgd))
-                       continue;
-               next = page_table_reset_pud(mm, pgd, addr, next, init_skey);
-       } while (pgd++, addr = next, addr != end);
-       if (init_skey)
-               current->mm->context.use_skey = 1;
-out_up:
-       up_write(&mm->mmap_sem);
-}
-EXPORT_SYMBOL(page_table_reset_pgste);
-
 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                          unsigned long key, bool nq)
 {
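
The roughly ninety lines removed above are a hand-rolled descent
through pgd, pud, pmd and pte. The generic walker in mm/pagewalk.c
factors exactly this boilerplate out, and the new code at the end of
this patch switches to it. A minimal sketch of that API as it exists
in this kernel generation, with callbacks and state embedded in
struct mm_walk (the names below are illustrative):

	static int count_present_pte(pte_t *pte, unsigned long addr,
				     unsigned long next, struct mm_walk *walk)
	{
		unsigned long *count = walk->private;

		/* called for each pte slot in mapped ranges; holes and
		 * bad upper-level entries are skipped by the core walker */
		if (pte_present(*pte))
			(*count)++;
		return 0;
	}

	static unsigned long count_present(struct mm_struct *mm)
	{
		unsigned long count = 0;
		struct mm_walk walk = {
			.pte_entry = count_present_pte,
			.mm = mm,
			.private = &count,
		};

		/* walk_page_range() requires mmap_sem to be held */
		down_read(&mm->mmap_sem);
		walk_page_range(0, TASK_SIZE, &walk);
		up_read(&mm->mmap_sem);
		return count;
	}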
@@ -1031,11 +939,6 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
        return NULL;
 }
 
-void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
-                           unsigned long end, bool init_skey)
-{
-}
-
 static inline void page_table_free_pgste(unsigned long *table)
 {
 }
@@ -1386,12 +1289,88 @@ EXPORT_SYMBOL_GPL(s390_enable_sie);
  * Enable storage key handling from now on and initialize the storage
  * keys with the default key.
  */
-void s390_enable_skey(void)
+static int __s390_enable_skey(pte_t *pte, unsigned long addr,
+                             unsigned long next, struct mm_walk *walk)
 {
-       page_table_reset_pgste(current->mm, 0, TASK_SIZE, true);
+       unsigned long ptev;
+       pgste_t pgste;
+
+       pgste = pgste_get_lock(pte);
+       /*
+        * Remove all zero page mappings; after establishing a policy
+        * to forbid zero page mappings, subsequent faults for those
+        * pages will get fresh anonymous pages.
+        */
+       if (is_zero_pfn(pte_pfn(*pte))) {
+               ptep_flush_direct(walk->mm, addr, pte);
+               pte_val(*pte) = _PAGE_INVALID;
+       }
+       /* Clear storage key */
+       pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
+                             PGSTE_GR_BIT | PGSTE_GC_BIT);
+       ptev = pte_val(*pte);
+       if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
+               page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
+       pgste_set_unlock(pte, pgste);
+       return 0;
+}
+
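
__s390_enable_skey() above shreds zero page mappings before keys go
live: the empty zero page is shared by every mm on the machine, while
a storage key belongs to the physical page, so a zero page mapping can
never carry a per-guest key. The shred step in isolation, as a sketch
(hypothetical helper mirroring the lines above):

	static void shred_zero_page(struct mm_struct *mm, unsigned long addr,
				    pte_t *pte)
	{
		if (is_zero_pfn(pte_pfn(*pte))) {
			/* flush the mapping and leave an invalid pte so
			 * the next fault allocates a private anonymous
			 * page that can hold its own key */
			ptep_flush_direct(mm, addr, pte);
			pte_val(*pte) = _PAGE_INVALID;
		}
	}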
+int s390_enable_skey(void)
+{
+       struct mm_walk walk = { .pte_entry = __s390_enable_skey };
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       int rc = 0;
+
+       down_write(&mm->mmap_sem);
+       if (mm_use_skey(mm))
+               goto out_up;
+
+       mm->context.use_skey = 1;
+       for (vma = mm->mmap; vma; vma = vma->vm_next) {
+               if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
+                               MADV_UNMERGEABLE, &vma->vm_flags)) {
+                       mm->context.use_skey = 0;
+                       rc = -ENOMEM;
+                       goto out_up;
+               }
+       }
+       mm->def_flags &= ~VM_MERGEABLE;
+
+       walk.mm = mm;
+       walk_page_range(0, TASK_SIZE, &walk);
+
+out_up:
+       up_write(&mm->mmap_sem);
+       return rc;
 }
 EXPORT_SYMBOL_GPL(s390_enable_skey);
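
Note that s390_enable_skey() now returns int where the old entry point
returned void: ksm_madvise(..., MADV_UNMERGEABLE, ...) has to break
every KSM-merged page back into private copies before per-page storage
keys make sense, and that unmerging can fail with -ENOMEM. A hedged
sketch of how a caller might consume the new return code (the
surrounding handler is hypothetical, not part of this patch):

	static int handle_skey_instruction(struct kvm_vcpu *vcpu)
	{
		int rc = s390_enable_skey();

		if (rc)
			return rc;	/* typically -ENOMEM from unmerging */
		/* storage keys are live; retry the guest instruction */
		return 0;
	}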
 
+/*
+ * Reset CMMA state, make all pages stable again.
+ */
+static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
+                            unsigned long next, struct mm_walk *walk)
+{
+       pgste_t pgste;
+
+       pgste = pgste_get_lock(pte);
+       pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
+       pgste_set_unlock(pte, pgste);
+       return 0;
+}
+
+void s390_reset_cmma(struct mm_struct *mm)
+{
+       struct mm_walk walk = { .pte_entry = __s390_reset_cmma };
+
+       down_write(&mm->mmap_sem);
+       walk.mm = mm;
+       walk_page_range(0, TASK_SIZE, &walk);
+       up_write(&mm->mmap_sem);
+}
+EXPORT_SYMBOL_GPL(s390_reset_cmma);
+
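
s390_reset_cmma() reuses the walker shape of s390_enable_skey() but
only clears the CMMA usage state from each pgste, turning every guest
page back to stable. A hypothetical caller sketch, assuming the usual
gmap-to-host-mm relationship (the handler name is illustrative):

	static int kvm_s390_clear_cmma_state(struct kvm *kvm)
	{
		/* the guest address space is backed by the host mm
		 * that created the gmap */
		s390_reset_cmma(kvm->arch.gmap->mm);
		return 0;
	}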
 /*
  * Test and reset if a guest page is dirty
  */