s390/thp: respect page protection in pmd_none() and pmd_present()
author Gerald Schaefer <gerald.schaefer@de.ibm.com>
Thu, 25 Oct 2012 15:42:50 +0000 (17:42 +0200)
committer Martin Schwidefsky <schwidefsky@de.ibm.com>
Fri, 26 Oct 2012 14:44:23 +0000 (16:44 +0200)
Similar to pte_none() and pte_present(), the pmd functions pmd_none() and
pmd_present() should also respect the page protection of huge pages,
especially PROT_NONE: a PROT_NONE huge pmd has the invalid bit set, so the
old checks would misreport it as an empty entry rather than as a present
but inaccessible mapping.
This patch also simplifies massage_pgprot_pmd() by adding new definitions
(SEGMENT_NONE, SEGMENT_RO, SEGMENT_RW) for huge page protection.
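
For reference, a minimal sketch of the classification these checks imply,
using only the _SEGMENT_ENTRY_INV and _SEGMENT_ENTRY_RO bits as in the
patch below; huge_pmd_state() is a hypothetical helper for illustration
only, not part of the patch:

	/*
	 * Illustrative only: classify a huge pmd the way the new
	 * pmd_none()/pmd_present() checks do.
	 */
	enum huge_pmd_state { HPMD_EMPTY, HPMD_PROT_NONE, HPMD_MAPPED };

	static inline enum huge_pmd_state huge_pmd_state(pmd_t pmd)
	{
		if (pmd_val(pmd) & _SEGMENT_ENTRY_INV) {
			if (pmd_val(pmd) & _SEGMENT_ENTRY_RO)
				return HPMD_PROT_NONE;	/* _HPAGE_TYPE_NONE: present, not accessible */
			return HPMD_EMPTY;		/* pmd_none() == 1 */
		}
		return HPMD_MAPPED;			/* read-only or read-write, pmd_present() == 1 */
	}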

Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
arch/s390/include/asm/pgtable.h

index dd647c9..2d3b7cb 100644
@@ -506,12 +506,15 @@ static inline int pud_bad(pud_t pud)
 
 static inline int pmd_present(pmd_t pmd)
 {
-       return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
+       unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO;
+       return (pmd_val(pmd) & mask) == _HPAGE_TYPE_NONE ||
+              !(pmd_val(pmd) & _SEGMENT_ENTRY_INV);
 }
 
 static inline int pmd_none(pmd_t pmd)
 {
-       return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
+       return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) &&
+              !(pmd_val(pmd) & _SEGMENT_ENTRY_RO);
 }
 
 static inline int pmd_large(pmd_t pmd)
@@ -1223,6 +1226,11 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+#define SEGMENT_NONE   __pgprot(_HPAGE_TYPE_NONE)
+#define SEGMENT_RO     __pgprot(_HPAGE_TYPE_RO)
+#define SEGMENT_RW     __pgprot(_HPAGE_TYPE_RW)
+
 #define __HAVE_ARCH_PGTABLE_DEPOSIT
 extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
 
@@ -1242,16 +1250,15 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 
 static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
 {
-       unsigned long pgprot_pmd = 0;
-
-       if (pgprot_val(pgprot) & _PAGE_INVALID) {
-               if (pgprot_val(pgprot) & _PAGE_SWT)
-                       pgprot_pmd |= _HPAGE_TYPE_NONE;
-               pgprot_pmd |= _SEGMENT_ENTRY_INV;
-       }
-       if (pgprot_val(pgprot) & _PAGE_RO)
-               pgprot_pmd |= _SEGMENT_ENTRY_RO;
-       return pgprot_pmd;
+       /*
+        * pgprot is PAGE_NONE, PAGE_RO, or PAGE_RW (see __Pxxx / __Sxxx)
+        * Convert to segment table entry format.
+        */
+       if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
+               return pgprot_val(SEGMENT_NONE);
+       if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
+               return pgprot_val(SEGMENT_RO);
+       return pgprot_val(SEGMENT_RW);
 }
 
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
@@ -1269,7 +1276,9 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
 
 static inline pmd_t pmd_mkwrite(pmd_t pmd)
 {
-       pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
+       /* Do not clobber _HPAGE_TYPE_NONE pages! */
+       if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INV))
+               pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
        return pmd;
 }