powerpc/mm: Move around testing of _PAGE_PRESENT in hash code
author Benjamin Herrenschmidt <benh@kernel.crashing.org>
Thu, 22 Jul 2010 22:53:23 +0000 (08:53 +1000)
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>
Thu, 22 Jul 2010 22:53:23 +0000 (08:53 +1000)
Instead of adding _PAGE_PRESENT to the access permission mask
in each low-level routine independently, we add it once in
hash_page().

We also move the preliminary access check (the racy one done before
the PTE is locked) up so that it also covers the huge page case. This
duplicates a check already done in __hash_page_huge(); that copy will
be removed by a subsequent patch which also fixes a race there.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
arch/powerpc/mm/hash_low_64.S
arch/powerpc/mm/hash_utils_64.c
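
In C terms, the consolidated path in hash_page() now looks roughly like
the sketch below (a simplified fragment based on the hunks that follow;
the PTE lookup and the __hash_page_4K()/__hash_page_64K() calls are
elided, and the trailing arguments of __hash_page_huge() are left out
exactly as they are truncated in the hunk):

        /* Fragment of hash_page() in arch/powerpc/mm/hash_utils_64.c
         * (simplified sketch, not the verbatim source)
         */

        /* Every fault needs a present PTE; fold the bit in once here
         * instead of in each low-level __hash_page_XX entry point.
         */
        access |= _PAGE_PRESENT;

        /* Racy pre-check, done before the PTE is locked; the permission
         * test is repeated atomically inside __hash_page_XX.
         */
        if (access & ~pte_val(*ptep)) {
                DBG_LOW(" no access !\n");
                return 1;
        }

#ifdef CONFIG_HUGETLB_PAGE
        /* Huge pages now pass through the same pre-check before this call */
        if (hugeshift)
                return __hash_page_huge(ea, access, vsid, ptep, trap, local,
                                        /* remaining arguments as in the hunk below */);
#endif

        /* ... normal __hash_page_4K() / __hash_page_64K() hashing follows ... */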

diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index a719f53..3079f6b 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -68,9 +68,6 @@ _GLOBAL(__hash_page_4K)
        std     r8,STK_PARM(r8)(r1)
        std     r9,STK_PARM(r9)(r1)
        
-       /* Add _PAGE_PRESENT to access */
-       ori     r4,r4,_PAGE_PRESENT
-
        /* Save non-volatile registers.
         * r31 will hold "old PTE"
         * r30 is "new PTE"
@@ -347,9 +344,6 @@ _GLOBAL(__hash_page_4K)
        std     r8,STK_PARM(r8)(r1)
        std     r9,STK_PARM(r9)(r1)
 
-       /* Add _PAGE_PRESENT to access */
-       ori     r4,r4,_PAGE_PRESENT
-
        /* Save non-volatile registers.
         * r31 will hold "old PTE"
         * r30 is "new PTE"
@@ -687,9 +681,6 @@ _GLOBAL(__hash_page_64K)
        std     r8,STK_PARM(r8)(r1)
        std     r9,STK_PARM(r9)(r1)
 
-       /* Add _PAGE_PRESENT to access */
-       ori     r4,r4,_PAGE_PRESENT
-
        /* Save non-volatile registers.
         * r31 will hold "old PTE"
         * r30 is "new PTE"
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 98f262d..40847a9 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -955,6 +955,17 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
                return 1;
        }
 
+       /* Add _PAGE_PRESENT to the required access perm */
+       access |= _PAGE_PRESENT;
+
+       /* Pre-check access permissions (will be re-checked atomically
+        * in __hash_page_XX but this pre-check is a fast path
+        */
+       if (access & ~pte_val(*ptep)) {
+               DBG_LOW(" no access !\n");
+               return 1;
+       }
+
 #ifdef CONFIG_HUGETLB_PAGE
        if (hugeshift)
                return __hash_page_huge(ea, access, vsid, ptep, trap, local,
@@ -967,14 +978,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
        DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
                pte_val(*(ptep + PTRS_PER_PTE)));
 #endif
-       /* Pre-check access permissions (will be re-checked atomically
-        * in __hash_page_XX but this pre-check is a fast path
-        */
-       if (access & ~pte_val(*ptep)) {
-               DBG_LOW(" no access !\n");
-               return 1;
-       }
-
        /* Do actual hashing */
 #ifdef CONFIG_PPC_64K_PAGES
        /* If _PAGE_4K_PFN is set, make sure this is a 4k segment */