sparc64: Support transparent huge pages.
[cascardo/linux.git] / arch / sparc / include / asm / tsb.h
index 1a8afd1..b4c258d 100644 (file)
@@ -147,20 +147,96 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
        brz,pn          REG1, FAIL_LABEL; \
         sllx           VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
        srlx            REG2, 64 - PAGE_SHIFT, REG2; \
-       sllx            REG1, 11, REG1; \
+       sllx            REG1, PGD_PADDR_SHIFT, REG1; \
        andn            REG2, 0x3, REG2; \
        lduwa           [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
        brz,pn          REG1, FAIL_LABEL; \
         sllx           VADDR, 64 - PMD_SHIFT, REG2; \
-       srlx            REG2, 64 - PAGE_SHIFT, REG2; \
-       sllx            REG1, 11, REG1; \
+       srlx            REG2, 64 - (PAGE_SHIFT - 1), REG2; \
+       sllx            REG1, PMD_PADDR_SHIFT, REG1; \
        andn            REG2, 0x7, REG2; \
        add             REG1, REG2, REG1;
 
-       /* Do a user page table walk in MMU globals.  Leaves physical PTE
-        * pointer in REG1.  Jumps to FAIL_LABEL on early page table walk
-        * termination.  Physical base of page tables is in PHYS_PGD which
-        * will not be modified.
+       /* This macro exists only to make the PMD translator below easier
+        * to read.  It hides the ELF section switch for the sun4v code
+        * patching.
+        */
+#define OR_PTE_BIT(REG, NAME)                          \
+661:   or              REG, _PAGE_##NAME##_4U, REG;    /* 661: patch site, sun4u encoding of bit NAME */ \
+       .section        .sun4v_1insn_patch, "ax";       /* sun4v code-patching record */ \
+       .word           661b;                           /* address of the insn to replace */ \
+       or              REG, _PAGE_##NAME##_4V, REG;    /* replacement insn: sun4v encoding */ \
+       .previous;
+
+       /* Load into REG the PTE value for VALID, CACHE, and SZHUGE.  */
+#define BUILD_PTE_VALID_SZHUGE_CACHE(REG)                                 \
+661:   sethi           %uhi(_PAGE_VALID|_PAGE_SZHUGE_4U), REG;            /* sun4u: VALID+SZHUGE in upper bits */ \
+       .section        .sun4v_1insn_patch, "ax";                          \
+       .word           661b;                                              \
+       sethi           %uhi(_PAGE_VALID), REG;                            /* sun4v: only VALID up high (SZHUGE is low, OR'd below) */ \
+       .previous;                                                         \
+       sllx            REG, 32, REG;                                      /* shift the %uhi bits into position */ \
+661:   or              REG, _PAGE_CP_4U|_PAGE_CV_4U, REG;                 /* sun4u cacheability bits */ \
+       .section        .sun4v_1insn_patch, "ax";                          \
+       .word           661b;                                              \
+       or              REG, _PAGE_CP_4V|_PAGE_CV_4V|_PAGE_SZHUGE_4V, REG; /* sun4v: cacheability + low SZHUGE */ \
+       .previous;
+
+       /* PMD has been loaded into REG1, interpret the value, seeing
+        * if it is a HUGE PMD or a normal one.  If it is not valid
+        * then jump to FAIL_LABEL.  If it is a HUGE PMD, and it
+        * translates to a valid PTE, branch to PTE_LABEL.
+        *
+        * We translate the PMD by hand, one bit at a time,
+        * constructing the huge PTE.
+        *
+        * So we construct the PTE in REG2 as follows:
+        *
+        * 1) Extract the PMD PFN from REG1 and place it into REG2.
+        *
+        * 2) Translate PMD protection bits in REG1 into REG2, one bit
+        *    at a time using andcc tests on REG1 and OR's into REG2.
+        *
+        *    Only two bits to be concerned with here, EXEC and WRITE.
+        *    Now REG1 is freed up and we can use it as a temporary.
+        *
+        * 3) Construct the VALID, CACHE, and page size PTE bits in
+        *    REG1, OR with REG2 to form final PTE.
+        */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
+       brz,pn          REG1, FAIL_LABEL;       /* zero pmd entry -> fail */  \
+        andcc          REG1, PMD_ISHUGE, %g0;                                \
+       be,pt           %xcc, 700f;             /* not huge: resume normal walk */ \
+        and            REG1, PMD_HUGE_PRESENT|PMD_HUGE_ACCESSED, REG2;       \
+       cmp             REG2, PMD_HUGE_PRESENT|PMD_HUGE_ACCESSED;             \
+       bne,pn          %xcc, FAIL_LABEL;       /* require both PRESENT and ACCESSED */ \
+        andn           REG1, PMD_HUGE_PROTBITS, REG2;                        \
+       sllx            REG2, PMD_PADDR_SHIFT, REG2;                          \
+       /* REG2 now holds PFN << PAGE_SHIFT */                                \
+       andcc           REG1, PMD_HUGE_EXEC, %g0;                             \
+       bne,a,pt        %xcc, 1f;               /* ,a: delay slot runs only if taken */ \
+        OR_PTE_BIT(REG2, EXEC);                /* so EXEC is OR'd only when set */ \
+1:     andcc           REG1, PMD_HUGE_WRITE, %g0;                            \
+       bne,a,pt        %xcc, 1f;               /* same annul trick for WRITE */ \
+        OR_PTE_BIT(REG2, W);                                                 \
+       /* REG1 can now be clobbered, build final PTE */                      \
+1:     BUILD_PTE_VALID_SZHUGE_CACHE(REG1);                                   \
+       ba,pt           %xcc, PTE_LABEL;        /* huge PTE complete in REG1 */ \
+        or             REG1, REG2, REG1;                                     \
+700:
+#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
+#define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
+       brz,pn          REG1, FAIL_LABEL; /* no THP: just check for a zero pmd */ \
+        nop;
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+       /* Do a user page table walk in MMU globals.  Leaves final,
+        * valid, PTE value in REG1.  Jumps to FAIL_LABEL on early
+        * page table walk termination or if the PTE is not valid.
+        *
+        * Physical base of page tables is in PHYS_PGD which will not
+        * be modified.
         *
         * VADDR will not be clobbered, but REG1 and REG2 will.
         */
@@ -172,15 +248,19 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
        brz,pn          REG1, FAIL_LABEL; \
         sllx           VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
        srlx            REG2, 64 - PAGE_SHIFT, REG2; \
-       sllx            REG1, 11, REG1; \
+       sllx            REG1, PGD_PADDR_SHIFT, REG1; \
        andn            REG2, 0x3, REG2; \
        lduwa           [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
-       brz,pn          REG1, FAIL_LABEL; \
-        sllx           VADDR, 64 - PMD_SHIFT, REG2; \
-       srlx            REG2, 64 - PAGE_SHIFT, REG2; \
-       sllx            REG1, 11, REG1; \
+       USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, 800f) \
+       sllx            VADDR, 64 - PMD_SHIFT, REG2; \
+       srlx            REG2, 64 - (PAGE_SHIFT - 1), REG2; \
+       sllx            REG1, PMD_PADDR_SHIFT, REG1; \
        andn            REG2, 0x7, REG2; \
-       add             REG1, REG2, REG1;
+       add             REG1, REG2, REG1; \
+       ldxa            [REG1] ASI_PHYS_USE_EC, REG1; \
+       brgez,pn        REG1, FAIL_LABEL; \
+        nop; \
+800:
 
 /* Lookup a OBP mapping on VADDR in the prom_trans[] table at TL>0.
  * If no entry is found, FAIL_LABEL will be branched to.  On success