#ifndef _ASM_POWERPC_BOOK3S_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_PGTABLE_H

#ifdef CONFIG_PPC64
#include <asm/book3s/64/pgtable.h>
#else
#include <asm/book3s/32/pgtable.h>
#endif

#define FIRST_USER_ADDRESS	0UL
#ifndef __ASSEMBLY__
/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_RW);}
static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
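
/*
 * Illustrative only (not a kernel API): generic mm code combines these
 * accessors when deciding how to handle a fault, roughly:
 *
 *	if (pte_present(pte) && pte_write(pte))
 *		entry = pte_mkdirty(pte_mkyoung(pte));
 */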
#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h . On powerpc, this will only
 * work for user pages and always return true for kernel pages.
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) &
		(_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */
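
/*
 * Note on the protnone encoding above: a PROT_NONE / NUMA-hinting PTE keeps
 * _PAGE_PRESENT set but has _PAGE_USER cleared, so a user access faults and
 * the fault path can recognise it via pte_protnone().
 */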
static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}
/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return pte_val(pte) >> PTE_RPN_SHIFT;
}
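
/*
 * Illustrative only: pfn_pte()/pte_pfn() round-trip, assuming all of the
 * protection bits live below PTE_RPN_SHIFT:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *	WARN_ON(pte_pfn(pte) != pfn);
 */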
/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}
static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
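
/*
 * Illustrative only: pte_modify() preserves the bits in _PAGE_CHG_MASK
 * (PFN, dirty, accessed, ...) and replaces the remaining protection bits,
 * which is roughly what an mprotect()-style protection change does:
 *
 *	pte = pte_modify(pte, vma->vm_page_prot);
 */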
/* Insert a PTE: the top-level function is out of line. It uses an inline
 * low-level function in the respective pgtable-* files.
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);
/* This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spread around.
 */
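/*
 * Note: "percpu" below means the mapping is strictly local to this CPU
 * (e.g. a kmap_atomic slot), so no other CPU can fault it in or invalidate
 * it concurrently and a plain, non-atomic update is sufficient.
 */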
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use
	 * the helper pte_update() which does an atomic update. We need to do
	 * that because a concurrent invalidation can clear _PAGE_HASHPTE. If
	 * it's a per-CPU PTE such as a kmap_atomic, we do a simple update
	 * preserving the hash bits instead (i.e., same as the non-SMP case).
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
	/* Second case is 32-bit with 64-bit PTE. In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between. This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update
	 * preserving the hash bits.
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");
#elif defined(CONFIG_PPC_STD_MMU_32)
	/* Third case is 32-bit hash table in UP mode. We need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
	 * and so we need to keep track that this PTE needs invalidating.
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
	/* Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;
#endif
}
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);
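
/*
 * Note: ptep_set_access_flags() is used by the fault path to set the
 * accessed/dirty bits in an existing PTE; its return value tells the
 * caller whether the PTE actually changed (and thus needs flushing).
 */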
/*
 * Macro to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE | _PAGE_GUARDED);
}
#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE);
}
#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT);
}
#define pgprot_cached_wthru pgprot_cached_wthru
static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT | _PAGE_WRITETHRU);
}
#define pgprot_cached_noncoherent pgprot_cached_noncoherent
static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
}
#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return pgprot_noncached_wc(prot);
}
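
/*
 * Summary of the cacheability helpers above, in terms of the WIMG-style
 * bits (I = _PAGE_NO_CACHE, G = _PAGE_GUARDED, M = _PAGE_COHERENT,
 * W = _PAGE_WRITETHRU):
 *
 *	pgprot_noncached()		I + G	(MMIO, strongly ordered)
 *	pgprot_noncached_wc()		I	(uncached, no guarding)
 *	pgprot_cached()			M	(normal coherent memory)
 *	pgprot_cached_wthru()		M + W	(coherent, write-through)
 *	pgprot_cached_noncoherent()	-	(cacheable, not coherent)
 *	pgprot_writecombine()		alias of pgprot_noncached_wc()
 */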
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT
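
/*
 * With __HAVE_PHYS_MEM_ACCESS_PROT defined, generic code such as the
 * /dev/mem mmap path calls phys_mem_access_prot() above so the platform
 * can choose cacheable vs. non-cached attributes for a given pfn range.
 */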
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_PGTABLE_H */