/*
 * From commit "powerpc/mm/radix: Add tlbflush routines"
 * arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
 */
1 #ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H
2 #define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H
3
4 /*
5  * TLB flushing for 64-bit hash-MMU CPUs
6  */
7
8 #include <linux/percpu.h>
9 #include <asm/page.h>
10
/* Maximum number of PTE invalidations queued per CPU before a forced flush. */
#define PPC64_TLB_BATCH_NR 192

/*
 * Per-CPU batch of pending hash-MMU TLB invalidations, filled while the
 * CPU is in lazy MMU mode (see arch_enter/leave_lazy_mmu_mode below) and
 * drained by __flush_tlb_pending().
 */
struct ppc64_tlb_batch {
        int                     active;   /* nonzero while lazy MMU mode is on */
        unsigned long           index;    /* number of queued entries; 0 = empty */
        struct mm_struct        *mm;      /* address space the entries belong to */
        real_pte_t              pte[PPC64_TLB_BATCH_NR];  /* queued PTE values */
        unsigned long           vpn[PPC64_TLB_BATCH_NR];  /* matching virtual page numbers */
        unsigned int            psize;    /* presumably page-size index — confirm vs flush_hash_range() */
        int                     ssize;    /* presumably segment size (ssize) — confirm vs flush_hash_range() */
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* Flush every invalidation queued in @batch and reset it (defined in mm code). */
extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
25
26 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
27
28 static inline void arch_enter_lazy_mmu_mode(void)
29 {
30         struct ppc64_tlb_batch *batch;
31
32         if (radix_enabled())
33                 return;
34         batch = this_cpu_ptr(&ppc64_tlb_batch);
35         batch->active = 1;
36 }
37
38 static inline void arch_leave_lazy_mmu_mode(void)
39 {
40         struct ppc64_tlb_batch *batch;
41
42         if (radix_enabled())
43                 return;
44         batch = this_cpu_ptr(&ppc64_tlb_batch);
45
46         if (batch->index)
47                 __flush_tlb_pending(batch);
48         batch->active = 0;
49 }
50
/* Nothing to do mid-mode: pending entries are only flushed on leave. */
#define arch_flush_lazy_mmu_mode()      do {} while (0)


/* Invalidate the hash-table entry for one page (vpn/pte identify it). */
extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
                            int ssize, unsigned long flags);
/* Flush @number queued entries; @local selects local-only invalidation. */
extern void flush_hash_range(unsigned long number, int local);
/* Invalidate the hash entries backing a hugepage mapped by @pmdp. */
extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
                                pmd_t *pmdp, unsigned int psize, int ssize,
                                unsigned long flags);
/*
 * The hash__* flush routines below are intentionally empty: on the hash
 * MMU, TLB invalidation is presumably driven through the batching
 * mechanism above and hash__tlb_flush() rather than per-range calls —
 * TODO(review): confirm against arch/powerpc/mm hash flush code.
 */
static inline void hash__local_flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void hash__flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void hash__local_flush_tlb_page(struct vm_area_struct *vma,
                                          unsigned long vmaddr)
{
}

static inline void hash__flush_tlb_page(struct vm_area_struct *vma,
                                    unsigned long vmaddr)
{
}

static inline void hash__flush_tlb_page_nohash(struct vm_area_struct *vma,
                                           unsigned long vmaddr)
{
}

static inline void hash__flush_tlb_range(struct vm_area_struct *vma,
                                     unsigned long start, unsigned long end)
{
}

static inline void hash__flush_tlb_kernel_range(unsigned long start,
                                            unsigned long end)
{
}
92
93
/* Forward declaration only; a pointer suffices for the prototype below. */
struct mmu_gather;
/* Finish an mmu_gather unmap operation for the hash MMU. */
extern void hash__tlb_flush(struct mmu_gather *tlb);
/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
                                     unsigned long end);
/* Flush hash entries for the page-table page mapped by @pmd at @addr. */
extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
                                unsigned long addr);
#endif /*  _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */