/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    Module name: htab.c
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG
#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/memblock.h>
#include <linux/context_tracking.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/spu.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>
#include <asm/fadump.h>
#include <asm/firmware.h>
#include <asm/tm.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define KB (1024)
#define MB (1024*KB)
#define GB (1024L*MB)

/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 *
 */

#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */

static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

struct hash_pte *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
EXPORT_SYMBOL_GPL(htab_hash_mask);
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int mmu_vmemmap_psize = MMU_PAGE_4K;
#endif
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
u16 mmu_slb_size = 64;
EXPORT_SYMBOL_GPL(mmu_slb_size);
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock);
#endif /* CONFIG_DEBUG_PAGEALLOC */

/* These are definitions of page size arrays to be used when none
 * is provided by the firmware.
 */

/* Pre-POWER4 CPUs (4k pages only)
 */
static struct mmu_psize_def mmu_psize_defaults_old[] = {
        [MMU_PAGE_4K] = {
                .shift  = 12,
                .sllp   = 0,
                .penc   = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},
                .avpnm  = 0,
                .tlbiel = 0,
        },
};

/* POWER4, GPUL, POWER5
 *
 * Support for 16MB large pages
 */
static struct mmu_psize_def mmu_psize_defaults_gp[] = {
        [MMU_PAGE_4K] = {
                .shift  = 12,
                .sllp   = 0,
                .penc   = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},
                .avpnm  = 0,
                .tlbiel = 1,
        },
        [MMU_PAGE_16M] = {
                .shift  = 24,
                .sllp   = SLB_VSID_L,
                .penc   = {[0 ... MMU_PAGE_16M - 1] = -1, [MMU_PAGE_16M] = 0,
                            [MMU_PAGE_16M + 1 ... MMU_PAGE_COUNT - 1] = -1 },
                .avpnm  = 0x1UL,
                .tlbiel = 0,
        },
};

static unsigned long htab_convert_pte_flags(unsigned long pteflags)
{
        unsigned long rflags = pteflags & 0x1fa;

        /* _PAGE_EXEC -> NOEXEC */
        if ((pteflags & _PAGE_EXEC) == 0)
                rflags |= HPTE_R_N;

        /* PP bits. PAGE_USER is already PP bit 0x2, so we only
         * need to add in 0x1 if it's a read-only user page
         */
        if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&
                                         (pteflags & _PAGE_DIRTY)))
                rflags |= 1;

        /* Always add C */
        return rflags | HPTE_R_C;
}
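
/*
 * Worked example (illustrative, assuming the classic hash64 PTE bits):
 * a read-only user page has _PAGE_USER set but not both _PAGE_RW and
 * _PAGE_DIRTY, so the test above ORs in 0x1 and the HPTE PP field
 * becomes 0x3 (user read-only); a writable, dirty user page keeps
 * PP = 0x2 (user read/write). HPTE_R_C is always set so the hardware
 * never has to update the change bit later.
 */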

int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
                      unsigned long pstart, unsigned long prot,
                      int psize, int ssize)
{
        unsigned long vaddr, paddr;
        unsigned int step, shift;
        int ret = 0;

        shift = mmu_psize_defs[psize].shift;
        step = 1 << shift;

        prot = htab_convert_pte_flags(prot);

        DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n",
            vstart, vend, pstart, prot, psize, ssize);

        for (vaddr = vstart, paddr = pstart; vaddr < vend;
             vaddr += step, paddr += step) {
                unsigned long hash, hpteg;
                unsigned long vsid = get_kernel_vsid(vaddr, ssize);
                unsigned long vpn  = hpt_vpn(vaddr, vsid, ssize);
                unsigned long tprot = prot;

                /*
                 * If we hit a bad address return error.
                 */
                if (!vsid)
                        return -1;
                /* Make kernel text executable */
                if (overlaps_kernel_text(vaddr, vaddr + step))
                        tprot &= ~HPTE_R_N;

                hash = hpt_hash(vpn, shift, ssize);
                hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

                BUG_ON(!ppc_md.hpte_insert);
                ret = ppc_md.hpte_insert(hpteg, vpn, paddr, tprot,
                                         HPTE_V_BOLTED, psize, psize, ssize);

                if (ret < 0)
                        break;
#ifdef CONFIG_DEBUG_PAGEALLOC
                if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
                        linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
#endif /* CONFIG_DEBUG_PAGEALLOC */
        }
        return ret < 0 ? ret : 0;
}
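
/*
 * Note on the group arithmetic above (values illustrative): hpt_hash()
 * hashes the VPN, the AND with htab_hash_mask picks one of pteg_count
 * groups, and the multiply by HPTES_PER_GROUP (8) turns that group
 * number into the index of the group's first HPTE slot. E.g. with
 * htab_hash_mask = 0x1ffff (2^17 PTEGs), hash = 0x12345 selects group
 * 0x12345 and hpteg = 0x12345 * 8 = 0x91a28.
 */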

#ifdef CONFIG_MEMORY_HOTPLUG
static int htab_remove_mapping(unsigned long vstart, unsigned long vend,
                      int psize, int ssize)
{
        unsigned long vaddr;
        unsigned int step, shift;

        shift = mmu_psize_defs[psize].shift;
        step = 1 << shift;

        if (!ppc_md.hpte_removebolted) {
                printk(KERN_WARNING "Platform doesn't implement "
                                "hpte_removebolted\n");
                return -EINVAL;
        }

        for (vaddr = vstart; vaddr < vend; vaddr += step)
                ppc_md.hpte_removebolted(vaddr, psize, ssize);

        return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static int __init htab_dt_scan_seg_sizes(unsigned long node,
                                         const char *uname, int depth,
                                         void *data)
{
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        __be32 *prop;
        unsigned long size = 0;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", &size);
        if (prop == NULL)
                return 0;
        for (; size >= 4; size -= 4, ++prop) {
                if (be32_to_cpu(prop[0]) == 40) {
                        DBG("1T segment support detected\n");
                        cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT;
                        return 1;
                }
        }
        cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
        return 0;
}

static void __init htab_init_seg_sizes(void)
{
        of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
}

static int __init get_idx_from_shift(unsigned int shift)
{
        int idx = -1;

        switch (shift) {
        case 0xc:
                idx = MMU_PAGE_4K;
                break;
        case 0x10:
                idx = MMU_PAGE_64K;
                break;
        case 0x14:
                idx = MMU_PAGE_1M;
                break;
        case 0x18:
                idx = MMU_PAGE_16M;
                break;
        case 0x22:
                idx = MMU_PAGE_16G;
                break;
        }
        return idx;
}
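
/*
 * The case labels above are page-size exponents in hex: 0xc = 2^12
 * (4K), 0x10 = 2^16 (64K), 0x14 = 2^20 (1M), 0x18 = 2^24 (16M) and
 * 0x22 = 2^34 (16G).
 */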

static int __init htab_dt_scan_page_sizes(unsigned long node,
                                          const char *uname, int depth,
                                          void *data)
{
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        __be32 *prop;
        unsigned long size = 0;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = of_get_flat_dt_prop(node, "ibm,segment-page-sizes", &size);
        if (prop != NULL) {
                pr_info("Page sizes from device-tree:\n");
                size /= 4;
                cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);
                while(size > 0) {
                        unsigned int base_shift = be32_to_cpu(prop[0]);
                        unsigned int slbenc = be32_to_cpu(prop[1]);
                        unsigned int lpnum = be32_to_cpu(prop[2]);
                        struct mmu_psize_def *def;
                        int idx, base_idx;

                        size -= 3; prop += 3;
                        base_idx = get_idx_from_shift(base_shift);
                        if (base_idx < 0) {
                                /*
                                 * skip the pte encoding also
                                 */
                                prop += lpnum * 2; size -= lpnum * 2;
                                continue;
                        }
                        def = &mmu_psize_defs[base_idx];
                        if (base_idx == MMU_PAGE_16M)
                                cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE;

                        def->shift = base_shift;
                        if (base_shift <= 23)
                                def->avpnm = 0;
                        else
                                def->avpnm = (1 << (base_shift - 23)) - 1;
                        def->sllp = slbenc;
                        /*
                         * We don't know for sure what's up with tlbiel, so
                         * for now we only set it for 4K and 64K pages
                         */
                        if (base_idx == MMU_PAGE_4K || base_idx == MMU_PAGE_64K)
                                def->tlbiel = 1;
                        else
                                def->tlbiel = 0;

                        while (size > 0 && lpnum) {
                                unsigned int shift = be32_to_cpu(prop[0]);
                                int penc  = be32_to_cpu(prop[1]);

                                prop += 2; size -= 2;
                                lpnum--;

                                idx = get_idx_from_shift(shift);
                                if (idx < 0)
                                        continue;

                                if (penc == -1)
                                        pr_err("Invalid penc for base_shift=%d "
                                               "shift=%d\n", base_shift, shift);

                                def->penc[idx] = penc;
                                pr_info("base_shift=%d: shift=%d, sllp=0x%04lx,"
                                        " avpnm=0x%08lx, tlbiel=%d, penc=%d\n",
                                        base_shift, shift, def->sllp,
                                        def->avpnm, def->tlbiel, def->penc[idx]);
                        }
                }
                return 1;
        }
        return 0;
}
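
/*
 * For reference, the "ibm,segment-page-sizes" property parsed above is
 * a flat list of 32-bit cells: for each base page size a triple
 * (base_shift, slbenc, lpnum) followed by lpnum pairs (shift, penc).
 * A minimal sketch of one entry (values illustrative, not from any
 * particular firmware):
 *
 *   0x0c 0x0 0x1      4K base segment, SLB encoding 0, 1 actual size
 *     0x0c 0x0        4K pages in a 4K segment, penc 0
 */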

#ifdef CONFIG_HUGETLB_PAGE
/* Scan for 16G memory blocks that have been set aside for huge pages
 * and reserve those blocks for 16G huge pages.
 */
static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
                                        const char *uname, int depth,
                                        void *data) {
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        __be64 *addr_prop;
        __be32 *page_count_prop;
        unsigned int expected_pages;
        long unsigned int phys_addr;
        long unsigned int block_size;

        /* We are scanning "memory" nodes only */
        if (type == NULL || strcmp(type, "memory") != 0)
                return 0;

        /* This property is the log base 2 of the number of virtual pages that
         * will represent this memory block. */
        page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
        if (page_count_prop == NULL)
                return 0;
        expected_pages = (1 << be32_to_cpu(page_count_prop[0]));
        addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
        if (addr_prop == NULL)
                return 0;
        phys_addr = be64_to_cpu(addr_prop[0]);
        block_size = be64_to_cpu(addr_prop[1]);
        if (block_size != (16 * GB))
                return 0;
        printk(KERN_INFO "Huge page(16GB) memory: "
                        "addr = 0x%lX size = 0x%lX pages = %d\n",
                        phys_addr, block_size, expected_pages);
        if (phys_addr + (16 * GB) <= memblock_end_of_DRAM()) {
                memblock_reserve(phys_addr, block_size * expected_pages);
                add_gpage(phys_addr, block_size, expected_pages);
        }
        return 0;
}
#endif /* CONFIG_HUGETLB_PAGE */

static void mmu_psize_set_default_penc(void)
{
        int bpsize, apsize;
        for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++)
                for (apsize = 0; apsize < MMU_PAGE_COUNT; apsize++)
                        mmu_psize_defs[bpsize].penc[apsize] = -1;
}

static void __init htab_init_page_sizes(void)
{
        int rc;

        /* set the invalid penc to -1 */
        mmu_psize_set_default_penc();

        /* Default to 4K pages only */
        memcpy(mmu_psize_defs, mmu_psize_defaults_old,
               sizeof(mmu_psize_defaults_old));

        /*
         * Try to find the available page sizes in the device-tree
         */
        rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
        if (rc != 0)  /* Found */
                goto found;

        /*
         * Not in the device-tree, let's fallback on known size
         * list for 16M capable GP & GR
         */
        if (mmu_has_feature(MMU_FTR_16M_PAGE))
                memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
                       sizeof(mmu_psize_defaults_gp));
 found:
#ifndef CONFIG_DEBUG_PAGEALLOC
        /*
         * Pick a size for the linear mapping. Currently, we only support
         * 16M, 1M and 4K which is the default
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift)
                mmu_linear_psize = MMU_PAGE_16M;
        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                mmu_linear_psize = MMU_PAGE_1M;
#endif /* CONFIG_DEBUG_PAGEALLOC */

#ifdef CONFIG_PPC_64K_PAGES
        /*
         * Pick a size for the ordinary pages. Default is 4K, we support
         * 64K for user mappings and vmalloc if supported by the processor.
         * We only use 64k for ioremap if the processor
         * (and firmware) support cache-inhibited large pages.
         * If not, we use 4k and set mmu_ci_restrictions so that
         * hash_page knows to switch processes that use cache-inhibited
         * mappings to 4k pages.
         */
        if (mmu_psize_defs[MMU_PAGE_64K].shift) {
                mmu_virtual_psize = MMU_PAGE_64K;
                mmu_vmalloc_psize = MMU_PAGE_64K;
                if (mmu_linear_psize == MMU_PAGE_4K)
                        mmu_linear_psize = MMU_PAGE_64K;
                if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) {
                        /*
                         * Don't use 64k pages for ioremap on pSeries, since
                         * that would stop us accessing the HEA ethernet.
                         */
                        if (!machine_is(pseries))
                                mmu_io_psize = MMU_PAGE_64K;
                } else
                        mmu_ci_restrictions = 1;
        }
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
        /* We try to use 16M pages for vmemmap if that is supported
         * and we have at least 1G of RAM at boot
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift &&
            memblock_phys_mem_size() >= 0x40000000)
                mmu_vmemmap_psize = MMU_PAGE_16M;
        else if (mmu_psize_defs[MMU_PAGE_64K].shift)
                mmu_vmemmap_psize = MMU_PAGE_64K;
        else
                mmu_vmemmap_psize = MMU_PAGE_4K;
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

        printk(KERN_DEBUG "Page orders: linear mapping = %d, "
               "virtual = %d, io = %d"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
               ", vmemmap = %d"
#endif
               "\n",
               mmu_psize_defs[mmu_linear_psize].shift,
               mmu_psize_defs[mmu_virtual_psize].shift,
               mmu_psize_defs[mmu_io_psize].shift
#ifdef CONFIG_SPARSEMEM_VMEMMAP
               ,mmu_psize_defs[mmu_vmemmap_psize].shift
#endif
               );

#ifdef CONFIG_HUGETLB_PAGE
        /* Reserve 16G huge page memory sections for huge pages */
        of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
#endif /* CONFIG_HUGETLB_PAGE */
}

static int __init htab_dt_scan_pftsize(unsigned long node,
                                       const char *uname, int depth,
                                       void *data)
{
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        __be32 *prop;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
        if (prop != NULL) {
                /* pft_size[0] is the NUMA CEC cookie */
                ppc64_pft_size = be32_to_cpu(prop[1]);
                return 1;
        }
        return 0;
}

static unsigned long __init htab_get_table_size(void)
{
        unsigned long mem_size, rnd_mem_size, pteg_count, psize;

        /* If hash size isn't already provided by the platform, we try to
         * retrieve it from the device-tree. If it's not there either, we
         * calculate it now based on the total RAM size
         */
        if (ppc64_pft_size == 0)
                of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
        if (ppc64_pft_size)
                return 1UL << ppc64_pft_size;

        /* round mem_size up to next power of 2 */
        mem_size = memblock_phys_mem_size();
        rnd_mem_size = 1UL << __ilog2(mem_size);
        if (rnd_mem_size < mem_size)
                rnd_mem_size <<= 1;

        /* # pages / 2 */
        psize = mmu_psize_defs[mmu_virtual_psize].shift;
        pteg_count = max(rnd_mem_size >> (psize + 1), 1UL << 11);

        return pteg_count << 7;
}
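
/*
 * Worked example (illustrative): with 1GB of RAM and 4K virtual pages
 * (shift 12), rnd_mem_size = 2^30 and pteg_count = max(2^30 >> 13,
 * 2^11) = 2^17. Each PTEG holds 8 HPTEs of 16 bytes (128 bytes, hence
 * the << 7), giving a 2^17 * 128 = 16MB hash table.
 */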

#ifdef CONFIG_MEMORY_HOTPLUG
int create_section_mapping(unsigned long start, unsigned long end)
{
        return htab_bolt_mapping(start, end, __pa(start),
                                 pgprot_val(PAGE_KERNEL), mmu_linear_psize,
                                 mmu_kernel_ssize);
}

int remove_section_mapping(unsigned long start, unsigned long end)
{
        return htab_remove_mapping(start, end, mmu_linear_psize,
                        mmu_kernel_ssize);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#define FUNCTION_TEXT(A)        ((*(unsigned long *)(A)))

static void __init htab_finish_init(void)
{
        extern unsigned int *htab_call_hpte_insert1;
        extern unsigned int *htab_call_hpte_insert2;
        extern unsigned int *htab_call_hpte_remove;
        extern unsigned int *htab_call_hpte_updatepp;

#ifdef CONFIG_PPC_HAS_HASH_64K
        extern unsigned int *ht64_call_hpte_insert1;
        extern unsigned int *ht64_call_hpte_insert2;
        extern unsigned int *ht64_call_hpte_remove;
        extern unsigned int *ht64_call_hpte_updatepp;

        patch_branch(ht64_call_hpte_insert1,
                FUNCTION_TEXT(ppc_md.hpte_insert),
                BRANCH_SET_LINK);
        patch_branch(ht64_call_hpte_insert2,
                FUNCTION_TEXT(ppc_md.hpte_insert),
                BRANCH_SET_LINK);
        patch_branch(ht64_call_hpte_remove,
                FUNCTION_TEXT(ppc_md.hpte_remove),
                BRANCH_SET_LINK);
        patch_branch(ht64_call_hpte_updatepp,
                FUNCTION_TEXT(ppc_md.hpte_updatepp),
                BRANCH_SET_LINK);

#endif /* CONFIG_PPC_HAS_HASH_64K */

        patch_branch(htab_call_hpte_insert1,
                FUNCTION_TEXT(ppc_md.hpte_insert),
                BRANCH_SET_LINK);
        patch_branch(htab_call_hpte_insert2,
                FUNCTION_TEXT(ppc_md.hpte_insert),
                BRANCH_SET_LINK);
        patch_branch(htab_call_hpte_remove,
                FUNCTION_TEXT(ppc_md.hpte_remove),
                BRANCH_SET_LINK);
        patch_branch(htab_call_hpte_updatepp,
                FUNCTION_TEXT(ppc_md.hpte_updatepp),
                BRANCH_SET_LINK);
}

static void __init htab_initialize(void)
{
        unsigned long table;
        unsigned long pteg_count;
        unsigned long prot;
        unsigned long base = 0, size = 0, limit;
        struct memblock_region *reg;

        DBG(" -> htab_initialize()\n");

        /* Initialize segment sizes */
        htab_init_seg_sizes();

        /* Initialize page sizes */
        htab_init_page_sizes();

        if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
                mmu_kernel_ssize = MMU_SEGSIZE_1T;
                mmu_highuser_ssize = MMU_SEGSIZE_1T;
                printk(KERN_INFO "Using 1TB segments\n");
        }

        /*
         * Calculate the required size of the htab.  We want the number of
         * PTEGs to equal one half the number of real pages.
         */
        htab_size_bytes = htab_get_table_size();
        pteg_count = htab_size_bytes >> 7;

        htab_hash_mask = pteg_count - 1;

        if (firmware_has_feature(FW_FEATURE_LPAR)) {
                /* Using a hypervisor which owns the htab */
                htab_address = NULL;
                _SDR1 = 0;
#ifdef CONFIG_FA_DUMP
                /*
                 * If firmware assisted dump is active firmware preserves
                 * the contents of htab along with entire partition memory.
                 * Clear the htab if firmware assisted dump is active so
                 * that we don't end up using old mappings.
                 */
                if (is_fadump_active() && ppc_md.hpte_clear_all)
                        ppc_md.hpte_clear_all();
#endif
        } else {
                /* Find storage for the HPT.  Must be contiguous in
                 * the absolute address space. On cell we want it to be
                 * in the first 2 Gig so we can use it for IOMMU hacks.
                 */
                if (machine_is(cell))
                        limit = 0x80000000;
                else
                        limit = MEMBLOCK_ALLOC_ANYWHERE;

                table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit);

                DBG("Hash table allocated at %lx, size: %lx\n", table,
                    htab_size_bytes);

                htab_address = __va(table);

                /* htab absolute addr + encoded htabsize */
                _SDR1 = table + __ilog2(pteg_count) - 11;
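                /*
                 * Illustrative encoding note: the low bits of SDR1 hold
                 * HTABSIZE = log2(pteg_count) - 11, the table size
                 * relative to the architectural minimum of 2^11 PTEGs
                 * (a 256KB table). A 16MB table (2^17 PTEGs) would be
                 * encoded as HTABSIZE = 6.
                 */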

                /* Initialize the HPT with no entries */
                memset((void *)table, 0, htab_size_bytes);

                /* Set SDR1 */
                mtspr(SPRN_SDR1, _SDR1);
        }

        prot = pgprot_val(PAGE_KERNEL);

#ifdef CONFIG_DEBUG_PAGEALLOC
        linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
        linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count,
                                                    1, ppc64_rma_size));
        memset(linear_map_hash_slots, 0, linear_map_hash_count);
#endif /* CONFIG_DEBUG_PAGEALLOC */

        /* On U3 based machines, we need to reserve the DART area and
         * _NOT_ map it to avoid cache paradoxes as it's remapped
         * non-cacheable later on
         */

        /* create the bolted linear mapping in the hash table */
        for_each_memblock(memory, reg) {
                base = (unsigned long)__va(reg->base);
                size = reg->size;

                DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
                    base, size, prot);

#ifdef CONFIG_U3_DART
                /* Do not map the DART space. Fortunately, it will be aligned
                 * in such a way that it will not cross two memblock regions and
                 * will fit within a single 16MB page.
                 * The DART space is assumed to be a full 16MB region even if
                 * we only use 2MB of that space. We will use more of it later
                 * for AGP GART. We have to use a full 16MB large page.
                 */
                DBG("DART base: %lx\n", dart_tablebase);

                if (dart_tablebase != 0 && dart_tablebase >= base
                    && dart_tablebase < (base + size)) {
                        unsigned long dart_table_end = dart_tablebase + 16 * MB;
                        if (base != dart_tablebase)
                                BUG_ON(htab_bolt_mapping(base, dart_tablebase,
                                                        __pa(base), prot,
                                                        mmu_linear_psize,
                                                        mmu_kernel_ssize));
                        if ((base + size) > dart_table_end)
                                BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
                                                        base + size,
                                                        __pa(dart_table_end),
                                                         prot,
                                                         mmu_linear_psize,
                                                         mmu_kernel_ssize));
                        continue;
                }
#endif /* CONFIG_U3_DART */
                BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
                                prot, mmu_linear_psize, mmu_kernel_ssize));
        }
        memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

        /*
         * If we have a memory_limit and we've allocated TCEs then we need to
         * explicitly map the TCE area at the top of RAM. We also cope with the
         * case that the TCEs start below memory_limit.
         * tce_alloc_start/end are 16MB aligned so the mapping should work
         * for either 4K or 16MB pages.
         */
        if (tce_alloc_start) {
                tce_alloc_start = (unsigned long)__va(tce_alloc_start);
                tce_alloc_end = (unsigned long)__va(tce_alloc_end);

                if (base + size >= tce_alloc_start)
                        tce_alloc_start = base + size + 1;

                BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
                                         __pa(tce_alloc_start), prot,
                                         mmu_linear_psize, mmu_kernel_ssize));
        }

        htab_finish_init();

        DBG(" <- htab_initialize()\n");
}
#undef KB
#undef MB

void __init early_init_mmu(void)
{
        /* Setup initial STAB address in the PACA */
        get_paca()->stab_real = __pa((u64)&initial_stab);
        get_paca()->stab_addr = (u64)&initial_stab;

        /* Initialize the MMU Hash table and create the linear mapping
         * of memory. Has to be done before stab/slb initialization as
         * this is currently where the page size encoding is obtained
         */
        htab_initialize();

        /* Initialize stab / SLB management */
        if (mmu_has_feature(MMU_FTR_SLB))
                slb_initialize();
        else
                stab_initialize(get_paca()->stab_real);
}

#ifdef CONFIG_SMP
void early_init_mmu_secondary(void)
{
        /* Initialize hash table for that CPU */
        if (!firmware_has_feature(FW_FEATURE_LPAR))
                mtspr(SPRN_SDR1, _SDR1);

        /* Initialize STAB/SLB. We use a virtual address as it works
         * in real mode on pSeries.
         */
        if (mmu_has_feature(MMU_FTR_SLB))
                slb_initialize();
        else
                stab_initialize(get_paca()->stab_addr);
}
#endif /* CONFIG_SMP */

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
        struct page *page;

        if (!pfn_valid(pte_pfn(pte)))
                return pp;

        page = pte_page(pte);

        /* page is dirty */
        if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
                if (trap == 0x400) {
                        flush_dcache_icache_page(page);
                        set_bit(PG_arch_1, &page->flags);
                } else
                        pp |= HPTE_R_N;
        }
        return pp;
}
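
/*
 * Illustrative note: trap 0x400 is the instruction storage interrupt
 * vector, i.e. the fault came from an instruction fetch, so the page
 * is flushed to the icache and marked clean via PG_arch_1; for data
 * faults the HPTE is made no-execute instead, deferring the flush
 * until the page is actually executed from.
 */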

#ifdef CONFIG_PPC_MM_SLICES
unsigned int get_paca_psize(unsigned long addr)
{
        u64 lpsizes;
        unsigned char *hpsizes;
        unsigned long index, mask_index;

        if (addr < SLICE_LOW_TOP) {
                lpsizes = get_paca()->context.low_slices_psize;
                index = GET_LOW_SLICE_INDEX(addr);
                return (lpsizes >> (index * 4)) & 0xF;
        }
        hpsizes = get_paca()->context.high_slices_psize;
        index = GET_HIGH_SLICE_INDEX(addr);
        mask_index = index & 0x1;
        return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xF;
}
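
/*
 * Illustrative decoding (assuming the usual ppc64 slice geometry):
 * low_slices_psize packs sixteen 4-bit page-size indices, one per
 * 256MB slice below 4GB, into a u64, while high_slices_psize packs
 * two 4-bit indices per byte for the 1TB slices above. E.g. if the
 * nibble for a slice is 0x5, that slice uses page-size index 5.
 */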

#else
unsigned int get_paca_psize(unsigned long addr)
{
        return get_paca()->context.user_psize;
}
#endif

/*
 * Demote a segment to using 4k pages.
 * For now this makes the whole process use 4k pages.
 */
#ifdef CONFIG_PPC_64K_PAGES
void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
        if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
                return;
        slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
#ifdef CONFIG_SPU_BASE
        spu_flush_all_slbs(mm);
#endif
        if (get_paca_psize(addr) != MMU_PAGE_4K) {
                get_paca()->context = mm->context;
                slb_flush_and_rebolt();
        }
}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * This looks up a 2-bit protection code for a 4k subpage of a 64k page.
 * Userspace sets the subpage permissions using the subpage_prot system call.
 *
 * Result is 0: full permissions, _PAGE_RW: read-only,
 * _PAGE_USER or _PAGE_USER|_PAGE_RW: no access.
 */
static int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
        struct subpage_prot_table *spt = &mm->context.spt;
        u32 spp = 0;
        u32 **sbpm, *sbpp;

        if (ea >= spt->maxaddr)
                return 0;
        if (ea < 0x100000000UL) {
                /* addresses below 4GB use spt->low_prot */
                sbpm = spt->low_prot;
        } else {
                sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
                if (!sbpm)
                        return 0;
        }
        sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
        if (!sbpp)
                return 0;
        spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];

        /* extract 2-bit bitfield for this 4k subpage */
        spp >>= 30 - 2 * ((ea >> 12) & 0xf);

        /* turn 0,1,2,3 into combination of _PAGE_USER and _PAGE_RW */
        spp = ((spp & 2) ? _PAGE_USER : 0) | ((spp & 1) ? _PAGE_RW : 0);
        return spp;
}
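
/*
 * Worked example (illustrative): for the 4k subpage at offset 0x3000
 * within its 64k page, (ea >> 12) & 0xf = 3, so spp is shifted right
 * by 30 - 6 = 24 and bits 25:24 are taken. A field value of 1 maps to
 * _PAGE_RW (read-only), 2 or 3 to no access.
 */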

#else /* CONFIG_PPC_SUBPAGE_PROT */
static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
        return 0;
}
#endif

void hash_failure_debug(unsigned long ea, unsigned long access,
                        unsigned long vsid, unsigned long trap,
                        int ssize, int psize, int lpsize, unsigned long pte)
{
        if (!printk_ratelimit())
                return;
        pr_info("mm: Hashing failure ! EA=0x%lx access=0x%lx current=%s\n",
                ea, access, current->comm);
        pr_info("    trap=0x%lx vsid=0x%lx ssize=%d base psize=%d psize %d pte=0x%lx\n",
                trap, vsid, ssize, psize, lpsize, pte);
}

/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 * -2 - access not permitted by subpage protection mechanism
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
        enum ctx_state prev_state = exception_enter();
        pgd_t *pgdir;
        unsigned long vsid;
        struct mm_struct *mm;
        pte_t *ptep;
        unsigned hugeshift;
        const struct cpumask *tmp;
        int rc, user_region = 0, local = 0;
        int psize, ssize;

        DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
                ea, access, trap);

        /* Get region & vsid */
        switch (REGION_ID(ea)) {
        case USER_REGION_ID:
                user_region = 1;
                mm = current->mm;
                if (!mm) {
                        DBG_LOW(" user region with no mm !\n");
                        rc = 1;
                        goto bail;
                }
                psize = get_slice_psize(mm, ea);
                ssize = user_segment_size(ea);
                vsid = get_vsid(mm->context.id, ea, ssize);
                break;
        case VMALLOC_REGION_ID:
                mm = &init_mm;
                vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
                if (ea < VMALLOC_END)
                        psize = mmu_vmalloc_psize;
                else
                        psize = mmu_io_psize;
                ssize = mmu_kernel_ssize;
                break;
        default:
                /* Not a valid range
                 * Send the problem up to do_page_fault
                 */
                rc = 1;
                goto bail;
        }
        DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

        /* Bad address. */
        if (!vsid) {
                DBG_LOW("Bad address!\n");
                rc = 1;
                goto bail;
        }
        /* Get pgdir */
        pgdir = mm->pgd;
        if (pgdir == NULL) {
                rc = 1;
                goto bail;
        }

        /* Check CPU locality */
        tmp = cpumask_of(smp_processor_id());
        if (user_region && cpumask_equal(mm_cpumask(mm), tmp))
                local = 1;

#ifndef CONFIG_PPC_64K_PAGES
        /* If we use 4K pages and our psize is not 4K, then we might
         * be hitting a special driver mapping, and need to align the
         * address before we fetch the PTE.
         *
         * It could also be a hugepage mapping, in which case this is
         * not necessary, but it's not harmful, either.
         */
        if (psize != MMU_PAGE_4K)
                ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
#endif /* CONFIG_PPC_64K_PAGES */

        /* Get PTE and page size from page tables */
        ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift);
        if (ptep == NULL || !pte_present(*ptep)) {
                DBG_LOW(" no PTE !\n");
                rc = 1;
                goto bail;
        }

        /* Add _PAGE_PRESENT to the required access perm */
        access |= _PAGE_PRESENT;

        /* Pre-check access permissions (will be re-checked atomically
         * in __hash_page_XX but this pre-check is a fast path)
         */
        if (access & ~pte_val(*ptep)) {
                DBG_LOW(" no access !\n");
                rc = 1;
                goto bail;
        }

        if (hugeshift) {
                if (pmd_trans_huge(*(pmd_t *)ptep))
                        rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep,
                                             trap, local, ssize, psize);
#ifdef CONFIG_HUGETLB_PAGE
                else
                        rc = __hash_page_huge(ea, access, vsid, ptep, trap,
                                              local, ssize, hugeshift, psize);
#else
                else {
                        /*
                         * If we get a hugepage shift here that is not a
                         * transparent hugepage while hugetlb is disabled,
                         * something is really wrong.
                         */
                        rc = 1;
                        WARN_ON(1);
                }
#endif
                goto bail;
        }

#ifndef CONFIG_PPC_64K_PAGES
        DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
        DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
                pte_val(*(ptep + PTRS_PER_PTE)));
#endif
        /* Do actual hashing */
#ifdef CONFIG_PPC_64K_PAGES
        /* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
        if ((pte_val(*ptep) & _PAGE_4K_PFN) && psize == MMU_PAGE_64K) {
                demote_segment_4k(mm, ea);
                psize = MMU_PAGE_4K;
        }

        /* If this PTE is non-cacheable and we have restrictions on
         * using non cacheable large pages, then we switch to 4k
         */
        if (mmu_ci_restrictions && psize == MMU_PAGE_64K &&
            (pte_val(*ptep) & _PAGE_NO_CACHE)) {
                if (user_region) {
                        demote_segment_4k(mm, ea);
                        psize = MMU_PAGE_4K;
                } else if (ea < VMALLOC_END) {
                        /*
                         * some driver did a non-cacheable mapping
                         * in vmalloc space, so switch vmalloc
                         * to 4k pages
                         */
                        printk(KERN_ALERT "Reducing vmalloc segment "
                               "to 4kB pages because of "
                               "non-cacheable mapping\n");
                        psize = mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPU_BASE
                        spu_flush_all_slbs(mm);
#endif
                }
        }
        if (user_region) {
                if (psize != get_paca_psize(ea)) {
                        get_paca()->context = mm->context;
                        slb_flush_and_rebolt();
                }
        } else if (get_paca()->vmalloc_sllp !=
                   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
                get_paca()->vmalloc_sllp =
                        mmu_psize_defs[mmu_vmalloc_psize].sllp;
                slb_vmalloc_update();
        }
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_HAS_HASH_64K
        if (psize == MMU_PAGE_64K)
                rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
        else
#endif /* CONFIG_PPC_HAS_HASH_64K */
        {
                int spp = subpage_protection(mm, ea);
                if (access & spp)
                        rc = -2;
                else
                        rc = __hash_page_4K(ea, access, vsid, ptep, trap,
                                            local, ssize, spp);
        }

        /* Dump some info in case of hash insertion failure; such failures
         * should never happen, so it is really useful to know if/when
         * they do
         */
        if (rc == -1)
                hash_failure_debug(ea, access, vsid, trap, ssize, psize,
                                   psize, pte_val(*ptep));
#ifndef CONFIG_PPC_64K_PAGES
        DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
        DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
                pte_val(*(ptep + PTRS_PER_PTE)));
#endif
        DBG_LOW(" -> rc=%d\n", rc);

bail:
        exception_exit(prev_state);
        return rc;
}
EXPORT_SYMBOL_GPL(hash_page);

void hash_preload(struct mm_struct *mm, unsigned long ea,
                  unsigned long access, unsigned long trap)
{
        int hugepage_shift;
        unsigned long vsid;
        pgd_t *pgdir;
        pte_t *ptep;
        unsigned long flags;
        int rc, ssize, local = 0;

        BUG_ON(REGION_ID(ea) != USER_REGION_ID);

#ifdef CONFIG_PPC_MM_SLICES
        /* We only prefault standard pages for now */
        if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize))
                return;
#endif

        DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
                " trap=%lx\n", mm, mm->pgd, ea, access, trap);

        /* Get Linux PTE if available */
        pgdir = mm->pgd;
        if (pgdir == NULL)
                return;

        /* Get VSID */
        ssize = user_segment_size(ea);
        vsid = get_vsid(mm->context.id, ea, ssize);
        if (!vsid)
                return;
        /*
         * Hashing doesn't like irqs. Walking the Linux page tables with
         * irqs disabled also saves us from holding multiple locks.
         */
        local_irq_save(flags);

        /*
         * THP pages use update_mmu_cache_pmd. We don't do
         * hash preload there. Hence can ignore THP here
         */
        ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugepage_shift);
        if (!ptep)
                goto out_exit;

        WARN_ON(hugepage_shift);
#ifdef CONFIG_PPC_64K_PAGES
        /* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
         * a 64K kernel), then we don't preload, hash_page() will take
         * care of it once we actually try to access the page.
         * That way we don't have to duplicate all of the logic for segment
         * page size demotion here
         */
        if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
                goto out_exit;
#endif /* CONFIG_PPC_64K_PAGES */

        /* Is that local to this CPU ? */
        if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
                local = 1;

        /* Hash it in */
#ifdef CONFIG_PPC_HAS_HASH_64K
        if (mm->context.user_psize == MMU_PAGE_64K)
                rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
        else
#endif /* CONFIG_PPC_HAS_HASH_64K */
                rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
                                    subpage_protection(mm, ea));

        /* Dump some info in case of hash insertion failure; such failures
         * should never happen, so it is really useful to know if/when
         * they do
         */
        if (rc == -1)
                hash_failure_debug(ea, access, vsid, trap, ssize,
                                   mm->context.user_psize,
                                   mm->context.user_psize,
                                   pte_val(*ptep));
out_exit:
        local_irq_restore(flags);
}

/* WARNING: This is called from hash_low_64.S, if you change this prototype,
 *          do not forget to update the assembly call site !
 */
void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
                     int local)
{
        unsigned long hash, index, shift, hidx, slot;

        DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn);
        pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
                hash = hpt_hash(vpn, shift, ssize);
                hidx = __rpte_to_hidx(pte, index);
                if (hidx & _PTEIDX_SECONDARY)
                        hash = ~hash;
                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += hidx & _PTEIDX_GROUP_IX;
                DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
                /*
                 * We use same base page size and actual psize, because we don't
                 * use these functions for hugepage
                 */
                ppc_md.hpte_invalidate(slot, vpn, psize, psize, ssize, local);
        } pte_iterate_hashed_end();

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        /* Transactions are not aborted by tlbiel, only tlbie.
         * Without an abort, syncing a page back to a block device w/ PIO
         * could pick up transactional data (bad!), so we force an abort
         * here. Before the sync the page will be made read-only, which
         * will flush_hash_page. BIG ISSUE here: if the kernel uses a page
         * from userspace without unmapping it first, it may see the
         * speculated version.
         */
        if (local && cpu_has_feature(CPU_FTR_TM) &&
            current->thread.regs &&
            MSR_TM_ACTIVE(current->thread.regs->msr)) {
                tm_enable();
                tm_abort(TM_CAUSE_TLBI);
        }
#endif
}
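
/*
 * Illustrative note: the slot reconstruction above mirrors insertion;
 * hidx records whether the HPTE went into the primary or secondary
 * group (_PTEIDX_SECONDARY, in which case the hash is complemented)
 * plus its index within the group (_PTEIDX_GROUP_IX), which is enough
 * to recompute the global slot number for hpte_invalidate().
 */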

void flush_hash_range(unsigned long number, int local)
{
        if (ppc_md.flush_hash_range)
                ppc_md.flush_hash_range(number, local);
        else {
                int i;
                struct ppc64_tlb_batch *batch =
                        &__get_cpu_var(ppc64_tlb_batch);

                for (i = 0; i < number; i++)
                        flush_hash_page(batch->vpn[i], batch->pte[i],
                                        batch->psize, batch->ssize, local);
        }
}

/*
 * low_hash_fault is called when the low level hash code failed
 * to insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
{
        enum ctx_state prev_state = exception_enter();

        if (user_mode(regs)) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
                if (rc == -2)
                        _exception(SIGSEGV, regs, SEGV_ACCERR, address);
                else
#endif
                        _exception(SIGBUS, regs, BUS_ADRERR, address);
        } else
                bad_page_fault(regs, address, SIGBUS);

        exception_exit(prev_state);
}

long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
                           unsigned long pa, unsigned long rflags,
                           unsigned long vflags, int psize, int ssize)
{
        unsigned long hpte_group;
        long slot;

repeat:
        hpte_group = ((hash & htab_hash_mask) *
                       HPTES_PER_GROUP) & ~0x7UL;

        /* Insert into the hash table, primary slot */
        slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, vflags,
                                  psize, psize, ssize);

        /* Primary is full, try the secondary */
        if (unlikely(slot == -1)) {
                hpte_group = ((~hash & htab_hash_mask) *
                              HPTES_PER_GROUP) & ~0x7UL;
                slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags,
                                          vflags | HPTE_V_SECONDARY,
                                          psize, psize, ssize);
                if (slot == -1) {
                        if (mftb() & 0x1)
                                hpte_group = ((hash & htab_hash_mask) *
                                              HPTES_PER_GROUP)&~0x7UL;

                        ppc_md.hpte_remove(hpte_group);
                        goto repeat;
                }
        }

        return slot;
}
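
/*
 * Illustrative walk-through: the primary group for a hash is
 * (hash & htab_hash_mask) * HPTES_PER_GROUP and the secondary group is
 * the same computation on ~hash. If both groups are full, one of the
 * two is picked pseudo-randomly from the timebase, a victim slot is
 * evicted with hpte_remove() and the insertion is retried.
 */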

#ifdef CONFIG_DEBUG_PAGEALLOC
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
        unsigned long hash;
        unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
        unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
        unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL);
        long ret;

        hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);

        /* Don't create HPTE entries for bad address */
        if (!vsid)
                return;

        ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
                                    HPTE_V_BOLTED,
                                    mmu_linear_psize, mmu_kernel_ssize);

        BUG_ON(ret < 0);
        spin_lock(&linear_map_hash_lock);
        BUG_ON(linear_map_hash_slots[lmi] & 0x80);
        linear_map_hash_slots[lmi] = ret | 0x80;
        spin_unlock(&linear_map_hash_lock);
}
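
/*
 * Encoding note (illustrative): each linear_map_hash_slots[] byte
 * stores 0x80 ("currently mapped") OR'd with the low 7 bits of the
 * slot returned above (group index plus the secondary-group flag), so
 * kernel_unmap_linear_page() below can find and invalidate the exact
 * HPTE later.
 */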

static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
        unsigned long hash, hidx, slot;
        unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
        unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);

        hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
        spin_lock(&linear_map_hash_lock);
        BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
        hidx = linear_map_hash_slots[lmi] & 0x7f;
        linear_map_hash_slots[lmi] = 0;
        spin_unlock(&linear_map_hash_lock);
        if (hidx & _PTEIDX_SECONDARY)
                hash = ~hash;
        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
        slot += hidx & _PTEIDX_GROUP_IX;
        ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_linear_psize,
                               mmu_kernel_ssize, 0);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
        unsigned long flags, vaddr, lmi;
        int i;

        local_irq_save(flags);
        for (i = 0; i < numpages; i++, page++) {
                vaddr = (unsigned long)page_address(page);
                lmi = __pa(vaddr) >> PAGE_SHIFT;
                if (lmi >= linear_map_hash_count)
                        continue;
                if (enable)
                        kernel_map_linear_page(vaddr, lmi);
                else
                        kernel_unmap_linear_page(vaddr, lmi);
        }
        local_irq_restore(flags);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                phys_addr_t first_memblock_size)
{
        /* We don't currently support the first MEMBLOCK not mapping 0
         * physical on those processors
         */
        BUG_ON(first_memblock_base != 0);

        /* On LPAR systems, the first entry is our RMA region,
         * non-LPAR 64-bit hash MMU systems don't have a limitation
         * on real mode access, but using the first entry works well
         * enough. We also clamp it to 1G to avoid some funky things
         * such as RTAS bugs etc...
         */
        ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

        /* Finally limit subsequent allocations */
        memblock_set_current_limit(ppc64_rma_size);
}