/*
 *  This file contains ioremap and related functions for 64-bit machines.
 *
 *  Derived from arch/ppc64/mm/init.c
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>

#include "mmu_decl.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>
/* Some sanity checking */
#if TASK_SIZE_USER64 > PGTABLE_RANGE
#error TASK_SIZE_USER64 exceeds pagetable range
#endif

#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
#endif
unsigned long ioremap_bot = IOREMAP_BASE;
/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & _PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea + i, pa + i, flags))
			return NULL;

	return (void __iomem *)ea;
}
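
/*
 * Illustrative sketch (not part of the original file): platform code that
 * bolts a fixed kernel virtual range to a physical IO window would use the
 * pair above and below roughly like this. The names 'phb_io_base_phys' and
 * 'phb_io_base_virt' are hypothetical, page-aligned values owned by the
 * caller:
 *
 *	void __iomem *io;
 *
 *	io = __ioremap_at(phb_io_base_phys, phb_io_base_virt, 0x10000,
 *			  pgprot_val(pgprot_noncached(__pgprot(0))));
 *	if (!io)
 *		pr_err("failed to map PCI IO window\n");
 *	...
 *	__iounmap_at(phb_io_base_virt, 0x10000);
 */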
/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}
void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to. Once the vmalloc system is
	 * running, we use it. Before that, we map using addresses going
	 * up from ioremap_bot. The vmalloc system hands out addresses
	 * between ioremap_bot and IOREMAP_END.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (slab_is_available()) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}
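
/*
 * Worked example of the alignment above (illustrative, assuming 4K pages):
 * for addr = 0x10000abc and size = 0x200, paligned becomes 0x10000000 and
 * size is rounded to PAGE_ALIGN(0x10000cbc) - 0x10000000 = 0x1000, i.e. one
 * full page is mapped. The caller still gets a pointer to its exact byte:
 * the final 'ret += addr & ~PAGE_MASK' adds the 0xabc offset back in.
 */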
void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}
void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
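
/*
 * Illustrative sketch (not from this file): a typical driver maps its MMIO
 * registers with ioremap() and accesses them through the IO accessors, then
 * unmaps on teardown. 'res' is a hypothetical struct resource describing the
 * device's register BAR:
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 *	out_be32(regs + 0x10, in_be32(regs + 0x10) | 0x1);
 *	iounmap(regs);
 */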
void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			    unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_WRITE)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_EXEC leak out */
	flags &= ~_PAGE_EXEC;
	/*
	 * Force kernel mapping.
	 */
#if defined(CONFIG_PPC_BOOK3S_64)
	flags |= _PAGE_PRIVILEGED;
#else
	flags &= ~_PAGE_USER;
#endif

#ifdef _PAGE_BAP_SR
	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
	 * which means that we just cleared supervisor access... oops ;-) This
	 * restores it
	 */
	flags |= _PAGE_BAP_SR;
#endif

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
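
/*
 * Illustrative sketch (not from this file): ioremap_prot() lets a caller pass
 * explicit PTE flags, which the code above then sanitizes. For example, a
 * hypothetical caller asking for a cacheable mapping might do:
 *
 *	unsigned long prot = pgprot_val(pgprot_cached(__pgprot(0)));
 *	void __iomem *p = ioremap_prot(phys, size, prot);
 *
 * Even if 'prot' carried _PAGE_EXEC or user-access bits, they are stripped
 * or overridden before the mapping is established.
 */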
/*
 * Unmap an IO region and remove it from the vmalloc'd list.
 * Access to IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}
void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}
EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);
#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
	if (pgd_huge(pgd))
		return pte_page(pgd_pte(pgd));
	return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

struct page *pud_page(pud_t pud)
{
	if (pud_huge(pud))
		return pte_page(pud_pte(pud));
	return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For a hugepage we have the pfn in the pmd and use PTE_RPN_SHIFT bits for
 * flags. For a PTE page, we have a PTE_FRAG_SIZE (4K) aligned virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd) || pmd_huge(pmd))
		return pte_page(pmd_pte(pmd));
	return virt_to_page(pmd_page_vaddr(pmd));
}
#ifdef CONFIG_PPC_64K_PAGES
static pte_t *get_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments mark PTE page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}
static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
				       __GFP_REPEAT | __GFP_ZERO);
	if (!page)
		return NULL;
	if (!kernel && !pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}

	ret = page_address(page);
	spin_lock(&mm->page_table_lock);
	/*
	 * If we find pgtable_page set, we return
	 * the allocated page with single fragment
	 * count.
	 */
	if (likely(!mm->context.pte_frag)) {
		set_page_count(page, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}
pte_t *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_cache(mm, kernel);
}
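
/*
 * Note on the fragment scheme above (illustrative numbers): with 64K kernel
 * pages and a PTE_FRAG_SIZE of 4K, one backing page holds PTE_FRAG_NR = 16
 * fragments. get_from_cache() hands out consecutive 4K slices of the current
 * page; once the next slice would wrap to a new page ((pte_frag & ~PAGE_MASK)
 * == 0), the cache is marked empty and __alloc_for_cache() starts a new page,
 * using the page's refcount to track how many fragments are still in use.
 */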
void page_table_free(struct mm_struct *mm, unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);
	if (put_page_testzero(page)) {
		if (!kernel)
			pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}
#ifdef CONFIG_SMP
static void page_table_free_rcu(void *table)
{
	struct page *page = virt_to_page(table);
	if (put_page_testzero(page)) {
		pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}
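
/*
 * Sketch of the encoding used above (illustrative values): page table pages
 * are at least page aligned, so the low pointer bits are free to carry the
 * table's index size. Assuming MAX_PGTABLE_INDEX_SIZE is 0xf, freeing a
 * table at 0xc000000001234000 with shift 9 passes 0xc000000001234009 to
 * tlb_remove_table(); __tlb_remove_table() below masks the low bits off to
 * recover the pointer and reads them back to pick either the PTE-fragment
 * path (shift == 0) or the matching kmem cache.
 */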
void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	if (!shift)
		/* PTE page needs special handling */
		page_table_free_rcu(table);
	else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	if (!shift) {
		/* PTE page needs special handling */
		struct page *page = virt_to_page(table);
		if (put_page_testzero(page)) {
			pgtable_page_dtor(page);
			free_hot_cold_page(page, 0);
		}
	} else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#endif
#endif /* CONFIG_PPC_64K_PAGES */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * This is called when relaxing access to a hugepage. It's also called in the
 * page fault path when we don't hit any of the major fault cases, i.e., a
 * minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will
 * have handled those two for us; we additionally deal with missing execute
 * permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp));
	assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		__ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
		/*
		 * Since we are not supporting SW TLB systems, we don't
		 * have anything similar to flush_tlb_page_nohash()
		 */
	}
	return changed;
}
unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
				  pmd_t *pmdp, unsigned long clr,
				  unsigned long set)
{
	__be64 old_be, tmp;
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp));
	assert_spin_locked(&mm->page_table_lock);
#endif
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		andc	%1,%0,%4 \n\
		or	%1,%1,%7\n\
		stdcx.	%1,0,%3 \n\
		bne-	1b"
	: "=&r" (old_be), "=&r" (tmp), "=m" (*pmdp)
	: "r" (pmdp), "r" (cpu_to_be64(clr)), "m" (*pmdp),
	  "r" (cpu_to_be64(_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );
	old = be64_to_cpu(old_be);

	trace_hugepage_update(addr, old, clr, set);
	if (old & _PAGE_HASHPTE)
		hpte_do_hugepage_flush(mm, addr, pmdp, old);
	return old;
}
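
/*
 * The inline assembly above is, in effect, an atomic read-modify-write on the
 * big-endian pmd word. A non-atomic C sketch of what the ldarx/stdcx. loop
 * does (illustrative only; the real sequence must be atomic and spins while
 * _PAGE_BUSY is set) would be:
 *
 *	do {
 *		old = *pmdp;
 *	} while (old & _PAGE_BUSY);
 *	*pmdp = (old & ~clr) | set;
 */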
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));

	pmd = *pmdp;
	pmd_clear(pmdp);
	/*
	 * Wait for all pending hash_page to finish. This is needed
	 * in case of subpage collapse. When we collapse normal pages
	 * to hugepage, we first clear the pmd, then invalidate all
	 * the PTE entries. The assumption here is that any low level
	 * page fault will see a none pmd and take the slow path that
	 * will wait on mmap_sem. But we could very well be in a
	 * hash_page with local ptep pointer value. Such a hash page
	 * can result in adding new HPTE entries for normal subpages.
	 * That means we could be modifying the page content as we
	 * copy them to a huge page. So wait for parallel hash_page
	 * to finish before invalidating HPTE entries. We can do this
	 * by sending an IPI to all the cpus and executing a dummy
	 * function there.
	 */
	kick_all_cpus_sync();
	/*
	 * Now invalidate the hpte entries in the range
	 * covered by pmd. This makes sure we take a
	 * fault and will find the pmd as none, which will
	 * result in a major fault which takes mmap_sem and
	 * hence wait for collapse to complete. Without this
	 * the __collapse_huge_page_copy can result in copying
	 * the old content.
	 */
	flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
	return pmd;
}
/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally.
 */
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}
/*
 * We want to put the pgtable in the pmd and use the pgtable for tracking
 * the base page size hptes.
 */
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	pgtable_t *pgtable_slot;
	assert_spin_locked(&mm->page_table_lock);
	/*
	 * we store the pgtable in the second half of PMD
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	*pgtable_slot = pgtable;
	/*
	 * expose the deposited pgtable to other cpus
	 * before we set the hugepage PTE at pmd level;
	 * hash fault code looks at the deposited pgtable
	 * to store hash index values.
	 */
	smp_wmb();
}
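
/*
 * Layout note for the deposit/withdraw pair (illustrative): the pmd page
 * holds its PTRS_PER_PMD entries in the first half, and the code above
 * reuses the second half, at pmdp + PTRS_PER_PMD, as a per-pmd slot for the
 * deposited PTE fragment. Withdraw below reads the same slot back and clears
 * it, so each huge pmd carries exactly one deposited pgtable.
 */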
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;
	pgtable_t *pgtable_slot;

	assert_spin_locked(&mm->page_table_lock);
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Once we withdraw, mark the entry NULL.
	 */
	*pgtable_slot = NULL;
	/*
	 * We store HPTE information in the deposited PTE fragment.
	 * zero out the content on withdraw.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	return pgtable;
}
void pmdp_huge_split_prepare(struct vm_area_struct *vma,
			     unsigned long address, pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(REGION_ID(address) != USER_REGION_ID);

	/*
	 * We can't mark the pmd none here, because that will cause a race
	 * against exit_mmap. We need to continue to mark the pmd TRANS HUGE
	 * while we split, but at the same time we want the rest of the ppc64
	 * code not to insert a hash pte on this, because we will be modifying
	 * the deposited pgtable in the caller of this function. Hence
	 * clear the _PAGE_USER so that we move the fault handling to
	 * a higher level function, which will serialize against the ptl.
	 * We need to flush existing hash pte entries here, even though
	 * the translation is still valid, because we will withdraw the
	 * pgtable_t after this.
	 */
	pmd_hugepage_update(vma->vm_mm, address, pmdp, 0, _PAGE_PRIVILEGED);
}
/*
 * set a new huge pmd. We should not be called for updating
 * an existing pmd entry. That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(&mm->page_table_lock);
	WARN_ON(!pmd_trans_huge(pmd));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}
/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);

	/*
	 * This ensures that generic code that relies on IRQ disabling
	 * to prevent a parallel THP split works as expected.
	 */
	kick_all_cpus_sync();
}
/*
 * A linux hugepage PMD was changed and the corresponding hash table entries
 * need to be flushed.
 */
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
			    pmd_t *pmdp, unsigned long old_pmd)
{
	int ssize;
	unsigned int psize;
	unsigned long vsid;
	unsigned long flags = 0;
	const struct cpumask *tmp;

	/* get the base page size, vsid and segment size */
#ifdef CONFIG_DEBUG_VM
	psize = get_slice_psize(mm, addr);
	BUG_ON(psize == MMU_PAGE_16M);
#endif
	if (old_pmd & _PAGE_COMBO)
		psize = MMU_PAGE_4K;
	else
		psize = MMU_PAGE_64K;

	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}

	tmp = cpumask_of(smp_processor_id());
	if (cpumask_equal(mm_cpumask(mm), tmp))
		flags |= HPTE_LOCAL_UPDATE;

	return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
}
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}
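
/*
 * Worked example for pfn_pmd() above (illustrative, assuming a 64K page size,
 * i.e. PAGE_SHIFT of 16): pfn 0x12345 is shifted to 0x123450000 and masked
 * with PTE_RPN_MASK so that only the real-page-number bits survive;
 * pmd_set_protbits() then ORs in the protection bits from pgprot, e.g. those
 * of PAGE_KERNEL. pmd_modify() goes the other way: it keeps only the bits in
 * _HPAGE_CHG_MASK (the pfn plus the bits that must survive a protection
 * change) before applying the new protection.
 */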
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page
 * tables. We use it to preload an HPTE into the hash table corresponding to
 * the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	return;
}
pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	pgtable_t pgtable;
	unsigned long old;
	pgtable_t *pgtable_slot;

	old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * We have pmd == none and we are holding page_table_lock.
	 * So we can safely go and clear the pgtable hash
	 * index info.
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Zero out the old valid and hash index details so that
	 * a parallel hash fault doesn't pick up stale values.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	/*
	 * Serialize against find_linux_pte_or_hugepte which does lock-less
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since format of pte_t is different from
	 * pmd_t we want to prevent transit from pmd pointing to page table
	 * to pmd pointing to huge page (and back) while interrupts are disabled.
	 * We clear pmd to possibly replace it with page table pointer in
	 * different code paths. So make sure we wait for the parallel
	 * find_linux_pte_or_hugepte to finish.
	 */
	kick_all_cpus_sync();
	return old_pmd;
}
int has_transparent_hugepage(void)
{
	BUILD_BUG_ON_MSG((PMD_SHIFT - PAGE_SHIFT) >= MAX_ORDER,
		"hugepages can't be allocated by the buddy allocator");

	BUILD_BUG_ON_MSG((PMD_SHIFT - PAGE_SHIFT) < 2,
			 "We need more than 2 pages to do deferred thp split");

	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return 0;
	/*
	 * We support THP only if PMD_SIZE is 16MB.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
		return 0;
	/*
	 * We need to make sure that we support 16MB hugepages in a segment
	 * with base page size 64K or 4K. We only enable THP with a PAGE_SIZE
	 * of 64K.
	 */
	/*
	 * If we have 64K HPTE, we will be using that by default
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
	    (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
		return 0;
	/*
	 * Ok we only have 4K HPTE
	 */
	if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
		return 0;

	return 1;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */