4 * This file contains the various mmu fetch and update operations.
5 * The most important job they must perform is the mapping between the
6 * domain's pfn and the overall machine mfns.
8 * Xen allows guests to directly update the pagetable, in a controlled
9 * fashion. In other words, the guest modifies the same pagetable
10 * that the CPU actually uses, which eliminates the overhead of having
11 * a separate shadow pagetable.
13 * In order to allow this, it falls on the guest domain to map its
14 * notion of a "physical" pfn - which is just a domain-local linear
15 * address - into a real "machine address" which the CPU's MMU can
16 * use.
18 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
19 * inserted directly into the pagetable. When creating a new
20 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
21 * when reading the content back with __(pgd|pmd|pte)_val, it converts
22 * the mfn back into a pfn.
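*
* For illustration, a minimal sketch of the round-trip (the pfn value
* 0x1000 here is an arbitrary example, not from the original text):
*
*	pte_t pte = xen_make_pte(((pteval_t)0x1000 << PAGE_SHIFT)
*				 | _PAGE_PRESENT);
*	pteval_t val = xen_pte_val(pte);
*
* xen_make_pte() stores pfn_to_mfn(0x1000) in the hardware pte, and
* xen_pte_val() translates the mfn back, so val holds pfn 0x1000 with
* the flags preserved.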
24 * The other constraint is that all pages which make up a pagetable
25 * must be mapped read-only in the guest. This prevents uncontrolled
26 * guest updates to the pagetable. Xen strictly enforces this, and
27 * will disallow any pagetable update which will end up mapping a
28 * pagetable page RW, and will disallow using any writable page as a
29 * pagetable.
31 * Naively, when loading %cr3 with the base of a new pagetable, Xen
32 * would need to validate the whole pagetable before going on.
33 * Naturally, this is quite slow. The solution is to "pin" a
34 * pagetable, which enforces all the constraints on the pagetable even
35 * when it is not actively in use. This means that Xen can be assured
36 * that it is still valid when you do load it into %cr3, and doesn't
37 * need to revalidate it.
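*
* A sketch of what pinning amounts to at the hypercall level (this is
* what xen_do_pin() below issues, shown unbatched for clarity, with
* the 64-bit L4 case assumed and pgd being the pagetable base):
*
*	struct mmuext_op op = {
*		.cmd = MMUEXT_PIN_L4_TABLE,
*		.arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(pgd))),
*	};
*	HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF);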
39 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
41 #include <linux/sched.h>
42 #include <linux/highmem.h>
43 #include <linux/debugfs.h>
44 #include <linux/bug.h>
45 #include <linux/vmalloc.h>
46 #include <linux/module.h>
47 #include <linux/gfp.h>
48 #include <linux/memblock.h>
49 #include <linux/seq_file.h>
50 #include <linux/crash_dump.h>
52 #include <trace/events/xen.h>
54 #include <asm/pgtable.h>
55 #include <asm/tlbflush.h>
56 #include <asm/fixmap.h>
57 #include <asm/mmu_context.h>
58 #include <asm/setup.h>
59 #include <asm/paravirt.h>
61 #include <asm/linkage.h>
67 #include <asm/xen/hypercall.h>
68 #include <asm/xen/hypervisor.h>
72 #include <xen/interface/xen.h>
73 #include <xen/interface/hvm/hvm_op.h>
74 #include <xen/interface/version.h>
75 #include <xen/interface/memory.h>
76 #include <xen/hvc-console.h>
78 #include "multicalls.h"
83 * Protects atomic reservation decrease/increase against concurrent increases.
84 * Also protects non-atomic updates of current_pages and balloon lists.
86 DEFINE_SPINLOCK(xen_reservation_lock);
90 * Identity map, in addition to plain kernel map. This needs to be
91 * large enough to hold the page-table pages needed to map the rest.
92 * Each page can map 2MB.
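*
* Worked example: with PTRS_PER_PTE == 512 (PAE/64-bit), one pte page
* maps 512 * 4kB = 2MB, so the four pte pages reserved below cover 8MB.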
94 #define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4)
95 static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
98 /* l3 pud for userspace vsyscall mapping */
99 static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
100 #endif /* CONFIG_X86_64 */
103 * Note about cr3 (pagetable base) values:
105 * xen_cr3 contains the current logical cr3 value; it contains the
106 * last set cr3. This may not be the current effective cr3, because
107 * its update may be being lazily deferred. However, a vcpu looking
108 * at its own cr3 can use this value knowing that everything will
109 * be self-consistent.
111 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
112 * hypercall to set the vcpu cr3 is complete (so it may be a little
113 * out of date, but it will never be set early). If one vcpu is
114 * looking at another vcpu's cr3 value, it should use this variable.
116 DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
117 DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
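
/*
 * Illustrative sketch (mirroring the check xen_drop_mm_ref() makes
 * later in this file): code inspecting another vcpu's pagetable base
 * must use xen_current_cr3, not xen_cr3:
 *
 *	if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
 *		cpumask_set_cpu(cpu, mask);
 */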
121 * Just beyond the highest usermode address. STACK_TOP_MAX has a
122 * redzone above it, so round it up to a PGD boundary.
124 #define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
126 unsigned long arbitrary_virt_to_mfn(void *vaddr)
128 xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
130 return PFN_DOWN(maddr.maddr);
133 xmaddr_t arbitrary_virt_to_machine(void *vaddr)
135 unsigned long address = (unsigned long)vaddr;
141 * if the PFN is in the linear mapped vaddr range, we can just use
142 * the (quick) virt_to_machine() p2m lookup
144 if (virt_addr_valid(vaddr))
145 return virt_to_machine(vaddr);
147 /* otherwise we have to do a (slower) full page-table walk */
149 pte = lookup_address(address, &level);
151 offset = address & ~PAGE_MASK;
152 return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
154 EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
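
/*
 * Usage sketch (hypothetical caller): unlike virt_to_machine(), this
 * also works for addresses outside the linear map, e.g. vmalloc space:
 *
 *	void *p = vmalloc(PAGE_SIZE);
 *	xmaddr_t ma = arbitrary_virt_to_machine(p);
 */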
156 void make_lowmem_page_readonly(void *vaddr)
159 unsigned long address = (unsigned long)vaddr;
162 pte = lookup_address(address, &level);
164 return; /* vaddr missing */
166 ptev = pte_wrprotect(*pte);
168 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
172 void make_lowmem_page_readwrite(void *vaddr)
175 unsigned long address = (unsigned long)vaddr;
178 pte = lookup_address(address, &level);
180 return; /* vaddr missing */
182 ptev = pte_mkwrite(*pte);
184 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
189 static bool xen_page_pinned(void *ptr)
191 struct page *page = virt_to_page(ptr);
193 return PagePinned(page);
196 void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
198 struct multicall_space mcs;
199 struct mmu_update *u;
201 trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
203 mcs = xen_mc_entry(sizeof(*u));
206 /* ptep might be kmapped when using 32-bit HIGHPTE */
207 u->ptr = virt_to_machine(ptep).maddr;
208 u->val = pte_val_ma(pteval);
210 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
212 xen_mc_issue(PARAVIRT_LAZY_MMU);
214 EXPORT_SYMBOL_GPL(xen_set_domain_pte);
216 static void xen_extend_mmu_update(const struct mmu_update *update)
218 struct multicall_space mcs;
219 struct mmu_update *u;
221 mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
223 if (mcs.mc != NULL) {
226 mcs = __xen_mc_entry(sizeof(*u));
227 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
234 static void xen_extend_mmuext_op(const struct mmuext_op *op)
236 struct multicall_space mcs;
239 mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
241 if (mcs.mc != NULL) {
244 mcs = __xen_mc_entry(sizeof(*u));
245 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
252 static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
260 /* ptr may be ioremapped for 64-bit pagetable setup */
261 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
262 u.val = pmd_val_ma(val);
263 xen_extend_mmu_update(&u);
265 xen_mc_issue(PARAVIRT_LAZY_MMU);
270 static void xen_set_pmd(pmd_t *ptr, pmd_t val)
272 trace_xen_mmu_set_pmd(ptr, val);
274 /* If page is not pinned, we can just update the entry
275 directly */
276 if (!xen_page_pinned(ptr)) {
281 xen_set_pmd_hyper(ptr, val);
285 * Associate a virtual page frame with a given physical page frame
286 * and protection flags for that frame.
288 void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
290 set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
293 static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
297 if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
302 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
303 u.val = pte_val_ma(pteval);
304 xen_extend_mmu_update(&u);
306 xen_mc_issue(PARAVIRT_LAZY_MMU);
311 static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
313 if (!xen_batched_set_pte(ptep, pteval)) {
315 * Could call native_set_pte() here and trap and
316 * emulate the PTE write but with 32-bit guests this
317 * needs two traps (one for each of the two 32-bit
318 * words in the PTE) so do one hypercall directly
319 * instead.
323 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
324 u.val = pte_val_ma(pteval);
325 HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
329 static void xen_set_pte(pte_t *ptep, pte_t pteval)
331 trace_xen_mmu_set_pte(ptep, pteval);
332 __xen_set_pte(ptep, pteval);
335 static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
336 pte_t *ptep, pte_t pteval)
338 trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
339 __xen_set_pte(ptep, pteval);
342 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
343 unsigned long addr, pte_t *ptep)
345 /* Just return the pte as-is. We preserve the bits on commit */
346 trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
350 void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
351 pte_t *ptep, pte_t pte)
355 trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
358 u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
359 u.val = pte_val_ma(pte);
360 xen_extend_mmu_update(&u);
362 xen_mc_issue(PARAVIRT_LAZY_MMU);
365 /* Assume pteval_t is equivalent to all the other *val_t types. */
366 static pteval_t pte_mfn_to_pfn(pteval_t val)
368 if (val & _PAGE_PRESENT) {
369 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
370 unsigned long pfn = mfn_to_pfn(mfn);
372 pteval_t flags = val & PTE_FLAGS_MASK;
373 if (unlikely(pfn == ~0))
374 val = flags & ~_PAGE_PRESENT;
376 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
382 static pteval_t pte_pfn_to_mfn(pteval_t val)
384 if (val & _PAGE_PRESENT) {
385 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
386 pteval_t flags = val & PTE_FLAGS_MASK;
389 if (!xen_feature(XENFEAT_auto_translated_physmap))
390 mfn = get_phys_to_machine(pfn);
394 * If there's no mfn for the pfn, then just create an
395 * empty non-present pte. Unfortunately this loses
396 * information about the original pfn, so
397 * pte_mfn_to_pfn is asymmetric.
399 if (unlikely(mfn == INVALID_P2M_ENTRY)) {
403 mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
404 val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
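
/*
 * Illustration of the asymmetry noted above (a sketch; balloon_pfn
 * stands for any pfn whose p2m entry is INVALID_P2M_ENTRY):
 *
 *	pte_t pte = xen_make_pte(((pteval_t)balloon_pfn << PAGE_SHIFT)
 *				 | _PAGE_PRESENT);
 *
 * The result is an empty, non-present pte; the original balloon_pfn
 * cannot be recovered from it.
 */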
410 __visible pteval_t xen_pte_val(pte_t pte)
412 pteval_t pteval = pte.pte;
414 return pte_mfn_to_pfn(pteval);
416 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
418 __visible pgdval_t xen_pgd_val(pgd_t pgd)
420 return pte_mfn_to_pfn(pgd.pgd);
422 PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
424 __visible pte_t xen_make_pte(pteval_t pte)
426 pte = pte_pfn_to_mfn(pte);
428 return native_make_pte(pte);
430 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
432 __visible pgd_t xen_make_pgd(pgdval_t pgd)
434 pgd = pte_pfn_to_mfn(pgd);
435 return native_make_pgd(pgd);
437 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
439 __visible pmdval_t xen_pmd_val(pmd_t pmd)
441 return pte_mfn_to_pfn(pmd.pmd);
443 PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
445 static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
453 /* ptr may be ioremapped for 64-bit pagetable setup */
454 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
455 u.val = pud_val_ma(val);
456 xen_extend_mmu_update(&u);
458 xen_mc_issue(PARAVIRT_LAZY_MMU);
463 static void xen_set_pud(pud_t *ptr, pud_t val)
465 trace_xen_mmu_set_pud(ptr, val);
467 /* If page is not pinned, we can just update the entry
468 directly */
469 if (!xen_page_pinned(ptr)) {
474 xen_set_pud_hyper(ptr, val);
477 #ifdef CONFIG_X86_PAE
478 static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
480 trace_xen_mmu_set_pte_atomic(ptep, pte);
481 set_64bit((u64 *)ptep, native_pte_val(pte));
484 static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
486 trace_xen_mmu_pte_clear(mm, addr, ptep);
487 if (!xen_batched_set_pte(ptep, native_make_pte(0)))
488 native_pte_clear(mm, addr, ptep);
491 static void xen_pmd_clear(pmd_t *pmdp)
493 trace_xen_mmu_pmd_clear(pmdp);
494 set_pmd(pmdp, __pmd(0));
496 #endif /* CONFIG_X86_PAE */
498 __visible pmd_t xen_make_pmd(pmdval_t pmd)
500 pmd = pte_pfn_to_mfn(pmd);
501 return native_make_pmd(pmd);
503 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
505 #if PAGETABLE_LEVELS == 4
506 __visible pudval_t xen_pud_val(pud_t pud)
508 return pte_mfn_to_pfn(pud.pud);
510 PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
512 __visible pud_t xen_make_pud(pudval_t pud)
514 pud = pte_pfn_to_mfn(pud);
516 return native_make_pud(pud);
518 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
520 static pgd_t *xen_get_user_pgd(pgd_t *pgd)
522 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
523 unsigned offset = pgd - pgd_page;
524 pgd_t *user_ptr = NULL;
526 if (offset < pgd_index(USER_LIMIT)) {
527 struct page *page = virt_to_page(pgd_page);
528 user_ptr = (pgd_t *)page->private;
536 static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
540 u.ptr = virt_to_machine(ptr).maddr;
541 u.val = pgd_val_ma(val);
542 xen_extend_mmu_update(&u);
546 * Raw hypercall-based set_pgd, intended for use in early boot before
547 * there's a page structure. This implies:
548 * 1. The only existing pagetable is the kernel's
549 * 2. It is always pinned
550 * 3. It has no user pagetable attached to it
552 static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
558 __xen_set_pgd_hyper(ptr, val);
560 xen_mc_issue(PARAVIRT_LAZY_MMU);
565 static void xen_set_pgd(pgd_t *ptr, pgd_t val)
567 pgd_t *user_ptr = xen_get_user_pgd(ptr);
569 trace_xen_mmu_set_pgd(ptr, user_ptr, val);
571 /* If page is not pinned, we can just update the entry
572 directly */
573 if (!xen_page_pinned(ptr)) {
576 WARN_ON(xen_page_pinned(user_ptr));
582 /* If it's pinned, then we can at least batch the kernel and
583 user updates together. */
586 __xen_set_pgd_hyper(ptr, val);
588 __xen_set_pgd_hyper(user_ptr, val);
590 xen_mc_issue(PARAVIRT_LAZY_MMU);
592 #endif /* PAGETABLE_LEVELS == 4 */
595 * (Yet another) pagetable walker. This one is intended for pinning a
596 * pagetable. This means that it walks a pagetable and calls the
597 * callback function on each page it finds making up the page table,
598 * at every level. It walks the entire pagetable, but it only bothers
599 * pinning pte pages which are below limit. In the normal case this
600 * will be STACK_TOP_MAX, but at boot we need to pin up to
601 * FIXADDR_TOP.
603 * For 32-bit the important bit is that we don't pin beyond there,
604 * because then we start getting into Xen's ptes.
606 * For 64-bit, we must skip the Xen hole in the middle of the address
607 * space, just after the big x86-64 virtual hole.
609 static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
610 int (*func)(struct mm_struct *mm, struct page *,
615 unsigned hole_low, hole_high;
616 unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
617 unsigned pgdidx, pudidx, pmdidx;
619 /* The limit is the last byte to be touched */
621 BUG_ON(limit >= FIXADDR_TOP);
623 if (xen_feature(XENFEAT_auto_translated_physmap))
627 * 64-bit has a great big hole in the middle of the address
628 * space, which contains the Xen mappings. On 32-bit these
629 * will end up making a zero-sized hole, so this is a no-op.
631 hole_low = pgd_index(USER_LIMIT);
632 hole_high = pgd_index(PAGE_OFFSET);
634 pgdidx_limit = pgd_index(limit);
636 pudidx_limit = pud_index(limit);
641 pmdidx_limit = pmd_index(limit);
646 for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
649 if (pgdidx >= hole_low && pgdidx < hole_high)
652 if (!pgd_val(pgd[pgdidx]))
655 pud = pud_offset(&pgd[pgdidx], 0);
657 if (PTRS_PER_PUD > 1) /* not folded */
658 flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
660 for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
663 if (pgdidx == pgdidx_limit &&
664 pudidx > pudidx_limit)
667 if (pud_none(pud[pudidx]))
670 pmd = pmd_offset(&pud[pudidx], 0);
672 if (PTRS_PER_PMD > 1) /* not folded */
673 flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
675 for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
678 if (pgdidx == pgdidx_limit &&
679 pudidx == pudidx_limit &&
680 pmdidx > pmdidx_limit)
683 if (pmd_none(pmd[pmdidx]))
686 pte = pmd_page(pmd[pmdidx]);
687 flush |= (*func)(mm, pte, PT_PTE);
693 /* Do the top level last, so that the callbacks can use it as
694 a cue to do final things like tlb flushes. */
695 flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
700 static int xen_pgd_walk(struct mm_struct *mm,
701 int (*func)(struct mm_struct *mm, struct page *,
705 return __xen_pgd_walk(mm, mm->pgd, func, limit);
708 /* If we're using split pte locks, then take the page's lock and
709 return a pointer to it. Otherwise return NULL. */
710 static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
712 spinlock_t *ptl = NULL;
714 #if USE_SPLIT_PTE_PTLOCKS
715 ptl = ptlock_ptr(page);
716 spin_lock_nest_lock(ptl, &mm->page_table_lock);
722 static void xen_pte_unlock(void *v)
728 static void xen_do_pin(unsigned level, unsigned long pfn)
733 op.arg1.mfn = pfn_to_mfn(pfn);
735 xen_extend_mmuext_op(&op);
738 static int xen_pin_page(struct mm_struct *mm, struct page *page,
741 unsigned pgfl = TestSetPagePinned(page);
745 flush = 0; /* already pinned */
746 else if (PageHighMem(page))
747 /* kmaps need flushing if we found an unpinned
748 highpage */
751 void *pt = lowmem_page_address(page);
752 unsigned long pfn = page_to_pfn(page);
753 struct multicall_space mcs = __xen_mc_entry(0);
759 * We need to hold the pagetable lock between the time
760 * we make the pagetable RO and when we actually pin
761 * it. If we don't, then other users may come in and
762 * attempt to update the pagetable by writing it,
763 * which will fail because the memory is RO but not
764 * pinned, so Xen won't do the trap'n'emulate.
766 * If we're using split pte locks, we can't hold the
767 * entire pagetable's worth of locks during the
768 * traverse, because we may wrap the preempt count (8
769 * bits). The solution is to mark RO and pin each PTE
770 * page while holding the lock. This means the number
771 * of locks we end up holding is never more than a
772 * batch size (~32 entries, at present).
774 * If we're not using split pte locks, we needn't pin
775 * the PTE pages independently, because we're
776 * protected by the overall pagetable lock.
780 ptl = xen_pte_lock(page, mm);
782 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
783 pfn_pte(pfn, PAGE_KERNEL_RO),
784 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
787 xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
789 /* Queue a deferred unlock for when this batch
790 is completed. */
791 xen_mc_callback(xen_pte_unlock, ptl);
798 /* This is called just after a mm has been created, but it has not
799 been used yet. We need to make sure that its pagetable is all
800 read-only, and can be pinned. */
801 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
803 trace_xen_mmu_pgd_pin(mm, pgd);
807 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
808 /* re-enable interrupts for flushing */
818 pgd_t *user_pgd = xen_get_user_pgd(pgd);
820 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
823 xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
824 xen_do_pin(MMUEXT_PIN_L4_TABLE,
825 PFN_DOWN(__pa(user_pgd)));
828 #else /* CONFIG_X86_32 */
829 #ifdef CONFIG_X86_PAE
830 /* Need to make sure unshared kernel PMD is pinnable */
831 xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
834 xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
835 #endif /* CONFIG_X86_64 */
839 static void xen_pgd_pin(struct mm_struct *mm)
841 __xen_pgd_pin(mm, mm->pgd);
845 * On save, we need to pin all pagetables to make sure they get their
846 * mfns turned into pfns. Search the list for any unpinned pgds and pin
847 * them (unpinned pgds are not currently in use, probably because the
848 * process is under construction or destruction).
850 * Expected to be called in stop_machine() ("equivalent to taking
851 * every spinlock in the system"), so the locking doesn't really
852 * matter all that much.
854 void xen_mm_pin_all(void)
858 spin_lock(&pgd_lock);
860 list_for_each_entry(page, &pgd_list, lru) {
861 if (!PagePinned(page)) {
862 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
863 SetPageSavePinned(page);
867 spin_unlock(&pgd_lock);
871 * The init_mm pagetable is really pinned as soon as it's created, but
872 * that's before we have page structures to store the bits. So do all
873 * the book-keeping now.
875 static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
882 static void __init xen_mark_init_mm_pinned(void)
884 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
887 static int xen_unpin_page(struct mm_struct *mm, struct page *page,
890 unsigned pgfl = TestClearPagePinned(page);
892 if (pgfl && !PageHighMem(page)) {
893 void *pt = lowmem_page_address(page);
894 unsigned long pfn = page_to_pfn(page);
895 spinlock_t *ptl = NULL;
896 struct multicall_space mcs;
899 * Do the converse to pin_page. If we're using split
900 * pte locks, we must be holding the lock while
901 * the pte page is unpinned but still RO to prevent
902 * concurrent updates from seeing it in this
903 * partially-pinned state.
905 if (level == PT_PTE) {
906 ptl = xen_pte_lock(page, mm);
909 xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
912 mcs = __xen_mc_entry(0);
914 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
915 pfn_pte(pfn, PAGE_KERNEL),
916 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
919 /* unlock when batch completed */
920 xen_mc_callback(xen_pte_unlock, ptl);
924 return 0; /* never need to flush on unpin */
927 /* Release a pagetable's pages back as normal RW */
928 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
930 trace_xen_mmu_pgd_unpin(mm, pgd);
934 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
938 pgd_t *user_pgd = xen_get_user_pgd(pgd);
941 xen_do_pin(MMUEXT_UNPIN_TABLE,
942 PFN_DOWN(__pa(user_pgd)));
943 xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
948 #ifdef CONFIG_X86_PAE
949 /* Need to make sure unshared kernel PMD is unpinned */
950 xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
954 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
959 static void xen_pgd_unpin(struct mm_struct *mm)
961 __xen_pgd_unpin(mm, mm->pgd);
965 * On resume, undo any pinning done at save, so that the rest of the
966 * kernel doesn't see any unexpected pinned pagetables.
968 void xen_mm_unpin_all(void)
972 spin_lock(&pgd_lock);
974 list_for_each_entry(page, &pgd_list, lru) {
975 if (PageSavePinned(page)) {
976 BUG_ON(!PagePinned(page));
977 __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
978 ClearPageSavePinned(page);
982 spin_unlock(&pgd_lock);
985 static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
987 spin_lock(&next->page_table_lock);
989 spin_unlock(&next->page_table_lock);
992 static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
994 spin_lock(&mm->page_table_lock);
996 spin_unlock(&mm->page_table_lock);
1001 /* Another cpu may still have its %cr3 pointing at the pagetable, so
1002 we need to repoint it somewhere else before we can unpin it. */
1003 static void drop_other_mm_ref(void *info)
1005 struct mm_struct *mm = info;
1006 struct mm_struct *active_mm;
1008 active_mm = this_cpu_read(cpu_tlbstate.active_mm);
1010 if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
1011 leave_mm(smp_processor_id());
1013 /* If this cpu still has a stale cr3 reference, then make sure
1014 it has been flushed. */
1015 if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
1016 load_cr3(swapper_pg_dir);
1019 static void xen_drop_mm_ref(struct mm_struct *mm)
1024 if (current->active_mm == mm) {
1025 if (current->mm == mm)
1026 load_cr3(swapper_pg_dir);
1028 leave_mm(smp_processor_id());
1031 /* Get the "official" set of cpus referring to our pagetable. */
1032 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
1033 for_each_online_cpu(cpu) {
1034 if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
1035 && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
1037 smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
1041 cpumask_copy(mask, mm_cpumask(mm));
1043 /* It's possible that a vcpu may have a stale reference to our
1044 cr3, because it's in lazy mode, and it hasn't yet flushed
1045 its set of pending hypercalls. In this case, we can
1046 look at its actual current cr3 value, and force it to flush
1047 if needed. */
1048 for_each_online_cpu(cpu) {
1049 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
1050 cpumask_set_cpu(cpu, mask);
1053 if (!cpumask_empty(mask))
1054 smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
1055 free_cpumask_var(mask);
1058 static void xen_drop_mm_ref(struct mm_struct *mm)
1060 if (current->active_mm == mm)
1061 load_cr3(swapper_pg_dir);
1066 * While a process runs, Xen pins its pagetables, which means that the
1067 * hypervisor forces it to be read-only, and it controls all updates
1068 * to it. This means that all pagetable updates have to go via the
1069 * hypervisor, which is moderately expensive.
1071 * Since we're pulling the pagetable down, we switch to use init_mm,
1072 * unpin old process pagetable and mark it all read-write, which
1073 * allows further operations on it to be simple memory accesses.
1075 * The only subtle point is that another CPU may be still using the
1076 * pagetable because of lazy tlb flushing. This means we need to
1077 * switch all CPUs off this pagetable before we can unpin it.
1079 static void xen_exit_mmap(struct mm_struct *mm)
1081 get_cpu(); /* make sure we don't move around */
1082 xen_drop_mm_ref(mm);
1085 spin_lock(&mm->page_table_lock);
1087 /* pgd may not be pinned in the error exit path of execve */
1088 if (xen_page_pinned(mm->pgd))
1091 spin_unlock(&mm->page_table_lock);
1094 static void xen_post_allocator_init(void);
1096 #ifdef CONFIG_X86_64
1097 static void __init xen_cleanhighmap(unsigned long vaddr,
1098 unsigned long vaddr_end)
1100 unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
1101 pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
1103 /* NOTE: The loop is more greedy than the cleanup_highmap variant.
1104 * We include the PMD passed in on _both_ boundaries. */
1105 for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
1106 pmd++, vaddr += PMD_SIZE) {
1109 if (vaddr < (unsigned long) _text || vaddr > kernel_end)
1110 set_pmd(pmd, __pmd(0));
1112 /* In case we did something silly, we should crash in this function
1113 * instead of somewhere later and be confusing. */
1116 static void __init xen_pagetable_p2m_copy(void)
1120 unsigned long new_mfn_list;
1122 if (xen_feature(XENFEAT_auto_translated_physmap))
1125 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1127 new_mfn_list = xen_revector_p2m_tree();
1128 /* No memory or already called. */
1129 if (!new_mfn_list || new_mfn_list == xen_start_info->mfn_list)
1132 /* using __ka address and sticking INVALID_P2M_ENTRY! */
1133 memset((void *)xen_start_info->mfn_list, 0xff, size);
1135 /* We should be in __ka space. */
1136 BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
1137 addr = xen_start_info->mfn_list;
1138 /* We roundup to the PMD, which means that if anybody at this stage is
1139 * using the __ka address of xen_start_info or xen_start_info->shared_info
1140 * they are going to crash. Fortunately we have already revectored
1141 * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */
1142 size = roundup(size, PMD_SIZE);
1143 xen_cleanhighmap(addr, addr + size);
1145 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1146 memblock_free(__pa(xen_start_info->mfn_list), size);
1147 /* And revector! Bye bye old array */
1148 xen_start_info->mfn_list = new_mfn_list;
1150 /* At this stage, cleanup_highmap has already cleaned __ka space
1151 * from _brk_limit way up to the max_pfn_mapped (which is the end of
1152 * the ramdisk). We continue on, erasing PMD entries that point to page
1153 * tables - do note that they are accessible at this stage via __va.
1154 * For good measure we also round up to the PMD - which means that if
1155 * anybody is using __ka address to the initial boot-stack - and try
1156 * to use it - they are going to crash. The xen_start_info has been
1157 * taken care of already in xen_setup_kernel_pagetable. */
1158 addr = xen_start_info->pt_base;
1159 size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
1161 xen_cleanhighmap(addr, addr + size);
1162 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1164 /* This is superfluous and not strictly necessary, but let's do it
1165 * anyway. The MODULES_VADDR -> MODULES_END range should be clear of
1166 * anything at this stage. */
1167 xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
1172 static void __init xen_pagetable_init(void)
1175 #ifdef CONFIG_X86_64
1176 xen_pagetable_p2m_copy();
1178 /* Allocate and initialize top and mid mfn levels for p2m structure */
1179 xen_build_mfn_list_list();
1181 xen_setup_shared_info();
1182 xen_post_allocator_init();
1184 static void xen_write_cr2(unsigned long cr2)
1186 this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
1189 static unsigned long xen_read_cr2(void)
1191 return this_cpu_read(xen_vcpu)->arch.cr2;
1194 unsigned long xen_read_cr2_direct(void)
1196 return this_cpu_read(xen_vcpu_info.arch.cr2);
1199 void xen_flush_tlb_all(void)
1201 struct mmuext_op *op;
1202 struct multicall_space mcs;
1204 trace_xen_mmu_flush_tlb_all(0);
1208 mcs = xen_mc_entry(sizeof(*op));
1211 op->cmd = MMUEXT_TLB_FLUSH_ALL;
1212 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1214 xen_mc_issue(PARAVIRT_LAZY_MMU);
1218 static void xen_flush_tlb(void)
1220 struct mmuext_op *op;
1221 struct multicall_space mcs;
1223 trace_xen_mmu_flush_tlb(0);
1227 mcs = xen_mc_entry(sizeof(*op));
1230 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1231 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1233 xen_mc_issue(PARAVIRT_LAZY_MMU);
1238 static void xen_flush_tlb_single(unsigned long addr)
1240 struct mmuext_op *op;
1241 struct multicall_space mcs;
1243 trace_xen_mmu_flush_tlb_single(addr);
1247 mcs = xen_mc_entry(sizeof(*op));
1249 op->cmd = MMUEXT_INVLPG_LOCAL;
1250 op->arg1.linear_addr = addr & PAGE_MASK;
1251 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1253 xen_mc_issue(PARAVIRT_LAZY_MMU);
1258 static void xen_flush_tlb_others(const struct cpumask *cpus,
1259 struct mm_struct *mm, unsigned long start,
1263 struct mmuext_op op;
1265 DECLARE_BITMAP(mask, num_processors);
1267 DECLARE_BITMAP(mask, NR_CPUS);
1270 struct multicall_space mcs;
1272 trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
1274 if (cpumask_empty(cpus))
1275 return; /* nothing to do */
1277 mcs = xen_mc_entry(sizeof(*args));
1279 args->op.arg2.vcpumask = to_cpumask(args->mask);
1281 /* Remove us, and any offline CPUs. */
1282 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1283 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
1285 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1286 if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
1287 args->op.cmd = MMUEXT_INVLPG_MULTI;
1288 args->op.arg1.linear_addr = start;
1291 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1293 xen_mc_issue(PARAVIRT_LAZY_MMU);
1296 static unsigned long xen_read_cr3(void)
1298 return this_cpu_read(xen_cr3);
1301 static void set_current_cr3(void *v)
1303 this_cpu_write(xen_current_cr3, (unsigned long)v);
1306 static void __xen_write_cr3(bool kernel, unsigned long cr3)
1308 struct mmuext_op op;
1311 trace_xen_mmu_write_cr3(kernel, cr3);
1314 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1318 WARN_ON(mfn == 0 && kernel);
1320 op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1323 xen_extend_mmuext_op(&op);
1326 this_cpu_write(xen_cr3, cr3);
1328 /* Update xen_current_cr3 once the batch has actually
1329 been committed. */
1330 xen_mc_callback(set_current_cr3, (void *)cr3);
1333 static void xen_write_cr3(unsigned long cr3)
1335 BUG_ON(preemptible());
1337 xen_mc_batch(); /* disables interrupts */
1339 /* Update while interrupts are disabled, so it's atomic with
1340 respect to ipis */
1341 this_cpu_write(xen_cr3, cr3);
1343 __xen_write_cr3(true, cr3);
1345 #ifdef CONFIG_X86_64
1347 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1349 __xen_write_cr3(false, __pa(user_pgd));
1351 __xen_write_cr3(false, 0);
1355 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1358 #ifdef CONFIG_X86_64
1360 * At the start of the day - when Xen launches a guest, it has already
1361 * built pagetables for the guest. We diligently look over them
1362 * in xen_setup_kernel_pagetable and graft them as appropriate into the
1363 * init_level4_pgt and its friends. Then when we are happy we load
1364 * the new init_level4_pgt - and continue on.
1366 * The generic code starts (start_kernel) and 'init_mem_mapping' sets
1367 * up the rest of the pagetables. When it has completed it loads the cr3.
1368 * N.B. that baremetal would start at 'start_kernel' (and the early
1369 * #PF handler would create bootstrap pagetables) - so we are running
1370 * with the same assumptions as what to do when write_cr3 is executed
1371 * at this stage.
1373 * Since there are no user-page tables at all, we have two variants
1374 * of xen_write_cr3 - the early bootup (this one), and the late one
1375 * (xen_write_cr3). The reason we have to do that is that in 64-bit
1376 * the Linux kernel and user-space are both in ring 3 while the
1377 * hypervisor is in ring 0.
1379 static void __init xen_write_cr3_init(unsigned long cr3)
1381 BUG_ON(preemptible());
1383 xen_mc_batch(); /* disables interrupts */
1385 /* Update while interrupts are disabled, so it's atomic with
1386 respect to ipis */
1387 this_cpu_write(xen_cr3, cr3);
1389 __xen_write_cr3(true, cr3);
1391 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1395 static int xen_pgd_alloc(struct mm_struct *mm)
1397 pgd_t *pgd = mm->pgd;
1400 BUG_ON(PagePinned(virt_to_page(pgd)));
1402 #ifdef CONFIG_X86_64
1404 struct page *page = virt_to_page(pgd);
1407 BUG_ON(page->private != 0);
1411 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1412 page->private = (unsigned long)user_pgd;
1414 if (user_pgd != NULL) {
1415 #ifdef CONFIG_X86_VSYSCALL_EMULATION
1416 user_pgd[pgd_index(VSYSCALL_ADDR)] =
1417 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1422 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1429 static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1431 #ifdef CONFIG_X86_64
1432 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1435 free_page((unsigned long)user_pgd);
1439 #ifdef CONFIG_X86_32
1440 static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
1442 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1443 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1444 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1445 pte_val_ma(pte));
1449 #else /* CONFIG_X86_64 */
1450 static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
1454 #endif /* CONFIG_X86_64 */
1457 * Init-time set_pte while constructing initial pagetables, which
1458 * doesn't allow RO page table pages to be remapped RW.
1460 * If there is no MFN for this PFN then this page is initially
1461 * ballooned out so clear the PTE (as in decrease_reservation() in
1462 * drivers/xen/balloon.c).
1464 * Many of these PTE updates are done on unpinned and writable pages
1465 * and doing a hypercall for these is unnecessary and expensive. At
1466 * this point it is not possible to tell if a page is pinned or not,
1467 * so always write the PTE directly and rely on Xen trapping and
1468 * emulating any updates as necessary.
1470 static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
1472 if (pte_mfn(pte) != INVALID_P2M_ENTRY)
1473 pte = mask_rw_pte(ptep, pte);
1474 else
1475 pte = __pte_ma(0);
1477 native_set_pte(ptep, pte);
1480 static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1482 struct mmuext_op op;
1484 op.arg1.mfn = pfn_to_mfn(pfn);
1485 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1489 /* Early in boot, while setting up the initial pagetable, assume
1490 everything is pinned. */
1491 static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1493 #ifdef CONFIG_FLATMEM
1494 BUG_ON(mem_map); /* should only be used early */
1496 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1497 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1500 /* Used for pmd and pud */
1501 static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1503 #ifdef CONFIG_FLATMEM
1504 BUG_ON(mem_map); /* should only be used early */
1506 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1509 /* Early release_pte assumes that all pts are pinned, since there's
1510 only init_mm and anything attached to that is pinned. */
1511 static void __init xen_release_pte_init(unsigned long pfn)
1513 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1514 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1517 static void __init xen_release_pmd_init(unsigned long pfn)
1519 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1522 static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1524 struct multicall_space mcs;
1525 struct mmuext_op *op;
1527 mcs = __xen_mc_entry(sizeof(*op));
1530 op->arg1.mfn = pfn_to_mfn(pfn);
1532 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1535 static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1537 struct multicall_space mcs;
1538 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1540 mcs = __xen_mc_entry(0);
1541 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1542 pfn_pte(pfn, prot), 0);
1545 /* This needs to make sure the new pte page is pinned iff it's being
1546 attached to a pinned pagetable. */
1547 static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1550 bool pinned = PagePinned(virt_to_page(mm->pgd));
1552 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
1555 struct page *page = pfn_to_page(pfn);
1557 SetPagePinned(page);
1559 if (!PageHighMem(page)) {
1562 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1564 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1565 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1567 xen_mc_issue(PARAVIRT_LAZY_MMU);
1569 /* make sure there are no stray mappings of
1570 this page */
1571 kmap_flush_unused();
1576 static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1578 xen_alloc_ptpage(mm, pfn, PT_PTE);
1581 static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1583 xen_alloc_ptpage(mm, pfn, PT_PMD);
1586 /* This should never happen until we're OK to use struct page */
1587 static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
1589 struct page *page = pfn_to_page(pfn);
1590 bool pinned = PagePinned(page);
1592 trace_xen_mmu_release_ptpage(pfn, level, pinned);
1595 if (!PageHighMem(page)) {
1598 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1599 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1601 __set_pfn_prot(pfn, PAGE_KERNEL);
1603 xen_mc_issue(PARAVIRT_LAZY_MMU);
1605 ClearPagePinned(page);
1609 static void xen_release_pte(unsigned long pfn)
1611 xen_release_ptpage(pfn, PT_PTE);
1614 static void xen_release_pmd(unsigned long pfn)
1616 xen_release_ptpage(pfn, PT_PMD);
1619 #if PAGETABLE_LEVELS == 4
1620 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1622 xen_alloc_ptpage(mm, pfn, PT_PUD);
1625 static void xen_release_pud(unsigned long pfn)
1627 xen_release_ptpage(pfn, PT_PUD);
1631 void __init xen_reserve_top(void)
1633 #ifdef CONFIG_X86_32
1634 unsigned long top = HYPERVISOR_VIRT_START;
1635 struct xen_platform_parameters pp;
1637 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1638 top = pp.virt_start;
1640 reserve_top_address(-top);
1641 #endif /* CONFIG_X86_32 */
1645 * Like __va(), but returns address in the kernel mapping (which is
1646 * all we have until the physical memory mapping has been set up).
1648 static void *__ka(phys_addr_t paddr)
1650 #ifdef CONFIG_X86_64
1651 return (void *)(paddr + __START_KERNEL_map);
1657 /* Convert a machine address to physical address */
1658 static unsigned long m2p(phys_addr_t maddr)
1662 maddr &= PTE_PFN_MASK;
1663 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1668 /* Convert a machine address to kernel virtual */
1669 static void *m2v(phys_addr_t maddr)
1671 return __ka(m2p(maddr));
1674 /* Set the page permissions on identity-mapped pages */
1675 static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
1677 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1678 pte_t pte = pfn_pte(pfn, prot);
1680 /* For PVH no need to set R/O or R/W to pin them or unpin them. */
1681 if (xen_feature(XENFEAT_auto_translated_physmap))
1684 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
1687 static void set_page_prot(void *addr, pgprot_t prot)
1689 return set_page_prot_flags(addr, prot, UVMF_NONE);
1691 #ifdef CONFIG_X86_32
1692 static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1694 unsigned pmdidx, pteidx;
1698 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1703 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1706 /* Reuse or allocate a page of ptes */
1707 if (pmd_present(pmd[pmdidx]))
1708 pte_page = m2v(pmd[pmdidx].pmd);
1710 /* Check for free pte pages */
1711 if (ident_pte == LEVEL1_IDENT_ENTRIES)
1714 pte_page = &level1_ident_pgt[ident_pte];
1715 ident_pte += PTRS_PER_PTE;
1717 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1720 /* Install mappings */
1721 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1724 #ifdef CONFIG_X86_32
1725 if (pfn > max_pfn_mapped)
1726 max_pfn_mapped = pfn;
1729 if (!pte_none(pte_page[pteidx]))
1732 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1733 pte_page[pteidx] = pte;
1737 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1738 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1740 set_page_prot(pmd, PAGE_KERNEL_RO);
1743 void __init xen_setup_machphys_mapping(void)
1745 struct xen_machphys_mapping mapping;
1747 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1748 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
1749 machine_to_phys_nr = mapping.max_mfn + 1;
1751 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
1753 #ifdef CONFIG_X86_32
1754 WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
1755 < machine_to_phys_mapping);
1759 #ifdef CONFIG_X86_64
1760 static void convert_pfn_mfn(void *v)
1765 /* All levels are converted the same way, so just treat them
1766 as ptes. */
1767 for (i = 0; i < PTRS_PER_PTE; i++)
1768 pte[i] = xen_make_pte(pte[i].pte);
1770 static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1773 if (*pt_base == PFN_DOWN(__pa(addr))) {
1774 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1775 clear_page((void *)addr);
1778 if (*pt_end == PFN_DOWN(__pa(addr))) {
1779 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1780 clear_page((void *)addr);
1785 * Set up the initial kernel pagetable.
1787 * We can construct this by grafting the Xen provided pagetable into
1788 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
1789 * level2_ident_pgt, and level2_kernel_pgt. This means that only the
1790 * kernel has a physical mapping to start with - but that's enough to
1791 * get __va working. We need to fill in the rest of the physical
1792 * mapping once some sort of allocator has been set up. NOTE: for
1793 * PVH, the page tables are native.
1795 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1799 unsigned long addr[3];
1800 unsigned long pt_base, pt_end;
1803 /* max_pfn_mapped is the last pfn mapped in the initial memory
1804 * mappings. Considering that on Xen after the kernel mappings we
1805 * have the mappings of some pages that don't exist in pfn space, we
1806 * set max_pfn_mapped to the last real pfn mapped. */
1807 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1809 pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
1810 pt_end = pt_base + xen_start_info->nr_pt_frames;
1812 /* Zap identity mapping */
1813 init_level4_pgt[0] = __pgd(0);
1815 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1816 /* Pre-constructed entries are in pfn, so convert to mfn */
1817 /* L4[272] -> level3_ident_pgt
1818 * L4[511] -> level3_kernel_pgt */
1819 convert_pfn_mfn(init_level4_pgt);
1821 /* L3_i[0] -> level2_ident_pgt */
1822 convert_pfn_mfn(level3_ident_pgt);
1823 /* L3_k[510] -> level2_kernel_pgt
1824 * L3_k[511] -> level2_fixmap_pgt */
1825 convert_pfn_mfn(level3_kernel_pgt);
1827 /* L3_k[511][506] -> level1_fixmap_pgt */
1828 convert_pfn_mfn(level2_fixmap_pgt);
1830 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
1831 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1832 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1834 addr[0] = (unsigned long)pgd;
1835 addr[1] = (unsigned long)l3;
1836 addr[2] = (unsigned long)l2;
1837 /* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
1838 * Both L4[272][0] and L4[511][510] have entries that point to the same
1839 * L2 (PMD) tables. Meaning that if you modify it in __va space
1840 * it will be also modified in the __ka space! (But if you just
1841 * modify the PMD table to point to other PTE's or none, then you
1842 * are OK - which is what cleanup_highmap does) */
1843 copy_page(level2_ident_pgt, l2);
1844 /* Graft it onto L4[511][510] */
1845 copy_page(level2_kernel_pgt, l2);
1847 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1848 /* Make pagetable pieces RO */
1849 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1850 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1851 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1852 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1853 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
1854 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1855 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1856 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
1858 /* Pin down new L4 */
1859 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1860 PFN_DOWN(__pa_symbol(init_level4_pgt)));
1862 /* Unpin Xen-provided one */
1863 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1866 * At this stage there can be no user pgd, and no page
1867 * structure to attach it to, so make sure we just set kernel
1868 * pgd.
1871 __xen_write_cr3(true, __pa(init_level4_pgt));
1872 xen_mc_issue(PARAVIRT_LAZY_CPU);
1874 native_write_cr3(__pa(init_level4_pgt));
1876 /* We can't easily rip out the L3 and L2, as the Xen pagetables are
1877 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
1878 * the initial domain. For guests using the toolstack, they are in:
1879 * [L4], [L3], [L2], [L1], [L1] order. So for dom0 we can only
1880 * rip out the [L4] (pgd), but for guests we shave off three pages.
1882 for (i = 0; i < ARRAY_SIZE(addr); i++)
1883 check_pt_base(&pt_base, &pt_end, addr[i]);
1885 /* Our (by three pages) smaller Xen pagetable that we are using */
1886 memblock_reserve(PFN_PHYS(pt_base), (pt_end - pt_base) * PAGE_SIZE);
1887 /* Revector the xen_start_info */
1888 xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
1890 #else /* !CONFIG_X86_64 */
1891 static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
1892 static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
1894 static void __init xen_write_cr3_init(unsigned long cr3)
1896 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
1898 BUG_ON(read_cr3() != __pa(initial_page_table));
1899 BUG_ON(cr3 != __pa(swapper_pg_dir));
1902 * We are switching to swapper_pg_dir for the first time (from
1903 * initial_page_table) and therefore need to mark that page
1904 * read-only and then pin it.
1906 * Xen disallows sharing of kernel PMDs for PAE
1907 * guests. Therefore we must copy the kernel PMD from
1908 * initial_page_table into a new kernel PMD to be used in
1909 * swapper_pg_dir.
1911 swapper_kernel_pmd =
1912 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
1913 copy_page(swapper_kernel_pmd, initial_kernel_pmd);
1914 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
1915 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
1916 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
1918 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
1920 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
1922 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
1923 PFN_DOWN(__pa(initial_page_table)));
1924 set_page_prot(initial_page_table, PAGE_KERNEL);
1925 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
1927 pv_mmu_ops.write_cr3 = &xen_write_cr3;
1930 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1934 initial_kernel_pmd =
1935 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
1937 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
1938 xen_start_info->nr_pt_frames * PAGE_SIZE +
1941 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
1942 copy_page(initial_kernel_pmd, kernel_pmd);
1944 xen_map_identity_early(initial_kernel_pmd, max_pfn);
1946 copy_page(initial_page_table, pgd);
1947 initial_page_table[KERNEL_PGD_BOUNDARY] =
1948 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
1950 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
1951 set_page_prot(initial_page_table, PAGE_KERNEL_RO);
1952 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
1954 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1956 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
1957 PFN_DOWN(__pa(initial_page_table)));
1958 xen_write_cr3(__pa(initial_page_table));
1960 memblock_reserve(__pa(xen_start_info->pt_base),
1961 xen_start_info->nr_pt_frames * PAGE_SIZE);
1963 #endif /* CONFIG_X86_64 */
1965 static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
1967 static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
1971 phys >>= PAGE_SHIFT;
1974 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
1976 #ifdef CONFIG_X86_32
1978 # ifdef CONFIG_HIGHMEM
1979 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
1981 #elif defined(CONFIG_X86_VSYSCALL_EMULATION)
1984 case FIX_TEXT_POKE0:
1985 case FIX_TEXT_POKE1:
1986 /* All local page mappings */
1987 pte = pfn_pte(phys, prot);
1990 #ifdef CONFIG_X86_LOCAL_APIC
1991 case FIX_APIC_BASE: /* maps dummy local APIC */
1992 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
1996 #ifdef CONFIG_X86_IO_APIC
1997 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
1999 * We just don't map the IO APIC - all access is via
2000 * hypercalls. Keep the address in the pte for reference.
2002 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2006 case FIX_PARAVIRT_BOOTMAP:
2007 /* This is an MFN, but it isn't an IO mapping from the
2008 IO domain */
2009 pte = mfn_pte(phys, prot);
2013 /* By default, set_fixmap is used for hardware mappings */
2014 pte = mfn_pte(phys, prot);
2018 __native_set_fixmap(idx, pte);
2020 #ifdef CONFIG_X86_VSYSCALL_EMULATION
2021 /* Replicate changes to map the vsyscall page into the user
2022 pagetable vsyscall mapping. */
2023 if (idx == VSYSCALL_PAGE) {
2024 unsigned long vaddr = __fix_to_virt(idx);
2025 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2030 static void __init xen_post_allocator_init(void)
2032 if (xen_feature(XENFEAT_auto_translated_physmap))
2035 pv_mmu_ops.set_pte = xen_set_pte;
2036 pv_mmu_ops.set_pmd = xen_set_pmd;
2037 pv_mmu_ops.set_pud = xen_set_pud;
2038 #if PAGETABLE_LEVELS == 4
2039 pv_mmu_ops.set_pgd = xen_set_pgd;
2042 /* This will work as long as patching hasn't happened yet
2043 (which it hasn't) */
2044 pv_mmu_ops.alloc_pte = xen_alloc_pte;
2045 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
2046 pv_mmu_ops.release_pte = xen_release_pte;
2047 pv_mmu_ops.release_pmd = xen_release_pmd;
2048 #if PAGETABLE_LEVELS == 4
2049 pv_mmu_ops.alloc_pud = xen_alloc_pud;
2050 pv_mmu_ops.release_pud = xen_release_pud;
2053 #ifdef CONFIG_X86_64
2054 pv_mmu_ops.write_cr3 = &xen_write_cr3;
2055 SetPagePinned(virt_to_page(level3_user_vsyscall));
2057 xen_mark_init_mm_pinned();
2060 static void xen_leave_lazy_mmu(void)
2064 paravirt_leave_lazy_mmu();
2068 static const struct pv_mmu_ops xen_mmu_ops __initconst = {
2069 .read_cr2 = xen_read_cr2,
2070 .write_cr2 = xen_write_cr2,
2072 .read_cr3 = xen_read_cr3,
2073 .write_cr3 = xen_write_cr3_init,
2075 .flush_tlb_user = xen_flush_tlb,
2076 .flush_tlb_kernel = xen_flush_tlb,
2077 .flush_tlb_single = xen_flush_tlb_single,
2078 .flush_tlb_others = xen_flush_tlb_others,
2080 .pte_update = paravirt_nop,
2081 .pte_update_defer = paravirt_nop,
2083 .pgd_alloc = xen_pgd_alloc,
2084 .pgd_free = xen_pgd_free,
2086 .alloc_pte = xen_alloc_pte_init,
2087 .release_pte = xen_release_pte_init,
2088 .alloc_pmd = xen_alloc_pmd_init,
2089 .release_pmd = xen_release_pmd_init,
2091 .set_pte = xen_set_pte_init,
2092 .set_pte_at = xen_set_pte_at,
2093 .set_pmd = xen_set_pmd_hyper,
2095 .ptep_modify_prot_start = __ptep_modify_prot_start,
2096 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2098 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2099 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
2101 .make_pte = PV_CALLEE_SAVE(xen_make_pte),
2102 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
2104 #ifdef CONFIG_X86_PAE
2105 .set_pte_atomic = xen_set_pte_atomic,
2106 .pte_clear = xen_pte_clear,
2107 .pmd_clear = xen_pmd_clear,
2108 #endif /* CONFIG_X86_PAE */
2109 .set_pud = xen_set_pud_hyper,
2111 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2112 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
2114 #if PAGETABLE_LEVELS == 4
2115 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2116 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
2117 .set_pgd = xen_set_pgd_hyper,
2119 .alloc_pud = xen_alloc_pmd_init,
2120 .release_pud = xen_release_pmd_init,
2121 #endif /* PAGETABLE_LEVELS == 4 */
2123 .activate_mm = xen_activate_mm,
2124 .dup_mmap = xen_dup_mmap,
2125 .exit_mmap = xen_exit_mmap,
2128 .enter = paravirt_enter_lazy_mmu,
2129 .leave = xen_leave_lazy_mmu,
2130 .flush = paravirt_flush_lazy_mmu,
2133 .set_fixmap = xen_set_fixmap,
2136 void __init xen_init_mmu_ops(void)
2138 x86_init.paging.pagetable_init = xen_pagetable_init;
2140 /* Optimization - we can use the HVM one but it has no idea which
2141 * VCPUs are descheduled - which means that it will needlessly IPI
2142 * them. Xen knows, so let it do the job.
2144 if (xen_feature(XENFEAT_auto_translated_physmap)) {
2145 pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others;
2148 pv_mmu_ops = xen_mmu_ops;
2150 memset(dummy_mapping, 0xff, PAGE_SIZE);
2153 /* Protected by xen_reservation_lock. */
2154 #define MAX_CONTIG_ORDER 9 /* 2MB */
2155 static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
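
/*
 * Worked numbers (restating the constants above): MAX_CONTIG_ORDER 9
 * allows at most 1 << 9 == 512 frames per exchange, i.e. 512 * 4kB =
 * 2MB, so discontig_frames holds 512 entries.
 */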
2157 #define VOID_PTE (mfn_pte(0, __pgprot(0)))
2158 static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2159 unsigned long *in_frames,
2160 unsigned long *out_frames)
2163 struct multicall_space mcs;
2166 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2167 mcs = __xen_mc_entry(0);
2170 in_frames[i] = virt_to_mfn(vaddr);
2172 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
2173 __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
2176 out_frames[i] = virt_to_pfn(vaddr);
2182 * Update the pfn-to-mfn mappings for a virtual address range, either to
2183 * point to an array of mfns, or contiguously from a single starting
2184 * mfn.
2186 static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2187 unsigned long *mfns,
2188 unsigned long first_mfn)
2195 limit = 1u << order;
2196 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2197 struct multicall_space mcs;
2200 mcs = __xen_mc_entry(0);
2204 mfn = first_mfn + i;
2206 if (i < (limit - 1))
2210 flags = UVMF_INVLPG | UVMF_ALL;
2212 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2215 MULTI_update_va_mapping(mcs.mc, vaddr,
2216 mfn_pte(mfn, PAGE_KERNEL), flags);
2218 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2225 * Perform the hypercall to exchange a region of our pfns to point to
2226 * memory with the required contiguous alignment. Takes the pfns as
2227 * input, and populates mfns as output.
2229 * Returns a success code indicating whether the hypervisor was able to
2230 * satisfy the request or not.
2232 static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2233 unsigned long *pfns_in,
2234 unsigned long extents_out,
2235 unsigned int order_out,
2236 unsigned long *mfns_out,
2237 unsigned int address_bits)
2242 struct xen_memory_exchange exchange = {
2244 .nr_extents = extents_in,
2245 .extent_order = order_in,
2246 .extent_start = pfns_in,
2250 .nr_extents = extents_out,
2251 .extent_order = order_out,
2252 .extent_start = mfns_out,
2253 .address_bits = address_bits,
2258 BUG_ON(extents_in << order_in != extents_out << order_out);
2260 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2261 success = (exchange.nr_exchanged == extents_in);
2263 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2264 BUG_ON(success && (rc != 0));
2269 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
2270 unsigned int address_bits,
2271 dma_addr_t *dma_handle)
2273 unsigned long *in_frames = discontig_frames, out_frame;
2274 unsigned long flags;
2276 unsigned long vstart = (unsigned long)phys_to_virt(pstart);
2279 * Currently an auto-translated guest will not perform I/O, nor will
2280 * it require PAE page directories below 4GB. Therefore any calls to
2281 * this function are redundant and can be ignored.
2284 if (xen_feature(XENFEAT_auto_translated_physmap))
2287 if (unlikely(order > MAX_CONTIG_ORDER))
2290 memset((void *) vstart, 0, PAGE_SIZE << order);
2292 spin_lock_irqsave(&xen_reservation_lock, flags);
2294 /* 1. Zap current PTEs, remembering MFNs. */
2295 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2297 /* 2. Get a new contiguous memory extent. */
2298 out_frame = virt_to_pfn(vstart);
2299 success = xen_exchange_memory(1UL << order, 0, in_frames,
2300 1, order, &out_frame,
2303 /* 3. Map the new extent in place of old pages. */
2305 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2307 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2309 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2311 *dma_handle = virt_to_machine(vstart).maddr;
2312 return success ? 0 : -ENOMEM;
2314 EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
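
/*
 * Usage sketch (hypothetical caller, in the style of swiotlb-xen):
 * make an order-2 buffer machine-contiguous and addressable in 32 bits:
 *
 *	dma_addr_t dma;
 *	void *buf = (void *)__get_free_pages(GFP_KERNEL, 2);
 *	int rc = xen_create_contiguous_region(virt_to_phys(buf), 2,
 *					      32, &dma);
 *
 * On success, dma holds the machine address of the now-contiguous
 * buffer.
 */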
2316 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
2318 unsigned long *out_frames = discontig_frames, in_frame;
2319 unsigned long flags;
2321 unsigned long vstart;
2323 if (xen_feature(XENFEAT_auto_translated_physmap))
2326 if (unlikely(order > MAX_CONTIG_ORDER))
2329 vstart = (unsigned long)phys_to_virt(pstart);
2330 memset((void *) vstart, 0, PAGE_SIZE << order);
2332 spin_lock_irqsave(&xen_reservation_lock, flags);
2334 /* 1. Find start MFN of contiguous extent. */
2335 in_frame = virt_to_mfn(vstart);
2337 /* 2. Zap current PTEs. */
2338 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2340 /* 3. Do the exchange for non-contiguous MFNs. */
2341 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2344 /* 4. Map new pages in place of old pages. */
2346 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2348 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2350 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2352 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
2354 #ifdef CONFIG_XEN_PVHVM
2355 #ifdef CONFIG_PROC_VMCORE
2357 * This function is used in two contexts:
2358 * - the kdump kernel has to check whether a pfn of the crashed kernel
2359 * was a ballooned page. vmcore is using this function to decide
2360 * whether to access a pfn of the crashed kernel.
2361 * - the kexec kernel has to check whether a pfn was ballooned by the
2362 * previous kernel. If the pfn is ballooned, handle it properly.
2363 * Returns 0 if the pfn is not backed by a RAM page, the caller may
2364 * handle the pfn specially in this case.
2366 static int xen_oldmem_pfn_is_ram(unsigned long pfn)
2368 struct xen_hvm_get_mem_type a = {
2369 .domid = DOMID_SELF,
2374 if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a))
2377 switch (a.mem_type) {
2378 case HVMMEM_mmio_dm:
2392 static void xen_hvm_exit_mmap(struct mm_struct *mm)
2394 struct xen_hvm_pagetable_dying a;
2397 a.domid = DOMID_SELF;
2398 a.gpa = __pa(mm->pgd);
2399 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2400 WARN_ON_ONCE(rc < 0);
2403 static int is_pagetable_dying_supported(void)
2405 struct xen_hvm_pagetable_dying a;
2408 a.domid = DOMID_SELF;
2410 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2412 printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
2418 void __init xen_hvm_init_mmu_ops(void)
2420 if (is_pagetable_dying_supported())
2421 pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
2422 #ifdef CONFIG_PROC_VMCORE
2423 register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
2428 #ifdef CONFIG_XEN_PVH
2430 * Map a foreign gfn (fgfn) to a local pfn (lpfn). This is for user
2431 * space creating a new guest on pvh dom0 and needing to map domU pages.
2433 static int xlate_add_to_p2m(unsigned long lpfn, unsigned long fgfn,
2437 xen_pfn_t gpfn = lpfn;
2438 xen_ulong_t idx = fgfn;
2440 struct xen_add_to_physmap_range xatp = {
2441 .domid = DOMID_SELF,
2442 .foreign_domid = domid,
2444 .space = XENMAPSPACE_gmfn_foreign,
2446 set_xen_guest_handle(xatp.idxs, &idx);
2447 set_xen_guest_handle(xatp.gpfns, &gpfn);
2448 set_xen_guest_handle(xatp.errs, &err);
2450 rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
2456 static int xlate_remove_from_p2m(unsigned long spfn, int count)
2458 struct xen_remove_from_physmap xrp;
2461 for (i = 0; i < count; i++) {
2462 xrp.domid = DOMID_SELF;
2464 rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
2471 struct xlate_remap_data {
2472 unsigned long fgfn; /* foreign domain's gfn */
2476 struct page **pages;
2479 static int xlate_map_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
2483 struct xlate_remap_data *remap = data;
2484 unsigned long pfn = page_to_pfn(remap->pages[remap->index++]);
2485 pte_t pteval = pte_mkspecial(pfn_pte(pfn, remap->prot));
2487 rc = xlate_add_to_p2m(pfn, remap->fgfn, remap->domid);
2490 native_set_pte(ptep, pteval);
2495 static int xlate_remap_gfn_range(struct vm_area_struct *vma,
2496 unsigned long addr, unsigned long mfn,
2497 int nr, pgprot_t prot, unsigned domid,
2498 struct page **pages)
2501 struct xlate_remap_data pvhdata;
2506 pvhdata.prot = prot;
2507 pvhdata.domid = domid;
2509 pvhdata.pages = pages;
2510 err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
2511 xlate_map_pte_fn, &pvhdata);
2517 #define REMAP_BATCH_SIZE 16
2522 struct mmu_update *mmu_update;
2525 static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2526 unsigned long addr, void *data)
2528 struct remap_data *rmd = data;
2529 pte_t pte = pte_mkspecial(mfn_pte(rmd->mfn++, rmd->prot));
2531 rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
2532 rmd->mmu_update->val = pte_val_ma(pte);
2538 int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
2540 xen_pfn_t mfn, int nr,
2541 pgprot_t prot, unsigned domid,
2542 struct page **pages)
2545 struct remap_data rmd;
2546 struct mmu_update mmu_update[REMAP_BATCH_SIZE];
2548 unsigned long range;
2551 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
2553 if (xen_feature(XENFEAT_auto_translated_physmap)) {
2554 #ifdef CONFIG_XEN_PVH
2555 /* We need to update the local page tables and the xen HAP */
2556 return xlate_remap_gfn_range(vma, addr, mfn, nr, prot,
2567 batch = min(REMAP_BATCH_SIZE, nr);
2568 range = (unsigned long)batch << PAGE_SHIFT;
2570 rmd.mmu_update = mmu_update;
2571 err = apply_to_page_range(vma->vm_mm, addr, range,
2572 remap_area_mfn_pte_fn, &rmd);
2576 err = HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid);
2587 xen_flush_tlb_all();
2591 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
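
/*
 * Usage sketch (hypothetical values, in the style of the privcmd
 * driver): map nr foreign frames starting at mfn into a VM_IO |
 * VM_PFNMAP vma:
 *
 *	rc = xen_remap_domain_mfn_range(vma, vma->vm_start, mfn, nr,
 *					vma->vm_page_prot, domid, NULL);
 */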
2593 /* Returns: 0 on success */
2594 int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
2595 int numpgs, struct page **pages)
2597 if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
2600 #ifdef CONFIG_XEN_PVH
2603 * The mmu has already cleaned up the process mmu
2604 * resources at this point (lookup_address will return
2605 * NULL).
2607 unsigned long pfn = page_to_pfn(pages[numpgs]);
2609 xlate_remove_from_p2m(pfn, 1);
2612 * We don't need to flush tlbs because as part of
2613 * xlate_remove_from_p2m, the hypervisor will do tlb flushes
2614 * after removing the p2m entries from the EPT/NPT
2615 * tables.
2621 EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);