 * Kernel-based Virtual Machine driver for Linux
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Yaniv Kamay <yaniv@qumranet.com>
 * Avi Kivity <avi@qumranet.com>
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled more than once: once per guest
 * pte format (64-bit, 32-bit, and EPT).
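/*
 * A rough sketch of how this template is instantiated (illustrative only;
 * the exact defines live in the file that includes this header, normally
 * mmu.c, and the PTTYPE_EPT value shown here is an assumption):
 *
 *	#define PTTYPE_EPT 18		// any value other than 32 and 64
 *	#define PTTYPE PTTYPE_EPT
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */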
 * This is used to catch uses of the PT_GUEST_(DIRTY|ACCESS)_SHIFT macros
 * that are not optimized away when building for EPT, which has no A/D bits.
extern u64 __pure __using_nonexistent_pte_bit(void)
	__compiletime_error("wrong use of PT_GUEST_(DIRTY|ACCESS)_SHIFT");
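/*
 * A minimal sketch of how the trap above works (descriptive note; the names
 * "pte" and "dirty" below are illustrative, not real code in this file):
 * the function is declared but never defined, and __compiletime_error()
 * turns any call that survives optimization into a build failure.  Code
 * that touches the shifts must therefore be guarded so the compiler can
 * prove the call dead on the EPT build, e.g.:
 *
 *	if (!PT_GUEST_DIRTY_MASK)
 *		return;
 *	dirty = (pte >> PT_GUEST_DIRTY_SHIFT) & 1;
 *
 * Because the early return is taken whenever PT_GUEST_DIRTY_MASK is 0 (the
 * EPT case), the shift below it is dead code and the undefined function is
 * optimized out; protect_clean_gpte() and update_accessed_dirty_bits()
 * below follow this pattern.
 */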
#if PTTYPE == 64
#define pt_element_t u64
#define guest_walker guest_walker64
#define FNAME(name) paging##64_##name
#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
#define PT_LEVEL_BITS PT64_LEVEL_BITS
#define PT_GUEST_ACCESSED_MASK PT_ACCESSED_MASK
#define PT_GUEST_DIRTY_MASK PT_DIRTY_MASK
#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
#define pt_element_t u32
#define guest_walker guest_walker32
#define FNAME(name) paging##32_##name
#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
#define PT_LEVEL_BITS PT32_LEVEL_BITS
#define PT_MAX_FULL_LEVELS 2
#define PT_GUEST_ACCESSED_MASK PT_ACCESSED_MASK
#define PT_GUEST_DIRTY_MASK PT_DIRTY_MASK
#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
#define CMPXCHG cmpxchg
#elif PTTYPE == PTTYPE_EPT
#define pt_element_t u64
#define guest_walker guest_walkerEPT
#define FNAME(name) ept_##name
#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
#define PT_LEVEL_BITS PT64_LEVEL_BITS
#define PT_GUEST_ACCESSED_MASK 0
#define PT_GUEST_DIRTY_MASK 0
#define PT_GUEST_DIRTY_SHIFT __using_nonexistent_pte_bit()
#define PT_GUEST_ACCESSED_SHIFT __using_nonexistent_pte_bit()
#define CMPXCHG cmpxchg64
#define PT_MAX_FULL_LEVELS 4
#else
	#error Invalid PTTYPE value
#endif
#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)
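/*
 * For reference, derived from the FNAME() definitions above: with
 * PTTYPE == 64 a call such as FNAME(page_fault) expands to
 * paging64_page_fault, with PTTYPE == 32 it expands to paging32_page_fault,
 * and in the EPT instantiation it expands to ept_page_fault.
 */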
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
	struct x86_exception fault;
static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte)
	/* dirty bit is not supported, so no need to track it */
	if (!PT_GUEST_DIRTY_MASK)
	BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);
	mask = (unsigned)~ACC_WRITE_MASK;
	/* Allow write access to dirty gptes */
	mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &
		PT_WRITABLE_MASK;
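	/*
	 * Descriptive note: the shift moves the guest dirty bit down into
	 * the writable-bit position, so the mask re-enables ACC_WRITE_MASK
	 * only for gptes that are already dirty; a clean gpte loses write
	 * access and the first write will fault so the dirty bit can be set.
	 */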
static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
	bit7 = (gpte >> 7) & 1;
	return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
static inline int FNAME(is_present_gpte)(unsigned long pte)
#if PTTYPE != PTTYPE_EPT
	return is_present_gpte(pte);
static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			       pt_element_t __user *ptep_user, unsigned index,
			       pt_element_t orig_pte, pt_element_t new_pte)
	npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
	/* Check if the user is doing something meaningless. */
	if (unlikely(npages != 1))
	table = kmap_atomic(page);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table);
	kvm_release_page_dirty(page);
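	/*
	 * Descriptive note: a non-zero return below means the gpte changed
	 * under us (the cmpxchg did not observe orig_pte); callers treat
	 * that as "retry the guest page-table walk".
	 */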
	return (ret != orig_pte);
static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
					 struct kvm_mmu_page *sp, u64 *spte,
					 u64 gpte)
	if (FNAME(is_rsvd_bits_set)(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
	if (!FNAME(is_present_gpte)(gpte))
	/* if the accessed bit is not supported, prefetch non-accessed gptes too */
	if (PT_GUEST_ACCESSED_MASK && !(gpte & PT_GUEST_ACCESSED_MASK))
	drop_spte(vcpu->kvm, spte);
static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
#if PTTYPE == PTTYPE_EPT
	access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
		((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
	access &= ~(gpte >> PT64_NX_SHIFT);
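	/*
	 * Descriptive note on the non-EPT path above: it relies on
	 * ACC_WRITE_MASK and ACC_USER_MASK occupying the same bit positions
	 * as PT_WRITABLE_MASK and PT_USER_MASK in a guest pte (see the
	 * BUILD_BUG_ON in protect_clean_gpte()), and on shifting the NX bit
	 * (bit 63) down to bit 0 so it clears ACC_EXEC_MASK when the gpte
	 * forbids execution.
	 */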
static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
					     struct guest_walker *walker,
	unsigned level, index;
	pt_element_t pte, orig_pte;
	pt_element_t __user *ptep_user;
	/* dirty/accessed bits are not supported, so no need to update them */
	if (!PT_GUEST_DIRTY_MASK)
	for (level = walker->max_level; level >= walker->level; --level) {
		pte = orig_pte = walker->ptes[level - 1];
		table_gfn = walker->table_gfn[level - 1];
		ptep_user = walker->ptep_user[level - 1];
		index = offset_in_page(ptep_user) / sizeof(pt_element_t);
		if (!(pte & PT_GUEST_ACCESSED_MASK)) {
			trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
			pte |= PT_GUEST_ACCESSED_MASK;
		if (level == walker->level && write_fault &&
		    !(pte & PT_GUEST_DIRTY_MASK)) {
			trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
			pte |= PT_GUEST_DIRTY_MASK;
		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
		mark_page_dirty(vcpu->kvm, table_gfn);
		walker->ptes[level - 1] = pte;
 * Fetch a guest pte for a guest virtual address
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    gva_t addr, u32 access)
	pt_element_t __user *uninitialized_var(ptep_user);
	unsigned index, pt_access, pte_access, accessed_dirty;
	const int write_fault = access & PFERR_WRITE_MASK;
	const int user_fault = access & PFERR_USER_MASK;
	const int fetch_fault = access & PFERR_FETCH_MASK;
	trace_kvm_mmu_pagetable_walk(addr, access);
	walker->level = mmu->root_level;
	pte = mmu->get_cr3(vcpu);
	if (walker->level == PT32E_ROOT_LEVEL) {
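		/*
		 * Descriptive note: with 32-bit PAE paging, address bits
		 * 31:30 select one of the four PDPT entries, hence the
		 * (addr >> 30) & 3 index used below.
		 */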
		pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (!FNAME(is_present_gpte)(pte))
	walker->max_level = walker->level;
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);
	accessed_dirty = PT_GUEST_ACCESSED_MASK;
	pt_access = pte_access = ACC_ALL;
		unsigned long host_addr;
		pt_access &= pte_access;
		index = PT_INDEX(addr, walker->level);
		table_gfn = gpte_to_gfn(pte);
		offset = index * sizeof(pt_element_t);
		pte_gpa = gfn_to_gpa(table_gfn) + offset;
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;
		real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
					      PFERR_USER_MASK|PFERR_WRITE_MASK);
		if (unlikely(real_gfn == UNMAPPED_GVA))
		real_gfn = gpa_to_gfn(real_gfn);
		host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
		if (unlikely(kvm_is_error_hva(host_addr)))
		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
		if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
		walker->ptep_user[walker->level - 1] = ptep_user;
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (unlikely(!FNAME(is_present_gpte)(pte)))
		if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte,
						     walker->level))) {
			errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
		accessed_dirty &= pte;
		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
		walker->ptes[walker->level - 1] = pte;
	} while (!is_last_gpte(mmu, walker->level, pte));
	if (unlikely(permission_fault(mmu, pte_access, access))) {
		errcode |= PFERR_PRESENT_MASK;
	gfn = gpte_to_gfn_lvl(pte, walker->level);
	gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;
	if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36())
		gfn += pse36_gfn_delta(pte);
	real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access);
	if (real_gpa == UNMAPPED_GVA)
	walker->gfn = real_gpa >> PAGE_SHIFT;
		FNAME(protect_clean_gpte)(&pte_access, pte);
		 * On a write fault, fold the dirty bit into accessed_dirty.
		 * For modes without A/D bits support accessed_dirty will be
		 * always zero.
		accessed_dirty &= pte >>
			(PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);
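		/*
		 * Descriptive note: the shift above moves the guest dirty
		 * bit into the accessed-bit position, so accessed_dirty
		 * stays non-zero only if every pte on the walk was accessed
		 * and, on a write fault, the final pte was also dirty;
		 * otherwise the slower update_accessed_dirty_bits() path
		 * below is taken.
		 */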
	if (unlikely(!accessed_dirty)) {
		ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
		if (unlikely(ret < 0))
	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, pte_access, pt_access);
	errcode |= write_fault | user_fault;
	if (fetch_fault && (mmu->nx ||
			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
		errcode |= PFERR_FETCH_MASK;
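	/*
	 * Descriptive note: the fetch bit is folded into the error code only
	 * when NX or SMEP is in effect, which mirrors how hardware reports
	 * the instruction-fetch (I/D) flag in a page-fault error code.
	 */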
	walker->fault.vector = PF_VECTOR;
	walker->fault.error_code_valid = true;
	walker->fault.error_code = errcode;
	walker->fault.address = addr;
	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
	trace_kvm_mmu_walker_error(walker->fault.error_code);
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr, u32 access)
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
					access);
#if PTTYPE != PTTYPE_EPT
static int FNAME(walk_addr_nested)(struct guest_walker *walker,
				   struct kvm_vcpu *vcpu, gva_t addr,
				   u32 access)
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
					addr, access);
FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
		     u64 *spte, pt_element_t gpte, bool no_dirty_log)
	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
	gfn = gpte_to_gfn(gpte);
	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
	FNAME(protect_clean_gpte)(&pte_access, gpte);
	pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
			no_dirty_log && (pte_access & ACC_WRITE_MASK));
	if (is_error_pfn(pfn))
	 * we call mmu_set_spte() with host_writable = true because
	 * pte_prefetch_gfn_to_pfn always gets a writable pfn.
	mmu_set_spte(vcpu, spte, pte_access, 0, NULL, PT_PAGE_TABLE_LEVEL,
		     gfn, pfn, true, true);
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			      u64 *spte, const void *pte)
	pt_element_t gpte = *(const pt_element_t *)pte;
	FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);
static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
				struct guest_walker *gw, int level)
	pt_element_t curr_pte;
	gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
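	/*
	 * Descriptive note: at the last level the read below is widened to
	 * the PTE_PREFETCH_NUM-entry window containing this gpte and cached
	 * in gw->prefetch_ptes, so FNAME(pte_prefetch)() can later reuse it
	 * without re-reading guest memory.
	 */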
	if (level == PT_PAGE_TABLE_LEVEL) {
		mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
		base_gpa = pte_gpa & ~mask;
		index = (pte_gpa - base_gpa) / sizeof(pt_element_t);
		r = kvm_read_guest_atomic(vcpu->kvm, base_gpa,
				gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
		curr_pte = gw->prefetch_ptes[index];
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa,
				  &curr_pte, sizeof(curr_pte));
	return r || curr_pte != gw->ptes[level - 1];
static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
				u64 *sptep)
	struct kvm_mmu_page *sp;
	pt_element_t *gptep = gw->prefetch_ptes;
	sp = page_header(__pa(sptep));
	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return __direct_pte_prefetch(vcpu, sp, sptep);
	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		if (is_shadow_present_pte(*spte))
		if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 * If the guest tries to write a write-protected page, we need to
 * emulate this operation; we return 1 to indicate this case.
static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			struct guest_walker *gw,
			int write_fault, int hlevel,
			pfn_t pfn, bool map_writable, bool prefault)
	struct kvm_mmu_page *sp = NULL;
	struct kvm_shadow_walk_iterator it;
	unsigned direct_access, access = gw->pt_access;
	int top_level, emulate = 0;
	direct_access = gw->pte_access;
	top_level = vcpu->arch.mmu.root_level;
	if (top_level == PT32E_ROOT_LEVEL)
		top_level = PT32_ROOT_LEVEL;
	 * Verify that the top-level gpte is still there. Since the page
	 * is a root page, it is either write protected (and cannot be
	 * changed from now on) or it is invalid (in which case, we don't
	 * really care if it changes underneath us after this point).
	if (FNAME(gpte_changed)(vcpu, gw, top_level))
		goto out_gpte_changed;
	for (shadow_walk_init(&it, vcpu, addr);
	     shadow_walk_okay(&it) && it.level > gw->level;
	     shadow_walk_next(&it)) {
		clear_sp_write_flooding_count(it.sptep);
		drop_large_spte(vcpu, it.sptep);
		if (!is_shadow_present_pte(*it.sptep)) {
			table_gfn = gw->table_gfn[it.level - 2];
			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
					      false, access, it.sptep);
		 * Verify that the gpte in the page we've just write
		 * protected is still there.
		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
			goto out_gpte_changed;
			link_shadow_page(it.sptep, sp);
	     shadow_walk_okay(&it) && it.level > hlevel;
	     shadow_walk_next(&it)) {
		clear_sp_write_flooding_count(it.sptep);
		validate_direct_spte(vcpu, it.sptep, direct_access);
		drop_large_spte(vcpu, it.sptep);
		if (is_shadow_present_pte(*it.sptep))
		direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
		sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
				      true, direct_access, it.sptep);
		link_shadow_page(it.sptep, sp);
	clear_sp_write_flooding_count(it.sptep);
	mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, &emulate,
		     it.level, gw->gfn, pfn, prefault, map_writable);
	FNAME(pte_prefetch)(vcpu, gw, it.sptep);
		kvm_mmu_put_page(sp, it.sptep);
	kvm_release_pfn_clean(pfn);
 * To see whether the mapped gfn can write its own page table in the
 * current mapping.
 *
 * It is a helper for FNAME(page_fault). When the guest uses a large page
 * to map a writable gfn that is currently in use as a guest page table,
 * kvm should be forced to map it with a small page, because the shadow
 * page created when kvm shadows that page table would stop kvm from using
 * a large page anyway. Doing this early avoids unnecessary #PFs and
 * emulation.
 *
 * @write_fault_to_shadow_pgtable is set to true if the faulting gfn is
 * currently used as a guest page table.
 *
 * Note: the PDPT is not checked for 32-bit PAE guests. That is fine,
 * because the PDPT is always shadowed, which means a gfn used as a PDPT
 * is never mapped with a large page.
FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
			      struct guest_walker *walker, int user_fault,
			      bool *write_fault_to_shadow_pgtable)
	gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
	bool self_changed = false;
	if (!(walker->pte_access & ACC_WRITE_MASK ||
	      (!is_write_protection(vcpu) && !user_fault)))
	for (level = walker->level; level <= walker->max_level; level++) {
		gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];
		self_changed |= !(gfn & mask);
		*write_fault_to_shadow_pgtable |= !gfn;
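		/*
		 * Descriptive note: gfn above is the XOR of the faulting gfn
		 * and a table gfn, so !gfn means the write hits the page
		 * table page itself, while !(gfn & mask) means both lie in
		 * the same large-page-sized region, i.e. a large mapping
		 * would cover the guest page table.
		 */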
 * Page fault handler. There are several causes for a page fault:
 *  - there is no shadow pte for the guest pte
 *  - write access through a shadow pte marked read only so that we can set
 *    the dirty bit
 *  - write access to a shadow pte marked read only so we can update the page
 *    dirty bitmap, when userspace requests it
 *  - mmio access; in this case we will never install a present shadow pte
 *  - normal guest page fault due to the guest pte marked not present, not
 *    writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
			     bool prefault)
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	struct guest_walker walker;
	int level = PT_PAGE_TABLE_LEVEL;
	unsigned long mmu_seq;
	bool map_writable, is_self_change_mapping;
	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
	if (unlikely(error_code & PFERR_RSVD_MASK)) {
		r = handle_mmio_page_fault(vcpu, addr, error_code,
					   mmu_is_nested(vcpu));
		if (likely(r != RET_MMIO_PF_INVALID))
	r = mmu_topup_memory_caches(vcpu);
	 * Look up the guest pte for the faulting address.
	r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);
	 * The page is not mapped by the guest. Let the guest handle it.
		pgprintk("%s: guest page fault\n", __func__);
			inject_page_fault(vcpu, &walker.fault);
	vcpu->arch.write_fault_to_shadow_pgtable = false;
	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
	      &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);
	if (walker.level >= PT_DIRECTORY_LEVEL)
		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn)
		   || is_self_change_mapping;
	if (!force_pt_level) {
		level = min(walker.level, mapping_level(vcpu, walker.gfn));
		walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
			 &map_writable))
	if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr,
				walker.gfn, pfn, walker.pte_access, &r))
	 * Do not change pte_access if the pfn is an mmio page; otherwise
	 * we would cache the incorrect access into the mmio spte.
	if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) &&
	    !is_write_protection(vcpu) && !user_fault &&
	    !is_noslot_pfn(pfn)) {
		walker.pte_access |= ACC_WRITE_MASK;
		walker.pte_access &= ~ACC_USER_MASK;
		 * If we converted a user page to a kernel page (so that the
		 * kernel can write to it when cr0.wp=0), prevent the kernel
		 * from executing it if SMEP is enabled.
		if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
			walker.pte_access &= ~ACC_EXEC_MASK;
	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
	make_mmu_pages_available(vcpu);
		transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
	r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
			 level, pfn, map_writable, prefault);
	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
	spin_unlock(&vcpu->kvm->mmu_lock);
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
	WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);
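	/*
	 * Descriptive note: when 32-bit guest page tables are shadowed by
	 * 64-bit sptes, one shadow page covers only part of a guest page
	 * table; sp->role.quadrant selects which part, and the offset below
	 * locates the first guest pte mapped by this shadow page.
	 */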
		offset = sp->role.quadrant << PT64_LEVEL_BITS;
	return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	vcpu_clear_mmio_info(vcpu, gva);
	 * No need to check the return value here; rmap_can_add() will let
	 * us skip the pte prefetch later.
	mmu_topup_memory_caches(vcpu);
	spin_lock(&vcpu->kvm->mmu_lock);
	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;
		sp = page_header(__pa(sptep));
		if (is_last_spte(*sptep, level)) {
			pte_gpa = FNAME(get_level1_sp_gpa)(sp);
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
			if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
				kvm_flush_remote_tlbs(vcpu->kvm);
			if (!rmap_can_add(vcpu))
			if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
						  sizeof(pt_element_t)))
			FNAME(update_pte)(vcpu, sp, sptep, &gpte);
		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
	spin_unlock(&vcpu->kvm->mmu_lock);
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
			       struct x86_exception *exception)
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;
#if PTTYPE != PTTYPE_EPT
static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
				      u32 access,
				      struct x86_exception *exception)
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 * We should flush all tlbs if an spte is dropped, even though the guest is
 * responsible for it. If we don't, kvm_mmu_notifier_invalidate_page and
 * kvm_mmu_notifier_invalidate_range_start may see that the page is no longer
 * mapped by the guest and skip the tlb flush, allowing the guest to keep
 * accessing the freed page.
 * We increase kvm->tlbs_dirty to delay the tlb flush in this case.
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
	int i, nr_present = 0;
	/* direct kvm_mmu_page can not be unsync. */
	BUG_ON(sp->role.direct);
	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
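	/*
	 * Descriptive note: the loop below re-reads the guest pte backing
	 * each spte in this shadow page and either drops the spte (gpte no
	 * longer valid, or now pointing at a different gfn) or re-derives
	 * its permissions via set_spte(); dropped entries bump tlbs_dirty
	 * as described in the comment above.
	 */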
	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
					  sizeof(pt_element_t)))
		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
			vcpu->kvm->tlbs_dirty++;
		gfn = gpte_to_gfn(gpte);
		pte_access = sp->role.access;
		pte_access &= FNAME(gpte_access)(vcpu, gpte);
		FNAME(protect_clean_gpte)(&pte_access, gpte);
		if (sync_mmio_spte(vcpu->kvm, &sp->spt[i], gfn, pte_access,
				   &nr_present))
		if (gfn != sp->gfns[i]) {
			drop_spte(vcpu->kvm, &sp->spt[i]);
			vcpu->kvm->tlbs_dirty++;
		host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;
		set_spte(vcpu, &sp->spt[i], pte_access,
			 PT_PAGE_TABLE_LEVEL, gfn,
			 spte_to_pfn(sp->spt[i]), true, false,
			 host_writable);
#undef PT_BASE_ADDR_MASK
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn_lvl
#undef PT_GUEST_ACCESSED_MASK
#undef PT_GUEST_DIRTY_MASK
#undef PT_GUEST_DIRTY_SHIFT
#undef PT_GUEST_ACCESSED_SHIFT