KVM: MMU: Clean up MMU functions to take struct kvm when appropriate
[cascardo/linux.git] drivers/kvm/mmu.c
index 6d84d30..a5ca945 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -90,7 +90,8 @@ static int dbg = 1;
 
 #define PT32_DIR_PSE36_SIZE 4
 #define PT32_DIR_PSE36_SHIFT 13
-#define PT32_DIR_PSE36_MASK (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
+#define PT32_DIR_PSE36_MASK \
+       (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
 
 
 #define PT_FIRST_AVAIL_BITS_SHIFT 9
@@ -103,7 +104,7 @@ static int dbg = 1;
 #define PT64_LEVEL_BITS 9
 
 #define PT64_LEVEL_SHIFT(level) \
-               ( PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS )
+               (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
 
 #define PT64_LEVEL_MASK(level) \
                (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
@@ -115,7 +116,7 @@ static int dbg = 1;
 #define PT32_LEVEL_BITS 10
 
 #define PT32_LEVEL_SHIFT(level) \
-               ( PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS )
+               (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
 
 #define PT32_LEVEL_MASK(level) \
                (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
@@ -156,6 +157,16 @@ static struct kmem_cache *pte_chain_cache;
 static struct kmem_cache *rmap_desc_cache;
 static struct kmem_cache *mmu_page_header_cache;
 
+static u64 __read_mostly shadow_trap_nonpresent_pte;
+static u64 __read_mostly shadow_notrap_nonpresent_pte;
+
+void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
+{
+       shadow_trap_nonpresent_pte = trap_pte;
+       shadow_notrap_nonpresent_pte = notrap_pte;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
+
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
        return vcpu->cr0 & X86_CR0_WP;
@@ -176,6 +187,13 @@ static int is_present_pte(unsigned long pte)
        return pte & PT_PRESENT_MASK;
 }
 
+static int is_shadow_present_pte(u64 pte)
+{
+       pte &= ~PT_SHADOW_IO_MARK;
+       return pte != shadow_trap_nonpresent_pte
+               && pte != shadow_notrap_nonpresent_pte;
+}
+
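Taken together, kvm_mmu_set_nonpresent_ptes() and is_shadow_present_pte() split "not present" into two distinguishable shadow pte states. The following is a minimal sketch of the resulting classification; example_classify() is a hypothetical helper, not part of the patch, and with the default of both values being zero every non-present spte still falls into the trapping case:

enum example_spte_state { SPTE_TRAP, SPTE_NOTRAP, SPTE_PRESENT };

/* Hypothetical helper mirroring is_shadow_present_pte(): classify a
 * shadow pte against the two registered non-present values. */
static enum example_spte_state example_classify(u64 pte)
{
        pte &= ~PT_SHADOW_IO_MARK;
        if (pte == shadow_trap_nonpresent_pte)
                return SPTE_TRAP;       /* guest access exits to KVM */
        if (pte == shadow_notrap_nonpresent_pte)
                return SPTE_NOTRAP;     /* guest takes its own #PF */
        return SPTE_PRESENT;            /* a real translation */
}
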
 static int is_writeble_pte(unsigned long pte)
 {
        return pte & PT_WRITABLE_MASK;
@@ -259,7 +277,7 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
                                   rmap_desc_cache, 1);
        if (r)
                goto out;
-       r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 4);
+       r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 8);
        if (r)
                goto out;
        r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
@@ -309,36 +327,53 @@ static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
        kfree(rd);
 }
 
+/*
+ * Take a gfn and return the reverse mapping to it.
+ * Note: gfn must be unaliased before this function gets called.
+ */
+
+static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
+{
+       struct kvm_memory_slot *slot;
+
+       slot = gfn_to_memslot(kvm, gfn);
+       return &slot->rmap[gfn - slot->base_gfn];
+}
+
 /*
  * Reverse mapping data structures:
  *
- * If page->private bit zero is zero, then page->private points to the
- * shadow page table entry that points to page_address(page).
+ * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
+ * that points to page_address(page).
  *
- * If page->private bit zero is one, (then page->private & ~1) points
- * to a struct kvm_rmap_desc containing more mappings.
+ * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
+ * containing more mappings.
  */
-static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte)
+static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 {
-       struct page *page;
+       struct kvm_mmu_page *page;
        struct kvm_rmap_desc *desc;
+       unsigned long *rmapp;
        int i;
 
        if (!is_rmap_pte(*spte))
                return;
-       page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
-       if (!page_private(page)) {
+       gfn = unalias_gfn(vcpu->kvm, gfn);
+       page = page_header(__pa(spte));
+       page->gfns[spte - page->spt] = gfn;
+       rmapp = gfn_to_rmap(vcpu->kvm, gfn);
+       if (!*rmapp) {
                rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
-               set_page_private(page,(unsigned long)spte);
-       } else if (!(page_private(page) & 1)) {
+               *rmapp = (unsigned long)spte;
+       } else if (!(*rmapp & 1)) {
                rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
                desc = mmu_alloc_rmap_desc(vcpu);
-               desc->shadow_ptes[0] = (u64 *)page_private(page);
+               desc->shadow_ptes[0] = (u64 *)*rmapp;
                desc->shadow_ptes[1] = spte;
-               set_page_private(page,(unsigned long)desc | 1);
+               *rmapp = (unsigned long)desc | 1;
        } else {
                rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
-               desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
+               desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
                while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
                        desc = desc->more;
                if (desc->shadow_ptes[RMAP_EXT-1]) {
@@ -351,7 +386,7 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte)
        }
 }
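For reference, walking one rmap slot under the encoding described in the comment above rmap_add() looks roughly like the sketch below; example_for_each_spte() is a hypothetical helper, not part of the patch, and assumes gfn has already been unaliased:

static void example_for_each_spte(struct kvm *kvm, gfn_t gfn,
                                  void (*fn)(u64 *spte))
{
        unsigned long *rmapp = gfn_to_rmap(kvm, gfn);
        struct kvm_rmap_desc *desc;
        int i;

        if (!*rmapp)
                return;                         /* no mappings */
        if (!(*rmapp & 1)) {
                fn((u64 *)*rmapp);              /* single spte stored directly */
                return;
        }
        /* bit zero set: a chain of kvm_rmap_desc blocks */
        for (desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
             desc; desc = desc->more)
                for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
                        fn(desc->shadow_ptes[i]);
}
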
 
-static void rmap_desc_remove_entry(struct page *page,
+static void rmap_desc_remove_entry(unsigned long *rmapp,
                                   struct kvm_rmap_desc *desc,
                                   int i,
                                   struct kvm_rmap_desc *prev_desc)
@@ -365,44 +400,46 @@ static void rmap_desc_remove_entry(struct page *page,
        if (j != 0)
                return;
        if (!prev_desc && !desc->more)
-               set_page_private(page,(unsigned long)desc->shadow_ptes[0]);
+               *rmapp = (unsigned long)desc->shadow_ptes[0];
        else
                if (prev_desc)
                        prev_desc->more = desc->more;
                else
-                       set_page_private(page,(unsigned long)desc->more | 1);
+                       *rmapp = (unsigned long)desc->more | 1;
        mmu_free_rmap_desc(desc);
 }
 
-static void rmap_remove(u64 *spte)
+static void rmap_remove(struct kvm *kvm, u64 *spte)
 {
-       struct page *page;
        struct kvm_rmap_desc *desc;
        struct kvm_rmap_desc *prev_desc;
+       struct kvm_mmu_page *page;
+       unsigned long *rmapp;
        int i;
 
        if (!is_rmap_pte(*spte))
                return;
-       page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
-       if (!page_private(page)) {
+       page = page_header(__pa(spte));
+       rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
+       if (!*rmapp) {
                printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
                BUG();
-       } else if (!(page_private(page) & 1)) {
+       } else if (!(*rmapp & 1)) {
                rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
-               if ((u64 *)page_private(page) != spte) {
+               if ((u64 *)*rmapp != spte) {
                        printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
                               spte, *spte);
                        BUG();
                }
-               set_page_private(page,0);
+               *rmapp = 0;
        } else {
                rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
-               desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
+               desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
                prev_desc = NULL;
                while (desc) {
                        for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
                                if (desc->shadow_ptes[i] == spte) {
-                                       rmap_desc_remove_entry(page,
+                                       rmap_desc_remove_entry(rmapp,
                                                               desc, i,
                                                               prev_desc);
                                        return;
@@ -416,28 +453,25 @@ static void rmap_remove(u64 *spte)
 
 static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 {
-       struct kvm *kvm = vcpu->kvm;
-       struct page *page;
        struct kvm_rmap_desc *desc;
+       unsigned long *rmapp;
        u64 *spte;
 
-       page = gfn_to_page(kvm, gfn);
-       BUG_ON(!page);
+       gfn = unalias_gfn(vcpu->kvm, gfn);
+       rmapp = gfn_to_rmap(vcpu->kvm, gfn);
 
-       while (page_private(page)) {
-               if (!(page_private(page) & 1))
-                       spte = (u64 *)page_private(page);
+       while (*rmapp) {
+               if (!(*rmapp & 1))
+                       spte = (u64 *)*rmapp;
                else {
-                       desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
+                       desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
                        spte = desc->shadow_ptes[0];
                }
                BUG_ON(!spte);
-               BUG_ON((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT
-                      != page_to_pfn(page));
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                BUG_ON(!(*spte & PT_WRITABLE_MASK));
                rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
-               rmap_remove(spte);
+               rmap_remove(vcpu->kvm, spte);
                set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
                kvm_flush_remote_tlbs(vcpu->kvm);
        }
@@ -450,7 +484,7 @@ static int is_empty_shadow_page(u64 *spt)
        u64 *end;
 
        for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
-               if (*pos != 0) {
+               if ((*pos & ~PT_SHADOW_IO_MARK) != shadow_trap_nonpresent_pte) {
                        printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
                               pos, *pos);
                        return 0;
@@ -465,6 +499,7 @@ static void kvm_mmu_free_page(struct kvm *kvm,
        ASSERT(is_empty_shadow_page(page_head->spt));
        list_del(&page_head->link);
        __free_page(virt_to_page(page_head->spt));
+       __free_page(virt_to_page(page_head->gfns));
        kfree(page_head);
        ++kvm->n_free_mmu_pages;
 }
@@ -485,6 +520,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
        page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
                                      sizeof *page);
        page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
+       page->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
        set_page_private(virt_to_page(page->spt), (unsigned long)page);
        list_add(&page->link, &vcpu->kvm->active_mmu_pages);
        ASSERT(is_empty_shadow_page(page->spt));
@@ -570,7 +606,7 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
        BUG();
 }
 
-static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu,
+static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm,
                                                gfn_t gfn)
 {
        unsigned index;
@@ -580,7 +616,7 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu,
 
        pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-       bucket = &vcpu->kvm->mmu_page_hash[index];
+       bucket = &kvm->mmu_page_hash[index];
        hlist_for_each_entry(page, node, bucket, hash_link)
                if (page->gfn == gfn && !page->role.metaphysical) {
                        pgprintk("%s: found role %x\n",
@@ -632,6 +668,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        page->gfn = gfn;
        page->role = role;
        hlist_add_head(&page->hash_link, bucket);
+       vcpu->mmu.prefetch_page(vcpu, page);
        if (!metaphysical)
                rmap_write_protect(vcpu, gfn);
        return page;
@@ -648,9 +685,9 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
 
        if (page->role.level == PT_PAGE_TABLE_LEVEL) {
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
-                       if (pt[i] & PT_PRESENT_MASK)
-                               rmap_remove(&pt[i]);
-                       pt[i] = 0;
+                       if (is_shadow_present_pte(pt[i]))
+                               rmap_remove(kvm, &pt[i]);
+                       pt[i] = shadow_trap_nonpresent_pte;
                }
                kvm_flush_remote_tlbs(kvm);
                return;
@@ -659,8 +696,8 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                ent = pt[i];
 
-               pt[i] = 0;
-               if (!(ent & PT_PRESENT_MASK))
+               pt[i] = shadow_trap_nonpresent_pte;
+               if (!is_shadow_present_pte(ent))
                        continue;
                ent &= PT64_BASE_ADDR_MASK;
                mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
@@ -674,6 +711,15 @@ static void kvm_mmu_put_page(struct kvm_mmu_page *page,
        mmu_page_remove_parent_pte(page, parent_pte);
 }
 
+static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
+{
+       int i;
+
+       for (i = 0; i < KVM_MAX_VCPUS; ++i)
+               if (kvm->vcpus[i])
+                       kvm->vcpus[i]->last_pte_updated = NULL;
+}
+
 static void kvm_mmu_zap_page(struct kvm *kvm,
                             struct kvm_mmu_page *page)
 {
@@ -691,7 +737,7 @@ static void kvm_mmu_zap_page(struct kvm *kvm,
                }
                BUG_ON(!parent_pte);
                kvm_mmu_put_page(page, parent_pte);
-               set_shadow_pte(parent_pte, 0);
+               set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
        }
        kvm_mmu_page_unlink_children(kvm, page);
        if (!page->root_count) {
@@ -699,9 +745,44 @@ static void kvm_mmu_zap_page(struct kvm *kvm,
                kvm_mmu_free_page(kvm, page);
        } else
                list_move(&page->link, &kvm->active_mmu_pages);
+       kvm_mmu_reset_last_pte_updated(kvm);
+}
+
+/*
+ * Changing the number of mmu pages allocated to the vm.
+ * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
+ */
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
+{
+       /*
+        * If we set the number of mmu pages to be smaller than the
+        * number of active pages, we must free some mmu pages before we
+        * change the value.
+        */
+
+       if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
+           kvm_nr_mmu_pages) {
+               int n_used_mmu_pages = kvm->n_alloc_mmu_pages
+                                      - kvm->n_free_mmu_pages;
+
+               while (n_used_mmu_pages > kvm_nr_mmu_pages) {
+                       struct kvm_mmu_page *page;
+
+                       page = container_of(kvm->active_mmu_pages.prev,
+                                           struct kvm_mmu_page, link);
+                       kvm_mmu_zap_page(kvm, page);
+                       n_used_mmu_pages--;
+               }
+               kvm->n_free_mmu_pages = 0;
+       } else
+               kvm->n_free_mmu_pages += kvm_nr_mmu_pages
+                                        - kvm->n_alloc_mmu_pages;
+
+       kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
 }
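To illustrate the accounting with made-up numbers: with n_alloc_mmu_pages = 64 and n_free_mmu_pages = 24, 64 - 24 = 40 pages are in use. Shrinking to kvm_nr_mmu_pages = 32 zaps 40 - 32 = 8 pages from the tail of active_mmu_pages (the least recently used) and leaves n_free_mmu_pages at 0; growing to 96 instead takes the else branch and raises n_free_mmu_pages by 96 - 64 = 32, to 56. In both cases n_alloc_mmu_pages ends up equal to the requested kvm_nr_mmu_pages.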
 
-static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
        unsigned index;
        struct hlist_head *bucket;
@@ -712,25 +793,25 @@ static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
        pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
        r = 0;
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-       bucket = &vcpu->kvm->mmu_page_hash[index];
+       bucket = &kvm->mmu_page_hash[index];
        hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
                if (page->gfn == gfn && !page->role.metaphysical) {
                        pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
                                 page->role.word);
-                       kvm_mmu_zap_page(vcpu->kvm, page);
+                       kvm_mmu_zap_page(kvm, page);
                        r = 1;
                }
        return r;
 }
 
-static void mmu_unshadow(struct kvm_vcpu *vcpu, gfn_t gfn)
+static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 {
        struct kvm_mmu_page *page;
 
-       while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
+       while ((page = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
                pgprintk("%s: zap %lx %x\n",
                         __FUNCTION__, gfn, page->role.word);
-               kvm_mmu_zap_page(vcpu->kvm, page);
+               kvm_mmu_zap_page(kvm, page);
        }
 }
 
@@ -798,17 +879,17 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 
                if (level == 1) {
                        pte = table[index];
-                       if (is_present_pte(pte) && is_writeble_pte(pte))
+                       if (is_shadow_present_pte(pte) && is_writeble_pte(pte))
                                return 0;
                        mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
                        page_header_update_slot(vcpu->kvm, table, v);
                        table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
                                                                PT_USER_MASK;
-                       rmap_add(vcpu, &table[index]);
+                       rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
                        return 0;
                }
 
-               if (table[index] == 0) {
+               if (table[index] == shadow_trap_nonpresent_pte) {
                        struct kvm_mmu_page *new_table;
                        gfn_t pseudo_gfn;
 
@@ -829,6 +910,15 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
        }
 }
 
+static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
+                                   struct kvm_mmu_page *sp)
+{
+       int i;
+
+       for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
+               sp->spt[i] = shadow_trap_nonpresent_pte;
+}
+
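nonpaging_prefetch_page() only installs the trapping value; the paging32/paging64 prefetch_page hooks wired up below come from paging_tmpl.h and are not part of this hunk. Conceptually they consult the guest page table, along the lines of the hedged sketch below, where example_read_guest_pte() is hypothetical and stands in for the guest-memory accessors the real code uses:

static void example_paging_prefetch_page(struct kvm_vcpu *vcpu,
                                         struct kvm_mmu_page *sp)
{
        int i;

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                u64 gpte = example_read_guest_pte(vcpu, sp, i); /* hypothetical */

                if (!is_present_pte(gpte))
                        /* not present in the guest: deliver the guest's own
                         * #PF without a VM exit */
                        sp->spt[i] = shadow_notrap_nonpresent_pte;
                else
                        /* present in the guest: keep trapping so the spte
                         * can be shadowed on first access */
                        sp->spt[i] = shadow_trap_nonpresent_pte;
        }
}
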
 static void mmu_free_roots(struct kvm_vcpu *vcpu)
 {
        int i;
@@ -943,6 +1033,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
        context->page_fault = nonpaging_page_fault;
        context->gva_to_gpa = nonpaging_gva_to_gpa;
        context->free = nonpaging_free;
+       context->prefetch_page = nonpaging_prefetch_page;
        context->root_level = 0;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        context->root_hpa = INVALID_PAGE;
@@ -989,6 +1080,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging64_page_fault;
        context->gva_to_gpa = paging64_gva_to_gpa;
+       context->prefetch_page = paging64_prefetch_page;
        context->free = paging_free;
        context->root_level = level;
        context->shadow_root_level = level;
@@ -1009,6 +1101,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
        context->page_fault = paging32_page_fault;
        context->gva_to_gpa = paging32_gva_to_gpa;
        context->free = paging_free;
+       context->prefetch_page = paging32_prefetch_page;
        context->root_level = PT32_ROOT_LEVEL;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        context->root_hpa = INVALID_PAGE;
@@ -1049,6 +1142,7 @@ int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
        destroy_kvm_mmu(vcpu);
        return init_kvm_mmu(vcpu);
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
 
 int kvm_mmu_load(struct kvm_vcpu *vcpu)
 {
@@ -1080,30 +1174,40 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
        struct kvm_mmu_page *child;
 
        pte = *spte;
-       if (is_present_pte(pte)) {
+       if (is_shadow_present_pte(pte)) {
                if (page->role.level == PT_PAGE_TABLE_LEVEL)
-                       rmap_remove(spte);
+                       rmap_remove(vcpu->kvm, spte);
                else {
                        child = page_header(pte & PT64_BASE_ADDR_MASK);
                        mmu_page_remove_parent_pte(child, spte);
                }
        }
-       *spte = 0;
+       set_shadow_pte(spte, shadow_trap_nonpresent_pte);
        kvm_flush_remote_tlbs(vcpu->kvm);
 }
 
 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
                                  struct kvm_mmu_page *page,
                                  u64 *spte,
-                                 const void *new, int bytes)
+                                 const void *new, int bytes,
+                                 int offset_in_pte)
 {
        if (page->role.level != PT_PAGE_TABLE_LEVEL)
                return;
 
        if (page->role.glevels == PT32_ROOT_LEVEL)
-               paging32_update_pte(vcpu, page, spte, new, bytes);
+               paging32_update_pte(vcpu, page, spte, new, bytes,
+                                   offset_in_pte);
        else
-               paging64_update_pte(vcpu, page, spte, new, bytes);
+               paging64_update_pte(vcpu, page, spte, new, bytes,
+                                   offset_in_pte);
+}
+
+static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
+{
+       u64 *spte = vcpu->last_pte_updated;
+
+       return !!(spte && (*spte & PT_ACCESSED_MASK));
 }
 
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
@@ -1125,13 +1229,16 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        int npte;
 
        pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
-       if (gfn == vcpu->last_pt_write_gfn) {
+       kvm_mmu_audit(vcpu, "pre pte write");
+       if (gfn == vcpu->last_pt_write_gfn
+           && !last_updated_pte_accessed(vcpu)) {
                ++vcpu->last_pt_write_count;
                if (vcpu->last_pt_write_count >= 3)
                        flooded = 1;
        } else {
                vcpu->last_pt_write_gfn = gfn;
                vcpu->last_pt_write_count = 1;
+               vcpu->last_pte_updated = NULL;
        }
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
        bucket = &vcpu->kvm->mmu_page_hash[index];
@@ -1180,17 +1287,19 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                spte = &page->spt[page_offset / sizeof(*spte)];
                while (npte--) {
                        mmu_pte_write_zap_pte(vcpu, page, spte);
-                       mmu_pte_write_new_pte(vcpu, page, spte, new, bytes);
+                       mmu_pte_write_new_pte(vcpu, page, spte, new, bytes,
+                                             page_offset & (pte_size - 1));
                        ++spte;
                }
        }
+       kvm_mmu_audit(vcpu, "post pte write");
 }
 
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 {
        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
 
-       return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
+       return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
 }
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
@@ -1223,8 +1332,10 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 
        ASSERT(vcpu);
 
-       vcpu->kvm->n_free_mmu_pages = KVM_NUM_MMU_PAGES;
-
+       if (vcpu->kvm->n_requested_mmu_pages)
+               vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
+       else
+               vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
        /*
         * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
         * Therefore we need to allocate shadow page tables in the first
@@ -1284,7 +1395,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
                        /* avoid RMW */
                        if (pt[i] & PT_WRITABLE_MASK) {
-                               rmap_remove(&pt[i]);
+                               rmap_remove(kvm, &pt[i]);
                                pt[i] &= ~PT_WRITABLE_MASK;
                        }
        }
@@ -1358,22 +1469,34 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
        for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
                u64 ent = pt[i];
 
-               if (!(ent & PT_PRESENT_MASK))
+               if (ent == shadow_trap_nonpresent_pte)
                        continue;
 
                va = canonicalize(va);
-               if (level > 1)
+               if (level > 1) {
+                       if (ent == shadow_notrap_nonpresent_pte)
+                               printk(KERN_ERR "audit: (%s) nontrapping pte"
+                                      " in nonleaf level: levels %d gva %lx"
+                                      " level %d pte %llx\n", audit_msg,
+                                      vcpu->mmu.root_level, va, level, ent);
+
                        audit_mappings_page(vcpu, ent, va, level - 1);
-               else {
+               } else {
                        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
                        hpa_t hpa = gpa_to_hpa(vcpu, gpa);
 
-                       if ((ent & PT_PRESENT_MASK)
+                       if (is_shadow_present_pte(ent)
                            && (ent & PT64_BASE_ADDR_MASK) != hpa)
-                               printk(KERN_ERR "audit error: (%s) levels %d"
-                                      " gva %lx gpa %llx hpa %llx ent %llx\n",
+                               printk(KERN_ERR "xx audit error: (%s) levels %d"
+                                      " gva %lx gpa %llx hpa %llx ent %llx %d\n",
                                       audit_msg, vcpu->mmu.root_level,
-                                      va, gpa, hpa, ent);
+                                      va, gpa, hpa, ent,
+                                      is_shadow_present_pte(ent));
+                       else if (ent == shadow_notrap_nonpresent_pte
+                                && !is_error_hpa(hpa))
+                               printk(KERN_ERR "audit: (%s) notrap shadow,"
+                                      " valid guest gva %lx\n", audit_msg, va);
+
                }
        }
 }
@@ -1403,15 +1526,15 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
                struct kvm_rmap_desc *d;
 
                for (j = 0; j < m->npages; ++j) {
-                       struct page *page = m->phys_mem[j];
+                       unsigned long *rmapp = &m->rmap[j];
 
-                       if (!page->private)
+                       if (!*rmapp)
                                continue;
-                       if (!(page->private & 1)) {
+                       if (!(*rmapp & 1)) {
                                ++nmaps;
                                continue;
                        }
-                       d = (struct kvm_rmap_desc *)(page->private & ~1ul);
+                       d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
                        while (d) {
                                for (k = 0; k < RMAP_EXT; ++k)
                                        if (d->shadow_ptes[k])
@@ -1463,18 +1586,18 @@ static void audit_rmap(struct kvm_vcpu *vcpu)
 static void audit_write_protection(struct kvm_vcpu *vcpu)
 {
        struct kvm_mmu_page *page;
+       struct kvm_memory_slot *slot;
+       unsigned long *rmapp;
+       gfn_t gfn;
 
        list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
-               hfn_t hfn;
-               struct page *pg;
-
                if (page->role.metaphysical)
                        continue;
 
-               hfn = gpa_to_hpa(vcpu, (gpa_t)page->gfn << PAGE_SHIFT)
-                       >> PAGE_SHIFT;
-               pg = pfn_to_page(hfn);
-               if (pg->private)
+               slot = gfn_to_memslot(vcpu->kvm, page->gfn);
+               gfn = unalias_gfn(vcpu->kvm, page->gfn);
+               rmapp = &slot->rmap[gfn - slot->base_gfn];
+               if (*rmapp)
                        printk(KERN_ERR "%s: (%s) shadow page has writable"
                               " mappings: gfn %lx role %x\n",
                               __FUNCTION__, audit_msg, page->gfn,