KVM: MMU: Move nonpaging_prefetch_page()
author Avi Kivity <avi@qumranet.com>
Thu, 29 May 2008 11:55:03 +0000 (14:55 +0300)
committer Avi Kivity <avi@qumranet.com>
Sun, 20 Jul 2008 09:42:30 +0000 (12:42 +0300)
In preparation for the next patch. No functional change; the function is only moved earlier in the file.

Signed-off-by: Avi Kivity <avi@qumranet.com>
arch/x86/kvm/mmu.c

index 53f1ed8..62741b7 100644
@@ -776,6 +776,15 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
        BUG();
 }
 
+static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
+                                   struct kvm_mmu_page *sp)
+{
+       int i;
+
+       for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
+               sp->spt[i] = shadow_trap_nonpresent_pte;
+}
+
 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 {
        unsigned index;
@@ -1213,15 +1222,6 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 }
 
 
-static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
-                                   struct kvm_mmu_page *sp)
-{
-       int i;
-
-       for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
-               sp->spt[i] = shadow_trap_nonpresent_pte;
-}
-
 static void mmu_free_roots(struct kvm_vcpu *vcpu)
 {
        int i;
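
For context, a minimal sketch of how this helper is wired up. The patch itself only shows nonpaging_prefetch_page(); the init function below, its name, and the struct kvm_mmu fields it fills in are reconstructed assumptions reflecting the mmu.c of this era, not part of this change. The idea is that the nonpaging MMU context installs nonpaging_prefetch_page() as its prefetch_page hook, so a freshly created shadow page starts with every entry set to shadow_trap_nonpresent_pte and faults back into the MMU on first use. Moving the definition above kvm_mmu_lookup_page() presumably lets the next patch call it from earlier in the file without a forward declaration.

/*
 * Sketch (assumed wiring, not part of this patch): the nonpaging
 * context installs nonpaging_prefetch_page() as its prefetch_page
 * callback, so new shadow pages start out fully "trapping".
 */
static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}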