KVM: Make mark_page_dirty() work for aliased pages too.
drivers/kvm/kvm_main.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  *
9  * Authors:
10  *   Avi Kivity   <avi@qumranet.com>
11  *   Yaniv Kamay  <yaniv@qumranet.com>
12  *
13  * This work is licensed under the terms of the GNU GPL, version 2.  See
14  * the COPYING file in the top-level directory.
15  *
16  */
17
18 #include "kvm.h"
19 #include "x86.h"
20 #include "x86_emulate.h"
21 #include "irq.h"
22
23 #include <linux/kvm.h>
24 #include <linux/module.h>
25 #include <linux/errno.h>
26 #include <linux/percpu.h>
27 #include <linux/gfp.h>
28 #include <linux/mm.h>
29 #include <linux/miscdevice.h>
30 #include <linux/vmalloc.h>
31 #include <linux/reboot.h>
32 #include <linux/debugfs.h>
33 #include <linux/highmem.h>
34 #include <linux/file.h>
35 #include <linux/sysdev.h>
36 #include <linux/cpu.h>
37 #include <linux/sched.h>
38 #include <linux/cpumask.h>
39 #include <linux/smp.h>
40 #include <linux/anon_inodes.h>
41 #include <linux/profile.h>
42 #include <linux/kvm_para.h>
43 #include <linux/pagemap.h>
44 #include <linux/mman.h>
45
46 #include <asm/processor.h>
47 #include <asm/msr.h>
48 #include <asm/io.h>
49 #include <asm/uaccess.h>
50 #include <asm/desc.h>
51
52 MODULE_AUTHOR("Qumranet");
53 MODULE_LICENSE("GPL");
54
55 static DEFINE_SPINLOCK(kvm_lock);
56 static LIST_HEAD(vm_list);
57
58 static cpumask_t cpus_hardware_enabled;
59
60 struct kvm_x86_ops *kvm_x86_ops;
61 struct kmem_cache *kvm_vcpu_cache;
62 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
63
64 static __read_mostly struct preempt_ops kvm_preempt_ops;
65
66 #define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)
67
68 static struct kvm_stats_debugfs_item {
69         const char *name;
70         int offset;
71         struct dentry *dentry;
72 } debugfs_entries[] = {
73         { "pf_fixed", STAT_OFFSET(pf_fixed) },
74         { "pf_guest", STAT_OFFSET(pf_guest) },
75         { "tlb_flush", STAT_OFFSET(tlb_flush) },
76         { "invlpg", STAT_OFFSET(invlpg) },
77         { "exits", STAT_OFFSET(exits) },
78         { "io_exits", STAT_OFFSET(io_exits) },
79         { "mmio_exits", STAT_OFFSET(mmio_exits) },
80         { "signal_exits", STAT_OFFSET(signal_exits) },
81         { "irq_window", STAT_OFFSET(irq_window_exits) },
82         { "halt_exits", STAT_OFFSET(halt_exits) },
83         { "halt_wakeup", STAT_OFFSET(halt_wakeup) },
84         { "request_irq", STAT_OFFSET(request_irq_exits) },
85         { "irq_exits", STAT_OFFSET(irq_exits) },
86         { "light_exits", STAT_OFFSET(light_exits) },
87         { "efer_reload", STAT_OFFSET(efer_reload) },
88         { NULL }
89 };
90
91 static struct dentry *debugfs_dir;
92
93 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
94                            unsigned long arg);
95
96 static inline int valid_vcpu(int n)
97 {
98         return likely(n >= 0 && n < KVM_MAX_VCPUS);
99 }
100
101 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
102 {
103         if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
104                 return;
105
106         vcpu->guest_fpu_loaded = 1;
107         fx_save(&vcpu->host_fx_image);
108         fx_restore(&vcpu->guest_fx_image);
109 }
110 EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
111
112 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
113 {
114         if (!vcpu->guest_fpu_loaded)
115                 return;
116
117         vcpu->guest_fpu_loaded = 0;
118         fx_save(&vcpu->guest_fx_image);
119         fx_restore(&vcpu->host_fx_image);
120 }
121 EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
122
123 /*
124  * Switches to the specified vcpu, until a matching vcpu_put()
125  */
126 void vcpu_load(struct kvm_vcpu *vcpu)
127 {
128         int cpu;
129
130         mutex_lock(&vcpu->mutex);
131         cpu = get_cpu();
132         preempt_notifier_register(&vcpu->preempt_notifier);
133         kvm_arch_vcpu_load(vcpu, cpu);
134         put_cpu();
135 }
136
137 void vcpu_put(struct kvm_vcpu *vcpu)
138 {
139         preempt_disable();
140         kvm_arch_vcpu_put(vcpu);
141         preempt_notifier_unregister(&vcpu->preempt_notifier);
142         preempt_enable();
143         mutex_unlock(&vcpu->mutex);
144 }
145
146 static void ack_flush(void *_completed)
147 {
148 }
149
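/*
 * Request a TLB flush from every vcpu: set KVM_REQ_TLB_FLUSH on each one,
 * then IPI the cpus that are currently running a vcpu in guest mode so the
 * request is noticed before the guest is re-entered.
 */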
150 void kvm_flush_remote_tlbs(struct kvm *kvm)
151 {
152         int i, cpu;
153         cpumask_t cpus;
154         struct kvm_vcpu *vcpu;
155
156         cpus_clear(cpus);
157         for (i = 0; i < KVM_MAX_VCPUS; ++i) {
158                 vcpu = kvm->vcpus[i];
159                 if (!vcpu)
160                         continue;
161                 if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
162                         continue;
163                 cpu = vcpu->cpu;
164                 if (cpu != -1 && cpu != raw_smp_processor_id())
165                         cpu_set(cpu, cpus);
166         }
167         smp_call_function_mask(cpus, ack_flush, NULL, 1);
168 }
169
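/*
 * Common vcpu initialization: set up the mutex and wait queue, allocate the
 * shared kvm_run and PIO pages, create the MMU context and, if the irqchip
 * is emulated in the kernel, the local APIC.
 */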
170 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
171 {
172         struct page *page;
173         int r;
174
175         mutex_init(&vcpu->mutex);
176         vcpu->cpu = -1;
177         vcpu->mmu.root_hpa = INVALID_PAGE;
178         vcpu->kvm = kvm;
179         vcpu->vcpu_id = id;
180         if (!irqchip_in_kernel(kvm) || id == 0)
181                 vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
182         else
183                 vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
184         init_waitqueue_head(&vcpu->wq);
185
186         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
187         if (!page) {
188                 r = -ENOMEM;
189                 goto fail;
190         }
191         vcpu->run = page_address(page);
192
193         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
194         if (!page) {
195                 r = -ENOMEM;
196                 goto fail_free_run;
197         }
198         vcpu->pio_data = page_address(page);
199
200         r = kvm_mmu_create(vcpu);
201         if (r < 0)
202                 goto fail_free_pio_data;
203
204         if (irqchip_in_kernel(kvm)) {
205                 r = kvm_create_lapic(vcpu);
206                 if (r < 0)
207                         goto fail_mmu_destroy;
208         }
209
210         return 0;
211
212 fail_mmu_destroy:
213         kvm_mmu_destroy(vcpu);
214 fail_free_pio_data:
215         free_page((unsigned long)vcpu->pio_data);
216 fail_free_run:
217         free_page((unsigned long)vcpu->run);
218 fail:
219         return r;
220 }
221 EXPORT_SYMBOL_GPL(kvm_vcpu_init);
222
223 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
224 {
225         kvm_free_lapic(vcpu);
226         kvm_mmu_destroy(vcpu);
227         free_page((unsigned long)vcpu->pio_data);
228         free_page((unsigned long)vcpu->run);
229 }
230 EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
231
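/*
 * Allocate a new VM, initialize its PIO/MMIO buses and MMU page list, and
 * link it into the global vm_list under kvm_lock.
 */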
232 static struct kvm *kvm_create_vm(void)
233 {
234         struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
235
236         if (!kvm)
237                 return ERR_PTR(-ENOMEM);
238
239         kvm_io_bus_init(&kvm->pio_bus);
240         mutex_init(&kvm->lock);
241         INIT_LIST_HEAD(&kvm->active_mmu_pages);
242         kvm_io_bus_init(&kvm->mmio_bus);
243         spin_lock(&kvm_lock);
244         list_add(&kvm->vm_list, &vm_list);
245         spin_unlock(&kvm_lock);
246         return kvm;
247 }
248
249 /*
250  * Free any memory in @free but not in @dont.
251  */
252 static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
253                                   struct kvm_memory_slot *dont)
254 {
255         if (!dont || free->rmap != dont->rmap)
256                 vfree(free->rmap);
257
258         if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
259                 vfree(free->dirty_bitmap);
260
261         free->npages = 0;
262         free->dirty_bitmap = NULL;
263         free->rmap = NULL;
264 }
265
266 static void kvm_free_physmem(struct kvm *kvm)
267 {
268         int i;
269
270         for (i = 0; i < kvm->nmemslots; ++i)
271                 kvm_free_physmem_slot(&kvm->memslots[i], NULL);
272 }
273
274 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
275 {
276         vcpu_load(vcpu);
277         kvm_mmu_unload(vcpu);
278         vcpu_put(vcpu);
279 }
280
281 static void kvm_free_vcpus(struct kvm *kvm)
282 {
283         unsigned int i;
284
285         /*
286          * Unpin any mmu pages first.
287          */
288         for (i = 0; i < KVM_MAX_VCPUS; ++i)
289                 if (kvm->vcpus[i])
290                         kvm_unload_vcpu_mmu(kvm->vcpus[i]);
291         for (i = 0; i < KVM_MAX_VCPUS; ++i) {
292                 if (kvm->vcpus[i]) {
293                         kvm_x86_ops->vcpu_free(kvm->vcpus[i]);
294                         kvm->vcpus[i] = NULL;
295                 }
296         }
297
298 }
299
300 static void kvm_destroy_vm(struct kvm *kvm)
301 {
302         spin_lock(&kvm_lock);
303         list_del(&kvm->vm_list);
304         spin_unlock(&kvm_lock);
305         kvm_io_bus_destroy(&kvm->pio_bus);
306         kvm_io_bus_destroy(&kvm->mmio_bus);
307         kfree(kvm->vpic);
308         kfree(kvm->vioapic);
309         kvm_free_vcpus(kvm);
310         kvm_free_physmem(kvm);
311         kfree(kvm);
312 }
313
314 static int kvm_vm_release(struct inode *inode, struct file *filp)
315 {
316         struct kvm *kvm = filp->private_data;
317
318         kvm_destroy_vm(kvm);
319         return 0;
320 }
321
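/*
 * Give the vcpu a pristine FPU state: reset the host FPU, capture that
 * reset state as the guest image, restore the host image, then force MXCSR
 * to its reset value (0x1f80) and clear the register area that follows it.
 */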
322 void fx_init(struct kvm_vcpu *vcpu)
323 {
324         unsigned after_mxcsr_mask;
325
326         /* Initialize guest FPU by resetting ours and saving into guest's */
327         preempt_disable();
328         fx_save(&vcpu->host_fx_image);
329         fpu_init();
330         fx_save(&vcpu->guest_fx_image);
331         fx_restore(&vcpu->host_fx_image);
332         preempt_enable();
333
334         vcpu->cr0 |= X86_CR0_ET;
335         after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
336         vcpu->guest_fx_image.mxcsr = 0x1f80;
337         memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
338                0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
339 }
340 EXPORT_SYMBOL_GPL(fx_init);
341
342 /*
343  * Allocate some memory and give it an address in the guest physical address
344  * space.
345  *
346  * Discontiguous memory is allowed, mostly for framebuffers.
347  *
348  * Must be called holding kvm->lock.
349  */
350 int __kvm_set_memory_region(struct kvm *kvm,
351                             struct kvm_userspace_memory_region *mem,
352                             int user_alloc)
353 {
354         int r;
355         gfn_t base_gfn;
356         unsigned long npages;
357         unsigned long i;
358         struct kvm_memory_slot *memslot;
359         struct kvm_memory_slot old, new;
360
361         r = -EINVAL;
362         /* General sanity checks */
363         if (mem->memory_size & (PAGE_SIZE - 1))
364                 goto out;
365         if (mem->guest_phys_addr & (PAGE_SIZE - 1))
366                 goto out;
367         if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
368                 goto out;
369         if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
370                 goto out;
371
372         memslot = &kvm->memslots[mem->slot];
373         base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
374         npages = mem->memory_size >> PAGE_SHIFT;
375
376         if (!npages)
377                 mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
378
379         new = old = *memslot;
380
381         new.base_gfn = base_gfn;
382         new.npages = npages;
383         new.flags = mem->flags;
384
385         /* Disallow changing a memory slot's size. */
386         r = -EINVAL;
387         if (npages && old.npages && npages != old.npages)
388                 goto out_free;
389
390         /* Check for overlaps */
391         r = -EEXIST;
392         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
393                 struct kvm_memory_slot *s = &kvm->memslots[i];
394
395                 if (s == memslot)
396                         continue;
397                 if (!((base_gfn + npages <= s->base_gfn) ||
398                       (base_gfn >= s->base_gfn + s->npages)))
399                         goto out_free;
400         }
401
402         /* Free page dirty bitmap if unneeded */
403         if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
404                 new.dirty_bitmap = NULL;
405
406         r = -ENOMEM;
407
408         /* Allocate if a slot is being created */
409         if (npages && !new.rmap) {
410                 new.rmap = vmalloc(npages * sizeof(struct page *));
411
412                 if (!new.rmap)
413                         goto out_free;
414
415                 memset(new.rmap, 0, npages * sizeof(*new.rmap));
416
417                 new.user_alloc = user_alloc;
418                 if (user_alloc)
419                         new.userspace_addr = mem->userspace_addr;
420                 else {
421                         down_write(&current->mm->mmap_sem);
422                         new.userspace_addr = do_mmap(NULL, 0,
423                                                      npages * PAGE_SIZE,
424                                                      PROT_READ | PROT_WRITE,
425                                                      MAP_SHARED | MAP_ANONYMOUS,
426                                                      0);
427                         up_write(&current->mm->mmap_sem);
428
429                         if (IS_ERR((void *)new.userspace_addr))
430                                 goto out_free;
431                 }
432         } else {
433                 if (!old.user_alloc && old.rmap) {
434                         int ret;
435
436                         down_write(&current->mm->mmap_sem);
437                         ret = do_munmap(current->mm, old.userspace_addr,
438                                         old.npages * PAGE_SIZE);
439                         up_write(&current->mm->mmap_sem);
440                         if (ret < 0)
441                                 printk(KERN_WARNING
442                                        "kvm_vm_ioctl_set_memory_region: "
443                                        "failed to munmap memory\n");
444                 }
445         }
446
447         /* Allocate page dirty bitmap if needed */
448         if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
449                 unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
450
451                 new.dirty_bitmap = vmalloc(dirty_bytes);
452                 if (!new.dirty_bitmap)
453                         goto out_free;
454                 memset(new.dirty_bitmap, 0, dirty_bytes);
455         }
456
457         if (mem->slot >= kvm->nmemslots)
458                 kvm->nmemslots = mem->slot + 1;
459
460         if (!kvm->n_requested_mmu_pages) {
461                 unsigned int n_pages;
462
463                 if (npages) {
464                         n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
465                         kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
466                                                  n_pages);
467                 } else {
468                         unsigned int nr_mmu_pages;
469
470                         n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
471                         nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
472                         nr_mmu_pages = max(nr_mmu_pages,
473                                         (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
474                         kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
475                 }
476         }
477
478         *memslot = new;
479
480         kvm_mmu_slot_remove_write_access(kvm, mem->slot);
481         kvm_flush_remote_tlbs(kvm);
482
483         kvm_free_physmem_slot(&old, &new);
484         return 0;
485
486 out_free:
487         kvm_free_physmem_slot(&new, &old);
488 out:
489         return r;
490
491 }
492 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
493
494 int kvm_set_memory_region(struct kvm *kvm,
495                           struct kvm_userspace_memory_region *mem,
496                           int user_alloc)
497 {
498         int r;
499
500         mutex_lock(&kvm->lock);
501         r = __kvm_set_memory_region(kvm, mem, user_alloc);
502         mutex_unlock(&kvm->lock);
503         return r;
504 }
505 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
506
507 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
508                                    struct
509                                    kvm_userspace_memory_region *mem,
510                                    int user_alloc)
511 {
512         if (mem->slot >= KVM_MEMORY_SLOTS)
513                 return -EINVAL;
514         return kvm_set_memory_region(kvm, mem, user_alloc);
515 }
516
517 /*
518  * Get (and clear) the dirty memory log for a memory slot.
519  */
520 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
521                                       struct kvm_dirty_log *log)
522 {
523         struct kvm_memory_slot *memslot;
524         int r, i;
525         int n;
526         unsigned long any = 0;
527
528         mutex_lock(&kvm->lock);
529
530         r = -EINVAL;
531         if (log->slot >= KVM_MEMORY_SLOTS)
532                 goto out;
533
534         memslot = &kvm->memslots[log->slot];
535         r = -ENOENT;
536         if (!memslot->dirty_bitmap)
537                 goto out;
538
539         n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
540
541         for (i = 0; !any && i < n/sizeof(long); ++i)
542                 any = memslot->dirty_bitmap[i];
543
544         r = -EFAULT;
545         if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
546                 goto out;
547
548         /* If nothing is dirty, don't bother messing with page tables. */
549         if (any) {
550                 kvm_mmu_slot_remove_write_access(kvm, log->slot);
551                 kvm_flush_remote_tlbs(kvm);
552                 memset(memslot->dirty_bitmap, 0, n);
553         }
554
555         r = 0;
556
557 out:
558         mutex_unlock(&kvm->lock);
559         return r;
560 }
561
562 int is_error_page(struct page *page)
563 {
564         return page == bad_page;
565 }
566 EXPORT_SYMBOL_GPL(is_error_page);
567
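/*
 * Translate a guest frame number through the VM's alias table; gfns that do
 * not fall inside any alias are returned unchanged.
 */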
568 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
569 {
570         int i;
571         struct kvm_mem_alias *alias;
572
573         for (i = 0; i < kvm->naliases; ++i) {
574                 alias = &kvm->aliases[i];
575                 if (gfn >= alias->base_gfn
576                     && gfn < alias->base_gfn + alias->npages)
577                         return alias->target_gfn + gfn - alias->base_gfn;
578         }
579         return gfn;
580 }
581
582 static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
583 {
584         int i;
585
586         for (i = 0; i < kvm->nmemslots; ++i) {
587                 struct kvm_memory_slot *memslot = &kvm->memslots[i];
588
589                 if (gfn >= memslot->base_gfn
590                     && gfn < memslot->base_gfn + memslot->npages)
591                         return memslot;
592         }
593         return NULL;
594 }
595
596 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
597 {
598         gfn = unalias_gfn(kvm, gfn);
599         return __gfn_to_memslot(kvm, gfn);
600 }
601
602 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
603 {
604         int i;
605
606         gfn = unalias_gfn(kvm, gfn);
607         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
608                 struct kvm_memory_slot *memslot = &kvm->memslots[i];
609
610                 if (gfn >= memslot->base_gfn
611                     && gfn < memslot->base_gfn + memslot->npages)
612                         return 1;
613         }
614         return 0;
615 }
616 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
617
618 /*
619  * Requires current->mm->mmap_sem to be held
620  */
621 static struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn)
622 {
623         struct kvm_memory_slot *slot;
624         struct page *page[1];
625         int npages;
626
627         might_sleep();
628
629         gfn = unalias_gfn(kvm, gfn);
630         slot = __gfn_to_memslot(kvm, gfn);
631         if (!slot) {
632                 get_page(bad_page);
633                 return bad_page;
634         }
635
636         npages = get_user_pages(current, current->mm,
637                                 slot->userspace_addr
638                                 + (gfn - slot->base_gfn) * PAGE_SIZE, 1,
639                                 1, 1, page, NULL);
640         if (npages != 1) {
641                 get_page(bad_page);
642                 return bad_page;
643         }
644
645         return page[0];
646 }
647
648 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
649 {
650         struct page *page;
651
652         down_read(&current->mm->mmap_sem);
653         page = __gfn_to_page(kvm, gfn);
654         up_read(&current->mm->mmap_sem);
655
656         return page;
657 }
658
659 EXPORT_SYMBOL_GPL(gfn_to_page);
660
661 void kvm_release_page(struct page *page)
662 {
663         if (!PageReserved(page))
664                 SetPageDirty(page);
665         put_page(page);
666 }
667 EXPORT_SYMBOL_GPL(kvm_release_page);
668
669 static int next_segment(unsigned long len, int offset)
670 {
671         if (len > PAGE_SIZE - offset)
672                 return PAGE_SIZE - offset;
673         else
674                 return len;
675 }
676
677 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
678                         int len)
679 {
680         void *page_virt;
681         struct page *page;
682
683         page = gfn_to_page(kvm, gfn);
684         if (is_error_page(page)) {
685                 kvm_release_page(page);
686                 return -EFAULT;
687         }
688         page_virt = kmap_atomic(page, KM_USER0);
689
690         memcpy(data, page_virt + offset, len);
691
692         kunmap_atomic(page_virt, KM_USER0);
693         kvm_release_page(page);
694         return 0;
695 }
696 EXPORT_SYMBOL_GPL(kvm_read_guest_page);
697
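/*
 * Read a possibly page-crossing range of guest physical memory, issuing one
 * kvm_read_guest_page() call per page touched.
 */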
698 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
699 {
700         gfn_t gfn = gpa >> PAGE_SHIFT;
701         int seg;
702         int offset = offset_in_page(gpa);
703         int ret;
704
705         while ((seg = next_segment(len, offset)) != 0) {
706                 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
707                 if (ret < 0)
708                         return ret;
709                 offset = 0;
710                 len -= seg;
711                 data += seg;
712                 ++gfn;
713         }
714         return 0;
715 }
716 EXPORT_SYMBOL_GPL(kvm_read_guest);
717
718 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
719                          int offset, int len)
720 {
721         void *page_virt;
722         struct page *page;
723
724         page = gfn_to_page(kvm, gfn);
725         if (is_error_page(page)) {
726                 kvm_release_page(page);
727                 return -EFAULT;
728         }
729         page_virt = kmap_atomic(page, KM_USER0);
730
731         memcpy(page_virt + offset, data, len);
732
733         kunmap_atomic(page_virt, KM_USER0);
734         mark_page_dirty(kvm, gfn);
735         kvm_release_page(page);
736         return 0;
737 }
738 EXPORT_SYMBOL_GPL(kvm_write_guest_page);
739
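/*
 * Write a possibly page-crossing range of guest physical memory; each page
 * touched is marked dirty by kvm_write_guest_page().
 */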
740 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
741                     unsigned long len)
742 {
743         gfn_t gfn = gpa >> PAGE_SHIFT;
744         int seg;
745         int offset = offset_in_page(gpa);
746         int ret;
747
748         while ((seg = next_segment(len, offset)) != 0) {
749                 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
750                 if (ret < 0)
751                         return ret;
752                 offset = 0;
753                 len -= seg;
754                 data += seg;
755                 ++gfn;
756         }
757         return 0;
758 }
759
760 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
761 {
762         void *page_virt;
763         struct page *page;
764
765         page = gfn_to_page(kvm, gfn);
766         if (is_error_page(page)) {
767                 kvm_release_page(page);
768                 return -EFAULT;
769         }
770         page_virt = kmap_atomic(page, KM_USER0);
771
772         memset(page_virt + offset, 0, len);
773
774         kunmap_atomic(page_virt, KM_USER0);
775         kvm_release_page(page);
776         return 0;
777 }
778 EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
779
780 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
781 {
782         gfn_t gfn = gpa >> PAGE_SHIFT;
783         int seg;
784         int offset = offset_in_page(gpa);
785         int ret;
786
787         while ((seg = next_segment(len, offset)) != 0) {
788                 ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
789                 if (ret < 0)
790                         return ret;
791                 offset = 0;
792                 len -= seg;
793                 ++gfn;
794         }
795         return 0;
796 }
797 EXPORT_SYMBOL_GPL(kvm_clear_guest);
798
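/*
 * Set the dirty bit for @gfn in its memslot's dirty log.  The gfn is passed
 * through unalias_gfn() first, so writes performed through an alias are
 * accounted to the underlying slot as well.
 */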
799 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
800 {
801         struct kvm_memory_slot *memslot;
802
803         gfn = unalias_gfn(kvm, gfn);
804         memslot = __gfn_to_memslot(kvm, gfn);
805         if (memslot && memslot->dirty_bitmap) {
806                 unsigned long rel_gfn = gfn - memslot->base_gfn;
807
808                 /* avoid RMW */
809                 if (!test_bit(rel_gfn, memslot->dirty_bitmap))
810                         set_bit(rel_gfn, memslot->dirty_bitmap);
811         }
812 }
813
814 /*
815  * The vCPU has executed a HLT instruction with the in-kernel irqchip enabled.
816  */
817 static void kvm_vcpu_block(struct kvm_vcpu *vcpu)
818 {
819         DECLARE_WAITQUEUE(wait, current);
820
821         add_wait_queue(&vcpu->wq, &wait);
822
823         /*
824          * We will block until either an interrupt or a signal wakes us up
825          */
826         while (!kvm_cpu_has_interrupt(vcpu)
827                && !signal_pending(current)
828                && vcpu->mp_state != VCPU_MP_STATE_RUNNABLE
829                && vcpu->mp_state != VCPU_MP_STATE_SIPI_RECEIVED) {
830                 set_current_state(TASK_INTERRUPTIBLE);
831                 vcpu_put(vcpu);
832                 schedule();
833                 vcpu_load(vcpu);
834         }
835
836         __set_current_state(TASK_RUNNING);
837         remove_wait_queue(&vcpu->wq, &wait);
838 }
839
840 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
841 {
842         ++vcpu->stat.halt_exits;
843         if (irqchip_in_kernel(vcpu->kvm)) {
844                 vcpu->mp_state = VCPU_MP_STATE_HALTED;
845                 kvm_vcpu_block(vcpu);
846                 if (vcpu->mp_state != VCPU_MP_STATE_RUNNABLE)
847                         return -EINTR;
848                 return 1;
849         } else {
850                 vcpu->run->exit_reason = KVM_EXIT_HLT;
851                 return 0;
852         }
853 }
854 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
855
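/*
 * Dispatch a guest hypercall: the number comes from RAX and up to four
 * arguments from RBX, RCX, RDX and RSI (truncated to 32 bits outside long
 * mode).  Unrecognized calls return -KVM_ENOSYS in RAX.
 */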
856 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
857 {
858         unsigned long nr, a0, a1, a2, a3, ret;
859
860         kvm_x86_ops->cache_regs(vcpu);
861
862         nr = vcpu->regs[VCPU_REGS_RAX];
863         a0 = vcpu->regs[VCPU_REGS_RBX];
864         a1 = vcpu->regs[VCPU_REGS_RCX];
865         a2 = vcpu->regs[VCPU_REGS_RDX];
866         a3 = vcpu->regs[VCPU_REGS_RSI];
867
868         if (!is_long_mode(vcpu)) {
869                 nr &= 0xFFFFFFFF;
870                 a0 &= 0xFFFFFFFF;
871                 a1 &= 0xFFFFFFFF;
872                 a2 &= 0xFFFFFFFF;
873                 a3 &= 0xFFFFFFFF;
874         }
875
876         switch (nr) {
877         default:
878                 ret = -KVM_ENOSYS;
879                 break;
880         }
881         vcpu->regs[VCPU_REGS_RAX] = ret;
882         kvm_x86_ops->decache_regs(vcpu);
883         return 0;
884 }
885 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
886
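/*
 * Patch the guest's hypercall instruction at the current RIP with the
 * vendor-specific sequence provided by kvm_x86_ops->patch_hypercall(),
 * zapping the MMU first so the update appears atomic to all vcpus.
 */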
887 int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
888 {
889         char instruction[3];
890         int ret = 0;
891
892         mutex_lock(&vcpu->kvm->lock);
893
894         /*
895          * Blow out the MMU so that no other VCPU retains an active mapping,
896          * ensuring that the updated hypercall appears atomically across
897          * all VCPUs.
898          */
899         kvm_mmu_zap_all(vcpu->kvm);
900
901         kvm_x86_ops->cache_regs(vcpu);
902         kvm_x86_ops->patch_hypercall(vcpu, instruction);
903         if (emulator_write_emulated(vcpu->rip, instruction, 3, vcpu)
904             != X86EMUL_CONTINUE)
905                 ret = -EFAULT;
906
907         mutex_unlock(&vcpu->kvm->lock);
908
909         return ret;
910 }
911
912 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
913 {
914         return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
915 }
916
917 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
918 {
919         struct descriptor_table dt = { limit, base };
920
921         kvm_x86_ops->set_gdt(vcpu, &dt);
922 }
923
924 void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
925 {
926         struct descriptor_table dt = { limit, base };
927
928         kvm_x86_ops->set_idt(vcpu, &dt);
929 }
930
931 void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
932                    unsigned long *rflags)
933 {
934         lmsw(vcpu, msw);
935         *rflags = kvm_x86_ops->get_rflags(vcpu);
936 }
937
938 unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
939 {
940         kvm_x86_ops->decache_cr4_guest_bits(vcpu);
941         switch (cr) {
942         case 0:
943                 return vcpu->cr0;
944         case 2:
945                 return vcpu->cr2;
946         case 3:
947                 return vcpu->cr3;
948         case 4:
949                 return vcpu->cr4;
950         default:
951                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
952                 return 0;
953         }
954 }
955
956 void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
957                      unsigned long *rflags)
958 {
959         switch (cr) {
960         case 0:
961                 set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
962                 *rflags = kvm_x86_ops->get_rflags(vcpu);
963                 break;
964         case 2:
965                 vcpu->cr2 = val;
966                 break;
967         case 3:
968                 set_cr3(vcpu, val);
969                 break;
970         case 4:
971                 set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
972                 break;
973         default:
974                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
975         }
976 }
977
978 void kvm_resched(struct kvm_vcpu *vcpu)
979 {
980         if (!need_resched())
981                 return;
982         cond_resched();
983 }
984 EXPORT_SYMBOL_GPL(kvm_resched);
985
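/*
 * Emulate CPUID from the userspace-supplied cpuid_entries: use an exact
 * match for the requested leaf if present, otherwise fall back to the
 * highest entry of the same class (basic vs. extended); unknown leaves
 * return zeroes.
 */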
986 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
987 {
988         int i;
989         u32 function;
990         struct kvm_cpuid_entry *e, *best;
991
992         kvm_x86_ops->cache_regs(vcpu);
993         function = vcpu->regs[VCPU_REGS_RAX];
994         vcpu->regs[VCPU_REGS_RAX] = 0;
995         vcpu->regs[VCPU_REGS_RBX] = 0;
996         vcpu->regs[VCPU_REGS_RCX] = 0;
997         vcpu->regs[VCPU_REGS_RDX] = 0;
998         best = NULL;
999         for (i = 0; i < vcpu->cpuid_nent; ++i) {
1000                 e = &vcpu->cpuid_entries[i];
1001                 if (e->function == function) {
1002                         best = e;
1003                         break;
1004                 }
1005                 /*
1006                  * Are both function numbers basic, or both extended?
1007                  */
1008                 if (((e->function ^ function) & 0x80000000) == 0)
1009                         if (!best || e->function > best->function)
1010                                 best = e;
1011         }
1012         if (best) {
1013                 vcpu->regs[VCPU_REGS_RAX] = best->eax;
1014                 vcpu->regs[VCPU_REGS_RBX] = best->ebx;
1015                 vcpu->regs[VCPU_REGS_RCX] = best->ecx;
1016                 vcpu->regs[VCPU_REGS_RDX] = best->edx;
1017         }
1018         kvm_x86_ops->decache_regs(vcpu);
1019         kvm_x86_ops->skip_emulated_instruction(vcpu);
1020 }
1021 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
1022
1023 /*
1024  * Check if userspace requested an interrupt window, and that the
1025  * interrupt window is open.
1026  *
1027  * No need to exit to userspace if we already have an interrupt queued.
1028  */
1029 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
1030                                           struct kvm_run *kvm_run)
1031 {
1032         return (!vcpu->irq_summary &&
1033                 kvm_run->request_interrupt_window &&
1034                 vcpu->interrupt_window_open &&
1035                 (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
1036 }
1037
1038 static void post_kvm_run_save(struct kvm_vcpu *vcpu,
1039                               struct kvm_run *kvm_run)
1040 {
1041         kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
1042         kvm_run->cr8 = get_cr8(vcpu);
1043         kvm_run->apic_base = kvm_get_apic_base(vcpu);
1044         if (irqchip_in_kernel(vcpu->kvm))
1045                 kvm_run->ready_for_interrupt_injection = 1;
1046         else
1047                 kvm_run->ready_for_interrupt_injection =
1048                                         (vcpu->interrupt_window_open &&
1049                                          vcpu->irq_summary == 0);
1050 }
1051
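/*
 * The inner run loop: reload the MMU, inject pending timer/external
 * interrupts, enter the guest with interrupts disabled, and keep iterating
 * on lightweight exits until a signal, an interrupt-window request or the
 * exit handler forces a return to userspace.
 */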
1052 static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1053 {
1054         int r;
1055
1056         if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
1057                 pr_debug("vcpu %d received sipi with vector # %x\n",
1058                        vcpu->vcpu_id, vcpu->sipi_vector);
1059                 kvm_lapic_reset(vcpu);
1060                 r = kvm_x86_ops->vcpu_reset(vcpu);
1061                 if (r)
1062                         return r;
1063                 vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
1064         }
1065
1066 preempted:
1067         if (vcpu->guest_debug.enabled)
1068                 kvm_x86_ops->guest_debug_pre(vcpu);
1069
1070 again:
1071         r = kvm_mmu_reload(vcpu);
1072         if (unlikely(r))
1073                 goto out;
1074
1075         kvm_inject_pending_timer_irqs(vcpu);
1076
1077         preempt_disable();
1078
1079         kvm_x86_ops->prepare_guest_switch(vcpu);
1080         kvm_load_guest_fpu(vcpu);
1081
1082         local_irq_disable();
1083
1084         if (signal_pending(current)) {
1085                 local_irq_enable();
1086                 preempt_enable();
1087                 r = -EINTR;
1088                 kvm_run->exit_reason = KVM_EXIT_INTR;
1089                 ++vcpu->stat.signal_exits;
1090                 goto out;
1091         }
1092
1093         if (irqchip_in_kernel(vcpu->kvm))
1094                 kvm_x86_ops->inject_pending_irq(vcpu);
1095         else if (!vcpu->mmio_read_completed)
1096                 kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
1097
1098         vcpu->guest_mode = 1;
1099         kvm_guest_enter();
1100
1101         if (vcpu->requests)
1102                 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
1103                         kvm_x86_ops->tlb_flush(vcpu);
1104
1105         kvm_x86_ops->run(vcpu, kvm_run);
1106
1107         vcpu->guest_mode = 0;
1108         local_irq_enable();
1109
1110         ++vcpu->stat.exits;
1111
1112         /*
1113          * We must have an instruction between local_irq_enable() and
1114          * kvm_guest_exit(), so the timer interrupt isn't delayed by
1115          * the interrupt shadow.  The stat.exits increment will do nicely.
1116          * But we need to prevent reordering, hence this barrier():
1117          */
1118         barrier();
1119
1120         kvm_guest_exit();
1121
1122         preempt_enable();
1123
1124         /*
1125          * Profile KVM exit RIPs:
1126          */
1127         if (unlikely(prof_on == KVM_PROFILING)) {
1128                 kvm_x86_ops->cache_regs(vcpu);
1129                 profile_hit(KVM_PROFILING, (void *)vcpu->rip);
1130         }
1131
1132         r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
1133
1134         if (r > 0) {
1135                 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
1136                         r = -EINTR;
1137                         kvm_run->exit_reason = KVM_EXIT_INTR;
1138                         ++vcpu->stat.request_irq_exits;
1139                         goto out;
1140                 }
1141                 if (!need_resched()) {
1142                         ++vcpu->stat.light_exits;
1143                         goto again;
1144                 }
1145         }
1146
1147 out:
1148         if (r > 0) {
1149                 kvm_resched(vcpu);
1150                 goto preempted;
1151         }
1152
1153         post_kvm_run_save(vcpu, kvm_run);
1154
1155         return r;
1156 }
1157
1158
1159 static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1160 {
1161         int r;
1162         sigset_t sigsaved;
1163
1164         vcpu_load(vcpu);
1165
1166         if (unlikely(vcpu->mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
1167                 kvm_vcpu_block(vcpu);
1168                 vcpu_put(vcpu);
1169                 return -EAGAIN;
1170         }
1171
1172         if (vcpu->sigset_active)
1173                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1174
1175         /* re-sync apic's tpr */
1176         if (!irqchip_in_kernel(vcpu->kvm))
1177                 set_cr8(vcpu, kvm_run->cr8);
1178
1179         if (vcpu->pio.cur_count) {
1180                 r = complete_pio(vcpu);
1181                 if (r)
1182                         goto out;
1183         }
1184 #ifdef CONFIG_HAS_IOMEM
1185         if (vcpu->mmio_needed) {
1186                 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
1187                 vcpu->mmio_read_completed = 1;
1188                 vcpu->mmio_needed = 0;
1189                 r = emulate_instruction(vcpu, kvm_run,
1190                                         vcpu->mmio_fault_cr2, 0, 1);
1191                 if (r == EMULATE_DO_MMIO) {
1192                         /*
1193                          * Read-modify-write.  Back to userspace.
1194                          */
1195                         r = 0;
1196                         goto out;
1197                 }
1198         }
1199 #endif
1200         if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
1201                 kvm_x86_ops->cache_regs(vcpu);
1202                 vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
1203                 kvm_x86_ops->decache_regs(vcpu);
1204         }
1205
1206         r = __vcpu_run(vcpu, kvm_run);
1207
1208 out:
1209         if (vcpu->sigset_active)
1210                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1211
1212         vcpu_put(vcpu);
1213         return r;
1214 }
1215
1216 static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
1217                                    struct kvm_regs *regs)
1218 {
1219         vcpu_load(vcpu);
1220
1221         kvm_x86_ops->cache_regs(vcpu);
1222
1223         regs->rax = vcpu->regs[VCPU_REGS_RAX];
1224         regs->rbx = vcpu->regs[VCPU_REGS_RBX];
1225         regs->rcx = vcpu->regs[VCPU_REGS_RCX];
1226         regs->rdx = vcpu->regs[VCPU_REGS_RDX];
1227         regs->rsi = vcpu->regs[VCPU_REGS_RSI];
1228         regs->rdi = vcpu->regs[VCPU_REGS_RDI];
1229         regs->rsp = vcpu->regs[VCPU_REGS_RSP];
1230         regs->rbp = vcpu->regs[VCPU_REGS_RBP];
1231 #ifdef CONFIG_X86_64
1232         regs->r8 = vcpu->regs[VCPU_REGS_R8];
1233         regs->r9 = vcpu->regs[VCPU_REGS_R9];
1234         regs->r10 = vcpu->regs[VCPU_REGS_R10];
1235         regs->r11 = vcpu->regs[VCPU_REGS_R11];
1236         regs->r12 = vcpu->regs[VCPU_REGS_R12];
1237         regs->r13 = vcpu->regs[VCPU_REGS_R13];
1238         regs->r14 = vcpu->regs[VCPU_REGS_R14];
1239         regs->r15 = vcpu->regs[VCPU_REGS_R15];
1240 #endif
1241
1242         regs->rip = vcpu->rip;
1243         regs->rflags = kvm_x86_ops->get_rflags(vcpu);
1244
1245         /*
1246          * Don't leak debug flags in case they were set for guest debugging
1247          */
1248         if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
1249                 regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
1250
1251         vcpu_put(vcpu);
1252
1253         return 0;
1254 }
1255
1256 static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
1257                                    struct kvm_regs *regs)
1258 {
1259         vcpu_load(vcpu);
1260
1261         vcpu->regs[VCPU_REGS_RAX] = regs->rax;
1262         vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
1263         vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
1264         vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
1265         vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
1266         vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
1267         vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
1268         vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
1269 #ifdef CONFIG_X86_64
1270         vcpu->regs[VCPU_REGS_R8] = regs->r8;
1271         vcpu->regs[VCPU_REGS_R9] = regs->r9;
1272         vcpu->regs[VCPU_REGS_R10] = regs->r10;
1273         vcpu->regs[VCPU_REGS_R11] = regs->r11;
1274         vcpu->regs[VCPU_REGS_R12] = regs->r12;
1275         vcpu->regs[VCPU_REGS_R13] = regs->r13;
1276         vcpu->regs[VCPU_REGS_R14] = regs->r14;
1277         vcpu->regs[VCPU_REGS_R15] = regs->r15;
1278 #endif
1279
1280         vcpu->rip = regs->rip;
1281         kvm_x86_ops->set_rflags(vcpu, regs->rflags);
1282
1283         kvm_x86_ops->decache_regs(vcpu);
1284
1285         vcpu_put(vcpu);
1286
1287         return 0;
1288 }
1289
1290 static void get_segment(struct kvm_vcpu *vcpu,
1291                         struct kvm_segment *var, int seg)
1292 {
1293         return kvm_x86_ops->get_segment(vcpu, var, seg);
1294 }
1295
1296 static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1297                                     struct kvm_sregs *sregs)
1298 {
1299         struct descriptor_table dt;
1300         int pending_vec;
1301
1302         vcpu_load(vcpu);
1303
1304         get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
1305         get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
1306         get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
1307         get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
1308         get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
1309         get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
1310
1311         get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
1312         get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
1313
1314         kvm_x86_ops->get_idt(vcpu, &dt);
1315         sregs->idt.limit = dt.limit;
1316         sregs->idt.base = dt.base;
1317         kvm_x86_ops->get_gdt(vcpu, &dt);
1318         sregs->gdt.limit = dt.limit;
1319         sregs->gdt.base = dt.base;
1320
1321         kvm_x86_ops->decache_cr4_guest_bits(vcpu);
1322         sregs->cr0 = vcpu->cr0;
1323         sregs->cr2 = vcpu->cr2;
1324         sregs->cr3 = vcpu->cr3;
1325         sregs->cr4 = vcpu->cr4;
1326         sregs->cr8 = get_cr8(vcpu);
1327         sregs->efer = vcpu->shadow_efer;
1328         sregs->apic_base = kvm_get_apic_base(vcpu);
1329
1330         if (irqchip_in_kernel(vcpu->kvm)) {
1331                 memset(sregs->interrupt_bitmap, 0,
1332                        sizeof sregs->interrupt_bitmap);
1333                 pending_vec = kvm_x86_ops->get_irq(vcpu);
1334                 if (pending_vec >= 0)
1335                         set_bit(pending_vec,
1336                                 (unsigned long *)sregs->interrupt_bitmap);
1337         } else
1338                 memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
1339                        sizeof sregs->interrupt_bitmap);
1340
1341         vcpu_put(vcpu);
1342
1343         return 0;
1344 }
1345
1346 static void set_segment(struct kvm_vcpu *vcpu,
1347                         struct kvm_segment *var, int seg)
1348 {
1349         return kvm_x86_ops->set_segment(vcpu, var, seg);
1350 }
1351
1352 static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1353                                     struct kvm_sregs *sregs)
1354 {
1355         int mmu_reset_needed = 0;
1356         int i, pending_vec, max_bits;
1357         struct descriptor_table dt;
1358
1359         vcpu_load(vcpu);
1360
1361         dt.limit = sregs->idt.limit;
1362         dt.base = sregs->idt.base;
1363         kvm_x86_ops->set_idt(vcpu, &dt);
1364         dt.limit = sregs->gdt.limit;
1365         dt.base = sregs->gdt.base;
1366         kvm_x86_ops->set_gdt(vcpu, &dt);
1367
1368         vcpu->cr2 = sregs->cr2;
1369         mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
1370         vcpu->cr3 = sregs->cr3;
1371
1372         set_cr8(vcpu, sregs->cr8);
1373
1374         mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
1375 #ifdef CONFIG_X86_64
1376         kvm_x86_ops->set_efer(vcpu, sregs->efer);
1377 #endif
1378         kvm_set_apic_base(vcpu, sregs->apic_base);
1379
1380         kvm_x86_ops->decache_cr4_guest_bits(vcpu);
1381
1382         mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
1383         vcpu->cr0 = sregs->cr0;
1384         kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
1385
1386         mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
1387         kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
1388         if (!is_long_mode(vcpu) && is_pae(vcpu))
1389                 load_pdptrs(vcpu, vcpu->cr3);
1390
1391         if (mmu_reset_needed)
1392                 kvm_mmu_reset_context(vcpu);
1393
1394         if (!irqchip_in_kernel(vcpu->kvm)) {
1395                 memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
1396                        sizeof vcpu->irq_pending);
1397                 vcpu->irq_summary = 0;
1398                 for (i = 0; i < ARRAY_SIZE(vcpu->irq_pending); ++i)
1399                         if (vcpu->irq_pending[i])
1400                                 __set_bit(i, &vcpu->irq_summary);
1401         } else {
1402                 max_bits = (sizeof sregs->interrupt_bitmap) << 3;
1403                 pending_vec = find_first_bit(
1404                         (const unsigned long *)sregs->interrupt_bitmap,
1405                         max_bits);
1406                 /* Only a pending external irq is handled here */
1407                 if (pending_vec < max_bits) {
1408                         kvm_x86_ops->set_irq(vcpu, pending_vec);
1409                         pr_debug("Set back pending irq %d\n",
1410                                  pending_vec);
1411                 }
1412         }
1413
1414         set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
1415         set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
1416         set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
1417         set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
1418         set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
1419         set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
1420
1421         set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
1422         set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
1423
1424         vcpu_put(vcpu);
1425
1426         return 0;
1427 }
1428
1429 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
1430 {
1431         struct kvm_segment cs;
1432
1433         get_segment(vcpu, &cs, VCPU_SREG_CS);
1434         *db = cs.db;
1435         *l = cs.l;
1436 }
1437 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
1438
1439 /*
1440  * Translate a guest virtual address to a guest physical address.
1441  */
1442 static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1443                                     struct kvm_translation *tr)
1444 {
1445         unsigned long vaddr = tr->linear_address;
1446         gpa_t gpa;
1447
1448         vcpu_load(vcpu);
1449         mutex_lock(&vcpu->kvm->lock);
1450         gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
1451         tr->physical_address = gpa;
1452         tr->valid = gpa != UNMAPPED_GVA;
1453         tr->writeable = 1;
1454         tr->usermode = 0;
1455         mutex_unlock(&vcpu->kvm->lock);
1456         vcpu_put(vcpu);
1457
1458         return 0;
1459 }
1460
1461 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
1462                                     struct kvm_interrupt *irq)
1463 {
1464         if (irq->irq < 0 || irq->irq >= 256)
1465                 return -EINVAL;
1466         if (irqchip_in_kernel(vcpu->kvm))
1467                 return -ENXIO;
1468         vcpu_load(vcpu);
1469
1470         set_bit(irq->irq, vcpu->irq_pending);
1471         set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);
1472
1473         vcpu_put(vcpu);
1474
1475         return 0;
1476 }
1477
1478 static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
1479                                       struct kvm_debug_guest *dbg)
1480 {
1481         int r;
1482
1483         vcpu_load(vcpu);
1484
1485         r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
1486
1487         vcpu_put(vcpu);
1488
1489         return r;
1490 }
1491
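/*
 * Back the vcpu mmap() with the right kernel page: offset 0 maps the
 * kvm_run structure, KVM_PIO_PAGE_OFFSET maps the PIO transfer page, and
 * any other offset faults with SIGBUS.
 */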
1492 static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma,
1493                                     unsigned long address,
1494                                     int *type)
1495 {
1496         struct kvm_vcpu *vcpu = vma->vm_file->private_data;
1497         unsigned long pgoff;
1498         struct page *page;
1499
1500         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1501         if (pgoff == 0)
1502                 page = virt_to_page(vcpu->run);
1503         else if (pgoff == KVM_PIO_PAGE_OFFSET)
1504                 page = virt_to_page(vcpu->pio_data);
1505         else
1506                 return NOPAGE_SIGBUS;
1507         get_page(page);
1508         if (type != NULL)
1509                 *type = VM_FAULT_MINOR;
1510
1511         return page;
1512 }
1513
1514 static struct vm_operations_struct kvm_vcpu_vm_ops = {
1515         .nopage = kvm_vcpu_nopage,
1516 };
1517
1518 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
1519 {
1520         vma->vm_ops = &kvm_vcpu_vm_ops;
1521         return 0;
1522 }
1523
1524 static int kvm_vcpu_release(struct inode *inode, struct file *filp)
1525 {
1526         struct kvm_vcpu *vcpu = filp->private_data;
1527
1528         fput(vcpu->kvm->filp);
1529         return 0;
1530 }
1531
1532 static struct file_operations kvm_vcpu_fops = {
1533         .release        = kvm_vcpu_release,
1534         .unlocked_ioctl = kvm_vcpu_ioctl,
1535         .compat_ioctl   = kvm_vcpu_ioctl,
1536         .mmap           = kvm_vcpu_mmap,
1537 };
1538
1539 /*
1540  * Allocates an inode for the vcpu.
1541  */
1542 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
1543 {
1544         int fd, r;
1545         struct inode *inode;
1546         struct file *file;
1547
1548         r = anon_inode_getfd(&fd, &inode, &file,
1549                              "kvm-vcpu", &kvm_vcpu_fops, vcpu);
1550         if (r)
1551                 return r;
1552         atomic_inc(&vcpu->kvm->filp->f_count);
1553         return fd;
1554 }
1555
1556 /*
1557  * Creates some virtual cpus.  Good luck creating more than one.
1558  */
1559 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
1560 {
1561         int r;
1562         struct kvm_vcpu *vcpu;
1563
1564         if (!valid_vcpu(n))
1565                 return -EINVAL;
1566
1567         vcpu = kvm_x86_ops->vcpu_create(kvm, n);
1568         if (IS_ERR(vcpu))
1569                 return PTR_ERR(vcpu);
1570
1571         preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
1572
1573         /* We do fxsave: this must be aligned. */
1574         BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
1575
1576         vcpu_load(vcpu);
1577         r = kvm_x86_ops->vcpu_reset(vcpu);
1578         if (r == 0)
1579                 r = kvm_mmu_setup(vcpu);
1580         vcpu_put(vcpu);
1581         if (r < 0)
1582                 goto free_vcpu;
1583
1584         mutex_lock(&kvm->lock);
1585         if (kvm->vcpus[n]) {
1586                 r = -EEXIST;
1587                 mutex_unlock(&kvm->lock);
1588                 goto mmu_unload;
1589         }
1590         kvm->vcpus[n] = vcpu;
1591         mutex_unlock(&kvm->lock);
1592
1593         /* Now it's all set up, let userspace reach it */
1594         r = create_vcpu_fd(vcpu);
1595         if (r < 0)
1596                 goto unlink;
1597         return r;
1598
1599 unlink:
1600         mutex_lock(&kvm->lock);
1601         kvm->vcpus[n] = NULL;
1602         mutex_unlock(&kvm->lock);
1603
1604 mmu_unload:
1605         vcpu_load(vcpu);
1606         kvm_mmu_unload(vcpu);
1607         vcpu_put(vcpu);
1608
1609 free_vcpu:
1610         kvm_x86_ops->vcpu_free(vcpu);
1611         return r;
1612 }
1613
1614 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
1615 {
1616         if (sigset) {
1617                 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
1618                 vcpu->sigset_active = 1;
1619                 vcpu->sigset = *sigset;
1620         } else
1621                 vcpu->sigset_active = 0;
1622         return 0;
1623 }
1624
1625 /*
1626  * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
1627  * we have asm/x86/processor.h
1628  */
1629 struct fxsave {
1630         u16     cwd;
1631         u16     swd;
1632         u16     twd;
1633         u16     fop;
1634         u64     rip;
1635         u64     rdp;
1636         u32     mxcsr;
1637         u32     mxcsr_mask;
1638         u32     st_space[32];   /* 8*16 bytes for each FP-reg = 128 bytes */
1639 #ifdef CONFIG_X86_64
1640         u32     xmm_space[64];  /* 16*16 bytes for each XMM-reg = 256 bytes */
1641 #else
1642         u32     xmm_space[32];  /* 8*16 bytes for each XMM-reg = 128 bytes */
1643 #endif
1644 };
1645
1646 static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1647 {
1648         struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
1649
1650         vcpu_load(vcpu);
1651
1652         memcpy(fpu->fpr, fxsave->st_space, 128);
1653         fpu->fcw = fxsave->cwd;
1654         fpu->fsw = fxsave->swd;
1655         fpu->ftwx = fxsave->twd;
1656         fpu->last_opcode = fxsave->fop;
1657         fpu->last_ip = fxsave->rip;
1658         fpu->last_dp = fxsave->rdp;
1659         memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
1660
1661         vcpu_put(vcpu);
1662
1663         return 0;
1664 }
1665
1666 static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1667 {
1668         struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
1669
1670         vcpu_load(vcpu);
1671
1672         memcpy(fxsave->st_space, fpu->fpr, 128);
1673         fxsave->cwd = fpu->fcw;
1674         fxsave->swd = fpu->fsw;
1675         fxsave->twd = fpu->ftwx;
1676         fxsave->fop = fpu->last_opcode;
1677         fxsave->rip = fpu->last_ip;
1678         fxsave->rdp = fpu->last_dp;
1679         memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
1680
1681         vcpu_put(vcpu);
1682
1683         return 0;
1684 }
1685
1686 static long kvm_vcpu_ioctl(struct file *filp,
1687                            unsigned int ioctl, unsigned long arg)
1688 {
1689         struct kvm_vcpu *vcpu = filp->private_data;
1690         void __user *argp = (void __user *)arg;
1691         int r;
1692
1693         switch (ioctl) {
1694         case KVM_RUN:
1695                 r = -EINVAL;
1696                 if (arg)
1697                         goto out;
1698                 r = kvm_vcpu_ioctl_run(vcpu, vcpu->run);
1699                 break;
1700         case KVM_GET_REGS: {
1701                 struct kvm_regs kvm_regs;
1702
1703                 memset(&kvm_regs, 0, sizeof kvm_regs);
1704                 r = kvm_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
1705                 if (r)
1706                         goto out;
1707                 r = -EFAULT;
1708                 if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
1709                         goto out;
1710                 r = 0;
1711                 break;
1712         }
1713         case KVM_SET_REGS: {
1714                 struct kvm_regs kvm_regs;
1715
1716                 r = -EFAULT;
1717                 if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
1718                         goto out;
1719                 r = kvm_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
1720                 if (r)
1721                         goto out;
1722                 r = 0;
1723                 break;
1724         }
1725         case KVM_GET_SREGS: {
1726                 struct kvm_sregs kvm_sregs;
1727
1728                 memset(&kvm_sregs, 0, sizeof kvm_sregs);
1729                 r = kvm_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
1730                 if (r)
1731                         goto out;
1732                 r = -EFAULT;
1733                 if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
1734                         goto out;
1735                 r = 0;
1736                 break;
1737         }
1738         case KVM_SET_SREGS: {
1739                 struct kvm_sregs kvm_sregs;
1740
1741                 r = -EFAULT;
1742                 if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
1743                         goto out;
1744                 r = kvm_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
1745                 if (r)
1746                         goto out;
1747                 r = 0;
1748                 break;
1749         }
1750         case KVM_TRANSLATE: {
1751                 struct kvm_translation tr;
1752
1753                 r = -EFAULT;
1754                 if (copy_from_user(&tr, argp, sizeof tr))
1755                         goto out;
1756                 r = kvm_vcpu_ioctl_translate(vcpu, &tr);
1757                 if (r)
1758                         goto out;
1759                 r = -EFAULT;
1760                 if (copy_to_user(argp, &tr, sizeof tr))
1761                         goto out;
1762                 r = 0;
1763                 break;
1764         }
1765         case KVM_INTERRUPT: {
1766                 struct kvm_interrupt irq;
1767
1768                 r = -EFAULT;
1769                 if (copy_from_user(&irq, argp, sizeof irq))
1770                         goto out;
1771                 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
1772                 if (r)
1773                         goto out;
1774                 r = 0;
1775                 break;
1776         }
1777         case KVM_DEBUG_GUEST: {
1778                 struct kvm_debug_guest dbg;
1779
1780                 r = -EFAULT;
1781                 if (copy_from_user(&dbg, argp, sizeof dbg))
1782                         goto out;
1783                 r = kvm_vcpu_ioctl_debug_guest(vcpu, &dbg);
1784                 if (r)
1785                         goto out;
1786                 r = 0;
1787                 break;
1788         }
1789         case KVM_SET_SIGNAL_MASK: {
1790                 struct kvm_signal_mask __user *sigmask_arg = argp;
1791                 struct kvm_signal_mask kvm_sigmask;
1792                 sigset_t sigset, *p;
1793
1794                 p = NULL;
1795                 if (argp) {
1796                         r = -EFAULT;
1797                         if (copy_from_user(&kvm_sigmask, argp,
1798                                            sizeof kvm_sigmask))
1799                                 goto out;
1800                         r = -EINVAL;
1801                         if (kvm_sigmask.len != sizeof sigset)
1802                                 goto out;
1803                         r = -EFAULT;
1804                         if (copy_from_user(&sigset, sigmask_arg->sigset,
1805                                            sizeof sigset))
1806                                 goto out;
1807                         p = &sigset;
1808                 }
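                /*
                 * p is still NULL when userspace passed no mask, which
                 * clears any previously set signal mask.
                 */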
1809                 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
1810                 break;
1811         }
1812         case KVM_GET_FPU: {
1813                 struct kvm_fpu fpu;
1814
1815                 memset(&fpu, 0, sizeof fpu);
1816                 r = kvm_vcpu_ioctl_get_fpu(vcpu, &fpu);
1817                 if (r)
1818                         goto out;
1819                 r = -EFAULT;
1820                 if (copy_to_user(argp, &fpu, sizeof fpu))
1821                         goto out;
1822                 r = 0;
1823                 break;
1824         }
1825         case KVM_SET_FPU: {
1826                 struct kvm_fpu fpu;
1827
1828                 r = -EFAULT;
1829                 if (copy_from_user(&fpu, argp, sizeof fpu))
1830                         goto out;
1831                 r = kvm_vcpu_ioctl_set_fpu(vcpu, &fpu);
1832                 if (r)
1833                         goto out;
1834                 r = 0;
1835                 break;
1836         }
1837         default:
1838                 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
1839         }
1840 out:
1841         return r;
1842 }
1843
1844 static long kvm_vm_ioctl(struct file *filp,
1845                            unsigned int ioctl, unsigned long arg)
1846 {
1847         struct kvm *kvm = filp->private_data;
1848         void __user *argp = (void __user *)arg;
1849         int r;
1850
1851         switch (ioctl) {
1852         case KVM_CREATE_VCPU:
1853                 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
1854                 if (r < 0)
1855                         goto out;
1856                 break;
1857         case KVM_SET_USER_MEMORY_REGION: {
1858                 struct kvm_userspace_memory_region kvm_userspace_mem;
1859
1860                 r = -EFAULT;
1861                 if (copy_from_user(&kvm_userspace_mem, argp,
1862                                                 sizeof kvm_userspace_mem))
1863                         goto out;
1864
1865                 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
1866                 if (r)
1867                         goto out;
1868                 break;
1869         }
1870         case KVM_GET_DIRTY_LOG: {
1871                 struct kvm_dirty_log log;
1872
1873                 r = -EFAULT;
1874                 if (copy_from_user(&log, argp, sizeof log))
1875                         goto out;
1876                 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
1877                 if (r)
1878                         goto out;
1879                 break;
1880         }
1881         default:
1882                 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
1883         }
1884 out:
1885         return r;
1886 }
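
/*
 * Illustrative userspace sketch (assumed names, not part of this file):
 * registering guest RAM through the KVM_SET_USER_MEMORY_REGION ioctl handled
 * above.  "vm_fd" is a VM file descriptor and "ram" a page-aligned anonymous
 * mapping of "ram_size" bytes:
 *
 *      struct kvm_userspace_memory_region mem = {
 *              .slot            = 0,
 *              .flags           = 0,
 *              .guest_phys_addr = 0,
 *              .memory_size     = ram_size,
 *              .userspace_addr  = (__u64)(unsigned long)ram,
 *      };
 *
 *      if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem) < 0)
 *              err(1, "KVM_SET_USER_MEMORY_REGION");
 */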
1887
1888 static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
1889                                   unsigned long address,
1890                                   int *type)
1891 {
1892         struct kvm *kvm = vma->vm_file->private_data;
1893         unsigned long pgoff;
1894         struct page *page;
1895
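        /*
         * The file offset of a VM fd mapping is interpreted as a guest frame
         * number, so the faulting page is the gfn at this offset.
         */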
1896         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1897         if (!kvm_is_visible_gfn(kvm, pgoff))
1898                 return NOPAGE_SIGBUS;
1899         /* current->mm->mmap_sem is already held so call lockless version */
1900         page = __gfn_to_page(kvm, pgoff);
1901         if (is_error_page(page)) {
1902                 kvm_release_page(page);
1903                 return NOPAGE_SIGBUS;
1904         }
1905         if (type != NULL)
1906                 *type = VM_FAULT_MINOR;
1907
1908         return page;
1909 }
1910
1911 static struct vm_operations_struct kvm_vm_vm_ops = {
1912         .nopage = kvm_vm_nopage,
1913 };
1914
1915 static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
1916 {
1917         vma->vm_ops = &kvm_vm_vm_ops;
1918         return 0;
1919 }
1920
1921 static struct file_operations kvm_vm_fops = {
1922         .release        = kvm_vm_release,
1923         .unlocked_ioctl = kvm_vm_ioctl,
1924         .compat_ioctl   = kvm_vm_ioctl,
1925         .mmap           = kvm_vm_mmap,
1926 };
1927
1928 static int kvm_dev_ioctl_create_vm(void)
1929 {
1930         int fd, r;
1931         struct inode *inode;
1932         struct file *file;
1933         struct kvm *kvm;
1934
1935         kvm = kvm_create_vm();
1936         if (IS_ERR(kvm))
1937                 return PTR_ERR(kvm);
1938         r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
1939         if (r) {
1940                 kvm_destroy_vm(kvm);
1941                 return r;
1942         }
1943
1944         kvm->filp = file;
1945
1946         return fd;
1947 }
1948
1949 static long kvm_dev_ioctl(struct file *filp,
1950                           unsigned int ioctl, unsigned long arg)
1951 {
1952         void __user *argp = (void __user *)arg;
1953         long r = -EINVAL;
1954
1955         switch (ioctl) {
1956         case KVM_GET_API_VERSION:
1957                 r = -EINVAL;
1958                 if (arg)
1959                         goto out;
1960                 r = KVM_API_VERSION;
1961                 break;
1962         case KVM_CREATE_VM:
1963                 r = -EINVAL;
1964                 if (arg)
1965                         goto out;
1966                 r = kvm_dev_ioctl_create_vm();
1967                 break;
1968         case KVM_CHECK_EXTENSION: {
1969                 int ext = (long)argp;
1970
1971                 switch (ext) {
1972                 case KVM_CAP_IRQCHIP:
1973                 case KVM_CAP_HLT:
1974                 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
1975                 case KVM_CAP_USER_MEMORY:
1976                 case KVM_CAP_SET_TSS_ADDR:
1977                         r = 1;
1978                         break;
1979                 default:
1980                         r = 0;
1981                         break;
1982                 }
1983                 break;
1984         }
1985         case KVM_GET_VCPU_MMAP_SIZE:
1986                 r = -EINVAL;
1987                 if (arg)
1988                         goto out;
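                /* One page for the kvm_run structure plus one for the PIO data page. */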
1989                 r = 2 * PAGE_SIZE;
1990                 break;
1991         default:
1992                 return kvm_arch_dev_ioctl(filp, ioctl, arg);
1993         }
1994 out:
1995         return r;
1996 }
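
/*
 * Illustrative userspace sketch (not part of this file) of the usual
 * sequence against the /dev/kvm ioctls dispatched above:
 *
 *      int kvm_fd = open("/dev/kvm", O_RDWR);
 *
 *      if (kvm_fd < 0)
 *              err(1, "/dev/kvm");
 *      if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
 *              errx(1, "kvm API version mismatch");
 *      if (!ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY))
 *              errx(1, "KVM_CAP_USER_MEMORY not supported");
 *      int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *
 *      if (vm_fd < 0)
 *              err(1, "KVM_CREATE_VM");
 */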
1997
1998 static struct file_operations kvm_chardev_ops = {
1999         .unlocked_ioctl = kvm_dev_ioctl,
2000         .compat_ioctl   = kvm_dev_ioctl,
2001 };
2002
2003 static struct miscdevice kvm_dev = {
2004         .minor = KVM_MINOR,
2005         .name  = "kvm",
2006         .fops  = &kvm_chardev_ops,
2007 };
2008
2009 /*
2010  * Make sure that a cpu that is being hot-unplugged does not have any vcpus
2011  * cached on it.
2012  */
2013 static void decache_vcpus_on_cpu(int cpu)
2014 {
2015         struct kvm *vm;
2016         struct kvm_vcpu *vcpu;
2017         int i;
2018
2019         spin_lock(&kvm_lock);
2020         list_for_each_entry(vm, &vm_list, vm_list)
2021                 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
2022                         vcpu = vm->vcpus[i];
2023                         if (!vcpu)
2024                                 continue;
2025                         /*
2026                          * If the vcpu is locked, then it is running on some
2027                          * other cpu and therefore it is not cached on the
2028                          * cpu in question.
2029                          *
2030                          * If it's not locked, check the last cpu it executed
2031                          * on.
2032                          */
2033                         if (mutex_trylock(&vcpu->mutex)) {
2034                                 if (vcpu->cpu == cpu) {
2035                                         kvm_x86_ops->vcpu_decache(vcpu);
2036                                         vcpu->cpu = -1;
2037                                 }
2038                                 mutex_unlock(&vcpu->mutex);
2039                         }
2040                 }
2041         spin_unlock(&kvm_lock);
2042 }
2043
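/*
 * hardware_enable()/hardware_disable() always run on the CPU they act on,
 * either via an IPI (on_each_cpu/smp_call_function_single) or from a context
 * that cannot migrate, which is why raw_smp_processor_id() is safe here.
 */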
2044 static void hardware_enable(void *junk)
2045 {
2046         int cpu = raw_smp_processor_id();
2047
2048         if (cpu_isset(cpu, cpus_hardware_enabled))
2049                 return;
2050         cpu_set(cpu, cpus_hardware_enabled);
2051         kvm_x86_ops->hardware_enable(NULL);
2052 }
2053
2054 static void hardware_disable(void *junk)
2055 {
2056         int cpu = raw_smp_processor_id();
2057
2058         if (!cpu_isset(cpu, cpus_hardware_enabled))
2059                 return;
2060         cpu_clear(cpu, cpus_hardware_enabled);
2061         decache_vcpus_on_cpu(cpu);
2062         kvm_x86_ops->hardware_disable(NULL);
2063 }
2064
2065 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
2066                            void *v)
2067 {
2068         int cpu = (long)v;
2069
2070         switch (val) {
2071         case CPU_DYING:
2072         case CPU_DYING_FROZEN:
2073                 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2074                        cpu);
2075                 hardware_disable(NULL);
2076                 break;
2077         case CPU_UP_CANCELED:
2078         case CPU_UP_CANCELED_FROZEN:
2079                 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2080                        cpu);
2081                 smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
2082                 break;
2083         case CPU_ONLINE:
2084         case CPU_ONLINE_FROZEN:
2085                 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
2086                        cpu);
2087                 smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
2088                 break;
2089         }
2090         return NOTIFY_OK;
2091 }
2092
2093 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
2094                       void *v)
2095 {
2096         if (val == SYS_RESTART) {
2097                 /*
2098                  * Some BIOSes hang on reboot if the CPU is left in VMX
2099                  * root mode, so drop out of it before restarting.
2100                  */
2101                 printk(KERN_INFO "kvm: exiting hardware virtualization\n");
2102                 on_each_cpu(hardware_disable, NULL, 0, 1);
2103         }
2104         return NOTIFY_OK;
2105 }
2106
2107 static struct notifier_block kvm_reboot_notifier = {
2108         .notifier_call = kvm_reboot,
2109         .priority = 0,
2110 };
2111
2112 void kvm_io_bus_init(struct kvm_io_bus *bus)
2113 {
2114         memset(bus, 0, sizeof(*bus));
2115 }
2116
2117 void kvm_io_bus_destroy(struct kvm_io_bus *bus)
2118 {
2119         int i;
2120
2121         for (i = 0; i < bus->dev_count; i++) {
2122                 struct kvm_io_device *pos = bus->devs[i];
2123
2124                 kvm_iodevice_destructor(pos);
2125         }
2126 }
2127
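/*
 * Route a guest I/O access to the in-kernel device model (if any) that has
 * claimed the address; used by the PIO and MMIO emulation paths.
 */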
2128 struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
2129 {
2130         int i;
2131
2132         for (i = 0; i < bus->dev_count; i++) {
2133                 struct kvm_io_device *pos = bus->devs[i];
2134
2135                 if (pos->in_range(pos, addr))
2136                         return pos;
2137         }
2138
2139         return NULL;
2140 }
2141
2142 void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
2143 {
2144         BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));
2145
2146         bus->devs[bus->dev_count++] = dev;
2147 }
2148
2149 static struct notifier_block kvm_cpu_notifier = {
2150         .notifier_call = kvm_cpu_hotplug,
2151         .priority = 20, /* must be > scheduler priority */
2152 };
2153
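/*
 * Sum a per-vcpu counter, identified by its offset into struct kvm_vcpu,
 * across all vcpus of all VMs; this backs the debugfs files created in
 * kvm_init_debug() below.
 */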
2154 static u64 stat_get(void *_offset)
2155 {
2156         unsigned offset = (long)_offset;
2157         u64 total = 0;
2158         struct kvm *kvm;
2159         struct kvm_vcpu *vcpu;
2160         int i;
2161
2162         spin_lock(&kvm_lock);
2163         list_for_each_entry(kvm, &vm_list, vm_list)
2164                 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
2165                         vcpu = kvm->vcpus[i];
2166                         if (vcpu)
2167                                 total += *(u32 *)((void *)vcpu + offset);
2168                 }
2169         spin_unlock(&kvm_lock);
2170         return total;
2171 }
2172
2173 DEFINE_SIMPLE_ATTRIBUTE(stat_fops, stat_get, NULL, "%llu\n");
2174
2175 static __init void kvm_init_debug(void)
2176 {
2177         struct kvm_stats_debugfs_item *p;
2178
2179         debugfs_dir = debugfs_create_dir("kvm", NULL);
2180         for (p = debugfs_entries; p->name; ++p)
2181                 p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
2182                                                 (void *)(long)p->offset,
2183                                                 &stat_fops);
2184 }
2185
2186 static void kvm_exit_debug(void)
2187 {
2188         struct kvm_stats_debugfs_item *p;
2189
2190         for (p = debugfs_entries; p->name; ++p)
2191                 debugfs_remove(p->dentry);
2192         debugfs_remove(debugfs_dir);
2193 }
2194
2195 static int kvm_suspend(struct sys_device *dev, pm_message_t state)
2196 {
2197         hardware_disable(NULL);
2198         return 0;
2199 }
2200
2201 static int kvm_resume(struct sys_device *dev)
2202 {
2203         hardware_enable(NULL);
2204         return 0;
2205 }
2206
2207 static struct sysdev_class kvm_sysdev_class = {
2208         .name = "kvm",
2209         .suspend = kvm_suspend,
2210         .resume = kvm_resume,
2211 };
2212
2213 static struct sys_device kvm_sysdev = {
2214         .id = 0,
2215         .cls = &kvm_sysdev_class,
2216 };
2217
2218 struct page *bad_page;
2219
2220 static inline
2221 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
2222 {
2223         return container_of(pn, struct kvm_vcpu, preempt_notifier);
2224 }
2225
2226 static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
2227 {
2228         struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
2229
2230         kvm_x86_ops->vcpu_load(vcpu, cpu);
2231 }
2232
2233 static void kvm_sched_out(struct preempt_notifier *pn,
2234                           struct task_struct *next)
2235 {
2236         struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
2237
2238         kvm_x86_ops->vcpu_put(vcpu);
2239 }
2240
2241 int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
2242                   struct module *module)
2243 {
2244         int r;
2245         int cpu;
2246
2247         if (kvm_x86_ops) {
2248                 printk(KERN_ERR "kvm: another hardware support module is already loaded\n");
2249                 return -EEXIST;
2250         }
2251
2252         if (!ops->cpu_has_kvm_support()) {
2253                 printk(KERN_ERR "kvm: no hardware support\n");
2254                 return -EOPNOTSUPP;
2255         }
2256         if (ops->disabled_by_bios()) {
2257                 printk(KERN_ERR "kvm: disabled by bios\n");
2258                 return -EOPNOTSUPP;
2259         }
2260
2261         kvm_x86_ops = ops;
2262
2263         r = kvm_x86_ops->hardware_setup();
2264         if (r < 0)
2265                 goto out;
2266
2267         for_each_online_cpu(cpu) {
2268                 smp_call_function_single(cpu,
2269                                 kvm_x86_ops->check_processor_compatibility,
2270                                 &r, 0, 1);
2271                 if (r < 0)
2272                         goto out_free_0;
2273         }
2274
2275         on_each_cpu(hardware_enable, NULL, 0, 1);
2276         r = register_cpu_notifier(&kvm_cpu_notifier);
2277         if (r)
2278                 goto out_free_1;
2279         register_reboot_notifier(&kvm_reboot_notifier);
2280
2281         r = sysdev_class_register(&kvm_sysdev_class);
2282         if (r)
2283                 goto out_free_2;
2284
2285         r = sysdev_register(&kvm_sysdev);
2286         if (r)
2287                 goto out_free_3;
2288
2289         /* A kmem cache lets us meet the alignment requirements of fx_save. */
2290         kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
2291                                            __alignof__(struct kvm_vcpu), 0, NULL);
2292         if (!kvm_vcpu_cache) {
2293                 r = -ENOMEM;
2294                 goto out_free_4;
2295         }
2296
2297         kvm_chardev_ops.owner = module;
2298
2299         r = misc_register(&kvm_dev);
2300         if (r) {
2301                 printk(KERN_ERR "kvm: misc device register failed\n");
2302                 goto out_free;
2303         }
2304
2305         kvm_preempt_ops.sched_in = kvm_sched_in;
2306         kvm_preempt_ops.sched_out = kvm_sched_out;
2307
2308         kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
2309
2310         return 0;
2311
2312 out_free:
2313         kmem_cache_destroy(kvm_vcpu_cache);
2314 out_free_4:
2315         sysdev_unregister(&kvm_sysdev);
2316 out_free_3:
2317         sysdev_class_unregister(&kvm_sysdev_class);
2318 out_free_2:
2319         unregister_reboot_notifier(&kvm_reboot_notifier);
2320         unregister_cpu_notifier(&kvm_cpu_notifier);
2321 out_free_1:
2322         on_each_cpu(hardware_disable, NULL, 0, 1);
2323 out_free_0:
2324         kvm_x86_ops->hardware_unsetup();
2325 out:
2326         kvm_x86_ops = NULL;
2327         return r;
2328 }
2329 EXPORT_SYMBOL_GPL(kvm_init_x86);
2330
2331 void kvm_exit_x86(void)
2332 {
2333         misc_deregister(&kvm_dev);
2334         kmem_cache_destroy(kvm_vcpu_cache);
2335         sysdev_unregister(&kvm_sysdev);
2336         sysdev_class_unregister(&kvm_sysdev_class);
2337         unregister_reboot_notifier(&kvm_reboot_notifier);
2338         unregister_cpu_notifier(&kvm_cpu_notifier);
2339         on_each_cpu(hardware_disable, NULL, 0, 1);
2340         kvm_x86_ops->hardware_unsetup();
2341         kvm_x86_ops = NULL;
2342 }
2343 EXPORT_SYMBOL_GPL(kvm_exit_x86);
2344
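/*
 * kvm_init()/kvm_exit() are the init/exit hooks of the core kvm module;
 * kvm_init_x86()/kvm_exit_x86() above are exported for the vendor modules
 * (kvm-intel, kvm-amd), which pass in their kvm_x86_ops when they load.
 */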
2345 static __init int kvm_init(void)
2346 {
2347         int r;
2348
2349         r = kvm_mmu_module_init();
2350         if (r)
2351                 goto out4;
2352
2353         kvm_init_debug();
2354
2355         kvm_arch_init();
2356
2357         bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2358
2359         if (bad_page == NULL) {
2360                 r = -ENOMEM;
2361                 goto out;
2362         }
2363
2364         return 0;
2365
2366 out:
2367         kvm_exit_debug();
2368         kvm_mmu_module_exit();
2369 out4:
2370         return r;
2371 }
2372
2373 static __exit void kvm_exit(void)
2374 {
2375         kvm_exit_debug();
2376         __free_page(bad_page);
2377         kvm_mmu_module_exit();
2378 }
2379
2380 module_init(kvm_init)
2381 module_exit(kvm_exit)