KVM: x86: Clear DR7.LE during task-switch
[cascardo/linux.git] arch/x86/kvm/vmx.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9  *
10  * Authors:
11  *   Avi Kivity   <avi@qumranet.com>
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *
14  * This work is licensed under the terms of the GNU GPL, version 2.  See
15  * the COPYING file in the top-level directory.
16  *
17  */
18
19 #include "irq.h"
20 #include "mmu.h"
21 #include "cpuid.h"
22
23 #include <linux/kvm_host.h>
24 #include <linux/module.h>
25 #include <linux/kernel.h>
26 #include <linux/mm.h>
27 #include <linux/highmem.h>
28 #include <linux/sched.h>
29 #include <linux/moduleparam.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/ftrace_event.h>
32 #include <linux/slab.h>
33 #include <linux/tboot.h>
34 #include <linux/hrtimer.h>
35 #include "kvm_cache_regs.h"
36 #include "x86.h"
37
38 #include <asm/io.h>
39 #include <asm/desc.h>
40 #include <asm/vmx.h>
41 #include <asm/virtext.h>
42 #include <asm/mce.h>
43 #include <asm/i387.h>
44 #include <asm/xcr.h>
45 #include <asm/perf_event.h>
46 #include <asm/debugreg.h>
47 #include <asm/kexec.h>
48
49 #include "trace.h"
50
51 #define __ex(x) __kvm_handle_fault_on_reboot(x)
52 #define __ex_clear(x, reg) \
53         ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)
54
55 MODULE_AUTHOR("Qumranet");
56 MODULE_LICENSE("GPL");
57
58 static const struct x86_cpu_id vmx_cpu_id[] = {
59         X86_FEATURE_MATCH(X86_FEATURE_VMX),
60         {}
61 };
62 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
63
64 static bool __read_mostly enable_vpid = 1;
65 module_param_named(vpid, enable_vpid, bool, 0444);
66
67 static bool __read_mostly flexpriority_enabled = 1;
68 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
69
70 static bool __read_mostly enable_ept = 1;
71 module_param_named(ept, enable_ept, bool, S_IRUGO);
72
73 static bool __read_mostly enable_unrestricted_guest = 1;
74 module_param_named(unrestricted_guest,
75                         enable_unrestricted_guest, bool, S_IRUGO);
76
77 static bool __read_mostly enable_ept_ad_bits = 1;
78 module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
79
80 static bool __read_mostly emulate_invalid_guest_state = true;
81 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
82
83 static bool __read_mostly vmm_exclusive = 1;
84 module_param(vmm_exclusive, bool, S_IRUGO);
85
86 static bool __read_mostly fasteoi = 1;
87 module_param(fasteoi, bool, S_IRUGO);
88
89 static bool __read_mostly enable_apicv = 1;
90 module_param(enable_apicv, bool, S_IRUGO);
91
92 static bool __read_mostly enable_shadow_vmcs = 1;
93 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
94 /*
95  * If nested=1, nested virtualization is supported, i.e., guests may use
96  * VMX and be a hypervisor for their own guests. If nested=0, guests may not
97  * use VMX instructions.
98  */
99 static bool __read_mostly nested = 0;
100 module_param(nested, bool, S_IRUGO);
101
102 #define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
103 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
104 #define KVM_VM_CR0_ALWAYS_ON                                            \
105         (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
106 #define KVM_CR4_GUEST_OWNED_BITS                                      \
107         (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
108          | X86_CR4_OSXMMEXCPT | X86_CR4_TSD)
109
110 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
111 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
112
113 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
114
115 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
116
117 /*
118  * These two parameters are used to configure the controls for Pause-Loop Exiting:
119  * ple_gap:    upper bound on the amount of time between two successive
120  *             executions of PAUSE in a loop. Also indicates whether PLE is enabled.
121  *             According to tests, this time is usually smaller than 128 cycles.
122  * ple_window: upper bound on the amount of time a guest is allowed to execute
123  *             in a PAUSE loop. Tests indicate that most spinlocks are held for
124  *             less than 2^12 cycles.
125  * Time is measured based on a counter that runs at the same rate as the TSC,
126  * refer to SDM volume 3b, sections 21.6.13 & 22.1.3.
127  */
128 #define KVM_VMX_DEFAULT_PLE_GAP           128
129 #define KVM_VMX_DEFAULT_PLE_WINDOW        4096
130 #define KVM_VMX_DEFAULT_PLE_WINDOW_GROW   2
131 #define KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK 0
132 #define KVM_VMX_DEFAULT_PLE_WINDOW_MAX    \
133                 INT_MAX / KVM_VMX_DEFAULT_PLE_WINDOW_GROW
134
135 static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
136 module_param(ple_gap, int, S_IRUGO);
137
138 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
139 module_param(ple_window, int, S_IRUGO);
140
141 /* Default doubles per-vcpu window every exit. */
142 static int ple_window_grow = KVM_VMX_DEFAULT_PLE_WINDOW_GROW;
143 module_param(ple_window_grow, int, S_IRUGO);
144
145 /* Default resets per-vcpu window every exit to ple_window. */
146 static int ple_window_shrink = KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK;
147 module_param(ple_window_shrink, int, S_IRUGO);
148
149 /* Default is to compute the maximum so we can never overflow. */
150 static int ple_window_actual_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
151 static int ple_window_max        = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
152 module_param(ple_window_max, int, S_IRUGO);
153
154 extern const ulong vmx_return;
155
156 #define NR_AUTOLOAD_MSRS 8
157 #define VMCS02_POOL_SIZE 1
158
159 struct vmcs {
160         u32 revision_id;
161         u32 abort;
162         char data[0];
163 };
164
165 /*
166  * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
167  * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
168  * loaded on this CPU (so we can clear them if the CPU goes down).
169  */
170 struct loaded_vmcs {
171         struct vmcs *vmcs;
172         int cpu;
173         int launched;
174         struct list_head loaded_vmcss_on_cpu_link;
175 };
176
177 struct shared_msr_entry {
178         unsigned index;
179         u64 data;
180         u64 mask;
181 };
182
183 /*
184  * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
185  * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
186  * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
187  * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
188  * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
189  * More than one of these structures may exist, if L1 runs multiple L2 guests.
190  * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the
191  * underlying hardware which will be used to run L2.
192  * This structure is packed to ensure that its layout is identical across
193  * machines (necessary for live migration).
194  * If there are changes in this struct, VMCS12_REVISION must be changed.
195  */
196 typedef u64 natural_width;
197 struct __packed vmcs12 {
198         /* According to the Intel spec, a VMCS region must start with the
199          * following two fields. Then follow implementation-specific data.
200          */
201         u32 revision_id;
202         u32 abort;
203
204         u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
205         u32 padding[7]; /* room for future expansion */
206
207         u64 io_bitmap_a;
208         u64 io_bitmap_b;
209         u64 msr_bitmap;
210         u64 vm_exit_msr_store_addr;
211         u64 vm_exit_msr_load_addr;
212         u64 vm_entry_msr_load_addr;
213         u64 tsc_offset;
214         u64 virtual_apic_page_addr;
215         u64 apic_access_addr;
216         u64 ept_pointer;
217         u64 guest_physical_address;
218         u64 vmcs_link_pointer;
219         u64 guest_ia32_debugctl;
220         u64 guest_ia32_pat;
221         u64 guest_ia32_efer;
222         u64 guest_ia32_perf_global_ctrl;
223         u64 guest_pdptr0;
224         u64 guest_pdptr1;
225         u64 guest_pdptr2;
226         u64 guest_pdptr3;
227         u64 guest_bndcfgs;
228         u64 host_ia32_pat;
229         u64 host_ia32_efer;
230         u64 host_ia32_perf_global_ctrl;
231         u64 padding64[8]; /* room for future expansion */
232         /*
233          * To allow migration of L1 (complete with its L2 guests) between
234          * machines of different natural widths (32 or 64 bit), we cannot have
235          * unsigned long fields with no explicit size. We use u64 (aliased
236          * natural_width) instead. Luckily, x86 is little-endian.
237          */
238         natural_width cr0_guest_host_mask;
239         natural_width cr4_guest_host_mask;
240         natural_width cr0_read_shadow;
241         natural_width cr4_read_shadow;
242         natural_width cr3_target_value0;
243         natural_width cr3_target_value1;
244         natural_width cr3_target_value2;
245         natural_width cr3_target_value3;
246         natural_width exit_qualification;
247         natural_width guest_linear_address;
248         natural_width guest_cr0;
249         natural_width guest_cr3;
250         natural_width guest_cr4;
251         natural_width guest_es_base;
252         natural_width guest_cs_base;
253         natural_width guest_ss_base;
254         natural_width guest_ds_base;
255         natural_width guest_fs_base;
256         natural_width guest_gs_base;
257         natural_width guest_ldtr_base;
258         natural_width guest_tr_base;
259         natural_width guest_gdtr_base;
260         natural_width guest_idtr_base;
261         natural_width guest_dr7;
262         natural_width guest_rsp;
263         natural_width guest_rip;
264         natural_width guest_rflags;
265         natural_width guest_pending_dbg_exceptions;
266         natural_width guest_sysenter_esp;
267         natural_width guest_sysenter_eip;
268         natural_width host_cr0;
269         natural_width host_cr3;
270         natural_width host_cr4;
271         natural_width host_fs_base;
272         natural_width host_gs_base;
273         natural_width host_tr_base;
274         natural_width host_gdtr_base;
275         natural_width host_idtr_base;
276         natural_width host_ia32_sysenter_esp;
277         natural_width host_ia32_sysenter_eip;
278         natural_width host_rsp;
279         natural_width host_rip;
280         natural_width paddingl[8]; /* room for future expansion */
281         u32 pin_based_vm_exec_control;
282         u32 cpu_based_vm_exec_control;
283         u32 exception_bitmap;
284         u32 page_fault_error_code_mask;
285         u32 page_fault_error_code_match;
286         u32 cr3_target_count;
287         u32 vm_exit_controls;
288         u32 vm_exit_msr_store_count;
289         u32 vm_exit_msr_load_count;
290         u32 vm_entry_controls;
291         u32 vm_entry_msr_load_count;
292         u32 vm_entry_intr_info_field;
293         u32 vm_entry_exception_error_code;
294         u32 vm_entry_instruction_len;
295         u32 tpr_threshold;
296         u32 secondary_vm_exec_control;
297         u32 vm_instruction_error;
298         u32 vm_exit_reason;
299         u32 vm_exit_intr_info;
300         u32 vm_exit_intr_error_code;
301         u32 idt_vectoring_info_field;
302         u32 idt_vectoring_error_code;
303         u32 vm_exit_instruction_len;
304         u32 vmx_instruction_info;
305         u32 guest_es_limit;
306         u32 guest_cs_limit;
307         u32 guest_ss_limit;
308         u32 guest_ds_limit;
309         u32 guest_fs_limit;
310         u32 guest_gs_limit;
311         u32 guest_ldtr_limit;
312         u32 guest_tr_limit;
313         u32 guest_gdtr_limit;
314         u32 guest_idtr_limit;
315         u32 guest_es_ar_bytes;
316         u32 guest_cs_ar_bytes;
317         u32 guest_ss_ar_bytes;
318         u32 guest_ds_ar_bytes;
319         u32 guest_fs_ar_bytes;
320         u32 guest_gs_ar_bytes;
321         u32 guest_ldtr_ar_bytes;
322         u32 guest_tr_ar_bytes;
323         u32 guest_interruptibility_info;
324         u32 guest_activity_state;
325         u32 guest_sysenter_cs;
326         u32 host_ia32_sysenter_cs;
327         u32 vmx_preemption_timer_value;
328         u32 padding32[7]; /* room for future expansion */
329         u16 virtual_processor_id;
330         u16 guest_es_selector;
331         u16 guest_cs_selector;
332         u16 guest_ss_selector;
333         u16 guest_ds_selector;
334         u16 guest_fs_selector;
335         u16 guest_gs_selector;
336         u16 guest_ldtr_selector;
337         u16 guest_tr_selector;
338         u16 host_es_selector;
339         u16 host_cs_selector;
340         u16 host_ss_selector;
341         u16 host_ds_selector;
342         u16 host_fs_selector;
343         u16 host_gs_selector;
344         u16 host_tr_selector;
345 };
346
347 /*
348  * VMCS12_REVISION is an arbitrary id that should be changed if the content or
349  * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
350  * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
351  */
352 #define VMCS12_REVISION 0x11e57ed0
353
354 /*
355  * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
356  * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used by
357  * the current implementation, 4K are reserved to avoid future complications.
358  */
359 #define VMCS12_SIZE 0x1000
360
361 /* Used to remember the last vmcs02 used for some recently used vmcs12s */
362 struct vmcs02_list {
363         struct list_head list;
364         gpa_t vmptr;
365         struct loaded_vmcs vmcs02;
366 };
367
368 /*
369  * The nested_vmx structure is part of vcpu_vmx, and holds information we need
370  * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
371  */
372 struct nested_vmx {
373         /* Has the level1 guest done vmxon? */
374         bool vmxon;
375         gpa_t vmxon_ptr;
376
377         /* The guest-physical address of the current VMCS L1 keeps for L2 */
378         gpa_t current_vmptr;
379         /* The host-usable pointer to the above */
380         struct page *current_vmcs12_page;
381         struct vmcs12 *current_vmcs12;
382         struct vmcs *current_shadow_vmcs;
383         /*
384          * Indicates if the shadow vmcs must be updated with the
385          * data held by vmcs12
386          */
387         bool sync_shadow_vmcs;
388
389         /* vmcs02_list cache of VMCSs recently used to run L2 guests */
390         struct list_head vmcs02_pool;
391         int vmcs02_num;
392         u64 vmcs01_tsc_offset;
393         /* L2 must run next, and mustn't decide to exit to L1. */
394         bool nested_run_pending;
395         /*
396          * Guest pages referred to in vmcs02 with host-physical pointers, so
397          * we must keep them pinned while L2 runs.
398          */
399         struct page *apic_access_page;
400         struct page *virtual_apic_page;
401         u64 msr_ia32_feature_control;
402
403         struct hrtimer preemption_timer;
404         bool preemption_timer_expired;
405
406         /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
407         u64 vmcs01_debugctl;
408 };
409
410 #define POSTED_INTR_ON  0
411 /* Posted-Interrupt Descriptor */
412 struct pi_desc {
413         u32 pir[8];     /* Posted interrupt requested */
414         u32 control;    /* bit 0 of control is outstanding notification bit */
415         u32 rsvd[7];
416 } __aligned(64);
417
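/*
 * Helpers for the posted-interrupt descriptor: atomically test-and-set or
 * test-and-clear the "outstanding notification" (ON) bit in the control word,
 * and set a vector in the posted-interrupt request (PIR) bitmap.
 */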
418 static bool pi_test_and_set_on(struct pi_desc *pi_desc)
419 {
420         return test_and_set_bit(POSTED_INTR_ON,
421                         (unsigned long *)&pi_desc->control);
422 }
423
424 static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
425 {
426         return test_and_clear_bit(POSTED_INTR_ON,
427                         (unsigned long *)&pi_desc->control);
428 }
429
430 static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
431 {
432         return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
433 }
434
435 struct vcpu_vmx {
436         struct kvm_vcpu       vcpu;
437         unsigned long         host_rsp;
438         u8                    fail;
439         bool                  nmi_known_unmasked;
440         u32                   exit_intr_info;
441         u32                   idt_vectoring_info;
442         ulong                 rflags;
443         struct shared_msr_entry *guest_msrs;
444         int                   nmsrs;
445         int                   save_nmsrs;
446         unsigned long         host_idt_base;
447 #ifdef CONFIG_X86_64
448         u64                   msr_host_kernel_gs_base;
449         u64                   msr_guest_kernel_gs_base;
450 #endif
451         u32 vm_entry_controls_shadow;
452         u32 vm_exit_controls_shadow;
453         /*
454          * loaded_vmcs points to the VMCS currently used in this vcpu. For a
455          * non-nested (L1) guest, it always points to vmcs01. For a nested
456          * guest (L2), it points to a different VMCS.
457          */
458         struct loaded_vmcs    vmcs01;
459         struct loaded_vmcs   *loaded_vmcs;
460         bool                  __launched; /* temporary, used in vmx_vcpu_run */
461         struct msr_autoload {
462                 unsigned nr;
463                 struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
464                 struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
465         } msr_autoload;
466         struct {
467                 int           loaded;
468                 u16           fs_sel, gs_sel, ldt_sel;
469 #ifdef CONFIG_X86_64
470                 u16           ds_sel, es_sel;
471 #endif
472                 int           gs_ldt_reload_needed;
473                 int           fs_reload_needed;
474                 u64           msr_host_bndcfgs;
475                 unsigned long vmcs_host_cr4;    /* May not match real cr4 */
476         } host_state;
477         struct {
478                 int vm86_active;
479                 ulong save_rflags;
480                 struct kvm_segment segs[8];
481         } rmode;
482         struct {
483                 u32 bitmask; /* 4 bits per segment (1 bit per field) */
484                 struct kvm_save_segment {
485                         u16 selector;
486                         unsigned long base;
487                         u32 limit;
488                         u32 ar;
489                 } seg[8];
490         } segment_cache;
491         int vpid;
492         bool emulation_required;
493
494         /* Support for vnmi-less CPUs */
495         int soft_vnmi_blocked;
496         ktime_t entry_time;
497         s64 vnmi_blocked_time;
498         u32 exit_reason;
499
500         bool rdtscp_enabled;
501
502         /* Posted interrupt descriptor */
503         struct pi_desc pi_desc;
504
505         /* Support for a guest hypervisor (nested VMX) */
506         struct nested_vmx nested;
507
508         /* Dynamic PLE window. */
509         int ple_window;
510         bool ple_window_dirty;
511 };
512
513 enum segment_cache_field {
514         SEG_FIELD_SEL = 0,
515         SEG_FIELD_BASE = 1,
516         SEG_FIELD_LIMIT = 2,
517         SEG_FIELD_AR = 3,
518
519         SEG_FIELD_NR = 4
520 };
521
522 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
523 {
524         return container_of(vcpu, struct vcpu_vmx, vcpu);
525 }
526
527 #define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
528 #define FIELD(number, name)     [number] = VMCS12_OFFSET(name)
529 #define FIELD64(number, name)   [number] = VMCS12_OFFSET(name), \
530                                 [number##_HIGH] = VMCS12_OFFSET(name)+4
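/*
 * For example, FIELD64(TSC_OFFSET, tsc_offset) generates two table entries:
 * one for TSC_OFFSET and one for TSC_OFFSET_HIGH, the latter pointing 4 bytes
 * into the 64-bit tsc_offset member so the high half can be accessed
 * separately.
 */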
531
532
533 static unsigned long shadow_read_only_fields[] = {
534         /*
535          * We do NOT shadow fields that are modified when L0
536          * traps and emulates any vmx instruction (e.g. VMPTRLD,
537          * VMXON...) executed by L1.
538          * For example, VM_INSTRUCTION_ERROR is read
539          * by L1 if a vmx instruction fails (part of the error path).
540          * Note the code assumes this logic. If for some reason
541          * we start shadowing these fields then we need to
542          * force a shadow sync when L0 emulates vmx instructions
543          * (e.g. force a sync if VM_INSTRUCTION_ERROR is modified
544          * by nested_vmx_failValid)
545          */
546         VM_EXIT_REASON,
547         VM_EXIT_INTR_INFO,
548         VM_EXIT_INSTRUCTION_LEN,
549         IDT_VECTORING_INFO_FIELD,
550         IDT_VECTORING_ERROR_CODE,
551         VM_EXIT_INTR_ERROR_CODE,
552         EXIT_QUALIFICATION,
553         GUEST_LINEAR_ADDRESS,
554         GUEST_PHYSICAL_ADDRESS
555 };
556 static int max_shadow_read_only_fields =
557         ARRAY_SIZE(shadow_read_only_fields);
558
559 static unsigned long shadow_read_write_fields[] = {
560         TPR_THRESHOLD,
561         GUEST_RIP,
562         GUEST_RSP,
563         GUEST_CR0,
564         GUEST_CR3,
565         GUEST_CR4,
566         GUEST_INTERRUPTIBILITY_INFO,
567         GUEST_RFLAGS,
568         GUEST_CS_SELECTOR,
569         GUEST_CS_AR_BYTES,
570         GUEST_CS_LIMIT,
571         GUEST_CS_BASE,
572         GUEST_ES_BASE,
573         GUEST_BNDCFGS,
574         CR0_GUEST_HOST_MASK,
575         CR0_READ_SHADOW,
576         CR4_READ_SHADOW,
577         TSC_OFFSET,
578         EXCEPTION_BITMAP,
579         CPU_BASED_VM_EXEC_CONTROL,
580         VM_ENTRY_EXCEPTION_ERROR_CODE,
581         VM_ENTRY_INTR_INFO_FIELD,
582         VM_ENTRY_INSTRUCTION_LEN,
583         VM_ENTRY_EXCEPTION_ERROR_CODE,
584         HOST_FS_BASE,
585         HOST_GS_BASE,
586         HOST_FS_SELECTOR,
587         HOST_GS_SELECTOR
588 };
589 static int max_shadow_read_write_fields =
590         ARRAY_SIZE(shadow_read_write_fields);
591
592 static const unsigned short vmcs_field_to_offset_table[] = {
593         FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
594         FIELD(GUEST_ES_SELECTOR, guest_es_selector),
595         FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
596         FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
597         FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
598         FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
599         FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
600         FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
601         FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
602         FIELD(HOST_ES_SELECTOR, host_es_selector),
603         FIELD(HOST_CS_SELECTOR, host_cs_selector),
604         FIELD(HOST_SS_SELECTOR, host_ss_selector),
605         FIELD(HOST_DS_SELECTOR, host_ds_selector),
606         FIELD(HOST_FS_SELECTOR, host_fs_selector),
607         FIELD(HOST_GS_SELECTOR, host_gs_selector),
608         FIELD(HOST_TR_SELECTOR, host_tr_selector),
609         FIELD64(IO_BITMAP_A, io_bitmap_a),
610         FIELD64(IO_BITMAP_B, io_bitmap_b),
611         FIELD64(MSR_BITMAP, msr_bitmap),
612         FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
613         FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
614         FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
615         FIELD64(TSC_OFFSET, tsc_offset),
616         FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
617         FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
618         FIELD64(EPT_POINTER, ept_pointer),
619         FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
620         FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
621         FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
622         FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
623         FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
624         FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
625         FIELD64(GUEST_PDPTR0, guest_pdptr0),
626         FIELD64(GUEST_PDPTR1, guest_pdptr1),
627         FIELD64(GUEST_PDPTR2, guest_pdptr2),
628         FIELD64(GUEST_PDPTR3, guest_pdptr3),
629         FIELD64(GUEST_BNDCFGS, guest_bndcfgs),
630         FIELD64(HOST_IA32_PAT, host_ia32_pat),
631         FIELD64(HOST_IA32_EFER, host_ia32_efer),
632         FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
633         FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
634         FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
635         FIELD(EXCEPTION_BITMAP, exception_bitmap),
636         FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
637         FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
638         FIELD(CR3_TARGET_COUNT, cr3_target_count),
639         FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
640         FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
641         FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
642         FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
643         FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
644         FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
645         FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
646         FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
647         FIELD(TPR_THRESHOLD, tpr_threshold),
648         FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
649         FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
650         FIELD(VM_EXIT_REASON, vm_exit_reason),
651         FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
652         FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
653         FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
654         FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
655         FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
656         FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
657         FIELD(GUEST_ES_LIMIT, guest_es_limit),
658         FIELD(GUEST_CS_LIMIT, guest_cs_limit),
659         FIELD(GUEST_SS_LIMIT, guest_ss_limit),
660         FIELD(GUEST_DS_LIMIT, guest_ds_limit),
661         FIELD(GUEST_FS_LIMIT, guest_fs_limit),
662         FIELD(GUEST_GS_LIMIT, guest_gs_limit),
663         FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
664         FIELD(GUEST_TR_LIMIT, guest_tr_limit),
665         FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
666         FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
667         FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
668         FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
669         FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
670         FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
671         FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
672         FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
673         FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
674         FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
675         FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
676         FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
677         FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
678         FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
679         FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
680         FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
681         FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
682         FIELD(CR0_READ_SHADOW, cr0_read_shadow),
683         FIELD(CR4_READ_SHADOW, cr4_read_shadow),
684         FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
685         FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
686         FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
687         FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
688         FIELD(EXIT_QUALIFICATION, exit_qualification),
689         FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
690         FIELD(GUEST_CR0, guest_cr0),
691         FIELD(GUEST_CR3, guest_cr3),
692         FIELD(GUEST_CR4, guest_cr4),
693         FIELD(GUEST_ES_BASE, guest_es_base),
694         FIELD(GUEST_CS_BASE, guest_cs_base),
695         FIELD(GUEST_SS_BASE, guest_ss_base),
696         FIELD(GUEST_DS_BASE, guest_ds_base),
697         FIELD(GUEST_FS_BASE, guest_fs_base),
698         FIELD(GUEST_GS_BASE, guest_gs_base),
699         FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
700         FIELD(GUEST_TR_BASE, guest_tr_base),
701         FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
702         FIELD(GUEST_IDTR_BASE, guest_idtr_base),
703         FIELD(GUEST_DR7, guest_dr7),
704         FIELD(GUEST_RSP, guest_rsp),
705         FIELD(GUEST_RIP, guest_rip),
706         FIELD(GUEST_RFLAGS, guest_rflags),
707         FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
708         FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
709         FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
710         FIELD(HOST_CR0, host_cr0),
711         FIELD(HOST_CR3, host_cr3),
712         FIELD(HOST_CR4, host_cr4),
713         FIELD(HOST_FS_BASE, host_fs_base),
714         FIELD(HOST_GS_BASE, host_gs_base),
715         FIELD(HOST_TR_BASE, host_tr_base),
716         FIELD(HOST_GDTR_BASE, host_gdtr_base),
717         FIELD(HOST_IDTR_BASE, host_idtr_base),
718         FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
719         FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
720         FIELD(HOST_RSP, host_rsp),
721         FIELD(HOST_RIP, host_rip),
722 };
723 static const int max_vmcs_field = ARRAY_SIZE(vmcs_field_to_offset_table);
724
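/*
 * Translate a VMCS field encoding into the offset of the corresponding member
 * of struct vmcs12, or -1 if the field is not present in the table.
 */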
725 static inline short vmcs_field_to_offset(unsigned long field)
726 {
727         if (field >= max_vmcs_field || vmcs_field_to_offset_table[field] == 0)
728                 return -1;
729         return vmcs_field_to_offset_table[field];
730 }
731
732 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
733 {
734         return to_vmx(vcpu)->nested.current_vmcs12;
735 }
736
737 static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
738 {
739         struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT);
740         if (is_error_page(page))
741                 return NULL;
742
743         return page;
744 }
745
746 static void nested_release_page(struct page *page)
747 {
748         kvm_release_page_dirty(page);
749 }
750
751 static void nested_release_page_clean(struct page *page)
752 {
753         kvm_release_page_clean(page);
754 }
755
756 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
757 static u64 construct_eptp(unsigned long root_hpa);
758 static void kvm_cpu_vmxon(u64 addr);
759 static void kvm_cpu_vmxoff(void);
760 static bool vmx_mpx_supported(void);
761 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
762 static void vmx_set_segment(struct kvm_vcpu *vcpu,
763                             struct kvm_segment *var, int seg);
764 static void vmx_get_segment(struct kvm_vcpu *vcpu,
765                             struct kvm_segment *var, int seg);
766 static bool guest_state_valid(struct kvm_vcpu *vcpu);
767 static u32 vmx_segment_access_rights(struct kvm_segment *var);
768 static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu);
769 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
770 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
771 static int alloc_identity_pagetable(struct kvm *kvm);
772
773 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
774 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
775 /*
776  * We maintain a per-CPU linked list of the VMCSs loaded on that CPU. This is needed
777  * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
778  */
779 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
780 static DEFINE_PER_CPU(struct desc_ptr, host_gdt);
781
782 static unsigned long *vmx_io_bitmap_a;
783 static unsigned long *vmx_io_bitmap_b;
784 static unsigned long *vmx_msr_bitmap_legacy;
785 static unsigned long *vmx_msr_bitmap_longmode;
786 static unsigned long *vmx_msr_bitmap_legacy_x2apic;
787 static unsigned long *vmx_msr_bitmap_longmode_x2apic;
788 static unsigned long *vmx_vmread_bitmap;
789 static unsigned long *vmx_vmwrite_bitmap;
790
791 static bool cpu_has_load_ia32_efer;
792 static bool cpu_has_load_perf_global_ctrl;
793
794 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
795 static DEFINE_SPINLOCK(vmx_vpid_lock);
796
797 static struct vmcs_config {
798         int size;
799         int order;
800         u32 revision_id;
801         u32 pin_based_exec_ctrl;
802         u32 cpu_based_exec_ctrl;
803         u32 cpu_based_2nd_exec_ctrl;
804         u32 vmexit_ctrl;
805         u32 vmentry_ctrl;
806 } vmcs_config;
807
808 static struct vmx_capability {
809         u32 ept;
810         u32 vpid;
811 } vmx_capability;
812
813 #define VMX_SEGMENT_FIELD(seg)                                  \
814         [VCPU_SREG_##seg] = {                                   \
815                 .selector = GUEST_##seg##_SELECTOR,             \
816                 .base = GUEST_##seg##_BASE,                     \
817                 .limit = GUEST_##seg##_LIMIT,                   \
818                 .ar_bytes = GUEST_##seg##_AR_BYTES,             \
819         }
820
821 static const struct kvm_vmx_segment_field {
822         unsigned selector;
823         unsigned base;
824         unsigned limit;
825         unsigned ar_bytes;
826 } kvm_vmx_segment_fields[] = {
827         VMX_SEGMENT_FIELD(CS),
828         VMX_SEGMENT_FIELD(DS),
829         VMX_SEGMENT_FIELD(ES),
830         VMX_SEGMENT_FIELD(FS),
831         VMX_SEGMENT_FIELD(GS),
832         VMX_SEGMENT_FIELD(SS),
833         VMX_SEGMENT_FIELD(TR),
834         VMX_SEGMENT_FIELD(LDTR),
835 };
836
837 static u64 host_efer;
838
839 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
840
841 /*
842  * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
843  * away by decrementing the array size.
844  */
845 static const u32 vmx_msr_index[] = {
846 #ifdef CONFIG_X86_64
847         MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
848 #endif
849         MSR_EFER, MSR_TSC_AUX, MSR_STAR,
850 };
851
852 static inline bool is_page_fault(u32 intr_info)
853 {
854         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
855                              INTR_INFO_VALID_MASK)) ==
856                 (INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
857 }
858
859 static inline bool is_no_device(u32 intr_info)
860 {
861         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
862                              INTR_INFO_VALID_MASK)) ==
863                 (INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
864 }
865
866 static inline bool is_invalid_opcode(u32 intr_info)
867 {
868         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
869                              INTR_INFO_VALID_MASK)) ==
870                 (INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
871 }
872
873 static inline bool is_external_interrupt(u32 intr_info)
874 {
875         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
876                 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
877 }
878
879 static inline bool is_machine_check(u32 intr_info)
880 {
881         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
882                              INTR_INFO_VALID_MASK)) ==
883                 (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
884 }
885
886 static inline bool cpu_has_vmx_msr_bitmap(void)
887 {
888         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
889 }
890
891 static inline bool cpu_has_vmx_tpr_shadow(void)
892 {
893         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
894 }
895
896 static inline bool vm_need_tpr_shadow(struct kvm *kvm)
897 {
898         return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
899 }
900
901 static inline bool cpu_has_secondary_exec_ctrls(void)
902 {
903         return vmcs_config.cpu_based_exec_ctrl &
904                 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
905 }
906
907 static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
908 {
909         return vmcs_config.cpu_based_2nd_exec_ctrl &
910                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
911 }
912
913 static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
914 {
915         return vmcs_config.cpu_based_2nd_exec_ctrl &
916                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
917 }
918
919 static inline bool cpu_has_vmx_apic_register_virt(void)
920 {
921         return vmcs_config.cpu_based_2nd_exec_ctrl &
922                 SECONDARY_EXEC_APIC_REGISTER_VIRT;
923 }
924
925 static inline bool cpu_has_vmx_virtual_intr_delivery(void)
926 {
927         return vmcs_config.cpu_based_2nd_exec_ctrl &
928                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
929 }
930
931 static inline bool cpu_has_vmx_posted_intr(void)
932 {
933         return vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
934 }
935
936 static inline bool cpu_has_vmx_apicv(void)
937 {
938         return cpu_has_vmx_apic_register_virt() &&
939                 cpu_has_vmx_virtual_intr_delivery() &&
940                 cpu_has_vmx_posted_intr();
941 }
942
943 static inline bool cpu_has_vmx_flexpriority(void)
944 {
945         return cpu_has_vmx_tpr_shadow() &&
946                 cpu_has_vmx_virtualize_apic_accesses();
947 }
948
949 static inline bool cpu_has_vmx_ept_execute_only(void)
950 {
951         return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
952 }
953
954 static inline bool cpu_has_vmx_eptp_uncacheable(void)
955 {
956         return vmx_capability.ept & VMX_EPTP_UC_BIT;
957 }
958
959 static inline bool cpu_has_vmx_eptp_writeback(void)
960 {
961         return vmx_capability.ept & VMX_EPTP_WB_BIT;
962 }
963
964 static inline bool cpu_has_vmx_ept_2m_page(void)
965 {
966         return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
967 }
968
969 static inline bool cpu_has_vmx_ept_1g_page(void)
970 {
971         return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
972 }
973
974 static inline bool cpu_has_vmx_ept_4levels(void)
975 {
976         return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
977 }
978
979 static inline bool cpu_has_vmx_ept_ad_bits(void)
980 {
981         return vmx_capability.ept & VMX_EPT_AD_BIT;
982 }
983
984 static inline bool cpu_has_vmx_invept_context(void)
985 {
986         return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
987 }
988
989 static inline bool cpu_has_vmx_invept_global(void)
990 {
991         return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
992 }
993
994 static inline bool cpu_has_vmx_invvpid_single(void)
995 {
996         return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
997 }
998
999 static inline bool cpu_has_vmx_invvpid_global(void)
1000 {
1001         return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
1002 }
1003
1004 static inline bool cpu_has_vmx_ept(void)
1005 {
1006         return vmcs_config.cpu_based_2nd_exec_ctrl &
1007                 SECONDARY_EXEC_ENABLE_EPT;
1008 }
1009
1010 static inline bool cpu_has_vmx_unrestricted_guest(void)
1011 {
1012         return vmcs_config.cpu_based_2nd_exec_ctrl &
1013                 SECONDARY_EXEC_UNRESTRICTED_GUEST;
1014 }
1015
1016 static inline bool cpu_has_vmx_ple(void)
1017 {
1018         return vmcs_config.cpu_based_2nd_exec_ctrl &
1019                 SECONDARY_EXEC_PAUSE_LOOP_EXITING;
1020 }
1021
1022 static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm)
1023 {
1024         return flexpriority_enabled && irqchip_in_kernel(kvm);
1025 }
1026
1027 static inline bool cpu_has_vmx_vpid(void)
1028 {
1029         return vmcs_config.cpu_based_2nd_exec_ctrl &
1030                 SECONDARY_EXEC_ENABLE_VPID;
1031 }
1032
1033 static inline bool cpu_has_vmx_rdtscp(void)
1034 {
1035         return vmcs_config.cpu_based_2nd_exec_ctrl &
1036                 SECONDARY_EXEC_RDTSCP;
1037 }
1038
1039 static inline bool cpu_has_vmx_invpcid(void)
1040 {
1041         return vmcs_config.cpu_based_2nd_exec_ctrl &
1042                 SECONDARY_EXEC_ENABLE_INVPCID;
1043 }
1044
1045 static inline bool cpu_has_virtual_nmis(void)
1046 {
1047         return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
1048 }
1049
1050 static inline bool cpu_has_vmx_wbinvd_exit(void)
1051 {
1052         return vmcs_config.cpu_based_2nd_exec_ctrl &
1053                 SECONDARY_EXEC_WBINVD_EXITING;
1054 }
1055
1056 static inline bool cpu_has_vmx_shadow_vmcs(void)
1057 {
1058         u64 vmx_msr;
1059         rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
1060         /* check if the cpu supports writing r/o exit information fields */
1061         if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
1062                 return false;
1063
1064         return vmcs_config.cpu_based_2nd_exec_ctrl &
1065                 SECONDARY_EXEC_SHADOW_VMCS;
1066 }
1067
1068 static inline bool report_flexpriority(void)
1069 {
1070         return flexpriority_enabled;
1071 }
1072
1073 static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
1074 {
1075         return vmcs12->cpu_based_vm_exec_control & bit;
1076 }
1077
1078 static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
1079 {
1080         return (vmcs12->cpu_based_vm_exec_control &
1081                         CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
1082                 (vmcs12->secondary_vm_exec_control & bit);
1083 }
1084
1085 static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
1086 {
1087         return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
1088 }
1089
1090 static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
1091 {
1092         return vmcs12->pin_based_vm_exec_control &
1093                 PIN_BASED_VMX_PREEMPTION_TIMER;
1094 }
1095
1096 static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
1097 {
1098         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
1099 }
1100
1101 static inline bool is_exception(u32 intr_info)
1102 {
1103         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1104                 == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
1105 }
1106
1107 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
1108                               u32 exit_intr_info,
1109                               unsigned long exit_qualification);
1110 static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
1111                         struct vmcs12 *vmcs12,
1112                         u32 reason, unsigned long qualification);
1113
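/* Find the index in vmx->guest_msrs[] of the entry for @msr, or -1 if absent. */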
1114 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
1115 {
1116         int i;
1117
1118         for (i = 0; i < vmx->nmsrs; ++i)
1119                 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
1120                         return i;
1121         return -1;
1122 }
1123
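/*
 * Invalidate cached translations tagged with @vpid using the INVVPID
 * instruction; a failure (CF or ZF set) lands on the ud2 and crashes.
 */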
1124 static inline void __invvpid(int ext, u16 vpid, gva_t gva)
1125 {
1126         struct {
1127                 u64 vpid : 16;
1128                 u64 rsvd : 48;
1129                 u64 gva;
1130         } operand = { vpid, 0, gva };
1131
1132         asm volatile (__ex(ASM_VMX_INVVPID)
1133                       /* CF==1 or ZF==1 --> rc = -1 */
1134                       "; ja 1f ; ud2 ; 1:"
1135                       : : "a"(&operand), "c"(ext) : "cc", "memory");
1136 }
1137
1138 static inline void __invept(int ext, u64 eptp, gpa_t gpa)
1139 {
1140         struct {
1141                 u64 eptp, gpa;
1142         } operand = {eptp, gpa};
1143
1144         asm volatile (__ex(ASM_VMX_INVEPT)
1145                         /* CF==1 or ZF==1 --> rc = -1 */
1146                         "; ja 1f ; ud2 ; 1:\n"
1147                         : : "a" (&operand), "c" (ext) : "cc", "memory");
1148 }
1149
1150 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
1151 {
1152         int i;
1153
1154         i = __find_msr_index(vmx, msr);
1155         if (i >= 0)
1156                 return &vmx->guest_msrs[i];
1157         return NULL;
1158 }
1159
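/*
 * VMCLEAR the given VMCS so it becomes inactive and its data is flushed to
 * memory; log an error if the instruction fails.
 */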
1160 static void vmcs_clear(struct vmcs *vmcs)
1161 {
1162         u64 phys_addr = __pa(vmcs);
1163         u8 error;
1164
1165         asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
1166                       : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1167                       : "cc", "memory");
1168         if (error)
1169                 printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
1170                        vmcs, phys_addr);
1171 }
1172
1173 static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
1174 {
1175         vmcs_clear(loaded_vmcs->vmcs);
1176         loaded_vmcs->cpu = -1;
1177         loaded_vmcs->launched = 0;
1178 }
1179
1180 static void vmcs_load(struct vmcs *vmcs)
1181 {
1182         u64 phys_addr = __pa(vmcs);
1183         u8 error;
1184
1185         asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
1186                         : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1187                         : "cc", "memory");
1188         if (error)
1189                 printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
1190                        vmcs, phys_addr);
1191 }
1192
1193 #ifdef CONFIG_KEXEC
1194 /*
1195  * This bitmap is used to indicate whether the vmclear
1196  * operation is enabled on each cpu. All are disabled by
1197  * default.
1198  */
1199 static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
1200
1201 static inline void crash_enable_local_vmclear(int cpu)
1202 {
1203         cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
1204 }
1205
1206 static inline void crash_disable_local_vmclear(int cpu)
1207 {
1208         cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
1209 }
1210
1211 static inline int crash_local_vmclear_enabled(int cpu)
1212 {
1213         return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
1214 }
1215
1216 static void crash_vmclear_local_loaded_vmcss(void)
1217 {
1218         int cpu = raw_smp_processor_id();
1219         struct loaded_vmcs *v;
1220
1221         if (!crash_local_vmclear_enabled(cpu))
1222                 return;
1223
1224         list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
1225                             loaded_vmcss_on_cpu_link)
1226                 vmcs_clear(v->vmcs);
1227 }
1228 #else
1229 static inline void crash_enable_local_vmclear(int cpu) { }
1230 static inline void crash_disable_local_vmclear(int cpu) { }
1231 #endif /* CONFIG_KEXEC */
1232
1233 static void __loaded_vmcs_clear(void *arg)
1234 {
1235         struct loaded_vmcs *loaded_vmcs = arg;
1236         int cpu = raw_smp_processor_id();
1237
1238         if (loaded_vmcs->cpu != cpu)
1239                 return; /* vcpu migration can race with cpu offline */
1240         if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
1241                 per_cpu(current_vmcs, cpu) = NULL;
1242         crash_disable_local_vmclear(cpu);
1243         list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
1244
1245         /*
1246          * Ensure that the removal from loaded_vmcs->loaded_vmcss_on_cpu_link
1247          * happens before loaded_vmcs->cpu is set to -1 in loaded_vmcs_init.
1248          * Otherwise, another cpu could see cpu == -1 first and then add the
1249          * vmcs to the percpu list before it is deleted here.
1250          */
1251         smp_wmb();
1252
1253         loaded_vmcs_init(loaded_vmcs);
1254         crash_enable_local_vmclear(cpu);
1255 }
1256
1257 static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
1258 {
1259         int cpu = loaded_vmcs->cpu;
1260
1261         if (cpu != -1)
1262                 smp_call_function_single(cpu,
1263                          __loaded_vmcs_clear, loaded_vmcs, 1);
1264 }
1265
1266 static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx)
1267 {
1268         if (vmx->vpid == 0)
1269                 return;
1270
1271         if (cpu_has_vmx_invvpid_single())
1272                 __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
1273 }
1274
1275 static inline void vpid_sync_vcpu_global(void)
1276 {
1277         if (cpu_has_vmx_invvpid_global())
1278                 __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
1279 }
1280
1281 static inline void vpid_sync_context(struct vcpu_vmx *vmx)
1282 {
1283         if (cpu_has_vmx_invvpid_single())
1284                 vpid_sync_vcpu_single(vmx);
1285         else
1286                 vpid_sync_vcpu_global();
1287 }
1288
1289 static inline void ept_sync_global(void)
1290 {
1291         if (cpu_has_vmx_invept_global())
1292                 __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
1293 }
1294
1295 static inline void ept_sync_context(u64 eptp)
1296 {
1297         if (enable_ept) {
1298                 if (cpu_has_vmx_invept_context())
1299                         __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
1300                 else
1301                         ept_sync_global();
1302         }
1303 }
1304
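/*
 * Accessors for the current VMCS: vmcs_readl/vmcs_writel issue VMREAD/VMWRITE
 * on a natural-width field; the 16/32/64-bit variants are built on top of them.
 */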
1305 static __always_inline unsigned long vmcs_readl(unsigned long field)
1306 {
1307         unsigned long value;
1308
1309         asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
1310                       : "=a"(value) : "d"(field) : "cc");
1311         return value;
1312 }
1313
1314 static __always_inline u16 vmcs_read16(unsigned long field)
1315 {
1316         return vmcs_readl(field);
1317 }
1318
1319 static __always_inline u32 vmcs_read32(unsigned long field)
1320 {
1321         return vmcs_readl(field);
1322 }
1323
1324 static __always_inline u64 vmcs_read64(unsigned long field)
1325 {
1326 #ifdef CONFIG_X86_64
1327         return vmcs_readl(field);
1328 #else
1329         return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
1330 #endif
1331 }
1332
1333 static noinline void vmwrite_error(unsigned long field, unsigned long value)
1334 {
1335         printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
1336                field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
1337         dump_stack();
1338 }
1339
1340 static void vmcs_writel(unsigned long field, unsigned long value)
1341 {
1342         u8 error;
1343
1344         asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
1345                        : "=q"(error) : "a"(value), "d"(field) : "cc");
1346         if (unlikely(error))
1347                 vmwrite_error(field, value);
1348 }
1349
1350 static void vmcs_write16(unsigned long field, u16 value)
1351 {
1352         vmcs_writel(field, value);
1353 }
1354
1355 static void vmcs_write32(unsigned long field, u32 value)
1356 {
1357         vmcs_writel(field, value);
1358 }
1359
1360 static void vmcs_write64(unsigned long field, u64 value)
1361 {
1362         vmcs_writel(field, value);
1363 #ifndef CONFIG_X86_64
1364         asm volatile ("");
1365         vmcs_writel(field+1, value >> 32);
1366 #endif
1367 }
1368
1369 static void vmcs_clear_bits(unsigned long field, u32 mask)
1370 {
1371         vmcs_writel(field, vmcs_readl(field) & ~mask);
1372 }
1373
1374 static void vmcs_set_bits(unsigned long field, u32 mask)
1375 {
1376         vmcs_writel(field, vmcs_readl(field) | mask);
1377 }
1378
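/*
 * VM_ENTRY_CONTROLS and VM_EXIT_CONTROLS are shadowed in vcpu_vmx so that
 * repeated writes of an unchanged value can skip the VMWRITE.
 */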
1379 static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
1380 {
1381         vmcs_write32(VM_ENTRY_CONTROLS, val);
1382         vmx->vm_entry_controls_shadow = val;
1383 }
1384
1385 static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
1386 {
1387         if (vmx->vm_entry_controls_shadow != val)
1388                 vm_entry_controls_init(vmx, val);
1389 }
1390
1391 static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
1392 {
1393         return vmx->vm_entry_controls_shadow;
1394 }
1395
1396
1397 static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
1398 {
1399         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
1400 }
1401
1402 static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
1403 {
1404         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
1405 }
1406
1407 static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
1408 {
1409         vmcs_write32(VM_EXIT_CONTROLS, val);
1410         vmx->vm_exit_controls_shadow = val;
1411 }
1412
1413 static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
1414 {
1415         if (vmx->vm_exit_controls_shadow != val)
1416                 vm_exit_controls_init(vmx, val);
1417 }
1418
1419 static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
1420 {
1421         return vmx->vm_exit_controls_shadow;
1422 }
1423
1424
1425 static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
1426 {
1427         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
1428 }
1429
1430 static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
1431 {
1432         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
1433 }
1434
1435 static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
1436 {
1437         vmx->segment_cache.bitmask = 0;
1438 }
1439
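/*
 * Return true if the given segment field is already valid in the cache.
 * Otherwise mark it as cached and return false so the caller refreshes it
 * from the VMCS.
 */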
1440 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
1441                                        unsigned field)
1442 {
1443         bool ret;
1444         u32 mask = 1 << (seg * SEG_FIELD_NR + field);
1445
1446         if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
1447                 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
1448                 vmx->segment_cache.bitmask = 0;
1449         }
1450         ret = vmx->segment_cache.bitmask & mask;
1451         vmx->segment_cache.bitmask |= mask;
1452         return ret;
1453 }
1454
1455 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
1456 {
1457         u16 *p = &vmx->segment_cache.seg[seg].selector;
1458
1459         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
1460                 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
1461         return *p;
1462 }
1463
1464 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
1465 {
1466         ulong *p = &vmx->segment_cache.seg[seg].base;
1467
1468         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
1469                 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
1470         return *p;
1471 }
1472
1473 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
1474 {
1475         u32 *p = &vmx->segment_cache.seg[seg].limit;
1476
1477         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
1478                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
1479         return *p;
1480 }
1481
1482 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
1483 {
1484         u32 *p = &vmx->segment_cache.seg[seg].ar;
1485
1486         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
1487                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
1488         return *p;
1489 }
1490
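/*
 * Recompute which exceptions must cause a VM-exit for this vcpu, based on
 * guest debugging, real-mode emulation, EPT, FPU state and, in guest mode,
 * the exception bitmap requested by L1.
 */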
1491 static void update_exception_bitmap(struct kvm_vcpu *vcpu)
1492 {
1493         u32 eb;
1494
1495         eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
1496              (1u << NM_VECTOR) | (1u << DB_VECTOR);
1497         if ((vcpu->guest_debug &
1498              (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
1499             (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
1500                 eb |= 1u << BP_VECTOR;
1501         if (to_vmx(vcpu)->rmode.vm86_active)
1502                 eb = ~0;
1503         if (enable_ept)
1504                 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
1505         if (vcpu->fpu_active)
1506                 eb &= ~(1u << NM_VECTOR);
1507
1508         /* When we are running a nested L2 guest and L1 specified for it a
1509          * certain exception bitmap, we must trap the same exceptions and pass
1510          * them to L1. When running L2, we will only handle the exceptions
1511          * specified above if L1 did not want them.
1512          */
1513         if (is_guest_mode(vcpu))
1514                 eb |= get_vmcs12(vcpu)->exception_bitmap;
1515
1516         vmcs_write32(EXCEPTION_BITMAP, eb);
1517 }
1518
1519 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
1520                 unsigned long entry, unsigned long exit)
1521 {
1522         vm_entry_controls_clearbit(vmx, entry);
1523         vm_exit_controls_clearbit(vmx, exit);
1524 }
1525
1526 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
1527 {
1528         unsigned i;
1529         struct msr_autoload *m = &vmx->msr_autoload;
1530
1531         switch (msr) {
1532         case MSR_EFER:
1533                 if (cpu_has_load_ia32_efer) {
1534                         clear_atomic_switch_msr_special(vmx,
1535                                         VM_ENTRY_LOAD_IA32_EFER,
1536                                         VM_EXIT_LOAD_IA32_EFER);
1537                         return;
1538                 }
1539                 break;
1540         case MSR_CORE_PERF_GLOBAL_CTRL:
1541                 if (cpu_has_load_perf_global_ctrl) {
1542                         clear_atomic_switch_msr_special(vmx,
1543                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1544                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
1545                         return;
1546                 }
1547                 break;
1548         }
1549
1550         for (i = 0; i < m->nr; ++i)
1551                 if (m->guest[i].index == msr)
1552                         break;
1553
1554         if (i == m->nr)
1555                 return;
1556         --m->nr;
1557         m->guest[i] = m->guest[m->nr];
1558         m->host[i] = m->host[m->nr];
1559         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
1560         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
1561 }
1562
1563 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
1564                 unsigned long entry, unsigned long exit,
1565                 unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
1566                 u64 guest_val, u64 host_val)
1567 {
1568         vmcs_write64(guest_val_vmcs, guest_val);
1569         vmcs_write64(host_val_vmcs, host_val);
1570         vm_entry_controls_setbit(vmx, entry);
1571         vm_exit_controls_setbit(vmx, exit);
1572 }
1573
1574 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
1575                                   u64 guest_val, u64 host_val)
1576 {
1577         unsigned i;
1578         struct msr_autoload *m = &vmx->msr_autoload;
1579
1580         switch (msr) {
1581         case MSR_EFER:
1582                 if (cpu_has_load_ia32_efer) {
1583                         add_atomic_switch_msr_special(vmx,
1584                                         VM_ENTRY_LOAD_IA32_EFER,
1585                                         VM_EXIT_LOAD_IA32_EFER,
1586                                         GUEST_IA32_EFER,
1587                                         HOST_IA32_EFER,
1588                                         guest_val, host_val);
1589                         return;
1590                 }
1591                 break;
1592         case MSR_CORE_PERF_GLOBAL_CTRL:
1593                 if (cpu_has_load_perf_global_ctrl) {
1594                         add_atomic_switch_msr_special(vmx,
1595                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1596                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
1597                                         GUEST_IA32_PERF_GLOBAL_CTRL,
1598                                         HOST_IA32_PERF_GLOBAL_CTRL,
1599                                         guest_val, host_val);
1600                         return;
1601                 }
1602                 break;
1603         }
1604
1605         for (i = 0; i < m->nr; ++i)
1606                 if (m->guest[i].index == msr)
1607                         break;
1608
1609         if (i == NR_AUTOLOAD_MSRS) {
1610                 printk_once(KERN_WARNING "Not enough msr switch entries. "
1611                                 "Can't add msr %x\n", msr);
1612                 return;
1613         } else if (i == m->nr) {
1614                 ++m->nr;
1615                 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
1616                 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
1617         }
1618
1619         m->guest[i].index = msr;
1620         m->guest[i].value = guest_val;
1621         m->host[i].index = msr;
1622         m->host[i].value = host_val;
1623 }
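/*
 * Usage sketch: update_transition_efer() below calls
 * add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer) when EFER.NX
 * differs between guest and host under EPT; on CPUs that support the
 * "load IA32_EFER" VM-entry/VM-exit controls the special path above is
 * taken instead of consuming one of the NR_AUTOLOAD_MSRS array slots.
 */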
1624
1625 static void reload_tss(void)
1626 {
1627         /*
1628          * VT restores TR but not its size.  Useless.
1629          */
1630         struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
1631         struct desc_struct *descs;
1632
1633         descs = (void *)gdt->address;
1634         descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
1635         load_TR_desc();
1636 }
1637
1638 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
1639 {
1640         u64 guest_efer;
1641         u64 ignore_bits;
1642
1643         guest_efer = vmx->vcpu.arch.efer;
1644
1645         /*
1646          * NX is emulated; LMA and LME handled by hardware; SCE meaningless
1647          * outside long mode
1648          */
1649         ignore_bits = EFER_NX | EFER_SCE;
1650 #ifdef CONFIG_X86_64
1651         ignore_bits |= EFER_LMA | EFER_LME;
1652         /* SCE is meaningful only in long mode on Intel */
1653         if (guest_efer & EFER_LMA)
1654                 ignore_bits &= ~(u64)EFER_SCE;
1655 #endif
1656         guest_efer &= ~ignore_bits;
1657         guest_efer |= host_efer & ignore_bits;
1658         vmx->guest_msrs[efer_offset].data = guest_efer;
1659         vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
1660
1661         clear_atomic_switch_msr(vmx, MSR_EFER);
1662         /* With EPT, NX can't be emulated, so it must be switched atomically */
1663         if (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX)) {
1664                 guest_efer = vmx->vcpu.arch.efer;
1665                 if (!(guest_efer & EFER_LMA))
1666                         guest_efer &= ~EFER_LME;
1667                 add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer);
1668                 return false;
1669         }
1670
1671         return true;
1672 }
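/*
 * Net effect (a sketch of the intent): guest_msrs[efer_offset] holds the
 * guest EFER with the ignored bits forced to the host's values, and the
 * ~ignore_bits mask tells the shared-MSR machinery that only the remaining
 * bits ever require a real WRMSR.  The atomic-switch path above is reserved
 * for the EPT case where EFER.NX genuinely differs between guest and host.
 */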
1673
1674 static unsigned long segment_base(u16 selector)
1675 {
1676         struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
1677         struct desc_struct *d;
1678         unsigned long table_base;
1679         unsigned long v;
1680
1681         if (!(selector & ~3))
1682                 return 0;
1683
1684         table_base = gdt->address;
1685
1686         if (selector & 4) {           /* from ldt */
1687                 u16 ldt_selector = kvm_read_ldt();
1688
1689                 if (!(ldt_selector & ~3))
1690                         return 0;
1691
1692                 table_base = segment_base(ldt_selector);
1693         }
1694         d = (struct desc_struct *)(table_base + (selector & ~7));
1695         v = get_desc_base(d);
1696 #ifdef CONFIG_X86_64
1697         if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
1698                 v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
1699 #endif
1700         return v;
1701 }
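/*
 * Example: selector 0x2b (index 5, TI = 1, RPL = 3) is looked up at byte
 * offset 0x28 (0x2b & ~7) of the LDT, whose own base is found by a single
 * recursive call on the LDT selector, which itself lives in the GDT.
 */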
1702
1703 static inline unsigned long kvm_read_tr_base(void)
1704 {
1705         u16 tr;
1706         asm("str %0" : "=g"(tr));
1707         return segment_base(tr);
1708 }
1709
1710 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
1711 {
1712         struct vcpu_vmx *vmx = to_vmx(vcpu);
1713         int i;
1714
1715         if (vmx->host_state.loaded)
1716                 return;
1717
1718         vmx->host_state.loaded = 1;
1719         /*
1720          * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
1721          * allow segment selectors with cpl > 0 or ti == 1.
1722          */
1723         vmx->host_state.ldt_sel = kvm_read_ldt();
1724         vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
1725         savesegment(fs, vmx->host_state.fs_sel);
1726         if (!(vmx->host_state.fs_sel & 7)) {
1727                 vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
1728                 vmx->host_state.fs_reload_needed = 0;
1729         } else {
1730                 vmcs_write16(HOST_FS_SELECTOR, 0);
1731                 vmx->host_state.fs_reload_needed = 1;
1732         }
1733         savesegment(gs, vmx->host_state.gs_sel);
1734         if (!(vmx->host_state.gs_sel & 7))
1735                 vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
1736         else {
1737                 vmcs_write16(HOST_GS_SELECTOR, 0);
1738                 vmx->host_state.gs_ldt_reload_needed = 1;
1739         }
1740
1741 #ifdef CONFIG_X86_64
1742         savesegment(ds, vmx->host_state.ds_sel);
1743         savesegment(es, vmx->host_state.es_sel);
1744 #endif
1745
1746 #ifdef CONFIG_X86_64
1747         vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
1748         vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
1749 #else
1750         vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
1751         vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
1752 #endif
1753
1754 #ifdef CONFIG_X86_64
1755         rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
1756         if (is_long_mode(&vmx->vcpu))
1757                 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1758 #endif
1759         if (boot_cpu_has(X86_FEATURE_MPX))
1760                 rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
1761         for (i = 0; i < vmx->save_nmsrs; ++i)
1762                 kvm_set_shared_msr(vmx->guest_msrs[i].index,
1763                                    vmx->guest_msrs[i].data,
1764                                    vmx->guest_msrs[i].mask);
1765 }
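/*
 * Example: a host fs selector of 0x63 fails the (sel & 7) == 0 test above
 * (TI/RPL bits set), so HOST_FS_SELECTOR is written as 0 and
 * fs_reload_needed is flagged; __vmx_load_host_state() later restores the
 * real selector with loadsegment() instead of relying on the VM exit.
 */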
1766
1767 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
1768 {
1769         if (!vmx->host_state.loaded)
1770                 return;
1771
1772         ++vmx->vcpu.stat.host_state_reload;
1773         vmx->host_state.loaded = 0;
1774 #ifdef CONFIG_X86_64
1775         if (is_long_mode(&vmx->vcpu))
1776                 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1777 #endif
1778         if (vmx->host_state.gs_ldt_reload_needed) {
1779                 kvm_load_ldt(vmx->host_state.ldt_sel);
1780 #ifdef CONFIG_X86_64
1781                 load_gs_index(vmx->host_state.gs_sel);
1782 #else
1783                 loadsegment(gs, vmx->host_state.gs_sel);
1784 #endif
1785         }
1786         if (vmx->host_state.fs_reload_needed)
1787                 loadsegment(fs, vmx->host_state.fs_sel);
1788 #ifdef CONFIG_X86_64
1789         if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
1790                 loadsegment(ds, vmx->host_state.ds_sel);
1791                 loadsegment(es, vmx->host_state.es_sel);
1792         }
1793 #endif
1794         reload_tss();
1795 #ifdef CONFIG_X86_64
1796         wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
1797 #endif
1798         if (vmx->host_state.msr_host_bndcfgs)
1799                 wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
1800         /*
1801          * If the FPU is not active (through the host task or
1802          * the guest vcpu), then restore the cr0.TS bit.
1803          */
1804         if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded)
1805                 stts();
1806         load_gdt(this_cpu_ptr(&host_gdt));
1807 }
1808
1809 static void vmx_load_host_state(struct vcpu_vmx *vmx)
1810 {
1811         preempt_disable();
1812         __vmx_load_host_state(vmx);
1813         preempt_enable();
1814 }
1815
1816 /*
1817  * Switches to the specified vcpu, until a matching vcpu_put(); assumes
1818  * the vcpu mutex is already taken.
1819  */
1820 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1821 {
1822         struct vcpu_vmx *vmx = to_vmx(vcpu);
1823         u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
1824
1825         if (!vmm_exclusive)
1826                 kvm_cpu_vmxon(phys_addr);
1827         else if (vmx->loaded_vmcs->cpu != cpu)
1828                 loaded_vmcs_clear(vmx->loaded_vmcs);
1829
1830         if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
1831                 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
1832                 vmcs_load(vmx->loaded_vmcs->vmcs);
1833         }
1834
1835         if (vmx->loaded_vmcs->cpu != cpu) {
1836                 struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
1837                 unsigned long sysenter_esp;
1838
1839                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1840                 local_irq_disable();
1841                 crash_disable_local_vmclear(cpu);
1842
1843                 /*
1844                  * The read of loaded_vmcs->cpu must come before fetching
1845                  * loaded_vmcs->loaded_vmcss_on_cpu_link.
1846                  * See the comments in __loaded_vmcs_clear().
1847                  */
1848                 smp_rmb();
1849
1850                 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
1851                          &per_cpu(loaded_vmcss_on_cpu, cpu));
1852                 crash_enable_local_vmclear(cpu);
1853                 local_irq_enable();
1854
1855                 /*
1856                  * Linux uses per-cpu TSS and GDT, so set these when switching
1857                  * processors.
1858                  */
1859                 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
1860                 vmcs_writel(HOST_GDTR_BASE, gdt->address);   /* 22.2.4 */
1861
1862                 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
1863                 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
1864                 vmx->loaded_vmcs->cpu = cpu;
1865         }
1866 }
1867
1868 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
1869 {
1870         __vmx_load_host_state(to_vmx(vcpu));
1871         if (!vmm_exclusive) {
1872                 __loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs);
1873                 vcpu->cpu = -1;
1874                 kvm_cpu_vmxoff();
1875         }
1876 }
1877
1878 static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
1879 {
1880         ulong cr0;
1881
1882         if (vcpu->fpu_active)
1883                 return;
1884         vcpu->fpu_active = 1;
1885         cr0 = vmcs_readl(GUEST_CR0);
1886         cr0 &= ~(X86_CR0_TS | X86_CR0_MP);
1887         cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP);
1888         vmcs_writel(GUEST_CR0, cr0);
1889         update_exception_bitmap(vcpu);
1890         vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
1891         if (is_guest_mode(vcpu))
1892                 vcpu->arch.cr0_guest_owned_bits &=
1893                         ~get_vmcs12(vcpu)->cr0_guest_host_mask;
1894         vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
1895 }
1896
1897 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
1898
1899 /*
1900  * Return the cr0 value that a nested guest would read. This is a combination
1901  * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
1902  * its hypervisor (cr0_read_shadow).
1903  */
1904 static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
1905 {
1906         return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
1907                 (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
1908 }
1909 static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
1910 {
1911         return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
1912                 (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
1913 }
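/*
 * Worked example: if cr0_guest_host_mask covers only TS, guest_cr0.TS = 1
 * (because L0 trapped the FPU) and cr0_read_shadow.TS = 0, then
 * nested_read_cr0() returns TS = 0 - the value L2 should observe, since a
 * shadowed bit is read from the read shadow rather than the real CR0.
 */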
1914
1915 static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
1916 {
1917         /* Note that there is no vcpu->fpu_active = 0 here. The caller must
1918          * set this *before* calling this function.
1919          */
1920         vmx_decache_cr0_guest_bits(vcpu);
1921         vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP);
1922         update_exception_bitmap(vcpu);
1923         vcpu->arch.cr0_guest_owned_bits = 0;
1924         vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
1925         if (is_guest_mode(vcpu)) {
1926                 /*
1927                  * L1's specified read shadow might not contain the TS bit,
1928                  * so now that we turned on shadowing of this bit, we need to
1929                  * set this bit of the shadow. Like in nested_vmx_run we need
1930                  * nested_read_cr0(vmcs12), but vmcs12->guest_cr0 is not yet
1931                  * up-to-date here because we just decached cr0.TS (and we'll
1932                  * only update vmcs12->guest_cr0 on nested exit).
1933                  */
1934                 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1935                 vmcs12->guest_cr0 = (vmcs12->guest_cr0 & ~X86_CR0_TS) |
1936                         (vcpu->arch.cr0 & X86_CR0_TS);
1937                 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
1938         } else
1939                 vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
1940 }
1941
1942 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
1943 {
1944         unsigned long rflags, save_rflags;
1945
1946         if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
1947                 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
1948                 rflags = vmcs_readl(GUEST_RFLAGS);
1949                 if (to_vmx(vcpu)->rmode.vm86_active) {
1950                         rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
1951                         save_rflags = to_vmx(vcpu)->rmode.save_rflags;
1952                         rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
1953                 }
1954                 to_vmx(vcpu)->rflags = rflags;
1955         }
1956         return to_vmx(vcpu)->rflags;
1957 }
1958
1959 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1960 {
1961         __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
1962         to_vmx(vcpu)->rflags = rflags;
1963         if (to_vmx(vcpu)->rmode.vm86_active) {
1964                 to_vmx(vcpu)->rmode.save_rflags = rflags;
1965                 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1966         }
1967         vmcs_writel(GUEST_RFLAGS, rflags);
1968 }
1969
1970 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
1971 {
1972         u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1973         int ret = 0;
1974
1975         if (interruptibility & GUEST_INTR_STATE_STI)
1976                 ret |= KVM_X86_SHADOW_INT_STI;
1977         if (interruptibility & GUEST_INTR_STATE_MOV_SS)
1978                 ret |= KVM_X86_SHADOW_INT_MOV_SS;
1979
1980         return ret;
1981 }
1982
1983 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
1984 {
1985         u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1986         u32 interruptibility = interruptibility_old;
1987
1988         interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
1989
1990         if (mask & KVM_X86_SHADOW_INT_MOV_SS)
1991                 interruptibility |= GUEST_INTR_STATE_MOV_SS;
1992         else if (mask & KVM_X86_SHADOW_INT_STI)
1993                 interruptibility |= GUEST_INTR_STATE_STI;
1994
1995         if (interruptibility != interruptibility_old)
1996                 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
1997 }
1998
1999 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
2000 {
2001         unsigned long rip;
2002
2003         rip = kvm_rip_read(vcpu);
2004         rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
2005         kvm_rip_write(vcpu, rip);
2006
2007         /* skipping an emulated instruction also counts */
2008         vmx_set_interrupt_shadow(vcpu, 0);
2009 }
2010
2011 /*
2012  * KVM wants to inject page-faults which it got to the guest. This function
2013  * checks whether in a nested guest, we need to inject them to L1 or L2.
2014  */
2015 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
2016 {
2017         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2018
2019         if (!(vmcs12->exception_bitmap & (1u << nr)))
2020                 return 0;
2021
2022         nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
2023                           vmcs_read32(VM_EXIT_INTR_INFO),
2024                           vmcs_readl(EXIT_QUALIFICATION));
2025         return 1;
2026 }
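/*
 * Example: a #PF (vector 14) with bit 14 set in vmcs12->exception_bitmap
 * takes the nested VM exit above so that L1 can handle it; if L1 did not
 * ask for #PF intercepts, 0 is returned and the caller injects the fault
 * straight into L2.
 */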
2027
2028 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
2029                                 bool has_error_code, u32 error_code,
2030                                 bool reinject)
2031 {
2032         struct vcpu_vmx *vmx = to_vmx(vcpu);
2033         u32 intr_info = nr | INTR_INFO_VALID_MASK;
2034
2035         if (!reinject && is_guest_mode(vcpu) &&
2036             nested_vmx_check_exception(vcpu, nr))
2037                 return;
2038
2039         if (has_error_code) {
2040                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
2041                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
2042         }
2043
2044         if (vmx->rmode.vm86_active) {
2045                 int inc_eip = 0;
2046                 if (kvm_exception_is_soft(nr))
2047                         inc_eip = vcpu->arch.event_exit_inst_len;
2048                 if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
2049                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2050                 return;
2051         }
2052
2053         if (kvm_exception_is_soft(nr)) {
2054                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2055                              vmx->vcpu.arch.event_exit_inst_len);
2056                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
2057         } else
2058                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
2059
2060         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
2061 }
2062
2063 static bool vmx_rdtscp_supported(void)
2064 {
2065         return cpu_has_vmx_rdtscp();
2066 }
2067
2068 static bool vmx_invpcid_supported(void)
2069 {
2070         return cpu_has_vmx_invpcid() && enable_ept;
2071 }
2072
2073 /*
2074  * Swap MSR entry in host/guest MSR entry array.
2075  */
2076 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
2077 {
2078         struct shared_msr_entry tmp;
2079
2080         tmp = vmx->guest_msrs[to];
2081         vmx->guest_msrs[to] = vmx->guest_msrs[from];
2082         vmx->guest_msrs[from] = tmp;
2083 }
2084
2085 static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
2086 {
2087         unsigned long *msr_bitmap;
2088
2089         if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) {
2090                 if (is_long_mode(vcpu))
2091                         msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
2092                 else
2093                         msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
2094         } else {
2095                 if (is_long_mode(vcpu))
2096                         msr_bitmap = vmx_msr_bitmap_longmode;
2097                 else
2098                         msr_bitmap = vmx_msr_bitmap_legacy;
2099         }
2100
2101         vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
2102 }
2103
2104 /*
2105  * Set up the vmcs to automatically save and restore system
2106  * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
2107  * mode, as fiddling with msrs is very expensive.
2108  */
2109 static void setup_msrs(struct vcpu_vmx *vmx)
2110 {
2111         int save_nmsrs, index;
2112
2113         save_nmsrs = 0;
2114 #ifdef CONFIG_X86_64
2115         if (is_long_mode(&vmx->vcpu)) {
2116                 index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
2117                 if (index >= 0)
2118                         move_msr_up(vmx, index, save_nmsrs++);
2119                 index = __find_msr_index(vmx, MSR_LSTAR);
2120                 if (index >= 0)
2121                         move_msr_up(vmx, index, save_nmsrs++);
2122                 index = __find_msr_index(vmx, MSR_CSTAR);
2123                 if (index >= 0)
2124                         move_msr_up(vmx, index, save_nmsrs++);
2125                 index = __find_msr_index(vmx, MSR_TSC_AUX);
2126                 if (index >= 0 && vmx->rdtscp_enabled)
2127                         move_msr_up(vmx, index, save_nmsrs++);
2128                 /*
2129                  * MSR_STAR is only needed on long mode guests, and only
2130                  * if efer.sce is enabled.
2131                  */
2132                 index = __find_msr_index(vmx, MSR_STAR);
2133                 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
2134                         move_msr_up(vmx, index, save_nmsrs++);
2135         }
2136 #endif
2137         index = __find_msr_index(vmx, MSR_EFER);
2138         if (index >= 0 && update_transition_efer(vmx, index))
2139                 move_msr_up(vmx, index, save_nmsrs++);
2140
2141         vmx->save_nmsrs = save_nmsrs;
2142
2143         if (cpu_has_vmx_msr_bitmap())
2144                 vmx_set_msr_bitmap(&vmx->vcpu);
2145 }
2146
2147 /*
2148  * reads and returns guest's timestamp counter "register"
2149  * guest_tsc = host_tsc + tsc_offset    -- 21.3
2150  */
2151 static u64 guest_read_tsc(void)
2152 {
2153         u64 host_tsc, tsc_offset;
2154
2155         rdtscll(host_tsc);
2156         tsc_offset = vmcs_read64(TSC_OFFSET);
2157         return host_tsc + tsc_offset;
2158 }
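/*
 * E.g. with host_tsc = 1000000 and TSC_OFFSET = -200000 the guest reads a
 * TSC of 800000; the offset is simply whatever constant makes the sum equal
 * the TSC value the guest is supposed to observe.
 */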
2159
2160 /*
2161  * Like guest_read_tsc, but always returns L1's notion of the timestamp
2162  * counter, even if a nested guest (L2) is currently running.
2163  */
2164 static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
2165 {
2166         u64 tsc_offset;
2167
2168         tsc_offset = is_guest_mode(vcpu) ?
2169                 to_vmx(vcpu)->nested.vmcs01_tsc_offset :
2170                 vmcs_read64(TSC_OFFSET);
2171         return host_tsc + tsc_offset;
2172 }
2173
2174 /*
2175  * Engage any workarounds for mis-matched TSC rates.  Currently limited to
2176  * software catchup for faster rates on slower CPUs.
2177  */
2178 static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
2179 {
2180         if (!scale)
2181                 return;
2182
2183         if (user_tsc_khz > tsc_khz) {
2184                 vcpu->arch.tsc_catchup = 1;
2185                 vcpu->arch.tsc_always_catchup = 1;
2186         } else
2187                 WARN(1, "user requested TSC rate below hardware speed\n");
2188 }
2189
2190 static u64 vmx_read_tsc_offset(struct kvm_vcpu *vcpu)
2191 {
2192         return vmcs_read64(TSC_OFFSET);
2193 }
2194
2195 /*
2196  * writes 'offset' into guest's timestamp counter offset register
2197  */
2198 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
2199 {
2200         if (is_guest_mode(vcpu)) {
2201                 /*
2202                  * We're here if L1 chose not to trap WRMSR to TSC. According
2203                  * to the spec, this should set L1's TSC; The offset that L1
2204                  * to the spec, this should set L1's TSC; the offset that L1
2205                  * to the newly set TSC to get L2's TSC.
2206                  */
2207                 struct vmcs12 *vmcs12;
2208                 to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset;
2209                 /* recalculate vmcs02.TSC_OFFSET: */
2210                 vmcs12 = get_vmcs12(vcpu);
2211                 vmcs_write64(TSC_OFFSET, offset +
2212                         (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
2213                          vmcs12->tsc_offset : 0));
2214         } else {
2215                 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
2216                                            vmcs_read64(TSC_OFFSET), offset);
2217                 vmcs_write64(TSC_OFFSET, offset);
2218         }
2219 }
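/*
 * Numeric sketch (assuming L1 uses TSC offsetting for L2): if L1's new
 * offset is 500 and vmcs12->tsc_offset is 100, vmcs02 gets TSC_OFFSET =
 * 600, so L2 observes host_tsc + 600 while L1's own notion, recorded in
 * vmcs01_tsc_offset, stays at host_tsc + 500.
 */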
2220
2221 static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
2222 {
2223         u64 offset = vmcs_read64(TSC_OFFSET);
2224
2225         vmcs_write64(TSC_OFFSET, offset + adjustment);
2226         if (is_guest_mode(vcpu)) {
2227                 /* Even when running L2, the adjustment needs to apply to L1 */
2228                 to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment;
2229         } else
2230                 trace_kvm_write_tsc_offset(vcpu->vcpu_id, offset,
2231                                            offset + adjustment);
2232 }
2233
2234 static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
2235 {
2236         return target_tsc - native_read_tsc();
2237 }
2238
2239 static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
2240 {
2241         struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
2242         return best && (best->ecx & (1 << (X86_FEATURE_VMX & 31)));
2243 }
2244
2245 /*
2246  * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
2247  * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
2248  * all guests if the "nested" module option is off, and can also be disabled
2249  * for a single guest by disabling its VMX cpuid bit.
2250  */
2251 static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
2252 {
2253         return nested && guest_cpuid_has_vmx(vcpu);
2254 }
2255
2256 /*
2257  * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
2258  * returned for the various VMX controls MSRs when nested VMX is enabled.
2259  * The same values should also be used to verify that vmcs12 control fields are
2260  * valid during nested entry from L1 to L2.
2261  * Each of these control msrs has a low and high 32-bit half: A low bit is on
2262  * if the corresponding bit in the (32-bit) control field *must* be on, and a
2263  * bit in the high half is on if the corresponding bit in the control field
2264  * may be on. See also vmx_control_verify().
2265  * TODO: allow these variables to be modified (downgraded) by module options
2266  * or other means.
2267  */
2268 static u32 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high;
2269 static u32 nested_vmx_true_procbased_ctls_low;
2270 static u32 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high;
2271 static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high;
2272 static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high;
2273 static u32 nested_vmx_true_exit_ctls_low;
2274 static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high;
2275 static u32 nested_vmx_true_entry_ctls_low;
2276 static u32 nested_vmx_misc_low, nested_vmx_misc_high;
2277 static u32 nested_vmx_ept_caps;
2278 static __init void nested_vmx_setup_ctls_msrs(void)
2279 {
2280         /*
2281          * Note that as a general rule, the high half of the MSRs (bits in
2282          * the control fields which may be 1) should be initialized by the
2283          * intersection of the underlying hardware's MSR (i.e., features which
2284          * can be supported) and the list of features we want to expose -
2285          * because they are known to be properly supported in our code.
2286          * Also, usually, the low half of the MSRs (bits which must be 1) can
2287          * be set to 0, meaning that L1 may turn off any of these bits. The
2288          * reason is that if one of these bits is needed by L0, it will be set
2289          * in vmcs01, and prepare_vmcs02(), which bitwise-ORs the control
2290          * fields of vmcs01 and vmcs12, keeps it set in vmcs02 even if L1
2291          * cleared it - and nested_vmx_exit_handled() will not pass such exits to L1.
2292          * These rules have exceptions below.
2293          */
2294
2295         /* pin-based controls */
2296         rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
2297               nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high);
2298         nested_vmx_pinbased_ctls_low |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
2299         nested_vmx_pinbased_ctls_high &= PIN_BASED_EXT_INTR_MASK |
2300                 PIN_BASED_NMI_EXITING | PIN_BASED_VIRTUAL_NMIS;
2301         nested_vmx_pinbased_ctls_high |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
2302                 PIN_BASED_VMX_PREEMPTION_TIMER;
2303
2304         /* exit controls */
2305         rdmsr(MSR_IA32_VMX_EXIT_CTLS,
2306                 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high);
2307         nested_vmx_exit_ctls_low = VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
2308
2309         nested_vmx_exit_ctls_high &=
2310 #ifdef CONFIG_X86_64
2311                 VM_EXIT_HOST_ADDR_SPACE_SIZE |
2312 #endif
2313                 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
2314         nested_vmx_exit_ctls_high |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
2315                 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
2316                 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
2317
2318         if (vmx_mpx_supported())
2319                 nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
2320
2321         /* We support free control of debug control saving. */
2322         nested_vmx_true_exit_ctls_low = nested_vmx_exit_ctls_low &
2323                 ~VM_EXIT_SAVE_DEBUG_CONTROLS;
2324
2325         /* entry controls */
2326         rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
2327                 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high);
2328         nested_vmx_entry_ctls_low = VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
2329         nested_vmx_entry_ctls_high &=
2330 #ifdef CONFIG_X86_64
2331                 VM_ENTRY_IA32E_MODE |
2332 #endif
2333                 VM_ENTRY_LOAD_IA32_PAT;
2334         nested_vmx_entry_ctls_high |= (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR |
2335                                        VM_ENTRY_LOAD_IA32_EFER);
2336         if (vmx_mpx_supported())
2337                 nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
2338
2339         /* We support free control of debug control loading. */
2340         nested_vmx_true_entry_ctls_low = nested_vmx_entry_ctls_low &
2341                 ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
2342
2343         /* cpu-based controls */
2344         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
2345                 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high);
2346         nested_vmx_procbased_ctls_low = CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
2347         nested_vmx_procbased_ctls_high &=
2348                 CPU_BASED_VIRTUAL_INTR_PENDING |
2349                 CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
2350                 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
2351                 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
2352                 CPU_BASED_CR3_STORE_EXITING |
2353 #ifdef CONFIG_X86_64
2354                 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
2355 #endif
2356                 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
2357                 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
2358                 CPU_BASED_RDPMC_EXITING | CPU_BASED_RDTSC_EXITING |
2359                 CPU_BASED_PAUSE_EXITING | CPU_BASED_TPR_SHADOW |
2360                 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
2361         /*
2362          * We can allow some features even when not supported by the
2363          * hardware. For example, L1 can specify an MSR bitmap - and we
2364          * can use it to avoid exits to L1 - even when L0 runs L2
2365          * without MSR bitmaps.
2366          */
2367         nested_vmx_procbased_ctls_high |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
2368                 CPU_BASED_USE_MSR_BITMAPS;
2369
2370         /* We support free control of CR3 access interception. */
2371         nested_vmx_true_procbased_ctls_low = nested_vmx_procbased_ctls_low &
2372                 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
2373
2374         /* secondary cpu-based controls */
2375         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
2376                 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high);
2377         nested_vmx_secondary_ctls_low = 0;
2378         nested_vmx_secondary_ctls_high &=
2379                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2380                 SECONDARY_EXEC_UNRESTRICTED_GUEST |
2381                 SECONDARY_EXEC_WBINVD_EXITING;
2382
2383         if (enable_ept) {
2384                 /* nested EPT: emulate EPT also to L1 */
2385                 nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT;
2386                 nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
2387                          VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
2388                          VMX_EPT_INVEPT_BIT;
2389                 nested_vmx_ept_caps &= vmx_capability.ept;
2390                 /*
2391                  * For nested guests, we don't do anything specific
2392                  * for single context invalidation. Hence, only advertise
2393                  * support for global context invalidation.
2394                  */
2395                 nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT;
2396         } else
2397                 nested_vmx_ept_caps = 0;
2398
2399         /* miscellaneous data */
2400         rdmsr(MSR_IA32_VMX_MISC, nested_vmx_misc_low, nested_vmx_misc_high);
2401         nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA;
2402         nested_vmx_misc_low |= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
2403                 VMX_MISC_ACTIVITY_HLT;
2404         nested_vmx_misc_high = 0;
2405 }
2406
2407 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
2408 {
2409         /*
2410          * A bit that is 0 in high must be 0 in control; a bit that is 1 in low must be 1.
2411          */
2412         return ((control & high) | low) == control;
2413 }
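/*
 * Worked example with hypothetical halves low = 0x16, high = 0x5f:
 * control = 0x56 passes, since (0x56 & 0x5f) | 0x16 == 0x56;
 * control = 0x14 fails because required bit 1 (from low) is clear;
 * control = 0x96 fails because bit 7 is not allowed by high.
 */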
2414
2415 static inline u64 vmx_control_msr(u32 low, u32 high)
2416 {
2417         return low | ((u64)high << 32);
2418 }
2419
2420 /* Returns 0 on success, non-0 otherwise. */
2421 static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
2422 {
2423         switch (msr_index) {
2424         case MSR_IA32_VMX_BASIC:
2425                 /*
2426                  * This MSR reports some information about VMX support. We
2427                  * should return information about the VMX we emulate for the
2428                  * guest, and the VMCS structure we give it - not about the
2429                  * VMX support of the underlying hardware.
2430                  */
2431                 *pdata = VMCS12_REVISION | VMX_BASIC_TRUE_CTLS |
2432                            ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
2433                            (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
2434                 break;
2435         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
2436         case MSR_IA32_VMX_PINBASED_CTLS:
2437                 *pdata = vmx_control_msr(nested_vmx_pinbased_ctls_low,
2438                                         nested_vmx_pinbased_ctls_high);
2439                 break;
2440         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
2441                 *pdata = vmx_control_msr(nested_vmx_true_procbased_ctls_low,
2442                                         nested_vmx_procbased_ctls_high);
2443                 break;
2444         case MSR_IA32_VMX_PROCBASED_CTLS:
2445                 *pdata = vmx_control_msr(nested_vmx_procbased_ctls_low,
2446                                         nested_vmx_procbased_ctls_high);
2447                 break;
2448         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
2449                 *pdata = vmx_control_msr(nested_vmx_true_exit_ctls_low,
2450                                         nested_vmx_exit_ctls_high);
2451                 break;
2452         case MSR_IA32_VMX_EXIT_CTLS:
2453                 *pdata = vmx_control_msr(nested_vmx_exit_ctls_low,
2454                                         nested_vmx_exit_ctls_high);
2455                 break;
2456         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
2457                 *pdata = vmx_control_msr(nested_vmx_true_entry_ctls_low,
2458                                         nested_vmx_entry_ctls_high);
2459                 break;
2460         case MSR_IA32_VMX_ENTRY_CTLS:
2461                 *pdata = vmx_control_msr(nested_vmx_entry_ctls_low,
2462                                         nested_vmx_entry_ctls_high);
2463                 break;
2464         case MSR_IA32_VMX_MISC:
2465                 *pdata = vmx_control_msr(nested_vmx_misc_low,
2466                                          nested_vmx_misc_high);
2467                 break;
2468         /*
2469          * These MSRs specify bits which the guest must keep fixed (on or off)
2470          * while L1 is in VMXON mode (in L1's root mode, or running an L2).
2471          * We picked the standard core2 setting.
2472          */
2473 #define VMXON_CR0_ALWAYSON      (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
2474 #define VMXON_CR4_ALWAYSON      X86_CR4_VMXE
2475         case MSR_IA32_VMX_CR0_FIXED0:
2476                 *pdata = VMXON_CR0_ALWAYSON;
2477                 break;
2478         case MSR_IA32_VMX_CR0_FIXED1:
2479                 *pdata = -1ULL;
2480                 break;
2481         case MSR_IA32_VMX_CR4_FIXED0:
2482                 *pdata = VMXON_CR4_ALWAYSON;
2483                 break;
2484         case MSR_IA32_VMX_CR4_FIXED1:
2485                 *pdata = -1ULL;
2486                 break;
2487         case MSR_IA32_VMX_VMCS_ENUM:
2488                 *pdata = 0x2e; /* highest index: VMX_PREEMPTION_TIMER_VALUE */
2489                 break;
2490         case MSR_IA32_VMX_PROCBASED_CTLS2:
2491                 *pdata = vmx_control_msr(nested_vmx_secondary_ctls_low,
2492                                         nested_vmx_secondary_ctls_high);
2493                 break;
2494         case MSR_IA32_VMX_EPT_VPID_CAP:
2495                 /* Currently, no nested vpid support */
2496                 *pdata = nested_vmx_ept_caps;
2497                 break;
2498         default:
2499                 return 1;
2500         }
2501
2502         return 0;
2503 }
2504
2505 /*
2506  * Reads an msr value (of 'msr_index') into 'pdata'.
2507  * Returns 0 on success, non-0 otherwise.
2508  * Assumes vcpu_load() was already called.
2509  */
2510 static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
2511 {
2512         u64 data;
2513         struct shared_msr_entry *msr;
2514
2515         if (!pdata) {
2516                 printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
2517                 return -EINVAL;
2518         }
2519
2520         switch (msr_index) {
2521 #ifdef CONFIG_X86_64
2522         case MSR_FS_BASE:
2523                 data = vmcs_readl(GUEST_FS_BASE);
2524                 break;
2525         case MSR_GS_BASE:
2526                 data = vmcs_readl(GUEST_GS_BASE);
2527                 break;
2528         case MSR_KERNEL_GS_BASE:
2529                 vmx_load_host_state(to_vmx(vcpu));
2530                 data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
2531                 break;
2532 #endif
2533         case MSR_EFER:
2534                 return kvm_get_msr_common(vcpu, msr_index, pdata);
2535         case MSR_IA32_TSC:
2536                 data = guest_read_tsc();
2537                 break;
2538         case MSR_IA32_SYSENTER_CS:
2539                 data = vmcs_read32(GUEST_SYSENTER_CS);
2540                 break;
2541         case MSR_IA32_SYSENTER_EIP:
2542                 data = vmcs_readl(GUEST_SYSENTER_EIP);
2543                 break;
2544         case MSR_IA32_SYSENTER_ESP:
2545                 data = vmcs_readl(GUEST_SYSENTER_ESP);
2546                 break;
2547         case MSR_IA32_BNDCFGS:
2548                 if (!vmx_mpx_supported())
2549                         return 1;
2550                 data = vmcs_read64(GUEST_BNDCFGS);
2551                 break;
2552         case MSR_IA32_FEATURE_CONTROL:
2553                 if (!nested_vmx_allowed(vcpu))
2554                         return 1;
2555                 data = to_vmx(vcpu)->nested.msr_ia32_feature_control;
2556                 break;
2557         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
2558                 if (!nested_vmx_allowed(vcpu))
2559                         return 1;
2560                 return vmx_get_vmx_msr(vcpu, msr_index, pdata);
2561         case MSR_TSC_AUX:
2562                 if (!to_vmx(vcpu)->rdtscp_enabled)
2563                         return 1;
2564                 /* Otherwise falls through */
2565         default:
2566                 msr = find_msr_entry(to_vmx(vcpu), msr_index);
2567                 if (msr) {
2568                         data = msr->data;
2569                         break;
2570                 }
2571                 return kvm_get_msr_common(vcpu, msr_index, pdata);
2572         }
2573
2574         *pdata = data;
2575         return 0;
2576 }
2577
2578 static void vmx_leave_nested(struct kvm_vcpu *vcpu);
2579
2580 /*
2581  * Writes msr value into the appropriate "register".
2582  * Returns 0 on success, non-0 otherwise.
2583  * Assumes vcpu_load() was already called.
2584  */
2585 static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2586 {
2587         struct vcpu_vmx *vmx = to_vmx(vcpu);
2588         struct shared_msr_entry *msr;
2589         int ret = 0;
2590         u32 msr_index = msr_info->index;
2591         u64 data = msr_info->data;
2592
2593         switch (msr_index) {
2594         case MSR_EFER:
2595                 ret = kvm_set_msr_common(vcpu, msr_info);
2596                 break;
2597 #ifdef CONFIG_X86_64
2598         case MSR_FS_BASE:
2599                 vmx_segment_cache_clear(vmx);
2600                 vmcs_writel(GUEST_FS_BASE, data);
2601                 break;
2602         case MSR_GS_BASE:
2603                 vmx_segment_cache_clear(vmx);
2604                 vmcs_writel(GUEST_GS_BASE, data);
2605                 break;
2606         case MSR_KERNEL_GS_BASE:
2607                 vmx_load_host_state(vmx);
2608                 vmx->msr_guest_kernel_gs_base = data;
2609                 break;
2610 #endif
2611         case MSR_IA32_SYSENTER_CS:
2612                 vmcs_write32(GUEST_SYSENTER_CS, data);
2613                 break;
2614         case MSR_IA32_SYSENTER_EIP:
2615                 vmcs_writel(GUEST_SYSENTER_EIP, data);
2616                 break;
2617         case MSR_IA32_SYSENTER_ESP:
2618                 vmcs_writel(GUEST_SYSENTER_ESP, data);
2619                 break;
2620         case MSR_IA32_BNDCFGS:
2621                 if (!vmx_mpx_supported())
2622                         return 1;
2623                 vmcs_write64(GUEST_BNDCFGS, data);
2624                 break;
2625         case MSR_IA32_TSC:
2626                 kvm_write_tsc(vcpu, msr_info);
2627                 break;
2628         case MSR_IA32_CR_PAT:
2629                 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2630                         if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
2631                                 return 1;
2632                         vmcs_write64(GUEST_IA32_PAT, data);
2633                         vcpu->arch.pat = data;
2634                         break;
2635                 }
2636                 ret = kvm_set_msr_common(vcpu, msr_info);
2637                 break;
2638         case MSR_IA32_TSC_ADJUST:
2639                 ret = kvm_set_msr_common(vcpu, msr_info);
2640                 break;
2641         case MSR_IA32_FEATURE_CONTROL:
2642                 if (!nested_vmx_allowed(vcpu) ||
2643                     (to_vmx(vcpu)->nested.msr_ia32_feature_control &
2644                      FEATURE_CONTROL_LOCKED && !msr_info->host_initiated))
2645                         return 1;
2646                 vmx->nested.msr_ia32_feature_control = data;
2647                 if (msr_info->host_initiated && data == 0)
2648                         vmx_leave_nested(vcpu);
2649                 break;
2650         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
2651                 return 1; /* they are read-only */
2652         case MSR_TSC_AUX:
2653                 if (!vmx->rdtscp_enabled)
2654                         return 1;
2655                 /* Check reserved bit, higher 32 bits should be zero */
2656                 if ((data >> 32) != 0)
2657                         return 1;
2658                 /* Otherwise falls through */
2659         default:
2660                 msr = find_msr_entry(vmx, msr_index);
2661                 if (msr) {
2662                         u64 old_msr_data = msr->data;
2663                         msr->data = data;
2664                         if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
2665                                 preempt_disable();
2666                                 ret = kvm_set_shared_msr(msr->index, msr->data,
2667                                                          msr->mask);
2668                                 preempt_enable();
2669                                 if (ret)
2670                                         msr->data = old_msr_data;
2671                         }
2672                         break;
2673                 }
2674                 ret = kvm_set_msr_common(vcpu, msr_info);
2675         }
2676
2677         return ret;
2678 }
2679
2680 static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
2681 {
2682         __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
2683         switch (reg) {
2684         case VCPU_REGS_RSP:
2685                 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
2686                 break;
2687         case VCPU_REGS_RIP:
2688                 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
2689                 break;
2690         case VCPU_EXREG_PDPTR:
2691                 if (enable_ept)
2692                         ept_save_pdptrs(vcpu);
2693                 break;
2694         default:
2695                 break;
2696         }
2697 }
2698
2699 static __init int cpu_has_kvm_support(void)
2700 {
2701         return cpu_has_vmx();
2702 }
2703
2704 static __init int vmx_disabled_by_bios(void)
2705 {
2706         u64 msr;
2707
2708         rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
2709         if (msr & FEATURE_CONTROL_LOCKED) {
2710                 /* launched w/ TXT and VMX disabled */
2711                 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
2712                         && tboot_enabled())
2713                         return 1;
2714                 /* launched w/o TXT and VMX only enabled w/ TXT */
2715                 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
2716                         && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
2717                         && !tboot_enabled()) {
2718                         printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
2719                                 "activate TXT before enabling KVM\n");
2720                         return 1;
2721                 }
2722                 /* launched w/o TXT and VMX disabled */
2723                 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
2724                         && !tboot_enabled())
2725                         return 1;
2726         }
2727
2728         return 0;
2729 }
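/*
 * In other words: once the lock bit is set, VMXON must have been enabled by
 * firmware for the environment we booted into - inside SMX when tboot is
 * active, outside SMX otherwise; any other locked combination means the
 * BIOS left VMX disabled.
 */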
2730
2731 static void kvm_cpu_vmxon(u64 addr)
2732 {
2733         asm volatile (ASM_VMX_VMXON_RAX
2734                         : : "a"(&addr), "m"(addr)
2735                         : "memory", "cc");
2736 }
2737
2738 static int hardware_enable(void)
2739 {
2740         int cpu = raw_smp_processor_id();
2741         u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
2742         u64 old, test_bits;
2743
2744         if (read_cr4() & X86_CR4_VMXE)
2745                 return -EBUSY;
2746
2747         INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
2748
2749         /*
2750          * Now we can enable the vmclear operation in kdump
2751          * since the loaded_vmcss_on_cpu list on this cpu
2752          * has been initialized.
2753          *
2754          * Though the cpu is not in VMX operation yet, it is safe
2755          * to enable the vmclear operation because the
2756          * loaded_vmcss_on_cpu list is empty.
2757          */
2758         crash_enable_local_vmclear(cpu);
2759
2760         rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
2761
2762         test_bits = FEATURE_CONTROL_LOCKED;
2763         test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
2764         if (tboot_enabled())
2765                 test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
2766
2767         if ((old & test_bits) != test_bits) {
2768                 /* enable and lock */
2769                 wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
2770         }
2771         write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
2772
2773         if (vmm_exclusive) {
2774                 kvm_cpu_vmxon(phys_addr);
2775                 ept_sync_global();
2776         }
2777
2778         native_store_gdt(this_cpu_ptr(&host_gdt));
2779
2780         return 0;
2781 }
2782
2783 static void vmclear_local_loaded_vmcss(void)
2784 {
2785         int cpu = raw_smp_processor_id();
2786         struct loaded_vmcs *v, *n;
2787
2788         list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
2789                                  loaded_vmcss_on_cpu_link)
2790                 __loaded_vmcs_clear(v);
2791 }
2792
2793
2794 /* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
2795  * tricks.
2796  */
2797 static void kvm_cpu_vmxoff(void)
2798 {
2799         asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
2800 }
2801
2802 static void hardware_disable(void)
2803 {
2804         if (vmm_exclusive) {
2805                 vmclear_local_loaded_vmcss();
2806                 kvm_cpu_vmxoff();
2807         }
2808         write_cr4(read_cr4() & ~X86_CR4_VMXE);
2809 }
2810
2811 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
2812                                       u32 msr, u32 *result)
2813 {
2814         u32 vmx_msr_low, vmx_msr_high;
2815         u32 ctl = ctl_min | ctl_opt;
2816
2817         rdmsr(msr, vmx_msr_low, vmx_msr_high);
2818
2819         ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
2820         ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */
2821
2822         /* Ensure minimum (required) set of control bits are supported. */
2823         if (ctl_min & ~ctl)
2824                 return -EIO;
2825
2826         *result = ctl;
2827         return 0;
2828 }
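/*
 * Example: ctl_min = 0x6 and ctl_opt = 0x18 start out as ctl = 0x1e.  If
 * the capability MSR reports high = 0x17 and low = 0x1, the unsupported
 * optional bit 3 is dropped and the must-be-one bit 0 is added, leaving
 * ctl = 0x17; had high lacked one of the ctl_min bits instead, the
 * function would return -EIO.
 */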
2829
2830 static __init bool allow_1_setting(u32 msr, u32 ctl)
2831 {
2832         u32 vmx_msr_low, vmx_msr_high;
2833
2834         rdmsr(msr, vmx_msr_low, vmx_msr_high);
2835         return vmx_msr_high & ctl;
2836 }
2837
2838 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
2839 {
2840         u32 vmx_msr_low, vmx_msr_high;
2841         u32 min, opt, min2, opt2;
2842         u32 _pin_based_exec_control = 0;
2843         u32 _cpu_based_exec_control = 0;
2844         u32 _cpu_based_2nd_exec_control = 0;
2845         u32 _vmexit_control = 0;
2846         u32 _vmentry_control = 0;
2847
2848         min = CPU_BASED_HLT_EXITING |
2849 #ifdef CONFIG_X86_64
2850               CPU_BASED_CR8_LOAD_EXITING |
2851               CPU_BASED_CR8_STORE_EXITING |
2852 #endif
2853               CPU_BASED_CR3_LOAD_EXITING |
2854               CPU_BASED_CR3_STORE_EXITING |
2855               CPU_BASED_USE_IO_BITMAPS |
2856               CPU_BASED_MOV_DR_EXITING |
2857               CPU_BASED_USE_TSC_OFFSETING |
2858               CPU_BASED_MWAIT_EXITING |
2859               CPU_BASED_MONITOR_EXITING |
2860               CPU_BASED_INVLPG_EXITING |
2861               CPU_BASED_RDPMC_EXITING;
2862
2863         opt = CPU_BASED_TPR_SHADOW |
2864               CPU_BASED_USE_MSR_BITMAPS |
2865               CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
2866         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
2867                                 &_cpu_based_exec_control) < 0)
2868                 return -EIO;
2869 #ifdef CONFIG_X86_64
2870         if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
2871                 _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
2872                                            ~CPU_BASED_CR8_STORE_EXITING;
2873 #endif
2874         if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
2875                 min2 = 0;
2876                 opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2877                         SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2878                         SECONDARY_EXEC_WBINVD_EXITING |
2879                         SECONDARY_EXEC_ENABLE_VPID |
2880                         SECONDARY_EXEC_ENABLE_EPT |
2881                         SECONDARY_EXEC_UNRESTRICTED_GUEST |
2882                         SECONDARY_EXEC_PAUSE_LOOP_EXITING |
2883                         SECONDARY_EXEC_RDTSCP |
2884                         SECONDARY_EXEC_ENABLE_INVPCID |
2885                         SECONDARY_EXEC_APIC_REGISTER_VIRT |
2886                         SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2887                         SECONDARY_EXEC_SHADOW_VMCS;
2888                 if (adjust_vmx_controls(min2, opt2,
2889                                         MSR_IA32_VMX_PROCBASED_CTLS2,
2890                                         &_cpu_based_2nd_exec_control) < 0)
2891                         return -EIO;
2892         }
2893 #ifndef CONFIG_X86_64
2894         if (!(_cpu_based_2nd_exec_control &
2895                                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
2896                 _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
2897 #endif
2898
2899         if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
2900                 _cpu_based_2nd_exec_control &= ~(
2901                                 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2902                                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2903                                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
2904
2905         if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
2906         /* CR3 accesses and invlpg don't need to cause VM Exits when EPT
2907            is enabled */
2908                 _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
2909                                              CPU_BASED_CR3_STORE_EXITING |
2910                                              CPU_BASED_INVLPG_EXITING);
2911                 rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
2912                       vmx_capability.ept, vmx_capability.vpid);
2913         }
2914
2915         min = VM_EXIT_SAVE_DEBUG_CONTROLS;
2916 #ifdef CONFIG_X86_64
2917         min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
2918 #endif
2919         opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT |
2920                 VM_EXIT_ACK_INTR_ON_EXIT | VM_EXIT_CLEAR_BNDCFGS;
2921         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
2922                                 &_vmexit_control) < 0)
2923                 return -EIO;
2924
2925         min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
2926         opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR;
2927         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
2928                                 &_pin_based_exec_control) < 0)
2929                 return -EIO;
2930
2931         if (!(_cpu_based_2nd_exec_control &
2932                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) ||
2933                 !(_vmexit_control & VM_EXIT_ACK_INTR_ON_EXIT))
2934                 _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
2935
2936         min = VM_ENTRY_LOAD_DEBUG_CONTROLS;
2937         opt = VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS;
2938         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
2939                                 &_vmentry_control) < 0)
2940                 return -EIO;
2941
2942         rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
2943
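        /*
         * The checks below decode IA32_VMX_BASIC: bits 44:32 of the MSR
         * (bits 12:0 of vmx_msr_high) report the VMCS region size, bit 48
         * (bit 16 of vmx_msr_high) whether physical addresses for VMX
         * structures are limited to 32 bits, and bits 53:50 (bits 21:18 of
         * vmx_msr_high) the memory type required for VMCS accesses, where
         * 6 means write-back (WB).
         */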
2944         /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
2945         if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
2946                 return -EIO;
2947
2948 #ifdef CONFIG_X86_64
2949         /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
2950         if (vmx_msr_high & (1u<<16))
2951                 return -EIO;
2952 #endif
2953
2954         /* Require Write-Back (WB) memory type for VMCS accesses. */
2955         if (((vmx_msr_high >> 18) & 15) != 6)
2956                 return -EIO;
2957
2958         vmcs_conf->size = vmx_msr_high & 0x1fff;
2959         vmcs_conf->order = get_order(vmcs_config.size);
2960         vmcs_conf->revision_id = vmx_msr_low;
2961
2962         vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
2963         vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
2964         vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
2965         vmcs_conf->vmexit_ctrl         = _vmexit_control;
2966         vmcs_conf->vmentry_ctrl        = _vmentry_control;
2967
2968         cpu_has_load_ia32_efer =
2969                 allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
2970                                 VM_ENTRY_LOAD_IA32_EFER)
2971                 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
2972                                    VM_EXIT_LOAD_IA32_EFER);
2973
2974         cpu_has_load_perf_global_ctrl =
2975                 allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
2976                                 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
2977                 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
2978                                    VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
2979
2980         /*
2981          * Some CPUs support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL
2982          * but due to the errata below it can't be used. The workaround is to
2983          * use the MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL.
2984          *
2985          * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
2986          *
2987          * AAK155             (model 26)
2988          * AAP115             (model 30)
2989          * AAT100             (model 37)
2990          * BC86,AAY89,BD102   (model 44)
2991          * BA97               (model 46)
2992          *
2993          */
2994         if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
2995                 switch (boot_cpu_data.x86_model) {
2996                 case 26:
2997                 case 30:
2998                 case 37:
2999                 case 44:
3000                 case 46:
3001                         cpu_has_load_perf_global_ctrl = false;
3002                         printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
3003                                         "does not work properly. Using workaround\n");
3004                         break;
3005                 default:
3006                         break;
3007                 }
3008         }
3009
3010         return 0;
3011 }
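/*
 * setup_vmcs_config() runs once at module load, from hardware_setup()
 * below, with the global vmcs_config; the size, order and revision_id
 * recorded there are reused by alloc_vmcs_cpu() for every VMCS allocation.
 */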
3012
3013 static struct vmcs *alloc_vmcs_cpu(int cpu)
3014 {
3015         int node = cpu_to_node(cpu);
3016         struct page *pages;
3017         struct vmcs *vmcs;
3018
3019         pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order);
3020         if (!pages)
3021                 return NULL;
3022         vmcs = page_address(pages);
3023         memset(vmcs, 0, vmcs_config.size);
3024         vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
3025         return vmcs;
3026 }
3027
3028 static struct vmcs *alloc_vmcs(void)
3029 {
3030         return alloc_vmcs_cpu(raw_smp_processor_id());
3031 }
3032
3033 static void free_vmcs(struct vmcs *vmcs)
3034 {
3035         free_pages((unsigned long)vmcs, vmcs_config.order);
3036 }
3037
3038 /*
3039  * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
3040  */
3041 static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
3042 {
3043         if (!loaded_vmcs->vmcs)
3044                 return;
3045         loaded_vmcs_clear(loaded_vmcs);
3046         free_vmcs(loaded_vmcs->vmcs);
3047         loaded_vmcs->vmcs = NULL;
3048 }
3049
3050 static void free_kvm_area(void)
3051 {
3052         int cpu;
3053
3054         for_each_possible_cpu(cpu) {
3055                 free_vmcs(per_cpu(vmxarea, cpu));
3056                 per_cpu(vmxarea, cpu) = NULL;
3057         }
3058 }
3059
3060 static void init_vmcs_shadow_fields(void)
3061 {
3062         int i, j;
3063
3064         /* No checks for read only fields yet */
3065
3066         for (i = j = 0; i < max_shadow_read_write_fields; i++) {
3067                 switch (shadow_read_write_fields[i]) {
3068                 case GUEST_BNDCFGS:
3069                         if (!vmx_mpx_supported())
3070                                 continue;
3071                         break;
3072                 default:
3073                         break;
3074                 }
3075
3076                 if (j < i)
3077                         shadow_read_write_fields[j] =
3078                                 shadow_read_write_fields[i];
3079                 j++;
3080         }
3081         max_shadow_read_write_fields = j;
3082
3083         /* Shadowed fields that the guest can access without a vmexit */
3084         for (i = 0; i < max_shadow_read_write_fields; i++) {
3085                 clear_bit(shadow_read_write_fields[i],
3086                           vmx_vmwrite_bitmap);
3087                 clear_bit(shadow_read_write_fields[i],
3088                           vmx_vmread_bitmap);
3089         }
3090         for (i = 0; i < max_shadow_read_only_fields; i++)
3091                 clear_bit(shadow_read_only_fields[i],
3092                           vmx_vmread_bitmap);
3093 }
3094
3095 static __init int alloc_kvm_area(void)
3096 {
3097         int cpu;
3098
3099         for_each_possible_cpu(cpu) {
3100                 struct vmcs *vmcs;
3101
3102                 vmcs = alloc_vmcs_cpu(cpu);
3103                 if (!vmcs) {
3104                         free_kvm_area();
3105                         return -ENOMEM;
3106                 }
3107
3108                 per_cpu(vmxarea, cpu) = vmcs;
3109         }
3110         return 0;
3111 }
3112
3113 static __init int hardware_setup(void)
3114 {
3115         if (setup_vmcs_config(&vmcs_config) < 0)
3116                 return -EIO;
3117
3118         if (boot_cpu_has(X86_FEATURE_NX))
3119                 kvm_enable_efer_bits(EFER_NX);
3120
3121         if (!cpu_has_vmx_vpid())
3122                 enable_vpid = 0;
3123         if (!cpu_has_vmx_shadow_vmcs())
3124                 enable_shadow_vmcs = 0;
3125         if (enable_shadow_vmcs)
3126                 init_vmcs_shadow_fields();
3127
3128         if (!cpu_has_vmx_ept() ||
3129             !cpu_has_vmx_ept_4levels()) {
3130                 enable_ept = 0;
3131                 enable_unrestricted_guest = 0;
3132                 enable_ept_ad_bits = 0;
3133         }
3134
3135         if (!cpu_has_vmx_ept_ad_bits())
3136                 enable_ept_ad_bits = 0;
3137
3138         if (!cpu_has_vmx_unrestricted_guest())
3139                 enable_unrestricted_guest = 0;
3140
3141         if (!cpu_has_vmx_flexpriority()) {
3142                 flexpriority_enabled = 0;
3143
3144                 /*
3145                  * set_apic_access_page_addr() is used to reload apic access
3146                  * page upon invalidation.  No need to do anything if the
3147                  * processor does not have the APIC_ACCESS_ADDR VMCS field.
3148                  */
3149                 kvm_x86_ops->set_apic_access_page_addr = NULL;
3150         }
3151
3152         if (!cpu_has_vmx_tpr_shadow())
3153                 kvm_x86_ops->update_cr8_intercept = NULL;
3154
3155         if (enable_ept && !cpu_has_vmx_ept_2m_page())
3156                 kvm_disable_largepages();
3157
3158         if (!cpu_has_vmx_ple())
3159                 ple_gap = 0;
3160
3161         if (!cpu_has_vmx_apicv())
3162                 enable_apicv = 0;
3163
3164         if (enable_apicv)
3165                 kvm_x86_ops->update_cr8_intercept = NULL;
3166         else {
3167                 kvm_x86_ops->hwapic_irr_update = NULL;
3168                 kvm_x86_ops->deliver_posted_interrupt = NULL;
3169                 kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
3170         }
3171
3172         if (nested)
3173                 nested_vmx_setup_ctls_msrs();
3174
3175         return alloc_kvm_area();
3176 }
3177
3178 static __exit void hardware_unsetup(void)
3179 {
3180         free_kvm_area();
3181 }
3182
3183 static bool emulation_required(struct kvm_vcpu *vcpu)
3184 {
3185         return emulate_invalid_guest_state && !guest_state_valid(vcpu);
3186 }
3187
3188 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
3189                 struct kvm_segment *save)
3190 {
3191         if (!emulate_invalid_guest_state) {
3192                 /*
3193                  * CS and SS RPL should be equal during guest entry according
3194                  * to the VMX spec, but in reality it is not always so. Since the
3195                  * vcpu is in the middle of the transition from real mode to
3196                  * protected mode, it is safe to assume that RPL 0 is a good
3197                  * default value.
3198                  */
3199                 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
3200                         save->selector &= ~SELECTOR_RPL_MASK;
3201                 save->dpl = save->selector & SELECTOR_RPL_MASK;
3202                 save->s = 1;
3203         }
3204         vmx_set_segment(vcpu, save, seg);
3205 }
3206
3207 static void enter_pmode(struct kvm_vcpu *vcpu)
3208 {
3209         unsigned long flags;
3210         struct vcpu_vmx *vmx = to_vmx(vcpu);
3211
3212         /*
3213          * Update the real-mode segment cache. It may not be up to date if a
3214          * segment register was written while the vcpu was in guest mode.
3215          */
3216         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
3217         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
3218         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
3219         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
3220         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
3221         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
3222
3223         vmx->rmode.vm86_active = 0;
3224
3225         vmx_segment_cache_clear(vmx);
3226
3227         vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
3228
3229         flags = vmcs_readl(GUEST_RFLAGS);
3230         flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
3231         flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
3232         vmcs_writel(GUEST_RFLAGS, flags);
3233
3234         vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
3235                         (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
3236
3237         update_exception_bitmap(vcpu);
3238
3239         fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
3240         fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
3241         fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
3242         fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
3243         fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
3244         fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
3245 }
3246
3247 static void fix_rmode_seg(int seg, struct kvm_segment *save)
3248 {
3249         const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3250         struct kvm_segment var = *save;
3251
3252         var.dpl = 0x3;
3253         if (seg == VCPU_SREG_CS)
3254                 var.type = 0x3;
3255
3256         if (!emulate_invalid_guest_state) {
3257                 var.selector = var.base >> 4;
3258                 var.base = var.base & 0xffff0;
3259                 var.limit = 0xffff;
3260                 var.g = 0;
3261                 var.db = 0;
3262                 var.present = 1;
3263                 var.s = 1;
3264                 var.l = 0;
3265                 var.unusable = 0;
3266                 var.type = 0x3;
3267                 var.avl = 0;
3268                 if (save->base & 0xf)
3269                         printk_once(KERN_WARNING "kvm: segment base is not "
3270                                         "paragraph aligned when entering "
3271                                         "protected mode (seg=%d)", seg);
3272         }
3273
3274         vmcs_write16(sf->selector, var.selector);
3275         vmcs_write32(sf->base, var.base);
3276         vmcs_write32(sf->limit, var.limit);
3277         vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
3278 }
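/*
 * As an illustration of the !emulate_invalid_guest_state path above: a
 * cached segment with base 0x12345 is forced back into real-mode shape as
 * selector 0x1234, base 0x12340 and limit 0xffff, and the one-time warning
 * fires because the original base was not paragraph (16-byte) aligned.
 */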
3279
3280 static void enter_rmode(struct kvm_vcpu *vcpu)
3281 {
3282         unsigned long flags;
3283         struct vcpu_vmx *vmx = to_vmx(vcpu);
3284
3285         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
3286         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
3287         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
3288         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
3289         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
3290         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
3291         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
3292
3293         vmx->rmode.vm86_active = 1;
3294
3295         /*
3296          * Very old userspace does not call KVM_SET_TSS_ADDR before entering
3297          * the vcpu. Warn the user that an update is overdue.
3298          */
3299         if (!vcpu->kvm->arch.tss_addr)
3300                 printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR needs to be "
3301                              "called before entering vcpu\n");
3302
3303         vmx_segment_cache_clear(vmx);
3304
3305         vmcs_writel(GUEST_TR_BASE, vcpu->kvm->arch.tss_addr);
3306         vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
3307         vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
3308
3309         flags = vmcs_readl(GUEST_RFLAGS);
3310         vmx->rmode.save_rflags = flags;
3311
3312         flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
3313
3314         vmcs_writel(GUEST_RFLAGS, flags);
3315         vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
3316         update_exception_bitmap(vcpu);
3317
3318         fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
3319         fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
3320         fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
3321         fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
3322         fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
3323         fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
3324
3325         kvm_mmu_reset_context(vcpu);
3326 }
3327
3328 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
3329 {
3330         struct vcpu_vmx *vmx = to_vmx(vcpu);
3331         struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
3332
3333         if (!msr)
3334                 return;
3335
3336         /*
3337          * Force kernel_gs_base reloading before EFER changes, as control
3338          * of this msr depends on is_long_mode().
3339          */
3340         vmx_load_host_state(to_vmx(vcpu));
3341         vcpu->arch.efer = efer;
3342         if (efer & EFER_LMA) {
3343                 vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
3344                 msr->data = efer;
3345         } else {
3346                 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
3347
3348                 msr->data = efer & ~EFER_LME;
3349         }
3350         setup_msrs(vmx);
3351 }
3352
3353 #ifdef CONFIG_X86_64
3354
3355 static void enter_lmode(struct kvm_vcpu *vcpu)
3356 {
3357         u32 guest_tr_ar;
3358
3359         vmx_segment_cache_clear(to_vmx(vcpu));
3360
3361         guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
3362         if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
3363                 pr_debug_ratelimited("%s: tss fixup for long mode. \n",
3364                                      __func__);
3365                 vmcs_write32(GUEST_TR_AR_BYTES,
3366                              (guest_tr_ar & ~AR_TYPE_MASK)
3367                              | AR_TYPE_BUSY_64_TSS);
3368         }
3369         vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
3370 }
3371
3372 static void exit_lmode(struct kvm_vcpu *vcpu)
3373 {
3374         vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
3375         vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
3376 }
3377
3378 #endif
3379
3380 static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
3381 {
3382         vpid_sync_context(to_vmx(vcpu));
3383         if (enable_ept) {
3384                 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3385                         return;
3386                 ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
3387         }
3388 }
3389
3390 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
3391 {
3392         ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
3393
3394         vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
3395         vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
3396 }
3397
3398 static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
3399 {
3400         if (enable_ept && is_paging(vcpu))
3401                 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
3402         __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
3403 }
3404
3405 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
3406 {
3407         ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
3408
3409         vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
3410         vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
3411 }
3412
3413 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
3414 {
3415         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3416
3417         if (!test_bit(VCPU_EXREG_PDPTR,
3418                       (unsigned long *)&vcpu->arch.regs_dirty))
3419                 return;
3420
3421         if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
3422                 vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
3423                 vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
3424                 vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
3425                 vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
3426         }
3427 }
3428
3429 static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
3430 {
3431         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3432
3433         if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
3434                 mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
3435                 mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
3436                 mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
3437                 mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
3438         }
3439
3440         __set_bit(VCPU_EXREG_PDPTR,
3441                   (unsigned long *)&vcpu->arch.regs_avail);
3442         __set_bit(VCPU_EXREG_PDPTR,
3443                   (unsigned long *)&vcpu->arch.regs_dirty);
3444 }
3445
3446 static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
3447
3448 static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
3449                                         unsigned long cr0,
3450                                         struct kvm_vcpu *vcpu)
3451 {
3452         if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
3453                 vmx_decache_cr3(vcpu);
3454         if (!(cr0 & X86_CR0_PG)) {
3455                 /* From paging/starting to nonpaging */
3456                 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
3457                              vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
3458                              (CPU_BASED_CR3_LOAD_EXITING |
3459                               CPU_BASED_CR3_STORE_EXITING));
3460                 vcpu->arch.cr0 = cr0;
3461                 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
3462         } else if (!is_paging(vcpu)) {
3463                 /* From nonpaging to paging */
3464                 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
3465                              vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
3466                              ~(CPU_BASED_CR3_LOAD_EXITING |
3467                                CPU_BASED_CR3_STORE_EXITING));
3468                 vcpu->arch.cr0 = cr0;
3469                 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
3470         }
3471
3472         if (!(cr0 & X86_CR0_WP))
3473                 *hw_cr0 &= ~X86_CR0_WP;
3474 }
3475
3476 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
3477 {
3478         struct vcpu_vmx *vmx = to_vmx(vcpu);
3479         unsigned long hw_cr0;
3480
3481         hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK);
3482         if (enable_unrestricted_guest)
3483                 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
3484         else {
3485                 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
3486
3487                 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
3488                         enter_pmode(vcpu);
3489
3490                 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
3491                         enter_rmode(vcpu);
3492         }
3493
3494 #ifdef CONFIG_X86_64
3495         if (vcpu->arch.efer & EFER_LME) {
3496                 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
3497                         enter_lmode(vcpu);
3498                 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
3499                         exit_lmode(vcpu);
3500         }
3501 #endif
3502
3503         if (enable_ept)
3504                 ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
3505
3506         if (!vcpu->fpu_active)
3507                 hw_cr0 |= X86_CR0_TS | X86_CR0_MP;
3508
3509         vmcs_writel(CR0_READ_SHADOW, cr0);
3510         vmcs_writel(GUEST_CR0, hw_cr0);
3511         vcpu->arch.cr0 = cr0;
3512
3513         /* depends on vcpu->arch.cr0 to be set to a new value */
3514         vmx->emulation_required = emulation_required(vcpu);
3515 }
3516
3517 static u64 construct_eptp(unsigned long root_hpa)
3518 {
3519         u64 eptp;
3520
3521         /* TODO: write the value read from the MSR */
3522         eptp = VMX_EPT_DEFAULT_MT |
3523                 VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
3524         if (enable_ept_ad_bits)
3525                 eptp |= VMX_EPT_AD_ENABLE_BIT;
3526         eptp |= (root_hpa & PAGE_MASK);
3527
3528         return eptp;
3529 }
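/*
 * For example, with a hypothetical root_hpa of 0x12345000 and EPT A/D bits
 * enabled, construct_eptp() yields 0x1234505e: memory type 6 (WB) in bits
 * 2:0, a 4-level page walk (encoded as 3) in bits 5:3, the A/D-enable bit
 * (bit 6), and the page-aligned address of the EPT root table.
 */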
3530
3531 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
3532 {
3533         unsigned long guest_cr3;
3534         u64 eptp;
3535
3536         guest_cr3 = cr3;
3537         if (enable_ept) {
3538                 eptp = construct_eptp(cr3);
3539                 vmcs_write64(EPT_POINTER, eptp);
3540                 if (is_paging(vcpu) || is_guest_mode(vcpu))
3541                         guest_cr3 = kvm_read_cr3(vcpu);
3542                 else
3543                         guest_cr3 = vcpu->kvm->arch.ept_identity_map_addr;
3544                 ept_load_pdptrs(vcpu);
3545         }
3546
3547         vmx_flush_tlb(vcpu);
3548         vmcs_writel(GUEST_CR3, guest_cr3);
3549 }
3550
3551 static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
3552 {
3553         unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
3554                     KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
3555
3556         if (cr4 & X86_CR4_VMXE) {
3557                 /*
3558                  * To use VMXON (and later other VMX instructions), a guest
3559                  * must first be able to turn on cr4.VMXE (see handle_vmon()).
3560                  * So basically the check on whether to allow nested VMX
3561                  * is here.
3562                  */
3563                 if (!nested_vmx_allowed(vcpu))
3564                         return 1;
3565         }
3566         if (to_vmx(vcpu)->nested.vmxon &&
3567             ((cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON))
3568                 return 1;
3569
3570         vcpu->arch.cr4 = cr4;
3571         if (enable_ept) {
3572                 if (!is_paging(vcpu)) {
3573                         hw_cr4 &= ~X86_CR4_PAE;
3574                         hw_cr4 |= X86_CR4_PSE;
3575                         /*
3576                          * SMEP/SMAP is disabled if the CPU is in non-paging mode
3577                          * in hardware. However, KVM always uses paging mode to
3578                          * emulate guest non-paging mode with TDP.
3579                          * To emulate this behavior, SMEP/SMAP need to be
3580                          * manually disabled when the guest switches to non-paging
3581                          * mode.
3582                          */
3583                         hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
3584                 } else if (!(cr4 & X86_CR4_PAE)) {
3585                         hw_cr4 &= ~X86_CR4_PAE;
3586                 }
3587         }
3588
3589         vmcs_writel(CR4_READ_SHADOW, cr4);
3590         vmcs_writel(GUEST_CR4, hw_cr4);
3591         return 0;
3592 }
3593
3594 static void vmx_get_segment(struct kvm_vcpu *vcpu,
3595                             struct kvm_segment *var, int seg)
3596 {
3597         struct vcpu_vmx *vmx = to_vmx(vcpu);
3598         u32 ar;
3599
3600         if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3601                 *var = vmx->rmode.segs[seg];
3602                 if (seg == VCPU_SREG_TR
3603                     || var->selector == vmx_read_guest_seg_selector(vmx, seg))
3604                         return;
3605                 var->base = vmx_read_guest_seg_base(vmx, seg);
3606                 var->selector = vmx_read_guest_seg_selector(vmx, seg);
3607                 return;
3608         }
3609         var->base = vmx_read_guest_seg_base(vmx, seg);
3610         var->limit = vmx_read_guest_seg_limit(vmx, seg);
3611         var->selector = vmx_read_guest_seg_selector(vmx, seg);
3612         ar = vmx_read_guest_seg_ar(vmx, seg);
3613         var->unusable = (ar >> 16) & 1;
3614         var->type = ar & 15;
3615         var->s = (ar >> 4) & 1;
3616         var->dpl = (ar >> 5) & 3;
3617         /*
3618          * Some userspaces do not preserve the unusable property. Since a usable
3619          * segment has to be present according to the VMX spec, we can use the
3620          * present property to work around the userspace bug by making an unusable
3621          * segment always nonpresent. vmx_segment_access_rights() already marks a
3622          * nonpresent segment as unusable.
3623          */
3624         var->present = !var->unusable;
3625         var->avl = (ar >> 12) & 1;
3626         var->l = (ar >> 13) & 1;
3627         var->db = (ar >> 14) & 1;
3628         var->g = (ar >> 15) & 1;
3629 }
3630
3631 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
3632 {
3633         struct kvm_segment s;
3634
3635         if (to_vmx(vcpu)->rmode.vm86_active) {
3636                 vmx_get_segment(vcpu, &s, seg);
3637                 return s.base;
3638         }
3639         return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
3640 }
3641
3642 static int vmx_get_cpl(struct kvm_vcpu *vcpu)
3643 {
3644         struct vcpu_vmx *vmx = to_vmx(vcpu);
3645
3646         if (unlikely(vmx->rmode.vm86_active))
3647                 return 0;
3648         else {
3649                 int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
3650                 return AR_DPL(ar);
3651         }
3652 }
3653
3654 static u32 vmx_segment_access_rights(struct kvm_segment *var)
3655 {
3656         u32 ar;
3657
3658         if (var->unusable || !var->present)
3659                 ar = 1 << 16;
3660         else {
3661                 ar = var->type & 15;
3662                 ar |= (var->s & 1) << 4;
3663                 ar |= (var->dpl & 3) << 5;
3664                 ar |= (var->present & 1) << 7;
3665                 ar |= (var->avl & 1) << 12;
3666                 ar |= (var->l & 1) << 13;
3667                 ar |= (var->db & 1) << 14;
3668                 ar |= (var->g & 1) << 15;
3669         }
3670
3671         return ar;
3672 }
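/*
 * For instance, a flat 32-bit code segment (type 0xb, s=1, dpl=0,
 * present=1, db=1, g=1) packs into the access-rights value 0xc09b, while
 * an unusable or non-present segment is reported simply as 1 << 16.
 */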
3673
3674 static void vmx_set_segment(struct kvm_vcpu *vcpu,
3675                             struct kvm_segment *var, int seg)
3676 {
3677         struct vcpu_vmx *vmx = to_vmx(vcpu);
3678         const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3679
3680         vmx_segment_cache_clear(vmx);
3681
3682         if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3683                 vmx->rmode.segs[seg] = *var;
3684                 if (seg == VCPU_SREG_TR)
3685                         vmcs_write16(sf->selector, var->selector);
3686                 else if (var->s)
3687                         fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
3688                 goto out;
3689         }
3690
3691         vmcs_writel(sf->base, var->base);
3692         vmcs_write32(sf->limit, var->limit);
3693         vmcs_write16(sf->selector, var->selector);
3694
3695         /*
3696          *   Fix the "Accessed" bit in the AR field of segment registers for older
3697          * qemu binaries.
3698          *   The IA-32 architecture specifies that at processor reset the
3699          * "Accessed" bit in the AR field of segment registers is 1, but qemu
3700          * sets it to 0 in its userland code. This causes an invalid-guest-state
3701          * vmexit when "unrestricted guest" mode is turned on.
3702          *   A fix for this setup issue in cpu_reset is being pushed in the qemu
3703          * tree. Newer qemu binaries with that qemu fix would not need this
3704          * kvm hack.
3705          */
3706         if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
3707                 var->type |= 0x1; /* Accessed */
3708
3709         vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
3710
3711 out:
3712         vmx->emulation_required = emulation_required(vcpu);
3713 }
3714
3715 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
3716 {
3717         u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
3718
3719         *db = (ar >> 14) & 1;
3720         *l = (ar >> 13) & 1;
3721 }
3722
3723 static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3724 {
3725         dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
3726         dt->address = vmcs_readl(GUEST_IDTR_BASE);
3727 }
3728
3729 static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3730 {
3731         vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
3732         vmcs_writel(GUEST_IDTR_BASE, dt->address);
3733 }
3734
3735 static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3736 {
3737         dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
3738         dt->address = vmcs_readl(GUEST_GDTR_BASE);
3739 }
3740
3741 static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3742 {
3743         vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
3744         vmcs_writel(GUEST_GDTR_BASE, dt->address);
3745 }
3746
3747 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
3748 {
3749         struct kvm_segment var;
3750         u32 ar;
3751
3752         vmx_get_segment(vcpu, &var, seg);
3753         var.dpl = 0x3;
3754         if (seg == VCPU_SREG_CS)
3755                 var.type = 0x3;
3756         ar = vmx_segment_access_rights(&var);
3757
3758         if (var.base != (var.selector << 4))
3759                 return false;
3760         if (var.limit != 0xffff)
3761                 return false;
3762         if (ar != 0xf3)
3763                 return false;
3764
3765         return true;
3766 }
3767
3768 static bool code_segment_valid(struct kvm_vcpu *vcpu)
3769 {
3770         struct kvm_segment cs;
3771         unsigned int cs_rpl;
3772
3773         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
3774         cs_rpl = cs.selector & SELECTOR_RPL_MASK;
3775
3776         if (cs.unusable)
3777                 return false;
3778         if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK))
3779                 return false;
3780         if (!cs.s)
3781                 return false;
3782         if (cs.type & AR_TYPE_WRITEABLE_MASK) {
3783                 if (cs.dpl > cs_rpl)
3784                         return false;
3785         } else {
3786                 if (cs.dpl != cs_rpl)
3787                         return false;
3788         }
3789         if (!cs.present)
3790                 return false;
3791
3792         /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
3793         return true;
3794 }
3795
3796 static bool stack_segment_valid(struct kvm_vcpu *vcpu)
3797 {
3798         struct kvm_segment ss;
3799         unsigned int ss_rpl;
3800
3801         vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
3802         ss_rpl = ss.selector & SELECTOR_RPL_MASK;
3803
3804         if (ss.unusable)
3805                 return true;
3806         if (ss.type != 3 && ss.type != 7)
3807                 return false;
3808         if (!ss.s)
3809                 return false;
3810         if (ss.dpl != ss_rpl) /* DPL != RPL */
3811                 return false;
3812         if (!ss.present)
3813                 return false;
3814
3815         return true;
3816 }
3817
3818 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
3819 {
3820         struct kvm_segment var;
3821         unsigned int rpl;
3822
3823         vmx_get_segment(vcpu, &var, seg);
3824         rpl = var.selector & SELECTOR_RPL_MASK;
3825
3826         if (var.unusable)
3827                 return true;
3828         if (!var.s)
3829                 return false;
3830         if (!var.present)
3831                 return false;
3832         if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) {
3833                 if (var.dpl < rpl) /* DPL < RPL */
3834                         return false;
3835         }
3836
3837         /* TODO: Add other members to kvm_segment_field to allow checking for other access
3838          * rights flags
3839          */
3840         return true;
3841 }
3842
3843 static bool tr_valid(struct kvm_vcpu *vcpu)
3844 {
3845         struct kvm_segment tr;
3846
3847         vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
3848
3849         if (tr.unusable)
3850                 return false;
3851         if (tr.selector & SELECTOR_TI_MASK)     /* TI = 1 */
3852                 return false;
3853         if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
3854                 return false;
3855         if (!tr.present)
3856                 return false;
3857
3858         return true;
3859 }
3860
3861 static bool ldtr_valid(struct kvm_vcpu *vcpu)
3862 {
3863         struct kvm_segment ldtr;
3864
3865         vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
3866
3867         if (ldtr.unusable)
3868                 return true;
3869         if (ldtr.selector & SELECTOR_TI_MASK)   /* TI = 1 */
3870                 return false;
3871         if (ldtr.type != 2)
3872                 return false;
3873         if (!ldtr.present)
3874                 return false;
3875
3876         return true;
3877 }
3878
3879 static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
3880 {
3881         struct kvm_segment cs, ss;
3882
3883         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
3884         vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
3885
3886         return ((cs.selector & SELECTOR_RPL_MASK) ==
3887                  (ss.selector & SELECTOR_RPL_MASK));
3888 }
3889
3890 /*
3891  * Check if guest state is valid. Returns true if valid, false if
3892  * not.
3893  * We assume that registers are always usable.
3894  */
3895 static bool guest_state_valid(struct kvm_vcpu *vcpu)
3896 {
3897         if (enable_unrestricted_guest)
3898                 return true;
3899
3900         /* real mode guest state checks */
3901         if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
3902                 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
3903                         return false;
3904                 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
3905                         return false;
3906                 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
3907                         return false;
3908                 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
3909                         return false;
3910                 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
3911                         return false;
3912                 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
3913                         return false;
3914         } else {
3915         /* protected mode guest state checks */
3916                 if (!cs_ss_rpl_check(vcpu))
3917                         return false;
3918                 if (!code_segment_valid(vcpu))
3919                         return false;
3920                 if (!stack_segment_valid(vcpu))
3921                         return false;
3922                 if (!data_segment_valid(vcpu, VCPU_SREG_DS))
3923                         return false;
3924                 if (!data_segment_valid(vcpu, VCPU_SREG_ES))
3925                         return false;
3926                 if (!data_segment_valid(vcpu, VCPU_SREG_FS))
3927                         return false;
3928                 if (!data_segment_valid(vcpu, VCPU_SREG_GS))
3929                         return false;
3930                 if (!tr_valid(vcpu))
3931                         return false;
3932                 if (!ldtr_valid(vcpu))
3933                         return false;
3934         }
3935         /* TODO:
3936          * - Add checks on RIP
3937          * - Add checks on RFLAGS
3938          */
3939
3940         return true;
3941 }
3942
3943 static int init_rmode_tss(struct kvm *kvm)
3944 {
3945         gfn_t fn;
3946         u16 data = 0;
3947         int idx, r;
3948
3949         idx = srcu_read_lock(&kvm->srcu);
3950         fn = kvm->arch.tss_addr >> PAGE_SHIFT;
3951         r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
3952         if (r < 0)
3953                 goto out;
3954         data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
3955         r = kvm_write_guest_page(kvm, fn++, &data,
3956                         TSS_IOPB_BASE_OFFSET, sizeof(u16));
3957         if (r < 0)
3958                 goto out;
3959         r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
3960         if (r < 0)
3961                 goto out;
3962         r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
3963         if (r < 0)
3964                 goto out;
3965         data = ~0;
3966         r = kvm_write_guest_page(kvm, fn, &data,
3967                                  RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
3968                                  sizeof(u8));
3969 out:
3970         srcu_read_unlock(&kvm->srcu, idx);
3971         return r;
3972 }
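/*
 * The real-mode TSS built by init_rmode_tss() spans three guest pages: the
 * TSS proper and the interrupt-redirection bitmap (the I/O bitmap base
 * written at TSS_IOPB_BASE_OFFSET points just past them), followed by a
 * full 64K-port I/O permission bitmap and the architecturally required
 * trailing 0xff byte written by the final kvm_write_guest_page() above.
 */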
3973
3974 static int init_rmode_identity_map(struct kvm *kvm)
3975 {
3976         int i, idx, r = 0;
3977         pfn_t identity_map_pfn;
3978         u32 tmp;
3979
3980         if (!enable_ept)
3981                 return 0;
3982
3983         /* Protect kvm->arch.ept_identity_pagetable_done. */
3984         mutex_lock(&kvm->slots_lock);
3985
3986         if (likely(kvm->arch.ept_identity_pagetable_done))
3987                 goto out2;
3988
3989         identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT;
3990
3991         r = alloc_identity_pagetable(kvm);
3992         if (r < 0)
3993                 goto out2;
3994
3995         idx = srcu_read_lock(&kvm->srcu);
3996         r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
3997         if (r < 0)
3998                 goto out;
3999         /* Set up identity-mapping pagetable for EPT in real mode */
4000         for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
4001                 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
4002                         _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
4003                 r = kvm_write_guest_page(kvm, identity_map_pfn,
4004                                 &tmp, i * sizeof(tmp), sizeof(tmp));
4005                 if (r < 0)
4006                         goto out;
4007         }
4008         kvm->arch.ept_identity_pagetable_done = true;
4009
4010 out:
4011         srcu_read_unlock(&kvm->srcu, idx);
4012
4013 out2:
4014         mutex_unlock(&kvm->slots_lock);
4015         return r;
4016 }
4017
4018 static void seg_setup(int seg)
4019 {
4020         const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
4021         unsigned int ar;
4022
4023         vmcs_write16(sf->selector, 0);
4024         vmcs_writel(sf->base, 0);
4025         vmcs_write32(sf->limit, 0xffff);
4026         ar = 0x93;
4027         if (seg == VCPU_SREG_CS)
4028                 ar |= 0x08; /* code segment */
4029
4030         vmcs_write32(sf->ar_bytes, ar);
4031 }
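/*
 * The 0x93 access-rights value used above decodes to present (0x80), S=1
 * (0x10) and type 3 (read/write data, accessed); OR-ing in 0x08 for CS
 * yields type 0xb, an execute/read, accessed code segment.
 */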
4032
4033 static int alloc_apic_access_page(struct kvm *kvm)
4034 {
4035         struct page *page;
4036         struct kvm_userspace_memory_region kvm_userspace_mem;
4037         int r = 0;
4038
4039         mutex_lock(&kvm->slots_lock);
4040         if (kvm->arch.apic_access_page_done)
4041                 goto out;
4042         kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
4043         kvm_userspace_mem.flags = 0;
4044         kvm_userspace_mem.guest_phys_addr = APIC_DEFAULT_PHYS_BASE;
4045         kvm_userspace_mem.memory_size = PAGE_SIZE;
4046         r = __kvm_set_memory_region(kvm, &kvm_userspace_mem);
4047         if (r)
4048                 goto out;
4049
4050         page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
4051         if (is_error_page(page)) {
4052                 r = -EFAULT;
4053                 goto out;
4054         }
4055
4056         /*
4057          * Do not pin the page in memory, so that memory hot-unplug
4058          * is able to migrate it.
4059          */
4060         put_page(page);
4061         kvm->arch.apic_access_page_done = true;
4062 out:
4063         mutex_unlock(&kvm->slots_lock);
4064         return r;
4065 }
4066
4067 static int alloc_identity_pagetable(struct kvm *kvm)
4068 {
4069         /* Called with kvm->slots_lock held. */
4070
4071         struct kvm_userspace_memory_region kvm_userspace_mem;
4072         int r = 0;
4073
4074         BUG_ON(kvm->arch.ept_identity_pagetable_done);
4075
4076         kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
4077         kvm_userspace_mem.flags = 0;
4078         kvm_userspace_mem.guest_phys_addr =
4079                 kvm->arch.ept_identity_map_addr;
4080         kvm_userspace_mem.memory_size = PAGE_SIZE;
4081         r = __kvm_set_memory_region(kvm, &kvm_userspace_mem);
4082
4083         return r;
4084 }
4085
4086 static void allocate_vpid(struct vcpu_vmx *vmx)
4087 {
4088         int vpid;
4089
4090         vmx->vpid = 0;
4091         if (!enable_vpid)
4092                 return;
4093         spin_lock(&vmx_vpid_lock);
4094         vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
4095         if (vpid < VMX_NR_VPIDS) {
4096                 vmx->vpid = vpid;
4097                 __set_bit(vpid, vmx_vpid_bitmap);
4098         }
4099         spin_unlock(&vmx_vpid_lock);
4100 }
4101
4102 static void free_vpid(struct vcpu_vmx *vmx)
4103 {
4104         if (!enable_vpid)
4105                 return;
4106         spin_lock(&vmx_vpid_lock);
4107         if (vmx->vpid != 0)
4108                 __clear_bit(vmx->vpid, vmx_vpid_bitmap);
4109         spin_unlock(&vmx_vpid_lock);
4110 }
4111
4112 #define MSR_TYPE_R      1
4113 #define MSR_TYPE_W      2
4114 static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
4115                                                 u32 msr, int type)
4116 {
4117         int f = sizeof(unsigned long);
4118
4119         if (!cpu_has_vmx_msr_bitmap())
4120                 return;
4121
4122         /*
4123          * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
4124          * have the write-low and read-high bitmap offsets the wrong way round.
4125          * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
4126          */
4127         if (msr <= 0x1fff) {
4128                 if (type & MSR_TYPE_R)
4129                         /* read-low */
4130                         __clear_bit(msr, msr_bitmap + 0x000 / f);
4131
4132                 if (type & MSR_TYPE_W)
4133                         /* write-low */
4134                         __clear_bit(msr, msr_bitmap + 0x800 / f);
4135
4136         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
4137                 msr &= 0x1fff;
4138                 if (type & MSR_TYPE_R)
4139                         /* read-high */
4140                         __clear_bit(msr, msr_bitmap + 0x400 / f);
4141
4142                 if (type & MSR_TYPE_W)
4143                         /* write-high */
4144                         __clear_bit(msr, msr_bitmap + 0xc00 / f);
4145
4146         }
4147 }
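/*
 * For example, disabling both read and write intercepts for MSR_FS_BASE
 * (0xc0000100) clears bit 0x100 (0xc0000100 & 0x1fff) in the read-high
 * region at offset 0x400 and in the write-high region at offset 0xc00 of
 * the bitmap page, so guest RDMSR/WRMSR of that MSR no longer cause
 * VM exits.
 */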
4148
4149 static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
4150                                                 u32 msr, int type)
4151 {
4152         int f = sizeof(unsigned long);
4153
4154         if (!cpu_has_vmx_msr_bitmap())
4155                 return;
4156
4157         /*
4158          * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
4159          * have the write-low and read-high bitmap offsets the wrong way round.
4160          * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
4161          */
4162         if (msr <= 0x1fff) {
4163                 if (type & MSR_TYPE_R)
4164                         /* read-low */
4165                         __set_bit(msr, msr_bitmap + 0x000 / f);
4166
4167                 if (type & MSR_TYPE_W)
4168                         /* write-low */
4169                         __set_bit(msr, msr_bitmap + 0x800 / f);
4170
4171         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
4172                 msr &= 0x1fff;
4173                 if (type & MSR_TYPE_R)
4174                         /* read-high */
4175                         __set_bit(msr, msr_bitmap + 0x400 / f);
4176
4177                 if (type & MSR_TYPE_W)
4178                         /* write-high */
4179                         __set_bit(msr, msr_bitmap + 0xc00 / f);
4180
4181         }
4182 }
4183
4184 static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
4185 {
4186         if (!longmode_only)
4187                 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy,
4188                                                 msr, MSR_TYPE_R | MSR_TYPE_W);
4189         __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode,
4190                                                 msr, MSR_TYPE_R | MSR_TYPE_W);
4191 }
4192
4193 static void vmx_enable_intercept_msr_read_x2apic(u32 msr)
4194 {
4195         __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
4196                         msr, MSR_TYPE_R);
4197         __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
4198                         msr, MSR_TYPE_R);
4199 }
4200
4201 static void vmx_disable_intercept_msr_read_x2apic(u32 msr)
4202 {
4203         __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
4204                         msr, MSR_TYPE_R);
4205         __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
4206                         msr, MSR_TYPE_R);
4207 }
4208
4209 static void vmx_disable_intercept_msr_write_x2apic(u32 msr)
4210 {
4211         __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
4212                         msr, MSR_TYPE_W);
4213         __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
4214                         msr, MSR_TYPE_W);
4215 }
4216
4217 static int vmx_vm_has_apicv(struct kvm *kvm)
4218 {
4219         return enable_apicv && irqchip_in_kernel(kvm);
4220 }
4221
4222 /*
4223  * Send an interrupt to a vcpu via the posted-interrupt mechanism.
4224  * 1. If the target vcpu is running (non-root mode), send a posted-interrupt
4225  * notification and the hardware will sync the PIR to the vIRR atomically.
4226  * 2. If the target vcpu isn't running (root mode), kick it to pick up the
4227  * interrupt from the PIR on the next vmentry.
4228  */
4229 static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
4230 {
4231         struct vcpu_vmx *vmx = to_vmx(vcpu);
4232         int r;
4233
4234         if (pi_test_and_set_pir(vector, &vmx->pi_desc))
4235                 return;
4236
4237         r = pi_test_and_set_on(&vmx->pi_desc);
4238         kvm_make_request(KVM_REQ_EVENT, vcpu);
4239 #ifdef CONFIG_SMP
4240         if (!r && (vcpu->mode == IN_GUEST_MODE))
4241                 apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
4242                                 POSTED_INTR_VECTOR);
4243         else
4244 #endif
4245                 kvm_vcpu_kick(vcpu);
4246 }
4247
4248 static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
4249 {
4250         struct vcpu_vmx *vmx = to_vmx(vcpu);
4251
4252         if (!pi_test_and_clear_on(&vmx->pi_desc))
4253                 return;
4254
4255         kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
4256 }
4257
4258 static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu)
4259 {
4260         return;
4261 }
4262
4263 /*
4264  * Set up the vmcs's constant host-state fields, i.e., host-state fields that
4265  * will not change in the lifetime of the guest.
4266  * Note that host-state that does change is set elsewhere. E.g., host-state
4267  * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
4268  */
4269 static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
4270 {
4271         u32 low32, high32;
4272         unsigned long tmpl;
4273         struct desc_ptr dt;
4274         unsigned long cr4;
4275
4276         vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS);  /* 22.2.3 */
4277         vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
4278
4279         /* Save the most likely value for this task's CR4 in the VMCS. */
4280         cr4 = read_cr4();
4281         vmcs_writel(HOST_CR4, cr4);                     /* 22.2.3, 22.2.5 */
4282         vmx->host_state.vmcs_host_cr4 = cr4;
4283
4284         vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
4285 #ifdef CONFIG_X86_64
4286         /*
4287          * Load null selectors, so we can avoid reloading them in
4288          * __vmx_load_host_state(), in case userspace uses the null selectors
4289          * too (the expected case).
4290          */
4291         vmcs_write16(HOST_DS_SELECTOR, 0);
4292         vmcs_write16(HOST_ES_SELECTOR, 0);
4293 #else
4294         vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
4295         vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
4296 #endif
4297         vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
4298         vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
4299
4300         native_store_idt(&dt);
4301         vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
4302         vmx->host_idt_base = dt.address;
4303
4304         vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
4305
4306         rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
4307         vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
4308         rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
4309         vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl);   /* 22.2.3 */
4310
4311         if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
4312                 rdmsr(MSR_IA32_CR_PAT, low32, high32);
4313                 vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
4314         }
4315 }
4316
4317 static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
4318 {
4319         vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
4320         if (enable_ept)
4321                 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
4322         if (is_guest_mode(&vmx->vcpu))
4323                 vmx->vcpu.arch.cr4_guest_owned_bits &=
4324                         ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
4325         vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
4326 }
4327
4328 static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
4329 {
4330         u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;
4331
4332         if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
4333                 pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
4334         return pin_based_exec_ctrl;
4335 }
4336
4337 static u32 vmx_exec_control(struct vcpu_vmx *vmx)
4338 {
4339         u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
4340
4341         if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
4342                 exec_control &= ~CPU_BASED_MOV_DR_EXITING;
4343
4344         if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
4345                 exec_control &= ~CPU_BASED_TPR_SHADOW;
4346 #ifdef CONFIG_X86_64
4347                 exec_control |= CPU_BASED_CR8_STORE_EXITING |
4348                                 CPU_BASED_CR8_LOAD_EXITING;
4349 #endif
4350         }
4351         if (!enable_ept)
4352                 exec_control |= CPU_BASED_CR3_STORE_EXITING |
4353                                 CPU_BASED_CR3_LOAD_EXITING  |
4354                                 CPU_BASED_INVLPG_EXITING;
4355         return exec_control;
4356 }
4357
4358 static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
4359 {
4360         u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
4361         if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
4362                 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
4363         if (vmx->vpid == 0)
4364                 exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
4365         if (!enable_ept) {
4366                 exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
4367                 enable_unrestricted_guest = 0;
4368                 /* Enabling INVPCID for non-EPT guests may cause a performance regression. */
4369                 exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
4370         }
4371         if (!enable_unrestricted_guest)
4372                 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
4373         if (!ple_gap)
4374                 exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
4375         if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
4376                 exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
4377                                   SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4378         exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
4379         /*
4380          * SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD
4381          * (handle_vmptrld).  We cannot enable shadow_vmcs here because we
4382          * do not yet have a current VMCS12.
4383          */
4384         exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
4385         return exec_control;
4386 }
4387
4388 static void ept_set_mmio_spte_mask(void)
4389 {
4390         /*
4391          * EPT Misconfigurations can be generated if the value of bits 2:0
4392          * of an EPT paging-structure entry is 110b (write/execute).
4393          * Also, the magic bits (0x3ull << 62) are set to quickly identify an
4394          * MMIO spte.
4395          */
4396         kvm_mmu_set_mmio_spte_mask((0x3ull << 62) | 0x6ull);
4397 }
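
/*
 * Illustrative sketch only (not part of the original file): assuming the
 * generic MMU identifies an MMIO spte by comparing it against the same mask
 * that is passed to kvm_mmu_set_mmio_spte_mask() above, the check would look
 * roughly like the helper below.  The helper name is hypothetical.
 */
static inline bool ept_spte_looks_like_mmio(u64 spte)
{
        u64 mmio_mask = (0x3ull << 62) | 0x6ull;        /* mask installed above */

        /* All mask bits set => the spte was planted to mark MMIO. */
        return (spte & mmio_mask) == mmio_mask;
}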
4398
4399 /*
4400  * Sets up the vmcs for emulated real mode.
4401  */
4402 static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
4403 {
4404 #ifdef CONFIG_X86_64
4405         unsigned long a;
4406 #endif
4407         int i;
4408
4409         /* I/O */
4410         vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
4411         vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));
4412
4413         if (enable_shadow_vmcs) {
4414                 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
4415                 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
4416         }
4417         if (cpu_has_vmx_msr_bitmap())
4418                 vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
4419
4420         vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
4421
4422         /* Control */
4423         vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
4424
4425         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));
4426
4427         if (cpu_has_secondary_exec_ctrls()) {
4428                 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
4429                                 vmx_secondary_exec_control(vmx));
4430         }
4431
4432         if (vmx_vm_has_apicv(vmx->vcpu.kvm)) {
4433                 vmcs_write64(EOI_EXIT_BITMAP0, 0);
4434                 vmcs_write64(EOI_EXIT_BITMAP1, 0);
4435                 vmcs_write64(EOI_EXIT_BITMAP2, 0);
4436                 vmcs_write64(EOI_EXIT_BITMAP3, 0);
4437
4438                 vmcs_write16(GUEST_INTR_STATUS, 0);
4439
4440                 vmcs_write64(POSTED_INTR_NV, POSTED_INTR_VECTOR);
4441                 vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
4442         }
4443
4444         if (ple_gap) {
4445                 vmcs_write32(PLE_GAP, ple_gap);
4446                 vmx->ple_window = ple_window;
4447                 vmx->ple_window_dirty = true;
4448         }
4449
4450         vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
4451         vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
4452         vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */
4453
4454         vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
4455         vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
4456         vmx_set_constant_host_state(vmx);
4457 #ifdef CONFIG_X86_64
4458         rdmsrl(MSR_FS_BASE, a);
4459         vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
4460         rdmsrl(MSR_GS_BASE, a);
4461         vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
4462 #else
4463         vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
4464         vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
4465 #endif
4466
4467         vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
4468         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
4469         vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
4470         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
4471         vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
4472
4473         if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
4474                 u32 msr_low, msr_high;
4475                 u64 host_pat;
4476                 rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
4477                 host_pat = msr_low | ((u64) msr_high << 32);
4478                 /* Write the default value, following the host PAT */
4479                 vmcs_write64(GUEST_IA32_PAT, host_pat);
4480                 /* Keep arch.pat in sync with GUEST_IA32_PAT */
4481                 vmx->vcpu.arch.pat = host_pat;
4482         }
4483
4484         for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) {
4485                 u32 index = vmx_msr_index[i];
4486                 u32 data_low, data_high;
4487                 int j = vmx->nmsrs;
4488
4489                 if (rdmsr_safe(index, &data_low, &data_high) < 0)
4490                         continue;
4491                 if (wrmsr_safe(index, data_low, data_high) < 0)
4492                         continue;
4493                 vmx->guest_msrs[j].index = i;
4494                 vmx->guest_msrs[j].data = 0;
4495                 vmx->guest_msrs[j].mask = -1ull;
4496                 ++vmx->nmsrs;
4497         }
4498
4499
4500         vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
4501
4502         /* 22.2.1, 20.8.1 */
4503         vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl);
4504
4505         vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
4506         set_cr4_guest_host_mask(vmx);
4507
4508         return 0;
4509 }
4510
4511 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
4512 {
4513         struct vcpu_vmx *vmx = to_vmx(vcpu);
4514         struct msr_data apic_base_msr;
4515
4516         vmx->rmode.vm86_active = 0;
4517
4518         vmx->soft_vnmi_blocked = 0;
4519
4520         vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
4521         kvm_set_cr8(&vmx->vcpu, 0);
4522         apic_base_msr.data = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
4523         if (kvm_vcpu_is_bsp(&vmx->vcpu))
4524                 apic_base_msr.data |= MSR_IA32_APICBASE_BSP;
4525         apic_base_msr.host_initiated = true;
4526         kvm_set_apic_base(&vmx->vcpu, &apic_base_msr);
4527
4528         vmx_segment_cache_clear(vmx);
4529
4530         seg_setup(VCPU_SREG_CS);
4531         vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
4532         vmcs_write32(GUEST_CS_BASE, 0xffff0000);
4533
4534         seg_setup(VCPU_SREG_DS);
4535         seg_setup(VCPU_SREG_ES);
4536         seg_setup(VCPU_SREG_FS);
4537         seg_setup(VCPU_SREG_GS);
4538         seg_setup(VCPU_SREG_SS);
4539
4540         vmcs_write16(GUEST_TR_SELECTOR, 0);
4541         vmcs_writel(GUEST_TR_BASE, 0);
4542         vmcs_write32(GUEST_TR_LIMIT, 0xffff);
4543         vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
4544
4545         vmcs_write16(GUEST_LDTR_SELECTOR, 0);
4546         vmcs_writel(GUEST_LDTR_BASE, 0);
4547         vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
4548         vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
4549
4550         vmcs_write32(GUEST_SYSENTER_CS, 0);
4551         vmcs_writel(GUEST_SYSENTER_ESP, 0);
4552         vmcs_writel(GUEST_SYSENTER_EIP, 0);
4553
4554         vmcs_writel(GUEST_RFLAGS, 0x02);
4555         kvm_rip_write(vcpu, 0xfff0);
4556
4557         vmcs_writel(GUEST_GDTR_BASE, 0);
4558         vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
4559
4560         vmcs_writel(GUEST_IDTR_BASE, 0);
4561         vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
4562
4563         vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
4564         vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
4565         vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
4566
4567         /* Special registers */
4568         vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
4569
4570         setup_msrs(vmx);
4571
4572         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */
4573
4574         if (cpu_has_vmx_tpr_shadow()) {
4575                 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
4576                 if (vm_need_tpr_shadow(vmx->vcpu.kvm))
4577                         vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
4578                                      __pa(vmx->vcpu.arch.apic->regs));
4579                 vmcs_write32(TPR_THRESHOLD, 0);
4580         }
4581
4582         kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
4583
4584         if (vmx_vm_has_apicv(vcpu->kvm))
4585                 memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
4586
4587         if (vmx->vpid != 0)
4588                 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
4589
4590         vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
4591         vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
4592         vmx_set_cr4(&vmx->vcpu, 0);
4593         vmx_set_efer(&vmx->vcpu, 0);
4594         vmx_fpu_activate(&vmx->vcpu);
4595         update_exception_bitmap(&vmx->vcpu);
4596
4597         vpid_sync_context(vmx);
4598 }
4599
4600 /*
4601  * In nested virtualization, check if L1 asked to exit on external interrupts.
4602  * For most existing hypervisors, this will always return true.
4603  */
4604 static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
4605 {
4606         return get_vmcs12(vcpu)->pin_based_vm_exec_control &
4607                 PIN_BASED_EXT_INTR_MASK;
4608 }
4609
4610 /*
4611  * In nested virtualization, check if L1 has set
4612  * VM_EXIT_ACK_INTR_ON_EXIT
4613  */
4614 static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
4615 {
4616         return get_vmcs12(vcpu)->vm_exit_controls &
4617                 VM_EXIT_ACK_INTR_ON_EXIT;
4618 }
4619
4620 static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
4621 {
4622         return get_vmcs12(vcpu)->pin_based_vm_exec_control &
4623                 PIN_BASED_NMI_EXITING;
4624 }
4625
4626 static void enable_irq_window(struct kvm_vcpu *vcpu)
4627 {
4628         u32 cpu_based_vm_exec_control;
4629
4630         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
4631         cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
4632         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
4633 }
4634
4635 static void enable_nmi_window(struct kvm_vcpu *vcpu)
4636 {
4637         u32 cpu_based_vm_exec_control;
4638
4639         if (!cpu_has_virtual_nmis() ||
4640             vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
4641                 enable_irq_window(vcpu);
4642                 return;
4643         }
4644
4645         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
4646         cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
4647         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
4648 }
4649
4650 static void vmx_inject_irq(struct kvm_vcpu *vcpu)
4651 {
4652         struct vcpu_vmx *vmx = to_vmx(vcpu);
4653         uint32_t intr;
4654         int irq = vcpu->arch.interrupt.nr;
4655
4656         trace_kvm_inj_virq(irq);
4657
4658         ++vcpu->stat.irq_injections;
4659         if (vmx->rmode.vm86_active) {
4660                 int inc_eip = 0;
4661                 if (vcpu->arch.interrupt.soft)
4662                         inc_eip = vcpu->arch.event_exit_inst_len;
4663                 if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE)
4664                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
4665                 return;
4666         }
4667         intr = irq | INTR_INFO_VALID_MASK;
4668         if (vcpu->arch.interrupt.soft) {
4669                 intr |= INTR_TYPE_SOFT_INTR;
4670                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
4671                              vmx->vcpu.arch.event_exit_inst_len);
4672         } else
4673                 intr |= INTR_TYPE_EXT_INTR;
4674         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
4675 }
4676
4677 static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
4678 {
4679         struct vcpu_vmx *vmx = to_vmx(vcpu);
4680
4681         if (is_guest_mode(vcpu))
4682                 return;
4683
4684         if (!cpu_has_virtual_nmis()) {
4685                 /*
4686                  * Tracking the NMI-blocked state in software is built upon
4687                  * finding the next open IRQ window. This, in turn, depends on
4688                  * well-behaving guests: They have to keep IRQs disabled at
4689                  * least as long as the NMI handler runs. Otherwise we may
4690                  * cause NMI nesting, maybe breaking the guest. But as this is
4691                  * highly unlikely, we can live with the residual risk.
4692                  */
4693                 vmx->soft_vnmi_blocked = 1;
4694                 vmx->vnmi_blocked_time = 0;
4695         }
4696
4697         ++vcpu->stat.nmi_injections;
4698         vmx->nmi_known_unmasked = false;
4699         if (vmx->rmode.vm86_active) {
4700                 if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
4701                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
4702                 return;
4703         }
4704         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
4705                         INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
4706 }
4707
4708 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
4709 {
4710         if (!cpu_has_virtual_nmis())
4711                 return to_vmx(vcpu)->soft_vnmi_blocked;
4712         if (to_vmx(vcpu)->nmi_known_unmasked)
4713                 return false;
4714         return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
4715 }
4716
4717 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
4718 {
4719         struct vcpu_vmx *vmx = to_vmx(vcpu);
4720
4721         if (!cpu_has_virtual_nmis()) {
4722                 if (vmx->soft_vnmi_blocked != masked) {
4723                         vmx->soft_vnmi_blocked = masked;
4724                         vmx->vnmi_blocked_time = 0;
4725                 }
4726         } else {
4727                 vmx->nmi_known_unmasked = !masked;
4728                 if (masked)
4729                         vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
4730                                       GUEST_INTR_STATE_NMI);
4731                 else
4732                         vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
4733                                         GUEST_INTR_STATE_NMI);
4734         }
4735 }
4736
4737 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
4738 {
4739         if (to_vmx(vcpu)->nested.nested_run_pending)
4740                 return 0;
4741
4742         if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
4743                 return 0;
4744
4745         return  !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
4746                   (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
4747                    | GUEST_INTR_STATE_NMI));
4748 }
4749
4750 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
4751 {
4752         return (!to_vmx(vcpu)->nested.nested_run_pending &&
4753                 vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
4754                 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
4755                         (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
4756 }
4757
4758 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
4759 {
4760         int ret;
4761         struct kvm_userspace_memory_region tss_mem = {
4762                 .slot = TSS_PRIVATE_MEMSLOT,
4763                 .guest_phys_addr = addr,
4764                 .memory_size = PAGE_SIZE * 3,
4765                 .flags = 0,
4766         };
4767
4768         ret = kvm_set_memory_region(kvm, &tss_mem);
4769         if (ret)
4770                 return ret;
4771         kvm->arch.tss_addr = addr;
4772         return init_rmode_tss(kvm);
4773 }
4774
4775 static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
4776 {
4777         switch (vec) {
4778         case BP_VECTOR:
4779                 /*
4780                  * Update instruction length as we may reinject the exception
4781                  * from user space while in guest debugging mode.
4782                  */
4783                 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
4784                         vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
4785                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
4786                         return false;
4787                 /* fall through */
4788         case DB_VECTOR:
4789                 if (vcpu->guest_debug &
4790                         (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
4791                         return false;
4792                 /* fall through */
4793         case DE_VECTOR:
4794         case OF_VECTOR:
4795         case BR_VECTOR:
4796         case UD_VECTOR:
4797         case DF_VECTOR:
4798         case SS_VECTOR:
4799         case GP_VECTOR:
4800         case MF_VECTOR:
4801                 return true;
4802         break;
4803         }
4804         return false;
4805 }
4806
4807 static int handle_rmode_exception(struct kvm_vcpu *vcpu,
4808                                   int vec, u32 err_code)
4809 {
4810         /*
4811          * An instruction with the address-size override prefix (opcode 0x67)
4812          * causes a #SS fault with error code 0 in VM86 mode.
4813          */
4814         if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
4815                 if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
4816                         if (vcpu->arch.halt_request) {
4817                                 vcpu->arch.halt_request = 0;
4818                                 return kvm_emulate_halt(vcpu);
4819                         }
4820                         return 1;
4821                 }
4822                 return 0;
4823         }
4824
4825         /*
4826          * Forward all other exceptions that are valid in real mode.
4827          * FIXME: Breaks guest debugging in real mode, needs to be fixed with
4828          *        the required debugging infrastructure rework.
4829          */
4830         kvm_queue_exception(vcpu, vec);
4831         return 1;
4832 }
4833
4834 /*
4835  * Trigger machine check on the host. We assume all the MSRs are already set up
4836  * by the CPU and that we still run on the same CPU as the MCE occurred on.
4837  * We pass a fake environment to the machine check handler because we want
4838  * the guest to be always treated like user space, no matter what context
4839  * it used internally.
4840  */
4841 static void kvm_machine_check(void)
4842 {
4843 #if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
4844         struct pt_regs regs = {
4845                 .cs = 3, /* Fake ring 3 no matter what the guest ran on */
4846                 .flags = X86_EFLAGS_IF,
4847         };
4848
4849         do_machine_check(&regs, 0);
4850 #endif
4851 }
4852
4853 static int handle_machine_check(struct kvm_vcpu *vcpu)
4854 {
4855         /* already handled by vcpu_run */
4856         return 1;
4857 }
4858
4859 static int handle_exception(struct kvm_vcpu *vcpu)
4860 {
4861         struct vcpu_vmx *vmx = to_vmx(vcpu);
4862         struct kvm_run *kvm_run = vcpu->run;
4863         u32 intr_info, ex_no, error_code;
4864         unsigned long cr2, rip, dr6;
4865         u32 vect_info;
4866         enum emulation_result er;
4867
4868         vect_info = vmx->idt_vectoring_info;
4869         intr_info = vmx->exit_intr_info;
4870
4871         if (is_machine_check(intr_info))
4872                 return handle_machine_check(vcpu);
4873
4874         if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
4875                 return 1;  /* already handled by vmx_vcpu_run() */
4876
4877         if (is_no_device(intr_info)) {
4878                 vmx_fpu_activate(vcpu);
4879                 return 1;
4880         }
4881
4882         if (is_invalid_opcode(intr_info)) {
4883                 er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
4884                 if (er != EMULATE_DONE)
4885                         kvm_queue_exception(vcpu, UD_VECTOR);
4886                 return 1;
4887         }
4888
4889         error_code = 0;
4890         if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
4891                 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
4892
4893         /*
4894          * A #PF with PFEC.RSVD = 1 indicates the guest is accessing
4895          * MMIO, so it is better to report an internal error.
4896          * See the comments in vmx_handle_exit.
4897          */
4898         if ((vect_info & VECTORING_INFO_VALID_MASK) &&
4899             !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
4900                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
4901                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
4902                 vcpu->run->internal.ndata = 2;
4903                 vcpu->run->internal.data[0] = vect_info;
4904                 vcpu->run->internal.data[1] = intr_info;
4905                 return 0;
4906         }
4907
4908         if (is_page_fault(intr_info)) {
4909                 /* EPT won't cause page fault directly */
4910                 BUG_ON(enable_ept);
4911                 cr2 = vmcs_readl(EXIT_QUALIFICATION);
4912                 trace_kvm_page_fault(cr2, error_code);
4913
4914                 if (kvm_event_needs_reinjection(vcpu))
4915                         kvm_mmu_unprotect_page_virt(vcpu, cr2);
4916                 return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);
4917         }
4918
4919         ex_no = intr_info & INTR_INFO_VECTOR_MASK;
4920
4921         if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
4922                 return handle_rmode_exception(vcpu, ex_no, error_code);
4923
4924         switch (ex_no) {
4925         case DB_VECTOR:
4926                 dr6 = vmcs_readl(EXIT_QUALIFICATION);
4927                 if (!(vcpu->guest_debug &
4928                       (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
4929                         vcpu->arch.dr6 &= ~15;
4930                         vcpu->arch.dr6 |= dr6 | DR6_RTM;
4931                         if (!(dr6 & ~DR6_RESERVED)) /* icebp */
4932                                 skip_emulated_instruction(vcpu);
4933
4934                         kvm_queue_exception(vcpu, DB_VECTOR);
4935                         return 1;
4936                 }
4937                 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
4938                 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
4939                 /* fall through */
4940         case BP_VECTOR:
4941                 /*
4942                  * Update instruction length as we may reinject #BP from
4943                  * user space while in guest debugging mode. Reading it for
4944          * #DB as well causes no harm; it is not used in that case.
4945                  */
4946                 vmx->vcpu.arch.event_exit_inst_len =
4947                         vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
4948                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
4949                 rip = kvm_rip_read(vcpu);
4950                 kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
4951                 kvm_run->debug.arch.exception = ex_no;
4952                 break;
4953         default:
4954                 kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
4955                 kvm_run->ex.exception = ex_no;
4956                 kvm_run->ex.error_code = error_code;
4957                 break;
4958         }
4959         return 0;
4960 }
4961
4962 static int handle_external_interrupt(struct kvm_vcpu *vcpu)
4963 {
4964         ++vcpu->stat.irq_exits;
4965         return 1;
4966 }
4967
4968 static int handle_triple_fault(struct kvm_vcpu *vcpu)
4969 {
4970         vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
4971         return 0;
4972 }
4973
4974 static int handle_io(struct kvm_vcpu *vcpu)
4975 {
4976         unsigned long exit_qualification;
4977         int size, in, string;
4978         unsigned port;
4979
4980         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4981         string = (exit_qualification & 16) != 0;
4982         in = (exit_qualification & 8) != 0;
4983
4984         ++vcpu->stat.io_exits;
4985
4986         if (string || in)
4987                 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
4988
4989         port = exit_qualification >> 16;
4990         size = (exit_qualification & 7) + 1;
4991         skip_emulated_instruction(vcpu);
4992
4993         return kvm_fast_pio_out(vcpu, size, port);
4994 }
4995
4996 static void
4997 vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
4998 {
4999         /*
5000          * Patch in the VMCALL instruction:
5001          */
5002         hypercall[0] = 0x0f;
5003         hypercall[1] = 0x01;
5004         hypercall[2] = 0xc1;
5005 }
5006
5007 static bool nested_cr0_valid(struct vmcs12 *vmcs12, unsigned long val)
5008 {
5009         unsigned long always_on = VMXON_CR0_ALWAYSON;
5010
5011         if (nested_vmx_secondary_ctls_high &
5012                 SECONDARY_EXEC_UNRESTRICTED_GUEST &&
5013             nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
5014                 always_on &= ~(X86_CR0_PE | X86_CR0_PG);
5015         return (val & always_on) == always_on;
5016 }
5017
5018 /* called to set cr0 as appropriate for a mov-to-cr0 exit. */
5019 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
5020 {
5021         if (is_guest_mode(vcpu)) {
5022                 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5023                 unsigned long orig_val = val;
5024
5025                 /*
5026                  * We get here when L2 changed cr0 in a way that did not change
5027                  * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
5028                  * but did change L0 shadowed bits. So we first calculate the
5029                  * effective cr0 value that L1 would like to write into the
5030                  * hardware. It consists of the L2-owned bits from the new
5031                  * value combined with the L1-owned bits from L1's guest_cr0.
5032                  */
5033                 val = (val & ~vmcs12->cr0_guest_host_mask) |
5034                         (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
5035
5036                 if (!nested_cr0_valid(vmcs12, val))
5037                         return 1;
5038
5039                 if (kvm_set_cr0(vcpu, val))
5040                         return 1;
5041                 vmcs_writel(CR0_READ_SHADOW, orig_val);
5042                 return 0;
5043         } else {
5044                 if (to_vmx(vcpu)->nested.vmxon &&
5045                     ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON))
5046                         return 1;
5047                 return kvm_set_cr0(vcpu, val);
5048         }
5049 }
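
/*
 * Illustrative sketch only (not part of the original file): the combining rule
 * described in the comment of handle_set_cr0() above, pulled out for clarity.
 * Bits that L1 shadows (set in cr0_guest_host_mask) are taken from L1's
 * guest_cr0; all other bits come from the value L2 just tried to write.  For
 * example, with mask = 0x1 (only CR0.PE shadowed), guest_cr0 = 0x31 and an L2
 * write of 0x80000010, the effective value is
 * (0x80000010 & ~0x1) | (0x31 & 0x1) = 0x80000011.  The helper name is
 * hypothetical.
 */
static inline unsigned long nested_cr0_effective_value(unsigned long l2_val,
                                                       unsigned long guest_cr0,
                                                       unsigned long mask)
{
        return (l2_val & ~mask) | (guest_cr0 & mask);
}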
5050
5051 static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
5052 {
5053         if (is_guest_mode(vcpu)) {
5054                 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5055                 unsigned long orig_val = val;
5056
5057                 /* analogously to handle_set_cr0 */
5058                 val = (val & ~vmcs12->cr4_guest_host_mask) |
5059                         (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask);
5060                 if (kvm_set_cr4(vcpu, val))
5061                         return 1;
5062                 vmcs_writel(CR4_READ_SHADOW, orig_val);
5063                 return 0;
5064         } else
5065                 return kvm_set_cr4(vcpu, val);
5066 }
5067
5068 /* called to set cr0 as appropriate for a clts instruction exit. */
5069 static void handle_clts(struct kvm_vcpu *vcpu)
5070 {
5071         if (is_guest_mode(vcpu)) {
5072                 /*
5073                  * We get here when L2 did CLTS, and L1 didn't shadow CR0.TS
5074                  * but we did (!fpu_active). We need to keep GUEST_CR0.TS on,
5075                  * but pretend it is off (also in arch.cr0 for fpu_activate).
5076                  */
5077                 vmcs_writel(CR0_READ_SHADOW,
5078                         vmcs_readl(CR0_READ_SHADOW) & ~X86_CR0_TS);
5079                 vcpu->arch.cr0 &= ~X86_CR0_TS;
5080         } else
5081                 vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
5082 }
5083
5084 static int handle_cr(struct kvm_vcpu *vcpu)
5085 {
5086         unsigned long exit_qualification, val;
5087         int cr;
5088         int reg;
5089         int err;
5090
5091         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5092         cr = exit_qualification & 15;
5093         reg = (exit_qualification >> 8) & 15;
5094         switch ((exit_qualification >> 4) & 3) {
5095         case 0: /* mov to cr */
5096                 val = kvm_register_readl(vcpu, reg);
5097                 trace_kvm_cr_write(cr, val);
5098                 switch (cr) {
5099                 case 0:
5100                         err = handle_set_cr0(vcpu, val);
5101                         kvm_complete_insn_gp(vcpu, err);
5102                         return 1;
5103                 case 3:
5104                         err = kvm_set_cr3(vcpu, val);
5105                         kvm_complete_insn_gp(vcpu, err);
5106                         return 1;
5107                 case 4:
5108                         err = handle_set_cr4(vcpu, val);
5109                         kvm_complete_insn_gp(vcpu, err);
5110                         return 1;
5111                 case 8: {
5112                                 u8 cr8_prev = kvm_get_cr8(vcpu);
5113                                 u8 cr8 = (u8)val;
5114                                 err = kvm_set_cr8(vcpu, cr8);
5115                                 kvm_complete_insn_gp(vcpu, err);
5116                                 if (irqchip_in_kernel(vcpu->kvm))
5117                                         return 1;
5118                                 if (cr8_prev <= cr8)
5119                                         return 1;
5120                                 vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
5121                                 return 0;
5122                         }
5123                 }
5124                 break;
5125         case 2: /* clts */
5126                 handle_clts(vcpu);
5127                 trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
5128                 skip_emulated_instruction(vcpu);
5129                 vmx_fpu_activate(vcpu);
5130                 return 1;
5131         case 1: /*mov from cr*/
5132                 switch (cr) {
5133                 case 3:
5134                         val = kvm_read_cr3(vcpu);
5135                         kvm_register_write(vcpu, reg, val);
5136                         trace_kvm_cr_read(cr, val);
5137                         skip_emulated_instruction(vcpu);
5138                         return 1;
5139                 case 8:
5140                         val = kvm_get_cr8(vcpu);
5141                         kvm_register_write(vcpu, reg, val);
5142                         trace_kvm_cr_read(cr, val);
5143                         skip_emulated_instruction(vcpu);
5144                         return 1;
5145                 }
5146                 break;
5147         case 3: /* lmsw */
5148                 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
5149                 trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
5150                 kvm_lmsw(vcpu, val);
5151
5152                 skip_emulated_instruction(vcpu);
5153                 return 1;
5154         default:
5155                 break;
5156         }
5157         vcpu->run->exit_reason = 0;
5158         vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
5159                (int)(exit_qualification >> 4) & 3, cr);
5160         return 0;
5161 }
5162
5163 static int handle_dr(struct kvm_vcpu *vcpu)
5164 {
5165         unsigned long exit_qualification;
5166         int dr, reg;
5167
5168         /* Do not handle if CPL > 0; a #GP will be triggered on re-entry */
5169         if (!kvm_require_cpl(vcpu, 0))
5170                 return 1;
5171         dr = vmcs_readl(GUEST_DR7);
5172         if (dr & DR7_GD) {
5173                 /*
5174                  * As the vm-exit takes precedence over the debug trap, we
5175                  * need to emulate the latter, either for the host or the
5176                  * guest debugging itself.
5177                  */
5178                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
5179                         vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
5180                         vcpu->run->debug.arch.dr7 = dr;
5181                         vcpu->run->debug.arch.pc =
5182                                 vmcs_readl(GUEST_CS_BASE) +
5183                                 vmcs_readl(GUEST_RIP);
5184                         vcpu->run->debug.arch.exception = DB_VECTOR;
5185                         vcpu->run->exit_reason = KVM_EXIT_DEBUG;
5186                         return 0;
5187                 } else {
5188                         vcpu->arch.dr6 |= DR6_BD | DR6_RTM;
5189                         kvm_queue_exception(vcpu, DB_VECTOR);
5190                         return 1;
5191                 }
5192         }
5193
5194         if (vcpu->guest_debug == 0) {
5195                 u32 cpu_based_vm_exec_control;
5196
5197                 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
5198                 cpu_based_vm_exec_control &= ~CPU_BASED_MOV_DR_EXITING;
5199                 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
5200
5201                 /*
5202                  * No more DR vmexits; force a reload of the debug registers
5203                  * and reenter on this instruction.  The next vmexit will
5204                  * retrieve the full state of the debug registers.
5205                  */
5206                 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
5207                 return 1;
5208         }
5209
5210         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5211         dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
5212         reg = DEBUG_REG_ACCESS_REG(exit_qualification);
5213         if (exit_qualification & TYPE_MOV_FROM_DR) {
5214                 unsigned long val;
5215
5216                 if (kvm_get_dr(vcpu, dr, &val))
5217                         return 1;
5218                 kvm_register_write(vcpu, reg, val);
5219         } else
5220                 if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)))
5221                         return 1;
5222
5223         skip_emulated_instruction(vcpu);
5224         return 1;
5225 }
5226
5227 static u64 vmx_get_dr6(struct kvm_vcpu *vcpu)
5228 {
5229         return vcpu->arch.dr6;
5230 }
5231
5232 static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
5233 {
5234 }
5235
5236 static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
5237 {
5238         u32 cpu_based_vm_exec_control;
5239
5240         get_debugreg(vcpu->arch.db[0], 0);
5241         get_debugreg(vcpu->arch.db[1], 1);
5242         get_debugreg(vcpu->arch.db[2], 2);
5243         get_debugreg(vcpu->arch.db[3], 3);
5244         get_debugreg(vcpu->arch.dr6, 6);
5245         vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);
5246
5247         vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
5248
5249         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
5250         cpu_based_vm_exec_control |= CPU_BASED_MOV_DR_EXITING;
5251         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
5252 }
5253
5254 static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
5255 {
5256         vmcs_writel(GUEST_DR7, val);
5257 }
5258
5259 static int handle_cpuid(struct kvm_vcpu *vcpu)
5260 {
5261         kvm_emulate_cpuid(vcpu);
5262         return 1;
5263 }
5264
5265 static int handle_rdmsr(struct kvm_vcpu *vcpu)
5266 {
5267         u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
5268         u64 data;
5269
5270         if (vmx_get_msr(vcpu, ecx, &data)) {
5271                 trace_kvm_msr_read_ex(ecx);
5272                 kvm_inject_gp(vcpu, 0);
5273                 return 1;
5274         }
5275
5276         trace_kvm_msr_read(ecx, data);
5277
5278         /* FIXME: handling of bits 32:63 of rax, rdx */
5279         vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
5280         vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
5281         skip_emulated_instruction(vcpu);
5282         return 1;
5283 }
5284
5285 static int handle_wrmsr(struct kvm_vcpu *vcpu)
5286 {
5287         struct msr_data msr;
5288         u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
5289         u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
5290                 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
5291
5292         msr.data = data;
5293         msr.index = ecx;
5294         msr.host_initiated = false;
5295         if (kvm_set_msr(vcpu, &msr) != 0) {
5296                 trace_kvm_msr_write_ex(ecx, data);
5297                 kvm_inject_gp(vcpu, 0);
5298                 return 1;
5299         }
5300
5301         trace_kvm_msr_write(ecx, data);
5302         skip_emulated_instruction(vcpu);
5303         return 1;
5304 }
5305
5306 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
5307 {
5308         kvm_make_request(KVM_REQ_EVENT, vcpu);
5309         return 1;
5310 }
5311
5312 static int handle_interrupt_window(struct kvm_vcpu *vcpu)
5313 {
5314         u32 cpu_based_vm_exec_control;
5315
5316         /* clear pending irq */
5317         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
5318         cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
5319         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
5320
5321         kvm_make_request(KVM_REQ_EVENT, vcpu);
5322
5323         ++vcpu->stat.irq_window_exits;
5324
5325         /*
5326          * If user space is waiting to inject interrupts, exit as soon as
5327          * possible.
5328          */
5329         if (!irqchip_in_kernel(vcpu->kvm) &&
5330             vcpu->run->request_interrupt_window &&
5331             !kvm_cpu_has_interrupt(vcpu)) {
5332                 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
5333                 return 0;
5334         }
5335         return 1;
5336 }
5337
5338 static int handle_halt(struct kvm_vcpu *vcpu)
5339 {
5340         skip_emulated_instruction(vcpu);
5341         return kvm_emulate_halt(vcpu);
5342 }
5343
5344 static int handle_vmcall(struct kvm_vcpu *vcpu)
5345 {
5346         skip_emulated_instruction(vcpu);
5347         kvm_emulate_hypercall(vcpu);
5348         return 1;
5349 }
5350
5351 static int handle_invd(struct kvm_vcpu *vcpu)
5352 {
5353         return emulate_instruction(vcpu, 0) == EMULATE_DONE;
5354 }
5355
5356 static int handle_invlpg(struct kvm_vcpu *vcpu)
5357 {
5358         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5359
5360         kvm_mmu_invlpg(vcpu, exit_qualification);
5361         skip_emulated_instruction(vcpu);
5362         return 1;
5363 }
5364
5365 static int handle_rdpmc(struct kvm_vcpu *vcpu)
5366 {
5367         int err;
5368
5369         err = kvm_rdpmc(vcpu);
5370         kvm_complete_insn_gp(vcpu, err);
5371
5372         return 1;
5373 }
5374
5375 static int handle_wbinvd(struct kvm_vcpu *vcpu)
5376 {
5377         skip_emulated_instruction(vcpu);
5378         kvm_emulate_wbinvd(vcpu);
5379         return 1;
5380 }
5381
5382 static int handle_xsetbv(struct kvm_vcpu *vcpu)
5383 {
5384         u64 new_bv = kvm_read_edx_eax(vcpu);
5385         u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
5386
5387         if (kvm_set_xcr(vcpu, index, new_bv) == 0)
5388                 skip_emulated_instruction(vcpu);
5389         return 1;
5390 }
5391
5392 static int handle_apic_access(struct kvm_vcpu *vcpu)
5393 {
5394         if (likely(fasteoi)) {
5395                 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5396                 int access_type, offset;
5397
5398                 access_type = exit_qualification & APIC_ACCESS_TYPE;
5399                 offset = exit_qualification & APIC_ACCESS_OFFSET;
5400                 /*
5401                  * A sane guest uses MOV to write EOI, and the written value
5402                  * does not matter.  So take a short cut here to avoid
5403                  * heavy instruction emulation.
5404                  */
5405                 if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
5406                     (offset == APIC_EOI)) {
5407                         kvm_lapic_set_eoi(vcpu);
5408                         skip_emulated_instruction(vcpu);
5409                         return 1;
5410                 }
5411         }
5412         return emulate_instruction(vcpu, 0) == EMULATE_DONE;
5413 }
5414
5415 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
5416 {
5417         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5418         int vector = exit_qualification & 0xff;
5419
5420         /* EOI-induced VM exit is trap-like and thus no need to adjust IP */
5421         kvm_apic_set_eoi_accelerated(vcpu, vector);
5422         return 1;
5423 }
5424
5425 static int handle_apic_write(struct kvm_vcpu *vcpu)
5426 {
5427         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5428         u32 offset = exit_qualification & 0xfff;
5429
5430         /* APIC-write VM exit is trap-like and thus no need to adjust IP */
5431         kvm_apic_write_nodecode(vcpu, offset);
5432         return 1;
5433 }
5434
5435 static int handle_task_switch(struct kvm_vcpu *vcpu)
5436 {
5437         struct vcpu_vmx *vmx = to_vmx(vcpu);
5438         unsigned long exit_qualification;
5439         bool has_error_code = false;
5440         u32 error_code = 0;
5441         u16 tss_selector;
5442         int reason, type, idt_v, idt_index;
5443
5444         idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
5445         idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
5446         type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
5447
5448         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5449
5450         reason = (u32)exit_qualification >> 30;
5451         if (reason == TASK_SWITCH_GATE && idt_v) {
5452                 switch (type) {
5453                 case INTR_TYPE_NMI_INTR:
5454                         vcpu->arch.nmi_injected = false;
5455                         vmx_set_nmi_mask(vcpu, true);
5456                         break;
5457                 case INTR_TYPE_EXT_INTR:
5458                 case INTR_TYPE_SOFT_INTR:
5459                         kvm_clear_interrupt_queue(vcpu);
5460                         break;
5461                 case INTR_TYPE_HARD_EXCEPTION:
5462                         if (vmx->idt_vectoring_info &
5463                             VECTORING_INFO_DELIVER_CODE_MASK) {
5464                                 has_error_code = true;
5465                                 error_code =
5466                                         vmcs_read32(IDT_VECTORING_ERROR_CODE);
5467                         }
5468                         /* fall through */
5469                 case INTR_TYPE_SOFT_EXCEPTION:
5470                         kvm_clear_exception_queue(vcpu);
5471                         break;
5472                 default:
5473                         break;
5474                 }
5475         }
5476         tss_selector = exit_qualification;
5477
5478         if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
5479                        type != INTR_TYPE_EXT_INTR &&
5480                        type != INTR_TYPE_NMI_INTR))
5481                 skip_emulated_instruction(vcpu);
5482
5483         if (kvm_task_switch(vcpu, tss_selector,
5484                             type == INTR_TYPE_SOFT_INTR ? idt_index : -1, reason,
5485                             has_error_code, error_code) == EMULATE_FAIL) {
5486                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
5487                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
5488                 vcpu->run->internal.ndata = 0;
5489                 return 0;
5490         }
5491
5492         /* clear all local breakpoint enable flags (L0-L3) as well as DR7.LE */
5493         vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~0x155);
5494
5495         /*
5496          * TODO: What about debug traps on tss switch?
5497          *       Are we supposed to inject them and update dr6?
5498          */
5499
5500         return 1;
5501 }
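
/*
 * Illustrative sketch only (not part of the original file): the ~0x155 mask
 * used at the end of handle_task_switch() above clears the local breakpoint
 * enables DR7.L0-L3 (bits 0, 2, 4 and 6) together with DR7.LE (bit 8), as the
 * patch title suggests.  The macro name is hypothetical.
 */
#define DR7_LOCAL_ENABLES_AND_LE \
        ((1UL << 0) | (1UL << 2) | (1UL << 4) | (1UL << 6) | (1UL << 8)) /* == 0x155 */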
5502
5503 static int handle_ept_violation(struct kvm_vcpu *vcpu)
5504 {
5505         unsigned long exit_qualification;
5506         gpa_t gpa;
5507         u32 error_code;
5508         int gla_validity;
5509
5510         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5511
5512         gla_validity = (exit_qualification >> 7) & 0x3;
5513         if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) {
5514                 printk(KERN_ERR "EPT: Handling EPT violation failed!\n");
5515                 printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
5516                         (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
5517                         vmcs_readl(GUEST_LINEAR_ADDRESS));
5518                 printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
5519                         (long unsigned int)exit_qualification);
5520                 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
5521                 vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION;
5522                 return 0;
5523         }
5524
5525         /*
5526          * If the EPT violation happened while executing IRET from NMI,
5527          * the "blocked by NMI" bit has to be set before the next VM entry.
5528          * There are errata that may cause this bit to not be set:
5529          * AAK134, BY25.
5530          */
5531         if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
5532                         cpu_has_virtual_nmis() &&
5533                         (exit_qualification & INTR_INFO_UNBLOCK_NMI))
5534                 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
5535
5536         gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
5537         trace_kvm_page_fault(gpa, exit_qualification);
5538
5539         /* Is it a write fault? */
5540         error_code = exit_qualification & (1U << 1);
5541         /* Is it a fetch fault? */
5542         error_code |= (exit_qualification & (1U << 2)) << 2;
5543         /* Is the EPT page-table entry present? */
5544         error_code |= (exit_qualification >> 3) & 0x1;
5545
5546         vcpu->arch.exit_qualification = exit_qualification;
5547
5548         return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
5549 }
5550
5551 static u64 ept_rsvd_mask(u64 spte, int level)
5552 {
5553         int i;
5554         u64 mask = 0;
5555
5556         for (i = 51; i > boot_cpu_data.x86_phys_bits; i--)
5557                 mask |= (1ULL << i);
5558
5559         if (level == 4)
5560                 /* bits 7:3 reserved */
5561                 mask |= 0xf8;
5562         else if (spte & (1ULL << 7))
5563                 /*
5564                  * For a 1GB/2MB page, bits 29:12 or 20:12 are reserved respectively;
5565                  * level == 1 if the hypervisor is using the ignored bit 7.
5566                  */
5567                 mask |= (PAGE_SIZE << ((level - 1) * 9)) - PAGE_SIZE;
5568         else if (level > 1)
5569                 /* bits 6:3 reserved */
5570                 mask |= 0x78;
5571
5572         return mask;
5573 }
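
/*
 * Illustrative sketch only (not part of the original file): worked values for
 * the large-page branch of ept_rsvd_mask() above.  For a 2MB leaf (level == 2
 * with bit 7 set) the reserved bits are 20:12, i.e. 0x1ff000; for a 1GB leaf
 * (level == 3) they are 29:12, i.e. 0x3ffff000.  The helper name is
 * hypothetical.
 */
static inline u64 ept_large_page_rsvd_bits(int level)
{
        /* (PAGE_SIZE << 9) - PAGE_SIZE for level 2, (PAGE_SIZE << 18) - PAGE_SIZE for level 3 */
        return (PAGE_SIZE << ((level - 1) * 9)) - PAGE_SIZE;
}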
5574
5575 static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte,
5576                                        int level)
5577 {
5578         printk(KERN_ERR "%s: spte 0x%llx level %d\n", __func__, spte, level);
5579
5580         /* 010b (write-only) */
5581         WARN_ON((spte & 0x7) == 0x2);
5582
5583         /* 110b (write/execute) */
5584         WARN_ON((spte & 0x7) == 0x6);
5585
5586         /* 100b (execute-only) and value not supported by logical processor */
5587         if (!cpu_has_vmx_ept_execute_only())
5588                 WARN_ON((spte & 0x7) == 0x4);
5589
5590         /* not 000b */
5591         if ((spte & 0x7)) {
5592                 u64 rsvd_bits = spte & ept_rsvd_mask(spte, level);
5593
5594                 if (rsvd_bits != 0) {
5595                         printk(KERN_ERR "%s: rsvd_bits = 0x%llx\n",
5596                                          __func__, rsvd_bits);
5597                         WARN_ON(1);
5598                 }
5599
5600                 /* bits 5:3 are _not_ reserved for large page or leaf page */
5601                 if ((rsvd_bits & 0x38) == 0) {
5602                         u64 ept_mem_type = (spte & 0x38) >> 3;
5603
5604                         if (ept_mem_type == 2 || ept_mem_type == 3 ||
5605                             ept_mem_type == 7) {
5606                                 printk(KERN_ERR "%s: ept_mem_type=0x%llx\n",
5607                                                 __func__, ept_mem_type);
5608                                 WARN_ON(1);
5609                         }
5610                 }
5611         }
5612 }
5613
5614 static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
5615 {
5616         u64 sptes[4];
5617         int nr_sptes, i, ret;
5618         gpa_t gpa;
5619
5620         gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
5621         if (!kvm_io_bus_write(vcpu->kvm, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
5622                 skip_emulated_instruction(vcpu);
5623                 return 1;
5624         }
5625
5626         ret = handle_mmio_page_fault_common(vcpu, gpa, true);
5627         if (likely(ret == RET_MMIO_PF_EMULATE))
5628                 return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
5629                                               EMULATE_DONE;
5630
5631         if (unlikely(ret == RET_MMIO_PF_INVALID))
5632                 return kvm_mmu_page_fault(vcpu, gpa, 0, NULL, 0);
5633
5634         if (unlikely(ret == RET_MMIO_PF_RETRY))
5635                 return 1;
5636
5637         /* It is a real EPT misconfiguration */
5638         printk(KERN_ERR "EPT: Misconfiguration.\n");
5639         printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa);
5640
5641         nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes);
5642
5643         for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i)
5644                 ept_misconfig_inspect_spte(vcpu, sptes[i-1], i);
5645
5646         vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
5647         vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;
5648
5649         return 0;
5650 }
5651
5652 static int handle_nmi_window(struct kvm_vcpu *vcpu)
5653 {
5654         u32 cpu_based_vm_exec_control;
5655
5656         /* clear pending NMI */
5657         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
5658         cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
5659         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
5660         ++vcpu->stat.nmi_window_exits;
5661         kvm_make_request(KVM_REQ_EVENT, vcpu);
5662
5663         return 1;
5664 }
5665
5666 static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
5667 {
5668         struct vcpu_vmx *vmx = to_vmx(vcpu);
5669         enum emulation_result err = EMULATE_DONE;
5670         int ret = 1;
5671         u32 cpu_exec_ctrl;
5672         bool intr_window_requested;
5673         unsigned count = 130;
5674
5675         cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
5676         intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING;
5677
5678         while (vmx->emulation_required && count-- != 0) {
5679                 if (intr_window_requested && vmx_interrupt_allowed(vcpu))
5680                         return handle_interrupt_window(&vmx->vcpu);
5681
5682                 if (test_bit(KVM_REQ_EVENT, &vcpu->requests))
5683                         return 1;
5684
5685                 err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE);
5686
5687                 if (err == EMULATE_USER_EXIT) {
5688                         ++vcpu->stat.mmio_exits;
5689                         ret = 0;
5690                         goto out;
5691                 }
5692
5693                 if (err != EMULATE_DONE) {
5694                         vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
5695                         vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
5696                         vcpu->run->internal.ndata = 0;
5697                         return 0;
5698                 }
5699
5700                 if (vcpu->arch.halt_request) {
5701                         vcpu->arch.halt_request = 0;
5702                         ret = kvm_emulate_halt(vcpu);
5703                         goto out;
5704                 }
5705
5706                 if (signal_pending(current))
5707                         goto out;
5708                 if (need_resched())
5709                         schedule();
5710         }
5711
5712 out:
5713         return ret;
5714 }
5715
5716 static int __grow_ple_window(int val)
5717 {
5718         if (ple_window_grow < 1)
5719                 return ple_window;
5720
5721         val = min(val, ple_window_actual_max);
5722
5723         if (ple_window_grow < ple_window)
5724                 val *= ple_window_grow;
5725         else
5726                 val += ple_window_grow;
5727
5728         return val;
5729 }
5730
5731 static int __shrink_ple_window(int val, int modifier, int minimum)
5732 {
5733         if (modifier < 1)
5734                 return ple_window;
5735
5736         if (modifier < ple_window)
5737                 val /= modifier;
5738         else
5739                 val -= modifier;
5740
5741         return max(val, minimum);
5742 }
5743
5744 static void grow_ple_window(struct kvm_vcpu *vcpu)
5745 {
5746         struct vcpu_vmx *vmx = to_vmx(vcpu);
5747         int old = vmx->ple_window;
5748
5749         vmx->ple_window = __grow_ple_window(old);
5750
5751         if (vmx->ple_window != old)
5752                 vmx->ple_window_dirty = true;
5753
5754         trace_kvm_ple_window_grow(vcpu->vcpu_id, vmx->ple_window, old);
5755 }
5756
5757 static void shrink_ple_window(struct kvm_vcpu *vcpu)
5758 {
5759         struct vcpu_vmx *vmx = to_vmx(vcpu);
5760         int old = vmx->ple_window;
5761
5762         vmx->ple_window = __shrink_ple_window(old,
5763                                               ple_window_shrink, ple_window);
5764
5765         if (vmx->ple_window != old)
5766                 vmx->ple_window_dirty = true;
5767
5768         trace_kvm_ple_window_shrink(vcpu->vcpu_id, vmx->ple_window, old);
5769 }
5770
5771 /*
5772  * ple_window_actual_max is computed to be one grow_ple_window() below
5773  * ple_window_max. (See __grow_ple_window for the reason.)
5774  * This prevents overflows, because ple_window_max is int.
5775  * ple_window_max is effectively rounded down to a multiple of ple_window_grow
5776  * in this process.
5777  * A too-small ple_window_max is also prevented from setting vmx->ple_window < ple_window.
5778  */
5779 static void update_ple_window_actual_max(void)
5780 {
5781         ple_window_actual_max =
5782                         __shrink_ple_window(max(ple_window_max, ple_window),
5783                                             ple_window_grow, INT_MIN);
5784 }
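
/*
 * Illustrative sketch only (not part of the original file), with assumed
 * module parameter values ple_window_grow = 2 (smaller than ple_window) and
 * ple_window_max = INT_MAX: update_ple_window_actual_max() above then yields
 * INT_MAX / 2, so the min() clamp in __grow_ple_window() guarantees the
 * subsequent doubling cannot overflow an int.
 */
static inline int ple_window_grow_example(int cur_window)
{
        int actual_max = INT_MAX / 2;   /* __shrink_ple_window(INT_MAX, 2, INT_MIN) */

        cur_window = min(cur_window, actual_max);       /* clamp first ...               */
        return cur_window * 2;                          /* ... then grow: <= INT_MAX - 1 */
}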
5785
5786 /*
5787  * Indicates a vcpu busy-waiting on a spinlock.  We do not enable PAUSE
5788  * exiting, so we only get here on CPUs with PAUSE-Loop-Exiting.
5789  */
5790 static int handle_pause(struct kvm_vcpu *vcpu)
5791 {
5792         if (ple_gap)
5793                 grow_ple_window(vcpu);
5794
5795         skip_emulated_instruction(vcpu);
5796         kvm_vcpu_on_spin(vcpu);
5797
5798         return 1;
5799 }
5800
5801 static int handle_nop(struct kvm_vcpu *vcpu)
5802 {
5803         skip_emulated_instruction(vcpu);
5804         return 1;
5805 }
5806
5807 static int handle_mwait(struct kvm_vcpu *vcpu)
5808 {
5809         printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
5810         return handle_nop(vcpu);
5811 }
5812
5813 static int handle_monitor(struct kvm_vcpu *vcpu)
5814 {
5815         printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
5816         return handle_nop(vcpu);
5817 }
5818
5819 /*
5820  * To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12.
5821  * We could reuse a single VMCS for all the L2 guests, but we also want the
5822  * option to allocate a separate vmcs02 for each separate loaded vmcs12 - this
5823  * allows keeping them loaded on the processor, and in the future will allow
5824  * optimizations where prepare_vmcs02 doesn't need to set all the fields on
5825  * every entry if they never change.
5826  * So we keep, in vmx->nested.vmcs02_pool, a cache of size VMCS02_POOL_SIZE
5827  * (>=0) with a vmcs02 for each recently loaded vmcs12, most recent first.
5828  *
5829  * The following functions allocate and free a vmcs02 in this pool.
5830  */
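/*
 * For example, if VMCS02_POOL_SIZE were 1, an L1 alternating VMPTRLD between
 * two vmcs12s would recycle the single pooled vmcs02 on every switch; with a
 * larger pool each recently used vmcs12 keeps its own vmcs02.
 */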
5831
5832 /* Get a VMCS from the pool to use as vmcs02 for the current vmcs12. */
5833 static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
5834 {
5835         struct vmcs02_list *item;
5836         list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
5837                 if (item->vmptr == vmx->nested.current_vmptr) {
5838                         list_move(&item->list, &vmx->nested.vmcs02_pool);
5839                         return &item->vmcs02;
5840                 }
5841
5842         if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) {
5843                 /* Recycle the least recently used VMCS. */
5844                 item = list_entry(vmx->nested.vmcs02_pool.prev,
5845                         struct vmcs02_list, list);
5846                 item->vmptr = vmx->nested.current_vmptr;
5847                 list_move(&item->list, &vmx->nested.vmcs02_pool);
5848                 return &item->vmcs02;
5849         }
5850
5851         /* Create a new VMCS */
5852         item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
5853         if (!item)
5854                 return NULL;
5855         item->vmcs02.vmcs = alloc_vmcs();
5856         if (!item->vmcs02.vmcs) {
5857                 kfree(item);
5858                 return NULL;
5859         }
5860         loaded_vmcs_init(&item->vmcs02);
5861         item->vmptr = vmx->nested.current_vmptr;
5862         list_add(&(item->list), &(vmx->nested.vmcs02_pool));
5863         vmx->nested.vmcs02_num++;
5864         return &item->vmcs02;
5865 }
5866
5867 /* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */
5868 static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr)
5869 {
5870         struct vmcs02_list *item;
5871         list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
5872                 if (item->vmptr == vmptr) {
5873                         free_loaded_vmcs(&item->vmcs02);
5874                         list_del(&item->list);
5875                         kfree(item);
5876                         vmx->nested.vmcs02_num--;
5877                         return;
5878                 }
5879 }
5880
5881 /*
5882  * Free all VMCSs saved for this vcpu, except the one pointed by
5883  * vmx->loaded_vmcs. We must be running L1, so vmx->loaded_vmcs
5884  * must be &vmx->vmcs01.
5885  */
5886 static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
5887 {
5888         struct vmcs02_list *item, *n;
5889
5890         WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01);
5891         list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) {
5892                 /*
5893                  * Something will leak if the above WARN triggers.  Better than
5894                  * a use-after-free.
5895                  */
5896                 if (vmx->loaded_vmcs == &item->vmcs02)
5897                         continue;
5898
5899                 free_loaded_vmcs(&item->vmcs02);
5900                 list_del(&item->list);
5901                 kfree(item);
5902                 vmx->nested.vmcs02_num--;
5903         }
5904 }
5905
5906 /*
5907  * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
5908  * set the success or error code of an emulated VMX instruction, as specified
5909  * by Vol 2B, VMX Instruction Reference, "Conventions".
5910  */
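/*
 * In short: VMsucceed clears all six arithmetic flags, VMfailInvalid sets
 * only CF, and VMfailValid sets only ZF and records the error number in the
 * current vmcs12's vm_instruction_error field.
 */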
5911 static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
5912 {
5913         vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
5914                         & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
5915                             X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
5916 }
5917
5918 static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
5919 {
5920         vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
5921                         & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
5922                             X86_EFLAGS_SF | X86_EFLAGS_OF))
5923                         | X86_EFLAGS_CF);
5924 }
5925
5926 static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
5927                                         u32 vm_instruction_error)
5928 {
5929         if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
5930                 /*
5931                  * failValid writes the error number to the current VMCS, which
5932                  * can't be done if there isn't a current VMCS.
5933                  */
5934                 nested_vmx_failInvalid(vcpu);
5935                 return;
5936         }
5937         vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
5938                         & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
5939                             X86_EFLAGS_SF | X86_EFLAGS_OF))
5940                         | X86_EFLAGS_ZF);
5941         get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
5942         /*
5943          * We don't need to force a shadow sync because
5944          * VM_INSTRUCTION_ERROR is not shadowed
5945          */
5946 }
5947
5948 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
5949 {
5950         struct vcpu_vmx *vmx =
5951                 container_of(timer, struct vcpu_vmx, nested.preemption_timer);
5952
5953         vmx->nested.preemption_timer_expired = true;
5954         kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
5955         kvm_vcpu_kick(&vmx->vcpu);
5956
5957         return HRTIMER_NORESTART;
5958 }
5959
5960 /*
5961  * Decode the memory-address operand of a vmx instruction, as recorded on an
5962  * exit caused by such an instruction (run by a guest hypervisor).
5963  * On success, returns 0. When the operand is invalid, returns 1 and throws
5964  * #UD or #GP.
5965  */
5966 static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
5967                                  unsigned long exit_qualification,
5968                                  u32 vmx_instruction_info, gva_t *ret)
5969 {
5970         /*
5971          * According to Vol. 3B, "Information for VM Exits Due to Instruction
5972          * Execution", on an exit, vmx_instruction_info holds most of the
5973          * addressing components of the operand. Only the displacement part
5974          * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
5975          * For how an actual address is calculated from all these components,
5976          * refer to Vol. 1, "Operand Addressing".
5977          */
5978         int  scaling = vmx_instruction_info & 3;
5979         int  addr_size = (vmx_instruction_info >> 7) & 7;
5980         bool is_reg = vmx_instruction_info & (1u << 10);
5981         int  seg_reg = (vmx_instruction_info >> 15) & 7;
5982         int  index_reg = (vmx_instruction_info >> 18) & 0xf;
5983         bool index_is_valid = !(vmx_instruction_info & (1u << 22));
5984         int  base_reg       = (vmx_instruction_info >> 23) & 0xf;
5985         bool base_is_valid  = !(vmx_instruction_info & (1u << 27));
5986
5987         if (is_reg) {
5988                 kvm_queue_exception(vcpu, UD_VECTOR);
5989                 return 1;
5990         }
5991
5992         /* Addr = segment_base + offset */
5993         /* offset = base + [index * scale] + displacement */
5994         *ret = vmx_get_segment_base(vcpu, seg_reg);
5995         if (base_is_valid)
5996                 *ret += kvm_register_read(vcpu, base_reg);
5997         if (index_is_valid)
5998                 *ret += kvm_register_read(vcpu, index_reg)<<scaling;
5999         *ret += exit_qualification; /* holds the displacement */
6000
6001         if (addr_size == 1) /* 32 bit */
6002                 *ret &= 0xffffffff;
6003
6004         /*
6005          * TODO: throw #GP (and return 1) in various cases that the VM*
6006          * instructions require it - e.g., offset beyond segment limit,
6007          * unusable or unreadable/unwritable segment, non-canonical 64-bit
6008          * address, and so on. Currently these are not checked.
6009          */
6010         return 0;
6011 }
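/*
 * Illustrative decode with hypothetical operand fields: scaling == 2,
 * addr_size == 1 (32-bit), seg_reg == 3 (DS), a valid index in RSI and a
 * valid base in RDI yield
 *      *ret = DS.base + RDI + (RSI << 2) + displacement,
 * truncated to 32 bits because addr_size is 1.
 */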
6012
6013 /*
6014  * This function performs the various checks on the vmptr operand, including:
6015  * - it must be 4KB aligned
6016  * - no bits beyond the physical address width may be set
6017  * Returns 0 on success, 1 otherwise.
6018  * (Intel SDM Section 30.3)
6019  */
6020 static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
6021                                   gpa_t *vmpointer)
6022 {
6023         gva_t gva;
6024         gpa_t vmptr;
6025         struct x86_exception e;
6026         struct page *page;
6027         struct vcpu_vmx *vmx = to_vmx(vcpu);
6028         int maxphyaddr = cpuid_maxphyaddr(vcpu);
6029
6030         if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
6031                         vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
6032                 return 1;
6033
6034         if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
6035                                 sizeof(vmptr), &e)) {
6036                 kvm_inject_page_fault(vcpu, &e);
6037                 return 1;
6038         }
6039
6040         switch (exit_reason) {
6041         case EXIT_REASON_VMON:
6042                 /*
6043                  * SDM 3: 24.11.5
6044                  * The first 4 bytes of VMXON region contain the supported
6045                  * VMCS revision identifier
6046                  *
6047                  * Note - IA32_VMX_BASIC[48] will never be 1
6048                  * for the nested case; if it were set, it would
6049                  * limit the physical address width used here to 32 bits.
6050                  *
6051                  */
6052                 if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
6053                         nested_vmx_failInvalid(vcpu);
6054                         skip_emulated_instruction(vcpu);
6055                         return 1;
6056                 }
6057
6058                 page = nested_get_page(vcpu, vmptr);
6059                 if (page == NULL || *(u32 *)kmap(page) != VMCS12_REVISION) {
6060                         nested_vmx_failInvalid(vcpu);
6061                         if (page != NULL)
6062                                 kunmap(page);
6063                         skip_emulated_instruction(vcpu);
6064                         return 1;
6065                 }
6066                 kunmap(page);
6067                 vmx->nested.vmxon_ptr = vmptr;
6068                 break;
6069         case EXIT_REASON_VMCLEAR:
6070                 if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
6071                         nested_vmx_failValid(vcpu,
6072                                              VMXERR_VMCLEAR_INVALID_ADDRESS);
6073                         skip_emulated_instruction(vcpu);
6074                         return 1;
6075                 }
6076
6077                 if (vmptr == vmx->nested.vmxon_ptr) {
6078                         nested_vmx_failValid(vcpu,
6079                                              VMXERR_VMCLEAR_VMXON_POINTER);
6080                         skip_emulated_instruction(vcpu);
6081                         return 1;
6082                 }
6083                 break;
6084         case EXIT_REASON_VMPTRLD:
6085                 if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
6086                         nested_vmx_failValid(vcpu,
6087                                              VMXERR_VMPTRLD_INVALID_ADDRESS);
6088                         skip_emulated_instruction(vcpu);
6089                         return 1;
6090                 }
6091
6092                 if (vmptr == vmx->nested.vmxon_ptr) {
6093                         nested_vmx_failValid(vcpu,
6094                                              VMXERR_VMPTRLD_VMXON_POINTER);
6095                         skip_emulated_instruction(vcpu);
6096                         return 1;
6097                 }
6098                 break;
6099         default:
6100                 return 1; /* shouldn't happen */
6101         }
6102
6103         if (vmpointer)
6104                 *vmpointer = vmptr;
6105         return 0;
6106 }
6107
6108 /*
6109  * Emulate the VMXON instruction.
6110  * We validate and remember the argument to VMXON (the so-called "VMXON
6111  * pointer") so that VMCLEAR and VMPTRLD can verify that their argument is
6112  * different from the VMXON pointer, as the spec requires. Beyond that we
6113  * just remember that VMX is active; we do not currently need to store
6114  * anything in the guest-allocated VMXON memory region.
6115  */
6116 static int handle_vmon(struct kvm_vcpu *vcpu)
6117 {
6118         struct kvm_segment cs;
6119         struct vcpu_vmx *vmx = to_vmx(vcpu);
6120         struct vmcs *shadow_vmcs;
6121         const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
6122                 | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
6123
6124         /* The Intel VMX Instruction Reference lists a bunch of bits that
6125          * are prerequisite to running VMXON, most notably cr4.VMXE must be
6126          * set to 1 (see vmx_set_cr4() for when we allow the guest to set this).
6127          * Otherwise, we should fail with #UD. We test these now:
6128          */
6129         if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE) ||
6130             !kvm_read_cr0_bits(vcpu, X86_CR0_PE) ||
6131             (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
6132                 kvm_queue_exception(vcpu, UD_VECTOR);
6133                 return 1;
6134         }
6135
6136         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
6137         if (is_long_mode(vcpu) && !cs.l) {
6138                 kvm_queue_exception(vcpu, UD_VECTOR);
6139                 return 1;
6140         }
6141
6142         if (vmx_get_cpl(vcpu)) {
6143                 kvm_inject_gp(vcpu, 0);
6144                 return 1;
6145         }
6146
6147         if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
6148                 return 1;
6149
6150         if (vmx->nested.vmxon) {
6151                 nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
6152                 skip_emulated_instruction(vcpu);
6153                 return 1;
6154         }
6155
6156         if ((vmx->nested.msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
6157                         != VMXON_NEEDED_FEATURES) {
6158                 kvm_inject_gp(vcpu, 0);
6159                 return 1;
6160         }
6161
6162         if (enable_shadow_vmcs) {
6163                 shadow_vmcs = alloc_vmcs();
6164                 if (!shadow_vmcs)
6165                         return -ENOMEM;
6166                 /* mark vmcs as shadow */
6167                 shadow_vmcs->revision_id |= (1u << 31);
6168                 /* init shadow vmcs */
6169                 vmcs_clear(shadow_vmcs);
6170                 vmx->nested.current_shadow_vmcs = shadow_vmcs;
6171         }
6172
6173         INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
6174         vmx->nested.vmcs02_num = 0;
6175
6176         hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
6177                      HRTIMER_MODE_REL);
6178         vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
6179
6180         vmx->nested.vmxon = true;
6181
6182         skip_emulated_instruction(vcpu);
6183         nested_vmx_succeed(vcpu);
6184         return 1;
6185 }
6186
6187 /*
6188  * Intel's VMX Instruction Reference specifies a common set of prerequisites
6189  * for running VMX instructions (except VMXON, whose prerequisites are
6190  * slightly different). It also specifies what exception to inject otherwise.
6191  */
6192 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
6193 {
6194         struct kvm_segment cs;
6195         struct vcpu_vmx *vmx = to_vmx(vcpu);
6196
6197         if (!vmx->nested.vmxon) {
6198                 kvm_queue_exception(vcpu, UD_VECTOR);
6199                 return 0;
6200         }
6201
6202         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
6203         if ((vmx_get_rflags(vcpu) & X86_EFLAGS_VM) ||
6204             (is_long_mode(vcpu) && !cs.l)) {
6205                 kvm_queue_exception(vcpu, UD_VECTOR);
6206                 return 0;
6207         }
6208
6209         if (vmx_get_cpl(vcpu)) {
6210                 kvm_inject_gp(vcpu, 0);
6211                 return 0;
6212         }
6213
6214         return 1;
6215 }
6216
6217 static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
6218 {
6219         u32 exec_control;
6220         if (vmx->nested.current_vmptr == -1ull)
6221                 return;
6222
6223         /* current_vmptr and current_vmcs12 are always set/reset together */
6224         if (WARN_ON(vmx->nested.current_vmcs12 == NULL))
6225                 return;
6226
6227         if (enable_shadow_vmcs) {
6228                 /* copy to memory all shadowed fields in case
6229                    they were modified */
6230                 copy_shadow_to_vmcs12(vmx);
6231                 vmx->nested.sync_shadow_vmcs = false;
6232                 exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
6233                 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
6234                 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
6235                 vmcs_write64(VMCS_LINK_POINTER, -1ull);
6236         }
6237         kunmap(vmx->nested.current_vmcs12_page);
6238         nested_release_page(vmx->nested.current_vmcs12_page);
6239         vmx->nested.current_vmptr = -1ull;
6240         vmx->nested.current_vmcs12 = NULL;
6241 }
6242
6243 /*
6244  * Free whatever needs to be freed from vmx->nested when L1 goes down, or
6245  * just stops using VMX.
6246  */
6247 static void free_nested(struct vcpu_vmx *vmx)
6248 {
6249         if (!vmx->nested.vmxon)
6250                 return;
6251
6252         vmx->nested.vmxon = false;
6253         nested_release_vmcs12(vmx);
6254         if (enable_shadow_vmcs)
6255                 free_vmcs(vmx->nested.current_shadow_vmcs);
6256         /* Unpin physical memory we referred to in current vmcs02 */
6257         if (vmx->nested.apic_access_page) {
6258                 nested_release_page(vmx->nested.apic_access_page);
6259                 vmx->nested.apic_access_page = NULL;
6260         }
6261         if (vmx->nested.virtual_apic_page) {
6262                 nested_release_page(vmx->nested.virtual_apic_page);
6263                 vmx->nested.virtual_apic_page = NULL;
6264         }
6265
6266         nested_free_all_saved_vmcss(vmx);
6267 }
6268
6269 /* Emulate the VMXOFF instruction */
6270 static int handle_vmoff(struct kvm_vcpu *vcpu)
6271 {
6272         if (!nested_vmx_check_permission(vcpu))
6273                 return 1;
6274         free_nested(to_vmx(vcpu));
6275         skip_emulated_instruction(vcpu);
6276         nested_vmx_succeed(vcpu);
6277         return 1;
6278 }
6279
6280 /* Emulate the VMCLEAR instruction */
6281 static int handle_vmclear(struct kvm_vcpu *vcpu)
6282 {
6283         struct vcpu_vmx *vmx = to_vmx(vcpu);
6284         gpa_t vmptr;
6285         struct vmcs12 *vmcs12;
6286         struct page *page;
6287
6288         if (!nested_vmx_check_permission(vcpu))
6289                 return 1;
6290
6291         if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
6292                 return 1;
6293
6294         if (vmptr == vmx->nested.current_vmptr)
6295                 nested_release_vmcs12(vmx);
6296
6297         page = nested_get_page(vcpu, vmptr);
6298         if (page == NULL) {
6299                 /*
6300                  * For accurate processor emulation, VMCLEAR beyond available
6301                  * physical memory should do nothing at all. However, it is
6302                  * possible that a nested vmx bug, not a guest hypervisor bug,
6303                  * resulted in this case, so let's shut down before doing any
6304                  * more damage:
6305                  */
6306                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
6307                 return 1;
6308         }
6309         vmcs12 = kmap(page);
6310         vmcs12->launch_state = 0;
6311         kunmap(page);
6312         nested_release_page(page);
6313
6314         nested_free_vmcs02(vmx, vmptr);
6315
6316         skip_emulated_instruction(vcpu);
6317         nested_vmx_succeed(vcpu);
6318         return 1;
6319 }
6320
6321 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
6322
6323 /* Emulate the VMLAUNCH instruction */
6324 static int handle_vmlaunch(struct kvm_vcpu *vcpu)
6325 {
6326         return nested_vmx_run(vcpu, true);
6327 }
6328
6329 /* Emulate the VMRESUME instruction */
6330 static int handle_vmresume(struct kvm_vcpu *vcpu)
6331 {
6332
6333         return nested_vmx_run(vcpu, false);
6334 }
6335
6336 enum vmcs_field_type {
6337         VMCS_FIELD_TYPE_U16 = 0,
6338         VMCS_FIELD_TYPE_U64 = 1,
6339         VMCS_FIELD_TYPE_U32 = 2,
6340         VMCS_FIELD_TYPE_NATURAL_WIDTH = 3
6341 };
6342
6343 static inline int vmcs_field_type(unsigned long field)
6344 {
6345         if (0x1 & field)        /* the *_HIGH fields are all 32 bit */
6346                 return VMCS_FIELD_TYPE_U32;
6347         return (field >> 13) & 0x3;
6348 }
6349
6350 static inline int vmcs_field_readonly(unsigned long field)
6351 {
6352         return (((field >> 10) & 0x3) == 1);
6353 }
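/*
 * Encoding examples: GUEST_RIP (0x681e) has bits 14:13 == 3 and is therefore
 * natural-width; CPU_BASED_VM_EXEC_CONTROL (0x4002) has bits 14:13 == 2 (u32)
 * and bits 11:10 == 0, i.e. a control field that is not read-only;
 * VM_EXIT_REASON (0x4402) has bits 11:10 == 1, i.e. read-only VM-exit info.
 */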
6354
6355 /*
6356  * Read a vmcs12 field. Since these can have varying lengths and we return
6357  * one type, we chose the biggest type (u64) and zero-extend the return value
6358  * to that size. Note that the caller, handle_vmread, might need to use only
6359  * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
6360  * 64-bit fields are to be returned).
6361  */
6362 static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu,
6363                                         unsigned long field, u64 *ret)
6364 {
6365         short offset = vmcs_field_to_offset(field);
6366         char *p;
6367
6368         if (offset < 0)
6369                 return 0;
6370
6371         p = ((char *)(get_vmcs12(vcpu))) + offset;
6372
6373         switch (vmcs_field_type(field)) {
6374         case VMCS_FIELD_TYPE_NATURAL_WIDTH:
6375                 *ret = *((natural_width *)p);
6376                 return 1;
6377         case VMCS_FIELD_TYPE_U16:
6378                 *ret = *((u16 *)p);
6379                 return 1;
6380         case VMCS_FIELD_TYPE_U32:
6381                 *ret = *((u32 *)p);
6382                 return 1;
6383         case VMCS_FIELD_TYPE_U64:
6384                 *ret = *((u64 *)p);
6385                 return 1;
6386         default:
6387                 return 0; /* can never happen. */
6388         }
6389 }
6390
6391
6392 static inline bool vmcs12_write_any(struct kvm_vcpu *vcpu,
6393                                     unsigned long field, u64 field_value){
6394         short offset = vmcs_field_to_offset(field);
6395         char *p = ((char *) get_vmcs12(vcpu)) + offset;
6396         if (offset < 0)
6397                 return false;
6398
6399         switch (vmcs_field_type(field)) {
6400         case VMCS_FIELD_TYPE_U16:
6401                 *(u16 *)p = field_value;
6402                 return true;
6403         case VMCS_FIELD_TYPE_U32:
6404                 *(u32 *)p = field_value;
6405                 return true;
6406         case VMCS_FIELD_TYPE_U64:
6407                 *(u64 *)p = field_value;
6408                 return true;
6409         case VMCS_FIELD_TYPE_NATURAL_WIDTH:
6410                 *(natural_width *)p = field_value;
6411                 return true;
6412         default:
6413                 return false; /* can never happen. */
6414         }
6415
6416 }
6417
6418 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
6419 {
6420         int i;
6421         unsigned long field;
6422         u64 field_value;
6423         struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
6424         const unsigned long *fields = shadow_read_write_fields;
6425         const int num_fields = max_shadow_read_write_fields;
6426
6427         preempt_disable();
6428
6429         vmcs_load(shadow_vmcs);
6430
6431         for (i = 0; i < num_fields; i++) {
6432                 field = fields[i];
6433                 switch (vmcs_field_type(field)) {
6434                 case VMCS_FIELD_TYPE_U16:
6435                         field_value = vmcs_read16(field);
6436                         break;
6437                 case VMCS_FIELD_TYPE_U32:
6438                         field_value = vmcs_read32(field);
6439                         break;
6440                 case VMCS_FIELD_TYPE_U64:
6441                         field_value = vmcs_read64(field);
6442                         break;
6443                 case VMCS_FIELD_TYPE_NATURAL_WIDTH:
6444                         field_value = vmcs_readl(field);
6445                         break;
6446                 }
6447                 vmcs12_write_any(&vmx->vcpu, field, field_value);
6448         }
6449
6450         vmcs_clear(shadow_vmcs);
6451         vmcs_load(vmx->loaded_vmcs->vmcs);
6452
6453         preempt_enable();
6454 }
6455
6456 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
6457 {
6458         const unsigned long *fields[] = {
6459                 shadow_read_write_fields,
6460                 shadow_read_only_fields
6461         };
6462         const int max_fields[] = {
6463                 max_shadow_read_write_fields,
6464                 max_shadow_read_only_fields
6465         };
6466         int i, q;
6467         unsigned long field;
6468         u64 field_value = 0;
6469         struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
6470
6471         vmcs_load(shadow_vmcs);
6472
6473         for (q = 0; q < ARRAY_SIZE(fields); q++) {
6474                 for (i = 0; i < max_fields[q]; i++) {
6475                         field = fields[q][i];
6476                         vmcs12_read_any(&vmx->vcpu, field, &field_value);
6477
6478                         switch (vmcs_field_type(field)) {
6479                         case VMCS_FIELD_TYPE_U16:
6480                                 vmcs_write16(field, (u16)field_value);
6481                                 break;
6482                         case VMCS_FIELD_TYPE_U32:
6483                                 vmcs_write32(field, (u32)field_value);
6484                                 break;
6485                         case VMCS_FIELD_TYPE_U64:
6486                                 vmcs_write64(field, (u64)field_value);
6487                                 break;
6488                         case VMCS_FIELD_TYPE_NATURAL_WIDTH:
6489                                 vmcs_writel(field, (long)field_value);
6490                                 break;
6491                         }
6492                 }
6493         }
6494
6495         vmcs_clear(shadow_vmcs);
6496         vmcs_load(vmx->loaded_vmcs->vmcs);
6497 }
6498
6499 /*
6500  * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was
6501  * used before) all generate the same failure when it is missing.
6502  */
6503 static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu)
6504 {
6505         struct vcpu_vmx *vmx = to_vmx(vcpu);
6506         if (vmx->nested.current_vmptr == -1ull) {
6507                 nested_vmx_failInvalid(vcpu);
6508                 skip_emulated_instruction(vcpu);
6509                 return 0;
6510         }
6511         return 1;
6512 }
6513
6514 static int handle_vmread(struct kvm_vcpu *vcpu)
6515 {
6516         unsigned long field;
6517         u64 field_value;
6518         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
6519         u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
6520         gva_t gva = 0;
6521
6522         if (!nested_vmx_check_permission(vcpu) ||
6523             !nested_vmx_check_vmcs12(vcpu))
6524                 return 1;
6525
6526         /* Decode instruction info and find the field to read */
6527         field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
6528         /* Read the field, zero-extended to a u64 field_value */
6529         if (!vmcs12_read_any(vcpu, field, &field_value)) {
6530                 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
6531                 skip_emulated_instruction(vcpu);
6532                 return 1;
6533         }
6534         /*
6535          * Now copy part of this value to register or memory, as requested.
6536          * Note that the number of bits actually copied is 32 or 64 depending
6537          * on the guest's mode (32 or 64 bit), not on the given field's length.
6538          */
6539         if (vmx_instruction_info & (1u << 10)) {
6540                 kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
6541                         field_value);
6542         } else {
6543                 if (get_vmx_mem_address(vcpu, exit_qualification,
6544                                 vmx_instruction_info, &gva))
6545                         return 1;
6546                 /* _system ok, as nested_vmx_check_permission verified cpl=0 */
6547                 kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
6548                              &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
6549         }
6550
6551         nested_vmx_succeed(vcpu);
6552         skip_emulated_instruction(vcpu);
6553         return 1;
6554 }
6555
6556
6557 static int handle_vmwrite(struct kvm_vcpu *vcpu)
6558 {
6559         unsigned long field;
6560         gva_t gva;
6561         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
6562         u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
6563         /* The value to write might be 32 or 64 bits, depending on L1's long
6564          * mode, and eventually we need to write that into a field of several
6565          * possible lengths. The code below first zero-extends the value to 64
6566  * bits (field_value), and then copies only the appropriate number of
6567          * bits into the vmcs12 field.
6568          */
6569         u64 field_value = 0;
6570         struct x86_exception e;
6571
6572         if (!nested_vmx_check_permission(vcpu) ||
6573             !nested_vmx_check_vmcs12(vcpu))
6574                 return 1;
6575
6576         if (vmx_instruction_info & (1u << 10))
6577                 field_value = kvm_register_readl(vcpu,
6578                         (((vmx_instruction_info) >> 3) & 0xf));
6579         else {
6580                 if (get_vmx_mem_address(vcpu, exit_qualification,
6581                                 vmx_instruction_info, &gva))
6582                         return 1;
6583                 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
6584                            &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
6585                         kvm_inject_page_fault(vcpu, &e);
6586                         return 1;
6587                 }
6588         }
6589
6590
6591         field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
6592         if (vmcs_field_readonly(field)) {
6593                 nested_vmx_failValid(vcpu,
6594                         VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
6595                 skip_emulated_instruction(vcpu);
6596                 return 1;
6597         }
6598
6599         if (!vmcs12_write_any(vcpu, field, field_value)) {
6600                 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
6601                 skip_emulated_instruction(vcpu);
6602                 return 1;
6603         }
6604
6605         nested_vmx_succeed(vcpu);
6606         skip_emulated_instruction(vcpu);
6607         return 1;
6608 }
6609
6610 /* Emulate the VMPTRLD instruction */
6611 static int handle_vmptrld(struct kvm_vcpu *vcpu)
6612 {
6613         struct vcpu_vmx *vmx = to_vmx(vcpu);
6614         gpa_t vmptr;
6615         u32 exec_control;
6616
6617         if (!nested_vmx_check_permission(vcpu))
6618                 return 1;
6619
6620         if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr))
6621                 return 1;
6622
6623         if (vmx->nested.current_vmptr != vmptr) {
6624                 struct vmcs12 *new_vmcs12;
6625                 struct page *page;
6626                 page = nested_get_page(vcpu, vmptr);
6627                 if (page == NULL) {
6628                         nested_vmx_failInvalid(vcpu);
6629                         skip_emulated_instruction(vcpu);
6630                         return 1;
6631                 }
6632                 new_vmcs12 = kmap(page);
6633                 if (new_vmcs12->revision_id != VMCS12_REVISION) {
6634                         kunmap(page);
6635                         nested_release_page_clean(page);
6636                         nested_vmx_failValid(vcpu,
6637                                 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
6638                         skip_emulated_instruction(vcpu);
6639                         return 1;
6640                 }
6641
6642                 nested_release_vmcs12(vmx);
6643                 vmx->nested.current_vmptr = vmptr;
6644                 vmx->nested.current_vmcs12 = new_vmcs12;
6645                 vmx->nested.current_vmcs12_page = page;
6646                 if (enable_shadow_vmcs) {
6647                         exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
6648                         exec_control |= SECONDARY_EXEC_SHADOW_VMCS;
6649                         vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
6650                         vmcs_write64(VMCS_LINK_POINTER,
6651                                      __pa(vmx->nested.current_shadow_vmcs));
6652                         vmx->nested.sync_shadow_vmcs = true;
6653                 }
6654         }
6655
6656         nested_vmx_succeed(vcpu);
6657         skip_emulated_instruction(vcpu);
6658         return 1;
6659 }
6660
6661 /* Emulate the VMPTRST instruction */
6662 static int handle_vmptrst(struct kvm_vcpu *vcpu)
6663 {
6664         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
6665         u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
6666         gva_t vmcs_gva;
6667         struct x86_exception e;
6668
6669         if (!nested_vmx_check_permission(vcpu))
6670                 return 1;
6671
6672         if (get_vmx_mem_address(vcpu, exit_qualification,
6673                         vmx_instruction_info, &vmcs_gva))
6674                 return 1;
6675         /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
6676         if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
6677                                  (void *)&to_vmx(vcpu)->nested.current_vmptr,
6678                                  sizeof(u64), &e)) {
6679                 kvm_inject_page_fault(vcpu, &e);
6680                 return 1;
6681         }
6682         nested_vmx_succeed(vcpu);
6683         skip_emulated_instruction(vcpu);
6684         return 1;
6685 }
6686
6687 /* Emulate the INVEPT instruction */
6688 static int handle_invept(struct kvm_vcpu *vcpu)
6689 {
6690         u32 vmx_instruction_info, types;
6691         unsigned long type;
6692         gva_t gva;
6693         struct x86_exception e;
6694         struct {
6695                 u64 eptp, gpa;
6696         } operand;
6697
6698         if (!(nested_vmx_secondary_ctls_high & SECONDARY_EXEC_ENABLE_EPT) ||
6699             !(nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) {
6700                 kvm_queue_exception(vcpu, UD_VECTOR);
6701                 return 1;
6702         }
6703
6704         if (!nested_vmx_check_permission(vcpu))
6705                 return 1;
6706
6707         if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) {
6708                 kvm_queue_exception(vcpu, UD_VECTOR);
6709                 return 1;
6710         }
6711
6712         vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
6713         type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
6714
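        /*
         * Bits 1 and 2 of the shifted EPT capabilities advertise support for
         * single-context (type 1) and all-context (type 2) INVEPT; the mask
         * of 6 below therefore makes type 0 and types above 2 fail the check.
         */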
6715         types = (nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
6716
6717         if (!(types & (1UL << type))) {
6718                 nested_vmx_failValid(vcpu,
6719                                 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
6720                 return 1;
6721         }
6722
6723         /* According to the Intel VMX instruction reference, the memory
6724          * operand is read even if it isn't needed (e.g., for type==global)
6725          */
6726         if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
6727                         vmx_instruction_info, &gva))
6728                 return 1;
6729         if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
6730                                 sizeof(operand), &e)) {
6731                 kvm_inject_page_fault(vcpu, &e);
6732                 return 1;
6733         }
6734
6735         switch (type) {
6736         case VMX_EPT_EXTENT_GLOBAL:
6737                 kvm_mmu_sync_roots(vcpu);
6738                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
6739                 nested_vmx_succeed(vcpu);
6740                 break;
6741         default:
6742                 /* Trap single context invalidation invept calls */
6743                 BUG_ON(1);
6744                 break;
6745         }
6746
6747         skip_emulated_instruction(vcpu);
6748         return 1;
6749 }
6750
6751 static int handle_invvpid(struct kvm_vcpu *vcpu)
6752 {
6753         kvm_queue_exception(vcpu, UD_VECTOR);
6754         return 1;
6755 }
6756
6757 /*
6758  * The exit handlers return 1 if the exit was handled fully and guest execution
6759  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
6760  * to be done to userspace and return 0.
6761  */
6762 static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
6763         [EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
6764         [EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
6765         [EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
6766         [EXIT_REASON_NMI_WINDOW]              = handle_nmi_window,
6767         [EXIT_REASON_IO_INSTRUCTION]          = handle_io,
6768         [EXIT_REASON_CR_ACCESS]               = handle_cr,
6769         [EXIT_REASON_DR_ACCESS]               = handle_dr,
6770         [EXIT_REASON_CPUID]                   = handle_cpuid,
6771         [EXIT_REASON_MSR_READ]                = handle_rdmsr,
6772         [EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
6773         [EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
6774         [EXIT_REASON_HLT]                     = handle_halt,
6775         [EXIT_REASON_INVD]                    = handle_invd,
6776         [EXIT_REASON_INVLPG]                  = handle_invlpg,
6777         [EXIT_REASON_RDPMC]                   = handle_rdpmc,
6778         [EXIT_REASON_VMCALL]                  = handle_vmcall,
6779         [EXIT_REASON_VMCLEAR]                 = handle_vmclear,
6780         [EXIT_REASON_VMLAUNCH]                = handle_vmlaunch,
6781         [EXIT_REASON_VMPTRLD]                 = handle_vmptrld,
6782         [EXIT_REASON_VMPTRST]                 = handle_vmptrst,
6783         [EXIT_REASON_VMREAD]                  = handle_vmread,
6784         [EXIT_REASON_VMRESUME]                = handle_vmresume,
6785         [EXIT_REASON_VMWRITE]                 = handle_vmwrite,
6786         [EXIT_REASON_VMOFF]                   = handle_vmoff,
6787         [EXIT_REASON_VMON]                    = handle_vmon,
6788         [EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
6789         [EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
6790         [EXIT_REASON_APIC_WRITE]              = handle_apic_write,
6791         [EXIT_REASON_EOI_INDUCED]             = handle_apic_eoi_induced,
6792         [EXIT_REASON_WBINVD]                  = handle_wbinvd,
6793         [EXIT_REASON_XSETBV]                  = handle_xsetbv,
6794         [EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
6795         [EXIT_REASON_MCE_DURING_VMENTRY]      = handle_machine_check,
6796         [EXIT_REASON_EPT_VIOLATION]           = handle_ept_violation,
6797         [EXIT_REASON_EPT_MISCONFIG]           = handle_ept_misconfig,
6798         [EXIT_REASON_PAUSE_INSTRUCTION]       = handle_pause,
6799         [EXIT_REASON_MWAIT_INSTRUCTION]       = handle_mwait,
6800         [EXIT_REASON_MONITOR_INSTRUCTION]     = handle_monitor,
6801         [EXIT_REASON_INVEPT]                  = handle_invept,
6802         [EXIT_REASON_INVVPID]                 = handle_invvpid,
6803 };
6804
6805 static const int kvm_vmx_max_exit_handlers =
6806         ARRAY_SIZE(kvm_vmx_exit_handlers);
6807
6808 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
6809                                        struct vmcs12 *vmcs12)
6810 {
6811         unsigned long exit_qualification;
6812         gpa_t bitmap, last_bitmap;
6813         unsigned int port;
6814         int size;
6815         u8 b;
6816
6817         if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
6818                 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
6819
6820         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
6821
6822         port = exit_qualification >> 16;
6823         size = (exit_qualification & 7) + 1;
6824
6825         last_bitmap = (gpa_t)-1;
6826         b = -1;
6827
6828         while (size > 0) {
6829                 if (port < 0x8000)
6830                         bitmap = vmcs12->io_bitmap_a;
6831                 else if (port < 0x10000)
6832                         bitmap = vmcs12->io_bitmap_b;
6833                 else
6834                         return 1;
6835                 bitmap += (port & 0x7fff) / 8;
6836
6837                 if (last_bitmap != bitmap)
6838                         if (kvm_read_guest(vcpu->kvm, bitmap, &b, 1))
6839                                 return 1;
6840                 if (b & (1 << (port & 7)))
6841                         return 1;
6842
6843                 port++;
6844                 size--;
6845                 last_bitmap = bitmap;
6846         }
6847
6848         return 0;
6849 }
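/*
 * Example with a hypothetical access: an OUT to port 0x60 of size 1 tests bit
 * (0x60 & 7) == 0 of the byte at io_bitmap_a + 0x60 / 8; a two-byte access at
 * port 0x7fff also consults the first byte of io_bitmap_b for port 0x8000.
 */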
6850
6851 /*
6852  * Return 1 if we should exit from L2 to L1 to handle an MSR access,
6853  * rather than handle it ourselves in L0. I.e., check whether L1 expressed
6854  * disinterest in the current event (read or write a specific MSR) by using an
6855  * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
6856  */
6857 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
6858         struct vmcs12 *vmcs12, u32 exit_reason)
6859 {
6860         u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
6861         gpa_t bitmap;
6862
6863         if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
6864                 return 1;
6865
6866         /*
6867          * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
6868          * for the four combinations of read/write and low/high MSR numbers.
6869          * First we need to figure out which of the four to use:
6870          */
6871         bitmap = vmcs12->msr_bitmap;
6872         if (exit_reason == EXIT_REASON_MSR_WRITE)
6873                 bitmap += 2048;
6874         if (msr_index >= 0xc0000000) {
6875                 msr_index -= 0xc0000000;
6876                 bitmap += 1024;
6877         }
6878
6879         /* Then read the msr_index'th bit from this bitmap: */
6880         if (msr_index < 1024*8) {
6881                 unsigned char b;
6882                 if (kvm_read_guest(vcpu->kvm, bitmap + msr_index/8, &b, 1))
6883                         return 1;
6884                 return 1 & (b >> (msr_index & 7));
6885         } else
6886                 return 1; /* let L1 handle the wrong parameter */
6887 }
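/*
 * Example with a hypothetical access: a WRMSR to MSR 0xc0000080 (EFER) uses
 * the write/high bitmap at vmcs12->msr_bitmap + 2048 + 1024; the index is
 * reduced to 0x80, so bit 0 of the byte at offset 0x80 / 8 == 16 decides
 * whether the exit is reflected to L1.
 */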
6888
6889 /*
6890  * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
6891  * rather than handle it ourselves in L0. I.e., check if L1 wanted to
6892  * intercept (via guest_host_mask etc.) the current event.
6893  */
6894 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
6895         struct vmcs12 *vmcs12)
6896 {
6897         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
6898         int cr = exit_qualification & 15;
6899         int reg = (exit_qualification >> 8) & 15;
6900         unsigned long val = kvm_register_readl(vcpu, reg);
6901
6902         switch ((exit_qualification >> 4) & 3) {
6903         case 0: /* mov to cr */
6904                 switch (cr) {
6905                 case 0:
6906                         if (vmcs12->cr0_guest_host_mask &
6907                             (val ^ vmcs12->cr0_read_shadow))
6908                                 return 1;
6909                         break;
6910                 case 3:
6911                         if ((vmcs12->cr3_target_count >= 1 &&
6912                                         vmcs12->cr3_target_value0 == val) ||
6913                                 (vmcs12->cr3_target_count >= 2 &&
6914                                         vmcs12->cr3_target_value1 == val) ||
6915                                 (vmcs12->cr3_target_count >= 3 &&
6916                                         vmcs12->cr3_target_value2 == val) ||
6917                                 (vmcs12->cr3_target_count >= 4 &&
6918                                         vmcs12->cr3_target_value3 == val))
6919                                 return 0;
6920                         if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
6921                                 return 1;
6922                         break;
6923                 case 4:
6924                         if (vmcs12->cr4_guest_host_mask &
6925                             (vmcs12->cr4_read_shadow ^ val))
6926                                 return 1;
6927                         break;
6928                 case 8:
6929                         if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
6930                                 return 1;
6931                         break;
6932                 }
6933                 break;
6934         case 2: /* clts */
6935                 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
6936                     (vmcs12->cr0_read_shadow & X86_CR0_TS))
6937                         return 1;
6938                 break;
6939         case 1: /* mov from cr */
6940                 switch (cr) {
6941                 case 3:
6942                         if (vmcs12->cpu_based_vm_exec_control &
6943                             CPU_BASED_CR3_STORE_EXITING)
6944                                 return 1;
6945                         break;
6946                 case 8:
6947                         if (vmcs12->cpu_based_vm_exec_control &
6948                             CPU_BASED_CR8_STORE_EXITING)
6949                                 return 1;
6950                         break;
6951                 }
6952                 break;
6953         case 3: /* lmsw */
6954                 /*
6955                  * lmsw can change bits 1..3 of cr0, and only set bit 0 of
6956                  * cr0. Other attempted changes are ignored, with no exit.
6957                  */
6958                 if (vmcs12->cr0_guest_host_mask & 0xe &
6959                     (val ^ vmcs12->cr0_read_shadow))
6960                         return 1;
6961                 if ((vmcs12->cr0_guest_host_mask & 0x1) &&
6962                     !(vmcs12->cr0_read_shadow & 0x1) &&
6963                     (val & 0x1))
6964                         return 1;
6965                 break;
6966         }
6967         return 0;
6968 }
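/*
 * Example with hypothetical vmcs12 state: if L1 owns CR0.PE
 * (cr0_guest_host_mask bit 0 set) and cr0_read_shadow.PE == 0, an lmsw that
 * sets PE is reflected to L1, while an lmsw touching only bits L1 does not
 * own is handled in L0.
 */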
6969
6970 /*
6971  * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
6972  * should handle it ourselves in L0 (and then continue L2). Only call this
6973  * when in is_guest_mode (L2).
6974  */
6975 static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
6976 {
6977         u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
6978         struct vcpu_vmx *vmx = to_vmx(vcpu);
6979         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6980         u32 exit_reason = vmx->exit_reason;
6981
6982         trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
6983                                 vmcs_readl(EXIT_QUALIFICATION),
6984                                 vmx->idt_vectoring_info,
6985                                 intr_info,
6986                                 vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
6987                                 KVM_ISA_VMX);
6988
6989         if (vmx->nested.nested_run_pending)
6990                 return 0;
6991
6992         if (unlikely(vmx->fail)) {
6993                 pr_info_ratelimited("%s failed vm entry %x\n", __func__,
6994                                     vmcs_read32(VM_INSTRUCTION_ERROR));
6995                 return 1;
6996         }
6997
6998         switch (exit_reason) {
6999         case EXIT_REASON_EXCEPTION_NMI:
7000                 if (!is_exception(intr_info))
7001                         return 0;
7002                 else if (is_page_fault(intr_info))
7003                         return enable_ept;
7004                 else if (is_no_device(intr_info) &&
7005                          !(vmcs12->guest_cr0 & X86_CR0_TS))
7006                         return 0;
7007                 return vmcs12->exception_bitmap &
7008                                 (1u << (intr_info & INTR_INFO_VECTOR_MASK));
7009         case EXIT_REASON_EXTERNAL_INTERRUPT:
7010                 return 0;
7011         case EXIT_REASON_TRIPLE_FAULT:
7012                 return 1;
7013         case EXIT_REASON_PENDING_INTERRUPT:
7014                 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
7015         case EXIT_REASON_NMI_WINDOW:
7016                 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
7017         case EXIT_REASON_TASK_SWITCH:
7018                 return 1;
7019         case EXIT_REASON_CPUID:
7020                 if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa)
7021                         return 0;
7022                 return 1;
7023         case EXIT_REASON_HLT:
7024                 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
7025         case EXIT_REASON_INVD:
7026                 return 1;
7027         case EXIT_REASON_INVLPG:
7028                 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
7029         case EXIT_REASON_RDPMC:
7030                 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
7031         case EXIT_REASON_RDTSC:
7032                 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
7033         case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
7034         case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
7035         case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
7036         case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
7037         case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
7038         case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
7039                 /*
7040                  * VMX instructions trap unconditionally. This allows L1 to
7041                  * emulate them for its L2 guest, i.e., allows 3-level nesting!
7042                  */
7043                 return 1;
7044         case EXIT_REASON_CR_ACCESS:
7045                 return nested_vmx_exit_handled_cr(vcpu, vmcs12);
7046         case EXIT_REASON_DR_ACCESS:
7047                 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
7048         case EXIT_REASON_IO_INSTRUCTION:
7049                 return nested_vmx_exit_handled_io(vcpu, vmcs12);
7050         case EXIT_REASON_MSR_READ:
7051         case EXIT_REASON_MSR_WRITE:
7052                 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
7053         case EXIT_REASON_INVALID_STATE:
7054                 return 1;
7055         case EXIT_REASON_MWAIT_INSTRUCTION:
7056                 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
7057         case EXIT_REASON_MONITOR_INSTRUCTION:
7058                 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
7059         case EXIT_REASON_PAUSE_INSTRUCTION:
7060                 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
7061                         nested_cpu_has2(vmcs12,
7062                                 SECONDARY_EXEC_PAUSE_LOOP_EXITING);
7063         case EXIT_REASON_MCE_DURING_VMENTRY:
7064                 return 0;
7065         case EXIT_REASON_TPR_BELOW_THRESHOLD:
7066                 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
7067         case EXIT_REASON_APIC_ACCESS:
7068                 return nested_cpu_has2(vmcs12,
7069                         SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
7070         case EXIT_REASON_EPT_VIOLATION:
7071                 /*
7072                  * L0 always deals with the EPT violation. If nested EPT is
7073                  * used, and the nested mmu code discovers that the address is
7074                  * missing in the guest EPT table (EPT12), the EPT violation
7075                  * will be injected with nested_ept_inject_page_fault()
7076                  */
7077                 return 0;
7078         case EXIT_REASON_EPT_MISCONFIG:
7079                 /*
7080                  * L2 never directly uses L1's EPT, but rather L0's own EPT
7081                  * table (shadow on EPT) or a merged EPT table that L0 built
7082                  * (EPT on EPT). So any problem with the structure of the
7083                  * table is L0's fault.
7084                  */
7085                 return 0;
7086         case EXIT_REASON_WBINVD:
7087                 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
7088         case EXIT_REASON_XSETBV:
7089                 return 1;
7090         default:
7091                 return 1;
7092         }
7093 }
7094
7095 static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
7096 {
7097         *info1 = vmcs_readl(EXIT_QUALIFICATION);
7098         *info2 = vmcs_read32(VM_EXIT_INTR_INFO);
7099 }
7100
7101 /*
7102  * The guest has exited.  See if we can fix it or if we need userspace
7103  * assistance.
7104  */
7105 static int vmx_handle_exit(struct kvm_vcpu *vcpu)
7106 {
7107         struct vcpu_vmx *vmx = to_vmx(vcpu);
7108         u32 exit_reason = vmx->exit_reason;
7109         u32 vectoring_info = vmx->idt_vectoring_info;
7110
7111         /* If guest state is invalid, start emulating */
7112         if (vmx->emulation_required)
7113                 return handle_invalid_guest_state(vcpu);
7114
7115         if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) {
7116                 nested_vmx_vmexit(vcpu, exit_reason,
7117                                   vmcs_read32(VM_EXIT_INTR_INFO),
7118                                   vmcs_readl(EXIT_QUALIFICATION));
7119                 return 1;
7120         }
7121
7122         if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
7123                 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
7124                 vcpu->run->fail_entry.hardware_entry_failure_reason
7125                         = exit_reason;
7126                 return 0;
7127         }
7128
7129         if (unlikely(vmx->fail)) {
7130                 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
7131                 vcpu->run->fail_entry.hardware_entry_failure_reason
7132                         = vmcs_read32(VM_INSTRUCTION_ERROR);
7133                 return 0;
7134         }
7135
7136         /*
7137          * Note:
7138          * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused
7139          * by a delivery event, since it indicates the guest is accessing
7140          * MMIO. The vm-exit could be triggered again after returning to
7141          * the guest, which would cause an infinite loop.
7142          */
7143         if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
7144                         (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
7145                         exit_reason != EXIT_REASON_EPT_VIOLATION &&
7146                         exit_reason != EXIT_REASON_TASK_SWITCH)) {
7147                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
7148                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
7149                 vcpu->run->internal.ndata = 2;
7150                 vcpu->run->internal.data[0] = vectoring_info;
7151                 vcpu->run->internal.data[1] = exit_reason;
7152                 return 0;
7153         }
7154
7155         if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked &&
7156             !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis(
7157                                         get_vmcs12(vcpu))))) {
7158                 if (vmx_interrupt_allowed(vcpu)) {
7159                         vmx->soft_vnmi_blocked = 0;
7160                 } else if (vmx->vnmi_blocked_time > 1000000000LL &&
7161                            vcpu->arch.nmi_pending) {
7162                         /*
7163                          * This CPU doesn't support us in finding the end of an
7164                          * NMI-blocked window if the guest runs with IRQs
7165                          * disabled. So we pull the trigger after 1 s of
7166                          * futile waiting, but inform the user about this.
7167                          */
7168                         printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
7169                                "state on VCPU %d after 1 s timeout\n",
7170                                __func__, vcpu->vcpu_id);
7171                         vmx->soft_vnmi_blocked = 0;
7172                 }
7173         }
7174
7175         if (exit_reason < kvm_vmx_max_exit_handlers
7176             && kvm_vmx_exit_handlers[exit_reason])
7177                 return kvm_vmx_exit_handlers[exit_reason](vcpu);
7178         else {
7179                 WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
7180                 kvm_queue_exception(vcpu, UD_VECTOR);
7181                 return 1;
7182         }
7183 }
7184
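/*
 * Update the TPR threshold based on the highest pending interrupt (irr).
 * Skipped while L2 runs with a TPR shadow provided by L1.
 */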
7185 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
7186 {
7187         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
7188
7189         if (is_guest_mode(vcpu) &&
7190                 nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
7191                 return;
7192
7193         if (irr == -1 || tpr < irr) {
7194                 vmcs_write32(TPR_THRESHOLD, 0);
7195                 return;
7196         }
7197
7198         vmcs_write32(TPR_THRESHOLD, irr);
7199 }
7200
7201 static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
7202 {
7203         u32 sec_exec_control;
7204
7205         /*
7206          * There is no point in enabling virtualized x2APIC mode without
7207          * enabling APICv.
7208          */
7209         if (!cpu_has_vmx_virtualize_x2apic_mode() ||
7210                                 !vmx_vm_has_apicv(vcpu->kvm))
7211                 return;
7212
7213         if (!vm_need_tpr_shadow(vcpu->kvm))
7214                 return;
7215
7216         sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
7217
7218         if (set) {
7219                 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
7220                 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
7221         } else {
7222                 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
7223                 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
7224         }
7225         vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
7226
7227         vmx_set_msr_bitmap(vcpu);
7228 }
7229
7230 static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
7231 {
7232         struct vcpu_vmx *vmx = to_vmx(vcpu);
7233
7234         /*
7235          * Currently we do not handle the nested case where L2 has an
7236          * APIC access page of its own; that page is still pinned.
7237          * Hence, we skip the case where the VCPU is in guest mode _and_
7238          * L1 prepared an APIC access page for L2.
7239          *
7240          * For the case where L1 and L2 share the same APIC access page
7241          * (flexpriority=Y but SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES clear
7242          * in the vmcs12), this function will only update either the vmcs01
7243          * or the vmcs02.  If the former, the vmcs02 will be updated by
7244          * prepare_vmcs02.  If the latter, the vmcs01 will be updated in
7245          * the next L2->L1 exit.
7246          */
7247         if (!is_guest_mode(vcpu) ||
7248             !nested_cpu_has2(vmx->nested.current_vmcs12,
7249                              SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
7250                 vmcs_write64(APIC_ACCESS_ADDR, hpa);
7251 }
7252
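/*
 * Track the highest in-service interrupt in SVI (the high byte of the
 * guest interrupt status field) for virtual-interrupt delivery.
 */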
7253 static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
7254 {
7255         u16 status;
7256         u8 old;
7257
7258         if (!vmx_vm_has_apicv(kvm))
7259                 return;
7260
7261         if (isr == -1)
7262                 isr = 0;
7263
7264         status = vmcs_read16(GUEST_INTR_STATUS);
7265         old = status >> 8;
7266         if (isr != old) {
7267                 status &= 0xff;
7268                 status |= isr << 8;
7269                 vmcs_write16(GUEST_INTR_STATUS, status);
7270         }
7271 }
7272
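/* Set RVI, the low byte of the guest interrupt status field. */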
7273 static void vmx_set_rvi(int vector)
7274 {
7275         u16 status;
7276         u8 old;
7277
7278         status = vmcs_read16(GUEST_INTR_STATUS);
7279         old = (u8)status & 0xff;
7280         if ((u8)vector != old) {
7281                 status &= ~0xff;
7282                 status |= (u8)vector;
7283                 vmcs_write16(GUEST_INTR_STATUS, status);
7284         }
7285 }
7286
7287 static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
7288 {
7289         if (max_irr == -1)
7290                 return;
7291
7292         /*
7293          * If a vmexit is needed, vmx_check_nested_events handles it.
7294          */
7295         if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
7296                 return;
7297
7298         if (!is_guest_mode(vcpu)) {
7299                 vmx_set_rvi(max_irr);
7300                 return;
7301         }
7302
7303         /*
7304          * Fall back to pre-APICv interrupt injection since L2
7305          * is run without virtual interrupt delivery.
7306          */
7307         if (!kvm_event_needs_reinjection(vcpu) &&
7308             vmx_interrupt_allowed(vcpu)) {
7309                 kvm_queue_interrupt(vcpu, max_irr, false);
7310                 vmx_inject_irq(vcpu);
7311         }
7312 }
7313
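/* Load the EOI-exit bitmaps used by virtual-interrupt delivery (APICv). */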
7314 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
7315 {
7316         if (!vmx_vm_has_apicv(vcpu->kvm))
7317                 return;
7318
7319         vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
7320         vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
7321         vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
7322         vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
7323 }
7324
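/*
 * Handle exits that must be processed before host interrupts are re-enabled:
 * machine checks and NMIs that arrived while the guest was running.
 */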
7325 static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
7326 {
7327         u32 exit_intr_info;
7328
7329         if (!(vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
7330               || vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI))
7331                 return;
7332
7333         vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
7334         exit_intr_info = vmx->exit_intr_info;
7335
7336         /* Handle machine checks before interrupts are enabled */
7337         if (is_machine_check(exit_intr_info))
7338                 kvm_machine_check();
7339
7340         /* We need to handle NMIs before interrupts are enabled */
7341         if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
7342             (exit_intr_info & INTR_INFO_VALID_MASK)) {
7343                 kvm_before_handle_nmi(&vmx->vcpu);
7344                 asm("int $2");
7345                 kvm_after_handle_nmi(&vmx->vcpu);
7346         }
7347 }
7348
7349 static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
7350 {
7351         u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
7352
7353         /*
7354          * If an external interrupt is pending, the IF bit is set in
7355          * rflags/eflags on the interrupt stack frame, so interrupts will be
7356          * re-enabled on return from the interrupt handler.
7357          */
7358         if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
7359                         == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) {
7360                 unsigned int vector;
7361                 unsigned long entry;
7362                 gate_desc *desc;
7363                 struct vcpu_vmx *vmx = to_vmx(vcpu);
7364 #ifdef CONFIG_X86_64
7365                 unsigned long tmp;
7366 #endif
7367
7368                 vector =  exit_intr_info & INTR_INFO_VECTOR_MASK;
7369                 desc = (gate_desc *)vmx->host_idt_base + vector;
7370                 entry = gate_offset(*desc);
7371                 asm volatile(
7372 #ifdef CONFIG_X86_64
7373                         "mov %%" _ASM_SP ", %[sp]\n\t"
7374                         "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t"
7375                         "push $%c[ss]\n\t"
7376                         "push %[sp]\n\t"
7377 #endif
7378                         "pushf\n\t"
7379                         "orl $0x200, (%%" _ASM_SP ")\n\t"
7380                         __ASM_SIZE(push) " $%c[cs]\n\t"
7381                         "call *%[entry]\n\t"
7382                         :
7383 #ifdef CONFIG_X86_64
7384                         [sp]"=&r"(tmp)
7385 #endif
7386                         :
7387                         [entry]"r"(entry),
7388                         [ss]"i"(__KERNEL_DS),
7389                         [cs]"i"(__KERNEL_CS)
7390                         );
7391         } else
7392                 local_irq_enable();
7393 }
7394
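/*
 * MPX is supported only if both the "clear BNDCFGS" VM-exit control and the
 * "load BNDCFGS" VM-entry control are available.
 */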
7395 static bool vmx_mpx_supported(void)
7396 {
7397         return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) &&
7398                 (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS);
7399 }
7400
7401 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
7402 {
7403         u32 exit_intr_info;
7404         bool unblock_nmi;
7405         u8 vector;
7406         bool idtv_info_valid;
7407
7408         idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
7409
7410         if (cpu_has_virtual_nmis()) {
7411                 if (vmx->nmi_known_unmasked)
7412                         return;
7413                 /*
7414                  * Can't use vmx->exit_intr_info since we're not sure what
7415                  * the exit reason is.
7416                  */
7417                 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
7418                 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
7419                 vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
7420                 /*
7421                  * SDM 3: 27.7.1.2 (September 2008)
7422                  * Re-set bit "block by NMI" before VM entry if vmexit caused by
7423                  * a guest IRET fault.
7424                  * SDM 3: 23.2.2 (September 2008)
7425                  * Bit 12 is undefined in any of the following cases:
7426                  *  If the VM exit sets the valid bit in the IDT-vectoring
7427                  *   information field.
7428                  *  If the VM exit is due to a double fault.
7429                  */
7430                 if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
7431                     vector != DF_VECTOR && !idtv_info_valid)
7432                         vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
7433                                       GUEST_INTR_STATE_NMI);
7434                 else
7435                         vmx->nmi_known_unmasked =
7436                                 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
7437                                   & GUEST_INTR_STATE_NMI);
7438         } else if (unlikely(vmx->soft_vnmi_blocked))
7439                 vmx->vnmi_blocked_time +=
7440                         ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
7441 }
7442
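/*
 * Re-queue an event (NMI, exception or interrupt) whose delivery was cut
 * short by a VM-exit, based on the IDT-vectoring information, so that it is
 * injected again on the next VM-entry.
 */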
7443 static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
7444                                       u32 idt_vectoring_info,
7445                                       int instr_len_field,
7446                                       int error_code_field)
7447 {
7448         u8 vector;
7449         int type;
7450         bool idtv_info_valid;
7451
7452         idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
7453
7454         vcpu->arch.nmi_injected = false;
7455         kvm_clear_exception_queue(vcpu);
7456         kvm_clear_interrupt_queue(vcpu);
7457
7458         if (!idtv_info_valid)
7459                 return;
7460
7461         kvm_make_request(KVM_REQ_EVENT, vcpu);
7462
7463         vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
7464         type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
7465
7466         switch (type) {
7467         case INTR_TYPE_NMI_INTR:
7468                 vcpu->arch.nmi_injected = true;
7469                 /*
7470                  * SDM 3: 27.7.1.2 (September 2008)
7471                  * Clear bit "block by NMI" before VM entry if a NMI
7472                  * delivery faulted.
7473                  */
7474                 vmx_set_nmi_mask(vcpu, false);
7475                 break;
7476         case INTR_TYPE_SOFT_EXCEPTION:
7477                 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
7478                 /* fall through */
7479         case INTR_TYPE_HARD_EXCEPTION:
7480                 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
7481                         u32 err = vmcs_read32(error_code_field);
7482                         kvm_requeue_exception_e(vcpu, vector, err);
7483                 } else
7484                         kvm_requeue_exception(vcpu, vector);
7485                 break;
7486         case INTR_TYPE_SOFT_INTR:
7487                 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
7488                 /* fall through */
7489         case INTR_TYPE_EXT_INTR:
7490                 kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
7491                 break;
7492         default:
7493                 break;
7494         }
7495 }
7496
7497 static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
7498 {
7499         __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
7500                                   VM_EXIT_INSTRUCTION_LEN,
7501                                   IDT_VECTORING_ERROR_CODE);
7502 }
7503
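/*
 * Cancel an event that was programmed for injection but not yet delivered,
 * moving it back into KVM's software queues and clearing the entry field.
 */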
7504 static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
7505 {
7506         __vmx_complete_interrupts(vcpu,
7507                                   vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
7508                                   VM_ENTRY_INSTRUCTION_LEN,
7509                                   VM_ENTRY_EXCEPTION_ERROR_CODE);
7510
7511         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
7512 }
7513
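/*
 * Keep the VM-entry/VM-exit MSR-switch lists in sync with the perf
 * subsystem; entries whose guest and host values are identical are dropped.
 */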
7514 static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
7515 {
7516         int i, nr_msrs;
7517         struct perf_guest_switch_msr *msrs;
7518
7519         msrs = perf_guest_get_msrs(&nr_msrs);
7520
7521         if (!msrs)
7522                 return;
7523
7524         for (i = 0; i < nr_msrs; i++)
7525                 if (msrs[i].host == msrs[i].guest)
7526                         clear_atomic_switch_msr(vmx, msrs[i].msr);
7527                 else
7528                         add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
7529                                         msrs[i].host);
7530 }
7531
7532 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
7533 {
7534         struct vcpu_vmx *vmx = to_vmx(vcpu);
7535         unsigned long debugctlmsr, cr4;
7536
7537         /* Record the guest's net vcpu time for enforced NMI injections. */
7538         if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
7539                 vmx->entry_time = ktime_get();
7540
7541         /* Don't enter VMX if guest state is invalid; let the exit handler
7542            start emulation until we arrive back at a valid state. */
7543         if (vmx->emulation_required)
7544                 return;
7545
7546         if (vmx->ple_window_dirty) {
7547                 vmx->ple_window_dirty = false;
7548                 vmcs_write32(PLE_WINDOW, vmx->ple_window);
7549         }
7550
7551         if (vmx->nested.sync_shadow_vmcs) {
7552                 copy_vmcs12_to_shadow(vmx);
7553                 vmx->nested.sync_shadow_vmcs = false;
7554         }
7555
7556         if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
7557                 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
7558         if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
7559                 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
7560
7561         cr4 = read_cr4();
7562         if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
7563                 vmcs_writel(HOST_CR4, cr4);
7564                 vmx->host_state.vmcs_host_cr4 = cr4;
7565         }
7566
7567         /* When single-stepping over STI and MOV SS, we must clear the
7568          * corresponding interruptibility bits in the guest state. Otherwise
7569          * vmentry fails as it then expects bit 14 (BS) of the pending debug
7570          * exceptions field to be set, but that's not correct for the guest
7571          * debugging case. */
7572         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
7573                 vmx_set_interrupt_shadow(vcpu, 0);
7574
7575         atomic_switch_perf_msrs(vmx);
7576         debugctlmsr = get_debugctlmsr();
7577
7578         vmx->__launched = vmx->loaded_vmcs->launched;
7579         asm(
7580                 /* Store host registers */
7581                 "push %%" _ASM_DX "; push %%" _ASM_BP ";"
7582                 "push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */
7583                 "push %%" _ASM_CX " \n\t"
7584                 "cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
7585                 "je 1f \n\t"
7586                 "mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
7587                 __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
7588                 "1: \n\t"
7589                 /* Reload cr2 if changed */
7590                 "mov %c[cr2](%0), %%" _ASM_AX " \n\t"
7591                 "mov %%cr2, %%" _ASM_DX " \n\t"
7592                 "cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
7593                 "je 2f \n\t"
7594                 "mov %%" _ASM_AX", %%cr2 \n\t"
7595                 "2: \n\t"
7596                 /* Check if vmlaunch or vmresume is needed */
7597                 "cmpl $0, %c[launched](%0) \n\t"
7598                 /* Load guest registers.  Don't clobber flags. */
7599                 "mov %c[rax](%0), %%" _ASM_AX " \n\t"
7600                 "mov %c[rbx](%0), %%" _ASM_BX " \n\t"
7601                 "mov %c[rdx](%0), %%" _ASM_DX " \n\t"
7602                 "mov %c[rsi](%0), %%" _ASM_SI " \n\t"
7603                 "mov %c[rdi](%0), %%" _ASM_DI " \n\t"
7604                 "mov %c[rbp](%0), %%" _ASM_BP " \n\t"
7605 #ifdef CONFIG_X86_64
7606                 "mov %c[r8](%0),  %%r8  \n\t"
7607                 "mov %c[r9](%0),  %%r9  \n\t"
7608                 "mov %c[r10](%0), %%r10 \n\t"
7609                 "mov %c[r11](%0), %%r11 \n\t"
7610                 "mov %c[r12](%0), %%r12 \n\t"
7611                 "mov %c[r13](%0), %%r13 \n\t"
7612                 "mov %c[r14](%0), %%r14 \n\t"
7613                 "mov %c[r15](%0), %%r15 \n\t"
7614 #endif
7615                 "mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */
7616
7617                 /* Enter guest mode */
7618                 "jne 1f \n\t"
7619                 __ex(ASM_VMX_VMLAUNCH) "\n\t"
7620                 "jmp 2f \n\t"
7621                 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
7622                 "2: "
7623                 /* Save guest registers, load host registers, keep flags */
7624                 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
7625                 "pop %0 \n\t"
7626                 "mov %%" _ASM_AX ", %c[rax](%0) \n\t"
7627                 "mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
7628                 __ASM_SIZE(pop) " %c[rcx](%0) \n\t"
7629                 "mov %%" _ASM_DX ", %c[rdx](%0) \n\t"
7630                 "mov %%" _ASM_SI ", %c[rsi](%0) \n\t"
7631                 "mov %%" _ASM_DI ", %c[rdi](%0) \n\t"
7632                 "mov %%" _ASM_BP ", %c[rbp](%0) \n\t"
7633 #ifdef CONFIG_X86_64
7634                 "mov %%r8,  %c[r8](%0) \n\t"
7635                 "mov %%r9,  %c[r9](%0) \n\t"
7636                 "mov %%r10, %c[r10](%0) \n\t"
7637                 "mov %%r11, %c[r11](%0) \n\t"
7638                 "mov %%r12, %c[r12](%0) \n\t"
7639                 "mov %%r13, %c[r13](%0) \n\t"
7640                 "mov %%r14, %c[r14](%0) \n\t"
7641                 "mov %%r15, %c[r15](%0) \n\t"
7642 #endif
7643                 "mov %%cr2, %%" _ASM_AX "   \n\t"
7644                 "mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
7645
7646                 "pop  %%" _ASM_BP "; pop  %%" _ASM_DX " \n\t"
7647                 "setbe %c[fail](%0) \n\t"
7648                 ".pushsection .rodata \n\t"
7649                 ".global vmx_return \n\t"
7650                 "vmx_return: " _ASM_PTR " 2b \n\t"
7651                 ".popsection"
7652               : : "c"(vmx), "d"((unsigned long)HOST_RSP),
7653                 [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
7654                 [fail]"i"(offsetof(struct vcpu_vmx, fail)),
7655                 [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
7656                 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
7657                 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
7658                 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
7659                 [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
7660                 [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
7661                 [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
7662                 [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
7663 #ifdef CONFIG_X86_64
7664                 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
7665                 [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
7666                 [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
7667                 [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
7668                 [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
7669                 [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
7670                 [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
7671                 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
7672 #endif
7673                 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
7674                 [wordsize]"i"(sizeof(ulong))
7675               : "cc", "memory"
7676 #ifdef CONFIG_X86_64
7677                 , "rax", "rbx", "rdi", "rsi"
7678                 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
7679 #else
7680                 , "eax", "ebx", "edi", "esi"
7681 #endif
7682               );
7683
7684         /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
7685         if (debugctlmsr)
7686                 update_debugctlmsr(debugctlmsr);
7687
7688 #ifndef CONFIG_X86_64
7689         /*
7690          * The sysexit path does not restore ds/es, so we must set them to
7691          * a reasonable value ourselves.
7692          *
7693          * We can't defer this to vmx_load_host_state() since that function
7694          * may be executed in interrupt context, which saves and restores segments
7695          * around it, nullifying its effect.
7696          */
7697         loadsegment(ds, __USER_DS);
7698         loadsegment(es, __USER_DS);
7699 #endif
7700
7701         vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
7702                                   | (1 << VCPU_EXREG_RFLAGS)
7703                                   | (1 << VCPU_EXREG_PDPTR)
7704                                   | (1 << VCPU_EXREG_SEGMENTS)
7705                                   | (1 << VCPU_EXREG_CR3));
7706         vcpu->arch.regs_dirty = 0;
7707
7708         vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
7709
7710         vmx->loaded_vmcs->launched = 1;
7711
7712         vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
7713         trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
7714
7715         /*
7716          * The KVM_REQ_EVENT optimization bit is only on for one entry, and if
7717          * we did not inject a still-pending event to L1 now because of
7718          * nested_run_pending, we need to re-enable this bit.
7719          */
7720         if (vmx->nested.nested_run_pending)
7721                 kvm_make_request(KVM_REQ_EVENT, vcpu);
7722
7723         vmx->nested.nested_run_pending = 0;
7724
7725         vmx_complete_atomic_exit(vmx);
7726         vmx_recover_nmi_blocking(vmx);
7727         vmx_complete_interrupts(vmx);
7728 }
7729
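/* Switch back to vmcs01 (the L1 VMCS) and reload it on the current CPU. */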
7730 static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
7731 {
7732         struct vcpu_vmx *vmx = to_vmx(vcpu);
7733         int cpu;
7734
7735         if (vmx->loaded_vmcs == &vmx->vmcs01)
7736                 return;
7737
7738         cpu = get_cpu();
7739         vmx->loaded_vmcs = &vmx->vmcs01;
7740         vmx_vcpu_put(vcpu);
7741         vmx_vcpu_load(vcpu, cpu);
7742         vcpu->cpu = cpu;
7743         put_cpu();
7744 }
7745
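/*
 * Tear down a vCPU: release its VPID and nested state, free the loaded VMCS
 * and the guest MSR array.
 */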
7746 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
7747 {
7748         struct vcpu_vmx *vmx = to_vmx(vcpu);
7749
7750         free_vpid(vmx);
7751         leave_guest_mode(vcpu);
7752         vmx_load_vmcs01(vcpu);
7753         free_nested(vmx);
7754         free_loaded_vmcs(vmx->loaded_vmcs);
7755         kfree(vmx->guest_msrs);
7756         kvm_vcpu_uninit(vcpu);
7757         kmem_cache_free(kvm_vcpu_cache, vmx);
7758 }
7759
7760 static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
7761 {
7762         int err;
7763         struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
7764         int cpu;
7765
7766         if (!vmx)
7767                 return ERR_PTR(-ENOMEM);
7768
7769         allocate_vpid(vmx);
7770
7771         err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
7772         if (err)
7773                 goto free_vcpu;
7774
7775         vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
7776         BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
7777                      > PAGE_SIZE);
7778
7779         err = -ENOMEM;
7780         if (!vmx->guest_msrs)
7781                 goto uninit_vcpu;
7783
7784         vmx->loaded_vmcs = &vmx->vmcs01;
7785         vmx->loaded_vmcs->vmcs = alloc_vmcs();
7786         if (!vmx->loaded_vmcs->vmcs)
7787                 goto free_msrs;
7788         if (!vmm_exclusive)
7789                 kvm_cpu_vmxon(__pa(per_cpu(vmxarea, raw_smp_processor_id())));
7790         loaded_vmcs_init(vmx->loaded_vmcs);
7791         if (!vmm_exclusive)
7792                 kvm_cpu_vmxoff();
7793
7794         cpu = get_cpu();
7795         vmx_vcpu_load(&vmx->vcpu, cpu);
7796         vmx->vcpu.cpu = cpu;
7797         err = vmx_vcpu_setup(vmx);
7798         vmx_vcpu_put(&vmx->vcpu);
7799         put_cpu();
7800         if (err)
7801                 goto free_vmcs;
7802         if (vm_need_virtualize_apic_accesses(kvm)) {
7803                 err = alloc_apic_access_page(kvm);
7804                 if (err)
7805                         goto free_vmcs;
7806         }
7807
7808         if (enable_ept) {
7809                 if (!kvm->arch.ept_identity_map_addr)
7810                         kvm->arch.ept_identity_map_addr =
7811                                 VMX_EPT_IDENTITY_PAGETABLE_ADDR;
7812                 err = init_rmode_identity_map(kvm);
7813                 if (err)
7814                         goto free_vmcs;
7815         }
7816
7817         vmx->nested.current_vmptr = -1ull;
7818         vmx->nested.current_vmcs12 = NULL;
7819
7820         return &vmx->vcpu;
7821
7822 free_vmcs:
7823         free_loaded_vmcs(vmx->loaded_vmcs);
7824 free_msrs:
7825         kfree(vmx->guest_msrs);
7826 uninit_vcpu:
7827         kvm_vcpu_uninit(&vmx->vcpu);
7828 free_vcpu:
7829         free_vpid(vmx);
7830         kmem_cache_free(kvm_vcpu_cache, vmx);
7831         return ERR_PTR(err);
7832 }
7833
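/* Verify that this CPU's VMX capabilities match those found at setup time. */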
7834 static void __init vmx_check_processor_compat(void *rtn)
7835 {
7836         struct vmcs_config vmcs_conf;
7837
7838         *(int *)rtn = 0;
7839         if (setup_vmcs_config(&vmcs_conf) < 0)
7840                 *(int *)rtn = -EIO;
7841         if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
7842                 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
7843                                 smp_processor_id());
7844                 *(int *)rtn = -EIO;
7845         }
7846 }
7847
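/* EPT page-walk length: the default guest address width + 1 = 4 levels. */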
7848 static int get_ept_level(void)
7849 {
7850         return VMX_EPT_DEFAULT_GAW + 1;
7851 }
7852
7853 static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
7854 {
7855         u64 ret;
7856
7857         /* For VT-d and EPT combination
7858          * 1. MMIO: always map as UC
7859          * 2. EPT with VT-d:
7860          *   a. VT-d without snooping control feature: can't guarantee the
7861          *      result; try to trust the guest's memory type.
7862          *   b. VT-d with snooping control feature: the snooping control of
7863          *      the VT-d engine guarantees cache correctness, so set the type
7864          *      to WB to stay consistent with the host (same as item 3).
7865          * 3. EPT without VT-d: always map as WB and set IPAT=1 to stay
7866          *    consistent with the host MTRRs.
7867          */
7868         if (is_mmio)
7869                 ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
7870         else if (kvm_arch_has_noncoherent_dma(vcpu->kvm))
7871                 ret = kvm_get_guest_memory_type(vcpu, gfn) <<
7872                       VMX_EPT_MT_EPTE_SHIFT;
7873         else
7874                 ret = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT)
7875                         | VMX_EPT_IPAT_BIT;
7876
7877         return ret;
7878 }
7879
7880 static int vmx_get_lpage_level(void)
7881 {
7882         if (enable_ept && !cpu_has_vmx_ept_1g_page())
7883                 return PT_DIRECTORY_LEVEL;
7884         else
7885                 /* For shadow paging and for EPT with 1GB-page support */
7886                 return PT_PDPE_LEVEL;
7887 }
7888
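/*
 * Recompute the secondary execution controls that depend on guest CPUID:
 * RDTSCP and INVPCID.
 */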
7889 static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
7890 {
7891         struct kvm_cpuid_entry2 *best;
7892         struct vcpu_vmx *vmx = to_vmx(vcpu);
7893         u32 exec_control;
7894
7895         vmx->rdtscp_enabled = false;
7896         if (vmx_rdtscp_supported()) {
7897                 exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
7898                 if (exec_control & SECONDARY_EXEC_RDTSCP) {
7899                         best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
7900                         if (best && (best->edx & bit(X86_FEATURE_RDTSCP)))
7901                                 vmx->rdtscp_enabled = true;
7902                         else {
7903                                 exec_control &= ~SECONDARY_EXEC_RDTSCP;
7904                                 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
7905                                                 exec_control);
7906                         }
7907                 }
7908         }
7909
7910         /* Exposing INVPCID only when PCID is exposed */
7911         best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
7912         if (vmx_invpcid_supported() &&
7913             best && (best->ebx & bit(X86_FEATURE_INVPCID)) &&
7914             guest_cpuid_has_pcid(vcpu)) {
7915                 exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
7916                 exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;
7917                 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
7918                              exec_control);
7919         } else {
7920                 if (cpu_has_secondary_exec_ctrls()) {
7921                         exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
7922                         exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
7923                         vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
7924                                      exec_control);
7925                 }
7926                 if (best)
7927                         best->ebx &= ~bit(X86_FEATURE_INVPCID);
7928         }
7929 }
7930
7931 static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
7932 {
7933         if (func == 1 && nested)
7934                 entry->ecx |= bit(X86_FEATURE_VMX);
7935 }
7936
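/*
 * Reflect an EPT-induced fault taken while running L2 into L1 as an EPT
 * violation or EPT misconfiguration vmexit.
 */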
7937 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
7938                 struct x86_exception *fault)
7939 {
7940         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
7941         u32 exit_reason;
7942
7943         if (fault->error_code & PFERR_RSVD_MASK)
7944                 exit_reason = EXIT_REASON_EPT_MISCONFIG;
7945         else
7946                 exit_reason = EXIT_REASON_EPT_VIOLATION;
7947         nested_vmx_vmexit(vcpu, exit_reason, 0, vcpu->arch.exit_qualification);
7948         vmcs12->guest_physical_address = fault->address;
7949 }
7950
7951 /* Callbacks for nested_ept_init_mmu_context: */
7952
7953 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
7954 {
7955         /* return the page table to be shadowed - in our case, EPT12 */
7956         return get_vmcs12(vcpu)->ept_pointer;
7957 }
7958
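/*
 * Switch the vCPU MMU to a shadow-EPT context that shadows L1's EPT tables;
 * L2 linear-address walks go through the nested MMU.
 */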
7959 static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
7960 {
7961         kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu,
7962                         nested_vmx_ept_caps & VMX_EPT_EXECUTE_ONLY_BIT);
7963
7964         vcpu->arch.mmu.set_cr3           = vmx_set_cr3;
7965         vcpu->arch.mmu.get_cr3           = nested_ept_get_cr3;
7966         vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
7967
7968         vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
7969 }
7970
7971 static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
7972 {
7973         vcpu->arch.walk_mmu = &vcpu->arch.mmu;
7974 }
7975
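/*
 * Deliver an L2 page fault to L1 as a vmexit if L1 intercepts #PF,
 * otherwise inject it directly into L2.
 */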
7976 static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
7977                 struct x86_exception *fault)
7978 {
7979         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
7980
7981         WARN_ON(!is_guest_mode(vcpu));
7982
7983         /* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */
7984         if (vmcs12->exception_bitmap & (1u << PF_VECTOR))
7985                 nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
7986                                   vmcs_read32(VM_EXIT_INTR_INFO),
7987                                   vmcs_readl(EXIT_QUALIFICATION));
7988         else
7989                 kvm_inject_page_fault(vcpu, fault);
7990 }
7991
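/*
 * Pin the guest pages whose host physical addresses vmcs02 needs (the
 * APIC-access page and the virtual-APIC page), after checking alignment.
 */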
7992 static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
7993                                         struct vmcs12 *vmcs12)
7994 {
7995         struct vcpu_vmx *vmx = to_vmx(vcpu);
7996
7997         if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
7998                 /* TODO: Also verify bits beyond physical address width are 0 */
7999                 if (!PAGE_ALIGNED(vmcs12->apic_access_addr))
8000                         return false;
8001
8002                 /*
8003                  * Translate L1 physical address to host physical
8004                  * address for vmcs02. Keep the page pinned, so this
8005                  * physical address remains valid. We keep a reference
8006                  * to it so we can release it later.
8007                  */
8008                 if (vmx->nested.apic_access_page) /* shouldn't happen */
8009                         nested_release_page(vmx->nested.apic_access_page);
8010                 vmx->nested.apic_access_page =
8011                         nested_get_page(vcpu, vmcs12->apic_access_addr);
8012         }
8013
8014         if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
8015                 /* TODO: Also verify bits beyond physical address width are 0 */
8016                 if (!PAGE_ALIGNED(vmcs12->virtual_apic_page_addr))
8017                         return false;
8018
8019                 if (vmx->nested.virtual_apic_page) /* shouldn't happen */
8020                         nested_release_page(vmx->nested.virtual_apic_page);
8021                 vmx->nested.virtual_apic_page =
8022                         nested_get_page(vcpu, vmcs12->virtual_apic_page_addr);
8023
8024                 /*
8025                  * Failing the vm entry is _not_ what the processor does
8026                  * but it's basically the only possibility we have.
8027                  * We could still enter the guest if CR8 load exits are
8028                  * enabled, CR8 store exits are enabled, and virtualize APIC
8029                  * access is disabled; in this case the processor would never
8030                  * use the TPR shadow and we could simply clear the bit from
8031                  * the execution control.  But such a configuration is useless,
8032                  * so let's keep the code simple.
8033                  */
8034                 if (!vmx->nested.virtual_apic_page)
8035                         return false;
8036         }
8037
8038         return true;
8039 }
8040
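/*
 * Emulate the VMX-preemption timer with an hrtimer: convert the vmcs12 timer
 * value from TSC-based units to nanoseconds; very short timeouts fire
 * immediately.
 */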
8041 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
8042 {
8043         u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
8044         struct vcpu_vmx *vmx = to_vmx(vcpu);
8045
8046         if (vcpu->arch.virtual_tsc_khz == 0)
8047                 return;
8048
8049         /* Make sure short timeouts reliably trigger an immediate vmexit.
8050          * hrtimer_start does not guarantee this. */
8051         if (preemption_timeout <= 1) {
8052                 vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
8053                 return;
8054         }
8055
8056         preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
8057         preemption_timeout *= 1000000;
8058         do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
8059         hrtimer_start(&vmx->nested.preemption_timer,
8060                       ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
8061 }
8062
8063 /*
8064  * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
8065  * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
8066  * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
8067  * guest in a way that will both be appropriate to L1's requests, and our
8068  * needs. In addition to modifying the active vmcs (which is vmcs02), this
8069  * function also has additional necessary side-effects, like setting various
8070  * vcpu->arch fields.
8071  */
8072 static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
8073 {
8074         struct vcpu_vmx *vmx = to_vmx(vcpu);
8075         u32 exec_control;
8076
8077         vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
8078         vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
8079         vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
8080         vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
8081         vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
8082         vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
8083         vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
8084         vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
8085         vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
8086         vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
8087         vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
8088         vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
8089         vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
8090         vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
8091         vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
8092         vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
8093         vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
8094         vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
8095         vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
8096         vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
8097         vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
8098         vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
8099         vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
8100         vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
8101         vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
8102         vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
8103         vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
8104         vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
8105         vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
8106         vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
8107         vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
8108         vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
8109         vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
8110         vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
8111         vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
8112         vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
8113
8114         if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
8115                 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
8116                 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
8117         } else {
8118                 kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
8119                 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
8120         }
8121         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
8122                 vmcs12->vm_entry_intr_info_field);
8123         vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
8124                 vmcs12->vm_entry_exception_error_code);
8125         vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
8126                 vmcs12->vm_entry_instruction_len);
8127         vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
8128                 vmcs12->guest_interruptibility_info);
8129         vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
8130         vmx_set_rflags(vcpu, vmcs12->guest_rflags);
8131         vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
8132                 vmcs12->guest_pending_dbg_exceptions);
8133         vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
8134         vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
8135
8136         vmcs_write64(VMCS_LINK_POINTER, -1ull);
8137
8138         exec_control = vmcs12->pin_based_vm_exec_control;
8139         exec_control |= vmcs_config.pin_based_exec_ctrl;
8140         exec_control &= ~(PIN_BASED_VMX_PREEMPTION_TIMER |
8141                           PIN_BASED_POSTED_INTR);
8142         vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
8143
8144         vmx->nested.preemption_timer_expired = false;
8145         if (nested_cpu_has_preemption_timer(vmcs12))
8146                 vmx_start_preemption_timer(vcpu);
8147
8148         /*
8149          * Whether page-faults are trapped is determined by a combination of
8150          * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
8151          * If enable_ept, L0 doesn't care about page faults and we should
8152          * set all of these to L1's desires. However, if !enable_ept, L0 does
8153          * care about (at least some) page faults, and because it is not easy
8154          * (if at all possible?) to merge L0 and L1's desires, we simply ask
8155          * to exit on each and every L2 page fault. This is done by setting
8156          * MASK=MATCH=0 and (see below) EB.PF=1.
8157          * Note that below we don't need special code to set EB.PF beyond the
8158          * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
8159          * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
8160          * !enable_ept, EB.PF is 1, so the "or" will always be 1.
8161          *
8162          * A problem with this approach (when !enable_ept) is that L1 may be
8163          * injected with more page faults than it asked for. This could have
8164          * caused problems, but in practice existing hypervisors don't care.
8165          * To fix this, we will need to emulate the PFEC checking (on the L1
8166          * page tables), using walk_addr(), when injecting PFs to L1.
8167          */
8168         vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
8169                 enable_ept ? vmcs12->page_fault_error_code_mask : 0);
8170         vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
8171                 enable_ept ? vmcs12->page_fault_error_code_match : 0);
8172
8173         if (cpu_has_secondary_exec_ctrls()) {
8174                 exec_control = vmx_secondary_exec_control(vmx);
8175                 if (!vmx->rdtscp_enabled)
8176                         exec_control &= ~SECONDARY_EXEC_RDTSCP;
8177                 /* Take the following fields only from vmcs12 */
8178                 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
8179                                   SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
8180                                   SECONDARY_EXEC_APIC_REGISTER_VIRT);
8181                 if (nested_cpu_has(vmcs12,
8182                                 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
8183                         exec_control |= vmcs12->secondary_vm_exec_control;
8184
8185                 if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) {
8186                         /*
8187                          * If translation failed, it does not matter: this
8188                          * feature asks to exit when the given address is
8189                          * accessed, and if the address can never be accessed,
8190                          * the feature won't do anything anyway.
8191                          */
8192                         if (!vmx->nested.apic_access_page)
8193                                 exec_control &=
8194                                   ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
8195                         else
8196                                 vmcs_write64(APIC_ACCESS_ADDR,
8197                                   page_to_phys(vmx->nested.apic_access_page));
8198                 } else if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) {
8199                         exec_control |=
8200                                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
8201                         kvm_vcpu_reload_apic_access_page(vcpu);
8202                 }
8203
8204                 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
8205         }
8206
8207
8208         /*
8209          * Set host-state according to L0's settings (vmcs12 is irrelevant here)
8210          * Some constant fields are set here by vmx_set_constant_host_state().
8211          * Other fields are different per CPU, and will be set later when
8212          * vmx_vcpu_load() is called, and when vmx_save_host_state() is called.
8213          */
8214         vmx_set_constant_host_state(vmx);
8215
8216         /*
8217          * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
8218          * entry, but only if the current (host) sp changed from the value
8219          * we wrote last (vmx->host_rsp). This cache is no longer relevant
8220          * if we switch vmcs, and rather than hold a separate cache per vmcs,
8221          * here we just force the write to happen on entry.
8222          */
8223         vmx->host_rsp = 0;
8224
8225         exec_control = vmx_exec_control(vmx); /* L0's desires */
8226         exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
8227         exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
8228         exec_control &= ~CPU_BASED_TPR_SHADOW;
8229         exec_control |= vmcs12->cpu_based_vm_exec_control;
8230
8231         if (exec_control & CPU_BASED_TPR_SHADOW) {
8232                 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
8233                                 page_to_phys(vmx->nested.virtual_apic_page));
8234                 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
8235         }
8236
8237         /*
8238          * Merging of IO and MSR bitmaps not currently supported.
8239          * Rather, exit every time.
8240          */
8241         exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
8242         exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
8243         exec_control |= CPU_BASED_UNCOND_IO_EXITING;
8244
8245         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
8246
8247         /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
8248          * bitwise-or of what L1 wants to trap for L2, and what we want to
8249          * trap. Note that CR0.TS also needs updating - we do this later.
8250          */
8251         update_exception_bitmap(vcpu);
8252         vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
8253         vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
8254
8255         /* L2->L1 exit controls are emulated - the hardware exit is to L0 so
8256          * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
8257          * bits are further modified by vmx_set_efer() below.
8258          */
8259         vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
8260
8261         /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
8262          * emulated by vmx_set_efer(), below.
8263          */
8264         vm_entry_controls_init(vmx,
8265                 (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
8266                         ~VM_ENTRY_IA32E_MODE) |
8267                 (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
8268
8269         if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) {
8270                 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
8271                 vcpu->arch.pat = vmcs12->guest_ia32_pat;
8272         } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
8273                 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
8274
8275
8276         set_cr4_guest_host_mask(vmx);
8277
8278         if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)
8279                 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
8280
8281         if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
8282                 vmcs_write64(TSC_OFFSET,
8283                         vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
8284         else
8285                 vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
8286
8287         if (enable_vpid) {
8288                 /*
8289                  * Trivially support vpid by letting L2s share their parent
8290                  * L1's vpid. TODO: move to a more elaborate solution, giving
8291                  * each L2 its own vpid and exposing the vpid feature to L1.
8292                  */
8293                 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
8294                 vmx_flush_tlb(vcpu);
8295         }
8296
8297         if (nested_cpu_has_ept(vmcs12)) {
8298                 kvm_mmu_unload(vcpu);
8299                 nested_ept_init_mmu_context(vcpu);
8300         }
8301
8302         if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
8303                 vcpu->arch.efer = vmcs12->guest_ia32_efer;
8304         else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
8305                 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
8306         else
8307                 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
8308         /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
8309         vmx_set_efer(vcpu, vcpu->arch.efer);
8310
8311         /*
8312          * This sets GUEST_CR0 to vmcs12->guest_cr0, with possibly a modified
8313          * TS bit (for lazy fpu) and bits which we consider mandatory enabled.
8314          * The CR0_READ_SHADOW is what L2 should have expected to read given
8315          * the specifications by L1; It's not enough to take
8316          * the specification by L1; it's not enough to take
8317          * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may
8318          * have more bits set than L1 expected.
8319         vmx_set_cr0(vcpu, vmcs12->guest_cr0);
8320         vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
8321
8322         vmx_set_cr4(vcpu, vmcs12->guest_cr4);
8323         vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
8324
8325         /* shadow page tables on either EPT or shadow page tables */
8326         kvm_set_cr3(vcpu, vmcs12->guest_cr3);
8327         kvm_mmu_reset_context(vcpu);
8328
8329         if (!enable_ept)
8330                 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
8331
8332         /*
8333          * L1 may access L2's PDPTRs, so save them to construct vmcs12
8334          */
8335         if (enable_ept) {
8336                 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
8337                 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
8338                 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
8339                 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
8340         }
8341
8342         kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
8343         kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
8344 }
8345
8346 /*
8347  * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
8348  * for running an L2 nested guest.
8349  */
8350 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
8351 {
8352         struct vmcs12 *vmcs12;
8353         struct vcpu_vmx *vmx = to_vmx(vcpu);
8354         int cpu;
8355         struct loaded_vmcs *vmcs02;
8356         bool ia32e;
8357
8358         if (!nested_vmx_check_permission(vcpu) ||
8359             !nested_vmx_check_vmcs12(vcpu))
8360                 return 1;
8361
8362         skip_emulated_instruction(vcpu);
8363         vmcs12 = get_vmcs12(vcpu);
8364
8365         if (enable_shadow_vmcs)
8366                 copy_shadow_to_vmcs12(vmx);
8367
8368         /*
8369          * The nested entry process starts with enforcing various prerequisites
8370          * on vmcs12 as required by the Intel SDM, and acts appropriately when
8371          * they fail: As the SDM explains, some conditions should cause the
8372          * instruction to fail, while others will cause the instruction to seem
8373          * to succeed, but return an EXIT_REASON_INVALID_STATE.
8374          * To speed up the normal (success) code path, we should avoid checking
8375          * for misconfigurations which will anyway be caught by the processor
8376          * when using the merged vmcs02.
8377          */
8378         if (vmcs12->launch_state == launch) {
8379                 nested_vmx_failValid(vcpu,
8380                         launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
8381                                : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
8382                 return 1;
8383         }
8384
8385         if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
8386             vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) {
8387                 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
8388                 return 1;
8389         }
8390
8391         if ((vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_MSR_BITMAPS) &&
8392                         !PAGE_ALIGNED(vmcs12->msr_bitmap)) {
8393                 /*TODO: Also verify bits beyond physical address width are 0*/
8394                 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
8395                 return 1;
8396         }
8397
8398         if (!nested_get_vmcs12_pages(vcpu, vmcs12)) {
8399                 /*TODO: Also verify bits beyond physical address width are 0*/
8400                 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
8401                 return 1;
8402         }
8403
8404         if (vmcs12->vm_entry_msr_load_count > 0 ||
8405             vmcs12->vm_exit_msr_load_count > 0 ||
8406             vmcs12->vm_exit_msr_store_count > 0) {
8407                 pr_warn_ratelimited("%s: VMCS MSR_{LOAD,STORE} unsupported\n",
8408                                     __func__);
8409                 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
8410                 return 1;
8411         }
8412
8413         if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
8414                                 nested_vmx_true_procbased_ctls_low,
8415                                 nested_vmx_procbased_ctls_high) ||
8416             !vmx_control_verify(vmcs12->secondary_vm_exec_control,
8417               nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high) ||
8418             !vmx_control_verify(vmcs12->pin_based_vm_exec_control,
8419               nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high) ||
8420             !vmx_control_verify(vmcs12->vm_exit_controls,
8421                                 nested_vmx_true_exit_ctls_low,
8422                                 nested_vmx_exit_ctls_high) ||
8423             !vmx_control_verify(vmcs12->vm_entry_controls,
8424                                 nested_vmx_true_entry_ctls_low,
8425                                 nested_vmx_entry_ctls_high))
8426         {
8427                 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
8428                 return 1;
8429         }
8430
8431         if (((vmcs12->host_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
8432             ((vmcs12->host_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
8433                 nested_vmx_failValid(vcpu,
8434                         VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
8435                 return 1;
8436         }
8437
8438         if (!nested_cr0_valid(vmcs12, vmcs12->guest_cr0) ||
8439             ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
8440                 nested_vmx_entry_failure(vcpu, vmcs12,
8441                         EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
8442                 return 1;
8443         }
8444         if (vmcs12->vmcs_link_pointer != -1ull) {
8445                 nested_vmx_entry_failure(vcpu, vmcs12,
8446                         EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
8447                 return 1;
8448         }
8449
8450         /*
8451          * If the load IA32_EFER VM-entry control is 1, the following checks
8452          * are performed on the field for the IA32_EFER MSR:
8453          * - Bits reserved in the IA32_EFER MSR must be 0.
8454          * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
8455          *   the IA-32e mode guest VM-exit control. It must also be identical
8456          *   to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
8457          *   CR0.PG) is 1.
8458          */
8459         if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER) {
8460                 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
8461                 if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
8462                     ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
8463                     ((vmcs12->guest_cr0 & X86_CR0_PG) &&
8464                      ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
8465                         nested_vmx_entry_failure(vcpu, vmcs12,
8466                                 EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
8467                         return 1;
8468                 }
8469         }
8470
8471         /*
8472          * If the load IA32_EFER VM-exit control is 1, bits reserved in the
8473          * IA32_EFER MSR must be 0 in the field for that register. In addition,
8474          * the values of the LMA and LME bits in the field must each be that of
8475          * the host address-space size VM-exit control.
8476          */
8477         if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
8478                 ia32e = (vmcs12->vm_exit_controls &
8479                          VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
8480                 if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
8481                     ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
8482                     ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) {
8483                         nested_vmx_entry_failure(vcpu, vmcs12,
8484                                 EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
8485                         return 1;
8486                 }
8487         }
8488
8489         /*
8490          * We're finally done with prerequisite checking, and can start with
8491          * the nested entry.
8492          */
8493
8494         vmcs02 = nested_get_current_vmcs02(vmx);
8495         if (!vmcs02)
8496                 return -ENOMEM;
8497
8498         enter_guest_mode(vcpu);
8499
8500         vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
8501
8502         if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
8503                 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
8504
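        /*
         * Switch this vcpu over to vmcs02: the vmx_vcpu_put()/vmx_vcpu_load()
         * pair below makes vmcs02 the current VMCS on this cpu and refreshes
         * the host-state fields that vmx_vcpu_load() maintains.
         */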
8505         cpu = get_cpu();
8506         vmx->loaded_vmcs = vmcs02;
8507         vmx_vcpu_put(vcpu);
8508         vmx_vcpu_load(vcpu, cpu);
8509         vcpu->cpu = cpu;
8510         put_cpu();
8511
8512         vmx_segment_cache_clear(vmx);
8513
8514         vmcs12->launch_state = 1;
8515
8516         prepare_vmcs02(vcpu, vmcs12);
8517
8518         if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
8519                 return kvm_emulate_halt(vcpu);
8520
8521         vmx->nested.nested_run_pending = 1;
8522
8523         /*
8524          * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
8525          * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
8526          * returned as far as L1 is concerned. It will only return (and set
8527          * the success flag) when L2 exits (see nested_vmx_vmexit()).
8528          */
8529         return 1;
8530 }
8531
8532 /*
8533  * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
8534  * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
8535  * This function returns the new value we should put in vmcs12.guest_cr0.
8536  * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
8537  *  1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
8538  *     available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
8539  *     didn't trap the bit, because if L1 did, so would L0).
8540  *  2. Bits that L1 asked to trap (and therefore L0 also did) could not have
8541  *     been modified by L2, and L1 knows it. So just leave the old value of
8542  *     the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
8543  *     isn't relevant, because if L0 traps this bit it can set it to anything.
8544  *  3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
8545  *     changed these bits, and therefore they need to be updated, but L0
8546  *     didn't necessarily allow them to be changed in GUEST_CR0 - and rather
8547  *     put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
8548  */
8549 static inline unsigned long
8550 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
8551 {
8552         return
8553         /*1*/   (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
8554         /*2*/   (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
8555         /*3*/   (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
8556                         vcpu->arch.cr0_guest_owned_bits));
8557 }
8558
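/* The same computation as vmcs12_guest_cr0() above, applied to CR4. */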
8559 static inline unsigned long
8560 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
8561 {
8562         return
8563         /*1*/   (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
8564         /*2*/   (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
8565         /*3*/   (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
8566                         vcpu->arch.cr4_guest_owned_bits));
8567 }
8568
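/*
 * Save any event that was in the middle of being delivered to L2 when the
 * exit occurred (a re-injected exception, an NMI, or an interrupt) into
 * vmcs12's IDT-vectoring information fields, so that L1 sees it just as
 * hardware would have reported it.
 */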
8569 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
8570                                        struct vmcs12 *vmcs12)
8571 {
8572         u32 idt_vectoring;
8573         unsigned int nr;
8574
8575         if (vcpu->arch.exception.pending && vcpu->arch.exception.reinject) {
8576                 nr = vcpu->arch.exception.nr;
8577                 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
8578
8579                 if (kvm_exception_is_soft(nr)) {
8580                         vmcs12->vm_exit_instruction_len =
8581                                 vcpu->arch.event_exit_inst_len;
8582                         idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
8583                 } else
8584                         idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
8585
8586                 if (vcpu->arch.exception.has_error_code) {
8587                         idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
8588                         vmcs12->idt_vectoring_error_code =
8589                                 vcpu->arch.exception.error_code;
8590                 }
8591
8592                 vmcs12->idt_vectoring_info_field = idt_vectoring;
8593         } else if (vcpu->arch.nmi_injected) {
8594                 vmcs12->idt_vectoring_info_field =
8595                         INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
8596         } else if (vcpu->arch.interrupt.pending) {
8597                 nr = vcpu->arch.interrupt.nr;
8598                 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
8599
8600                 if (vcpu->arch.interrupt.soft) {
8601                         idt_vectoring |= INTR_TYPE_SOFT_INTR;
8602                         vmcs12->vm_entry_instruction_len =
8603                                 vcpu->arch.event_exit_inst_len;
8604                 } else
8605                         idt_vectoring |= INTR_TYPE_EXT_INTR;
8606
8607                 vmcs12->idt_vectoring_info_field = idt_vectoring;
8608         }
8609 }
8610
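/*
 * Check whether a pending event (expired VMX-preemption timer, NMI, or
 * external interrupt) should cause an exit from L2 to L1 now. Returns
 * -EBUSY when the exit must be deferred because a nested entry is still
 * pending.
 */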
8611 static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
8612 {
8613         struct vcpu_vmx *vmx = to_vmx(vcpu);
8614
8615         if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
8616             vmx->nested.preemption_timer_expired) {
8617                 if (vmx->nested.nested_run_pending)
8618                         return -EBUSY;
8619                 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
8620                 return 0;
8621         }
8622
8623         if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
8624                 if (vmx->nested.nested_run_pending ||
8625                     vcpu->arch.interrupt.pending)
8626                         return -EBUSY;
8627                 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
8628                                   NMI_VECTOR | INTR_TYPE_NMI_INTR |
8629                                   INTR_INFO_VALID_MASK, 0);
8630                 /*
8631                  * The NMI-triggered VM exit counts as injection:
8632                  * clear this one and block further NMIs.
8633                  */
8634                 vcpu->arch.nmi_pending = 0;
8635                 vmx_set_nmi_mask(vcpu, true);
8636                 return 0;
8637         }
8638
8639         if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
8640             nested_exit_on_intr(vcpu)) {
8641                 if (vmx->nested.nested_run_pending)
8642                         return -EBUSY;
8643                 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
8644         }
8645
8646         return 0;
8647 }
8648
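/*
 * Convert the time remaining on the emulated preemption-timer hrtimer back
 * into VMX-preemption-timer units: nanoseconds are scaled to guest TSC
 * cycles (virtual_tsc_khz is in kHz, hence the divide by 10^6) and then
 * shifted down by the preemption-timer rate KVM advertises in the VMX
 * MISC MSR.
 */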
8649 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
8650 {
8651         ktime_t remaining =
8652                 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
8653         u64 value;
8654
8655         if (ktime_to_ns(remaining) <= 0)
8656                 return 0;
8657
8658         value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
8659         do_div(value, 1000000);
8660         return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
8661 }
8662
8663 /*
8664  * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
8665  * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
8666  * and this function updates it to reflect the changes to the guest state while
8667  * L2 was running (and perhaps made some exits which were handled directly by L0
8668  * without going back to L1), and to reflect the exit reason.
8669  * Note that we do not have to copy all VMCS fields here, just those that
8670  * could have been changed by the L2 guest or the exit - i.e., the guest-state and
8671  * exit-information fields only. Other fields are modified by L1 with VMWRITE,
8672  * which already writes to vmcs12 directly.
8673  */
8674 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
8675                            u32 exit_reason, u32 exit_intr_info,
8676                            unsigned long exit_qualification)
8677 {
8678         /* update guest state fields: */
8679         vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
8680         vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
8681
8682         vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
8683         vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
8684         vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
8685
8686         vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
8687         vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
8688         vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
8689         vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
8690         vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
8691         vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
8692         vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
8693         vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
8694         vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
8695         vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
8696         vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
8697         vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
8698         vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
8699         vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
8700         vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
8701         vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
8702         vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
8703         vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
8704         vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
8705         vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
8706         vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
8707         vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
8708         vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
8709         vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
8710         vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
8711         vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
8712         vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
8713         vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
8714         vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
8715         vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
8716         vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
8717         vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
8718         vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
8719         vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
8720         vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
8721         vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
8722
8723         vmcs12->guest_interruptibility_info =
8724                 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
8725         vmcs12->guest_pending_dbg_exceptions =
8726                 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
8727         if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
8728                 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
8729         else
8730                 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
8731
8732         if (nested_cpu_has_preemption_timer(vmcs12)) {
8733                 if (vmcs12->vm_exit_controls &
8734                     VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
8735                         vmcs12->vmx_preemption_timer_value =
8736                                 vmx_get_preemption_timer_value(vcpu);
8737                 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
8738         }
8739
8740         /*
8741          * In some cases (usually, nested EPT), L2 is allowed to change its
8742          * own CR3 without exiting. If it has changed it, we must keep it.
8743          * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
8744          * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
8745          *
8746          * Additionally, restore L2's PDPTRs to vmcs12.
8747          */
8748         if (enable_ept) {
8749                 vmcs12->guest_cr3 = vmcs_read64(GUEST_CR3);
8750                 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
8751                 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
8752                 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
8753                 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
8754         }
8755
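        /*
         * Preserve the IA-32e mode guest bit as vmcs02 left it (KVM updates
         * it when L2 changes EFER.LMA); all other VM-entry control bits keep
         * the values L1 wrote.
         */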
8756         vmcs12->vm_entry_controls =
8757                 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
8758                 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
8759
8760         if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) {
8761                 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
8762                 vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
8763         }
8764
8765         /* TODO: These cannot have changed unless we have MSR bitmaps and
8766          * the relevant bit asks not to trap the change */
8767         if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
8768                 vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
8769         if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
8770                 vmcs12->guest_ia32_efer = vcpu->arch.efer;
8771         vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
8772         vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
8773         vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
8774         if (vmx_mpx_supported())
8775                 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
8776
8777         /* update exit information fields: */
8778
8779         vmcs12->vm_exit_reason = exit_reason;
8780         vmcs12->exit_qualification = exit_qualification;
8781
8782         vmcs12->vm_exit_intr_info = exit_intr_info;
8783         if ((vmcs12->vm_exit_intr_info &
8784              (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
8785             (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK))
8786                 vmcs12->vm_exit_intr_error_code =
8787                         vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
8788         vmcs12->idt_vectoring_info_field = 0;
8789         vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
8790         vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
8791
8792         if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
8793                 /* vm_entry_intr_info_field is cleared on exit. Emulate this
8794                  * instead of reading the real value. */
8795                 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
8796
8797                 /*
8798                  * Transfer the event that L0 or L1 may have wanted to inject into
8799                  * L2 to IDT_VECTORING_INFO_FIELD.
8800                  */
8801                 vmcs12_save_pending_event(vcpu, vmcs12);
8802         }
8803
8804         /*
8805          * Drop what we picked up for L2 via vmx_complete_interrupts. It is
8806          * preserved above and would only end up incorrectly in L1.
8807          */
8808         vcpu->arch.nmi_injected = false;
8809         kvm_clear_exception_queue(vcpu);
8810         kvm_clear_interrupt_queue(vcpu);
8811 }
8812
8813 /*
8814  * A part of what we need to do when the nested L2 guest exits and we want to
8815  * run its L1 parent is to reset L1's guest state to the host state specified
8816  * in vmcs12.
8817  * This function is to be called not only on normal nested exit, but also on
8818  * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
8819  * Failures During or After Loading Guest State").
8820  * This function should be called when the active VMCS is L1's (vmcs01).
8821  */
8822 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
8823                                    struct vmcs12 *vmcs12)
8824 {
8825         struct kvm_segment seg;
8826
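        /*
         * Per SDM 27.5.1, when the exit does not load IA32_EFER explicitly,
         * EFER.LMA and EFER.LME follow the "host address-space size" exit
         * control.
         */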
8827         if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
8828                 vcpu->arch.efer = vmcs12->host_ia32_efer;
8829         else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
8830                 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
8831         else
8832                 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
8833         vmx_set_efer(vcpu, vcpu->arch.efer);
8834
8835         kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
8836         kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
8837         vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
8838         /*
8839          * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
8840          * actually changed, because it depends on the current state of
8841          * fpu_active (which may have changed).
8842          * Note that vmx_set_cr0 refers to efer set above.
8843          */
8844         vmx_set_cr0(vcpu, vmcs12->host_cr0);
8845         /*
8846          * If we did fpu_activate()/fpu_deactivate() during L2's run, we need
8847          * to apply the same changes to L1's vmcs. We just set cr0 correctly,
8848          * but we also need to update cr0_guest_host_mask and exception_bitmap.
8849          */
8850         update_exception_bitmap(vcpu);
8851         vcpu->arch.cr0_guest_owned_bits = (vcpu->fpu_active ? X86_CR0_TS : 0);
8852         vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
8853
8854         /*
8855          * Note that CR4_GUEST_HOST_MASK is already set in the original vmcs01
8856          * (KVM doesn't change it) - no reason to call set_cr4_guest_host_mask();
8857          */
8858         vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
8859         kvm_set_cr4(vcpu, vmcs12->host_cr4);
8860
8861         nested_ept_uninit_mmu_context(vcpu);
8862
8863         kvm_set_cr3(vcpu, vmcs12->host_cr3);
8864         kvm_mmu_reset_context(vcpu);
8865
8866         if (!enable_ept)
8867                 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
8868
8869         if (enable_vpid) {
8870                 /*
8871                  * Trivially support vpid by letting L2s share their parent
8872                  * L1's vpid. TODO: move to a more elaborate solution, giving
8873                  * each L2 its own vpid and exposing the vpid feature to L1.
8874                  */
8875                 vmx_flush_tlb(vcpu);
8876         }
8877
8878
8879         vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
8880         vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
8881         vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
8882         vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
8883         vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
8884
8885         /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1.  */
8886         if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
8887                 vmcs_write64(GUEST_BNDCFGS, 0);
8888
8889         if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
8890                 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
8891                 vcpu->arch.pat = vmcs12->host_ia32_pat;
8892         }
8893         if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
8894                 vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
8895                         vmcs12->host_ia32_perf_global_ctrl);
8896
8897         /* Set L1 segment info according to Intel SDM
8898             27.5.2 Loading Host Segment and Descriptor-Table Registers */
8899         seg = (struct kvm_segment) {
8900                 .base = 0,
8901                 .limit = 0xFFFFFFFF,
8902                 .selector = vmcs12->host_cs_selector,
8903                 .type = 11,
8904                 .present = 1,
8905                 .s = 1,
8906                 .g = 1
8907         };
8908         if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
8909                 seg.l = 1;
8910         else
8911                 seg.db = 1;
8912         vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
8913         seg = (struct kvm_segment) {
8914                 .base = 0,
8915                 .limit = 0xFFFFFFFF,
8916                 .type = 3,
8917                 .present = 1,
8918                 .s = 1,
8919                 .db = 1,
8920                 .g = 1
8921         };
8922         seg.selector = vmcs12->host_ds_selector;
8923         vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
8924         seg.selector = vmcs12->host_es_selector;
8925         vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
8926         seg.selector = vmcs12->host_ss_selector;
8927         vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
8928         seg.selector = vmcs12->host_fs_selector;
8929         seg.base = vmcs12->host_fs_base;
8930         vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
8931         seg.selector = vmcs12->host_gs_selector;
8932         seg.base = vmcs12->host_gs_base;
8933         vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
8934         seg = (struct kvm_segment) {
8935                 .base = vmcs12->host_tr_base,
8936                 .limit = 0x67,
8937                 .selector = vmcs12->host_tr_selector,
8938                 .type = 11,
8939                 .present = 1
8940         };
8941         vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
8942
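        /* VM-exit loads DR7 with 400H and clears IA32_DEBUGCTL (SDM 27.5.1). */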
8943         kvm_set_dr(vcpu, 7, 0x400);
8944         vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
8945 }
8946
8947 /*
8948  * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
8949  * and modify vmcs12 to make it see what it would expect to see there if
8950  * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
8951  */
8952 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
8953                               u32 exit_intr_info,
8954                               unsigned long exit_qualification)
8955 {
8956         struct vcpu_vmx *vmx = to_vmx(vcpu);
8957         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
8958
8959         /* trying to cancel vmlaunch/vmresume is a bug */
8960         WARN_ON_ONCE(vmx->nested.nested_run_pending);
8961
8962         leave_guest_mode(vcpu);
8963         prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
8964                        exit_qualification);
8965
8966         vmx_load_vmcs01(vcpu);
8967
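        /*
         * If L1 asked to acknowledge the interrupt on exit, acknowledge it
         * now and expose its vector through the VM-exit interruption
         * information field.
         */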
8968         if ((exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
8969             && nested_exit_intr_ack_set(vcpu)) {
8970                 int irq = kvm_cpu_get_interrupt(vcpu);
8971                 WARN_ON(irq < 0);
8972                 vmcs12->vm_exit_intr_info = irq |
8973                         INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
8974         }
8975
8976         trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
8977                                        vmcs12->exit_qualification,
8978                                        vmcs12->idt_vectoring_info_field,
8979                                        vmcs12->vm_exit_intr_info,
8980                                        vmcs12->vm_exit_intr_error_code,
8981                                        KVM_ISA_VMX);
8982
8983         vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS));
8984         vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS));
8985         vmx_segment_cache_clear(vmx);
8986
8987         /* if no vmcs02 cache requested, remove the one we used */
8988         if (VMCS02_POOL_SIZE == 0)
8989                 nested_free_vmcs02(vmx, vmx->nested.current_vmptr);
8990
8991         load_vmcs12_host_state(vcpu, vmcs12);
8992
8993         /* Update TSC_OFFSET if TSC was changed while L2 ran */
8994         vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
8995
8996         /* This is needed for same reason as it was needed in prepare_vmcs02 */
8997         vmx->host_rsp = 0;
8998
8999         /* Unpin physical memory we referred to in vmcs02 */
9000         if (vmx->nested.apic_access_page) {
9001                 nested_release_page(vmx->nested.apic_access_page);
9002                 vmx->nested.apic_access_page = NULL;
9003         }
9004         if (vmx->nested.virtual_apic_page) {
9005                 nested_release_page(vmx->nested.virtual_apic_page);
9006                 vmx->nested.virtual_apic_page = NULL;
9007         }
9008
9009         /*
9010          * While we were running L2, the mmu_notifier may have forced a reload of
9011          * the page's hpa for the L2 vmcs. Reload it for L1 before entering L1.
9012          */
9013         kvm_vcpu_reload_apic_access_page(vcpu);
9014
9015         /*
9016          * Exiting from L2 to L1, we're now back to L1 which thinks it just
9017          * finished a VMLAUNCH or VMRESUME instruction, so we need to set the
9018          * success or failure flag accordingly.
9019          */
9020         if (unlikely(vmx->fail)) {
9021                 vmx->fail = 0;
9022                 nested_vmx_failValid(vcpu, vmcs_read32(VM_INSTRUCTION_ERROR));
9023         } else
9024                 nested_vmx_succeed(vcpu);
9025         if (enable_shadow_vmcs)
9026                 vmx->nested.sync_shadow_vmcs = true;
9027
9028         /* in case we halted in L2 */
9029         vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
9030 }
9031
9032 /*
9033  * Forcibly leave nested mode in order to be able to reset the VCPU later on.
9034  */
9035 static void vmx_leave_nested(struct kvm_vcpu *vcpu)
9036 {
9037         if (is_guest_mode(vcpu))
9038                 nested_vmx_vmexit(vcpu, -1, 0, 0);
9039         free_nested(to_vmx(vcpu));
9040 }
9041
9042 /*
9043  * L1's failure to enter L2 is a subset of a normal exit, as explained in
9044  * 23.7 "VM-entry failures during or after loading guest state" (this also
9045  * lists the acceptable exit-reason and exit-qualification parameters).
9046  * It should only be called before L2 has actually started to run, and when
9047  * vmcs01 is current (it doesn't leave_guest_mode() or switch VMCSs).
9048  */
9049 static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
9050                         struct vmcs12 *vmcs12,
9051                         u32 reason, unsigned long qualification)
9052 {
9053         load_vmcs12_host_state(vcpu, vmcs12);
9054         vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
9055         vmcs12->exit_qualification = qualification;
9056         nested_vmx_succeed(vcpu);
9057         if (enable_shadow_vmcs)
9058                 to_vmx(vcpu)->nested.sync_shadow_vmcs = true;
9059 }
9060
9061 static int vmx_check_intercept(struct kvm_vcpu *vcpu,
9062                                struct x86_instruction_info *info,
9063                                enum x86_intercept_stage stage)
9064 {
9065         return X86EMUL_CONTINUE;
9066 }
9067
9068 static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
9069 {
9070         if (ple_gap)
9071                 shrink_ple_window(vcpu);
9072 }
9073
9074 static struct kvm_x86_ops vmx_x86_ops = {
9075         .cpu_has_kvm_support = cpu_has_kvm_support,
9076         .disabled_by_bios = vmx_disabled_by_bios,
9077         .hardware_setup = hardware_setup,
9078         .hardware_unsetup = hardware_unsetup,
9079         .check_processor_compatibility = vmx_check_processor_compat,
9080         .hardware_enable = hardware_enable,
9081         .hardware_disable = hardware_disable,
9082         .cpu_has_accelerated_tpr = report_flexpriority,
9083
9084         .vcpu_create = vmx_create_vcpu,
9085         .vcpu_free = vmx_free_vcpu,
9086         .vcpu_reset = vmx_vcpu_reset,
9087
9088         .prepare_guest_switch = vmx_save_host_state,
9089         .vcpu_load = vmx_vcpu_load,
9090         .vcpu_put = vmx_vcpu_put,
9091
9092         .update_db_bp_intercept = update_exception_bitmap,
9093         .get_msr = vmx_get_msr,
9094         .set_msr = vmx_set_msr,
9095         .get_segment_base = vmx_get_segment_base,
9096         .get_segment = vmx_get_segment,
9097         .set_segment = vmx_set_segment,
9098         .get_cpl = vmx_get_cpl,
9099         .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
9100         .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
9101         .decache_cr3 = vmx_decache_cr3,
9102         .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
9103         .set_cr0 = vmx_set_cr0,
9104         .set_cr3 = vmx_set_cr3,
9105         .set_cr4 = vmx_set_cr4,
9106         .set_efer = vmx_set_efer,
9107         .get_idt = vmx_get_idt,
9108         .set_idt = vmx_set_idt,
9109         .get_gdt = vmx_get_gdt,
9110         .set_gdt = vmx_set_gdt,
9111         .get_dr6 = vmx_get_dr6,
9112         .set_dr6 = vmx_set_dr6,
9113         .set_dr7 = vmx_set_dr7,
9114         .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
9115         .cache_reg = vmx_cache_reg,
9116         .get_rflags = vmx_get_rflags,
9117         .set_rflags = vmx_set_rflags,
9118         .fpu_deactivate = vmx_fpu_deactivate,
9119
9120         .tlb_flush = vmx_flush_tlb,
9121
9122         .run = vmx_vcpu_run,
9123         .handle_exit = vmx_handle_exit,
9124         .skip_emulated_instruction = skip_emulated_instruction,
9125         .set_interrupt_shadow = vmx_set_interrupt_shadow,
9126         .get_interrupt_shadow = vmx_get_interrupt_shadow,
9127         .patch_hypercall = vmx_patch_hypercall,
9128         .set_irq = vmx_inject_irq,
9129         .set_nmi = vmx_inject_nmi,
9130         .queue_exception = vmx_queue_exception,
9131         .cancel_injection = vmx_cancel_injection,
9132         .interrupt_allowed = vmx_interrupt_allowed,
9133         .nmi_allowed = vmx_nmi_allowed,
9134         .get_nmi_mask = vmx_get_nmi_mask,
9135         .set_nmi_mask = vmx_set_nmi_mask,
9136         .enable_nmi_window = enable_nmi_window,
9137         .enable_irq_window = enable_irq_window,
9138         .update_cr8_intercept = update_cr8_intercept,
9139         .set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
9140         .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
9141         .vm_has_apicv = vmx_vm_has_apicv,
9142         .load_eoi_exitmap = vmx_load_eoi_exitmap,
9143         .hwapic_irr_update = vmx_hwapic_irr_update,
9144         .hwapic_isr_update = vmx_hwapic_isr_update,
9145         .sync_pir_to_irr = vmx_sync_pir_to_irr,
9146         .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
9147
9148         .set_tss_addr = vmx_set_tss_addr,
9149         .get_tdp_level = get_ept_level,
9150         .get_mt_mask = vmx_get_mt_mask,
9151
9152         .get_exit_info = vmx_get_exit_info,
9153
9154         .get_lpage_level = vmx_get_lpage_level,
9155
9156         .cpuid_update = vmx_cpuid_update,
9157
9158         .rdtscp_supported = vmx_rdtscp_supported,
9159         .invpcid_supported = vmx_invpcid_supported,
9160
9161         .set_supported_cpuid = vmx_set_supported_cpuid,
9162
9163         .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
9164
9165         .set_tsc_khz = vmx_set_tsc_khz,
9166         .read_tsc_offset = vmx_read_tsc_offset,
9167         .write_tsc_offset = vmx_write_tsc_offset,
9168         .adjust_tsc_offset = vmx_adjust_tsc_offset,
9169         .compute_tsc_offset = vmx_compute_tsc_offset,
9170         .read_l1_tsc = vmx_read_l1_tsc,
9171
9172         .set_tdp_cr3 = vmx_set_cr3,
9173
9174         .check_intercept = vmx_check_intercept,
9175         .handle_external_intr = vmx_handle_external_intr,
9176         .mpx_supported = vmx_mpx_supported,
9177
9178         .check_nested_events = vmx_check_nested_events,
9179
9180         .sched_in = vmx_sched_in,
9181 };
9182
9183 static int __init vmx_init(void)
9184 {
9185         int r, i, msr;
9186
9187         rdmsrl_safe(MSR_EFER, &host_efer);
9188
9189         for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
9190                 kvm_define_shared_msr(i, vmx_msr_index[i]);
9191
9192         vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
9193         if (!vmx_io_bitmap_a)
9194                 return -ENOMEM;
9195
9196         r = -ENOMEM;
9197
9198         vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
9199         if (!vmx_io_bitmap_b)
9200                 goto out;
9201
9202         vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
9203         if (!vmx_msr_bitmap_legacy)
9204                 goto out1;
9205
9206         vmx_msr_bitmap_legacy_x2apic =
9207                                 (unsigned long *)__get_free_page(GFP_KERNEL);
9208         if (!vmx_msr_bitmap_legacy_x2apic)
9209                 goto out2;
9210
9211         vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
9212         if (!vmx_msr_bitmap_longmode)
9213                 goto out3;
9214
9215         vmx_msr_bitmap_longmode_x2apic =
9216                                 (unsigned long *)__get_free_page(GFP_KERNEL);
9217         if (!vmx_msr_bitmap_longmode_x2apic)
9218                 goto out4;
9219         vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
9220         if (!vmx_vmread_bitmap)
9221                 goto out5;
9222
9223         vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
9224         if (!vmx_vmwrite_bitmap)
9225                 goto out6;
9226
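        /* By default, every VMREAD/VMWRITE from a nested guest causes an exit. */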
9227         memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
9228         memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
9229
9230         /*
9231          * Allow direct access to the PC debug port (it is often used for I/O
9232          * delays, but the vmexits simply slow things down).
9233          */
9234         memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
9235         clear_bit(0x80, vmx_io_bitmap_a);
9236
9237         memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
9238
9239         memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
9240         memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
9241
9242         set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
9243
9244         r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
9245                      __alignof__(struct vcpu_vmx), THIS_MODULE);
9246         if (r)
9247                 goto out7;
9248
9249 #ifdef CONFIG_KEXEC
9250         rcu_assign_pointer(crash_vmclear_loaded_vmcss,
9251                            crash_vmclear_local_loaded_vmcss);
9252 #endif
9253
9254         vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
9255         vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
9256         vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
9257         vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
9258         vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
9259         vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
9260         vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
9261
9262         memcpy(vmx_msr_bitmap_legacy_x2apic,
9263                         vmx_msr_bitmap_legacy, PAGE_SIZE);
9264         memcpy(vmx_msr_bitmap_longmode_x2apic,
9265                         vmx_msr_bitmap_longmode, PAGE_SIZE);
9266
9267         if (enable_apicv) {
9268                 for (msr = 0x800; msr <= 0x8ff; msr++)
9269                         vmx_disable_intercept_msr_read_x2apic(msr);
9270
9271                 /* According to the SDM, in x2apic mode the whole id register
9272                  * is used, but KVM only uses the highest eight bits, so reads
9273                  * of it need to be intercepted. */
9274                 vmx_enable_intercept_msr_read_x2apic(0x802);
9275                 /* TMCCT */
9276                 vmx_enable_intercept_msr_read_x2apic(0x839);
9277                 /* TPR */
9278                 vmx_disable_intercept_msr_write_x2apic(0x808);
9279                 /* EOI */
9280                 vmx_disable_intercept_msr_write_x2apic(0x80b);
9281                 /* SELF-IPI */
9282                 vmx_disable_intercept_msr_write_x2apic(0x83f);
9283         }
9284
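        /*
         * With EPT, configure the MMU's SPTE masks (using the EPT
         * accessed/dirty bits when available) and enable two-dimensional
         * paging; otherwise fall back to shadow paging.
         */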
9285         if (enable_ept) {
9286                 kvm_mmu_set_mask_ptes(0ull,
9287                         (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
9288                         (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
9289                         0ull, VMX_EPT_EXECUTABLE_MASK);
9290                 ept_set_mmio_spte_mask();
9291                 kvm_enable_tdp();
9292         } else
9293                 kvm_disable_tdp();
9294
9295         update_ple_window_actual_max();
9296
9297         return 0;
9298
9299 out7:
9300         free_page((unsigned long)vmx_vmwrite_bitmap);
9301 out6:
9302         free_page((unsigned long)vmx_vmread_bitmap);
9303 out5:
9304         free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
9305 out4:
9306         free_page((unsigned long)vmx_msr_bitmap_longmode);
9307 out3:
9308         free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
9309 out2:
9310         free_page((unsigned long)vmx_msr_bitmap_legacy);
9311 out1:
9312         free_page((unsigned long)vmx_io_bitmap_b);
9313 out:
9314         free_page((unsigned long)vmx_io_bitmap_a);
9315         return r;
9316 }
9317
9318 static void __exit vmx_exit(void)
9319 {
9320         free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
9321         free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
9322         free_page((unsigned long)vmx_msr_bitmap_legacy);
9323         free_page((unsigned long)vmx_msr_bitmap_longmode);
9324         free_page((unsigned long)vmx_io_bitmap_b);
9325         free_page((unsigned long)vmx_io_bitmap_a);
9326         free_page((unsigned long)vmx_vmwrite_bitmap);
9327         free_page((unsigned long)vmx_vmread_bitmap);
9328
9329 #ifdef CONFIG_KEXEC
9330         RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
9331         synchronize_rcu();
9332 #endif
9333
9334         kvm_exit();
9335 }
9336
9337 module_init(vmx_init)
9338 module_exit(vmx_exit)