/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}
early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}
early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
	kvmclock_vsyscall = 0;
	return 0;
}
early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

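/*
 * kvm_async_pf_task_wait(): sleep (or, if we may not schedule, halt) until
 * the host reports that the page identified by @token has been brought in
 * and kvm_async_pf_task_wake() unhashes our node from the sleeper hash.
 */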
void kvm_async_pf_task_wait(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DECLARE_SWAITQUEUE(wait);

	rcu_irq_enter();

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exist -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		raw_spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) || preempt_count() > 1;
	init_swait_queue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	raw_spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			rcu_irq_exit();
			native_safe_halt();
			rcu_irq_enter();
			local_irq_disable();
		}
	}
	if (!n.halted)
		finish_swait(&n.wq, &wait);

	rcu_irq_exit();
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (swait_active(&n->wq))
		swake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}

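/*
 * kvm_async_pf_task_wake(): wake the task sleeping on @token, or leave a
 * dummy node behind if the wakeup arrives before the corresponding
 * "page not present" fault was handled. A token of ~0 means every sleeper
 * on this CPU is woken.
 */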
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	raw_spin_unlock(&b->lock);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		reason = __this_cpu_read(apf_reason.reason);
		__this_cpu_write(apf_reason.reason, 0);
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);

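/*
 * Paravirtual #PF handler, installed in place of the regular page fault
 * entry by kvm_apf_trap_init(). When the per-CPU reason slot carries one
 * of the KVM_PV_REASON_* values, CR2 holds the async PF token rather than
 * a faulting address.
 */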
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		trace_do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		prev_state = exception_enter();
		exit_idle();
		kvm_async_pf_task_wait((u32)read_cr2());
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		exit_idle();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}
NOKPROBE_SYMBOL(do_async_page_fault);

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	memset(st, 0, sizeof(*st));

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic_write(APIC_EOI, APIC_EOI_ACK);
}

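/*
 * Per-CPU guest setup: tell the host where this CPU's async PF reason slot,
 * PV EOI flag and steal time area live by writing their physical addresses
 * into the corresponding KVM MSRs.
 */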
static void kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
		__this_cpu_write(apf_reason.enabled, 1);
		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
	 * New kernel can re-enable when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

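/*
 * Read the steal time published by the host for @cpu. The version field
 * works like a seqcount: an odd value means an update is in progress, so
 * retry until an even, unchanged version is observed.
 */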
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		rmb();
		steal = src->steal;
		rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_online(void *dummy)
{
	kvm_guest_cpu_init();
}

static void kvm_guest_cpu_offline(void *dummy)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_notify(struct notifier_block *self, unsigned long action,
			  void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_ONLINE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_notify,
};
#endif

static void __init kvm_apf_trap_init(void)
{
	set_intr_gate(14, async_page_fault);
}

void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvmclock_vsyscall)
		kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	register_cpu_notifier(&kvm_cpu_notifier);
#else
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}

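/*
 * KVM advertises itself through the hypervisor CPUID leaves: the base leaf
 * carries the "KVMKVMKVM" signature, and base + KVM_CPUID_FEATURES
 * enumerates the paravirt features tested with kvm_para_has_feature() above.
 */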
static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.x2apic_available	= kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#ifdef CONFIG_QUEUED_SPINLOCKS

#include <asm/qspinlock.h>

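/*
 * PV qspinlock wait primitive: halt this vCPU while the lock word at @ptr
 * still contains @val; the lock holder wakes us through a KVM_HC_KICK_CPU
 * hypercall (kvm_kick_cpu()), or any interrupt ends the halt early.
 */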
static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	if (in_nmi())
		return;

	local_irq_save(flags);

	if (READ_ONCE(*ptr) != val)
		goto out;

	/*
	 * halt until it's our turn and kicked. Note that we do safe halt
	 * for irq enabled case to avoid hang when lock info is overwritten
	 * in irq spinlock slowpath and no spurious interrupt occur to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	local_irq_restore(flags);
}

#else /* !CONFIG_QUEUED_SPINLOCKS */

enum kvm_contention_stat {
	TAKEN_SLOW,
	TAKEN_SLOW_PICKUP,
	RELEASED_SLOW,
	RELEASED_SLOW_KICKED,
	NR_CONTENTION_STATS
};

#ifdef CONFIG_KVM_DEBUG_FS
#define HISTO_BUCKETS	30

static struct kvm_spinlock_stats
{
	u32 contention_stats[NR_CONTENTION_STATS];
	u32 histo_spin_blocked[HISTO_BUCKETS+1];
	u64 time_blocked;
} spinlock_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
	u8 ret;
	u8 old;

	old = READ_ONCE(zero_stats);
	if (unlikely(old)) {
		ret = cmpxchg(&zero_stats, old, 0);
		/* This ensures only one fellow resets the stat */
		if (ret == old)
			memset(&spinlock_stats, 0, sizeof(spinlock_stats));
	}
}

static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
	check_zero();
	spinlock_stats.contention_stats[var] += val;
}

static inline u64 spin_time_start(void)
{
	return sched_clock();
}

static void __spin_time_accum(u64 delta, u32 *array)
{
	unsigned index;

	index = ilog2(delta);
	check_zero();

	if (index < HISTO_BUCKETS)
		array[index]++;
	else
		array[HISTO_BUCKETS]++;
}

static inline void spin_time_accum_blocked(u64 start)
{
	u32 delta;

	delta = sched_clock() - start;
	__spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
	spinlock_stats.time_blocked += delta;
}

static struct dentry *d_spin_debug;
static struct dentry *d_kvm_debug;

static struct dentry *kvm_init_debugfs(void)
{
	d_kvm_debug = debugfs_create_dir("kvm-guest", NULL);
	if (!d_kvm_debug)
		printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n");

	return d_kvm_debug;
}

static int __init kvm_spinlock_debugfs(void)
{
	struct dentry *d_kvm;

	d_kvm = kvm_init_debugfs();
	if (d_kvm == NULL)
		return -ENOMEM;

	d_spin_debug = debugfs_create_dir("spinlocks", d_kvm);

	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW]);
	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);

	debugfs_create_u32("released_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW]);
	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);

	debugfs_create_u64("time_blocked", 0444, d_spin_debug,
			   &spinlock_stats.time_blocked);

	debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
				 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

	return 0;
}
fs_initcall(kvm_spinlock_debugfs);
#else  /* !CONFIG_KVM_DEBUG_FS */
static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
}

static inline u64 spin_time_start(void)
{
	return 0;
}

static inline void spin_time_accum_blocked(u64 start)
{
}
#endif  /* CONFIG_KVM_DEBUG_FS */

struct kvm_lock_waiting {
	struct arch_spinlock *lock;
	__ticket_t want;
};

/* cpus 'waiting' on a spinlock to become available */
static cpumask_t waiting_cpus;

/* Track spinlock on which a cpu is waiting */
static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);

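/*
 * Ticketlock slowpath: record which lock and ticket this CPU is waiting for
 * in klock_waiting, then halt until kvm_unlock_kick() finds us in
 * waiting_cpus and kicks this vCPU awake.
 */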
__visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
	struct kvm_lock_waiting *w;
	int cpu;
	u64 start;
	unsigned long flags;
	__ticket_t head;

	if (in_nmi())
		return;

	w = this_cpu_ptr(&klock_waiting);
	cpu = smp_processor_id();
	start = spin_time_start();

	/*
	 * Make sure an interrupt handler can't upset things in a
	 * partially setup state.
	 */
	local_irq_save(flags);

	/*
	 * The ordering protocol on this is that the "lock" pointer
	 * may only be set non-NULL if the "want" ticket is correct.
	 * If we're updating "want", we must first clear "lock".
	 */
	w->lock = NULL;
	smp_wmb();
	w->want = want;
	smp_wmb();
	w->lock = lock;

	add_stats(TAKEN_SLOW, 1);

	/*
	 * This uses set_bit, which is atomic but we should not rely on its
	 * reordering guarantees. So barrier is needed after this call.
	 */
	cpumask_set_cpu(cpu, &waiting_cpus);

	barrier();

	/*
	 * Mark entry to slowpath before doing the pickup test to make
	 * sure we don't deadlock with an unlocker.
	 */
	__ticket_enter_slowpath(lock);

	/* make sure enter_slowpath, which is atomic does not cross the read */
	smp_mb__after_atomic();

	/*
	 * check again make sure it didn't become free while
	 * we weren't looking.
	 */
	head = READ_ONCE(lock->tickets.head);
	if (__tickets_equal(head, want)) {
		add_stats(TAKEN_SLOW_PICKUP, 1);
		goto out;
	}

	/*
	 * halt until it's our turn and kicked. Note that we do safe halt
	 * for irq enabled case to avoid hang when lock info is overwritten
	 * in irq spinlock slowpath and no spurious interrupt occur to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	cpumask_clear_cpu(cpu, &waiting_cpus);
	w->lock = NULL;
	local_irq_restore(flags);
	spin_time_accum_blocked(start);
}
PV_CALLEE_SAVE_REGS_THUNK(kvm_lock_spinning);

/* Kick vcpu waiting on @lock->head to reach value @ticket */
static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
{
	int cpu;

	add_stats(RELEASED_SLOW, 1);
	for_each_cpu(cpu, &waiting_cpus) {
		const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);
		if (READ_ONCE(w->lock) == lock &&
		    READ_ONCE(w->want) == ticket) {
			add_stats(RELEASED_SLOW_KICKED, 1);
			kvm_kick_cpu(cpu);
			break;
		}
	}
}

#endif /* !CONFIG_QUEUED_SPINLOCKS */

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

#ifdef CONFIG_QUEUED_SPINLOCKS
	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = kvm_wait;
	pv_lock_ops.kick = kvm_kick_cpu;
#else /* !CONFIG_QUEUED_SPINLOCKS */
	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
	pv_lock_ops.unlock_kick = kvm_unlock_kick;
#endif
}

static __init int kvm_spinlock_init_jump(void)
{
	if (!kvm_para_available())
		return 0;
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return 0;

	static_key_slow_inc(&paravirt_ticketlocks_enabled);
	printk(KERN_INFO "KVM setup paravirtual spinlock\n");

	return 0;
}
early_initcall(kvm_spinlock_init_jump);

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */