Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[cascardo/linux.git] / arch / x86 / kernel / kvm.c
index 1f431f3..edbbfc8 100644 (file)
@@ -564,9 +564,6 @@ static void kvm_kick_cpu(int cpu)
        kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
 }
 
-
-#ifdef CONFIG_QUEUED_SPINLOCKS
-
 #include <asm/qspinlock.h>
 
 static void kvm_wait(u8 *ptr, u8 val)
@@ -595,243 +592,6 @@ out:
        local_irq_restore(flags);
 }
 
-#else /* !CONFIG_QUEUED_SPINLOCKS */
-
-enum kvm_contention_stat {
-       TAKEN_SLOW,
-       TAKEN_SLOW_PICKUP,
-       RELEASED_SLOW,
-       RELEASED_SLOW_KICKED,
-       NR_CONTENTION_STATS
-};
-
-#ifdef CONFIG_KVM_DEBUG_FS
-#define HISTO_BUCKETS  30
-
-static struct kvm_spinlock_stats
-{
-       u32 contention_stats[NR_CONTENTION_STATS];
-       u32 histo_spin_blocked[HISTO_BUCKETS+1];
-       u64 time_blocked;
-} spinlock_stats;
-
-static u8 zero_stats;
-
-static inline void check_zero(void)
-{
-       u8 ret;
-       u8 old;
-
-       old = READ_ONCE(zero_stats);
-       if (unlikely(old)) {
-               ret = cmpxchg(&zero_stats, old, 0);
-               /* This ensures only one fellow resets the stat */
-               if (ret == old)
-                       memset(&spinlock_stats, 0, sizeof(spinlock_stats));
-       }
-}
-
-static inline void add_stats(enum kvm_contention_stat var, u32 val)
-{
-       check_zero();
-       spinlock_stats.contention_stats[var] += val;
-}
-
-
-static inline u64 spin_time_start(void)
-{
-       return sched_clock();
-}
-
-static void __spin_time_accum(u64 delta, u32 *array)
-{
-       unsigned index;
-
-       index = ilog2(delta);
-       check_zero();
-
-       if (index < HISTO_BUCKETS)
-               array[index]++;
-       else
-               array[HISTO_BUCKETS]++;
-}
-
-static inline void spin_time_accum_blocked(u64 start)
-{
-       u32 delta;
-
-       delta = sched_clock() - start;
-       __spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
-       spinlock_stats.time_blocked += delta;
-}
-
-static struct dentry *d_spin_debug;
-static struct dentry *d_kvm_debug;
-
-static struct dentry *kvm_init_debugfs(void)
-{
-       d_kvm_debug = debugfs_create_dir("kvm-guest", NULL);
-       if (!d_kvm_debug)
-               printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n");
-
-       return d_kvm_debug;
-}
-
-static int __init kvm_spinlock_debugfs(void)
-{
-       struct dentry *d_kvm;
-
-       d_kvm = kvm_init_debugfs();
-       if (d_kvm == NULL)
-               return -ENOMEM;
-
-       d_spin_debug = debugfs_create_dir("spinlocks", d_kvm);
-
-       debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);
-
-       debugfs_create_u32("taken_slow", 0444, d_spin_debug,
-                  &spinlock_stats.contention_stats[TAKEN_SLOW]);
-       debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
-                  &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);
-
-       debugfs_create_u32("released_slow", 0444, d_spin_debug,
-                  &spinlock_stats.contention_stats[RELEASED_SLOW]);
-       debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
-                  &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);
-
-       debugfs_create_u64("time_blocked", 0444, d_spin_debug,
-                          &spinlock_stats.time_blocked);
-
-       debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
-                    spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);
-
-       return 0;
-}
-fs_initcall(kvm_spinlock_debugfs);
-#else  /* !CONFIG_KVM_DEBUG_FS */
-static inline void add_stats(enum kvm_contention_stat var, u32 val)
-{
-}
-
-static inline u64 spin_time_start(void)
-{
-       return 0;
-}
-
-static inline void spin_time_accum_blocked(u64 start)
-{
-}
-#endif  /* CONFIG_KVM_DEBUG_FS */
-
-struct kvm_lock_waiting {
-       struct arch_spinlock *lock;
-       __ticket_t want;
-};
-
-/* cpus 'waiting' on a spinlock to become available */
-static cpumask_t waiting_cpus;
-
-/* Track spinlock on which a cpu is waiting */
-static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);
-
-__visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
-{
-       struct kvm_lock_waiting *w;
-       int cpu;
-       u64 start;
-       unsigned long flags;
-       __ticket_t head;
-
-       if (in_nmi())
-               return;
-
-       w = this_cpu_ptr(&klock_waiting);
-       cpu = smp_processor_id();
-       start = spin_time_start();
-
-       /*
-        * Make sure an interrupt handler can't upset things in a
-        * partially setup state.
-        */
-       local_irq_save(flags);
-
-       /*
-        * The ordering protocol on this is that the "lock" pointer
-        * may only be set non-NULL if the "want" ticket is correct.
-        * If we're updating "want", we must first clear "lock".
-        */
-       w->lock = NULL;
-       smp_wmb();
-       w->want = want;
-       smp_wmb();
-       w->lock = lock;
-
-       add_stats(TAKEN_SLOW, 1);
-
-       /*
-        * This uses set_bit, which is atomic but we should not rely on its
-        * reordering guarantees, so a barrier is needed after this call.
-        */
-       cpumask_set_cpu(cpu, &waiting_cpus);
-
-       barrier();
-
-       /*
-        * Mark entry to slowpath before doing the pickup test to make
-        * sure we don't deadlock with an unlocker.
-        */
-       __ticket_enter_slowpath(lock);
-
-       /* make sure enter_slowpath, which is atomic, does not cross the read */
-       smp_mb__after_atomic();
-
-       /*
-        * check again to make sure it didn't become free while
-        * we weren't looking.
-        */
-       head = READ_ONCE(lock->tickets.head);
-       if (__tickets_equal(head, want)) {
-               add_stats(TAKEN_SLOW_PICKUP, 1);
-               goto out;
-       }
-
-       /*
-        * Halt until it's our turn and we are kicked. Do a safe halt in the
-        * irq-enabled case to avoid hanging if the lock info gets overwritten in
-        * the irq spinlock slowpath and no spurious interrupt occurs to save us.
-        */
-       if (arch_irqs_disabled_flags(flags))
-               halt();
-       else
-               safe_halt();
-
-out:
-       cpumask_clear_cpu(cpu, &waiting_cpus);
-       w->lock = NULL;
-       local_irq_restore(flags);
-       spin_time_accum_blocked(start);
-}
-PV_CALLEE_SAVE_REGS_THUNK(kvm_lock_spinning);
-
-/* Kick vcpu waiting on @lock->head to reach value @ticket */
-static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
-{
-       int cpu;
-
-       add_stats(RELEASED_SLOW, 1);
-       for_each_cpu(cpu, &waiting_cpus) {
-               const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);
-               if (READ_ONCE(w->lock) == lock &&
-                   READ_ONCE(w->want) == ticket) {
-                       add_stats(RELEASED_SLOW_KICKED, 1);
-                       kvm_kick_cpu(cpu);
-                       break;
-               }
-       }
-}
-
-#endif /* !CONFIG_QUEUED_SPINLOCKS */
-
 /*
  * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
  */
@@ -843,16 +603,11 @@ void __init kvm_spinlock_init(void)
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return;
 
-#ifdef CONFIG_QUEUED_SPINLOCKS
        __pv_init_lock_hash();
        pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
        pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_lock_ops.wait = kvm_wait;
        pv_lock_ops.kick = kvm_kick_cpu;
-#else /* !CONFIG_QUEUED_SPINLOCKS */
-       pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
-       pv_lock_ops.unlock_kick = kvm_unlock_kick;
-#endif
 }
 
 static __init int kvm_spinlock_init_jump(void)
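
For reference, below is a minimal user-space sketch (not kernel code) of the wait/kick contract that the queued-spinlock path above wires into pv_lock_ops: the waiter blocks only while the watched byte still holds the expected value, and the unlocker clears the byte before kicking. The pv_wait()/pv_kick() names and the pthread-based parking are illustrative stand-ins for kvm_wait()'s halt and kvm_kick_cpu()'s KVM_HC_KICK_CPU hypercall, not the kernel's APIs.

/*
 * Illustrative sketch only: pv_wait()/pv_kick() here are user-space
 * stand-ins for the kvm_wait()/kvm_kick_cpu() contract, not kernel APIs.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

/* One parking slot per fake "cpu": a stand-in for HLT plus the kick hypercall. */
struct parking {
	pthread_mutex_t m;
	pthread_cond_t c;
	int kicked;
};

static struct parking slot[2];
static _Atomic unsigned char lock_byte;	/* the byte the waiter watches */

/* Block only while *ptr still equals val -- the kvm_wait()-style contract. */
static void pv_wait(int cpu, _Atomic unsigned char *ptr, unsigned char val)
{
	pthread_mutex_lock(&slot[cpu].m);
	/* Re-check before sleeping so a kick sent meanwhile is not lost. */
	while (!slot[cpu].kicked &&
	       atomic_load_explicit(ptr, memory_order_acquire) == val)
		pthread_cond_wait(&slot[cpu].c, &slot[cpu].m);
	slot[cpu].kicked = 0;
	pthread_mutex_unlock(&slot[cpu].m);
}

/* Wake the waiter parked on @cpu -- the kvm_kick_cpu()-style contract. */
static void pv_kick(int cpu)
{
	pthread_mutex_lock(&slot[cpu].m);
	slot[cpu].kicked = 1;
	pthread_cond_signal(&slot[cpu].c);
	pthread_mutex_unlock(&slot[cpu].m);
}

static void *waiter(void *arg)
{
	(void)arg;
	pv_wait(1, &lock_byte, 1);	/* park until the byte changes or we are kicked */
	printf("cpu1: woken, lock byte is %u\n",
	       (unsigned int)atomic_load(&lock_byte));
	return NULL;
}

int main(void)
{
	pthread_t t;
	int i;

	for (i = 0; i < 2; i++) {
		pthread_mutex_init(&slot[i].m, NULL);
		pthread_cond_init(&slot[i].c, NULL);
	}

	atomic_store(&lock_byte, 1);		/* "locked" */
	pthread_create(&t, NULL, waiter, NULL);

	sleep(1);				/* let the waiter park */
	atomic_store_explicit(&lock_byte, 0, memory_order_release);
	pv_kick(1);				/* unlock: clear the byte, then kick */

	pthread_join(t, NULL);
	return 0;
}

The re-check inside pv_wait() mirrors what kvm_wait() does after disabling interrupts: the waiter must look at the byte one more time after it can no longer miss a kick, otherwise an unlock that lands between the caller's last check and the halt would leave it asleep.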