stop_machine: Remove stop_cpus_lock and lg_double_lock/unlock()
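
In outline (an editorial summary reconstructed from the diff itself, not the
original changelog): the global stop_cpus_lock existed only to keep
stop_two_cpus() and stop_cpus() from queueing their stopper works in opposite
orders on the same CPUs. Stopper works run in queue order, and a multi-cpu
stop work spins until its partners on the other CPUs run too, so reversed
queue orders deadlock. With A = the stop_two_cpus() pair and B = the global
stop (labels invented for the illustration):

	cpu1 queue: [A, B]		cpu2 queue: [B, A]

A on cpu1 spins for A on cpu2, which sits behind B on cpu2; B on cpu2 spins
for B on cpu1, which sits behind A on cpu1. The lglock is replaced by a flag:
stop_cpus() sets stop_cpus_in_progress, under preempt_disable(), for the
duration of its queueing loop, and stop_two_cpus() checks the flag while
holding both per-CPU stopper locks, backing off and retrying until it clears.
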
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 4a1ca5f..ae6f41f 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -20,7 +20,6 @@
 #include <linux/kallsyms.h>
 #include <linux/smpboot.h>
 #include <linux/atomic.h>
-#include <linux/lglock.h>
 #include <linux/nmi.h>
 
 /*
@@ -47,13 +46,9 @@ struct cpu_stopper {
 static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
 static bool stop_machine_initialized = false;
 
-/*
- * Avoids a race between stop_two_cpus and global stop_cpus, where
- * the stoppers could get queued up in reverse order, leading to
- * system deadlock. Using an lglock means stop_two_cpus remains
- * relatively cheap.
- */
-DEFINE_STATIC_LGLOCK(stop_cpus_lock);
+/* static data for stop_cpus */
+static DEFINE_MUTEX(stop_cpus_mutex);
+static bool stop_cpus_in_progress;
 
 static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
 {
@@ -230,14 +225,26 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
        struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
        struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
        int err;
-
-       lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
+retry:
        spin_lock_irq(&stopper1->lock);
        spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
 
        err = -ENOENT;
        if (!stopper1->enabled || !stopper2->enabled)
                goto unlock;
+       /*
+        * Ensure that if we race with __stop_cpus() the stoppers won't get
+        * queued up in reverse order, leading to system deadlock.
+        *
+        * We can't miss stop_cpus_in_progress if queue_stop_cpus_work() has
+        * queued a work on cpu1 but not on cpu2; we hold both locks.
+        *
+        * It can be falsely true, but it is safe to spin until it is cleared;
+        * queue_stop_cpus_work() does everything under preempt_disable().
+        */
+       err = -EDEADLK;
+       if (unlikely(stop_cpus_in_progress))
+               goto unlock;
 
        err = 0;
        __cpu_stop_queue_work(stopper1, work1);
@@ -245,8 +252,12 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
 unlock:
        spin_unlock(&stopper2->lock);
        spin_unlock_irq(&stopper1->lock);
-       lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);
 
+       if (unlikely(err == -EDEADLK)) {
+               while (stop_cpus_in_progress)
+                       cpu_relax();
+               goto retry;
+       }
        return err;
 }
 /**
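
The retry logic above is compact but subtle. Below is an editorial userspace
model of it, not part of the patch, using C11 atomics in place of the kernel's
per-CPU spinlocks; all names (queue_pair, lock1, lock2, in_progress) are
invented for the illustration:

#include <stdatomic.h>
#include <stdbool.h>
#include <sched.h>

static atomic_bool in_progress;               /* models stop_cpus_in_progress */
static atomic_flag lock1 = ATOMIC_FLAG_INIT;  /* models stopper1->lock */
static atomic_flag lock2 = ATOMIC_FLAG_INIT;  /* models stopper2->lock */

static void spin_lock(atomic_flag *l)
{
	while (atomic_flag_test_and_set_explicit(l, memory_order_acquire))
		sched_yield();		/* userspace stand-in for cpu_relax() */
}

static void spin_unlock(atomic_flag *l)
{
	atomic_flag_clear_explicit(l, memory_order_release);
}

/*
 * Models cpu_stop_queue_two_works(): take both locks in a fixed order,
 * refuse to queue while a global stop is queueing, and back off with no
 * locks held before retrying, just like the -EDEADLK path above.
 */
static int queue_pair(void)
{
retry:
	spin_lock(&lock1);
	spin_lock(&lock2);
	if (atomic_load(&in_progress)) {
		spin_unlock(&lock2);
		spin_unlock(&lock1);
		while (atomic_load(&in_progress))
			sched_yield();
		goto retry;
	}
	/* ...queue work1 and work2 here, with both locks held... */
	spin_unlock(&lock2);
	spin_unlock(&lock1);
	return 0;
}

Backing off with no locks held matters: spinning on the flag while still
holding the stopper locks would block the global path, which needs those same
locks to finish queueing and clear the flag.
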
@@ -316,9 +327,6 @@ bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
        return cpu_stop_queue_work(cpu, work_buf);
 }
 
-/* static data for stop_cpus */
-static DEFINE_MUTEX(stop_cpus_mutex);
-
 static bool queue_stop_cpus_work(const struct cpumask *cpumask,
                                 cpu_stop_fn_t fn, void *arg,
                                 struct cpu_stop_done *done)
@@ -332,7 +340,8 @@ static bool queue_stop_cpus_work(const struct cpumask *cpumask,
         * preempted by a stopper which might wait for other stoppers
         * to enter @fn which can lead to deadlock.
         */
-       lg_global_lock(&stop_cpus_lock);
+       preempt_disable();
+       stop_cpus_in_progress = true;
        for_each_cpu(cpu, cpumask) {
                work = &per_cpu(cpu_stopper.stop_work, cpu);
                work->fn = fn;
@@ -341,7 +350,8 @@ static bool queue_stop_cpus_work(const struct cpumask *cpumask,
                if (cpu_stop_queue_work(cpu, work))
                        queued = true;
        }
-       lg_global_unlock(&stop_cpus_lock);
+       stop_cpus_in_progress = false;
+       preempt_enable();
 
        return queued;
 }
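
The other half of the same editorial model, continuing the sketch above: the
queue_stop_cpus_work() side publishes the flag before taking any per-CPU lock
and clears it only after the last work is queued. Together with queue_pair()
above this forms a complete program (stop_all and pair_thread are again
invented names):

#include <pthread.h>

/*
 * Models queue_stop_cpus_work(): if this path has queued behind lock1 but
 * not yet behind lock2, a racing queue_pair() holding both locks is
 * guaranteed to see the flag, because the flag was set before lock1 was
 * taken and released.  In the kernel the whole window runs under
 * preempt_disable(), which bounds how long queue_pair() may spin.
 */
static void stop_all(void)
{
	atomic_store(&in_progress, true);

	spin_lock(&lock1);
	/* ...queue the cpu1 work... */
	spin_unlock(&lock1);

	spin_lock(&lock2);
	/* ...queue the cpu2 work... */
	spin_unlock(&lock2);

	atomic_store(&in_progress, false);
}

static void *pair_thread(void *arg)
{
	(void)arg;
	queue_pair();
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, pair_thread, NULL);
	stop_all();
	pthread_join(t, NULL);
	return 0;
}
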