diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index a3bbaee..edb6de4 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
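
Taken together, the hunks below retire the cpu_stop_done->executed flag and report queueing failure directly instead: cpu_stop_queue_work() and stop_one_cpu_nowait() now return bool (true when the work was queued and the callback will run), while stop_one_cpu() and __stop_cpus() fail fast with -ENOENT when nothing was queued. cpu_stop_signal_done() loses its NULL check and @executed argument, so callers test work->done themselves. cpu_stopper_thread() brackets the callback with preempt_count_inc()/preempt_count_dec() rather than preempt_disable()/preempt_enable() and reports a leaked preempt count via %pf instead of kallsyms_lookup(). stop_two_cpus() drops its own preempt_disable()/preempt_enable() pair around queueing.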
@@ -28,7 +28,6 @@
  */
 struct cpu_stop_done {
        atomic_t                nr_todo;        /* nr left to execute */
-       bool                    executed;       /* actually executed? */
        int                     ret;            /* collected return value */
        struct completion       completion;     /* fired if nr_todo reaches 0 */
 };
@@ -63,14 +62,10 @@ static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
 }
 
 /* signal completion unless @done is NULL */
-static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
+static void cpu_stop_signal_done(struct cpu_stop_done *done)
 {
-       if (done) {
-               if (executed)
-                       done->executed = true;
-               if (atomic_dec_and_test(&done->nr_todo))
-                       complete(&done->completion);
-       }
+       if (atomic_dec_and_test(&done->nr_todo))
+               complete(&done->completion);
 }
 
 static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
@@ -81,17 +76,21 @@ static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
 }
 
 /* queue @work to @stopper.  if offline, @work is completed immediately */
-static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
+static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 {
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
        unsigned long flags;
+       bool enabled;
 
        spin_lock_irqsave(&stopper->lock, flags);
-       if (stopper->enabled)
+       enabled = stopper->enabled;
+       if (enabled)
                __cpu_stop_queue_work(stopper, work);
-       else
-               cpu_stop_signal_done(work->done, false);
+       else if (work->done)
+               cpu_stop_signal_done(work->done);
        spin_unlock_irqrestore(&stopper->lock, flags);
+
+       return enabled;
 }
 
 /**
@@ -124,9 +123,10 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
        struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };
 
        cpu_stop_init_done(&done, 1);
-       cpu_stop_queue_work(cpu, &work);
+       if (!cpu_stop_queue_work(cpu, &work))
+               return -ENOENT;
        wait_for_completion(&done.completion);
-       return done.executed ? done.ret : -ENOENT;
+       return done.ret;
 }
 
 /* This controls the threads on each CPU. */
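
With ->executed gone, stop_one_cpu() above only waits when the work was actually queued; -ENOENT now means the target CPU's stopper was disabled and @fn never ran. A minimal caller sketch (the wrapper and its name are hypothetical, not part of the patch):

#include <linux/stop_machine.h>
#include <linux/printk.h>

/* Hypothetical wrapper: -ENOENT means @fn never ran because the target
 * CPU's stopper was disabled (e.g. the CPU is on its way down); any
 * other value is @fn's own return code collected in done.ret. */
static int run_stopped(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	int ret = stop_one_cpu(cpu, fn, arg);

	if (ret == -ENOENT)
		pr_debug("cpu%u: stopper disabled, fn not called\n", cpu);
	return ret;
}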
@@ -258,7 +258,6 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
        struct cpu_stop_work work1, work2;
        struct multi_stop_data msdata;
 
-       preempt_disable();
        msdata = (struct multi_stop_data){
                .fn = fn,
                .data = arg,
@@ -277,16 +276,11 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
 
        if (cpu1 > cpu2)
                swap(cpu1, cpu2);
-       if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2)) {
-               preempt_enable();
+       if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
                return -ENOENT;
-       }
-
-       preempt_enable();
 
        wait_for_completion(&done.completion);
-
-       return done.executed ? done.ret : -ENOENT;
+       return done.ret;
 }
 
 /**
@@ -302,23 +296,28 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
  *
  * CONTEXT:
  * Don't care.
+ *
+ * RETURNS:
+ * true if cpu_stop_work was queued successfully and @fn will be called,
+ * false otherwise.
  */
-void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
+bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
                        struct cpu_stop_work *work_buf)
 {
        *work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
-       cpu_stop_queue_work(cpu, work_buf);
+       return cpu_stop_queue_work(cpu, work_buf);
 }
 
 /* static data for stop_cpus */
 static DEFINE_MUTEX(stop_cpus_mutex);
 
-static void queue_stop_cpus_work(const struct cpumask *cpumask,
+static bool queue_stop_cpus_work(const struct cpumask *cpumask,
                                 cpu_stop_fn_t fn, void *arg,
                                 struct cpu_stop_done *done)
 {
        struct cpu_stop_work *work;
        unsigned int cpu;
+       bool queued = false;
 
        /*
         * Disable preemption while queueing to avoid getting
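
The RETURNS paragraph added above gives fire-and-forget callers of stop_one_cpu_nowait() a way to learn whether @work_buf was consumed. A short usage sketch (hypothetical helper with illustrative names, loosely modeled on how the scheduler kicks a CPU):

#include <linux/stop_machine.h>
#include <linux/percpu.h>

/* Hypothetical per-cpu work buffer and helper: the bool result says
 * whether @fn will run, i.e. whether the buffer is now in flight. */
static DEFINE_PER_CPU(struct cpu_stop_work, kick_work);

static bool kick_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	if (!stop_one_cpu_nowait(cpu, fn, arg, &per_cpu(kick_work, cpu)))
		return false;	/* stopper offline, @fn will not run */
	return true;
}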
@@ -331,9 +330,12 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask,
                work->fn = fn;
                work->arg = arg;
                work->done = done;
-               cpu_stop_queue_work(cpu, work);
+               if (cpu_stop_queue_work(cpu, work))
+                       queued = true;
        }
        lg_global_unlock(&stop_cpus_lock);
+
+       return queued;
 }
 
 static int __stop_cpus(const struct cpumask *cpumask,
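
queue_stop_cpus_work() now reports whether at least one CPU accepted the work. CPUs whose stopper is disabled have their share of @done completed immediately inside cpu_stop_queue_work(), so the nr_todo count that __stop_cpus() initializes from cpumask_weight() below stays balanced even for a partially offline mask.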
@@ -342,9 +344,10 @@ static int __stop_cpus(const struct cpumask *cpumask,
        struct cpu_stop_done done;
 
        cpu_stop_init_done(&done, cpumask_weight(cpumask));
-       queue_stop_cpus_work(cpumask, fn, arg, &done);
+       if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
+               return -ENOENT;
        wait_for_completion(&done.completion);
-       return done.executed ? done.ret : -ENOENT;
+       return done.ret;
 }
 
 /**
@@ -432,7 +435,6 @@ static void cpu_stopper_thread(unsigned int cpu)
 {
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
        struct cpu_stop_work *work;
-       int ret;
 
 repeat:
        work = NULL;
@@ -448,23 +450,19 @@ repeat:
                cpu_stop_fn_t fn = work->fn;
                void *arg = work->arg;
                struct cpu_stop_done *done = work->done;
-               char ksym_buf[KSYM_NAME_LEN] __maybe_unused;
-
-               /* cpu stop callbacks are not allowed to sleep */
-               preempt_disable();
+               int ret;
 
+               /* cpu stop callbacks must not sleep, make in_atomic() == T */
+               preempt_count_inc();
                ret = fn(arg);
-               if (ret)
-                       done->ret = ret;
-
-               /* restore preemption and check it's still balanced */
-               preempt_enable();
+               if (done) {
+                       if (ret)
+                               done->ret = ret;
+                       cpu_stop_signal_done(done);
+               }
+               preempt_count_dec();
                WARN_ONCE(preempt_count(),
-                         "cpu_stop: %s(%p) leaked preempt count\n",
-                         kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
-                                         ksym_buf), arg);
-
-               cpu_stop_signal_done(done, true);
+                         "cpu_stop: %pf(%p) leaked preempt count\n", fn, arg);
                goto repeat;
        }
 }
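
For reference, a sketch of the contract the loop above enforces on callbacks (the callback itself is hypothetical): it is entered with in_atomic() made true by preempt_count_inc(), must not sleep, and must return with a balanced preempt count or the WARN_ONCE() fires, printing the function via %pf.

/* Hypothetical, well-behaved cpu_stop callback. */
static int good_stop_fn(void *arg)
{
	WARN_ON_ONCE(!in_atomic());	/* made true by preempt_count_inc() */

	/* non-sleeping work on this CPU goes here ... */

	return 0;	/* a nonzero value would be stored in done->ret */
}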