workqueue: better define synchronization rule around rescuer->pool updates
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f4feaca..fd9a28a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -251,8 +251,8 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
        for ((pool) = &std_worker_pools(cpu)[0];                        \
             (pool) < &std_worker_pools(cpu)[NR_STD_WORKER_POOLS]; (pool)++)
 
-#define for_each_busy_worker(worker, i, pos, pool)                     \
-       hash_for_each(pool->busy_hash, i, pos, worker, hentry)
+#define for_each_busy_worker(worker, i, pool)                          \
+       hash_for_each(pool->busy_hash, i, worker, hentry)
 
 static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
                                unsigned int sw)
@@ -909,9 +909,8 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool,
                                                 struct work_struct *work)
 {
        struct worker *worker;
-       struct hlist_node *tmp;
 
-       hash_for_each_possible(pool->busy_hash, worker, tmp, hentry,
+       hash_for_each_possible(pool->busy_hash, worker, hentry,
                               (unsigned long)work)
                if (worker->current_work == work &&
                    worker->current_func == work->func)
@@ -1505,15 +1504,17 @@ static void worker_leave_idle(struct worker *worker)
 }
 
 /**
- * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock pool
- * @worker: self
+ * worker_maybe_bind_and_lock - try to bind %current to worker_pool and lock it
+ * @pool: target worker_pool
+ *
+ * Bind %current to the cpu of @pool if it is associated and lock @pool.
  *
  * Works which are scheduled while the cpu is online must at least be
  * scheduled to a worker which is bound to the cpu so that if they are
  * flushed from cpu callbacks while cpu is going down, they are
  * guaranteed to execute on the cpu.
  *
- * This function is to be used by rogue workers and rescuers to bind
+ * This function is to be used by unbound workers and rescuers to bind
  * themselves to the target cpu and may race with cpu going down or
  * coming online.  kthread_bind() can't be used because it may put the
  * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
@@ -1534,12 +1535,9 @@ static void worker_leave_idle(struct worker *worker)
  * %true if the associated pool is online (@worker is successfully
  * bound), %false if offline.
  */
-static bool worker_maybe_bind_and_lock(struct worker *worker)
+static bool worker_maybe_bind_and_lock(struct worker_pool *pool)
 __acquires(&pool->lock)
 {
-       struct worker_pool *pool = worker->pool;
-       struct task_struct *task = worker->task;
-
        while (true) {
                /*
                 * The following call may fail, succeed or succeed
@@ -1548,12 +1546,12 @@ __acquires(&pool->lock)
                 * against POOL_DISASSOCIATED.
                 */
                if (!(pool->flags & POOL_DISASSOCIATED))
-                       set_cpus_allowed_ptr(task, get_cpu_mask(pool->cpu));
+                       set_cpus_allowed_ptr(current, get_cpu_mask(pool->cpu));
 
                spin_lock_irq(&pool->lock);
                if (pool->flags & POOL_DISASSOCIATED)
                        return false;
-               if (task_cpu(task) == pool->cpu &&
+               if (task_cpu(current) == pool->cpu &&
                    cpumask_equal(&current->cpus_allowed,
                                  get_cpu_mask(pool->cpu)))
                        return true;
@@ -1577,7 +1575,7 @@ __acquires(&pool->lock)
 static void idle_worker_rebind(struct worker *worker)
 {
        /* CPU may go down again inbetween, clear UNBOUND only on success */
-       if (worker_maybe_bind_and_lock(worker))
+       if (worker_maybe_bind_and_lock(worker->pool))
                worker_clr_flags(worker, WORKER_UNBOUND);
 
        /* rebind complete, become available again */
@@ -1595,7 +1593,7 @@ static void busy_worker_rebind_fn(struct work_struct *work)
 {
        struct worker *worker = container_of(work, struct worker, rebind_work);
 
-       if (worker_maybe_bind_and_lock(worker))
+       if (worker_maybe_bind_and_lock(worker->pool))
                worker_clr_flags(worker, WORKER_UNBOUND);
 
        spin_unlock_irq(&worker->pool->lock);
@@ -1626,7 +1624,6 @@ static void busy_worker_rebind_fn(struct work_struct *work)
 static void rebind_workers(struct worker_pool *pool)
 {
        struct worker *worker, *n;
-       struct hlist_node *pos;
        int i;
 
        lockdep_assert_held(&pool->assoc_mutex);
@@ -1648,7 +1645,7 @@ static void rebind_workers(struct worker_pool *pool)
        }
 
        /* rebind busy workers */
-       for_each_busy_worker(worker, i, pos, pool) {
+       for_each_busy_worker(worker, i, pool) {
                struct work_struct *rebind_work = &worker->rebind_work;
                struct workqueue_struct *wq;
 
@@ -2041,7 +2038,7 @@ static bool manage_workers(struct worker *worker)
                 * on @pool's current state.  Try it and adjust
                 * %WORKER_UNBOUND accordingly.
                 */
-               if (worker_maybe_bind_and_lock(worker))
+               if (worker_maybe_bind_and_lock(pool))
                        worker->flags &= ~WORKER_UNBOUND;
                else
                        worker->flags |= WORKER_UNBOUND;
@@ -2360,8 +2357,8 @@ repeat:
                mayday_clear_cpu(cpu, wq->mayday_mask);
 
                /* migrate to the target cpu if possible */
+               worker_maybe_bind_and_lock(pool);
                rescuer->pool = pool;
-               worker_maybe_bind_and_lock(rescuer);
 
                /*
                 * Slurp in all works issued via this workqueue and
@@ -2382,6 +2379,7 @@ repeat:
                if (keep_working(pool))
                        wake_up_worker(pool);
 
+               rescuer->pool = NULL;
                spin_unlock_irq(&pool->lock);
        }
 
@@ -3423,7 +3421,6 @@ static void wq_unbind_fn(struct work_struct *work)
        int cpu = smp_processor_id();
        struct worker_pool *pool;
        struct worker *worker;
-       struct hlist_node *pos;
        int i;
 
        for_each_std_worker_pool(pool, cpu) {
@@ -3442,7 +3439,7 @@ static void wq_unbind_fn(struct work_struct *work)
                list_for_each_entry(worker, &pool->idle_list, entry)
                        worker->flags |= WORKER_UNBOUND;
 
-               for_each_busy_worker(worker, i, pos, pool)
+               for_each_busy_worker(worker, i, pool)
                        worker->flags |= WORKER_UNBOUND;
 
                pool->flags |= POOL_DISASSOCIATED;
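
The reordering in the rescuer hunk above is the core of the patch: rescuer->pool is now assigned only after worker_maybe_bind_and_lock() has taken pool->lock, and it is reset to NULL before that lock is released, so a non-NULL rescuer->pool is only ever visible while the rescuer itself holds the pool's lock. The following is a minimal userspace analogue of that locking discipline; it is only a sketch, not kernel code, and every name in it (fake_pool, fake_rescuer, rescue_one) is hypothetical.

/*
 * Userspace analogue of the rule this patch enforces: a shared
 * "current pool" pointer is set and cleared only while the pool's
 * lock is held, so anyone inspecting it under that lock never sees
 * a stale or half-published value.
 */
#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

struct fake_pool {
	pthread_mutex_t lock;		/* stands in for pool->lock */
	int pending;			/* stands in for queued mayday work */
};

struct fake_rescuer {
	struct fake_pool *pool;		/* stands in for rescuer->pool */
};

static void rescue_one(struct fake_rescuer *rescuer, struct fake_pool *pool)
{
	/*
	 * Lock first (worker_maybe_bind_and_lock() in the patch), then
	 * publish the pool pointer under the lock.
	 */
	pthread_mutex_lock(&pool->lock);
	rescuer->pool = pool;

	/* "process" the pending work while the pool is locked */
	while (pool->pending > 0)
		pool->pending--;

	/*
	 * Clear the pointer before dropping the lock, mirroring the
	 * new "rescuer->pool = NULL" line added by this patch.
	 */
	rescuer->pool = NULL;
	pthread_mutex_unlock(&pool->lock);
}

int main(void)
{
	struct fake_pool pool = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.pending = 3,
	};
	struct fake_rescuer rescuer = { .pool = NULL };

	rescue_one(&rescuer, &pool);
	printf("pending=%d pool=%p\n", pool.pending, (void *)rescuer.pool);
	return 0;
}

With this discipline, any code path that already holds pool->lock can trust what it reads from the rescuer's pool pointer, which appears to be the "synchronization rule" the subject line refers to.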