diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d12bd95..ef071ca 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4607,84 +4607,65 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
                WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
 }
 
-/*
- * Workqueues should be brought up before normal priority CPU notifiers.
- * This will be registered high priority CPU notifier.
- */
-static int workqueue_cpu_up_callback(struct notifier_block *nfb,
-                                              unsigned long action,
-                                              void *hcpu)
+int workqueue_prepare_cpu(unsigned int cpu)
+{
+       struct worker_pool *pool;
+
+       for_each_cpu_worker_pool(pool, cpu) {
+               if (pool->nr_workers)
+                       continue;
+               if (!create_worker(pool))
+                       return -ENOMEM;
+       }
+       return 0;
+}
+
+int workqueue_online_cpu(unsigned int cpu)
 {
-       int cpu = (unsigned long)hcpu;
        struct worker_pool *pool;
        struct workqueue_struct *wq;
        int pi;
 
-       switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_UP_PREPARE:
-               for_each_cpu_worker_pool(pool, cpu) {
-                       if (pool->nr_workers)
-                               continue;
-                       if (!create_worker(pool))
-                               return NOTIFY_BAD;
-               }
-               break;
-
-       case CPU_DOWN_FAILED:
-       case CPU_ONLINE:
-               mutex_lock(&wq_pool_mutex);
+       mutex_lock(&wq_pool_mutex);
 
-               for_each_pool(pool, pi) {
-                       mutex_lock(&pool->attach_mutex);
+       for_each_pool(pool, pi) {
+               mutex_lock(&pool->attach_mutex);
 
-                       if (pool->cpu == cpu)
-                               rebind_workers(pool);
-                       else if (pool->cpu < 0)
-                               restore_unbound_workers_cpumask(pool, cpu);
+               if (pool->cpu == cpu)
+                       rebind_workers(pool);
+               else if (pool->cpu < 0)
+                       restore_unbound_workers_cpumask(pool, cpu);
 
-                       mutex_unlock(&pool->attach_mutex);
-               }
+               mutex_unlock(&pool->attach_mutex);
+       }
 
-               /* update NUMA affinity of unbound workqueues */
-               list_for_each_entry(wq, &workqueues, list)
-                       wq_update_unbound_numa(wq, cpu, true);
+       /* update NUMA affinity of unbound workqueues */
+       list_for_each_entry(wq, &workqueues, list)
+               wq_update_unbound_numa(wq, cpu, true);
 
-               mutex_unlock(&wq_pool_mutex);
-               break;
-       }
-       return NOTIFY_OK;
+       mutex_unlock(&wq_pool_mutex);
+       return 0;
 }
 
-/*
- * Workqueues should be brought down after normal priority CPU notifiers.
- * This will be registered as low priority CPU notifier.
- */
-static int workqueue_cpu_down_callback(struct notifier_block *nfb,
-                                                unsigned long action,
-                                                void *hcpu)
+int workqueue_offline_cpu(unsigned int cpu)
 {
-       int cpu = (unsigned long)hcpu;
        struct work_struct unbind_work;
        struct workqueue_struct *wq;
 
-       switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_DOWN_PREPARE:
-               /* unbinding per-cpu workers should happen on the local CPU */
-               INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
-               queue_work_on(cpu, system_highpri_wq, &unbind_work);
-
-               /* update NUMA affinity of unbound workqueues */
-               mutex_lock(&wq_pool_mutex);
-               list_for_each_entry(wq, &workqueues, list)
-                       wq_update_unbound_numa(wq, cpu, false);
-               mutex_unlock(&wq_pool_mutex);
-
-               /* wait for per-cpu unbinding to finish */
-               flush_work(&unbind_work);
-               destroy_work_on_stack(&unbind_work);
-               break;
-       }
-       return NOTIFY_OK;
+       /* unbinding per-cpu workers should happen on the local CPU */
+       INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
+       queue_work_on(cpu, system_highpri_wq, &unbind_work);
+
+       /* update NUMA affinity of unbound workqueues */
+       mutex_lock(&wq_pool_mutex);
+       list_for_each_entry(wq, &workqueues, list)
+               wq_update_unbound_numa(wq, cpu, false);
+       mutex_unlock(&wq_pool_mutex);
+
+       /* wait for per-cpu unbinding to finish */
+       flush_work(&unbind_work);
+       destroy_work_on_stack(&unbind_work);
+       return 0;
 }
 
 #ifdef CONFIG_SMP
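
The conversion above replaces one multiplexed notifier, a switch on `action` returning NOTIFY_OK or NOTIFY_BAD, with three plain `int (*)(unsigned int cpu)` callbacks that return 0 or a negative errno, one per hotplug stage. Two cases disappear outright: the `action & ~CPU_TASKS_FROZEN` masking, because the state machine handles suspend/resume transitions itself, and the CPU_DOWN_FAILED case, because a failed offline rolls back by re-running the startup callbacks, i.e. workqueue_online_cpu(). Ordering, formerly encoded in the CPU_PRI_WORKQUEUE_UP/CPU_PRI_WORKQUEUE_DOWN notifier priorities, now comes from the position of the workqueue states in enum cpuhp_state. As a minimal sketch of how callbacks of this shape are registered with the hotplug state machine (my_online_cpu, my_offline_cpu and the "mysubsys:online" name are hypothetical; cpuhp_setup_state() and CPUHP_AP_ONLINE_DYN are real API):

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

/* Hypothetical bring-up callback: called once the CPU is online.
 * Returns 0 on success or a negative errno to fail the hotplug step. */
static int my_online_cpu(unsigned int cpu)
{
	return 0;
}

/* Hypothetical teardown counterpart, called while the CPU goes down. */
static int my_offline_cpu(unsigned int cpu)
{
	return 0;
}

static int __init my_subsys_init(void)
{
	int ret;

	/*
	 * CPUHP_AP_ONLINE_DYN allocates a free dynamic state; the online
	 * callback also runs for every CPU that is already up at
	 * registration time.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mysubsys:online",
				my_online_cpu, my_offline_cpu);
	return ret < 0 ? ret : 0;
}

Core subsystems like the workqueue code do not use the dynamic range; they get dedicated, statically ordered states instead, as sketched after the second hunk below.
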
@@ -5486,9 +5467,6 @@ static int __init init_workqueues(void)
 
        pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
-       cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
-       hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
-
        wq_numa_init();
 
        /* initialize CPU pools */
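
With the notifier registration dropped from init_workqueues(), the new entry points are wired up centrally in the hotplug core's state tables, a companion change in kernel/cpu.c that this diff does not show. A sketch of what those entries look like, assuming the .name/.startup/.teardown field layout of that era's struct cpuhp_step; CPUHP_WORKQUEUE_PREP and CPUHP_AP_WORKQUEUE_ONLINE are the real state constants:

/* PREP-stage entry: runs on a control CPU before the new CPU boots. */
[CPUHP_WORKQUEUE_PREP] = {
	.name		= "workqueue prepare",
	.startup	= workqueue_prepare_cpu,
	.teardown	= NULL,
},

/* AP-stage entry: runs once the new CPU is up; its teardown replaces
 * the old CPU_DOWN_PREPARE path. */
[CPUHP_AP_WORKQUEUE_ONLINE] = {
	.name		= "workqueue online",
	.startup	= workqueue_online_cpu,
	.teardown	= workqueue_offline_cpu,
},

A NULL .teardown for the PREP state mirrors the old notifier, which never destroyed the per-cpu workers on offline; workqueue_offline_cpu() only unbinds them from the departing CPU.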