kthread: kthread worker API cleanup
[cascardo/linux.git] / kernel / kthread.c
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 4ab4c37..c52a05a 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -138,7 +138,7 @@ void *kthread_data(struct task_struct *task)
 }
 
 /**
- * probe_kthread_data - speculative version of kthread_data()
+ * kthread_probe_data - speculative version of kthread_data()
  * @task: possible kthread task in question
  *
  * @task could be a kthread task.  Return the data value specified when it
@@ -146,7 +146,7 @@ void *kthread_data(struct task_struct *task)
  * inaccessible for any reason, %NULL is returned.  This function requires
  * that @task itself is safe to dereference.
  */
-void *probe_kthread_data(struct task_struct *task)
+void *kthread_probe_data(struct task_struct *task)
 {
        struct kthread *kthread = to_kthread(task);
        void *data = NULL;
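
For reference (not part of the patch): a minimal usage sketch of the renamed helper. Only kthread_probe_data() is taken from this diff; the example_report_kthread_data() caller, its pr_info() message, and the chosen headers are illustrative assumptions.

    #include <linux/kthread.h>
    #include <linux/printk.h>
    #include <linux/sched.h>

    /* Illustrative only: report the worker data of a task that may be a kthread. */
    static void example_report_kthread_data(struct task_struct *task)
    {
            void *data = kthread_probe_data(task);

            /* NULL means @task is not a kthread or its data was inaccessible. */
            if (data)
                    pr_info("%s: kthread data %p\n", task->comm, data);
    }
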
@@ -540,7 +540,7 @@ int kthreadd(void *unused)
        return 0;
 }
 
-void __init_kthread_worker(struct kthread_worker *worker,
+void __kthread_init_worker(struct kthread_worker *worker,
                                const char *name,
                                struct lock_class_key *key)
 {
@@ -549,7 +549,7 @@ void __init_kthread_worker(struct kthread_worker *worker,
        INIT_LIST_HEAD(&worker->work_list);
        worker->task = NULL;
 }
-EXPORT_SYMBOL_GPL(__init_kthread_worker);
+EXPORT_SYMBOL_GPL(__kthread_init_worker);
 
 /**
  * kthread_worker_fn - kthread function to process kthread_worker
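
For reference (not part of the patch): bringing up a worker the way callers do at this point in the series, by initializing it and handing kthread_worker_fn() to a kthread. This assumes the matching kthread_init_worker() wrapper around __kthread_init_worker() from the header side of this rename; the example_* names and the thread name are illustrative, and <linux/kthread.h> plus <linux/err.h> are assumed to be included.

    static struct kthread_worker example_worker;
    static struct task_struct *example_thread;

    static int example_start_worker(void)
    {
            /* Set up the lock and empty work list, then spawn the worker thread. */
            kthread_init_worker(&example_worker);

            example_thread = kthread_run(kthread_worker_fn, &example_worker,
                                         "example_worker");
            if (IS_ERR(example_thread))
                    return PTR_ERR(example_thread);

            return 0;
    }
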
@@ -606,7 +606,7 @@ repeat:
 EXPORT_SYMBOL_GPL(kthread_worker_fn);
 
 /* insert @work before @pos in @worker */
-static void insert_kthread_work(struct kthread_worker *worker,
+static void kthread_insert_work(struct kthread_worker *worker,
                               struct kthread_work *work,
                               struct list_head *pos)
 {
@@ -619,7 +619,7 @@ static void insert_kthread_work(struct kthread_worker *worker,
 }
 
 /**
- * queue_kthread_work - queue a kthread_work
+ * kthread_queue_work - queue a kthread_work
  * @worker: target kthread_worker
  * @work: kthread_work to queue
  *
@@ -627,7 +627,7 @@ static void insert_kthread_work(struct kthread_worker *worker,
  * must have been created with kthread_worker_create().  Returns %true
  * if @work was successfully queued, %false if it was already pending.
  */
-bool queue_kthread_work(struct kthread_worker *worker,
+bool kthread_queue_work(struct kthread_worker *worker,
                        struct kthread_work *work)
 {
        bool ret = false;
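
For reference (not part of the patch): queueing a work item on the worker from the previous sketch with the renamed kthread_queue_work(). The DEFINE_KTHREAD_WORK() initializer is assumed from include/linux/kthread.h; example_work_fn() and the log messages are illustrative.

    static void example_work_fn(struct kthread_work *work)
    {
            pr_info("example work ran\n");
    }

    static DEFINE_KTHREAD_WORK(example_work, example_work_fn);

    static void example_queue(void)
    {
            /* Returns false if the work item was already pending on a worker. */
            if (!kthread_queue_work(&example_worker, &example_work))
                    pr_debug("example work already pending\n");
    }
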
@@ -635,13 +635,13 @@ bool queue_kthread_work(struct kthread_worker *worker,
 
        spin_lock_irqsave(&worker->lock, flags);
        if (list_empty(&work->node)) {
-               insert_kthread_work(worker, work, &worker->work_list);
+               kthread_insert_work(worker, work, &worker->work_list);
                ret = true;
        }
        spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
 }
-EXPORT_SYMBOL_GPL(queue_kthread_work);
+EXPORT_SYMBOL_GPL(kthread_queue_work);
 
 struct kthread_flush_work {
        struct kthread_work     work;
@@ -656,12 +656,12 @@ static void kthread_flush_work_fn(struct kthread_work *work)
 }
 
 /**
- * flush_kthread_work - flush a kthread_work
+ * kthread_flush_work - flush a kthread_work
  * @work: work to flush
  *
  * If @work is queued or executing, wait for it to finish execution.
  */
-void flush_kthread_work(struct kthread_work *work)
+void kthread_flush_work(struct kthread_work *work)
 {
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
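
For reference (not part of the patch): a caller-side sketch of the renamed kthread_flush_work(), reusing the example_worker and example_work assumed in the sketches above. As this hunk shows, the flush is implemented by queueing an on-stack kthread_flush_work behind the target item and waiting for its completion.

    static void example_queue_and_wait(void)
    {
            kthread_queue_work(&example_worker, &example_work);

            /* Block until this specific item has finished executing. */
            kthread_flush_work(&example_work);
    }
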
@@ -682,9 +682,10 @@ retry:
        }
 
        if (!list_empty(&work->node))
-               insert_kthread_work(worker, &fwork.work, work->node.next);
+               kthread_insert_work(worker, &fwork.work, work->node.next);
        else if (worker->current_work == work)
-               insert_kthread_work(worker, &fwork.work, worker->work_list.next);
+               kthread_insert_work(worker, &fwork.work,
+                                   worker->work_list.next);
        else
                noop = true;
 
@@ -693,23 +694,23 @@ retry:
        if (!noop)
                wait_for_completion(&fwork.done);
 }
-EXPORT_SYMBOL_GPL(flush_kthread_work);
+EXPORT_SYMBOL_GPL(kthread_flush_work);
 
 /**
- * flush_kthread_worker - flush all current works on a kthread_worker
+ * kthread_flush_worker - flush all current works on a kthread_worker
  * @worker: worker to flush
  *
  * Wait until all currently executing or pending works on @worker are
  * finished.
  */
-void flush_kthread_worker(struct kthread_worker *worker)
+void kthread_flush_worker(struct kthread_worker *worker)
 {
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };
 
-       queue_kthread_work(worker, &fwork.work);
+       kthread_queue_work(worker, &fwork.work);
        wait_for_completion(&fwork.done);
 }
-EXPORT_SYMBOL_GPL(flush_kthread_worker);
+EXPORT_SYMBOL_GPL(kthread_flush_worker);
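
For reference (not part of the patch): a teardown sketch pairing the renamed kthread_flush_worker() with kthread_stop(), again using the example_* names assumed above; kthread_stop() itself is untouched by this cleanup.

    static void example_stop_worker(void)
    {
            /* Drain every work item queued so far, then stop the worker thread. */
            kthread_flush_worker(&example_worker);
            kthread_stop(example_thread);
    }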