locking/mutex: Introduce ww_mutex_set_context_slowpath()
author	Davidlohr Bueso <dave@stgolabs.net>	Tue, 6 Jan 2015 19:45:06 +0000 (11:45 -0800)
committer	Ingo Molnar <mingo@kernel.org>	Wed, 14 Jan 2015 14:07:30 +0000 (15:07 +0100)
... which is equivalent to the fastpath counterpart.
This mainly allows moving some ww_mutex-specific code
out of the generic mutex paths.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1420573509-24774-4-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
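
[ For reference, the fastpath counterpart mentioned above -- whose tail
  is visible as context at the top of the first hunk below -- looked
  roughly like this at the time; a condensed sketch, not the verbatim
  mutex.c source: ]

static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);
	lock->ctx = ctx;

	/*
	 * Make the lock->ctx store visible before reading the counter,
	 * so contended waiters either observe the new ctx or have
	 * already added themselves to wait_list.
	 */
	smp_mb();

	/* If the lock is uncontended there is nobody to wake up. */
	if (likely(atomic_read(&lock->base.count) == 0))
		return;

	/*
	 * We raced in the fastpath; wake everyone up so they can
	 * recheck lock->ctx and back off if they have to.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}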
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 6db3d0d..c67a60b 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -147,7 +147,7 @@ static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
 }
 
 /*
- * after acquiring lock with fastpath or when we lost out in contested
+ * After acquiring lock with fastpath or when we lost out in contested
  * slowpath, set ctx and wake up any waiters so they can recheck.
  *
  * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
@@ -191,6 +191,30 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
        spin_unlock_mutex(&lock->base.wait_lock, flags);
 }
 
+/*
+ * After acquiring lock in the slowpath set ctx and wake up any
+ * waiters so they can recheck.
+ *
+ * Callers must hold the mutex wait_lock.
+ */
+static __always_inline void
+ww_mutex_set_context_slowpath(struct ww_mutex *lock,
+                             struct ww_acquire_ctx *ctx)
+{
+       struct mutex_waiter *cur;
+
+       ww_mutex_lock_acquired(lock, ctx);
+       lock->ctx = ctx;
+
+       /*
+        * Give any possible sleeping processes the chance to wake up,
+        * so they can recheck if they have to back off.
+        */
+       list_for_each_entry(cur, &lock->base.wait_list, list) {
+               debug_mutex_wake_waiter(&lock->base, cur);
+               wake_up_process(cur->task);
+       }
+}
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
@@ -576,23 +600,7 @@ skip_wait:
 
        if (use_ww_ctx) {
                struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
-               struct mutex_waiter *cur;
-
-               /*
-                * This branch gets optimized out for the common case,
-                * and is only important for ww_mutex_lock.
-                */
-               ww_mutex_lock_acquired(ww, ww_ctx);
-               ww->ctx = ww_ctx;
-
-               /*
-                * Give any possible sleeping processes the chance to wake up,
-                * so they can recheck if they have to back off.
-                */
-               list_for_each_entry(cur, &lock->wait_list, list) {
-                       debug_mutex_wake_waiter(lock, cur);
-                       wake_up_process(cur->task);
-               }
+               ww_mutex_set_context_slowpath(ww, ww_ctx);
        }
 
        spin_unlock_mutex(&lock->wait_lock, flags);
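
[ The "recheck if they have to back off" wake-up exists because ww_mutex
  users implement wait/wound-style deadlock backoff in their locking
  loops. Roughly, following the pattern described in
  Documentation/ww-mutex-design.txt, a caller that locks a whole set of
  objects looks like the sketch below; demo_ww_class, struct obj and
  lock_objs() are illustrative names, not part of this patch: ]

#include <linux/list.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);

struct obj {
	struct ww_mutex lock;
	struct list_head node;
};

/* Lock every object on @list under @ctx, honouring -EDEADLK backoff. */
static int lock_objs(struct list_head *list, struct ww_acquire_ctx *ctx)
{
	struct obj *obj, *entry, *prelocked = NULL;
	int ret;

	ww_acquire_init(ctx, &demo_ww_class);
retry:
	list_for_each_entry(obj, list, node) {
		if (obj == prelocked) {
			/* Already taken via ww_mutex_lock_slow() below. */
			prelocked = NULL;
			continue;
		}

		ret = ww_mutex_lock(&obj->lock, ctx);
		if (ret == 0)
			continue;

		/* Back off: drop every lock acquired this round. */
		list_for_each_entry(entry, list, node) {
			if (entry == obj)
				break;
			ww_mutex_unlock(&entry->lock);
		}

		if (ret != -EDEADLK) {
			ww_acquire_fini(ctx);
			return ret;
		}

		/*
		 * We are the younger transaction and must wait.  Sleep
		 * on the contended mutex until its owner releases it --
		 * the wake-up ending that sleep is exactly what
		 * ww_mutex_set_context_slowpath()/_fastpath() issue --
		 * then retry with the contended object locked first.
		 */
		ww_mutex_lock_slow(&obj->lock, ctx);
		prelocked = obj;
		list_move(&obj->node, list);
		goto retry;
	}

	ww_acquire_done(ctx);
	return 0;
}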