rtmutex: Simplify and document try_to_take_rtmutex()
kernel/locking/rtmutex.c
/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 *  Copyright (C) 2006 Esben Nielsen
 *
 *  See Documentation/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state.
 *
 * owner        bit0
 * NULL         0       lock is free (fast acquire possible)
 * NULL         1       lock is free and has waiters and the top waiter
 *                              is going to take the lock*
 * taskpointer  0       lock is held (fast release possible)
 * taskpointer  1       lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) It also can be a transitional state when grabbing the lock
 * with ->wait_lock held. To prevent any fast path cmpxchg of the
 * lock, we need to set bit 0 before looking at the lock, and the
 * owner may be NULL during this small window, hence this can be a
 * transitional state.
 *
 * (**) There is a small time when bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */
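
/*
 * For illustration only (a sketch mirroring the helpers in
 * rtmutex_common.h, not used below): the owner task and the waiters
 * bit are decoded from lock->owner as
 *
 *      owner = (struct task_struct *)
 *                      ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
 *      has_waiters = (unsigned long)lock->owner & RT_MUTEX_HAS_WAITERS;
 */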

static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
        unsigned long val = (unsigned long)owner;

        if (rt_mutex_has_waiters(lock))
                val |= RT_MUTEX_HAS_WAITERS;

        lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                clear_rt_mutex_waiters(lock);
}

/*
 * We can speed up the acquire/release if the architecture supports
 * cmpxchg and if there's no debugging state to be set up.
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)        (cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        unsigned long owner, *p = (unsigned long *) &lock->owner;

        do {
                owner = *p;
        } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}

/*
 * Safe fastpath aware unlock:
 * 1) Clear the waiters bit
 * 2) Drop lock->wait_lock
 * 3) Try to unlock the lock with cmpxchg
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
        __releases(lock->wait_lock)
{
        struct task_struct *owner = rt_mutex_owner(lock);

        clear_rt_mutex_waiters(lock);
        raw_spin_unlock(&lock->wait_lock);
        /*
         * If a new waiter comes in between the unlock and the cmpxchg
         * we have two situations:
         *
         * unlock(wait_lock);
         *                                      lock(wait_lock);
         * cmpxchg(p, owner, 0) == owner
         *                                      mark_rt_mutex_waiters(lock);
         *                                      acquire(lock);
         * or:
         *
         * unlock(wait_lock);
         *                                      lock(wait_lock);
         *                                      mark_rt_mutex_waiters(lock);
         *
         * cmpxchg(p, owner, 0) != owner
         *                                      enqueue_waiter();
         *                                      unlock(wait_lock);
         * lock(wait_lock);
         * wake waiter();
         * unlock(wait_lock);
         *                                      lock(wait_lock);
         *                                      acquire(lock);
         */
        return rt_mutex_cmpxchg(lock, owner, NULL);
}

#else
# define rt_mutex_cmpxchg(l,c,n)        (0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}

/*
 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
        __releases(lock->wait_lock)
{
        lock->owner = NULL;
        raw_spin_unlock(&lock->wait_lock);
        return true;
}
#endif
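
/*
 * For illustration (a sketch of how the helpers above are used by the
 * fastpath wrappers further down): an uncontended lock is acquired with
 *
 *      rt_mutex_cmpxchg(lock, NULL, current)
 *
 * and released with
 *
 *      rt_mutex_cmpxchg(lock, current, NULL)
 *
 * Both can only succeed while RT_MUTEX_HAS_WAITERS is clear in
 * lock->owner.
 */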

static inline int
rt_mutex_waiter_less(struct rt_mutex_waiter *left,
                     struct rt_mutex_waiter *right)
{
        if (left->prio < right->prio)
                return 1;

        /*
         * If both waiters have dl_prio(), we check the deadlines of the
         * associated tasks.
         * If left waiter has a dl_prio(), and we didn't return 1 above,
         * then right waiter has a dl_prio() too.
         */
        if (dl_prio(left->prio))
                return (left->task->dl.deadline < right->task->dl.deadline);

        return 0;
}
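
/*
 * Example (illustrative only): lower ->prio values mean higher
 * priority, so a waiter at prio 20 orders before one at prio 40. If
 * the left waiter is a deadline task and we get here, the right one
 * is too, and the one with the earlier absolute deadline wins.
 */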

static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
        struct rb_node **link = &lock->waiters.rb_node;
        struct rb_node *parent = NULL;
        struct rt_mutex_waiter *entry;
        int leftmost = 1;

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
                if (rt_mutex_waiter_less(waiter, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        if (leftmost)
                lock->waiters_leftmost = &waiter->tree_entry;

        rb_link_node(&waiter->tree_entry, parent, link);
        rb_insert_color(&waiter->tree_entry, &lock->waiters);
}

static void
rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
        if (RB_EMPTY_NODE(&waiter->tree_entry))
                return;

        if (lock->waiters_leftmost == &waiter->tree_entry)
                lock->waiters_leftmost = rb_next(&waiter->tree_entry);

        rb_erase(&waiter->tree_entry, &lock->waiters);
        RB_CLEAR_NODE(&waiter->tree_entry);
}

static void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
        struct rb_node **link = &task->pi_waiters.rb_node;
        struct rb_node *parent = NULL;
        struct rt_mutex_waiter *entry;
        int leftmost = 1;

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
                if (rt_mutex_waiter_less(waiter, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        if (leftmost)
                task->pi_waiters_leftmost = &waiter->pi_tree_entry;

        rb_link_node(&waiter->pi_tree_entry, parent, link);
        rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
}

static void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
        if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
                return;

        if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
                task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);

        rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
        RB_CLEAR_NODE(&waiter->pi_tree_entry);
}

/*
 * Calculate task priority from the waiter tree priority
 *
 * Return task->normal_prio when the waiter tree is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
        if (likely(!task_has_pi_waiters(task)))
                return task->normal_prio;

        return min(task_top_pi_waiter(task)->prio,
                   task->normal_prio);
}

struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
        if (likely(!task_has_pi_waiters(task)))
                return NULL;

        return task_top_pi_waiter(task)->task;
}

/*
 * Called by sched_setscheduler() to check whether the priority change
 * is overruled by a possible priority boosting.
 */
int rt_mutex_check_prio(struct task_struct *task, int newprio)
{
        if (!task_has_pi_waiters(task))
                return 0;

        return task_top_pi_waiter(task)->task->prio <= newprio;
}
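
/*
 * Example (illustrative only): if the top pi waiter runs at prio 10,
 * a sched_setscheduler() call that would set newprio to 20 is
 * overruled (10 <= 20), while a change to the higher prio 5 is not.
 */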

/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
        int prio = rt_mutex_getprio(task);

        if (task->prio != prio || dl_prio(prio))
                rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&task->pi_lock, flags);
        __rt_mutex_adjust_prio(task);
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
        return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
}

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 *
 * @task:       the task owning the mutex (owner) for which a chain walk is
 *              probably needed
 * @deadlock_detect: do we have to carry out deadlock detection?
 * @orig_lock:  the mutex (can be NULL if we are walking the chain to recheck
 *              things for a task that has just got its priority adjusted, and
 *              is waiting on a mutex)
 * @next_lock:  the mutex on which the owner of @orig_lock was blocked before
 *              we dropped its pi_lock. Is never dereferenced, only used for
 *              comparison to detect lock chain changes.
 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
 *              its priority to the mutex owner (can be NULL in the case
 *              depicted above or if the top waiter has gone away and we are
 *              actually deboosting the owner)
 * @top_task:   the current top waiter
 *
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                                      int deadlock_detect,
                                      struct rt_mutex *orig_lock,
                                      struct rt_mutex *next_lock,
                                      struct rt_mutex_waiter *orig_waiter,
                                      struct task_struct *top_task)
{
        struct rt_mutex *lock;
        struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
        int detect_deadlock, ret = 0, depth = 0;
        unsigned long flags;

        detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
                                                         deadlock_detect);

        /*
         * The (de)boosting is a step by step approach with a lot of
         * pitfalls. We want this to be preemptible and we want to
         * hold a maximum of two locks per step. So we have to check
         * carefully whether things change under us.
         */
 again:
        if (++depth > max_lock_depth) {
                static int prev_max;

                /*
                 * Print this only once. If the admin changes the limit,
                 * print a new message when reaching the limit again.
                 */
                if (prev_max != max_lock_depth) {
                        prev_max = max_lock_depth;
                        printk(KERN_WARNING "Maximum lock depth %d reached "
                               "task: %s (%d)\n", max_lock_depth,
                               top_task->comm, task_pid_nr(top_task));
                }
                put_task_struct(task);

                return -EDEADLK;
        }
 retry:
        /*
         * The task cannot go away, as we did a get_task_struct() before!
         */
        raw_spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        /*
         * Check whether the end of the boosting chain has been
         * reached or the state of the chain has changed while we
         * dropped the locks.
         */
        if (!waiter)
                goto out_unlock_pi;

        /*
         * Check the orig_waiter state. After we dropped the locks,
         * the previous owner of the lock might have released the lock.
         */
        if (orig_waiter && !rt_mutex_owner(orig_lock))
                goto out_unlock_pi;

        /*
         * We dropped all locks after taking a refcount on @task, so
         * the task might have moved on in the lock chain or even left
         * the chain completely and blocks now on an unrelated lock or
         * on @orig_lock.
         *
         * We stored the lock on which @task was blocked in @next_lock,
         * so we can detect the chain change.
         */
        if (next_lock != waiter->lock)
                goto out_unlock_pi;

        /*
         * Drop out when the task has no waiters. Note that top_waiter
         * can be NULL when we are in deboosting mode!
         */
        if (top_waiter) {
                if (!task_has_pi_waiters(task))
                        goto out_unlock_pi;
                /*
                 * If deadlock detection is off, we stop here if we
                 * are not the top pi waiter of the task.
                 */
                if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
                        goto out_unlock_pi;
        }

        /*
         * When deadlock detection is off, we check whether further
         * priority adjustment is necessary.
         */
        if (!detect_deadlock && waiter->prio == task->prio)
                goto out_unlock_pi;

        lock = waiter->lock;
        if (!raw_spin_trylock(&lock->wait_lock)) {
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                cpu_relax();
                goto retry;
        }

        /*
         * Deadlock detection. If the lock is the same as the original
         * lock which caused us to walk the lock chain or if the
         * current lock is owned by the task which initiated the chain
         * walk, we detected a deadlock.
         */
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
                debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
                raw_spin_unlock(&lock->wait_lock);
                ret = -EDEADLK;
                goto out_unlock_pi;
        }

        top_waiter = rt_mutex_top_waiter(lock);

        /* Requeue the waiter */
        rt_mutex_dequeue(lock, waiter);
        waiter->prio = task->prio;
        rt_mutex_enqueue(lock, waiter);

        /* Release the task */
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
        if (!rt_mutex_owner(lock)) {
                /*
                 * If the requeue above changed the top waiter, then we need
                 * to wake the new top waiter up to try to get the lock.
                 */

                if (top_waiter != rt_mutex_top_waiter(lock))
                        wake_up_process(rt_mutex_top_waiter(lock)->task);
                raw_spin_unlock(&lock->wait_lock);
                goto out_put_task;
        }
        put_task_struct(task);

        /* Grab the next task */
        task = rt_mutex_owner(lock);
        get_task_struct(task);
        raw_spin_lock_irqsave(&task->pi_lock, flags);

        if (waiter == rt_mutex_top_waiter(lock)) {
                /* Boost the owner */
                rt_mutex_dequeue_pi(task, top_waiter);
                rt_mutex_enqueue_pi(task, waiter);
                __rt_mutex_adjust_prio(task);

        } else if (top_waiter == waiter) {
                /* Deboost the owner */
                rt_mutex_dequeue_pi(task, waiter);
                waiter = rt_mutex_top_waiter(lock);
                rt_mutex_enqueue_pi(task, waiter);
                __rt_mutex_adjust_prio(task);
        }

        /*
         * Check whether the task which owns the current lock is pi
         * blocked itself. If yes, we store a pointer to the lock for
         * the lock chain change detection above. After we dropped
         * task->pi_lock, next_lock cannot be dereferenced anymore.
         */
        next_lock = task_blocked_on_lock(task);

        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        top_waiter = rt_mutex_top_waiter(lock);
        raw_spin_unlock(&lock->wait_lock);

        /*
         * We reached the end of the lock chain. Stop right here. No
         * point in going back just to figure that out.
         */
        if (!next_lock)
                goto out_put_task;

        if (!detect_deadlock && waiter != top_waiter)
                goto out_put_task;

        goto again;

 out_unlock_pi:
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
        put_task_struct(task);

        return ret;
}

/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held.
 *
 * @lock:   The lock to be acquired.
 * @task:   The task which wants to acquire the lock
 * @waiter: The waiter that is queued to the lock's wait list if the
 *          callsite called task_blocked_on_lock(), otherwise NULL
 *
 * Returns 1 if @task acquired the lock, 0 otherwise.
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
                                struct rt_mutex_waiter *waiter)
{
        unsigned long flags;

        /*
         * Before testing whether we can acquire @lock, we set the
         * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
         * other tasks which try to modify @lock into the slow path
         * and they serialize on @lock->wait_lock.
         *
         * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
         * as explained at the top of this file if and only if:
         *
         * - There is a lock owner. The caller must fixup the
         *   transient state if it does a trylock or leaves the lock
         *   function due to a signal or timeout.
         *
         * - @task acquires the lock and there are no other
         *   waiters. This is undone in rt_mutex_set_owner(@task) at
         *   the end of this function.
         */
        mark_rt_mutex_waiters(lock);

        /*
         * If @lock has an owner, give up.
         */
        if (rt_mutex_owner(lock))
                return 0;

        /*
         * If @waiter != NULL, @task has already enqueued the waiter
         * into @lock waiter list. If @waiter == NULL then this is a
         * trylock attempt.
         */
        if (waiter) {
                /*
                 * If waiter is not the highest priority waiter of
                 * @lock, give up.
                 */
                if (waiter != rt_mutex_top_waiter(lock))
                        return 0;

                /*
                 * We can acquire the lock. Remove the waiter from the
                 * lock waiters list.
                 */
                rt_mutex_dequeue(lock, waiter);

        } else {
                /*
                 * If the lock has waiters already we check whether @task is
                 * eligible to take over the lock.
                 *
                 * If there are no other waiters, @task can acquire
                 * the lock.  @task->pi_blocked_on is NULL, so it does
                 * not need to be dequeued.
                 */
                if (rt_mutex_has_waiters(lock)) {
                        /*
                         * If @task->prio is greater than or equal to
                         * the top waiter priority (kernel view),
                         * @task lost.
                         */
                        if (task->prio >= rt_mutex_top_waiter(lock)->prio)
                                return 0;

                        /*
                         * The current top waiter stays enqueued. We
                         * don't have to change anything in the lock
                         * waiters order.
                         */
                } else {
                        /*
                         * No waiters. Take the lock without the
                         * pi_lock dance. @task->pi_blocked_on is NULL
                         * and we have no waiters to enqueue in @task
                         * pi waiters list.
                         */
                        goto takeit;
                }
        }

        /*
         * Clear @task->pi_blocked_on. Requires protection by
         * @task->pi_lock. Redundant operation for the @waiter == NULL
         * case, but conditionals are more expensive than a redundant
         * store.
         */
        raw_spin_lock_irqsave(&task->pi_lock, flags);
        task->pi_blocked_on = NULL;
        /*
         * Finish the lock acquisition. @task is the new owner. If
         * other waiters exist we have to insert the highest priority
         * waiter into @task->pi_waiters list.
         */
        if (rt_mutex_has_waiters(lock))
                rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

takeit:
        /* We got the lock. */
        debug_rt_mutex_lock(lock);

        /*
         * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
         * are still waiters, or clears it.
         */
        rt_mutex_set_owner(lock, task);

        rt_mutex_deadlock_account_lock(lock, task);

        return 1;
}

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                                   struct rt_mutex_waiter *waiter,
                                   struct task_struct *task,
                                   int detect_deadlock)
{
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *top_waiter = waiter;
        struct rt_mutex *next_lock;
        int chain_walk = 0, res;
        unsigned long flags;

        /*
         * Early deadlock detection. We really don't want the task to
         * enqueue on itself just to untangle the mess later. It's not
         * only an optimization. We drop the locks, so another waiter
         * can come in before the chain walk detects the deadlock. So
         * the other will detect the deadlock and return -EDEADLOCK,
         * which is wrong, as the other waiter is not in a deadlock
         * situation.
         */
        if (owner == task)
                return -EDEADLK;

        raw_spin_lock_irqsave(&task->pi_lock, flags);
        __rt_mutex_adjust_prio(task);
        waiter->task = task;
        waiter->lock = lock;
        waiter->prio = task->prio;

        /* Get the top priority waiter on the lock */
        if (rt_mutex_has_waiters(lock))
                top_waiter = rt_mutex_top_waiter(lock);
        rt_mutex_enqueue(lock, waiter);

        task->pi_blocked_on = waiter;

        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        if (!owner)
                return 0;

        raw_spin_lock_irqsave(&owner->pi_lock, flags);
        if (waiter == rt_mutex_top_waiter(lock)) {
                rt_mutex_dequeue_pi(owner, top_waiter);
                rt_mutex_enqueue_pi(owner, waiter);

                __rt_mutex_adjust_prio(owner);
                if (owner->pi_blocked_on)
                        chain_walk = 1;
        } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
                chain_walk = 1;
        }

        /* Store the lock on which owner is blocked or NULL */
        next_lock = task_blocked_on_lock(owner);

        raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
        /*
         * Even if full deadlock detection is on, if the owner is not
         * blocked itself, we can avoid finding this out in the chain
         * walk.
         */
        if (!chain_walk || !next_lock)
                return 0;

        /*
         * The owner can't disappear while holding a lock,
         * so the owner struct is protected by wait_lock.
         * Gets dropped in rt_mutex_adjust_prio_chain()!
         */
        get_task_struct(owner);

        raw_spin_unlock(&lock->wait_lock);

        res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
                                         next_lock, waiter, task);

        raw_spin_lock(&lock->wait_lock);

        return res;
}

/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's pi waiters list and
 * wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
        struct rt_mutex_waiter *waiter;
        unsigned long flags;

        raw_spin_lock_irqsave(&current->pi_lock, flags);

        waiter = rt_mutex_top_waiter(lock);

        /*
         * Remove it from current->pi_waiters. We do not adjust a
         * possible priority boost right now. We execute wakeup in the
         * boosted mode and go back to normal after releasing
         * lock->wait_lock.
         */
        rt_mutex_dequeue_pi(current, waiter);

        /*
         * As we are waking up the top waiter, and the waiter stays
         * queued on the lock until it gets the lock, this lock
         * obviously has waiters. Just set the bit here and this has
         * the added benefit of forcing all new tasks into the
         * slow path making sure no task of lower priority than
         * the top waiter can steal this lock.
         */
        lock->owner = (void *) RT_MUTEX_HAS_WAITERS;

        raw_spin_unlock_irqrestore(&current->pi_lock, flags);

        /*
         * It's safe to dereference waiter as it cannot go away as
         * long as we hold lock->wait_lock. The waiter task needs to
         * acquire it in order to dequeue the waiter.
         */
        wake_up_process(waiter->task);
}

/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held, after try_to_take_rt_mutex()
 * has just failed.
 */
static void remove_waiter(struct rt_mutex *lock,
                          struct rt_mutex_waiter *waiter)
{
        int first = (waiter == rt_mutex_top_waiter(lock));
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex *next_lock = NULL;
        unsigned long flags;

        raw_spin_lock_irqsave(&current->pi_lock, flags);
        rt_mutex_dequeue(lock, waiter);
        current->pi_blocked_on = NULL;
        raw_spin_unlock_irqrestore(&current->pi_lock, flags);

        if (!owner)
                return;

        if (first) {

                raw_spin_lock_irqsave(&owner->pi_lock, flags);

                rt_mutex_dequeue_pi(owner, waiter);

                if (rt_mutex_has_waiters(lock)) {
                        struct rt_mutex_waiter *next;

                        next = rt_mutex_top_waiter(lock);
                        rt_mutex_enqueue_pi(owner, next);
                }
                __rt_mutex_adjust_prio(owner);

                /* Store the lock on which owner is blocked or NULL */
                next_lock = task_blocked_on_lock(owner);

                raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
        }

        if (!next_lock)
                return;

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(owner);

        raw_spin_unlock(&lock->wait_lock);

        rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);

        raw_spin_lock(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
        struct rt_mutex_waiter *waiter;
        struct rt_mutex *next_lock;
        unsigned long flags;

        raw_spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        if (!waiter || (waiter->prio == task->prio &&
                        !dl_prio(task->prio))) {
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                return;
        }
        next_lock = waiter->lock;
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(task);

        rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
}

/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:                the rt_mutex to take
 * @state:               the state the task should block in (TASK_INTERRUPTIBLE
 *                       or TASK_UNINTERRUPTIBLE)
 * @timeout:             the pre-initialized and started timer, or NULL for none
 * @waiter:              the pre-initialized rt_mutex_waiter
 *
 * lock->wait_lock must be held by the caller.
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
                    struct hrtimer_sleeper *timeout,
                    struct rt_mutex_waiter *waiter)
{
        int ret = 0;

        for (;;) {
                /* Try to acquire the lock: */
                if (try_to_take_rt_mutex(lock, current, waiter))
                        break;

                /*
                 * In TASK_INTERRUPTIBLE state we check for signals and
                 * timeout. Both are ignored otherwise.
                 */
                if (unlikely(state == TASK_INTERRUPTIBLE)) {
                        /* Signal pending? */
                        if (signal_pending(current))
                                ret = -EINTR;
                        if (timeout && !timeout->task)
                                ret = -ETIMEDOUT;
                        if (ret)
                                break;
                }

                raw_spin_unlock(&lock->wait_lock);

                debug_rt_mutex_print_deadlock(waiter);

                schedule_rt_mutex(lock);

                raw_spin_lock(&lock->wait_lock);
                set_current_state(state);
        }

        return ret;
}

static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
                                     struct rt_mutex_waiter *w)
{
        /*
         * If the result is not -EDEADLOCK or the caller requested
         * deadlock detection, nothing to do here.
         */
        if (res != -EDEADLOCK || detect_deadlock)
                return;

        /*
         * Yell loudly and stop the task right here.
         */
        rt_mutex_print_deadlock(w);
        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();
        }
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
                  struct hrtimer_sleeper *timeout,
                  int detect_deadlock)
{
        struct rt_mutex_waiter waiter;
        int ret = 0;

        debug_rt_mutex_init_waiter(&waiter);
        RB_CLEAR_NODE(&waiter.pi_tree_entry);
        RB_CLEAR_NODE(&waiter.tree_entry);

        raw_spin_lock(&lock->wait_lock);

        /* Try to acquire the lock again: */
        if (try_to_take_rt_mutex(lock, current, NULL)) {
                raw_spin_unlock(&lock->wait_lock);
                return 0;
        }

        set_current_state(state);

        /* Setup the timer, when timeout != NULL */
        if (unlikely(timeout)) {
                hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
                if (!hrtimer_active(&timeout->timer))
                        timeout->task = NULL;
        }

        ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);

        if (likely(!ret))
                ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

        set_current_state(TASK_RUNNING);

        if (unlikely(ret)) {
                remove_waiter(lock, &waiter);
                rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
        }

        /*
         * try_to_take_rt_mutex() sets the waiter bit
         * unconditionally. We might have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        raw_spin_unlock(&lock->wait_lock);

        /* Remove pending timer: */
        if (unlikely(timeout))
                hrtimer_cancel(&timeout->timer);

        debug_rt_mutex_free_waiter(&waiter);

        return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
{
        int ret;

        /*
         * If the lock already has an owner we fail to get the lock.
         * This can be done without taking the @lock->wait_lock as
         * it is only being read, and this is a trylock anyway.
         */
        if (rt_mutex_owner(lock))
                return 0;

        /*
         * The mutex has currently no owner. Lock the wait lock and
         * try to acquire the lock.
         */
        raw_spin_lock(&lock->wait_lock);

        ret = try_to_take_rt_mutex(lock, current, NULL);

        /*
         * try_to_take_rt_mutex() sets the lock waiters bit
         * unconditionally. Clean this up.
         */
        fixup_rt_mutex_waiters(lock);

        raw_spin_unlock(&lock->wait_lock);

        return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
        raw_spin_lock(&lock->wait_lock);

        debug_rt_mutex_unlock(lock);

        rt_mutex_deadlock_account_unlock(current);

        /*
         * We must be careful here if the fast path is enabled. If we
         * have no waiters queued we cannot set owner to NULL here
         * because of:
         *
         * foo->lock->owner = NULL;
         *                      rtmutex_lock(foo->lock);   <- fast path
         *                      free = atomic_dec_and_test(foo->refcnt);
         *                      rtmutex_unlock(foo->lock); <- fast path
         *                      if (free)
         *                              kfree(foo);
         * raw_spin_unlock(foo->lock->wait_lock);
         *
         * So for the fastpath enabled kernel:
         *
         * Nothing can set the waiters bit as long as we hold
         * lock->wait_lock. So we do the following sequence:
         *
         *      owner = rt_mutex_owner(lock);
         *      clear_rt_mutex_waiters(lock);
         *      raw_spin_unlock(&lock->wait_lock);
         *      if (cmpxchg(&lock->owner, owner, 0) == owner)
         *              return;
         *      goto retry;
         *
         * The fastpath disabled variant is simple as all access to
         * lock->owner is serialized by lock->wait_lock:
         *
         *      lock->owner = NULL;
         *      raw_spin_unlock(&lock->wait_lock);
         */
        while (!rt_mutex_has_waiters(lock)) {
                /* Drops lock->wait_lock ! */
                if (unlock_rt_mutex_safe(lock) == true)
                        return;
                /* Relock the rtmutex and try again */
                raw_spin_lock(&lock->wait_lock);
        }

        /*
         * The wakeup next waiter path does not suffer from the above
         * race. See the comments there.
         */
        wakeup_next_waiter(lock);

        raw_spin_unlock(&lock->wait_lock);

        /* Undo pi boosting if necessary: */
        rt_mutex_adjust_prio(current);
}

/*
 * debug aware fast / slowpath lock, trylock and unlock
 *
 * The atomic acquire/release ops are compiled away when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
                  int detect_deadlock,
                  int (*slowfn)(struct rt_mutex *lock, int state,
                                struct hrtimer_sleeper *timeout,
                                int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
                        struct hrtimer_sleeper *timeout, int detect_deadlock,
                        int (*slowfn)(struct rt_mutex *lock, int state,
                                      struct hrtimer_sleeper *timeout,
                                      int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
                     int (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 1;
        }
        return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
                    void (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
                rt_mutex_deadlock_account_unlock(current);
        else
                slowfn(lock);
}

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
        might_sleep();

        rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptibly
 *
 * @lock:               the rt_mutex to be locked
 * @detect_deadlock:    deadlock detection on/off
 *
 * Returns:
 *  0           on success
 * -EINTR       when interrupted by a signal
 * -EDEADLK     when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
                                                 int detect_deadlock)
{
        might_sleep();

        return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
                                 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptibly; the timeout
 *                      structure is provided by the caller
 *
 * @lock:               the rt_mutex to be locked
 * @timeout:            timeout structure or NULL (no timeout)
 * @detect_deadlock:    deadlock detection on/off
 *
 * Returns:
 *  0           on success
 * -EINTR       when interrupted by a signal
 * -ETIMEDOUT   when the timeout expired
 * -EDEADLK     when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
                    int detect_deadlock)
{
        might_sleep();

        return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
                                       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
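
/*
 * A sketch of preparing the timeout (modeled on how callers of this
 * era set up an hrtimer_sleeper; treat the details as an assumption,
 * not part of this file's API; teardown and error handling omitted):
 *
 *      struct hrtimer_sleeper to;
 *
 *      hrtimer_init_on_stack(&to.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 *      hrtimer_init_sleeper(&to, current);
 *      hrtimer_set_expires(&to.timer, deadline);
 *      ret = rt_mutex_timed_lock(lock, &to, 0);
 */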

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:       the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
        return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
        rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
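
/*
 * Typical usage of the API above (a sketch, assuming a caller-side
 * structure with an embedded rt_mutex set up via rt_mutex_init() from
 * <linux/rtmutex.h>):
 *
 *      struct foo {
 *              struct rt_mutex lock;
 *              int data;
 *      };
 *
 *      static void foo_update(struct foo *foo)
 *      {
 *              rt_mutex_lock(&foo->lock);
 *              foo->data++;
 *              rt_mutex_unlock(&foo->lock);
 *      }
 *
 * rt_mutex_trylock() can be used instead of rt_mutex_lock() where
 * blocking is not an option; it returns 1 on success and 0 on
 * contention.
 */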

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
        WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
        lock->magic = NULL;
#endif
}
EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing a locked rt lock is not allowed.
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
        lock->owner = NULL;
        raw_spin_lock_init(&lock->wait_lock);
        lock->waiters = RB_ROOT;
        lock->waiters_leftmost = NULL;

        debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
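
/*
 * Example (a sketch; DEFINE_RT_MUTEX() and rt_mutex_init() come from
 * <linux/rtmutex.h>). Static initialization:
 *
 *      static DEFINE_RT_MUTEX(foo_lock);
 *
 * and dynamic initialization:
 *
 *      struct rt_mutex *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
 *
 *      if (bar)
 *              rt_mutex_init(bar);
 */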

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *                              proxy owner
 *
 * @lock:       the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
                                struct task_struct *proxy_owner)
{
        __rt_mutex_init(lock, NULL);
        debug_rt_mutex_proxy_lock(lock, proxy_owner);
        rt_mutex_set_owner(lock, proxy_owner);
        rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:       the rt_mutex to be locked
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
                           struct task_struct *proxy_owner)
{
        debug_rt_mutex_proxy_unlock(lock);
        rt_mutex_set_owner(lock, NULL);
        rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:               the rt_mutex to take
 * @waiter:             the pre-initialized rt_mutex_waiter
 * @task:               the task to prepare
 * @detect_deadlock:    perform deadlock detection (1) or not (0)
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                              struct rt_mutex_waiter *waiter,
                              struct task_struct *task, int detect_deadlock)
{
        int ret;

        raw_spin_lock(&lock->wait_lock);

        if (try_to_take_rt_mutex(lock, task, NULL)) {
                raw_spin_unlock(&lock->wait_lock);
                return 1;
        }

        /* We enforce deadlock detection for futexes */
        ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);

        if (ret && !rt_mutex_owner(lock)) {
                /*
                 * Reset the return value. We might have
                 * returned with -EDEADLK and the owner
                 * released the lock while we were walking the
                 * pi chain.  Let the waiter sort it out.
                 */
                ret = 0;
        }

        if (unlikely(ret))
                remove_waiter(lock, waiter);

        raw_spin_unlock(&lock->wait_lock);

        debug_rt_mutex_print_deadlock(waiter);

        return ret;
}

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                return NULL;

        return rt_mutex_top_waiter(lock)->task;
}

/**
 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
 * @lock:               the rt_mutex we were woken on
 * @to:                 the timeout, null if none. hrtimer should already have
 *                      been started.
 * @waiter:             the pre-initialized rt_mutex_waiter
 * @detect_deadlock:    perform deadlock detection (1) or not (0)
 *
 * Complete the lock acquisition started on our behalf by another thread.
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
 *
 * Special API call for PI-futex requeue support
 */
int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
                               struct hrtimer_sleeper *to,
                               struct rt_mutex_waiter *waiter,
                               int detect_deadlock)
{
        int ret;

        raw_spin_lock(&lock->wait_lock);

        set_current_state(TASK_INTERRUPTIBLE);

        ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);

        set_current_state(TASK_RUNNING);

        if (unlikely(ret))
                remove_waiter(lock, waiter);

        /*
         * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
         * have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        raw_spin_unlock(&lock->wait_lock);

        return ret;
}