1 /*
2  *  Fast Userspace Mutexes (which I call "Futexes!").
3  *  (C) Rusty Russell, IBM 2002
4  *
5  *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
6  *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
7  *
8  *  Removed page pinning, fix privately mapped COW pages and other cleanups
9  *  (C) Copyright 2003, 2004 Jamie Lokier
10  *
11  *  Robust futex support started by Ingo Molnar
12  *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
13  *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
14  *
15  *  PI-futex support started by Ingo Molnar and Thomas Gleixner
16  *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
17  *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
18  *
19  *  PRIVATE futexes by Eric Dumazet
20  *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
21  *
22  *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
23  *  Copyright (C) IBM Corporation, 2009
24  *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
25  *
26  *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
27  *  enough at me, Linus for the original (flawed) idea, Matthew
28  *  Kirkwood for proof-of-concept implementation.
29  *
30  *  "The futexes are also cursed."
31  *  "But they come in a choice of three flavours!"
32  *
33  *  This program is free software; you can redistribute it and/or modify
34  *  it under the terms of the GNU General Public License as published by
35  *  the Free Software Foundation; either version 2 of the License, or
36  *  (at your option) any later version.
37  *
38  *  This program is distributed in the hope that it will be useful,
39  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
40  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
41  *  GNU General Public License for more details.
42  *
43  *  You should have received a copy of the GNU General Public License
44  *  along with this program; if not, write to the Free Software
45  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
46  */
47 #include <linux/slab.h>
48 #include <linux/poll.h>
49 #include <linux/fs.h>
50 #include <linux/file.h>
51 #include <linux/jhash.h>
52 #include <linux/init.h>
53 #include <linux/futex.h>
54 #include <linux/mount.h>
55 #include <linux/pagemap.h>
56 #include <linux/syscalls.h>
57 #include <linux/signal.h>
58 #include <linux/export.h>
59 #include <linux/magic.h>
60 #include <linux/pid.h>
61 #include <linux/nsproxy.h>
62 #include <linux/ptrace.h>
63 #include <linux/sched/rt.h>
64 #include <linux/hugetlb.h>
65 #include <linux/freezer.h>
66 #include <linux/bootmem.h>
67 #include <linux/fault-inject.h>
68
69 #include <asm/futex.h>
70
71 #include "locking/rtmutex_common.h"
72
73 /*
74  * READ this before attempting to hack on futexes!
75  *
76  * Basic futex operation and ordering guarantees
77  * =============================================
78  *
79  * The waiter reads the futex value in user space and calls
80  * futex_wait(). This function computes the hash bucket and acquires
81  * the hash bucket lock. After that it reads the futex user space value
82  * again and verifies that the data has not changed. If it has not changed
83  * it enqueues itself into the hash bucket, releases the hash bucket lock
84  * and schedules.
85  *
86  * The waker side modifies the user space value of the futex and calls
87  * futex_wake(). This function computes the hash bucket and acquires the
88  * hash bucket lock. Then it looks for waiters on that futex in the hash
89  * bucket and wakes them.
90  *
91  * In futex wakeup scenarios where no tasks are blocked on the futex, the
92  * waker can avoid taking the hb spinlock and simply return. For this
93  * optimization to work, ordering guarantees must exist so that a waiter
94  * being added to the list is seen by a waker concurrently checking the
95  * list, avoiding scenarios like the following:
96  *
97  * CPU 0                               CPU 1
98  * val = *futex;
99  * sys_futex(WAIT, futex, val);
100  *   futex_wait(futex, val);
101  *   uval = *futex;
102  *                                     *futex = newval;
103  *                                     sys_futex(WAKE, futex);
104  *                                       futex_wake(futex);
105  *                                       if (queue_empty())
106  *                                         return;
107  *   if (uval == val)
108  *     lock(hash_bucket(futex));
109  *     queue();
110  *     unlock(hash_bucket(futex));
111  *     schedule();
112  *
113  * This would cause the waiter on CPU 0 to wait forever because it
114  * missed the transition of the user space value from val to newval
115  * and the waker did not find the waiter in the hash bucket queue.
116  *
117  * The correct serialization ensures that a waiter either observes
118  * the changed user space value before blocking or is woken by a
119  * concurrent waker:
120  *
121  * CPU 0                                 CPU 1
122  * val = *futex;
123  * sys_futex(WAIT, futex, val);
124  *   futex_wait(futex, val);
125  *
126  *   waiters++; (a)
127  *   smp_mb(); (A) <-- paired with -.
128  *                                  |
129  *   lock(hash_bucket(futex));      |
130  *                                  |
131  *   uval = *futex;                 |
132  *                                  |        *futex = newval;
133  *                                  |        sys_futex(WAKE, futex);
134  *                                  |          futex_wake(futex);
135  *                                  |
136  *                                  `--------> smp_mb(); (B)
137  *   if (uval == val)
138  *     queue();
139  *     unlock(hash_bucket(futex));
140  *     schedule();                         if (waiters)
141  *                                           lock(hash_bucket(futex));
142  *   else                                    wake_waiters(futex);
143  *     waiters--; (b)                        unlock(hash_bucket(futex));
144  *
145  * Where (A) orders the waiters increment and the futex value read through
146  * atomic operations (see hb_waiters_inc) and where (B) orders the write
147  * to futex and the waiters read -- this is done by the barriers for both
148  * shared and private futexes in get_futex_key_refs().
149  *
150  * This yields the following case (where X:=waiters, Y:=futex):
151  *
152  *      X = Y = 0
153  *
154  *      w[X]=1          w[Y]=1
155  *      MB              MB
156  *      r[Y]=y          r[X]=x
157  *
158  * Which guarantees that x==0 && y==0 is impossible; which translates back into
159  * the guarantee that we cannot both miss the futex variable change and the
160  * enqueue.
161  *
162  * Note that a new waiter is accounted for in (a) even when the wait call
163  * may later return an error, in which case we backtrack from it in (b).
164  * Refer to the comment in queue_lock().
165  *
166  * Similarly, in order to account for waiters being requeued on another
167  * address we always increment the waiters for the destination bucket before
168  * acquiring the lock, and decrement them again after releasing it - the
169  * code that actually moves the futex(es) between hash buckets (requeue_futex)
170  * does the additionally required waiter count housekeeping. This is done
171  * in double_lock_hb() and double_unlock_hb(), respectively.
172  */
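
/*
 * For illustration only - a hedged userspace sketch (not kernel code) of
 * the protocol described above, assuming the raw syscall(2) wrapper and
 * <linux/futex.h>; the names futex_word, lock_slowpath() and unlock()
 * are invented for this example:
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int futex_word;	// 0 == unlocked, 1 == locked
 *
 *	static void lock_slowpath(void)
 *	{
 *		while (__sync_lock_test_and_set(&futex_word, 1))
 *			// The kernel re-reads futex_word under the hb
 *			// lock and only blocks if it is still 1, so a
 *			// concurrent unlock() cannot be missed.
 *			syscall(SYS_futex, &futex_word, FUTEX_WAIT, 1,
 *				NULL, NULL, 0);
 *	}
 *
 *	static void unlock(void)
 *	{
 *		__sync_lock_release(&futex_word);	// futex_word = 0
 *		syscall(SYS_futex, &futex_word, FUTEX_WAKE, 1,
 *			NULL, NULL, 0);
 *	}
 */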
173
174 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
175 int __read_mostly futex_cmpxchg_enabled;
176 #endif
177
178 /*
179  * Futex flags used to encode options to functions and preserve them across
180  * restarts.
181  */
182 #ifdef CONFIG_MMU
183 # define FLAGS_SHARED           0x01
184 #else
185 /*
186  * NOMMU systems have no per-process address space. Let the compiler
187  * optimize the code away.
188  */
189 # define FLAGS_SHARED           0x00
190 #endif
191 #define FLAGS_CLOCKRT           0x02
192 #define FLAGS_HAS_TIMEOUT       0x04
193
194 /*
195  * Priority Inheritance state:
196  */
197 struct futex_pi_state {
198         /*
199          * list of 'owned' pi_state instances - these have to be
200          * cleaned up in do_exit() if the task exits prematurely:
201          */
202         struct list_head list;
203
204         /*
205          * The PI object:
206          */
207         struct rt_mutex pi_mutex;
208
209         struct task_struct *owner;
210         atomic_t refcount;
211
212         union futex_key key;
213 };
214
215 /**
216  * struct futex_q - The hashed futex queue entry, one per waiting task
217  * @list:               priority-sorted list of tasks waiting on this futex
218  * @task:               the task waiting on the futex
219  * @lock_ptr:           the hash bucket lock
220  * @key:                the key the futex is hashed on
221  * @pi_state:           optional priority inheritance state
222  * @rt_waiter:          rt_waiter storage for use with requeue_pi
223  * @requeue_pi_key:     the requeue_pi target futex key
224  * @bitset:             bitset for the optional bitmasked wakeup
225  *
226  * We use this hashed waitqueue, instead of a normal wait_queue_t, so
227  * we can wake only the relevant ones (hashed queues may be shared).
228  *
229  * A futex_q has a woken state, just like tasks have TASK_RUNNING.
230  * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
231  * The order of wakeup is always to make the first condition true, then
232  * the second.
233  *
234  * PI futexes are typically woken before they are removed from the hash list via
235  * the rt_mutex code. See unqueue_me_pi().
236  */
237 struct futex_q {
238         struct plist_node list;
239
240         struct task_struct *task;
241         spinlock_t *lock_ptr;
242         union futex_key key;
243         struct futex_pi_state *pi_state;
244         struct rt_mutex_waiter *rt_waiter;
245         union futex_key *requeue_pi_key;
246         u32 bitset;
247 };
248
249 static const struct futex_q futex_q_init = {
250         /* list gets initialized in queue_me() */
251         .key = FUTEX_KEY_INIT,
252         .bitset = FUTEX_BITSET_MATCH_ANY
253 };
254
255 /*
256  * Hash buckets are shared by all the futex_keys that hash to the same
257  * location.  Each key may have multiple futex_q structures, one for each task
258  * waiting on a futex.
259  */
260 struct futex_hash_bucket {
261         atomic_t waiters;
262         spinlock_t lock;
263         struct plist_head chain;
264 } ____cacheline_aligned_in_smp;
265
266 /*
267  * The base of the bucket array and its size are always used together
268  * (after initialization only in hash_futex()), so ensure that they
269  * reside in the same cacheline.
270  */
271 static struct {
272         struct futex_hash_bucket *queues;
273         unsigned long            hashsize;
274 } __futex_data __read_mostly __aligned(2*sizeof(long));
275 #define futex_queues   (__futex_data.queues)
276 #define futex_hashsize (__futex_data.hashsize)
277
278
279 /*
280  * Fault injections for futexes.
281  */
282 #ifdef CONFIG_FAIL_FUTEX
283
284 static struct {
285         struct fault_attr attr;
286
287         bool ignore_private;
288 } fail_futex = {
289         .attr = FAULT_ATTR_INITIALIZER,
290         .ignore_private = false,
291 };
292
293 static int __init setup_fail_futex(char *str)
294 {
295         return setup_fault_attr(&fail_futex.attr, str);
296 }
297 __setup("fail_futex=", setup_fail_futex);
298
299 static bool should_fail_futex(bool fshared)
300 {
301         if (fail_futex.ignore_private && !fshared)
302                 return false;
303
304         return should_fail(&fail_futex.attr, 1);
305 }
306
307 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
308
309 static int __init fail_futex_debugfs(void)
310 {
311         umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
312         struct dentry *dir;
313
314         dir = fault_create_debugfs_attr("fail_futex", NULL,
315                                         &fail_futex.attr);
316         if (IS_ERR(dir))
317                 return PTR_ERR(dir);
318
319         if (!debugfs_create_bool("ignore-private", mode, dir,
320                                  &fail_futex.ignore_private)) {
321                 debugfs_remove_recursive(dir);
322                 return -ENOMEM;
323         }
324
325         return 0;
326 }
327
328 late_initcall(fail_futex_debugfs);
329
330 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
331
332 #else
333 static inline bool should_fail_futex(bool fshared)
334 {
335         return false;
336 }
337 #endif /* CONFIG_FAIL_FUTEX */
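
/*
 * Usage sketch (hedged; follows the generic syntax documented under
 * Documentation/fault-injection/ and assumes debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	# At boot: fail_futex=<interval>,<probability>,<space>,<times>
 *	fail_futex=1,10,0,-1
 *
 *	# Or at run time via the attributes created below:
 *	echo 10 > /sys/kernel/debug/fail_futex/probability
 *	echo -1 > /sys/kernel/debug/fail_futex/times
 *	echo 1  > /sys/kernel/debug/fail_futex/ignore-private
 */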
338
339 static inline void futex_get_mm(union futex_key *key)
340 {
341         atomic_inc(&key->private.mm->mm_count);
342         /*
343          * Ensure futex_get_mm() implies a full barrier such that
344          * get_futex_key() implies a full barrier. This is relied upon
345          * as smp_mb(); (B), see the ordering comment above.
346          */
347         smp_mb__after_atomic();
348 }
349
350 /*
351  * Reflects a new waiter being added to the waitqueue.
352  */
353 static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
354 {
355 #ifdef CONFIG_SMP
356         atomic_inc(&hb->waiters);
357         /*
358          * Full barrier (A), see the ordering comment above.
359          */
360         smp_mb__after_atomic();
361 #endif
362 }
363
364 /*
365  * Reflects a waiter being removed from the waitqueue by wakeup
366  * paths.
367  */
368 static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
369 {
370 #ifdef CONFIG_SMP
371         atomic_dec(&hb->waiters);
372 #endif
373 }
374
375 static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
376 {
377 #ifdef CONFIG_SMP
378         return atomic_read(&hb->waiters);
379 #else
380         return 1;
381 #endif
382 }
383
384 /*
385  * We hash on the keys returned from get_futex_key (see below).
386  */
387 static struct futex_hash_bucket *hash_futex(union futex_key *key)
388 {
389         u32 hash = jhash2((u32*)&key->both.word,
390                           (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
391                           key->both.offset);
392         return &futex_queues[hash & (futex_hashsize - 1)];
393 }
394
395 /*
396  * Return 1 if two futex_keys are equal, 0 otherwise.
397  */
398 static inline int match_futex(union futex_key *key1, union futex_key *key2)
399 {
400         return (key1 && key2
401                 && key1->both.word == key2->both.word
402                 && key1->both.ptr == key2->both.ptr
403                 && key1->both.offset == key2->both.offset);
404 }
405
406 /*
407  * Take a reference to the resource addressed by a key.
408  * Can be called while holding spinlocks.
409  *
410  */
411 static void get_futex_key_refs(union futex_key *key)
412 {
413         if (!key->both.ptr)
414                 return;
415
416         /*
417          * On MMU-less systems futexes are always "private" as there is no
418          * per-process address space. We need the memory barrier nevertheless
419          * - yes, arch/blackfin has MMU-less SMP ...
420          */
421         if (!IS_ENABLED(CONFIG_MMU)) {
422                 smp_mb(); /* explicit smp_mb(); (B) */
423                 return;
424         }
425
426         switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
427         case FUT_OFF_INODE:
428                 ihold(key->shared.inode); /* implies smp_mb(); (B) */
429                 break;
430         case FUT_OFF_MMSHARED:
431                 futex_get_mm(key); /* implies smp_mb(); (B) */
432                 break;
433         default:
434                 /*
435                  * Private futexes do not hold a reference on an inode or
436                  * mm; the only purpose of calling get_futex_key_refs here
437                  * is the barrier needed for the lockless waiter check.
438                  */
439                 smp_mb(); /* explicit smp_mb(); (B) */
440         }
441 }
442
443 /*
444  * Drop a reference to the resource addressed by a key.
445  * The hash bucket spinlock must not be held. This is
446  * a no-op for private futexes, see comment in the get
447  * counterpart.
448  */
449 static void drop_futex_key_refs(union futex_key *key)
450 {
451         if (!key->both.ptr) {
452                 /* If we're here then we tried to put a key we failed to get */
453                 WARN_ON_ONCE(1);
454                 return;
455         }
456
457         if (!IS_ENABLED(CONFIG_MMU))
458                 return;
459
460         switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
461         case FUT_OFF_INODE:
462                 iput(key->shared.inode);
463                 break;
464         case FUT_OFF_MMSHARED:
465                 mmdrop(key->private.mm);
466                 break;
467         }
468 }
469
470 /**
471  * get_futex_key() - Get parameters which are the keys for a futex
472  * @uaddr:      virtual address of the futex
473  * @fshared:    0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
474  * @key:        address where result is stored.
475  * @rw:         mapping needs to be read/write (values: VERIFY_READ,
476  *              VERIFY_WRITE)
477  *
478  * Return: a negative error code or 0
479  *
480  * The key words are stored in *key on success.
481  *
482  * For shared mappings, it's (page->index, file_inode(vma->vm_file),
483  * offset_within_page).  For private mappings, it's (uaddr, current->mm).
484  * We can usually work out the index without swapping in the page.
485  *
486  * lock_page() might sleep, the caller should not hold a spinlock.
487  */
488 static int
489 get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
490 {
491         unsigned long address = (unsigned long)uaddr;
492         struct mm_struct *mm = current->mm;
493         struct page *page, *tail;
494         struct address_space *mapping;
495         int err, ro = 0;
496
497         /*
498          * The futex address must be "naturally" aligned.
499          */
500         key->both.offset = address % PAGE_SIZE;
501         if (unlikely((address % sizeof(u32)) != 0))
502                 return -EINVAL;
503         address -= key->both.offset;
504
505         if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
506                 return -EFAULT;
507
508         if (unlikely(should_fail_futex(fshared)))
509                 return -EFAULT;
510
511         /*
512          * PROCESS_PRIVATE futexes are fast.
513          * As the mm cannot disappear under us and the 'key' only needs the
514          * virtual address, we don't even have to find the underlying vma.
515          * Note: We do have to check that 'uaddr' is a valid user address,
516          *       but access_ok() should be faster than find_vma().
517          */
518         if (!fshared) {
519                 key->private.mm = mm;
520                 key->private.address = address;
521                 get_futex_key_refs(key);  /* implies smp_mb(); (B) */
522                 return 0;
523         }
524
525 again:
526         /* Ignore any VERIFY_READ mapping (futex common case) */
527         if (unlikely(should_fail_futex(fshared)))
528                 return -EFAULT;
529
530         err = get_user_pages_fast(address, 1, 1, &page);
531         /*
532          * If write access is not required (eg. FUTEX_WAIT), try
533          * and get read-only access.
534          */
535         if (err == -EFAULT && rw == VERIFY_READ) {
536                 err = get_user_pages_fast(address, 1, 0, &page);
537                 ro = 1;
538         }
539         if (err < 0)
540                 return err;
541         else
542                 err = 0;
543
544         /*
545          * The treatment of mapping from this point on is critical. The page
546          * lock protects many things but in this context the page lock
547          * stabilizes mapping, prevents inode freeing in the shared
548          * file-backed region case and guards against movement to swap cache.
549          *
550          * Strictly speaking the page lock is not needed in all cases being
551          * considered here and it forces unnecessary serialization.
552          * From this point on, mapping will be re-verified if necessary and
553          * the page lock will be acquired only if it is unavoidable.
554          *
555          * Mapping checks require the head page for any compound page so the
556          * head page and mapping are looked up now. For anonymous pages, it
557          * does not matter if the page splits in the future as the key is
558          * based on the address. For filesystem-backed pages, the tail is
559          * required as the index of the page determines the key. For
560          * base pages, there is no tail page and tail == page.
561          */
562         tail = page;
563         page = compound_head(page);
564         mapping = READ_ONCE(page->mapping);
565
566         /*
567          * If page->mapping is NULL, then it cannot be a PageAnon
568          * page; but it might be the ZERO_PAGE or in the gate area or
569          * in a special mapping (all cases which we are happy to fail);
570          * or it may have been a good file page when get_user_pages_fast
571          * found it, but truncated or holepunched or subjected to
572          * invalidate_complete_page2 before we got the page lock (also
573          * cases which we are happy to fail).  And we hold a reference,
574          * so refcount care in invalidate_complete_page's remove_mapping
575          * prevents drop_caches from setting mapping to NULL beneath us.
576          *
577          * The case we do have to guard against is when memory pressure made
578          * shmem_writepage move it from filecache to swapcache beneath us:
579          * an unlikely race, but we do need to retry for page->mapping.
580          */
581         if (unlikely(!mapping)) {
582                 int shmem_swizzled;
583
584                 /*
585                  * Page lock is required to identify which special case above
586                  * applies. If this is really a shmem page then the page lock
587                  * will prevent unexpected transitions.
588                  */
589                 lock_page(page);
590                 shmem_swizzled = PageSwapCache(page) || page->mapping;
591                 unlock_page(page);
592                 put_page(page);
593
594                 if (shmem_swizzled)
595                         goto again;
596
597                 return -EFAULT;
598         }
599
600         /*
601          * Private mappings are handled in a simple way.
602          *
603          * If the futex key is stored on an anonymous page, then the associated
604          * object is the mm which is implicitly pinned by the calling process.
605          *
606          * NOTE: When userspace waits on a MAP_SHARED mapping, even if
607          * it's a read-only handle, it's expected that futexes attach to
608          * the object not the particular process.
609          */
610         if (PageAnon(page)) {
611                 /*
612                  * A RO anonymous page will never change and thus doesn't make
613                  * sense for futex operations.
614                  */
615                 if (unlikely(should_fail_futex(fshared)) || ro) {
616                         err = -EFAULT;
617                         goto out;
618                 }
619
620                 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
621                 key->private.mm = mm;
622                 key->private.address = address;
623
624                 get_futex_key_refs(key); /* implies smp_mb(); (B) */
625
626         } else {
627                 struct inode *inode;
628
629                 /*
630                  * The associated futex object in this case is the inode and
631                  * the page->mapping must be traversed. Ordinarily this should
632                  * be stabilised under page lock but it's not strictly
633                  * necessary in this case as we just want to pin the inode, not
634                  * update the radix tree or anything like that.
635                  *
636                  * The RCU read lock is taken as the inode is finally freed
637                  * under RCU. If the mapping still matches expectations then the
638                  * mapping->host can be safely accessed as being a valid inode.
639                  */
640                 rcu_read_lock();
641
642                 if (READ_ONCE(page->mapping) != mapping) {
643                         rcu_read_unlock();
644                         put_page(page);
645
646                         goto again;
647                 }
648
649                 inode = READ_ONCE(mapping->host);
650                 if (!inode) {
651                         rcu_read_unlock();
652                         put_page(page);
653
654                         goto again;
655                 }
656
657                 /*
658                  * Take a reference unless it is about to be freed. Previously
659                  * this reference was taken by ihold under the page lock
660                  * pinning the inode in place so i_lock was unnecessary. The
661                  * only way for this check to fail is if the inode was
662                  * truncated in parallel so warn for now if this happens.
663                  *
664                  * We are not calling into get_futex_key_refs() in file-backed
665                  * cases, therefore a successful atomic_inc return below will
666                  * guarantee that get_futex_key() will still imply smp_mb(); (B).
667                  */
668                 if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) {
669                         rcu_read_unlock();
670                         put_page(page);
671
672                         goto again;
673                 }
674
675                 /* Should be impossible but let's be paranoid for now */
676                 if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
677                         err = -EFAULT;
678                         rcu_read_unlock();
679                         iput(inode);
680
681                         goto out;
682                 }
683
684                 key->both.offset |= FUT_OFF_INODE; /* inode-based key */
685                 key->shared.inode = inode;
686                 key->shared.pgoff = basepage_index(tail);
687                 rcu_read_unlock();
688         }
689
690 out:
691         put_page(page);
692         return err;
693 }
694
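/*
 * A hedged illustration of the three key forms computed by
 * get_futex_key() above (schematic only; field names as in
 * union futex_key):
 *
 *	PROCESS_PRIVATE:
 *		key->private.mm      = current->mm;
 *		key->private.address = uaddr & PAGE_MASK;
 *		key->both.offset     = uaddr & ~PAGE_MASK;
 *
 *	PROCESS_SHARED, anonymous page:
 *		same fields as above, plus
 *		key->both.offset    |= FUT_OFF_MMSHARED;
 *
 *	PROCESS_SHARED, file-backed page:
 *		key->shared.inode    = mapping->host;
 *		key->shared.pgoff    = basepage_index(tail);
 *		key->both.offset    |= FUT_OFF_INODE;
 *
 * In the inode-keyed case two processes mapping the same file page at
 * different virtual addresses still compute matching keys; mm-keyed
 * futexes can only ever match within one address space.
 */
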
695 static inline void put_futex_key(union futex_key *key)
696 {
697         drop_futex_key_refs(key);
698 }
699
700 /**
701  * fault_in_user_writeable() - Fault in user address and verify RW access
702  * @uaddr:      pointer to faulting user space address
703  *
704  * Slow path to fixup the fault we just took in the atomic write
705  * access to @uaddr.
706  *
707  * We have no generic implementation of a non-destructive write to the
708  * user address. We know that we faulted in the atomic pagefault
709  * disabled section so we can as well avoid the #PF overhead by
710  * calling get_user_pages() right away.
711  */
712 static int fault_in_user_writeable(u32 __user *uaddr)
713 {
714         struct mm_struct *mm = current->mm;
715         int ret;
716
717         down_read(&mm->mmap_sem);
718         ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
719                                FAULT_FLAG_WRITE, NULL);
720         up_read(&mm->mmap_sem);
721
722         return ret < 0 ? ret : 0;
723 }
724
725 /**
726  * futex_top_waiter() - Return the highest priority waiter on a futex
727  * @hb:         the hash bucket the futex_q's reside in
728  * @key:        the futex key (to distinguish it from other futex futex_q's)
729  *
730  * Must be called with the hb lock held.
731  */
732 static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
733                                         union futex_key *key)
734 {
735         struct futex_q *this;
736
737         plist_for_each_entry(this, &hb->chain, list) {
738                 if (match_futex(&this->key, key))
739                         return this;
740         }
741         return NULL;
742 }
743
744 static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
745                                       u32 uval, u32 newval)
746 {
747         int ret;
748
749         pagefault_disable();
750         ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
751         pagefault_enable();
752
753         return ret;
754 }
755
756 static int get_futex_value_locked(u32 *dest, u32 __user *from)
757 {
758         int ret;
759
760         pagefault_disable();
761         ret = __get_user(*dest, from);
762         pagefault_enable();
763
764         return ret ? -EFAULT : 0;
765 }
766
767
768 /*
769  * PI code:
770  */
771 static int refill_pi_state_cache(void)
772 {
773         struct futex_pi_state *pi_state;
774
775         if (likely(current->pi_state_cache))
776                 return 0;
777
778         pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
779
780         if (!pi_state)
781                 return -ENOMEM;
782
783         INIT_LIST_HEAD(&pi_state->list);
784         /* pi_mutex gets initialized later */
785         pi_state->owner = NULL;
786         atomic_set(&pi_state->refcount, 1);
787         pi_state->key = FUTEX_KEY_INIT;
788
789         current->pi_state_cache = pi_state;
790
791         return 0;
792 }
793
794 static struct futex_pi_state * alloc_pi_state(void)
795 {
796         struct futex_pi_state *pi_state = current->pi_state_cache;
797
798         WARN_ON(!pi_state);
799         current->pi_state_cache = NULL;
800
801         return pi_state;
802 }
803
804 /*
805  * Drops a reference to the pi_state object and frees or caches it
806  * when the last reference is gone.
807  *
808  * Must be called with the hb lock held.
809  */
810 static void put_pi_state(struct futex_pi_state *pi_state)
811 {
812         if (!pi_state)
813                 return;
814
815         if (!atomic_dec_and_test(&pi_state->refcount))
816                 return;
817
818         /*
819          * If pi_state->owner is NULL, the owner is most probably dying
820          * and has cleaned up the pi_state already
821          */
822         if (pi_state->owner) {
823                 raw_spin_lock_irq(&pi_state->owner->pi_lock);
824                 list_del_init(&pi_state->list);
825                 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
826
827                 rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
828         }
829
830         if (current->pi_state_cache)
831                 kfree(pi_state);
832         else {
833                 /*
834                  * pi_state->list is already empty.
835                  * clear pi_state->owner.
836                  * refcount is at 0 - put it back to 1.
837                  */
838                 pi_state->owner = NULL;
839                 atomic_set(&pi_state->refcount, 1);
840                 current->pi_state_cache = pi_state;
841         }
842 }
843
844 /*
845  * Look up the task based on what TID userspace gave us.
846  * We don't trust it.
847  */
848 static struct task_struct * futex_find_get_task(pid_t pid)
849 {
850         struct task_struct *p;
851
852         rcu_read_lock();
853         p = find_task_by_vpid(pid);
854         if (p)
855                 get_task_struct(p);
856
857         rcu_read_unlock();
858
859         return p;
860 }
861
862 /*
863  * This task is holding PI mutexes at exit time => bad.
864  * Kernel cleans up PI-state, but userspace is likely hosed.
865  * (Robust-futex cleanup is separate and might save the day for userspace.)
866  */
867 void exit_pi_state_list(struct task_struct *curr)
868 {
869         struct list_head *next, *head = &curr->pi_state_list;
870         struct futex_pi_state *pi_state;
871         struct futex_hash_bucket *hb;
872         union futex_key key = FUTEX_KEY_INIT;
873
874         if (!futex_cmpxchg_enabled)
875                 return;
876         /*
877          * We are a ZOMBIE and nobody can enqueue itself on
878          * pi_state_list anymore, but we have to be careful
879          * versus waiters unqueueing themselves:
880          */
881         raw_spin_lock_irq(&curr->pi_lock);
882         while (!list_empty(head)) {
883
884                 next = head->next;
885                 pi_state = list_entry(next, struct futex_pi_state, list);
886                 key = pi_state->key;
887                 hb = hash_futex(&key);
888                 raw_spin_unlock_irq(&curr->pi_lock);
889
890                 spin_lock(&hb->lock);
891
892                 raw_spin_lock_irq(&curr->pi_lock);
893                 /*
894                  * We dropped the pi-lock, so re-check whether this
895                  * task still owns the PI-state:
896                  */
897                 if (head->next != next) {
898                         spin_unlock(&hb->lock);
899                         continue;
900                 }
901
902                 WARN_ON(pi_state->owner != curr);
903                 WARN_ON(list_empty(&pi_state->list));
904                 list_del_init(&pi_state->list);
905                 pi_state->owner = NULL;
906                 raw_spin_unlock_irq(&curr->pi_lock);
907
908                 rt_mutex_unlock(&pi_state->pi_mutex);
909
910                 spin_unlock(&hb->lock);
911
912                 raw_spin_lock_irq(&curr->pi_lock);
913         }
914         raw_spin_unlock_irq(&curr->pi_lock);
915 }
916
917 /*
918  * We need to check the following states:
919  *
920  *      Waiter | pi_state | pi->owner | uTID      | uODIED | ?
921  *
922  * [1]  NULL   | ---      | ---       | 0         | 0/1    | Valid
923  * [2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
924  *
925  * [3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
926  *
927  * [4]  Found  | Found    | NULL      | 0         | 1      | Valid
928  * [5]  Found  | Found    | NULL      | >0        | 1      | Invalid
929  *
930  * [6]  Found  | Found    | task      | 0         | 1      | Valid
931  *
932  * [7]  Found  | Found    | NULL      | Any       | 0      | Invalid
933  *
934  * [8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
935  * [9]  Found  | Found    | task      | 0         | 0      | Invalid
936  * [10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
937  *
938  * [1]  Indicates that the kernel can acquire the futex atomically. We
939  *      came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
940  *
941  * [2]  Valid, if TID does not belong to a kernel thread. If no matching
942  *      thread is found then it indicates that the owner TID has died.
943  *
944  * [3]  Invalid. The waiter is queued on a non-PI futex.
945  *
946  * [4]  Valid state after exit_robust_list(), which sets the user space
947  *      value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
948  *
949  * [5]  The user space value got manipulated between exit_robust_list()
950  *      and exit_pi_state_list().
951  *
952  * [6]  Valid state after exit_pi_state_list() which sets the new owner in
953  *      the pi_state but cannot access the user space value.
954  *
955  * [7]  pi_state->owner can only be NULL when the OWNER_DIED bit is set.
956  *
957  * [8]  Owner and user space value match
958  *
959  * [9]  There is no transient state which sets the user space TID to 0
960  *      except exit_robust_list(), but this is indicated by the
961  *      FUTEX_OWNER_DIED bit. See [4]
962  *
963  * [10] There is no transient state which leaves owner and user space
964  *      TID out of sync.
965  */
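
/*
 * For reference, a hedged userspace sketch of the PI protocol the table
 * above validates against (raw syscalls, error handling omitted; 'tid'
 * is the caller's cached gettid() value and cmpxchg() stands in for an
 * atomic compare-and-swap on the futex word):
 *
 *	if (cmpxchg(futex_word, 0, tid) != 0)
 *		// Contended: the kernel sets FUTEX_WAITERS, creates the
 *		// pi_state and boosts the owner via the rtmutex.
 *		syscall(SYS_futex, futex_word, FUTEX_LOCK_PI, 0,
 *			NULL, NULL, 0);
 *	...
 *	if (cmpxchg(futex_word, tid, 0) != tid)
 *		// FUTEX_WAITERS and/or FUTEX_OWNER_DIED is set: let the
 *		// kernel hand the lock to the top waiter and rewrite the
 *		// TID in user space.
 *		syscall(SYS_futex, futex_word, FUTEX_UNLOCK_PI, 0,
 *			NULL, NULL, 0);
 */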
966
967 /*
968  * Validate that the existing waiter has a pi_state and sanity check
969  * the pi_state against the user space value. If correct, attach to
970  * it.
971  */
972 static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
973                               struct futex_pi_state **ps)
974 {
975         pid_t pid = uval & FUTEX_TID_MASK;
976
977         /*
978          * Userspace might have messed up non-PI and PI futexes [3]
979          */
980         if (unlikely(!pi_state))
981                 return -EINVAL;
982
983         WARN_ON(!atomic_read(&pi_state->refcount));
984
985         /*
986          * Handle the owner died case:
987          */
988         if (uval & FUTEX_OWNER_DIED) {
989                 /*
990                  * exit_pi_state_list sets owner to NULL and wakes the
991                  * topmost waiter. The task which acquires the
992                  * pi_state->rt_mutex will fixup owner.
993                  */
994                 if (!pi_state->owner) {
995                         /*
996                          * No pi state owner, but the user space TID
997                          * is not 0. Inconsistent state. [5]
998                          */
999                         if (pid)
1000                                 return -EINVAL;
1001                         /*
1002                          * Take a ref on the state and return success. [4]
1003                          */
1004                         goto out_state;
1005                 }
1006
1007                 /*
1008                  * If TID is 0, then either the dying owner has not
1009                  * yet executed exit_pi_state_list() or some waiter
1010                  * acquired the rtmutex in the pi state, but did not
1011                  * yet fixup the TID in user space.
1012                  *
1013                  * Take a ref on the state and return success. [6]
1014                  */
1015                 if (!pid)
1016                         goto out_state;
1017         } else {
1018                 /*
1019                  * If the owner died bit is not set, then the pi_state
1020                  * must have an owner. [7]
1021                  */
1022                 if (!pi_state->owner)
1023                         return -EINVAL;
1024         }
1025
1026         /*
1027          * Bail out if user space manipulated the futex value. If pi
1028          * state exists then the owner TID must be the same as the
1029          * user space TID. [9/10]
1030          */
1031         if (pid != task_pid_vnr(pi_state->owner))
1032                 return -EINVAL;
1033 out_state:
1034         atomic_inc(&pi_state->refcount);
1035         *ps = pi_state;
1036         return 0;
1037 }
1038
1039 /*
1040  * Lookup the task for the TID provided from user space and attach to
1041  * it after doing proper sanity checks.
1042  */
1043 static int attach_to_pi_owner(u32 uval, union futex_key *key,
1044                               struct futex_pi_state **ps)
1045 {
1046         pid_t pid = uval & FUTEX_TID_MASK;
1047         struct futex_pi_state *pi_state;
1048         struct task_struct *p;
1049
1050         /*
1051          * We are the first waiter - try to look up the real owner and attach
1052          * the new pi_state to it, but bail out when TID = 0 [1]
1053          */
1054         if (!pid)
1055                 return -ESRCH;
1056         p = futex_find_get_task(pid);
1057         if (!p)
1058                 return -ESRCH;
1059
1060         if (unlikely(p->flags & PF_KTHREAD)) {
1061                 put_task_struct(p);
1062                 return -EPERM;
1063         }
1064
1065         /*
1066          * We need to look at the task state flags to figure out
1067          * whether the task is exiting. To protect against do_exit()
1068          * changing the task flags, we do this protected by
1069          * p->pi_lock:
1070          */
1071         raw_spin_lock_irq(&p->pi_lock);
1072         if (unlikely(p->flags & PF_EXITING)) {
1073                 /*
1074                  * The task is on the way out. When PF_EXITPIDONE is
1075                  * set, we know that the task has finished the
1076                  * cleanup:
1077                  */
1078                 int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
1079
1080                 raw_spin_unlock_irq(&p->pi_lock);
1081                 put_task_struct(p);
1082                 return ret;
1083         }
1084
1085         /*
1086          * No existing pi state. First waiter. [2]
1087          */
1088         pi_state = alloc_pi_state();
1089
1090         /*
1091          * Initialize the pi_mutex in locked state and make @p
1092          * the owner of it:
1093          */
1094         rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
1095
1096         /* Store the key for possible exit cleanups: */
1097         pi_state->key = *key;
1098
1099         WARN_ON(!list_empty(&pi_state->list));
1100         list_add(&pi_state->list, &p->pi_state_list);
1101         pi_state->owner = p;
1102         raw_spin_unlock_irq(&p->pi_lock);
1103
1104         put_task_struct(p);
1105
1106         *ps = pi_state;
1107
1108         return 0;
1109 }
1110
1111 static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
1112                            union futex_key *key, struct futex_pi_state **ps)
1113 {
1114         struct futex_q *match = futex_top_waiter(hb, key);
1115
1116         /*
1117          * If there is a waiter on that futex, validate it and
1118          * attach to the pi_state when the validation succeeds.
1119          */
1120         if (match)
1121                 return attach_to_pi_state(uval, match->pi_state, ps);
1122
1123         /*
1124          * We are the first waiter - try to look up the owner based on
1125          * @uval and attach to it.
1126          */
1127         return attach_to_pi_owner(uval, key, ps);
1128 }
1129
1130 static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
1131 {
1132         u32 uninitialized_var(curval);
1133
1134         if (unlikely(should_fail_futex(true)))
1135                 return -EFAULT;
1136
1137         if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
1138                 return -EFAULT;
1139
1140         /* If the user space value changed, let the caller retry */
1141         return curval != uval ? -EAGAIN : 0;
1142 }
1143
1144 /**
1145  * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
1146  * @uaddr:              the pi futex user address
1147  * @hb:                 the pi futex hash bucket
1148  * @key:                the futex key associated with uaddr and hb
1149  * @ps:                 the pi_state pointer where we store the result of the
1150  *                      lookup
1151  * @task:               the task to perform the atomic lock work for.  This will
1152  *                      be "current" except in the case of requeue pi.
1153  * @set_waiters:        force setting the FUTEX_WAITERS bit (1) or not (0)
1154  *
1155  * Return:
1156  *  0 - ready to wait;
1157  *  1 - acquired the lock;
1158  * <0 - error
1159  *
1160  * The hb->lock and futex_key refs shall be held by the caller.
1161  */
1162 static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
1163                                 union futex_key *key,
1164                                 struct futex_pi_state **ps,
1165                                 struct task_struct *task, int set_waiters)
1166 {
1167         u32 uval, newval, vpid = task_pid_vnr(task);
1168         struct futex_q *match;
1169         int ret;
1170
1171         /*
1172          * Read the user space value first so we can validate a few
1173          * things before proceeding further.
1174          */
1175         if (get_futex_value_locked(&uval, uaddr))
1176                 return -EFAULT;
1177
1178         if (unlikely(should_fail_futex(true)))
1179                 return -EFAULT;
1180
1181         /*
1182          * Detect deadlocks.
1183          */
1184         if (unlikely((uval & FUTEX_TID_MASK) == vpid))
1185                 return -EDEADLK;
1186
1187         if (unlikely(should_fail_futex(true)))
1188                 return -EDEADLK;
1189
1190         /*
1191          * Lookup existing state first. If it exists, try to attach to
1192          * its pi_state.
1193          */
1194         match = futex_top_waiter(hb, key);
1195         if (match)
1196                 return attach_to_pi_state(uval, match->pi_state, ps);
1197
1198         /*
1199          * No waiter and user TID is 0. We are here because the
1200          * waiters or the owner died bit is set, we were called from
1201          * requeue_cmp_pi, or for whatever other reason something took
1202          * the syscall path.
1203          */
1204         if (!(uval & FUTEX_TID_MASK)) {
1205                 /*
1206                  * We take over the futex. No other waiters and the user space
1207                  * TID is 0. We preserve the owner died bit.
1208                  */
1209                 newval = uval & FUTEX_OWNER_DIED;
1210                 newval |= vpid;
1211
1212                 /* The futex requeue_pi code can enforce the waiters bit */
1213                 if (set_waiters)
1214                         newval |= FUTEX_WAITERS;
1215
1216                 ret = lock_pi_update_atomic(uaddr, uval, newval);
1217                 /* If the take over worked, return 1 */
1218                 return ret < 0 ? ret : 1;
1219         }
1220
1221         /*
1222          * First waiter. Set the waiters bit before attaching ourselves to
1223          * the owner. If the owner tries to unlock, it will be forced into
1224          * the kernel and blocked on hb->lock.
1225          */
1226         newval = uval | FUTEX_WAITERS;
1227         ret = lock_pi_update_atomic(uaddr, uval, newval);
1228         if (ret)
1229                 return ret;
1230         /*
1231          * If the update of the user space value succeeded, we try to
1232          * attach to the owner. If that fails, no harm done, we only
1233          * set the FUTEX_WAITERS bit in the user space variable.
1234          */
1235         return attach_to_pi_owner(uval, key, ps);
1236 }
1237
1238 /**
1239  * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
1240  * @q:  The futex_q to unqueue
1241  *
1242  * The q->lock_ptr must not be NULL and must be held by the caller.
1243  */
1244 static void __unqueue_futex(struct futex_q *q)
1245 {
1246         struct futex_hash_bucket *hb;
1247
1248         if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
1249             || WARN_ON(plist_node_empty(&q->list)))
1250                 return;
1251
1252         hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
1253         plist_del(&q->list, &hb->chain);
1254         hb_waiters_dec(hb);
1255 }
1256
1257 /*
1258  * The hash bucket lock must be held when this is called.
1259  * Afterwards, the futex_q must not be accessed. Callers
1260  * must ensure to later call wake_up_q() for the actual
1261  * wakeups to occur.
1262  */
1263 static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
1264 {
1265         struct task_struct *p = q->task;
1266
1267         if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
1268                 return;
1269
1270         /*
1271          * Queue the task for later wakeup for after we've released
1272          * the hb->lock. wake_q_add() grabs reference to p.
1273          */
1274         wake_q_add(wake_q, p);
1275         __unqueue_futex(q);
1276         /*
1277          * The waiting task can free the futex_q as soon as
1278          * q->lock_ptr = NULL is written, without taking any locks. A
1279          * memory barrier is required here to prevent the following
1280          * store to lock_ptr from getting ahead of the plist_del.
1281          */
1282         smp_wmb();
1283         q->lock_ptr = NULL;
1284 }
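
/*
 * The waiter side (unqueue_me(), outside this excerpt) pairs with the
 * smp_wmb() above roughly like this hedged sketch:
 *
 *	lock_ptr = READ_ONCE(q->lock_ptr);
 *	if (lock_ptr) {
 *		spin_lock(lock_ptr);
 *		// Re-check q->lock_ptr under the lock: a concurrent
 *		// mark_wake_futex() may have NULLed it, or a requeue
 *		// may have changed it, between the read and the lock.
 *		...
 *	}
 */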
1285
1286 static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
1287                          struct futex_hash_bucket *hb)
1288 {
1289         struct task_struct *new_owner;
1290         struct futex_pi_state *pi_state = this->pi_state;
1291         u32 uninitialized_var(curval), newval;
1292         WAKE_Q(wake_q);
1293         bool deboost;
1294         int ret = 0;
1295
1296         if (!pi_state)
1297                 return -EINVAL;
1298
1299         /*
1300          * If current does not own the pi_state then the futex is
1301          * inconsistent and user space fiddled with the futex value.
1302          */
1303         if (pi_state->owner != current)
1304                 return -EINVAL;
1305
1306         raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
1307         new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
1308
1309         /*
1310          * It is possible that the next waiter (the one that brought
1311          * this owner to the kernel) timed out and is no longer
1312          * waiting on the lock.
1313          */
1314         if (!new_owner)
1315                 new_owner = this->task;
1316
1317         /*
1318          * We pass it to the next owner. The WAITERS bit is always
1319          * kept enabled while there is PI state around. We cleanup the
1320          * owner died bit, because we are the owner.
1321          */
1322         newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
1323
1324         if (unlikely(should_fail_futex(true)))
1325                 ret = -EFAULT;
1326
1327         if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
1328                 ret = -EFAULT;
1329         } else if (curval != uval) {
1330                 /*
1331                  * If an unconditional UNLOCK_PI operation (user space did not
1332                  * try the TID->0 transition) raced with a waiter setting the
1333                  * FUTEX_WAITERS flag between get_user() and locking the hash
1334                  * bucket lock, retry the operation.
1335                  */
1336                 if ((FUTEX_TID_MASK & curval) == uval)
1337                         ret = -EAGAIN;
1338                 else
1339                         ret = -EINVAL;
1340         }
1341         if (ret) {
1342                 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1343                 return ret;
1344         }
1345
1346         raw_spin_lock(&pi_state->owner->pi_lock);
1347         WARN_ON(list_empty(&pi_state->list));
1348         list_del_init(&pi_state->list);
1349         raw_spin_unlock(&pi_state->owner->pi_lock);
1350
1351         raw_spin_lock(&new_owner->pi_lock);
1352         WARN_ON(!list_empty(&pi_state->list));
1353         list_add(&pi_state->list, &new_owner->pi_state_list);
1354         pi_state->owner = new_owner;
1355         raw_spin_unlock(&new_owner->pi_lock);
1356
1357         raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1358
1359         deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
1360
1361         /*
1362          * First unlock HB so the waiter does not spin on it once it is woken
1363          * up. Second, wake up the waiter before the priority is adjusted. If we
1364          * deboost first (and lose our higher priority), then the task might get
1365          * scheduled away before the wake up can take place.
1366          */
1367         spin_unlock(&hb->lock);
1368         wake_up_q(&wake_q);
1369         if (deboost)
1370                 rt_mutex_adjust_prio(current);
1371
1372         return 0;
1373 }
1374
1375 /*
1376  * Express the locking dependencies for lockdep:
1377  */
1378 static inline void
1379 double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1380 {
1381         if (hb1 <= hb2) {
1382                 spin_lock(&hb1->lock);
1383                 if (hb1 < hb2)
1384                         spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
1385         } else { /* hb1 > hb2 */
1386                 spin_lock(&hb2->lock);
1387                 spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
1388         }
1389 }
1390
1391 static inline void
1392 double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1393 {
1394         spin_unlock(&hb1->lock);
1395         if (hb1 != hb2)
1396                 spin_unlock(&hb2->lock);
1397 }
1398
1399 /*
1400  * Wake up waiters matching bitset queued on this futex (uaddr).
1401  */
1402 static int
1403 futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
1404 {
1405         struct futex_hash_bucket *hb;
1406         struct futex_q *this, *next;
1407         union futex_key key = FUTEX_KEY_INIT;
1408         int ret;
1409         WAKE_Q(wake_q);
1410
1411         if (!bitset)
1412                 return -EINVAL;
1413
1414         ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
1415         if (unlikely(ret != 0))
1416                 goto out;
1417
1418         hb = hash_futex(&key);
1419
1420         /* Make sure we really have tasks to wake up */
1421         if (!hb_waiters_pending(hb))
1422                 goto out_put_key;
1423
1424         spin_lock(&hb->lock);
1425
1426         plist_for_each_entry_safe(this, next, &hb->chain, list) {
1427                 if (match_futex (&this->key, &key)) {
1428                         if (this->pi_state || this->rt_waiter) {
1429                                 ret = -EINVAL;
1430                                 break;
1431                         }
1432
1433                         /* Check if one of the bits is set in both bitsets */
1434                         if (!(this->bitset & bitset))
1435                                 continue;
1436
1437                         mark_wake_futex(&wake_q, this);
1438                         if (++ret >= nr_wake)
1439                                 break;
1440                 }
1441         }
1442
1443         spin_unlock(&hb->lock);
1444         wake_up_q(&wake_q);
1445 out_put_key:
1446         put_futex_key(&key);
1447 out:
1448         return ret;
1449 }
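
/*
 * Hedged userspace sketch of the bitset matching above: a waiter picks
 * a "channel" mask with FUTEX_WAIT_BITSET and is only woken by wakers
 * whose mask intersects it (the bitset travels in val3, the last
 * syscall argument):
 *
 *	syscall(SYS_futex, &w, FUTEX_WAIT_BITSET, val, NULL, NULL, 0x1);
 *	syscall(SYS_futex, &w, FUTEX_WAKE_BITSET, INT_MAX, NULL, NULL, 0x1);
 *
 * Plain FUTEX_WAIT/FUTEX_WAKE pass FUTEX_BITSET_MATCH_ANY and thus
 * always match.
 */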
1450
1451 /*
1452  * Wake up all waiters hashed on the physical page that is mapped
1453  * to this virtual address:
1454  */
1455 static int
1456 futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
1457               int nr_wake, int nr_wake2, int op)
1458 {
1459         union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1460         struct futex_hash_bucket *hb1, *hb2;
1461         struct futex_q *this, *next;
1462         int ret, op_ret;
1463         WAKE_Q(wake_q);
1464
1465 retry:
1466         ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
1467         if (unlikely(ret != 0))
1468                 goto out;
1469         ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
1470         if (unlikely(ret != 0))
1471                 goto out_put_key1;
1472
1473         hb1 = hash_futex(&key1);
1474         hb2 = hash_futex(&key2);
1475
1476 retry_private:
1477         double_lock_hb(hb1, hb2);
1478         op_ret = futex_atomic_op_inuser(op, uaddr2);
1479         if (unlikely(op_ret < 0)) {
1480
1481                 double_unlock_hb(hb1, hb2);
1482
1483 #ifndef CONFIG_MMU
1484                 /*
1485                  * we don't get EFAULT from MMU faults if we don't have an MMU,
1486                  * but we might get them from range checking
1487                  */
1488                 ret = op_ret;
1489                 goto out_put_keys;
1490 #endif
1491
1492                 if (unlikely(op_ret != -EFAULT)) {
1493                         ret = op_ret;
1494                         goto out_put_keys;
1495                 }
1496
1497                 ret = fault_in_user_writeable(uaddr2);
1498                 if (ret)
1499                         goto out_put_keys;
1500
1501                 if (!(flags & FLAGS_SHARED))
1502                         goto retry_private;
1503
1504                 put_futex_key(&key2);
1505                 put_futex_key(&key1);
1506                 goto retry;
1507         }
1508
1509         plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1510                 if (match_futex (&this->key, &key1)) {
1511                         if (this->pi_state || this->rt_waiter) {
1512                                 ret = -EINVAL;
1513                                 goto out_unlock;
1514                         }
1515                         mark_wake_futex(&wake_q, this);
1516                         if (++ret >= nr_wake)
1517                                 break;
1518                 }
1519         }
1520
1521         if (op_ret > 0) {
1522                 op_ret = 0;
1523                 plist_for_each_entry_safe(this, next, &hb2->chain, list) {
1524                         if (match_futex (&this->key, &key2)) {
1525                                 if (this->pi_state || this->rt_waiter) {
1526                                         ret = -EINVAL;
1527                                         goto out_unlock;
1528                                 }
1529                                 mark_wake_futex(&wake_q, this);
1530                                 if (++op_ret >= nr_wake2)
1531                                         break;
1532                         }
1533                 }
1534                 ret += op_ret;
1535         }
1536
1537 out_unlock:
1538         double_unlock_hb(hb1, hb2);
1539         wake_up_q(&wake_q);
1540 out_put_keys:
1541         put_futex_key(&key2);
1542 out_put_key1:
1543         put_futex_key(&key1);
1544 out:
1545         return ret;
1546 }
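
/*
 * A minimal user space sketch of the operation above, assuming the raw
 * syscall(2) interface; uaddr1/uaddr2 and the wake counts are
 * illustrative. The kernel atomically performs *uaddr2 |= 1
 * (FUTEX_OP_OR with oparg 1), always wakes up to nr_wake waiters on
 * uaddr1 and, if the old value of *uaddr2 compared equal to 0
 * (FUTEX_OP_CMP_EQ), also wakes up to nr_wake2 waiters on uaddr2.
 * Note that nr_wake2 travels in the otherwise unused timeout slot:
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static long wake_op(int *uaddr1, int *uaddr2)
 *	{
 *		int op = FUTEX_OP(FUTEX_OP_OR, 1, FUTEX_OP_CMP_EQ, 0);
 *		unsigned long nr_wake2 = 1;
 *
 *		return syscall(SYS_futex, uaddr1, FUTEX_WAKE_OP, 1,
 *			       (void *)nr_wake2, uaddr2, op);
 *	}
 */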
1547
1548 /**
1549  * requeue_futex() - Requeue a futex_q from one hb to another
1550  * @q:          the futex_q to requeue
1551  * @hb1:        the source hash_bucket
1552  * @hb2:        the target hash_bucket
1553  * @key2:       the new key for the requeued futex_q
1554  */
1555 static inline
1556 void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1557                    struct futex_hash_bucket *hb2, union futex_key *key2)
1558 {
1559
1560         /*
1561          * If key1 and key2 hash to the same bucket, no need to
1562          * requeue.
1563          */
1564         if (likely(&hb1->chain != &hb2->chain)) {
1565                 plist_del(&q->list, &hb1->chain);
1566                 hb_waiters_dec(hb1);
1567                 hb_waiters_inc(hb2);
1568                 plist_add(&q->list, &hb2->chain);
1569                 q->lock_ptr = &hb2->lock;
1570         }
1571         get_futex_key_refs(key2);
1572         q->key = *key2;
1573 }
1574
1575 /**
1576  * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
1577  * @q:          the futex_q
1578  * @key:        the key of the requeue target futex
1579  * @hb:         the hash_bucket of the requeue target futex
1580  *
1581  * During futex_requeue, with requeue_pi=1, it is possible to acquire the
1582  * target futex if it is uncontended or via a lock steal.  Set the futex_q key
1583  * to the requeue target futex so the waiter can detect the wakeup on the right
1584  * futex, but remove it from the hb and NULL the rt_waiter so it can detect
1585  * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
1586  * to protect access to the pi_state to fixup the owner later.  Must be called
1587  * with both q->lock_ptr and hb->lock held.
1588  */
1589 static inline
1590 void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1591                            struct futex_hash_bucket *hb)
1592 {
1593         get_futex_key_refs(key);
1594         q->key = *key;
1595
1596         __unqueue_futex(q);
1597
1598         WARN_ON(!q->rt_waiter);
1599         q->rt_waiter = NULL;
1600
1601         q->lock_ptr = &hb->lock;
1602
1603         wake_up_state(q->task, TASK_NORMAL);
1604 }
1605
1606 /**
1607  * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
1608  * @pifutex:            the user address of the to futex
1609  * @hb1:                the from futex hash bucket, must be locked by the caller
1610  * @hb2:                the to futex hash bucket, must be locked by the caller
1611  * @key1:               the from futex key
1612  * @key2:               the to futex key
1613  * @ps:                 address to store the pi_state pointer
1614  * @set_waiters:        force setting the FUTEX_WAITERS bit (1) or not (0)
1615  *
1616  * Try and get the lock on behalf of the top waiter if we can do it atomically.
1617  * Wake the top waiter if we succeed.  If the caller specified set_waiters,
1618  * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
1619  * hb1 and hb2 must be held by the caller.
1620  *
1621  * Return:
1622  *  0 - failed to acquire the lock atomically;
1623  * >0 - acquired the lock, return value is vpid of the top_waiter
1624  * <0 - error
1625  */
1626 static int futex_proxy_trylock_atomic(u32 __user *pifutex,
1627                                  struct futex_hash_bucket *hb1,
1628                                  struct futex_hash_bucket *hb2,
1629                                  union futex_key *key1, union futex_key *key2,
1630                                  struct futex_pi_state **ps, int set_waiters)
1631 {
1632         struct futex_q *top_waiter = NULL;
1633         u32 curval;
1634         int ret, vpid;
1635
1636         if (get_futex_value_locked(&curval, pifutex))
1637                 return -EFAULT;
1638
1639         if (unlikely(should_fail_futex(true)))
1640                 return -EFAULT;
1641
1642         /*
1643          * Find the top_waiter and determine if there are additional waiters.
1644          * If the caller intends to requeue more than 1 waiter to pifutex,
1645          * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
1646          * as we have means to handle the possible fault.  If not, don't set
1647  * the bit unnecessarily as it will force the subsequent unlock to enter
1648          * the kernel.
1649          */
1650         top_waiter = futex_top_waiter(hb1, key1);
1651
1652         /* There are no waiters, nothing for us to do. */
1653         if (!top_waiter)
1654                 return 0;
1655
1656         /* Ensure we requeue to the expected futex. */
1657         if (!match_futex(top_waiter->requeue_pi_key, key2))
1658                 return -EINVAL;
1659
1660         /*
1661          * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
1662          * the contended case or if set_waiters is 1.  The pi_state is returned
1663          * in ps in contended cases.
1664          */
1665         vpid = task_pid_vnr(top_waiter->task);
1666         ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
1667                                    set_waiters);
1668         if (ret == 1) {
1669                 requeue_pi_wake_futex(top_waiter, key2, hb2);
1670                 return vpid;
1671         }
1672         return ret;
1673 }
1674
1675 /**
1676  * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
1677  * @uaddr1:     source futex user address
1678  * @flags:      futex flags (FLAGS_SHARED, etc.)
1679  * @uaddr2:     target futex user address
1680  * @nr_wake:    number of waiters to wake (must be 1 for requeue_pi)
1681  * @nr_requeue: number of waiters to requeue (0-INT_MAX)
1682  * @cmpval:     @uaddr1 expected value (or %NULL)
1683  * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
1684  *              pi futex (pi to pi requeue is not supported)
1685  *
1686  * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
1687  * uaddr2 atomically on behalf of the top waiter.
1688  *
1689  * Return:
1690  * >=0 - on success, the number of tasks requeued or woken;
1691  *  <0 - on error
1692  */
1693 static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
1694                          u32 __user *uaddr2, int nr_wake, int nr_requeue,
1695                          u32 *cmpval, int requeue_pi)
1696 {
1697         union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1698         int drop_count = 0, task_count = 0, ret;
1699         struct futex_pi_state *pi_state = NULL;
1700         struct futex_hash_bucket *hb1, *hb2;
1701         struct futex_q *this, *next;
1702         WAKE_Q(wake_q);
1703
1704         if (requeue_pi) {
1705                 /*
1706                  * Requeue PI only works on two distinct uaddrs. This
1707                  * check is only valid for private futexes. See below.
1708                  */
1709                 if (uaddr1 == uaddr2)
1710                         return -EINVAL;
1711
1712                 /*
1713                  * requeue_pi requires a pi_state, try to allocate it now
1714                  * without any locks in case it fails.
1715                  */
1716                 if (refill_pi_state_cache())
1717                         return -ENOMEM;
1718                 /*
1719                  * requeue_pi must wake as many tasks as it can, up to nr_wake
1720                  * + nr_requeue, since it acquires the rt_mutex prior to
1721                  * returning to userspace, so as to not leave the rt_mutex with
1722                  * waiters and no owner.  However, second and third wake-ups
1723                  * cannot be predicted as they involve race conditions with the
1724                  * first wake and a fault while looking up the pi_state.  Both
1725                  * pthread_cond_signal() and pthread_cond_broadcast() should
1726                  * use nr_wake=1.
1727                  */
1728                 if (nr_wake != 1)
1729                         return -EINVAL;
1730         }
1731
1732 retry:
1733         ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
1734         if (unlikely(ret != 0))
1735                 goto out;
1736         ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
1737                             requeue_pi ? VERIFY_WRITE : VERIFY_READ);
1738         if (unlikely(ret != 0))
1739                 goto out_put_key1;
1740
1741         /*
1742          * The check above which compares uaddrs is not sufficient for
1743          * shared futexes. We need to compare the keys:
1744          */
1745         if (requeue_pi && match_futex(&key1, &key2)) {
1746                 ret = -EINVAL;
1747                 goto out_put_keys;
1748         }
1749
1750         hb1 = hash_futex(&key1);
1751         hb2 = hash_futex(&key2);
1752
1753 retry_private:
1754         hb_waiters_inc(hb2);
1755         double_lock_hb(hb1, hb2);
1756
1757         if (likely(cmpval != NULL)) {
1758                 u32 curval;
1759
1760                 ret = get_futex_value_locked(&curval, uaddr1);
1761
1762                 if (unlikely(ret)) {
1763                         double_unlock_hb(hb1, hb2);
1764                         hb_waiters_dec(hb2);
1765
1766                         ret = get_user(curval, uaddr1);
1767                         if (ret)
1768                                 goto out_put_keys;
1769
1770                         if (!(flags & FLAGS_SHARED))
1771                                 goto retry_private;
1772
1773                         put_futex_key(&key2);
1774                         put_futex_key(&key1);
1775                         goto retry;
1776                 }
1777                 if (curval != *cmpval) {
1778                         ret = -EAGAIN;
1779                         goto out_unlock;
1780                 }
1781         }
1782
1783         if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
1784                 /*
1785                  * Attempt to acquire uaddr2 and wake the top waiter. If we
1786                  * intend to requeue waiters, force setting the FUTEX_WAITERS
1787                  * bit.  We force this here where we are able to easily handle
1788                  * faults rather than in the requeue loop below.
1789                  */
1790                 ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
1791                                                  &key2, &pi_state, nr_requeue);
1792
1793                 /*
1794                  * At this point the top_waiter has either taken uaddr2 or is
1795                  * waiting on it.  If the former, then the pi_state will not
1796                  * exist yet, look it up one more time to ensure we have a
1797                  * reference to it. If the lock was taken, ret contains the
1798                  * vpid of the top waiter task.
1799                  * If the lock was not taken, we have pi_state and an initial
1800                  * refcount on it. In case of an error we have nothing.
1801                  */
1802                 if (ret > 0) {
1803                         WARN_ON(pi_state);
1804                         drop_count++;
1805                         task_count++;
1806                         /*
1807                          * If we acquired the lock, then the user space value
1808                          * of uaddr2 should be vpid. It cannot be changed by
1809                          * the top waiter as it is blocked on hb2 lock if it
1810                          * tries to do so. If something fiddled with it behind
1811                          * our back the pi state lookup might unearth it. So
1812                          * we use the known value rather than rereading and
1813                          * handing potential crap to lookup_pi_state.
1814                          *
1815                          * If that call succeeds then we have pi_state and an
1816                          * initial refcount on it.
1817                          */
1818                         ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
1819                 }
1820
1821                 switch (ret) {
1822                 case 0:
1823                         /* We hold a reference on the pi state. */
1824                         break;
1825
1826                         /* If the above failed, then pi_state is NULL */
1827                 case -EFAULT:
1828                         double_unlock_hb(hb1, hb2);
1829                         hb_waiters_dec(hb2);
1830                         put_futex_key(&key2);
1831                         put_futex_key(&key1);
1832                         ret = fault_in_user_writeable(uaddr2);
1833                         if (!ret)
1834                                 goto retry;
1835                         goto out;
1836                 case -EAGAIN:
1837                         /*
1838                          * Two reasons for this:
1839                          * - Owner is exiting and we just wait for the
1840                          *   exit to complete.
1841                          * - The user space value changed.
1842                          */
1843                         double_unlock_hb(hb1, hb2);
1844                         hb_waiters_dec(hb2);
1845                         put_futex_key(&key2);
1846                         put_futex_key(&key1);
1847                         cond_resched();
1848                         goto retry;
1849                 default:
1850                         goto out_unlock;
1851                 }
1852         }
1853
1854         plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1855                 if (task_count - nr_wake >= nr_requeue)
1856                         break;
1857
1858                 if (!match_futex(&this->key, &key1))
1859                         continue;
1860
1861                 /*
1862                  * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
1863                  * be paired with each other and no other futex ops.
1864                  *
1865                  * We should never be requeueing a futex_q with a pi_state,
1866                  * which is awaiting a futex_unlock_pi().
1867                  */
1868                 if ((requeue_pi && !this->rt_waiter) ||
1869                     (!requeue_pi && this->rt_waiter) ||
1870                     this->pi_state) {
1871                         ret = -EINVAL;
1872                         break;
1873                 }
1874
1875                 /*
1876                  * Wake nr_wake waiters.  For requeue_pi, if we acquired the
1877                  * lock, we already woke the top_waiter.  If not, it will be
1878                  * woken by futex_unlock_pi().
1879                  */
1880                 if (++task_count <= nr_wake && !requeue_pi) {
1881                         mark_wake_futex(&wake_q, this);
1882                         continue;
1883                 }
1884
1885                 /* Ensure we requeue to the expected futex for requeue_pi. */
1886                 if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
1887                         ret = -EINVAL;
1888                         break;
1889                 }
1890
1891                 /*
1892                  * Requeue nr_requeue waiters and possibly one more in the case
1893                  * of requeue_pi if we couldn't acquire the lock atomically.
1894                  */
1895                 if (requeue_pi) {
1896                         /*
1897                          * Prepare the waiter to take the rt_mutex. Take a
1898                          * refcount on the pi_state and store the pointer in
1899                          * the futex_q object of the waiter.
1900                          */
1901                         atomic_inc(&pi_state->refcount);
1902                         this->pi_state = pi_state;
1903                         ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
1904                                                         this->rt_waiter,
1905                                                         this->task);
1906                         if (ret == 1) {
1907                                 /*
1908                                  * We got the lock. We do neither drop the
1909                                  * refcount on pi_state nor clear
1910                                  * this->pi_state because the waiter needs the
1911                                  * pi_state for cleaning up the user space
1912                                  * value. It will drop the refcount after
1913                                  * doing so.
1914                                  */
1915                                 requeue_pi_wake_futex(this, &key2, hb2);
1916                                 drop_count++;
1917                                 continue;
1918                         } else if (ret) {
1919                                 /*
1920                                  * rt_mutex_start_proxy_lock() detected a
1921                                  * potential deadlock when we tried to queue
1922                                  * that waiter. Drop the pi_state reference
1923                                  * which we took above and remove the pointer
1924                                  * to the state from the waiters futex_q
1925                                  * object.
1926                                  */
1927                                 this->pi_state = NULL;
1928                                 put_pi_state(pi_state);
1929                                 /*
1930                                  * We stop queueing more waiters and let user
1931                                  * space deal with the mess.
1932                                  */
1933                                 break;
1934                         }
1935                 }
1936                 requeue_futex(this, hb1, hb2, &key2);
1937                 drop_count++;
1938         }
1939
1940         /*
1941          * We took an extra initial reference to the pi_state either
1942          * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
1943          * need to drop it here again.
1944          */
1945         put_pi_state(pi_state);
1946
1947 out_unlock:
1948         double_unlock_hb(hb1, hb2);
1949         wake_up_q(&wake_q);
1950         hb_waiters_dec(hb2);
1951
1952         /*
1953          * drop_futex_key_refs() must be called outside the spinlocks. During
1954          * the requeue we moved futex_q's from the hash bucket at key1 to the
1955          * one at key2 and updated their key pointer.  We no longer need to
1956          * hold the references to key1.
1957          */
1958         while (--drop_count >= 0)
1959                 drop_futex_key_refs(&key1);
1960
1961 out_put_keys:
1962         put_futex_key(&key2);
1963 out_put_key1:
1964         put_futex_key(&key1);
1965 out:
1966         return ret ? ret : task_count;
1967 }
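
/*
 * A minimal user space sketch of the non-pi requeue path, assuming the
 * raw syscall(2) interface; cond and mutex are illustrative futex
 * words of a condvar-like construct. Rather than waking all waiters
 * and letting them pile up on the mutex (a thundering herd), wake one
 * and requeue the rest onto the mutex futex. nr_requeue travels in
 * the timeout slot and val3 carries the expected value of *cond, i.e.
 * cmpval above:
 *
 *	#include <limits.h>
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static long cond_broadcast(int *cond, int *mutex, int cond_val)
 *	{
 *		return syscall(SYS_futex, cond, FUTEX_CMP_REQUEUE, 1,
 *			       (void *)(unsigned long)INT_MAX, mutex,
 *			       cond_val);
 *	}
 *
 * An EAGAIN failure corresponds to the curval != *cmpval check above:
 * *cond changed under the caller, who must re-evaluate and retry.
 */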
1968
1969 /* The key must be already stored in q->key. */
1970 static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
1971         __acquires(&hb->lock)
1972 {
1973         struct futex_hash_bucket *hb;
1974
1975         hb = hash_futex(&q->key);
1976
1977         /*
1978          * Increment the counter before taking the lock so that
1979          * a potential waker won't miss a to-be-slept task that is
1980          * waiting for the spinlock. This is safe as all queue_lock()
1981          * users end up calling queue_me(). Similarly, for housekeeping,
1982          * decrement the counter at queue_unlock() when some error has
1983          * occurred and we don't end up adding the task to the list.
1984          */
1985         hb_waiters_inc(hb);
1986
1987         q->lock_ptr = &hb->lock;
1988
1989         spin_lock(&hb->lock); /* implies smp_mb(); (A) */
1990         return hb;
1991 }
1992
1993 static inline void
1994 queue_unlock(struct futex_hash_bucket *hb)
1995         __releases(&hb->lock)
1996 {
1997         spin_unlock(&hb->lock);
1998         hb_waiters_dec(hb);
1999 }
2000
2001 /**
2002  * queue_me() - Enqueue the futex_q on the futex_hash_bucket
2003  * @q:  The futex_q to enqueue
2004  * @hb: The destination hash bucket
2005  *
2006  * The hb->lock must be held by the caller, and is released here. A call to
2007  * queue_me() is typically paired with exactly one call to unqueue_me().  The
2008  * exceptions involve the PI related operations, which may use unqueue_me_pi()
2009  * or nothing if the unqueue is done as part of the wake process and the unqueue
2010  * state is implicit in the state of the woken task (see
2011  * futex_wait_requeue_pi() for an example).
2012  */
2013 static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
2014         __releases(&hb->lock)
2015 {
2016         int prio;
2017
2018         /*
2019          * The priority used to register this element is
2020          * - either the real thread-priority for the real-time threads
2021          * (i.e. threads with a priority lower than MAX_RT_PRIO)
2022          * - or MAX_RT_PRIO for non-RT threads.
2023          * Thus, all RT-threads are woken first in priority order, and
2024          * the others are woken last, in FIFO order.
2025          */
2026         prio = min(current->normal_prio, MAX_RT_PRIO);
2027
2028         plist_node_init(&q->list, prio);
2029         plist_add(&q->list, &hb->chain);
2030         q->task = current;
2031         spin_unlock(&hb->lock);
2032 }
2033
2034 /**
2035  * unqueue_me() - Remove the futex_q from its futex_hash_bucket
2036  * @q:  The futex_q to unqueue
2037  *
2038  * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
2039  * be paired with exactly one earlier call to queue_me().
2040  *
2041  * Return:
2042  *   1 - if the futex_q was still queued (and we removed it);
2043  *   0 - if the futex_q was already removed by the waking thread
2044  */
2045 static int unqueue_me(struct futex_q *q)
2046 {
2047         spinlock_t *lock_ptr;
2048         int ret = 0;
2049
2050         /* In the common case we don't take the spinlock, which is nice. */
2051 retry:
2052         /*
2053          * q->lock_ptr can change between this read and the following spin_lock.
2054          * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
2055          * optimizing lock_ptr out of the logic below.
2056          */
2057         lock_ptr = READ_ONCE(q->lock_ptr);
2058         if (lock_ptr != NULL) {
2059                 spin_lock(lock_ptr);
2060                 /*
2061                  * q->lock_ptr can change between reading it and
2062                  * spin_lock(), causing us to take the wrong lock.  This
2063                  * corrects the race condition.
2064                  *
2065                  * Reasoning goes like this: if we have the wrong lock,
2066                  * q->lock_ptr must have changed (maybe several times)
2067                  * between reading it and the spin_lock().  It can
2068                  * change again after the spin_lock() but only if it was
2069                  * already changed before the spin_lock().  It cannot,
2070                  * however, change back to the original value.  Therefore
2071                  * we can detect whether we acquired the correct lock.
2072                  */
2073                 if (unlikely(lock_ptr != q->lock_ptr)) {
2074                         spin_unlock(lock_ptr);
2075                         goto retry;
2076                 }
2077                 __unqueue_futex(q);
2078
2079                 BUG_ON(q->pi_state);
2080
2081                 spin_unlock(lock_ptr);
2082                 ret = 1;
2083         }
2084
2085         drop_futex_key_refs(&q->key);
2086         return ret;
2087 }
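
/*
 * The retry dance above is the generic pattern for objects whose lock
 * pointer can be switched while they are queued. A minimal sketch of
 * the shape, assuming a node whose ->lock_ptr never changes back to a
 * previously observed value (as argued in the comment above):
 *
 *	for (;;) {
 *		spinlock_t *l = READ_ONCE(node->lock_ptr);
 *
 *		if (!l)
 *			break;			(already dequeued)
 *		spin_lock(l);
 *		if (l == node->lock_ptr)
 *			break;			(correct lock held)
 *		spin_unlock(l);			(raced; start over)
 *	}
 */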
2088
2089 /*
2090  * PI futexes cannot be requeued and must remove themselves from the
2091  * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
2092  * and dropped here.
2093  */
2094 static void unqueue_me_pi(struct futex_q *q)
2095         __releases(q->lock_ptr)
2096 {
2097         __unqueue_futex(q);
2098
2099         BUG_ON(!q->pi_state);
2100         put_pi_state(q->pi_state);
2101         q->pi_state = NULL;
2102
2103         spin_unlock(q->lock_ptr);
2104 }
2105
2106 /*
2107  * Fixup the pi_state owner with the new owner.
2108  *
2109  * Must be called with hash bucket lock held and mm->sem held for non
2110  * private futexes.
2111  */
2112 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
2113                                 struct task_struct *newowner)
2114 {
2115         u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
2116         struct futex_pi_state *pi_state = q->pi_state;
2117         struct task_struct *oldowner = pi_state->owner;
2118         u32 uval, uninitialized_var(curval), newval;
2119         int ret;
2120
2121         /* Owner died? */
2122         if (!pi_state->owner)
2123                 newtid |= FUTEX_OWNER_DIED;
2124
2125         /*
2126          * We are here either because we stole the rtmutex from the
2127          * previous highest priority waiter or we are the highest priority
2128          * waiter but failed to get the rtmutex the first time.
2129          * We have to replace the newowner TID in the user space variable.
2130          * This must be atomic as we have to preserve the owner died bit here.
2131          *
2132          * Note: We write the user space value _before_ changing the pi_state
2133          * because we can fault here. Imagine swapped out pages or a fork
2134          * that marked all the anonymous memory readonly for cow.
2135          *
2136          * Modifying pi_state _before_ the user space value would
2137          * leave the pi_state in an inconsistent state when we fault
2138          * here, because we need to drop the hash bucket lock to
2139          * handle the fault. This might be observed in the PID check
2140          * in lookup_pi_state.
2141          */
2142 retry:
2143         if (get_futex_value_locked(&uval, uaddr))
2144                 goto handle_fault;
2145
2146         while (1) {
2147                 newval = (uval & FUTEX_OWNER_DIED) | newtid;
2148
2149                 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
2150                         goto handle_fault;
2151                 if (curval == uval)
2152                         break;
2153                 uval = curval;
2154         }
2155
2156         /*
2157          * We fixed up user space. Now we need to fix the pi_state
2158          * itself.
2159          */
2160         if (pi_state->owner != NULL) {
2161                 raw_spin_lock_irq(&pi_state->owner->pi_lock);
2162                 WARN_ON(list_empty(&pi_state->list));
2163                 list_del_init(&pi_state->list);
2164                 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
2165         }
2166
2167         pi_state->owner = newowner;
2168
2169         raw_spin_lock_irq(&newowner->pi_lock);
2170         WARN_ON(!list_empty(&pi_state->list));
2171         list_add(&pi_state->list, &newowner->pi_state_list);
2172         raw_spin_unlock_irq(&newowner->pi_lock);
2173         return 0;
2174
2175         /*
2176          * To handle the page fault we need to drop the hash bucket
2177          * lock here. That gives the other task (either the highest priority
2178          * waiter itself or the task which stole the rtmutex) the
2179          * chance to try the fixup of the pi_state. So once we are
2180          * back from handling the fault we need to check the pi_state
2181          * after reacquiring the hash bucket lock and before trying to
2182          * do another fixup. When the fixup has been done already we
2183          * simply return.
2184          */
2185 handle_fault:
2186         spin_unlock(q->lock_ptr);
2187
2188         ret = fault_in_user_writeable(uaddr);
2189
2190         spin_lock(q->lock_ptr);
2191
2192         /*
2193          * Check if someone else fixed it for us:
2194          */
2195         if (pi_state->owner != oldowner)
2196                 return 0;
2197
2198         if (ret)
2199                 return ret;
2200
2201         goto retry;
2202 }
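
/*
 * The loop above is the usual "read, recompute, cmpxchg until stable"
 * idiom for rewriting one part of a word while preserving a bit that
 * may be set concurrently. A minimal sketch of the shape, using the
 * generic cmpxchg() for brevity and assuming a hypothetical STICKY
 * flag and replacement value:
 *
 *	uval = *uaddr;				(initial snapshot)
 *	for (;;) {
 *		newval = (uval & STICKY) | replacement;
 *		curval = cmpxchg(uaddr, uval, newval);
 *		if (curval == uval)
 *			break;			(nobody interfered)
 *		uval = curval;			(retry from fresh value)
 *	}
 */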
2203
2204 static long futex_wait_restart(struct restart_block *restart);
2205
2206 /**
2207  * fixup_owner() - Post lock pi_state and corner case management
2208  * @uaddr:      user address of the futex
2209  * @q:          futex_q (contains pi_state and access to the rt_mutex)
2210  * @locked:     if the attempt to take the rt_mutex succeeded (1) or not (0)
2211  *
2212  * After attempting to lock an rt_mutex, this function is called to cleanup
2213  * the pi_state owner as well as handle race conditions that may allow us to
2214  * acquire the lock. Must be called with the hb lock held.
2215  *
2216  * Return:
2217  *  1 - success, lock taken;
2218  *  0 - success, lock not taken;
2219  * <0 - on error (-EFAULT)
2220  */
2221 static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
2222 {
2223         struct task_struct *owner;
2224         int ret = 0;
2225
2226         if (locked) {
2227                 /*
2228                  * Got the lock. We might not be the anticipated owner if we
2229                  * did a lock-steal - fix up the PI-state in that case:
2230                  */
2231                 if (q->pi_state->owner != current)
2232                         ret = fixup_pi_state_owner(uaddr, q, current);
2233                 goto out;
2234         }
2235
2236         /*
2237          * Catch the rare case, where the lock was released when we were on the
2238          * way back before we locked the hash bucket.
2239          */
2240         if (q->pi_state->owner == current) {
2241                 /*
2242                  * Try to get the rt_mutex now. This might fail as some other
2243                  * task acquired the rt_mutex after we removed ourself from the
2244                  * rt_mutex waiters list.
2245                  */
2246                 if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
2247                         locked = 1;
2248                         goto out;
2249                 }
2250
2251                 /*
2252                  * pi_state is incorrect, some other task did a lock steal and
2253                  * we returned due to timeout or signal without taking the
2254                  * rt_mutex. Too late.
2255                  */
2256                 raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock);
2257                 owner = rt_mutex_owner(&q->pi_state->pi_mutex);
2258                 if (!owner)
2259                         owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
2260                 raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock);
2261                 ret = fixup_pi_state_owner(uaddr, q, owner);
2262                 goto out;
2263         }
2264
2265         /*
2266          * Paranoia check. If we did not take the lock, then we should not be
2267          * the owner of the rt_mutex.
2268          */
2269         if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
2270                 printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
2271                                 "pi-state %p\n", ret,
2272                                 q->pi_state->pi_mutex.owner,
2273                                 q->pi_state->owner);
2274
2275 out:
2276         return ret ? ret : locked;
2277 }
2278
2279 /**
2280  * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
2281  * @hb:         the futex hash bucket, must be locked by the caller
2282  * @q:          the futex_q to queue up on
2283  * @timeout:    the prepared hrtimer_sleeper, or NULL for no timeout
2284  */
2285 static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
2286                                 struct hrtimer_sleeper *timeout)
2287 {
2288         /*
2289          * The task state is guaranteed to be set before another task can
2290          * wake it. set_current_state() is implemented using smp_store_mb() and
2291          * queue_me() calls spin_unlock() upon completion, both serializing
2292          * access to the hash list and forcing another memory barrier.
2293          */
2294         set_current_state(TASK_INTERRUPTIBLE);
2295         queue_me(q, hb);
2296
2297         /* Arm the timer */
2298         if (timeout)
2299                 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
2300
2301         /*
2302          * If we have been removed from the hash list, then another task
2303          * has tried to wake us, and we can skip the call to schedule().
2304          */
2305         if (likely(!plist_node_empty(&q->list))) {
2306                 /*
2307                  * If the timer has already expired, current will already be
2308                  * flagged for rescheduling. Only call schedule if there
2309                  * is no timeout, or if it has yet to expire.
2310                  */
2311                 if (!timeout || timeout->task)
2312                         freezable_schedule();
2313         }
2314         __set_current_state(TASK_RUNNING);
2315 }
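
/*
 * This is the canonical "set state, publish, re-check, schedule"
 * sequence. A minimal sketch of the generic shape, with enqueue() and
 * condition standing in for the futex specifics, assuming the waker
 * sets the condition before it calls wake_up_process():
 *
 *	set_current_state(TASK_INTERRUPTIBLE);	(barrier vs. the waker)
 *	enqueue(me);				(publish ourselves)
 *	if (!condition)
 *		schedule();
 *	__set_current_state(TASK_RUNNING);
 *
 * Because set_current_state() implies smp_store_mb(), either the
 * waiter sees the condition already true and skips schedule(), or the
 * waker sees TASK_INTERRUPTIBLE and wakes the task: a wakeup issued
 * after the condition is set can not be lost.
 */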
2316
2317 /**
2318  * futex_wait_setup() - Prepare to wait on a futex
2319  * @uaddr:      the futex userspace address
2320  * @val:        the expected value
2321  * @flags:      futex flags (FLAGS_SHARED, etc.)
2322  * @q:          the associated futex_q
2323  * @hb:         storage for hash_bucket pointer to be returned to caller
2324  *
2325  * Setup the futex_q and locate the hash_bucket.  Get the futex value and
2326  * compare it with the expected value.  Handle atomic faults internally.
2327  * Return with the hb lock held and a q.key reference on success, and unlocked
2328  * with no q.key reference on failure.
2329  *
2330  * Return:
2331  *  0 - uaddr contains val and hb has been locked;
2332  * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
2333  */
2334 static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
2335                            struct futex_q *q, struct futex_hash_bucket **hb)
2336 {
2337         u32 uval;
2338         int ret;
2339
2340         /*
2341          * Access the page AFTER the hash-bucket is locked.
2342          * Order is important:
2343          *
2344          *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
2345          *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
2346          *
2347          * The basic logical guarantee of a futex is that it blocks ONLY
2348          * if cond(var) is known to be true at the time of blocking, for
2349          * any cond.  If we locked the hash-bucket after testing *uaddr, that
2350          * would open a race condition where we could block indefinitely with
2351          * cond(var) false, which would violate the guarantee.
2352          *
2353          * On the other hand, we insert q and release the hash-bucket only
2354          * after testing *uaddr.  This guarantees that futex_wait() will NOT
2355  * absorb a wakeup if *uaddr does not match the desired value
2356          * while the syscall executes.
2357          */
2358 retry:
2359         ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
2360         if (unlikely(ret != 0))
2361                 return ret;
2362
2363 retry_private:
2364         *hb = queue_lock(q);
2365
2366         ret = get_futex_value_locked(&uval, uaddr);
2367
2368         if (ret) {
2369                 queue_unlock(*hb);
2370
2371                 ret = get_user(uval, uaddr);
2372                 if (ret)
2373                         goto out;
2374
2375                 if (!(flags & FLAGS_SHARED))
2376                         goto retry_private;
2377
2378                 put_futex_key(&q->key);
2379                 goto retry;
2380         }
2381
2382         if (uval != val) {
2383                 queue_unlock(*hb);
2384                 ret = -EWOULDBLOCK;
2385         }
2386
2387 out:
2388         if (ret)
2389                 put_futex_key(&q->key);
2390         return ret;
2391 }
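
/*
 * A minimal user space counterpart of the ordering above, assuming the
 * raw syscall(2) interface; futex_word is an illustrative shared word:
 *
 *	#include <linux/futex.h>
 *	#include <stdatomic.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static void wait_while(_Atomic int *futex_word, int val)
 *	{
 *		while (atomic_load(futex_word) == val)
 *			syscall(SYS_futex, futex_word,
 *				FUTEX_WAIT_PRIVATE, val, NULL, NULL, 0);
 *	}
 *
 * If *futex_word no longer contains val once the hash bucket is
 * locked, futex_wait_setup() fails with -EWOULDBLOCK and the loop
 * above re-evaluates the condition instead of sleeping on it.
 */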
2392
2393 static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
2394                       ktime_t *abs_time, u32 bitset)
2395 {
2396         struct hrtimer_sleeper timeout, *to = NULL;
2397         struct restart_block *restart;
2398         struct futex_hash_bucket *hb;
2399         struct futex_q q = futex_q_init;
2400         int ret;
2401
2402         if (!bitset)
2403                 return -EINVAL;
2404         q.bitset = bitset;
2405
2406         if (abs_time) {
2407                 to = &timeout;
2408
2409                 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2410                                       CLOCK_REALTIME : CLOCK_MONOTONIC,
2411                                       HRTIMER_MODE_ABS);
2412                 hrtimer_init_sleeper(to, current);
2413                 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2414                                              current->timer_slack_ns);
2415         }
2416
2417 retry:
2418         /*
2419          * Prepare to wait on uaddr. On success, holds hb lock and increments
2420          * q.key refs.
2421          */
2422         ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2423         if (ret)
2424                 goto out;
2425
2426         /* queue_me and wait for wakeup, timeout, or a signal. */
2427         futex_wait_queue_me(hb, &q, to);
2428
2429         /* If we were woken (and unqueued), we succeeded, whatever. */
2430         ret = 0;
2431         /* unqueue_me() drops q.key ref */
2432         if (!unqueue_me(&q))
2433                 goto out;
2434         ret = -ETIMEDOUT;
2435         if (to && !to->task)
2436                 goto out;
2437
2438         /*
2439          * We expect signal_pending(current), but we might be the
2440          * victim of a spurious wakeup as well.
2441          */
2442         if (!signal_pending(current))
2443                 goto retry;
2444
2445         ret = -ERESTARTSYS;
2446         if (!abs_time)
2447                 goto out;
2448
2449         restart = &current->restart_block;
2450         restart->fn = futex_wait_restart;
2451         restart->futex.uaddr = uaddr;
2452         restart->futex.val = val;
2453         restart->futex.time = abs_time->tv64;
2454         restart->futex.bitset = bitset;
2455         restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
2456
2457         ret = -ERESTART_RESTARTBLOCK;
2458
2459 out:
2460         if (to) {
2461                 hrtimer_cancel(&to->timer);
2462                 destroy_hrtimer_on_stack(&to->timer);
2463         }
2464         return ret;
2465 }
2466
2467
2468 static long futex_wait_restart(struct restart_block *restart)
2469 {
2470         u32 __user *uaddr = restart->futex.uaddr;
2471         ktime_t t, *tp = NULL;
2472
2473         if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
2474                 t.tv64 = restart->futex.time;
2475                 tp = &t;
2476         }
2477         restart->fn = do_no_restart_syscall;
2478
2479         return (long)futex_wait(uaddr, restart->futex.flags,
2480                                 restart->futex.val, tp, restart->futex.bitset);
2481 }
2482
2483
2484 /*
2485  * Userspace tried a 0 -> TID atomic transition of the futex value
2486  * and failed. The kernel side here does the whole locking operation:
2487  * if there are waiters then it will block as a consequence of relying
2488  * on rt-mutexes, it does PI, etc. (Due to races the kernel might see
2489  * a 0 value of the futex too.)
2490  *
2491  * Also serves as the FUTEX_TRYLOCK_PI implementation, with trylock semantics.
2492  */
2493 static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
2494                          ktime_t *time, int trylock)
2495 {
2496         struct hrtimer_sleeper timeout, *to = NULL;
2497         struct futex_hash_bucket *hb;
2498         struct futex_q q = futex_q_init;
2499         int res, ret;
2500
2501         if (refill_pi_state_cache())
2502                 return -ENOMEM;
2503
2504         if (time) {
2505                 to = &timeout;
2506                 hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
2507                                       HRTIMER_MODE_ABS);
2508                 hrtimer_init_sleeper(to, current);
2509                 hrtimer_set_expires(&to->timer, *time);
2510         }
2511
2512 retry:
2513         ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
2514         if (unlikely(ret != 0))
2515                 goto out;
2516
2517 retry_private:
2518         hb = queue_lock(&q);
2519
2520         ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
2521         if (unlikely(ret)) {
2522                 /*
2523                  * Atomic work succeeded and we got the lock,
2524                  * or failed. Either way, we do _not_ block.
2525                  */
2526                 switch (ret) {
2527                 case 1:
2528                         /* We got the lock. */
2529                         ret = 0;
2530                         goto out_unlock_put_key;
2531                 case -EFAULT:
2532                         goto uaddr_faulted;
2533                 case -EAGAIN:
2534                         /*
2535                          * Two reasons for this:
2536                          * - Task is exiting and we just wait for the
2537                          *   exit to complete.
2538                          * - The user space value changed.
2539                          */
2540                         queue_unlock(hb);
2541                         put_futex_key(&q.key);
2542                         cond_resched();
2543                         goto retry;
2544                 default:
2545                         goto out_unlock_put_key;
2546                 }
2547         }
2548
2549         /*
2550          * Only actually queue now that the atomic ops are done:
2551          */
2552         queue_me(&q, hb);
2553
2554         WARN_ON(!q.pi_state);
2555         /*
2556          * Block on the PI mutex:
2557          */
2558         if (!trylock) {
2559                 ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
2560         } else {
2561                 ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
2562                 /* Fixup the trylock return value: */
2563                 ret = ret ? 0 : -EWOULDBLOCK;
2564         }
2565
2566         spin_lock(q.lock_ptr);
2567         /*
2568          * Fixup the pi_state owner and possibly acquire the lock if we
2569          * haven't already.
2570          */
2571         res = fixup_owner(uaddr, &q, !ret);
2572         /*
2573          * If fixup_owner() returned an error, propagate that.  If it acquired
2574          * the lock, clear our -ETIMEDOUT or -EINTR.
2575          */
2576         if (res)
2577                 ret = (res < 0) ? res : 0;
2578
2579         /*
2580          * If fixup_owner() faulted and was unable to handle the fault, unlock
2581          * it and return the fault to userspace.
2582          */
2583         if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
2584                 rt_mutex_unlock(&q.pi_state->pi_mutex);
2585
2586         /* Unqueue and drop the lock */
2587         unqueue_me_pi(&q);
2588
2589         goto out_put_key;
2590
2591 out_unlock_put_key:
2592         queue_unlock(hb);
2593
2594 out_put_key:
2595         put_futex_key(&q.key);
2596 out:
2597         if (to)
2598                 destroy_hrtimer_on_stack(&to->timer);
2599         return ret != -EINTR ? ret : -ERESTARTNOINTR;
2600
2601 uaddr_faulted:
2602         queue_unlock(hb);
2603
2604         ret = fault_in_user_writeable(uaddr);
2605         if (ret)
2606                 goto out_put_key;
2607
2608         if (!(flags & FLAGS_SHARED))
2609                 goto retry_private;
2610
2611         put_futex_key(&q.key);
2612         goto retry;
2613 }
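
/*
 * A minimal user space sketch of the 0 -> TID fast path whose failure
 * lands in futex_lock_pi() above, assuming the raw syscall(2)
 * interface and a gettid() wrapper; futex_word is illustrative:
 *
 *	#include <linux/futex.h>
 *	#include <stdatomic.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static void pi_lock(_Atomic unsigned int *futex_word)
 *	{
 *		unsigned int expected = 0;
 *
 *		if (atomic_compare_exchange_strong(futex_word, &expected,
 *						   (unsigned int)gettid()))
 *			return;		(uncontended, kernel not entered)
 *		syscall(SYS_futex, futex_word, FUTEX_LOCK_PI, 0,
 *			NULL, NULL, 0);
 *	}
 */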
2614
2615 /*
2616  * Userspace attempted a TID -> 0 atomic transition, and failed.
2617  * This is the in-kernel slowpath: we look up the PI state (if any),
2618  * and do the rt-mutex unlock.
2619  */
2620 static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
2621 {
2622         u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
2623         union futex_key key = FUTEX_KEY_INIT;
2624         struct futex_hash_bucket *hb;
2625         struct futex_q *match;
2626         int ret;
2627
2628 retry:
2629         if (get_user(uval, uaddr))
2630                 return -EFAULT;
2631         /*
2632          * We release only a lock we actually own:
2633          */
2634         if ((uval & FUTEX_TID_MASK) != vpid)
2635                 return -EPERM;
2636
2637         ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
2638         if (ret)
2639                 return ret;
2640
2641         hb = hash_futex(&key);
2642         spin_lock(&hb->lock);
2643
2644         /*
2645          * Check waiters first. We do not trust user space values at
2646          * all and we at least want to know if user space fiddled
2647          * with the futex value instead of blindly unlocking.
2648          */
2649         match = futex_top_waiter(hb, &key);
2650         if (match) {
2651                 ret = wake_futex_pi(uaddr, uval, match, hb);
2652                 /*
2653                  * In case of success wake_futex_pi dropped the hash
2654                  * bucket lock.
2655                  */
2656                 if (!ret)
2657                         goto out_putkey;
2658                 /*
2659                  * The atomic access to the futex value generated a
2660                  * pagefault, so retry the user-access and the wakeup:
2661                  */
2662                 if (ret == -EFAULT)
2663                         goto pi_faulted;
2664                 /*
2665                  * An unconditional UNLOCK_PI op raced against a waiter
2666                  * setting the FUTEX_WAITERS bit. Try again.
2667                  */
2668                 if (ret == -EAGAIN) {
2669                         spin_unlock(&hb->lock);
2670                         put_futex_key(&key);
2671                         goto retry;
2672                 }
2673                 /*
2674                  * wake_futex_pi has detected invalid state. Tell user
2675                  * space.
2676                  */
2677                 goto out_unlock;
2678         }
2679
2680         /*
2681          * We have no kernel internal state, i.e. no waiters in the
2682          * kernel. Waiters which are about to queue themselves are stuck
2683          * on hb->lock. So we can safely ignore them. We preserve
2684          * neither the WAITERS bit nor the OWNER_DIED one. We are the
2685          * owner.
2686          */
2687         if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))
2688                 goto pi_faulted;
2689
2690         /*
2691          * If uval has changed, let user space handle it.
2692          */
2693         ret = (curval == uval) ? 0 : -EAGAIN;
2694
2695 out_unlock:
2696         spin_unlock(&hb->lock);
2697 out_putkey:
2698         put_futex_key(&key);
2699         return ret;
2700
2701 pi_faulted:
2702         spin_unlock(&hb->lock);
2703         put_futex_key(&key);
2704
2705         ret = fault_in_user_writeable(uaddr);
2706         if (!ret)
2707                 goto retry;
2708
2709         return ret;
2710 }
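
/*
 * The matching user space unlock sketch (same includes and helpers as
 * the pi_lock() sketch above): attempt the TID -> 0 transition and
 * enter the slowpath above only when the kernel has state to clean
 * up, i.e. FUTEX_WAITERS (or another state bit) is set:
 *
 *	static void pi_unlock(_Atomic unsigned int *futex_word)
 *	{
 *		unsigned int expected = (unsigned int)gettid();
 *
 *		if (atomic_compare_exchange_strong(futex_word, &expected,
 *						   0))
 *			return;		(no waiters, kernel not entered)
 *		syscall(SYS_futex, futex_word, FUTEX_UNLOCK_PI, 0,
 *			NULL, NULL, 0);
 *	}
 */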
2711
2712 /**
2713  * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
2714  * @hb:         the hash_bucket the futex_q was originally enqueued on
2715  * @q:          the futex_q woken while waiting to be requeued
2716  * @key2:       the futex_key of the requeue target futex
2717  * @timeout:    the timeout associated with the wait (NULL if none)
2718  *
2719  * Detect if the task was woken on the initial futex as opposed to the requeue
2720  * target futex.  If so, determine if it was a timeout or a signal that caused
2721  * the wakeup and return the appropriate error code to the caller.  Must be
2722  * called with the hb lock held.
2723  *
2724  * Return:
2725  *  0 - no early wakeup detected;
2726  * <0 - -ETIMEDOUT, -ERESTARTNOINTR or -EWOULDBLOCK (spurious wakeup)
2727  */
2728 static inline
2729 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2730                                    struct futex_q *q, union futex_key *key2,
2731                                    struct hrtimer_sleeper *timeout)
2732 {
2733         int ret = 0;
2734
2735         /*
2736          * With the hb lock held, we avoid races while we process the wakeup.
2737          * We only need to hold hb (and not hb2) to ensure atomicity as the
2738          * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
2739          * It can't be requeued from uaddr2 to something else since we don't
2740          * support a PI aware source futex for requeue.
2741          */
2742         if (!match_futex(&q->key, key2)) {
2743                 WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
2744                 /*
2745                  * We were woken prior to requeue by a timeout or a signal.
2746                  * Unqueue the futex_q and determine which it was.
2747                  */
2748                 plist_del(&q->list, &hb->chain);
2749                 hb_waiters_dec(hb);
2750
2751                 /* Handle spurious wakeups gracefully */
2752                 ret = -EWOULDBLOCK;
2753                 if (timeout && !timeout->task)
2754                         ret = -ETIMEDOUT;
2755                 else if (signal_pending(current))
2756                         ret = -ERESTARTNOINTR;
2757         }
2758         return ret;
2759 }
2760
2761 /**
2762  * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
2763  * @uaddr:      the futex we initially wait on (non-pi)
2764  * @flags:      futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
2765  *              the same type, no requeueing from private to shared, etc.
2766  * @val:        the expected value of uaddr
2767  * @abs_time:   absolute timeout
2768  * @bitset:     32 bit wakeup bitset set by userspace, defaults to all
2769  * @uaddr2:     the pi futex we will take prior to returning to user-space
2770  *
2771  * The caller will wait on uaddr and will be requeued by futex_requeue() to
2772  * uaddr2 which must be PI aware and distinct from uaddr.  Normal wakeup will wake
2773  * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
2774  * userspace.  This ensures the rt_mutex maintains an owner when it has waiters;
2775  * without one, the pi logic would not know which task to boost/deboost, if
2776  * there was a need to.
2777  *
2778  * We call schedule in futex_wait_queue_me() when we enqueue and return there
2779  * via the following:
2780  * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
2781  * 2) wakeup on uaddr2 after a requeue
2782  * 3) signal
2783  * 4) timeout
2784  *
2785  * If 3, cleanup and return -ERESTARTNOINTR.
2786  *
2787  * If 2, we may then block on trying to take the rt_mutex and return via:
2788  * 5) successful lock
2789  * 6) signal
2790  * 7) timeout
2791  * 8) other lock acquisition failure
2792  *
2793  * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
2794  *
2795  * If 4 or 7, we cleanup and return with -ETIMEDOUT.
2796  *
2797  * Return:
2798  *  0 - On success;
2799  * <0 - On error
2800  */
2801 static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2802                                  u32 val, ktime_t *abs_time, u32 bitset,
2803                                  u32 __user *uaddr2)
2804 {
2805         struct hrtimer_sleeper timeout, *to = NULL;
2806         struct rt_mutex_waiter rt_waiter;
2807         struct rt_mutex *pi_mutex = NULL;
2808         struct futex_hash_bucket *hb;
2809         union futex_key key2 = FUTEX_KEY_INIT;
2810         struct futex_q q = futex_q_init;
2811         int res, ret;
2812
2813         if (uaddr == uaddr2)
2814                 return -EINVAL;
2815
2816         if (!bitset)
2817                 return -EINVAL;
2818
2819         if (abs_time) {
2820                 to = &timeout;
2821                 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2822                                       CLOCK_REALTIME : CLOCK_MONOTONIC,
2823                                       HRTIMER_MODE_ABS);
2824                 hrtimer_init_sleeper(to, current);
2825                 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2826                                              current->timer_slack_ns);
2827         }
2828
2829         /*
2830          * The waiter is allocated on our stack, manipulated by the requeue
2831          * code while we sleep on uaddr.
2832          */
2833         debug_rt_mutex_init_waiter(&rt_waiter);
2834         RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
2835         RB_CLEAR_NODE(&rt_waiter.tree_entry);
2836         rt_waiter.task = NULL;
2837
2838         ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
2839         if (unlikely(ret != 0))
2840                 goto out;
2841
2842         q.bitset = bitset;
2843         q.rt_waiter = &rt_waiter;
2844         q.requeue_pi_key = &key2;
2845
2846         /*
2847          * Prepare to wait on uaddr. On success, increments q.key (key1) ref
2848          * count.
2849          */
2850         ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2851         if (ret)
2852                 goto out_key2;
2853
2854         /*
2855          * The check above which compares uaddrs is not sufficient for
2856          * shared futexes. We need to compare the keys:
2857          */
2858         if (match_futex(&q.key, &key2)) {
2859                 queue_unlock(hb);
2860                 ret = -EINVAL;
2861                 goto out_put_keys;
2862         }
2863
2864         /* Queue the futex_q, drop the hb lock, wait for wakeup. */
2865         futex_wait_queue_me(hb, &q, to);
2866
2867         spin_lock(&hb->lock);
2868         ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
2869         spin_unlock(&hb->lock);
2870         if (ret)
2871                 goto out_put_keys;
2872
2873         /*
2874          * In order for us to be here, we know our q.key == key2, and since
2875          * we took the hb->lock above, we also know that futex_requeue() has
2876          * completed and we no longer have to concern ourselves with a wakeup
2877          * race with the atomic proxy lock acquisition by the requeue code. The
2878          * futex_requeue dropped our key1 reference and incremented our key2
2879          * reference count.
2880          */
2881
2882         /* Check if the requeue code acquired the second futex for us. */
2883         if (!q.rt_waiter) {
2884                 /*
2885                  * Got the lock. We might not be the anticipated owner if we
2886                  * did a lock-steal - fix up the PI-state in that case.
2887                  */
2888                 if (q.pi_state && (q.pi_state->owner != current)) {
2889                         spin_lock(q.lock_ptr);
2890                         ret = fixup_pi_state_owner(uaddr2, &q, current);
2891                         /*
2892                          * Drop the reference to the pi state which
2893                          * the requeue_pi() code acquired for us.
2894                          */
2895                         put_pi_state(q.pi_state);
2896                         spin_unlock(q.lock_ptr);
2897                 }
2898         } else {
2899                 /*
2900                  * We have been woken up by futex_unlock_pi(), a timeout, or a
2901                  * signal.  futex_unlock_pi() will destroy neither the lock_ptr nor
2902                  * the pi_state.
2903                  */
2904                 WARN_ON(!q.pi_state);
2905                 pi_mutex = &q.pi_state->pi_mutex;
2906                 ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
2907                 debug_rt_mutex_free_waiter(&rt_waiter);
2908
2909                 spin_lock(q.lock_ptr);
2910                 /*
2911                  * Fixup the pi_state owner and possibly acquire the lock if we
2912                  * haven't already.
2913                  */
2914                 res = fixup_owner(uaddr2, &q, !ret);
2915                 /*
2916          * If fixup_owner() returned an error, propagate that.  If it
2917                  * acquired the lock, clear -ETIMEDOUT or -EINTR.
2918                  */
2919                 if (res)
2920                         ret = (res < 0) ? res : 0;
2921
2922                 /* Unqueue and drop the lock. */
2923                 unqueue_me_pi(&q);
2924         }
2925
2926         /*
2927          * If fixup_pi_state_owner() faulted and was unable to handle the
2928          * fault, unlock the rt_mutex and return the fault to userspace.
2929          */
2930         if (ret == -EFAULT) {
2931                 if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
2932                         rt_mutex_unlock(pi_mutex);
2933         } else if (ret == -EINTR) {
2934                 /*
2935                  * We've already been requeued, but cannot restart by calling
2936                  * futex_lock_pi() directly. We could restart this syscall, but
2937                  * it would detect that the user space "val" changed and return
2938                  * -EWOULDBLOCK.  Save the overhead of the restart and return
2939                  * -EWOULDBLOCK directly.
2940                  */
2941                 ret = -EWOULDBLOCK;
2942         }
2943
2944 out_put_keys:
2945         put_futex_key(&q.key);
2946 out_key2:
2947         put_futex_key(&key2);
2948
2949 out:
2950         if (to) {
2951                 hrtimer_cancel(&to->timer);
2952                 destroy_hrtimer_on_stack(&to->timer);
2953         }
2954         return ret;
2955 }
2956
2957 /*
2958  * Support for robust futexes: the kernel cleans up held futexes at
2959  * thread exit time.
2960  *
2961  * Implementation: user-space maintains a per-thread list of locks it
2962  * is holding. Upon do_exit(), the kernel carefully walks this list,
2963  * and marks all locks that are owned by this thread with the
2964  * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
2965  * always manipulated with the lock held, so the list is private and
2966  * per-thread. Userspace also maintains a per-thread 'list_op_pending'
2967  * field, to allow the kernel to clean up if the thread dies after
2968  * acquiring the lock, but just before it could have added itself to
2969  * the list. There can only be one such pending lock.
2970  */
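     /*
      * A minimal userspace registration sketch (illustrative only: glibc's
      * pthread implementation normally registers its own list, and
      * "struct my_lock" plus the layout around the futex word are
      * assumptions, not part of this file):
      *
      *	static struct robust_list_head head;
      *
      *	head.list.next = &head.list;	-- empty, circular list
      *	head.futex_offset = offsetof(struct my_lock, futex_word);
      *	head.list_op_pending = NULL;
      *	syscall(SYS_set_robust_list, &head, sizeof(head));
      *
      * Each lock acquired afterwards is linked into head.list by the
      * userspace lock implementation; exit_robust_list() below walks that
      * list at thread exit.
      */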
2971
2972 /**
2973  * sys_set_robust_list() - Set the robust-futex list head of a task
2974  * @head:       pointer to the list-head
2975  * @len:        length of the list-head, as userspace expects
2976  */
2977 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2978                 size_t, len)
2979 {
2980         if (!futex_cmpxchg_enabled)
2981                 return -ENOSYS;
2982         /*
2983          * The kernel knows only one size for now:
2984          */
2985         if (unlikely(len != sizeof(*head)))
2986                 return -EINVAL;
2987
2988         current->robust_list = head;
2989
2990         return 0;
2991 }
2992
2993 /**
2994  * sys_get_robust_list() - Get the robust-futex list head of a task
2995  * @pid:        pid of the process [zero for current task]
2996  * @head_ptr:   pointer to a list-head pointer, the kernel fills it in
2997  * @len_ptr:    pointer to a length field, the kernel fills in the header size
2998  */
2999 SYSCALL_DEFINE3(get_robust_list, int, pid,
3000                 struct robust_list_head __user * __user *, head_ptr,
3001                 size_t __user *, len_ptr)
3002 {
3003         struct robust_list_head __user *head;
3004         unsigned long ret;
3005         struct task_struct *p;
3006
3007         if (!futex_cmpxchg_enabled)
3008                 return -ENOSYS;
3009
3010         rcu_read_lock();
3011
3012         ret = -ESRCH;
3013         if (!pid)
3014                 p = current;
3015         else {
3016                 p = find_task_by_vpid(pid);
3017                 if (!p)
3018                         goto err_unlock;
3019         }
3020
3021         ret = -EPERM;
3022         if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
3023                 goto err_unlock;
3024
3025         head = p->robust_list;
3026         rcu_read_unlock();
3027
3028         if (put_user(sizeof(*head), len_ptr))
3029                 return -EFAULT;
3030         return put_user(head, head_ptr);
3031
3032 err_unlock:
3033         rcu_read_unlock();
3034
3035         return ret;
3036 }
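     /*
      * Usage sketch for the syscall above, assuming the raw syscall
      * interface (glibc provides no wrapper). A debugger with ptrace
      * read access to @pid could do:
      *
      *	struct robust_list_head *head;
      *	size_t len;
      *
      *	syscall(SYS_get_robust_list, pid, &head, &len);
      *
      * On success, head points at the target task's registered list head
      * and len is filled with sizeof(struct robust_list_head).
      */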
3037
3038 /*
3039  * Process a futex-list entry, check whether it's owned by the
3040  * dying task, and do notification if so:
3041  */
3042 int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
3043 {
3044         u32 uval, uninitialized_var(nval), mval;
3045
3046 retry:
3047         if (get_user(uval, uaddr))
3048                 return -1;
3049
3050         if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
3051                 /*
3052                  * Ok, this dying thread is truly holding a futex
3053                  * of interest. Set the OWNER_DIED bit atomically
3054                  * via cmpxchg, and if the value had FUTEX_WAITERS
3055                  * set, wake up a waiter (if any). (We have to do a
3056                  * futex_wake() even if OWNER_DIED is already set -
3057                  * to handle the rare but possible case of recursive
3058                  * thread-death.) The rest of the cleanup is done in
3059                  * userspace.
3060                  */
3061                 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
3062                 /*
3063                  * We are not holding a lock here, but we want to have
3064                  * the pagefault_disable/enable() protection because
3065                  * we want to handle the fault gracefully. If the
3066                  * access fails we try to fault in the futex with R/W
3067                  * verification via get_user_pages. get_user() above
3068                  * does not guarantee R/W access. If that fails we
3069                  * give up and leave the futex locked.
3070                  */
3071                 if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
3072                         if (fault_in_user_writeable(uaddr))
3073                                 return -1;
3074                         goto retry;
3075                 }
3076                 if (nval != uval)
3077                         goto retry;
3078
3079                 /*
3080                  * Wake robust non-PI futexes here. The wakeup of
3081                  * PI futexes happens in exit_pi_state():
3082                  */
3083                 if (!pi && (uval & FUTEX_WAITERS))
3084                         futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
3085         }
3086         return 0;
3087 }
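     /*
      * Worked example of the transition above: if the dying thread's TID
      * is 1234 and a waiter is queued, uval reads as (1234 | FUTEX_WAITERS)
      * and the cmpxchg stores (FUTEX_WAITERS | FUTEX_OWNER_DIED). The TID
      * bits are cleared, so the next acquirer can detect the dead owner,
      * and the preserved FUTEX_WAITERS bit triggers the futex_wake() call.
      */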
3088
3089 /*
3090  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
3091  */
3092 static inline int fetch_robust_entry(struct robust_list __user **entry,
3093                                      struct robust_list __user * __user *head,
3094                                      unsigned int *pi)
3095 {
3096         unsigned long uentry;
3097
3098         if (get_user(uentry, (unsigned long __user *)head))
3099                 return -EFAULT;
3100
3101         *entry = (void __user *)(uentry & ~1UL);
3102         *pi = uentry & 1;
3103
3104         return 0;
3105 }
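     /*
      * For example, a fetched user value of 0x7fffdeadbeb1 decodes to
      * entry == (void __user *)0x7fffdeadbeb0 with *pi == 1 (a PI futex),
      * while an even value decodes unchanged with *pi == 0.
      */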
3106
3107 /*
3108  * Walk curr->robust_list (very carefully, it's a userspace list!)
3109  * and mark any locks found there dead, and notify any waiters.
3110  *
3111  * We silently return on any sign of a list-walking problem.
3112  */
3113 void exit_robust_list(struct task_struct *curr)
3114 {
3115         struct robust_list_head __user *head = curr->robust_list;
3116         struct robust_list __user *entry, *next_entry, *pending;
3117         unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
3118         unsigned int uninitialized_var(next_pi);
3119         unsigned long futex_offset;
3120         int rc;
3121
3122         if (!futex_cmpxchg_enabled)
3123                 return;
3124
3125         /*
3126          * Fetch the list head (which was registered earlier, via
3127          * sys_set_robust_list()):
3128          */
3129         if (fetch_robust_entry(&entry, &head->list.next, &pi))
3130                 return;
3131         /*
3132          * Fetch the relative futex offset:
3133          */
3134         if (get_user(futex_offset, &head->futex_offset))
3135                 return;
3136         /*
3137          * Fetch any possibly pending lock-add first, and handle it
3138          * if it exists:
3139          */
3140         if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
3141                 return;
3142
3143         next_entry = NULL;      /* avoid warning with gcc */
3144         while (entry != &head->list) {
3145                 /*
3146                  * Fetch the next entry in the list before calling
3147                  * handle_futex_death:
3148                  */
3149                 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
3150                 /*
3151                  * A pending lock might already be on the list, so
3152                  * don't process it twice:
3153                  */
3154                 if (entry != pending)
3155                         if (handle_futex_death((void __user *)entry + futex_offset,
3156                                                 curr, pi))
3157                                 return;
3158                 if (rc)
3159                         return;
3160                 entry = next_entry;
3161                 pi = next_pi;
3162                 /*
3163                  * Avoid excessively long or circular lists:
3164                  */
3165                 if (!--limit)
3166                         break;
3167
3168                 cond_resched();
3169         }
3170
3171         if (pending)
3172                 handle_futex_death((void __user *)pending + futex_offset,
3173                                    curr, pip);
3174 }
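     /*
      * The address arithmetic above relies on the registered futex_offset:
      * if userspace registered futex_offset == 16 and a list entry lives
      * at 0x7f0000001000, the futex word that gets marked dead is at
      * 0x7f0000001010. These example addresses are illustrative only.
      */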
3175
3176 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
3177                 u32 __user *uaddr2, u32 val2, u32 val3)
3178 {
3179         int cmd = op & FUTEX_CMD_MASK;
3180         unsigned int flags = 0;
3181
3182         if (!(op & FUTEX_PRIVATE_FLAG))
3183                 flags |= FLAGS_SHARED;
3184
3185         if (op & FUTEX_CLOCK_REALTIME) {
3186                 flags |= FLAGS_CLOCKRT;
3187                 if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET &&
3188                     cmd != FUTEX_WAIT_REQUEUE_PI)
3189                         return -ENOSYS;
3190         }
3191
3192         switch (cmd) {
3193         case FUTEX_LOCK_PI:
3194         case FUTEX_UNLOCK_PI:
3195         case FUTEX_TRYLOCK_PI:
3196         case FUTEX_WAIT_REQUEUE_PI:
3197         case FUTEX_CMP_REQUEUE_PI:
3198                 if (!futex_cmpxchg_enabled)
3199                         return -ENOSYS;
3200         }
3201
3202         switch (cmd) {
3203         case FUTEX_WAIT:
3204                 val3 = FUTEX_BITSET_MATCH_ANY; /* fall through */
3205         case FUTEX_WAIT_BITSET:
3206                 return futex_wait(uaddr, flags, val, timeout, val3);
3207         case FUTEX_WAKE:
3208                 val3 = FUTEX_BITSET_MATCH_ANY; /* fall through */
3209         case FUTEX_WAKE_BITSET:
3210                 return futex_wake(uaddr, flags, val, val3);
3211         case FUTEX_REQUEUE:
3212                 return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
3213         case FUTEX_CMP_REQUEUE:
3214                 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
3215         case FUTEX_WAKE_OP:
3216                 return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
3217         case FUTEX_LOCK_PI:
3218                 return futex_lock_pi(uaddr, flags, timeout, 0);
3219         case FUTEX_UNLOCK_PI:
3220                 return futex_unlock_pi(uaddr, flags);
3221         case FUTEX_TRYLOCK_PI:
3222                 return futex_lock_pi(uaddr, flags, NULL, 1);
3223         case FUTEX_WAIT_REQUEUE_PI:
3224                 val3 = FUTEX_BITSET_MATCH_ANY;
3225                 return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
3226                                              uaddr2);
3227         case FUTEX_CMP_REQUEUE_PI:
3228                 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
3229         }
3230         return -ENOSYS;
3231 }
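     /*
      * Worked example of the op-word decoding above: op ==
      * (FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
      * yields cmd == FUTEX_WAIT_BITSET with FLAGS_CLOCKRT set and
      * FLAGS_SHARED clear, which passes the CLOCK_REALTIME restriction
      * check and ends up in futex_wait() with an absolute timeout.
      */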
3232
3233
3234 SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
3235                 struct timespec __user *, utime, u32 __user *, uaddr2,
3236                 u32, val3)
3237 {
3238         struct timespec ts;
3239         ktime_t t, *tp = NULL;
3240         u32 val2 = 0;
3241         int cmd = op & FUTEX_CMD_MASK;
3242
3243         if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
3244                       cmd == FUTEX_WAIT_BITSET ||
3245                       cmd == FUTEX_WAIT_REQUEUE_PI)) {
3246                 if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
3247                         return -EFAULT;
3248                 if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
3249                         return -EFAULT;
3250                 if (!timespec_valid(&ts))
3251                         return -EINVAL;
3252
3253                 t = timespec_to_ktime(ts);
3254                 if (cmd == FUTEX_WAIT)
3255                         t = ktime_add_safe(ktime_get(), t);
3256                 tp = &t;
3257         }
3258         /*
3259          * The requeue parameter is passed in 'utime' if cmd == FUTEX_*_REQUEUE_*,
3260          * and the number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
3261          */
3262         if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
3263             cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
3264                 val2 = (u32) (unsigned long) utime;
3265
3266         return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
3267 }
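     /*
      * A minimal userspace sketch of the wait/wake pairing handled above
      * (illustrative only). Note that FUTEX_WAIT takes a relative timeout,
      * converted to an absolute expiry by the ktime_add_safe() above,
      * while the *_BITSET and PI variants take absolute timeouts:
      *
      *	uint32_t futex_word = 0;
      *
      *	-- waiter: sleep while the word still reads 0, for up to 1s
      *	syscall(SYS_futex, &futex_word, FUTEX_WAIT_PRIVATE, 0,
      *		&(struct timespec){ .tv_sec = 1 }, NULL, 0);
      *
      *	-- waker: publish the new value, then wake one waiter
      *	__atomic_store_n(&futex_word, 1, __ATOMIC_SEQ_CST);
      *	syscall(SYS_futex, &futex_word, FUTEX_WAKE_PRIVATE, 1, NULL, NULL, 0);
      */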
3268
3269 static void __init futex_detect_cmpxchg(void)
3270 {
3271 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
3272         u32 curval;
3273
3274         /*
3275          * This will fail, and that is intended: some arch implementations do
3276          * runtime detection of the futex_atomic_cmpxchg_inatomic()
3277          * functionality. We want to know that before we call into any
3278          * of the complex code paths, and we also want to prevent
3279          * registration of robust lists in that case. NULL is
3280          * guaranteed to fault, and we get -EFAULT on a functional
3281          * implementation; the non-functional ones will return
3282          * -ENOSYS.
3283          */
3284         if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
3285                 futex_cmpxchg_enabled = 1;
3286 #endif
3287 }
3288
3289 static int __init futex_init(void)
3290 {
3291         unsigned int futex_shift;
3292         unsigned long i;
3293
3294 #if CONFIG_BASE_SMALL
3295         futex_hashsize = 16;
3296 #else
3297         futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
3298 #endif
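             /*
              * Sizing example: with 4 possible CPUs this requests
              * roundup_pow_of_two(256 * 4) == 1024 buckets;
              * alloc_large_system_hash() below then reports
              * futex_shift == 10, so futex_hashsize remains 1024.
              */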
3299
3300         futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
3301                                                futex_hashsize, 0,
3302                                                futex_hashsize < 256 ? HASH_SMALL : 0,
3303                                                &futex_shift, NULL,
3304                                                futex_hashsize, futex_hashsize);
3305         futex_hashsize = 1UL << futex_shift;
3306
3307         futex_detect_cmpxchg();
3308
3309         for (i = 0; i < futex_hashsize; i++) {
3310                 atomic_set(&futex_queues[i].waiters, 0);
3311                 plist_head_init(&futex_queues[i].chain);
3312                 spin_lock_init(&futex_queues[i].lock);
3313         }
3314
3315         return 0;
3316 }
3317 __initcall(futex_init);