/*
 * sched/wait: Avoid abort_exclusive_wait() in ___wait_event()
 * (from cascardo/linux.git, include/linux/wait.h)
 */
1 #ifndef _LINUX_WAIT_H
2 #define _LINUX_WAIT_H
3 /*
4  * Linux wait queue related types and methods
5  */
6 #include <linux/list.h>
7 #include <linux/stddef.h>
8 #include <linux/spinlock.h>
9 #include <asm/current.h>
10 #include <uapi/linux/wait.h>
11
typedef struct __wait_queue wait_queue_t;
/* Wake callback for one waiter; return value is consumed by __wake_up(). */
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

/* __wait_queue::flags */
#define WQ_FLAG_EXCLUSIVE	0x01	/* wake at most 'nr' such waiters per __wake_up() */
#define WQ_FLAG_WOKEN		0x02	/* NOTE(review): presumably "entry has been woken" -- confirm in wait_woken() */
19
/* One waiter on a wait queue. */
struct __wait_queue {
	unsigned int		flags;		/* WQ_FLAG_* */
	void			*private;	/* the waiting task_struct (see init_waitqueue_entry()) */
	wait_queue_func_t	func;		/* callback invoked to wake this entry */
	struct list_head	task_list;	/* link on __wait_queue_head::task_list */
};
26
/* Identifies the bit (or atomic_t) a bit-waiter is sleeping on. */
struct wait_bit_key {
	void			*flags;		/* word containing the bit */
	int			bit_nr;		/* bit number, or WAIT_ATOMIC_T_BIT_NR */
#define WAIT_ATOMIC_T_BIT_NR	-1		/* sentinel: waiting on an atomic_t, not a bit */
	unsigned long		timeout;	/* used by the *_timeout bit-wait variants */
};
33
/* A wait queue entry together with the bit it is waiting for. */
struct wait_bit_queue {
	struct wait_bit_key	key;
	wait_queue_t		wait;
};
38
/* Head of a wait queue: a lock protecting the list of __wait_queue entries. */
struct __wait_queue_head {
	spinlock_t		lock;
	struct list_head	task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;
44
45 struct task_struct;
46
47 /*
48  * Macros for declaration and initialisaton of the datatypes
49  */
50
/* Static initializer for a wait_queue_t waking task @tsk. */
#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

/* Static initializer for a wait_queue_head_t (empty, self-linked list). */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)				\
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
71
extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

/*
 * Runtime initialisation; the function-local static lock_class_key gives
 * each call site its own lockdep class.
 */
#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_waitqueue_head((q), #q, &__key);	\
	} while (0)

#ifdef CONFIG_LOCKDEP
/* On-stack heads need runtime init so lockdep sees a valid key. */
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
89
90 static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
91 {
92         q->flags        = 0;
93         q->private      = p;
94         q->func         = default_wake_function;
95 }
96
97 static inline void
98 init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
99 {
100         q->flags        = 0;
101         q->private      = NULL;
102         q->func         = func;
103 }
104
105 /**
106  * waitqueue_active -- locklessly test for waiters on the queue
107  * @q: the waitqueue to test for waiters
108  *
109  * returns true if the wait list is not empty
110  *
111  * NOTE: this function is lockless and requires care, incorrect usage _will_
112  * lead to sporadic and non-obvious failure.
113  *
114  * Use either while holding wait_queue_head_t::lock or when used for wakeups
115  * with an extra smp_mb() like:
116  *
117  *      CPU0 - waker                    CPU1 - waiter
118  *
119  *                                      for (;;) {
120  *      @cond = true;                     prepare_to_wait(&wq, &wait, state);
121  *      smp_mb();                         // smp_mb() from set_current_state()
122  *      if (waitqueue_active(wq))         if (@cond)
123  *        wake_up(wq);                      break;
124  *                                        schedule();
125  *                                      }
126  *                                      finish_wait(&wq, &wait);
127  *
128  * Because without the explicit smp_mb() it's possible for the
129  * waitqueue_active() load to get hoisted over the @cond store such that we'll
130  * observe an empty wait list while the waiter might not observe @cond.
131  *
132  * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
133  * which (when the lock is uncontended) are of roughly equal cost.
134  */
135 static inline int waitqueue_active(wait_queue_head_t *q)
136 {
137         return !list_empty(&q->task_list);
138 }
139
140 /**
141  * wq_has_sleeper - check if there are any waiting processes
142  * @wq: wait queue head
143  *
144  * Returns true if wq has waiting processes
145  *
146  * Please refer to the comment for waitqueue_active.
147  */
148 static inline bool wq_has_sleeper(wait_queue_head_t *wq)
149 {
150         /*
151          * We need to be sure we are in sync with the
152          * add_wait_queue modifications to the wait queue.
153          *
154          * This memory barrier should be paired with one on the
155          * waiting side.
156          */
157         smp_mb();
158         return waitqueue_active(wq);
159 }
160
/* Out-of-line variants; cf. the unlocked __add_wait_queue*() helpers below. */
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
164
/*
 * Add @new at the head of @head.  No locking here; callers appear to
 * hold @head->lock (cf. __wait_event_interruptible_locked()).
 */
static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}
169
170 /*
171  * Used for wake-one threads:
172  */
173 static inline void
174 __add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
175 {
176         wait->flags |= WQ_FLAG_EXCLUSIVE;
177         __add_wait_queue(q, wait);
178 }
179
/* Add @new at the tail of @head; same (un)locking rules as __add_wait_queue(). */
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}
185
186 static inline void
187 __add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
188 {
189         wait->flags |= WQ_FLAG_EXCLUSIVE;
190         __add_wait_queue_tail(q, wait);
191 }
192
/* Unlink @old from its queue; @head is unused, kept for API symmetry. */
static inline void
__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
	list_del(&old->task_list);
}
198
/* Action callback run while waiting on a bit; @mode is the task sleep state. */
typedef int wait_bit_action_f(struct wait_bit_key *, int mode);

/* Out-of-line implementations backing the wake_up*() / bit-wait macros below. */
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);
215
/* nr == 1: wake one exclusive waiter; nr == 0 (the _all variants): no limit. */
#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 * The poll mask @m is passed through the @key argument.
 */
#define wake_up_poll(x, m)						\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)					\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)				\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)				\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
238
/*
 * Fold timeout bookkeeping into the condition check.  __ret (deliberately
 * taken from the enclosing macro's scope) holds the remaining jiffies;
 * when the condition turns true exactly as __ret reaches 0, force __ret
 * to 1 so callers can still tell "condition met" (>= 1) from "timed
 * out" (0).  Evaluates true when the condition holds or time is up.
 */
#define ___wait_cond_timeout(condition)					\
({									\
	bool __cond = (condition);					\
	if (__cond && !__ret)						\
		__ret = 1;						\
	__cond || !__ret;						\
})
246
/*
 * True when a sleep in @state can be interrupted by signals.  A state
 * that is not a compile-time constant is conservatively treated as
 * interruptible.
 *
 * Fix: dropped the stray trailing line-continuation '\' that spliced
 * the macro into the following (blank) line; any text later added on
 * that line would silently have become part of the macro.
 */
#define ___wait_is_interruptible(state)					\
	(!__builtin_constant_p(state) ||				\
		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)

251 /*
252  * The below macro ___wait_event() has an explicit shadow of the __ret
253  * variable when used from the wait_event_*() macros.
254  *
255  * This is so that both can use the ___wait_cond_timeout() construct
256  * to wrap the condition.
257  *
258  * The type inconsistency of the wait_event_*() __ret variable is also
259  * on purpose; we use long where we can return timeout values and int
260  * otherwise.
261  */
262
/*
 * Core wait loop: queue __wait on @wq, re-check @condition after every
 * wake-up, and run @cmd (typically schedule()) in between.  An
 * interruptible @state bails out early with the value returned by
 * prepare_to_wait_event() when a signal is pending.
 *
 * NOTE(review): the signal path jumps to __out WITHOUT finish_wait();
 * prepare_to_wait_event() is presumed to have dequeued __wait in that
 * case -- confirm against kernel/sched/wait.c.
 */
#define ___wait_event(wq, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	wait_queue_t __wait;						\
	long __ret = ret;	/* explicit shadow */			\
									\
	INIT_LIST_HEAD(&__wait.task_list);				\
	if (exclusive)							\
		__wait.flags = WQ_FLAG_EXCLUSIVE;			\
	else								\
		__wait.flags = 0;					\
									\
	for (;;) {							\
		long __int = prepare_to_wait_event(&wq, &__wait, state);\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			goto __out;					\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(&wq, &__wait);					\
__out:	__ret;								\
})
291
/* Plain uninterruptible wait; the ___wait_event() result is discarded. */
#define __wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)					\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)
315
#define __io_wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 * (presumably so the sleep is accounted as I/O wait -- see io_schedule()).
 */
#define io_wait_event(wq, condition)					\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__io_wait_event(wq, condition);					\
} while (0)
330
/* Interruptible wait that also calls try_to_freeze() after each wake-up. */
#define __wait_event_freezable(wq, condition)				\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
			    schedule(); try_to_freeze())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_freezable(wq, condition);		\
	__ret;								\
})
355
/* __ret carries the remaining jiffies across loop iterations. */
#define __wait_event_timeout(wq, condition, timeout)			\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_UNINTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_timeout(wq, condition, timeout);	\
	__ret;								\
})
388
/* Timeout wait that also calls try_to_freeze() after each wake-up. */
#define __wait_event_freezable_timeout(wq, condition, timeout)		\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret); try_to_freeze())

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.  Same return convention as
 * wait_event_timeout().
 */
#define wait_event_freezable_timeout(wq, condition, timeout)		\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_freezable_timeout(wq, condition, timeout); \
	__ret;								\
})
406
/* @cmd1 runs before each schedule(), @cmd2 after; exclusive (wake-one) waiter. */
#define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)		\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
			    cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 *
 * NOTE(review): unlike wait_event(), no might_sleep() annotation here --
 * confirm whether that is intentional.
 */
#define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_exclusive_cmd(wq, condition, cmd1, cmd2);		\
} while (0)
419
#define __wait_event_cmd(wq, condition, cmd1, cmd2)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command will be executed before sleep
 * @cmd2: the command will be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq, condition, cmd1, cmd2)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_cmd(wq, condition, cmd1, cmd2);			\
} while (0)
444
#define __wait_event_interruptible(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_interruptible(wq, condition);	\
	__ret;								\
})
472
/* __ret carries the remaining jiffies across loop iterations. */
#define __wait_event_interruptible_timeout(wq, condition, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_timeout(wq,		\
						condition, timeout);	\
	__ret;								\
})
507
/*
 * Arm an on-stack hrtimer for @timeout, then run the core wait loop.
 * __t.task being NULL signals expiry (presumably cleared by the sleeper's
 * timer callback -- confirm in hrtimer_init_sleeper()), mapped to -ETIME.
 * KTIME_MAX means "no timeout": the timer is never started.
 */
#define __wait_event_hrtimeout(wq, condition, timeout, state)		\
({									\
	int __ret = 0;							\
	struct hrtimer_sleeper __t;					\
									\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
			      HRTIMER_MODE_REL);			\
	hrtimer_init_sleeper(&__t, current);				\
	if ((timeout).tv64 != KTIME_MAX)				\
		hrtimer_start_range_ns(&__t.timer, timeout,		\
				       current->timer_slack_ns,		\
				       HRTIMER_MODE_REL);		\
									\
	__ret = ___wait_event(wq, condition, state, 0, 0,		\
		if (!__t.task) {					\
			__ret = -ETIME;					\
			break;						\
		}							\
		schedule());						\
									\
	hrtimer_cancel(&__t.timer);					\
	destroy_hrtimer_on_stack(&__t.timer);				\
	__ret;								\
})
532
/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.  Signals do not
 * interrupt this sleep; see wait_event_interruptible_hrtimeout() for that.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);	\
	__ret;								\
})
558
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
({									\
	long __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_INTERRUPTIBLE);	\
	__ret;								\
})
584
#define __wait_event_interruptible_exclusive(wq, condition)		\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
		      schedule())

/*
 * Exclusive (wake-one) interruptible wait.  Returns 0, or on a signal
 * the negative value from prepare_to_wait_event() (via ___wait_event()).
 */
#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_exclusive(wq, condition);\
	__ret;								\
})
597
#define __wait_event_killable_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,		\
		      schedule())

/* Like wait_event_interruptible_exclusive(), but only fatal signals wake it. */
#define wait_event_killable_exclusive(wq, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_killable_exclusive(wq, condition);	\
	__ret;								\
})
610
611
#define __wait_event_freezable_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
			schedule(); try_to_freeze())

/* Exclusive interruptible wait that also calls try_to_freeze() after each wake. */
#define wait_event_freezable_exclusive(wq, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_freezable_exclusive(wq, condition);\
	__ret;								\
})
624
625
/*
 * Wait for @condition with (wq).lock held on entry and exit; the lock is
 * dropped only around schedule() and re-taken before @condition is
 * re-evaluated.  @irq selects the spin_lock_irq() lock flavour.
 * Returns -ERESTARTSYS if a signal was pending, 0 otherwise.
 */
#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	if (exclusive)							\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
	do {								\
		if (likely(list_empty(&__wait.task_list)))		\
			__add_wait_queue_tail(&(wq), &__wait);		\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if (irq)						\
			spin_unlock_irq(&(wq).lock);			\
		else							\
			spin_unlock(&(wq).lock);			\
		schedule();						\
		if (irq)						\
			spin_lock_irq(&(wq).lock);			\
		else							\
			spin_lock(&(wq).lock);				\
	} while (!(condition));						\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})
654
655
/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.  This is the non-exclusive, non-irq variant.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)			\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
682
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.  This is the non-exclusive, irq-disabling variant.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
709
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so when this process is woken up, further exclusive waiters on
 * the list are not considered (only one exclusive waiter is woken per
 * wake-up).
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
740
/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so when this process is woken up, further exclusive waiters on
 * the list are not considered (only one exclusive waiter is woken per
 * wake-up).
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
771
772
/* Sleep in TASK_KILLABLE: only fatal signals interrupt the wait. */
#define __wait_event_killable(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
775
/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received
 * (TASK_KILLABLE ignores non-fatal signals).
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_killable(wq, condition);		\
	__ret;								\
})
799
800
/*
 * Uninterruptible wait helper: @lock is dropped (irq-enabling) around
 * @cmd and schedule(), and reacquired before @condition is rechecked.
 * The (void) cast discards ___wait_event()'s value — there is no error
 * to report from an uninterruptible wait.
 */
#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);			\
			    cmd;					\
			    schedule();					\
			    spin_lock_irq(&lock))
807
/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *                           condition is checked under the lock. This
 *                           is expected to be called with the lock
 *                           taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *        and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.  There is no return value: the wait is uninterruptible.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)
837
/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *                       condition is checked under the lock. This
 *                       is expected to be called with the lock
 *                       taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 * Equivalent to wait_event_lock_irq_cmd() with an empty @cmd.
 */
#define wait_event_lock_irq(wq, condition, lock)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, );			\
} while (0)
864
865
/*
 * Interruptible variant of __wait_event_lock_irq(): same lock handling,
 * but sleeps in TASK_INTERRUPTIBLE and yields -ERESTARTSYS on signal.
 */
#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)	\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);				\
		      cmd;						\
		      schedule();					\
		      spin_lock_irq(&lock))
872
/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *              The condition is checked under the lock. This is expected to
 *              be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *        schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock, cmd);	\
	__ret;								\
})
906
/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *              The condition is checked under the lock. This is expected
 *              to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 * Equivalent to wait_event_interruptible_lock_irq_cmd() with an empty
 * @cmd.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock,);	\
	__ret;								\
})
937
/*
 * Timeout variant of __wait_event_interruptible_lock_irq(): @lock is
 * dropped around schedule_timeout() and reacquired before @condition is
 * rechecked.  __ret (provided by ___wait_event()) carries the remaining
 * jiffies across loop iterations.
 *
 * Note: the replacement text deliberately has NO trailing semicolon —
 * the previous stray ';' expanded to an empty statement after every use
 * and would break any pure expression-context use of this macro.
 */
#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
						    lock, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      spin_unlock_irq(&lock);				\
		      __ret = schedule_timeout(__ret);			\
		      spin_lock_irq(&lock))
945
/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *              true or a timeout elapses. The condition is checked under
 *              the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true, a signal is received, or the timeout
 * elapses. The @condition is checked each time the waitqueue @wq is
 * woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies otherwise
 * if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
						  timeout)		\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_lock_irq_timeout(	\
					wq, condition, lock, timeout);	\
	__ret;								\
})
979
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
/* Queue @wait on @q and set the task state (non-exclusive / exclusive). */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
/* As above, but also checks for pending signals; used by ___wait_event(). */
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
/*
 * NOTE(review): per the change this header version belongs to,
 * ___wait_event() no longer calls abort_exclusive_wait(); confirm any
 * remaining callers before relying on it.
 */
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, void *key);
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
992
/*
 * Declare an on-stack wait_queue_t for the current task with the given
 * wake function; task_list starts self-linked (empty).
 */
#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

/* Common case: entry removes itself from the queue when woken. */
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
1001
/*
 * Declare an on-stack wait_bit_queue keyed to (@word, @bit) for the
 * current task, using wake_bit_function() so only matching-bit wake-ups
 * wake this waiter.
 */
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}
1012
/*
 * Runtime equivalent of DEFINE_WAIT() for a wait_queue_t that already
 * exists (e.g. embedded in another structure): reset it for the current
 * task with autoremove semantics and no flags.
 */
#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)
1020
1021
/*
 * Standard sleep actions passed to the wait_on_bit*() helpers below:
 * plain schedule(), io_schedule(), and timeout-bounded variants.
 */
extern int bit_wait(struct wait_bit_key *, int);
extern int bit_wait_io(struct wait_bit_key *, int);
extern int bit_wait_timeout(struct wait_bit_key *, int);
extern int bit_wait_io_timeout(struct wait_bit_key *, int);
1026
1027 /**
1028  * wait_on_bit - wait for a bit to be cleared
1029  * @word: the word being waited on, a kernel virtual address
1030  * @bit: the bit of the word being waited on
1031  * @mode: the task state to sleep in
1032  *
1033  * There is a standard hashed waitqueue table for generic use. This
1034  * is the part of the hashtable's accessor API that waits on a bit.
1035  * For instance, if one were to have waiters on a bitflag, one would
1036  * call wait_on_bit() in threads waiting for the bit to clear.
1037  * One uses wait_on_bit() where one is waiting for the bit to clear,
1038  * but has no intention of setting it.
1039  * Returned value will be zero if the bit was cleared, or non-zero
1040  * if the process received a signal and the mode permitted wakeup
1041  * on that signal.
1042  */
1043 static inline int
1044 wait_on_bit(unsigned long *word, int bit, unsigned mode)
1045 {
1046         might_sleep();
1047         if (!test_bit(bit, word))
1048                 return 0;
1049         return out_of_line_wait_on_bit(word, bit,
1050                                        bit_wait,
1051                                        mode);
1052 }
1053
1054 /**
1055  * wait_on_bit_io - wait for a bit to be cleared
1056  * @word: the word being waited on, a kernel virtual address
1057  * @bit: the bit of the word being waited on
1058  * @mode: the task state to sleep in
1059  *
1060  * Use the standard hashed waitqueue table to wait for a bit
1061  * to be cleared.  This is similar to wait_on_bit(), but calls
1062  * io_schedule() instead of schedule() for the actual waiting.
1063  *
1064  * Returned value will be zero if the bit was cleared, or non-zero
1065  * if the process received a signal and the mode permitted wakeup
1066  * on that signal.
1067  */
1068 static inline int
1069 wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
1070 {
1071         might_sleep();
1072         if (!test_bit(bit, word))
1073                 return 0;
1074         return out_of_line_wait_on_bit(word, bit,
1075                                        bit_wait_io,
1076                                        mode);
1077 }
1078
1079 /**
1080  * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
1081  * @word: the word being waited on, a kernel virtual address
1082  * @bit: the bit of the word being waited on
1083  * @mode: the task state to sleep in
1084  * @timeout: timeout, in jiffies
1085  *
1086  * Use the standard hashed waitqueue table to wait for a bit
1087  * to be cleared. This is similar to wait_on_bit(), except also takes a
1088  * timeout parameter.
1089  *
1090  * Returned value will be zero if the bit was cleared before the
1091  * @timeout elapsed, or non-zero if the @timeout elapsed or process
1092  * received a signal and the mode permitted wakeup on that signal.
1093  */
1094 static inline int
1095 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
1096                     unsigned long timeout)
1097 {
1098         might_sleep();
1099         if (!test_bit(bit, word))
1100                 return 0;
1101         return out_of_line_wait_on_bit_timeout(word, bit,
1102                                                bit_wait_timeout,
1103                                                mode, timeout);
1104 }
1105
1106 /**
1107  * wait_on_bit_action - wait for a bit to be cleared
1108  * @word: the word being waited on, a kernel virtual address
1109  * @bit: the bit of the word being waited on
1110  * @action: the function used to sleep, which may take special actions
1111  * @mode: the task state to sleep in
1112  *
1113  * Use the standard hashed waitqueue table to wait for a bit
1114  * to be cleared, and allow the waiting action to be specified.
1115  * This is like wait_on_bit() but allows fine control of how the waiting
1116  * is done.
1117  *
1118  * Returned value will be zero if the bit was cleared, or non-zero
1119  * if the process received a signal and the mode permitted wakeup
1120  * on that signal.
1121  */
1122 static inline int
1123 wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
1124                    unsigned mode)
1125 {
1126         might_sleep();
1127         if (!test_bit(bit, word))
1128                 return 0;
1129         return out_of_line_wait_on_bit(word, bit, action, mode);
1130 }
1131
1132 /**
1133  * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
1134  * @word: the word being waited on, a kernel virtual address
1135  * @bit: the bit of the word being waited on
1136  * @mode: the task state to sleep in
1137  *
1138  * There is a standard hashed waitqueue table for generic use. This
1139  * is the part of the hashtable's accessor API that waits on a bit
1140  * when one intends to set it, for instance, trying to lock bitflags.
1141  * For instance, if one were to have waiters trying to set bitflag
1142  * and waiting for it to clear before setting it, one would call
1143  * wait_on_bit() in threads waiting to be able to set the bit.
1144  * One uses wait_on_bit_lock() where one is waiting for the bit to
1145  * clear with the intention of setting it, and when done, clearing it.
1146  *
1147  * Returns zero if the bit was (eventually) found to be clear and was
1148  * set.  Returns non-zero if a signal was delivered to the process and
1149  * the @mode allows that signal to wake the process.
1150  */
1151 static inline int
1152 wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
1153 {
1154         might_sleep();
1155         if (!test_and_set_bit(bit, word))
1156                 return 0;
1157         return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
1158 }
1159
1160 /**
1161  * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
1162  * @word: the word being waited on, a kernel virtual address
1163  * @bit: the bit of the word being waited on
1164  * @mode: the task state to sleep in
1165  *
1166  * Use the standard hashed waitqueue table to wait for a bit
1167  * to be cleared and then to atomically set it.  This is similar
1168  * to wait_on_bit(), but calls io_schedule() instead of schedule()
1169  * for the actual waiting.
1170  *
1171  * Returns zero if the bit was (eventually) found to be clear and was
1172  * set.  Returns non-zero if a signal was delivered to the process and
1173  * the @mode allows that signal to wake the process.
1174  */
1175 static inline int
1176 wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
1177 {
1178         might_sleep();
1179         if (!test_and_set_bit(bit, word))
1180                 return 0;
1181         return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
1182 }
1183
1184 /**
1185  * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
1186  * @word: the word being waited on, a kernel virtual address
1187  * @bit: the bit of the word being waited on
1188  * @action: the function used to sleep, which may take special actions
1189  * @mode: the task state to sleep in
1190  *
1191  * Use the standard hashed waitqueue table to wait for a bit
1192  * to be cleared and then to set it, and allow the waiting action
1193  * to be specified.
1194  * This is like wait_on_bit() but allows fine control of how the waiting
1195  * is done.
1196  *
1197  * Returns zero if the bit was (eventually) found to be clear and was
1198  * set.  Returns non-zero if a signal was delivered to the process and
1199  * the @mode allows that signal to wake the process.
1200  */
1201 static inline int
1202 wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
1203                         unsigned mode)
1204 {
1205         might_sleep();
1206         if (!test_and_set_bit(bit, word))
1207                 return 0;
1208         return out_of_line_wait_on_bit_lock(word, bit, action, mode);
1209 }
1210
1211 /**
1212  * wait_on_atomic_t - Wait for an atomic_t to become 0
1213  * @val: The atomic value being waited on, a kernel virtual address
1214  * @action: the function used to sleep, which may take special actions
1215  * @mode: the task state to sleep in
1216  *
1217  * Wait for an atomic_t to become 0.  We abuse the bit-wait waitqueue table for
1218  * the purpose of getting a waitqueue, but we set the key to a bit number
1219  * outside of the target 'word'.
1220  */
1221 static inline
1222 int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
1223 {
1224         might_sleep();
1225         if (atomic_read(val) == 0)
1226                 return 0;
1227         return out_of_line_wait_on_atomic_t(val, action, mode);
1228 }
1229
1230 #endif /* _LINUX_WAIT_H */