Merge tag 'sh-for-4.9' of git://git.libc.org/linux-sh
[cascardo/linux.git] / include / linux / wait.h
1 #ifndef _LINUX_WAIT_H
2 #define _LINUX_WAIT_H
3 /*
4  * Linux wait queue related types and methods
5  */
6 #include <linux/list.h>
7 #include <linux/stddef.h>
8 #include <linux/spinlock.h>
9 #include <asm/current.h>
10 #include <uapi/linux/wait.h>
11
/* A single entry on a wait queue (definition below). */
typedef struct __wait_queue wait_queue_t;
/*
 * Wakeup callback installed in __wait_queue::func; invoked when the queue
 * is woken.  @mode is the task-state mask, @key the opaque wake key.
 */
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
/* Default ->func, installed by DECLARE_WAITQUEUE()/init_waitqueue_entry(). */
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

/* __wait_queue::flags */
#define WQ_FLAG_EXCLUSIVE	0x01	/* wake-one semantics; see __add_wait_queue_exclusive() */
#define WQ_FLAG_WOKEN		0x02	/* NOTE(review): consumed by wait_woken()-style helpers outside this chunk — confirm */
19
/*
 * One waiter on a wait queue.
 */
struct __wait_queue {
	unsigned int		flags;		/* WQ_FLAG_* bits */
	void			*private;	/* usually the waiting task; see init_waitqueue_entry() */
	wait_queue_func_t	func;		/* wakeup callback */
	struct list_head	task_list;	/* linkage on __wait_queue_head::task_list */
};
26
/*
 * Key identifying what a wait-on-bit waiter is blocked on: a word and a
 * bit number within it (or WAIT_ATOMIC_T_BIT_NR for wait-on-atomic_t).
 */
struct wait_bit_key {
	void			*flags;		/* address of the word holding the bit */
	int			bit_nr;		/* bit index, or WAIT_ATOMIC_T_BIT_NR */
#define WAIT_ATOMIC_T_BIT_NR	-1
	unsigned long		timeout;	/* used by the _timeout bit waits */
};

/* Wait queue entry plus the bit key it is waiting on. */
struct wait_bit_queue {
	struct wait_bit_key	key;
	wait_queue_t		wait;
};
38
/*
 * Head of a wait queue: the lock protects the list of waiters.
 */
struct __wait_queue_head {
	spinlock_t		lock;
	struct list_head	task_list;	/* list of __wait_queue entries */
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;	/* forward declaration; full definition not needed here */
46
/*
 * Macros for declaration and initialisation of the datatypes
 */

/*
 * Static initializer for an entry waking task @tsk via
 * default_wake_function().  ->task_list is left {NULL, NULL}: the entry
 * is not on any queue until added with add_wait_queue().
 */
#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

/* Static initializer for a queue head: unlocked lock, empty (self-linked) list. */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

/* Initializers for wait_bit_key: wait on bit @bit of word @word ... */
#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

/* ... or on the atomic_t at @p (sentinel bit_nr). */
#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)				\
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
71
extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

/*
 * Runtime initialization of a wait queue head.  The per-callsite static
 * lock_class_key gives each user its own lockdep class; the stringified
 * argument (#q) names it.
 */
#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_waitqueue_head((q), #q, &__key);	\
	} while (0)

#ifdef CONFIG_LOCKDEP
/*
 * Under lockdep an on-stack head must go through init_waitqueue_head()
 * so its lock class is registered; without lockdep the plain static
 * initializer suffices.
 */
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
89
90 static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
91 {
92         q->flags        = 0;
93         q->private      = p;
94         q->func         = default_wake_function;
95 }
96
97 static inline void
98 init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
99 {
100         q->flags        = 0;
101         q->private      = NULL;
102         q->func         = func;
103 }
104
105 /**
106  * waitqueue_active -- locklessly test for waiters on the queue
107  * @q: the waitqueue to test for waiters
108  *
109  * returns true if the wait list is not empty
110  *
111  * NOTE: this function is lockless and requires care, incorrect usage _will_
112  * lead to sporadic and non-obvious failure.
113  *
114  * Use either while holding wait_queue_head_t::lock or when used for wakeups
115  * with an extra smp_mb() like:
116  *
117  *      CPU0 - waker                    CPU1 - waiter
118  *
119  *                                      for (;;) {
120  *      @cond = true;                     prepare_to_wait(&wq, &wait, state);
121  *      smp_mb();                         // smp_mb() from set_current_state()
122  *      if (waitqueue_active(wq))         if (@cond)
123  *        wake_up(wq);                      break;
124  *                                        schedule();
125  *                                      }
126  *                                      finish_wait(&wq, &wait);
127  *
128  * Because without the explicit smp_mb() it's possible for the
129  * waitqueue_active() load to get hoisted over the @cond store such that we'll
130  * observe an empty wait list while the waiter might not observe @cond.
131  *
132  * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
133  * which (when the lock is uncontended) are of roughly equal cost.
134  */
135 static inline int waitqueue_active(wait_queue_head_t *q)
136 {
137         return !list_empty(&q->task_list);
138 }
139
140 /**
141  * wq_has_sleeper - check if there are any waiting processes
142  * @wq: wait queue head
143  *
144  * Returns true if wq has waiting processes
145  *
146  * Please refer to the comment for waitqueue_active.
147  */
148 static inline bool wq_has_sleeper(wait_queue_head_t *wq)
149 {
150         /*
151          * We need to be sure we are in sync with the
152          * add_wait_queue modifications to the wait queue.
153          *
154          * This memory barrier should be paired with one on the
155          * waiting side.
156          */
157         smp_mb();
158         return waitqueue_active(wq);
159 }
160
/*
 * Add/remove entries; unlike the __-prefixed inline helpers below these
 * are expected to take q->lock themselves (defined out of line).
 */
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
164
165 static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
166 {
167         list_add(&new->task_list, &head->task_list);
168 }
169
170 /*
171  * Used for wake-one threads:
172  */
173 static inline void
174 __add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
175 {
176         wait->flags |= WQ_FLAG_EXCLUSIVE;
177         __add_wait_queue(q, wait);
178 }
179
180 static inline void __add_wait_queue_tail(wait_queue_head_t *head,
181                                          wait_queue_t *new)
182 {
183         list_add_tail(&new->task_list, &head->task_list);
184 }
185
186 static inline void
187 __add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
188 {
189         wait->flags |= WQ_FLAG_EXCLUSIVE;
190         __add_wait_queue_tail(q, wait);
191 }
192
193 static inline void
194 __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
195 {
196         list_del(&old->task_list);
197 }
198
/* Action callback for the wait-on-bit loops; @mode is the task state to sleep in. */
typedef int wait_bit_action_f(struct wait_bit_key *, int mode);

/* Core wakeup primitives (defined out of line); 'mode' is the task-state mask. */
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);

/* Wait-on-bit / wait-on-atomic_t machinery. */
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);
215
/*
 * Wakeup wrappers.  The mode argument selects eligible sleepers
 * (TASK_NORMAL vs TASK_INTERRUPTIBLE); the nr argument is passed through
 * to __wake_up() — the _all variants pass 0 where the single-wakeup
 * variants pass 1.
 */
#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 * The poll variants pass the event mask @m through as the wake key.
 */
#define wake_up_poll(x, m)						\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)					\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)				\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)				\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
238
/*
 * ___wait_cond_timeout(condition) - wrap @condition for the *_timeout waits.
 *
 * Evaluates to true when @condition is true OR the timeout has expired
 * (__ret == 0).  If @condition becomes true exactly as the timeout hits
 * zero, __ret is bumped to 1 so the caller still reports success (the
 * documented "at least 1" return).  Relies on a __ret variable in scope,
 * seeded by the wait_event_*() macros / ___wait_event().
 */
#define ___wait_cond_timeout(condition)					\
({									\
	bool __cond = (condition);					\
	if (__cond && !__ret)						\
		__ret = 1;						\
	__cond || !__ret;						\
})

/*
 * True when a sleep in @state may be ended by a signal: non-constant
 * states are conservatively treated as interruptible; constant states
 * match TASK_INTERRUPTIBLE and TASK_KILLABLE.
 */
#define ___wait_is_interruptible(state)					\
	(!__builtin_constant_p(state) ||				\
		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)	\

/* Initialize a wait entry; @flags is 0 or WQ_FLAG_EXCLUSIVE. */
extern void init_wait_entry(wait_queue_t *__wait, int flags);
252
/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

/*
 * ___wait_event - common wait loop behind every wait_event_*() variant.
 * @wq:		the waitqueue to wait on
 * @condition:	expression re-evaluated after each wakeup
 * @state:	task state to sleep in (TASK_UNINTERRUPTIBLE, ...)
 * @exclusive:	non-zero queues the entry with WQ_FLAG_EXCLUSIVE
 * @ret:	initial value of __ret (the timeout for *_timeout variants)
 * @cmd:	statement(s) that perform the actual sleep
 *
 * Evaluates to __ret; if a signal ends an interruptible sleep, __ret is
 * the (non-zero) value returned by prepare_to_wait_event().
 *
 * NOTE(review): the signal path jumps to __out and skips finish_wait();
 * dequeueing is presumably done inside prepare_to_wait_event() — confirm
 * against kernel/sched/wait.c.
 */
#define ___wait_event(wq, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	wait_queue_t __wait;						\
	long __ret = ret;	/* explicit shadow */			\
									\
	init_wait_entry(&__wait, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
	for (;;) {							\
		long __int = prepare_to_wait_event(&wq, &__wait, state);\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;	/* interrupted by a signal */	\
			goto __out;					\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(&wq, &__wait);					\
__out:	__ret;								\
})
288
/* Uninterruptible sleep loop; result discarded (cannot fail). */
#define __wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)					\
do {									\
	might_sleep();							\
	if (condition)		/* fast path: skip the queue entirely */\
		break;							\
	__wait_event(wq, condition);					\
} while (0)
312
/* Like __wait_event() but sleeps via io_schedule() (accounts as I/O wait). */
#define __io_wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 */
#define io_wait_event(wq, condition)					\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__io_wait_event(wq, condition);					\
} while (0)
327
/* Interruptible sleep that also offers itself to the freezer after each wakeup. */
#define __wait_event_freezable(wq, condition)				\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
			    schedule(); try_to_freeze())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_freezable(wq, condition);		\
	__ret;								\
})
352
/*
 * __ret is seeded with @timeout (via the 'ret' argument of
 * ___wait_event()); schedule_timeout() keeps it updated with the
 * remaining jiffies on every iteration.
 */
#define __wait_event_timeout(wq, condition, timeout)			\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_UNINTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_timeout(wq, condition, timeout);	\
	__ret;								\
})
385
/* Timeout wait that is also freezer-friendly; __ret tracks remaining jiffies. */
#define __wait_event_freezable_timeout(wq, condition, timeout)		\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret); try_to_freeze())

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */
#define wait_event_freezable_timeout(wq, condition, timeout)		\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_freezable_timeout(wq, condition, timeout); \
	__ret;								\
})
403
/* Exclusive (wake-one) variant; @cmd1/@cmd2 run before/after each schedule(). */
#define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)		\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
			    cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 */
#define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_exclusive_cmd(wq, condition, cmd1, cmd2);		\
} while (0)

/* Non-exclusive version: @cmd1/@cmd2 bracket the schedule() each iteration. */
#define __wait_event_cmd(wq, condition, cmd1, cmd2)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)
420
/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command will be executed before sleep
 * @cmd2: the command will be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.  Note that @cmd1 and @cmd2 run on
 * every iteration of the wait loop, not just once.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq, condition, cmd1, cmd2)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_cmd(wq, condition, cmd1, cmd2);			\
} while (0)
441
/* Interruptible sleep loop; evaluates to 0 or the signal error from the loop. */
#define __wait_event_interruptible(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_interruptible(wq, condition);	\
	__ret;								\
})
469
/* Interruptible timeout wait; __ret carries the remaining jiffies. */
#define __wait_event_interruptible_timeout(wq, condition, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_timeout(wq,		\
						condition, timeout);	\
	__ret;								\
})
504
/*
 * Common helper for the hrtimeout waits: arms an on-stack hrtimer
 * sleeper (unless @timeout is KTIME_MAX, which means "no timeout") and
 * sleeps in @state until @condition, the timer firing (-ETIME), or --
 * for an interruptible @state -- a signal.
 */
#define __wait_event_hrtimeout(wq, condition, timeout, state)		\
({									\
	int __ret = 0;							\
	struct hrtimer_sleeper __t;					\
									\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
			      HRTIMER_MODE_REL);			\
	hrtimer_init_sleeper(&__t, current);				\
	if ((timeout).tv64 != KTIME_MAX)				\
		hrtimer_start_range_ns(&__t.timer, timeout,		\
				       current->timer_slack_ns,		\
				       HRTIMER_MODE_REL);		\
									\
	__ret = ___wait_event(wq, condition, state, 0, 0,		\
		if (!__t.task) {	/* timer fired */		\
			__ret = -ETIME;					\
			break;						\
		}							\
		schedule());						\
									\
	hrtimer_cancel(&__t.timer);					\
	destroy_hrtimer_on_stack(&__t.timer);				\
	__ret;								\
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses; unlike the
 * _interruptible variant below, signals do not end this sleep.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);	\
	__ret;								\
})
555
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
({									\
	long __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_INTERRUPTIBLE);	\
	__ret;								\
})
581
/* Interruptible wait queued WQ_FLAG_EXCLUSIVE (wake-one semantics). */
#define __wait_event_interruptible_exclusive(wq, condition)		\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
		      schedule())

/* Returns 0 when @condition is true, or the signal error from the wait loop. */
#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_exclusive(wq, condition);\
	__ret;								\
})
594
/*
 * Exclusive wait in TASK_KILLABLE; ___wait_is_interruptible() treats
 * this state as signal-interruptible, so the loop can end on a signal.
 */
#define __wait_event_killable_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,		\
		      schedule())

/* Returns 0 when @condition is true, or the signal error from the wait loop. */
#define wait_event_killable_exclusive(wq, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_killable_exclusive(wq, condition);	\
	__ret;								\
})
607
608
/* Exclusive, interruptible, freezer-friendly wait (try_to_freeze() after each wakeup). */
#define __wait_event_freezable_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
			schedule(); try_to_freeze())

/* Returns 0 when @condition is true, or the signal error from the wait loop. */
#define wait_event_freezable_exclusive(wq, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_freezable_exclusive(wq, condition);\
	__ret;								\
})
621
622
/*
 * __wait_event_interruptible_locked() - core of the *_locked waits.
 * @wq:        waitqueue; its ->lock must be held on entry and is held on exit
 * @condition: expression tested with the lock held
 * @exclusive: non-zero queues the entry WQ_FLAG_EXCLUSIVE at the tail
 * @irq:       non-zero uses spin_unlock_irq()/spin_lock_irq() around schedule()
 *
 * The lock is dropped only across schedule(); the signal check and the
 * @condition test both run with it held.  The entry is (re)queued only
 * when its list linkage is empty, so a wake that dequeued us gets it
 * re-added on the next iteration.  Evaluates to 0 or -ERESTARTSYS.
 */
#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	if (exclusive)							\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
	do {								\
		if (likely(list_empty(&__wait.task_list)))		\
			__add_wait_queue_tail(&(wq), &__wait);		\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if (irq)						\
			spin_unlock_irq(&(wq).lock);			\
		else							\
			spin_unlock(&(wq).lock);			\
		schedule();						\
		if (irq)						\
			spin_lock_irq(&(wq).lock);			\
		else							\
			spin_lock(&(wq).lock);				\
	} while (!(condition));						\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})
651
652
/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up,
 * so it may be evaluated multiple times and must not have side effects.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)                  \
        ((condition)                                                    \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
679
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up,
 * so it may be evaluated multiple times and must not have side effects.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)              \
        ((condition)                                                    \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
706
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so when this process is woken by an exclusive (wake-one) wakeup,
 * further waiters on the list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)        \
        ((condition)                                                    \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
737
/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so when this process is woken by an exclusive (wake-one) wakeup,
 * further waiters on the list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)    \
        ((condition)                                                    \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
768
769
/* TASK_KILLABLE wait loop; see wait_event_killable() below for the contract. */
#define __wait_event_killable(wq, condition)                            \
        ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
772
/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up,
 * so it may be evaluated multiple times and must not have side effects.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)                              \
({                                                                      \
        int __ret = 0;                                                  \
        might_sleep();  /* debug check: must not be called from atomic context */ \
        if (!(condition))                                               \
                __ret = __wait_event_killable(wq, condition);           \
        __ret;                                                          \
})
796
797
/*
 * Uninterruptible wait that drops the caller-held @lock (via
 * spin_unlock_irq()) around @cmd and schedule(), reacquiring it before
 * @condition is re-tested.  The (void) cast discards ___wait_event()'s
 * result, which carries no information for a TASK_UNINTERRUPTIBLE wait.
 */
#define __wait_event_lock_irq(wq, condition, lock, cmd)                 \
        (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
                            spin_unlock_irq(&lock);                     \
                            cmd;                                        \
                            schedule();                                 \
                            spin_lock_irq(&lock))
804
/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *                           condition is checked under the lock. This
 *                           is expected to be called with the lock
 *                           taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *        and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up, so it may be evaluated multiple times
 * and must not have side effects.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards; @cmd therefore runs with @lock released and interrupts
 * enabled.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)               \
do {                                                                    \
        if (condition)                                                  \
                break;                                                  \
        __wait_event_lock_irq(wq, condition, lock, cmd);                \
} while (0)
834
/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *                       condition is checked under the lock. This
 *                       is expected to be called with the lock
 *                       taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up, so it may be evaluated multiple times
 * and must not have side effects.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock)                        \
do {                                                                    \
        if (condition)                                                  \
                break;                                                  \
        __wait_event_lock_irq(wq, condition, lock, );                   \
} while (0)
861
862
/*
 * Interruptible counterpart of __wait_event_lock_irq(): same lock
 * handling around @cmd and schedule(), but the wait can be broken by a
 * signal, in which case the ___wait_event() result (-ERESTARTSYS, per the
 * wrappers below) is propagated to the caller.
 */
#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)   \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,          \
                      spin_unlock_irq(&lock);                           \
                      cmd;                                              \
                      schedule();                                       \
                      spin_lock_irq(&lock))
869
/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *              The condition is checked under the lock. This is expected to
 *              be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *        schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up, so it may be evaluated
 * multiple times and must not have side effects.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards; @cmd therefore runs with @lock released and interrupts
 * enabled.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_interruptible_lock_irq(wq,         \
                                                condition, lock, cmd);  \
        __ret;                                                          \
})
903
/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *              The condition is checked under the lock. This is expected
 *              to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up, so it may be evaluated
 * multiple times and must not have side effects.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)          \
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_interruptible_lock_irq(wq,         \
                                                condition, lock,);      \
        __ret;                                                          \
})
934
/*
 * Timeout variant of __wait_event_interruptible_lock_irq(): the remaining
 * jiffies are threaded through __ret by schedule_timeout(), and the
 * condition is wrapped in ___wait_cond_timeout() so timeout expiry
 * terminates the loop.  @lock is dropped only around the sleep.
 *
 * Note: no trailing semicolon here -- the macro must expand to a bare
 * expression, exactly like the other __wait_event_*() helpers in this
 * file, so that wrappers can do `__ret = __wait_event_...(...)`.
 */
#define __wait_event_interruptible_lock_irq_timeout(wq, condition,      \
                                                    lock, timeout)      \
        ___wait_event(wq, ___wait_cond_timeout(condition),              \
                      TASK_INTERRUPTIBLE, 0, timeout,                   \
                      spin_unlock_irq(&lock);                           \
                      __ret = schedule_timeout(__ret);                  \
                      spin_lock_irq(&lock))
942
/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *              true or a timeout elapses. The condition is checked under
 *              the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies otherwise
 * if the condition evaluated to true before the timeout elapsed.
 * NOTE(review): the exact value when @condition becomes true right as the
 * timeout expires depends on ___wait_cond_timeout(), defined earlier in
 * this file -- confirm there before relying on an exact 0 vs. positive
 * distinction.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,  \
                                                  timeout)              \
({                                                                      \
        long __ret = timeout;                                           \
        if (!___wait_cond_timeout(condition))                           \
                __ret = __wait_event_interruptible_lock_irq_timeout(    \
                                        wq, condition, lock, timeout);  \
        __ret;                                                          \
})
976
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
/* prepare/finish pair bracketing an open-coded wait loop */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
/* WQ_FLAG_WOKEN-style waiting; wait_woken() pairs with woken_wake_function() */
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
/* wake callbacks used by DEFINE_WAIT() and DEFINE_WAIT_BIT() respectively */
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
988
/*
 * Define an on-stack wait_queue_t for the current task with @function as
 * its wakeup callback and an empty (self-linked) queue entry.
 */
#define DEFINE_WAIT_FUNC(name, function)                                \
        wait_queue_t name = {                                           \
                .private        = current,                              \
                .func           = function,                             \
                .task_list      = LIST_HEAD_INIT((name).task_list),     \
        }

/* Common case: autoremove_wake_function dequeues the waiter on wakeup. */
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
997
/*
 * Define an on-stack wait_bit_queue keyed on @word/@bit, with the current
 * task as waiter and wake_bit_function() as its wakeup callback.
 */
#define DEFINE_WAIT_BIT(name, word, bit)                                \
        struct wait_bit_queue name = {                                  \
                .key = __WAIT_BIT_KEY_INITIALIZER(word, bit),           \
                .wait   = {                                             \
                        .private        = current,                      \
                        .func           = wake_bit_function,            \
                        .task_list      =                               \
                                LIST_HEAD_INIT((name).wait.task_list),  \
                },                                                      \
        }
1008
/*
 * (Re-)initialize a wait_queue_t at runtime for the current task:
 * autoremove_wake_function callback, unlinked list entry, flags cleared.
 * Runtime equivalent of DEFINE_WAIT().
 */
#define init_wait(wait)                                                 \
        do {                                                            \
                (wait)->private = current;                              \
                (wait)->func = autoremove_wake_function;                \
                INIT_LIST_HEAD(&(wait)->task_list);                     \
                (wait)->flags = 0;                                      \
        } while (0)
1016
1017
/*
 * Stock sleep routines passed as the @action argument of the
 * wait_on_bit*() family below (wait_on_bit() uses bit_wait,
 * wait_on_bit_io() uses bit_wait_io, and so on).
 */
extern int bit_wait(struct wait_bit_key *, int);
extern int bit_wait_io(struct wait_bit_key *, int);
extern int bit_wait_timeout(struct wait_bit_key *, int);
extern int bit_wait_io_timeout(struct wait_bit_key *, int);
1022
1023 /**
1024  * wait_on_bit - wait for a bit to be cleared
1025  * @word: the word being waited on, a kernel virtual address
1026  * @bit: the bit of the word being waited on
1027  * @mode: the task state to sleep in
1028  *
1029  * There is a standard hashed waitqueue table for generic use. This
1030  * is the part of the hashtable's accessor API that waits on a bit.
1031  * For instance, if one were to have waiters on a bitflag, one would
1032  * call wait_on_bit() in threads waiting for the bit to clear.
1033  * One uses wait_on_bit() where one is waiting for the bit to clear,
1034  * but has no intention of setting it.
1035  * Returned value will be zero if the bit was cleared, or non-zero
1036  * if the process received a signal and the mode permitted wakeup
1037  * on that signal.
1038  */
1039 static inline int
1040 wait_on_bit(unsigned long *word, int bit, unsigned mode)
1041 {
1042         might_sleep();
1043         if (!test_bit(bit, word))
1044                 return 0;
1045         return out_of_line_wait_on_bit(word, bit,
1046                                        bit_wait,
1047                                        mode);
1048 }
1049
1050 /**
1051  * wait_on_bit_io - wait for a bit to be cleared
1052  * @word: the word being waited on, a kernel virtual address
1053  * @bit: the bit of the word being waited on
1054  * @mode: the task state to sleep in
1055  *
1056  * Use the standard hashed waitqueue table to wait for a bit
1057  * to be cleared.  This is similar to wait_on_bit(), but calls
1058  * io_schedule() instead of schedule() for the actual waiting.
1059  *
1060  * Returned value will be zero if the bit was cleared, or non-zero
1061  * if the process received a signal and the mode permitted wakeup
1062  * on that signal.
1063  */
1064 static inline int
1065 wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
1066 {
1067         might_sleep();
1068         if (!test_bit(bit, word))
1069                 return 0;
1070         return out_of_line_wait_on_bit(word, bit,
1071                                        bit_wait_io,
1072                                        mode);
1073 }
1074
1075 /**
1076  * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
1077  * @word: the word being waited on, a kernel virtual address
1078  * @bit: the bit of the word being waited on
1079  * @mode: the task state to sleep in
1080  * @timeout: timeout, in jiffies
1081  *
1082  * Use the standard hashed waitqueue table to wait for a bit
1083  * to be cleared. This is similar to wait_on_bit(), except also takes a
1084  * timeout parameter.
1085  *
1086  * Returned value will be zero if the bit was cleared before the
1087  * @timeout elapsed, or non-zero if the @timeout elapsed or process
1088  * received a signal and the mode permitted wakeup on that signal.
1089  */
1090 static inline int
1091 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
1092                     unsigned long timeout)
1093 {
1094         might_sleep();
1095         if (!test_bit(bit, word))
1096                 return 0;
1097         return out_of_line_wait_on_bit_timeout(word, bit,
1098                                                bit_wait_timeout,
1099                                                mode, timeout);
1100 }
1101
1102 /**
1103  * wait_on_bit_action - wait for a bit to be cleared
1104  * @word: the word being waited on, a kernel virtual address
1105  * @bit: the bit of the word being waited on
1106  * @action: the function used to sleep, which may take special actions
1107  * @mode: the task state to sleep in
1108  *
1109  * Use the standard hashed waitqueue table to wait for a bit
1110  * to be cleared, and allow the waiting action to be specified.
1111  * This is like wait_on_bit() but allows fine control of how the waiting
1112  * is done.
1113  *
1114  * Returned value will be zero if the bit was cleared, or non-zero
1115  * if the process received a signal and the mode permitted wakeup
1116  * on that signal.
1117  */
1118 static inline int
1119 wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
1120                    unsigned mode)
1121 {
1122         might_sleep();
1123         if (!test_bit(bit, word))
1124                 return 0;
1125         return out_of_line_wait_on_bit(word, bit, action, mode);
1126 }
1127
1128 /**
1129  * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
1130  * @word: the word being waited on, a kernel virtual address
1131  * @bit: the bit of the word being waited on
1132  * @mode: the task state to sleep in
1133  *
1134  * There is a standard hashed waitqueue table for generic use. This
1135  * is the part of the hashtable's accessor API that waits on a bit
1136  * when one intends to set it, for instance, trying to lock bitflags.
1137  * For instance, if one were to have waiters trying to set bitflag
1138  * and waiting for it to clear before setting it, one would call
1139  * wait_on_bit() in threads waiting to be able to set the bit.
1140  * One uses wait_on_bit_lock() where one is waiting for the bit to
1141  * clear with the intention of setting it, and when done, clearing it.
1142  *
1143  * Returns zero if the bit was (eventually) found to be clear and was
1144  * set.  Returns non-zero if a signal was delivered to the process and
1145  * the @mode allows that signal to wake the process.
1146  */
1147 static inline int
1148 wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
1149 {
1150         might_sleep();
1151         if (!test_and_set_bit(bit, word))
1152                 return 0;
1153         return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
1154 }
1155
1156 /**
1157  * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
1158  * @word: the word being waited on, a kernel virtual address
1159  * @bit: the bit of the word being waited on
1160  * @mode: the task state to sleep in
1161  *
1162  * Use the standard hashed waitqueue table to wait for a bit
1163  * to be cleared and then to atomically set it.  This is similar
1164  * to wait_on_bit(), but calls io_schedule() instead of schedule()
1165  * for the actual waiting.
1166  *
1167  * Returns zero if the bit was (eventually) found to be clear and was
1168  * set.  Returns non-zero if a signal was delivered to the process and
1169  * the @mode allows that signal to wake the process.
1170  */
1171 static inline int
1172 wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
1173 {
1174         might_sleep();
1175         if (!test_and_set_bit(bit, word))
1176                 return 0;
1177         return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
1178 }
1179
1180 /**
1181  * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
1182  * @word: the word being waited on, a kernel virtual address
1183  * @bit: the bit of the word being waited on
1184  * @action: the function used to sleep, which may take special actions
1185  * @mode: the task state to sleep in
1186  *
1187  * Use the standard hashed waitqueue table to wait for a bit
1188  * to be cleared and then to set it, and allow the waiting action
1189  * to be specified.
1190  * This is like wait_on_bit() but allows fine control of how the waiting
1191  * is done.
1192  *
1193  * Returns zero if the bit was (eventually) found to be clear and was
1194  * set.  Returns non-zero if a signal was delivered to the process and
1195  * the @mode allows that signal to wake the process.
1196  */
1197 static inline int
1198 wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
1199                         unsigned mode)
1200 {
1201         might_sleep();
1202         if (!test_and_set_bit(bit, word))
1203                 return 0;
1204         return out_of_line_wait_on_bit_lock(word, bit, action, mode);
1205 }
1206
1207 /**
1208  * wait_on_atomic_t - Wait for an atomic_t to become 0
1209  * @val: The atomic value being waited on, a kernel virtual address
1210  * @action: the function used to sleep, which may take special actions
1211  * @mode: the task state to sleep in
1212  *
1213  * Wait for an atomic_t to become 0.  We abuse the bit-wait waitqueue table for
1214  * the purpose of getting a waitqueue, but we set the key to a bit number
1215  * outside of the target 'word'.
1216  */
1217 static inline
1218 int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
1219 {
1220         might_sleep();
1221         if (atomic_read(val) == 0)
1222                 return 0;
1223         return out_of_line_wait_on_atomic_t(val, action, mode);
1224 }
1225
1226 #endif /* _LINUX_WAIT_H */