sched/wait: Avoid abort_exclusive_wait() in ___wait_event()
kernel/sched/wait.c (cascardo/linux.git)
1 /*
2  * Generic waiting primitives.
3  *
4  * (C) 2004 Nadia Yvette Chambers, Oracle
5  */
6 #include <linux/init.h>
7 #include <linux/export.h>
8 #include <linux/sched.h>
9 #include <linux/mm.h>
10 #include <linux/wait.h>
11 #include <linux/hash.h>
12 #include <linux/kthread.h>
13
14 void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key)
15 {
16         spin_lock_init(&q->lock);
17         lockdep_set_class_and_name(&q->lock, key, name);
18         INIT_LIST_HEAD(&q->task_list);
19 }
20
21 EXPORT_SYMBOL(__init_waitqueue_head);
22
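/*
 * Drivers normally do not call __init_waitqueue_head() directly; the
 * init_waitqueue_head() wrapper and the DECLARE_WAIT_QUEUE_HEAD() macro in
 * <linux/wait.h> supply the lock_class_key for lockdep.  A minimal usage
 * sketch ('my_wq' and 'struct my_dev' are illustrative names only):
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *
 *	struct my_dev {
 *		wait_queue_head_t wq;
 *	};
 *	...
 *	init_waitqueue_head(&dev->wq);
 */
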
23 void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
24 {
25         unsigned long flags;
26
27         wait->flags &= ~WQ_FLAG_EXCLUSIVE;
28         spin_lock_irqsave(&q->lock, flags);
29         __add_wait_queue(q, wait);
30         spin_unlock_irqrestore(&q->lock, flags);
31 }
32 EXPORT_SYMBOL(add_wait_queue);
33
34 void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
35 {
36         unsigned long flags;
37
38         wait->flags |= WQ_FLAG_EXCLUSIVE;
39         spin_lock_irqsave(&q->lock, flags);
40         __add_wait_queue_tail(q, wait);
41         spin_unlock_irqrestore(&q->lock, flags);
42 }
43 EXPORT_SYMBOL(add_wait_queue_exclusive);
44
45 void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
46 {
47         unsigned long flags;
48
49         spin_lock_irqsave(&q->lock, flags);
50         __remove_wait_queue(q, wait);
51         spin_unlock_irqrestore(&q->lock, flags);
52 }
53 EXPORT_SYMBOL(remove_wait_queue);
54
55
56 /*
57  * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
58  * wake everything up. If it's an exclusive wakeup (nr_exclusive is a small
59  * positive number) then we wake all the non-exclusive tasks plus up to nr_exclusive exclusive tasks.
60  *
61  * There are circumstances in which we can try to wake a task which has already
62  * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
63  * zero in this (rare) case, and we handle it by continuing to scan the queue.
64  */
65 static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
66                         int nr_exclusive, int wake_flags, void *key)
67 {
68         wait_queue_t *curr, *next;
69
70         list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
71                 unsigned flags = curr->flags;
72
73                 if (curr->func(curr, mode, wake_flags, key) &&
74                                 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
75                         break;
76         }
77 }
78
79 /**
80  * __wake_up - wake up threads blocked on a waitqueue.
81  * @q: the waitqueue
82  * @mode: which threads
83  * @nr_exclusive: how many wake-one or wake-many threads to wake up
84  * @key: is directly passed to the wakeup function
85  *
86  * It may be assumed that this function implies a write memory barrier before
87  * changing the task state if and only if any tasks are woken up.
88  */
89 void __wake_up(wait_queue_head_t *q, unsigned int mode,
90                         int nr_exclusive, void *key)
91 {
92         unsigned long flags;
93
94         spin_lock_irqsave(&q->lock, flags);
95         __wake_up_common(q, mode, nr_exclusive, 0, key);
96         spin_unlock_irqrestore(&q->lock, flags);
97 }
98 EXPORT_SYMBOL(__wake_up);
99
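/*
 * Callers normally use the wake_up*() wrappers from <linux/wait.h>, which
 * reduce to __wake_up() roughly as follows (a sketch of the common ones,
 * not an exhaustive list):
 *
 *	wake_up(&wq)                 -> __wake_up(&wq, TASK_NORMAL, 1, NULL)
 *	wake_up_nr(&wq, nr)          -> __wake_up(&wq, TASK_NORMAL, nr, NULL)
 *	wake_up_all(&wq)             -> __wake_up(&wq, TASK_NORMAL, 0, NULL)
 *	wake_up_interruptible(&wq)   -> __wake_up(&wq, TASK_INTERRUPTIBLE, 1, NULL)
 *
 * nr_exclusive == 0 thus means "also wake every exclusive waiter".
 */
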
100 /*
101  * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
102  */
103 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
104 {
105         __wake_up_common(q, mode, nr, 0, NULL);
106 }
107 EXPORT_SYMBOL_GPL(__wake_up_locked);
108
109 void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
110 {
111         __wake_up_common(q, mode, 1, 0, key);
112 }
113 EXPORT_SYMBOL_GPL(__wake_up_locked_key);
114
115 /**
116  * __wake_up_sync_key - wake up threads blocked on a waitqueue.
117  * @q: the waitqueue
118  * @mode: which threads
119  * @nr_exclusive: how many wake-one or wake-many threads to wake up
120  * @key: opaque value to be passed to wakeup targets
121  *
122  * The sync wakeup differs in that the waker knows that it will schedule
123  * away soon, so while the target thread will be woken up, it will not
124  * be migrated to another CPU - i.e. the two threads are 'synchronized'
125  * with each other. This can prevent needless bouncing between CPUs.
126  *
127  * On UP it can prevent extra preemption.
128  *
129  * It may be assumed that this function implies a write memory barrier before
130  * changing the task state if and only if any tasks are woken up.
131  */
132 void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
133                         int nr_exclusive, void *key)
134 {
135         unsigned long flags;
136         int wake_flags = 1; /* XXX WF_SYNC */
137
138         if (unlikely(!q))
139                 return;
140
141         if (unlikely(nr_exclusive != 1))
142                 wake_flags = 0;
143
144         spin_lock_irqsave(&q->lock, flags);
145         __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
146         spin_unlock_irqrestore(&q->lock, flags);
147 }
148 EXPORT_SYMBOL_GPL(__wake_up_sync_key);
149
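/*
 * As above, callers usually reach the sync variants through the
 * <linux/wait.h> wrappers; roughly (a sketch):
 *
 *	wake_up_interruptible_sync(&wq)  -> __wake_up_sync(&wq, TASK_INTERRUPTIBLE, 1)
 *	wake_up_interruptible_sync_poll(&wq, m)
 *	                                 -> __wake_up_sync_key(&wq, TASK_INTERRUPTIBLE, 1, (void *)(m))
 */
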
150 /*
151  * __wake_up_sync - see __wake_up_sync_key()
152  */
153 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
154 {
155         __wake_up_sync_key(q, mode, nr_exclusive, NULL);
156 }
157 EXPORT_SYMBOL_GPL(__wake_up_sync);      /* For internal use only */
158
159 /*
160  * Note: we use "set_current_state()" _after_ the wait-queue add,
161  * because we need a memory barrier there on SMP, so that any
162  * wake-function that tests for the wait-queue being active
163  * will be guaranteed to see the waitqueue addition _or_ the subsequent
164  * tests in this thread will see the wakeup having taken place.
165  *
166  * The spin_unlock() itself is semi-permeable and only protects
167  * one way (it only protects stuff inside the critical region and
168  * stops them from bleeding out - it would still allow subsequent
169  * loads to move into the critical region).
170  */
171 void
172 prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
173 {
174         unsigned long flags;
175
176         wait->flags &= ~WQ_FLAG_EXCLUSIVE;
177         spin_lock_irqsave(&q->lock, flags);
178         if (list_empty(&wait->task_list))
179                 __add_wait_queue(q, wait);
180         set_current_state(state);
181         spin_unlock_irqrestore(&q->lock, flags);
182 }
183 EXPORT_SYMBOL(prepare_to_wait);
184
185 void
186 prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
187 {
188         unsigned long flags;
189
190         wait->flags |= WQ_FLAG_EXCLUSIVE;
191         spin_lock_irqsave(&q->lock, flags);
192         if (list_empty(&wait->task_list))
193                 __add_wait_queue_tail(q, wait);
194         set_current_state(state);
195         spin_unlock_irqrestore(&q->lock, flags);
196 }
197 EXPORT_SYMBOL(prepare_to_wait_exclusive);
198
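/*
 * Typical open-coded use of prepare_to_wait()/finish_wait(), as a sketch
 * ('wq' and 'condition' are assumed to be provided by the caller):
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq, &wait);
 *
 * Most callers use the wait_event*() macros instead, which expand to an
 * equivalent loop built on prepare_to_wait_event() below.
 */
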
199 long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
200 {
201         unsigned long flags;
202         long ret = 0;
203
204         wait->private = current;
205         wait->func = autoremove_wake_function;
206
207         spin_lock_irqsave(&q->lock, flags);
208         if (unlikely(signal_pending_state(state, current))) {
209                 /*
210                  * Exclusive waiter must not fail if it was selected by wakeup,
211                  * it should "consume" the condition we were waiting for.
212                  *
213                  * The caller will recheck the condition and return success if
214  * we were already woken up; we cannot miss the event because
215                  * wakeup locks/unlocks the same q->lock.
216                  *
217                  * But we need to ensure that set-condition + wakeup after that
218  * can't see us; it should wake up another exclusive waiter if
219                  * we fail.
220                  */
221                 list_del_init(&wait->task_list);
222                 ret = -ERESTARTSYS;
223         } else {
224                 if (list_empty(&wait->task_list)) {
225                         if (wait->flags & WQ_FLAG_EXCLUSIVE)
226                                 __add_wait_queue_tail(q, wait);
227                         else
228                                 __add_wait_queue(q, wait);
229                 }
230                 set_current_state(state);
231         }
232         spin_unlock_irqrestore(&q->lock, flags);
233
234         return ret;
235 }
236 EXPORT_SYMBOL(prepare_to_wait_event);
237
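/*
 * prepare_to_wait_event() is the workhorse of the wait_event*() macros:
 * ___wait_event() in <linux/wait.h> calls it in a loop, roughly as in the
 * sketch below ('wq', 'condition', 'state' and 'cmd' are macro parameters;
 * exclusive-waiter handling is elided here):
 *
 *	for (;;) {
 *		long __int = prepare_to_wait_event(&wq, &__wait, state);
 *
 *		if (condition)
 *			break;
 *
 *		if (___wait_is_interruptible(state) && __int) {
 *			__ret = __int;
 *			break;
 *		}
 *
 *		cmd;
 *	}
 *	finish_wait(&wq, &__wait);
 */
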
238 /**
239  * finish_wait - clean up after waiting in a queue
240  * @q: waitqueue waited on
241  * @wait: wait descriptor
242  *
243  * Sets current thread back to running state and removes
244  * the wait descriptor from the given waitqueue if still
245  * queued.
246  */
247 void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
248 {
249         unsigned long flags;
250
251         __set_current_state(TASK_RUNNING);
252         /*
253          * We can check for list emptiness outside the lock
254          * IFF:
255          *  - we use the "careful" check that verifies both
256          *    the next and prev pointers, so that there cannot
257          *    be any half-pending updates in progress on other
258  *    CPUs that we haven't seen yet (and that might
259  *    still change the stack area),
260  * and
261  *  - all other users take the lock (i.e. we can only
262          *    have _one_ other CPU that looks at or modifies
263          *    the list).
264          */
265         if (!list_empty_careful(&wait->task_list)) {
266                 spin_lock_irqsave(&q->lock, flags);
267                 list_del_init(&wait->task_list);
268                 spin_unlock_irqrestore(&q->lock, flags);
269         }
270 }
271 EXPORT_SYMBOL(finish_wait);
272
273 /**
274  * abort_exclusive_wait - abort exclusive waiting in a queue
275  * @q: waitqueue waited on
276  * @wait: wait descriptor
277  * @key: key to identify a wait bit queue or %NULL
278  *
279  * Sets current thread back to running state and removes
280  * the wait descriptor from the given waitqueue if still
281  * queued.
282  *
283  * Wakes up the next waiter if the caller is concurrently
284  * woken up through the queue.
285  *
286  * This prevents waiter starvation where an exclusive waiter
287  * aborts and is woken up concurrently and no one wakes up
288  * the next waiter.
289  */
290 void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, void *key)
291 {
292         unsigned long flags;
293
294         __set_current_state(TASK_RUNNING);
295         spin_lock_irqsave(&q->lock, flags);
296         if (!list_empty(&wait->task_list))
297                 list_del_init(&wait->task_list);
298         else if (waitqueue_active(q))
299                 __wake_up_locked_key(q, TASK_NORMAL, key);
300         spin_unlock_irqrestore(&q->lock, flags);
301 }
302 EXPORT_SYMBOL(abort_exclusive_wait);
303
304 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
305 {
306         int ret = default_wake_function(wait, mode, sync, key);
307
308         if (ret)
309                 list_del_init(&wait->task_list);
310         return ret;
311 }
312 EXPORT_SYMBOL(autoremove_wake_function);
313
314 static inline bool is_kthread_should_stop(void)
315 {
316         return (current->flags & PF_KTHREAD) && kthread_should_stop();
317 }
318
319 /*
320  * DEFINE_WAIT_FUNC(wait, woken_wake_func);
321  *
322  * add_wait_queue(&wq, &wait);
323  * for (;;) {
324  *     if (condition)
325  *         break;
326  *
327  *     p->state = mode;                         condition = true;
328  *     smp_mb(); // A                           smp_wmb(); // C
329  *     if (!(wait->flags & WQ_FLAG_WOKEN))      wait->flags |= WQ_FLAG_WOKEN;
330  *         schedule()                           try_to_wake_up();
331  *     p->state = TASK_RUNNING;             ~~~~~~~~~~~~~~~~~~
332  *     wait->flags &= ~WQ_FLAG_WOKEN;           condition = true;
333  *     smp_mb() // B                            smp_wmb(); // C
334  *                                              wait->flags |= WQ_FLAG_WOKEN;
335  * }
336  * remove_wait_queue(&wq, &wait);
337  *
338  */
339 long wait_woken(wait_queue_t *wait, unsigned mode, long timeout)
340 {
341         set_current_state(mode); /* A */
342         /*
343          * The above implies an smp_mb(), which matches with the smp_wmb() from
344          * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
345          * also observe all state before the wakeup.
346          */
347         if (!(wait->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
348                 timeout = schedule_timeout(timeout);
349         __set_current_state(TASK_RUNNING);
350
351         /*
352          * The below implies an smp_mb(), it too pairs with the smp_wmb() from
353          * woken_wake_function() such that we must either observe the wait
354          * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
355          * an event.
356          */
357         smp_store_mb(wait->flags, wait->flags & ~WQ_FLAG_WOKEN); /* B */
358
359         return timeout;
360 }
361 EXPORT_SYMBOL(wait_woken);
362
363 int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
364 {
365         /*
366          * Although this function is called under waitqueue lock, LOCK
367  * doesn't imply a write barrier and the users expect write
368          * barrier semantics on wakeup functions.  The following
369          * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
370          * and is paired with smp_store_mb() in wait_woken().
371          */
372         smp_wmb(); /* C */
373         wait->flags |= WQ_FLAG_WOKEN;
374
375         return default_wake_function(wait, mode, sync, key);
376 }
377 EXPORT_SYMBOL(woken_wake_function);
378
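/*
 * Putting wait_woken() and woken_wake_function() together, the pattern from
 * the comment above wait_woken() typically looks like this in a caller, as a
 * sketch ('wq', 'condition' and 'timeout' are assumed to be provided):
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *	add_wait_queue(&wq, &wait);
 *	while (!condition && timeout)
 *		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *	remove_wait_queue(&wq, &wait);
 */
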
379 int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
380 {
381         struct wait_bit_key *key = arg;
382         struct wait_bit_queue *wait_bit
383                 = container_of(wait, struct wait_bit_queue, wait);
384
385         if (wait_bit->key.flags != key->flags ||
386                         wait_bit->key.bit_nr != key->bit_nr ||
387                         test_bit(key->bit_nr, key->flags))
388                 return 0;
389         else
390                 return autoremove_wake_function(wait, mode, sync, key);
391 }
392 EXPORT_SYMBOL(wake_bit_function);
393
394 /*
395  * To allow interruptible waiting and asynchronous (i.e. nonblocking)
396  * waiting, the actions passed to __wait_on_bit() and __wait_on_bit_lock() may
397  * return nonzero codes. A nonzero return code halts waiting and is returned.
398  */
399 int __sched
400 __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
401               wait_bit_action_f *action, unsigned mode)
402 {
403         int ret = 0;
404
405         do {
406                 prepare_to_wait(wq, &q->wait, mode);
407                 if (test_bit(q->key.bit_nr, q->key.flags))
408                         ret = (*action)(&q->key, mode);
409         } while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
410         finish_wait(wq, &q->wait);
411         return ret;
412 }
413 EXPORT_SYMBOL(__wait_on_bit);
414
415 int __sched out_of_line_wait_on_bit(void *word, int bit,
416                                     wait_bit_action_f *action, unsigned mode)
417 {
418         wait_queue_head_t *wq = bit_waitqueue(word, bit);
419         DEFINE_WAIT_BIT(wait, word, bit);
420
421         return __wait_on_bit(wq, &wait, action, mode);
422 }
423 EXPORT_SYMBOL(out_of_line_wait_on_bit);
424
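/*
 * Waiters normally reach this through the wait_on_bit*() helpers in
 * <linux/wait.h>, which pass a stock action such as bit_wait() or
 * bit_wait_io().  A waiter-side sketch ('flags' and MY_FLAG_BUSY are
 * illustrative names only):
 *
 *	if (wait_on_bit(&flags, MY_FLAG_BUSY, TASK_INTERRUPTIBLE))
 *		return -EINTR;	(the wait was interrupted by a signal)
 */
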
425 int __sched out_of_line_wait_on_bit_timeout(
426         void *word, int bit, wait_bit_action_f *action,
427         unsigned mode, unsigned long timeout)
428 {
429         wait_queue_head_t *wq = bit_waitqueue(word, bit);
430         DEFINE_WAIT_BIT(wait, word, bit);
431
432         wait.key.timeout = jiffies + timeout;
433         return __wait_on_bit(wq, &wait, action, mode);
434 }
435 EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout);
436
437 int __sched
438 __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
439                         wait_bit_action_f *action, unsigned mode)
440 {
441         do {
442                 int ret;
443
444                 prepare_to_wait_exclusive(wq, &q->wait, mode);
445                 if (!test_bit(q->key.bit_nr, q->key.flags))
446                         continue;
447                 ret = action(&q->key, mode);
448                 if (!ret)
449                         continue;
450                 abort_exclusive_wait(wq, &q->wait, &q->key);
451                 return ret;
452         } while (test_and_set_bit(q->key.bit_nr, q->key.flags));
453         finish_wait(wq, &q->wait);
454         return 0;
455 }
456 EXPORT_SYMBOL(__wait_on_bit_lock);
457
458 int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
459                                          wait_bit_action_f *action, unsigned mode)
460 {
461         wait_queue_head_t *wq = bit_waitqueue(word, bit);
462         DEFINE_WAIT_BIT(wait, word, bit);
463
464         return __wait_on_bit_lock(wq, &wait, action, mode);
465 }
466 EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
467
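/*
 * The corresponding <linux/wait.h> helper is wait_on_bit_lock(), which
 * returns with the bit set (the "lock" held) on success.  A sketch of a
 * lock/unlock pair ('flags' and MY_FLAG_LOCK are illustrative names only):
 *
 *	if (wait_on_bit_lock(&flags, MY_FLAG_LOCK, TASK_INTERRUPTIBLE))
 *		return -EINTR;
 *	... critical section ...
 *	clear_bit_unlock(MY_FLAG_LOCK, &flags);
 *	smp_mb__after_atomic();
 *	wake_up_bit(&flags, MY_FLAG_LOCK);
 */
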
468 void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
469 {
470         struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
471         if (waitqueue_active(wq))
472                 __wake_up(wq, TASK_NORMAL, 1, &key);
473 }
474 EXPORT_SYMBOL(__wake_up_bit);
475
476 /**
477  * wake_up_bit - wake up a waiter on a bit
478  * @word: the word being waited on, a kernel virtual address
479  * @bit: the bit of the word being waited on
480  *
481  * There is a standard hashed waitqueue table for generic use. This
482  * is the part of the hashtable's accessor API that wakes up waiters
483  * on a bit. For instance, if one were to have waiters on a bitflag,
484  * one would call wake_up_bit() after clearing the bit.
485  *
486  * In order for this to function properly, as it uses waitqueue_active()
487  * internally, some kind of memory barrier must be done prior to calling
488  * this. Typically, this will be smp_mb__after_atomic(), but in some
489  * cases where bitflags are manipulated non-atomically under a lock, one
490  * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
491  * because spin_unlock() does not guarantee a memory barrier.
492  */
493 void wake_up_bit(void *word, int bit)
494 {
495         __wake_up_bit(bit_waitqueue(word, bit), word, bit);
496 }
497 EXPORT_SYMBOL(wake_up_bit);
498
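/*
 * Waker-side sketch matching the barrier requirement described above
 * ('flags' and MY_FLAG_BUSY are illustrative names only):
 *
 *	clear_bit(MY_FLAG_BUSY, &flags);
 *	smp_mb__after_atomic();		(pairs with the waitqueue_active() check)
 *	wake_up_bit(&flags, MY_FLAG_BUSY);
 */
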
499 wait_queue_head_t *bit_waitqueue(void *word, int bit)
500 {
501         const int shift = BITS_PER_LONG == 32 ? 5 : 6;
502         const struct zone *zone = page_zone(virt_to_page(word));
503         unsigned long val = (unsigned long)word << shift | bit;
504
505         return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
506 }
507 EXPORT_SYMBOL(bit_waitqueue);
508
509 /*
510  * Manipulate the atomic_t address to produce a better bit waitqueue table hash
511  * index (we're keying off bit -1, but that would produce a horrible hash
512  * value).
513  */
514 static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
515 {
516         if (BITS_PER_LONG == 64) {
517                 unsigned long q = (unsigned long)p;
518                 return bit_waitqueue((void *)(q & ~1), q & 1);
519         }
520         return bit_waitqueue(p, 0);
521 }
522
523 static int wake_atomic_t_function(wait_queue_t *wait, unsigned mode, int sync,
524                                   void *arg)
525 {
526         struct wait_bit_key *key = arg;
527         struct wait_bit_queue *wait_bit
528                 = container_of(wait, struct wait_bit_queue, wait);
529         atomic_t *val = key->flags;
530
531         if (wait_bit->key.flags != key->flags ||
532             wait_bit->key.bit_nr != key->bit_nr ||
533             atomic_read(val) != 0)
534                 return 0;
535         return autoremove_wake_function(wait, mode, sync, key);
536 }
537
538 /*
539  * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting,
540  * the action passed to __wait_on_atomic_t() may return a nonzero code.  A
541  * nonzero return code halts waiting and is returned.
542  */
543 static __sched
544 int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q,
545                        int (*action)(atomic_t *), unsigned mode)
546 {
547         atomic_t *val;
548         int ret = 0;
549
550         do {
551                 prepare_to_wait(wq, &q->wait, mode);
552                 val = q->key.flags;
553                 if (atomic_read(val) == 0)
554                         break;
555                 ret = (*action)(val);
556         } while (!ret && atomic_read(val) != 0);
557         finish_wait(wq, &q->wait);
558         return ret;
559 }
560
561 #define DEFINE_WAIT_ATOMIC_T(name, p)                                   \
562         struct wait_bit_queue name = {                                  \
563                 .key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p),              \
564                 .wait   = {                                             \
565                         .private        = current,                      \
566                         .func           = wake_atomic_t_function,       \
567                         .task_list      =                               \
568                                 LIST_HEAD_INIT((name).wait.task_list),  \
569                 },                                                      \
570         }
571
572 __sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
573                                          unsigned mode)
574 {
575         wait_queue_head_t *wq = atomic_t_waitqueue(p);
576         DEFINE_WAIT_ATOMIC_T(wait, p);
577
578         return __wait_on_atomic_t(wq, &wait, action, mode);
579 }
580 EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
581
582 /**
583  * wake_up_atomic_t - Wake up a waiter on an atomic_t
584  * @p: The atomic_t being waited on, a kernel virtual address
585  *
586  * Wake up anyone waiting for the atomic_t to go to zero.
587  *
588  * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t
589  * check is done by the waiter's wake function, not by the waker itself).
590  */
591 void wake_up_atomic_t(atomic_t *p)
592 {
593         __wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
594 }
595 EXPORT_SYMBOL(wake_up_atomic_t);
596
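/*
 * Waiter/waker pairing for the atomic_t helpers, as a sketch ('obj->refs' is
 * an illustrative field and 'my_action' an illustrative action that usually
 * just calls schedule()):
 *
 * waiter, via <linux/wait.h>:
 *	ret = wait_on_atomic_t(&obj->refs, my_action, TASK_UNINTERRUPTIBLE);
 *
 * waker, on the transition to zero:
 *	if (atomic_dec_and_test(&obj->refs))
 *		wake_up_atomic_t(&obj->refs);
 */
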
597 __sched int bit_wait(struct wait_bit_key *word, int mode)
598 {
599         schedule();
600         if (signal_pending_state(mode, current))
601                 return -EINTR;
602         return 0;
603 }
604 EXPORT_SYMBOL(bit_wait);
605
606 __sched int bit_wait_io(struct wait_bit_key *word, int mode)
607 {
608         io_schedule();
609         if (signal_pending_state(mode, current))
610                 return -EINTR;
611         return 0;
612 }
613 EXPORT_SYMBOL(bit_wait_io);
614
615 __sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
616 {
617         unsigned long now = READ_ONCE(jiffies);
618         if (time_after_eq(now, word->timeout))
619                 return -EAGAIN;
620         schedule_timeout(word->timeout - now);
621         if (signal_pending_state(mode, current))
622                 return -EINTR;
623         return 0;
624 }
625 EXPORT_SYMBOL_GPL(bit_wait_timeout);
626
627 __sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
628 {
629         unsigned long now = READ_ONCE(jiffies);
630         if (time_after_eq(now, word->timeout))
631                 return -EAGAIN;
632         io_schedule_timeout(word->timeout - now);
633         if (signal_pending_state(mode, current))
634                 return -EINTR;
635         return 0;
636 }
637 EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
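
/*
 * These bit_wait*() functions are the stock actions used by the
 * wait_on_bit*() helpers in <linux/wait.h>; roughly (a sketch of the
 * mapping, not an exhaustive list):
 *
 *	wait_on_bit(w, b, mode)             -> bit_wait
 *	wait_on_bit_io(w, b, mode)          -> bit_wait_io
 *	wait_on_bit_timeout(w, b, mode, t)  -> bit_wait_timeout
 *	wait_on_bit_lock(w, b, mode)        -> bit_wait
 */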