/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 */

#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/osq_lock.h>

#include "rwsem.h"

/*
 * Guide to the rw_semaphore's count field for common values.
 * (32-bit case illustrated, similar for 64-bit)
 *
 * 0x0000000X	(1) X readers active or attempting lock, no writer waiting
 *		    X = #active_readers + #readers attempting to lock
 *		    (X*ACTIVE_BIAS)
 *
 * 0x00000000	rwsem is unlocked, and no one is waiting for the lock or
 *		attempting to read lock or write lock.
 *
 * 0xffff000X	(1) X readers active or attempting lock, with waiters for lock
 *		    X = #active readers + # readers attempting lock
 *		    (X*ACTIVE_BIAS + WAITING_BIAS)
 *		(2) 1 writer attempting lock, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *		(3) 1 writer active, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *
 * 0xffff0001	(1) 1 reader active or attempting lock, waiters for lock
 *		    (WAITING_BIAS + ACTIVE_BIAS)
 *		(2) 1 writer active or attempting lock, no waiters for lock
 *		    (ACTIVE_WRITE_BIAS)
 *
 * 0xffff0000	(1) There are writers or readers queued but none active
 *		    or in the process of attempting lock.
 *		    (WAITING_BIAS)
 *		Note: writer can attempt to steal lock for this count by adding
 *		ACTIVE_WRITE_BIAS in cmpxchg and checking the old count
 *
 * 0xfffe0001	(1) 1 writer active, or attempting lock. Waiters on queue.
 *		    (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 *
 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and checking
 *	 that the count becomes more than 0 for successful lock acquisition,
 *	 i.e. the case where there are only readers or nobody has the lock
 *	 (1st and 2nd case above).
 *
 *	 Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
 *	 checking that the count becomes ACTIVE_WRITE_BIAS for successful lock
 *	 acquisition (i.e. nobody else has the lock or attempts the lock). If
 *	 unsuccessful, in rwsem_down_write_failed, we'll check to see if there
 *	 are only waiters but none active (5th case above), and attempt to
 *	 steal the lock.
 */
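
/*
 * For reference, the bias constants used throughout this file come from the
 * architecture's rwsem.h.  The usual 32-bit definitions (matching the table
 * above) look roughly like this; they are reproduced here only as an
 * illustration, the authoritative values live in the arch headers:
 *
 *	#define RWSEM_UNLOCKED_VALUE		0x00000000L
 *	#define RWSEM_ACTIVE_BIAS		0x00000001L
 *	#define RWSEM_ACTIVE_MASK		0x0000ffffL
 *	#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)	i.e. 0xffff0000
 *	#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 *	#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 */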

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = RWSEM_UNLOCKED_VALUE;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	sem->owner = NULL;
	osq_lock_init(&sem->osq);
#endif
}

EXPORT_SYMBOL(__init_rwsem);
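
/*
 * Illustrative usage sketch (not part of this file; "my_sem" is a made-up
 * name): callers normally reach __init_rwsem() through the init_rwsem() or
 * DECLARE_RWSEM() helpers from <linux/rwsem.h> and then take the lock via
 * the usual fast paths, e.g.:
 *
 *	static DECLARE_RWSEM(my_sem);
 *
 *	down_read(&my_sem);	// shared section
 *	up_read(&my_sem);
 *
 *	down_write(&my_sem);	// exclusive section
 *	up_write(&my_sem);
 *
 * Only contended acquisitions fall through to the rwsem_down_*_failed()
 * slow paths implemented below.
 */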

enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 *   to actually wake up the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only marked woken if downgrading is false
 */
static struct rw_semaphore *
__rwsem_mark_wake(struct rw_semaphore *sem,
		  enum rwsem_wake_type wake_type, struct wake_q_head *wake_q)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	struct list_head *next;
	long oldcount, woken, loop, adjustment;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY) {
			/*
			 * Mark the writer at the front of the queue for wakeup.
			 * Until the task is actually awoken later by the
			 * caller, other writers are able to steal it.
			 * Readers, on the other hand, will block as they
			 * will notice the queued writer.
			 */
			wake_q_add(wake_q, waiter->task);
		}
		goto out;
	}

	/* Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	adjustment = 0;
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
		oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
			/* A writer stole the lock. Undo our reader grant. */
			if (rwsem_atomic_update(-adjustment, sem) &
						RWSEM_ACTIVE_MASK)
				goto out;
			/* Last active locker left. Retry waking readers. */
			goto try_reader_grant;
		}
	}

	/* Grant an infinite number of read locks to the readers at the front
	 * of the queue. Note we increment the 'active part' of the count by
	 * the number of readers before waking any processes up.
	 */
	woken = 0;
	do {
		woken++;

		if (waiter->list.next == &sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next,
					struct rwsem_waiter, list);

	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);

	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
	if (waiter->type != RWSEM_WAITING_FOR_WRITE)
		/* hit end of list above */
		adjustment -= RWSEM_WAITING_BIAS;

	if (adjustment)
		rwsem_atomic_add(adjustment, sem);

	next = sem->wait_list.next;
	loop = woken;
	do {
		waiter = list_entry(next, struct rwsem_waiter, list);
		next = waiter->list.next;
		tsk = waiter->task;

		/*
		 * Make sure we do not wake up the next reader before
		 * setting the nil condition to grant the next reader;
		 * otherwise we could miss the wakeup on the other
		 * side and end up sleeping again. See the pairing
		 * in rwsem_down_read_failed().
		 */
		smp_mb();
		waiter->task = NULL;
		wake_q_add(wake_q, tsk);
		put_task_struct(tsk);
	} while (--loop);

	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

 out:
	return sem;
}
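
/*
 * Worked example of the adjustment arithmetic above (illustrative, using the
 * 32-bit values from the guide at the top of this file): suppose the count
 * is 0xffff0000 (WAITING_BIAS, nobody active) and the queue holds two
 * readers followed by one writer.  The first reader grant adds
 * ACTIVE_READ_BIAS, giving 0xffff0001.  The loop then counts woken = 2, so
 * adjustment = 2 * ACTIVE_READ_BIAS - ACTIVE_READ_BIAS = 1; the writer is
 * still queued, so WAITING_BIAS is kept and the final count is 0xffff0002,
 * i.e. two active readers with waiters still present.
 */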

/*
 * Wait for the read lock to be granted
 */
__visible
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
	struct rwsem_waiter waiter;
	struct task_struct *tsk = current;
	WAKE_Q(wake_q);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = rwsem_atomic_update(adjustment, sem);

	/* If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers !
	 */
	if (count == RWSEM_WAITING_BIAS ||
	    (count > RWSEM_WAITING_BIAS &&
	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
		sem = __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	/* wait to be given the lock */
	while (true) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!waiter.task)
			break;
		schedule();
	}

	__set_task_state(tsk, TASK_RUNNING);
	return sem;
}
EXPORT_SYMBOL(rwsem_down_read_failed);
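
/*
 * Worked example (illustrative, 32-bit values): a writer holds the lock
 * (count == 0xffff0001) and a single reader enters the slowpath.  The
 * fastpath already added ACTIVE_READ_BIAS (count == 0xffff0002); the wait
 * list is empty, so adjustment == WAITING_BIAS - ACTIVE_READ_BIAS and the
 * update leaves count == 0xfffe0001 (writer active, waiters queued), which
 * matches neither of the wakeup cases above, so the reader simply sleeps
 * until the writer's up_write() wakes it via rwsem_wake().
 */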

static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
	/*
	 * Try acquiring the write lock. Check count first in order
	 * to reduce unnecessary expensive cmpxchg() operations.
	 */
	if (count == RWSEM_WAITING_BIAS &&
	    cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS,
		    RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
		if (!list_is_singular(&sem->wait_list))
			rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
		rwsem_set_owner(sem);
		return true;
	}

	return false;
}
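
/*
 * Illustration of the arithmetic above (32-bit values): a successful cmpxchg
 * turns 0xffff0000 (WAITING_BIAS, queued waiters but none active) into
 * 0xffff0001 (ACTIVE_WRITE_BIAS).  ACTIVE_WRITE_BIAS already includes
 * WAITING_BIAS, so if we were the only waiter the count is now exact; if
 * other waiters remain, WAITING_BIAS is added back, giving 0xfffe0001
 * (ACTIVE_WRITE_BIAS + WAITING_BIAS) as in the guide at the top of the file.
 */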

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long old, count = READ_ONCE(sem->count);

	while (true) {
		if (!(count == 0 || count == RWSEM_WAITING_BIAS))
			return false;

		old = cmpxchg_acquire(&sem->count, count,
				      count + RWSEM_ACTIVE_WRITE_BIAS);
		if (old == count) {
			rwsem_set_owner(sem);
			return true;
		}

		count = old;
	}
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool ret = true;

	if (need_resched())
		return false;

	rcu_read_lock();
	owner = READ_ONCE(sem->owner);
	if (!owner) {
		long count = READ_ONCE(sem->count);
		/*
		 * If sem->owner is not set, yet we have just recently entered the
		 * slowpath with the lock being active, then there is a possibility
		 * reader(s) may have the lock. To be safe, bail spinning in these
		 * situations.
		 */
		if (count & RWSEM_ACTIVE_MASK)
			ret = false;
		goto done;
	}

	ret = owner->on_cpu;
done:
	rcu_read_unlock();
	return ret;
}

static noinline
bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner)
{
	long count;

	rcu_read_lock();
	while (sem->owner == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that sem->owner still matches owner. If that
		 * fails, owner might point to freed memory; if it still
		 * matches, the rcu_read_lock() ensures the memory stays
		 * valid.
		 */
		barrier();

		/* abort spinning when need_resched or owner is not running */
		if (!owner->on_cpu || need_resched()) {
			rcu_read_unlock();
			return false;
		}

		cpu_relax_lowlatency();
	}
	rcu_read_unlock();

	if (READ_ONCE(sem->owner))
		return true; /* new owner, continue spinning */

	/*
	 * When the owner is not set, the lock could be free or
	 * held by readers. Check the counter to verify the
	 * state.
	 */
	count = READ_ONCE(sem->count);
	return (count == 0 || count == RWSEM_WAITING_BIAS);
}

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool taken = false;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!rwsem_can_spin_on_owner(sem))
		goto done;

	if (!osq_lock(&sem->osq))
		goto done;

	while (true) {
		owner = READ_ONCE(sem->owner);
		if (owner && !rwsem_spin_on_owner(sem, owner))
			break;

		/* wait_lock will be acquired if write_lock is obtained */
		if (rwsem_try_write_lock_unqueued(sem)) {
			taken = true;
			break;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we are an RT task, we will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(current)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax_lowlatency();
	}
	osq_unlock(&sem->osq);
done:
	preempt_enable();
	return taken;
}

/*
 * Return true if the rwsem has an active spinner
 */
static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return osq_is_locked(&sem->osq);
}

#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	return false;
}

static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return false;
}
#endif

/*
 * Wait until we successfully acquire the write lock
 */
static inline struct rw_semaphore *
__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
{
	long count;
	bool waiting = true; /* any queued threads before us */
	struct rwsem_waiter waiter;
	struct rw_semaphore *ret = sem;
	WAKE_Q(wake_q);

	/* undo write bias from down_write operation, stop active locking */
	count = rwsem_atomic_update(-RWSEM_ACTIVE_WRITE_BIAS, sem);

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_optimistic_spin(sem))
		return sem;

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;

	raw_spin_lock_irq(&sem->wait_lock);

	/* account for this before adding a new element to the list */
	if (list_empty(&sem->wait_list))
		waiting = false;

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	if (waiting) {
		count = READ_ONCE(sem->count);

		/*
		 * If there were already threads queued before us and there are
		 * no active writers, the lock must be read owned; so we try to
		 * wake any read locks that were queued ahead of us.
		 */
		if (count > RWSEM_WAITING_BIAS) {
			WAKE_Q(wake_q);

			sem = __rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
			/*
			 * The wakeup is normally called _after_ the wait_lock
			 * is released, but given that we are proactively waking
			 * readers we can deal with the wake_q overhead as it is
			 * similar to releasing and taking the wait_lock again
			 * for attempting rwsem_try_write_lock().
			 */
			wake_up_q(&wake_q);
		}

	} else
		count = rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);

	/* wait until we successfully acquire the lock */
	set_current_state(state);
	while (true) {
		if (rwsem_try_write_lock(count, sem))
			break;

		raw_spin_unlock_irq(&sem->wait_lock);

		/* Block until there are no active lockers. */
		do {
			if (signal_pending_state(state, current))
				goto out_nolock;

			schedule();
			set_current_state(state);
		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);

		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);
	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);

	return ret;

out_nolock:
	__set_current_state(TASK_RUNNING);
	raw_spin_lock_irq(&sem->wait_lock);
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list))
		rwsem_atomic_update(-RWSEM_WAITING_BIAS, sem);
	else
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	return ERR_PTR(-EINTR);
}
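
/*
 * Worked example (illustrative, 32-bit values): one reader holds the lock
 * (count == 0x00000001) when a writer enters the slowpath.  The fastpath
 * added ACTIVE_WRITE_BIAS (count == 0xffff0002); the undo above brings the
 * count back to 0x00000001, and since the wait list was empty WAITING_BIAS
 * is added, giving 0xffff0001.  rwsem_try_write_lock() fails (count !=
 * WAITING_BIAS), so the writer sleeps until the reader's up_read() drops the
 * active part to zero (count == 0xffff0000), after which the trylock's
 * cmpxchg succeeds and installs ACTIVE_WRITE_BIAS.
 */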

__visible struct rw_semaphore * __sched
rwsem_down_write_failed(struct rw_semaphore *sem)
{
	return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed);

__visible struct rw_semaphore * __sched
rwsem_down_write_failed_killable(struct rw_semaphore *sem)
{
	return __rwsem_down_write_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed_killable);

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
__visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	WAKE_Q(wake_q);

	/*
	 * If a spinner is present, it is not necessary to do the wakeup.
	 * Try to do wakeup only if the trylock succeeds to minimize
	 * spinlock contention which may introduce too much delay in the
	 * unlock operation.
	 *
	 *    spinning writer		up_write/up_read caller
	 *    ---------------		-----------------------
	 * [S]   osq_unlock()		[L]   osq
	 *	 MB			      RMB
	 * [RmW] rwsem_try_write_lock() [RmW] spin_trylock(wait_lock)
	 *
	 * Here, it is important to make sure that there won't be a missed
	 * wakeup while the rwsem is free and the only spinning writer goes
	 * to sleep without taking the rwsem. Even when the spinning writer
	 * is just going to break out of the waiting loop, it will still do
	 * a trylock in rwsem_down_write_failed() before sleeping. IOW, if
	 * rwsem_has_spinner() is true, it will guarantee at least one
	 * trylock attempt on the rwsem later on.
	 */
	if (rwsem_has_spinner(sem)) {
		/*
		 * The smp_rmb() here is to make sure that the spinner
		 * state is consulted before reading the wait_lock.
		 */
		smp_rmb();
		if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
			return sem;
		goto locked;
	}
	raw_spin_lock_irqsave(&sem->wait_lock, flags);
locked:

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
EXPORT_SYMBOL(rwsem_wake);

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
__visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
EXPORT_SYMBOL(rwsem_downgrade_wake);
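
/*
 * Illustrative caller-side sketch (not part of this file; "my_sem" is a
 * made-up name): this path is reached via downgrade_write() when the waiting
 * part of the count shows queued tasks, e.g.:
 *
 *	down_write(&my_sem);		// exclusive access while updating
 *	downgrade_write(&my_sem);	// keep reading, let other readers in
 *	up_read(&my_sem);
 */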