lib/rwsem-spinlock.c
/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001   David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/module.h>

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	unsigned int flags;
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};

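/*
 * Note on state encoding (editorial summary of the code below):
 * sem->activity is 0 when the semaphore is free, N > 0 when N readers
 * hold it, and -1 when a writer holds it.  Waiters queue in FIFO order
 * on sem->wait_list, protected by sem->wait_lock.
 */
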
/*
 * initialise the semaphore
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->activity = 0;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_rwsem);

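/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): callers normally do not invoke __init_rwsem() directly.  The
 * init_rwsem() macro in <linux/rwsem.h> supplies a static
 * lock_class_key for lockdep, and DECLARE_RWSEM() does the same for
 * static definitions.
 */
#if 0	/* example only */
static DECLARE_RWSEM(example_rwsem);	/* static definition */

static struct rw_semaphore dynamic_rwsem;

static void example_setup(void)
{
	init_rwsem(&dynamic_rwsem);	/* expands to __init_rwsem() + key */
}
#endif
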
/*
 * handle the lock being released when there are processes blocked on it
 * that can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken waiter blocks are removed from the list after their task
 *   pointer has been zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	int woken;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	if (!wakewrite) {
		if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
			goto out;
		goto dont_wake_writers;
	}

	/* if we are allowed to wake writers try to grant a single write lock
	 * if there's a writer at the front of the queue
	 * - we leave the 'waiting count' incremented to signify potential
	 *   contention
	 */
	if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
		sem->activity = -1;
		list_del(&waiter->list);
		tsk = waiter->task;
		/* Don't touch waiter after ->task has been NULLed */
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
		goto out;
	}

	/* grant an infinite number of read locks to the front of the queue */
 dont_wake_writers:
	woken = 0;
	while (waiter->flags & RWSEM_WAITING_FOR_READ) {
		struct list_head *next = waiter->list.next;

		list_del(&waiter->list);
		tsk = waiter->task;
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
		woken++;
		if (list_empty(&sem->wait_list))
			break;
		waiter = list_entry(next, struct rwsem_waiter, list);
	}

	sem->activity += woken;

 out:
	return sem;
}

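/*
 * Worked example of the wakeup policy above (editorial note): with a
 * wait queue of [writer, reader, reader] and wakewrite set, only the
 * writer is woken and sem->activity becomes -1; the readers stay
 * queued behind it.  With a queue of [reader, reader, writer], both
 * readers are woken (activity += 2) and the writer remains queued.
 * With wakewrite clear (the __downgrade_write() path) and a writer at
 * the head, nothing is woken at all.
 */
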
/*
 * wake a single writer
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;

	sem->activity = -1;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	list_del(&waiter->list);

	tsk = waiter->task;
	smp_mb();
	waiter->task = NULL;
	wake_up_process(tsk);
	put_task_struct(tsk);
	return sem;
}

/*
 * get a read lock on the semaphore
 */
void __sched __down_read(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	spin_lock_irq(&sem->wait_lock);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		spin_unlock_irq(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
	;
}

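/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): __down_read() sits behind the generic down_read()/up_read()
 * API in kernel/rwsem.c, which adds might_sleep() and lockdep
 * annotations.  A typical reader-side critical section:
 */
#if 0	/* example only */
static int example_reader(struct rw_semaphore *sem, const int *shared)
{
	int v;

	down_read(sem);		/* ends up in __down_read() above */
	v = *shared;		/* any number of readers can run here */
	up_read(sem);
	return v;
}
#endif
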
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int __down_read_trylock(struct rw_semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		ret = 1;
	}

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	return ret;
}

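/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): trylock callers must handle failure.  Note the condition
 * above: the trylock fails not only when a writer holds the lock but
 * also whenever any waiter is queued, so queued writers are not
 * starved by a stream of new readers.
 */
#if 0	/* example only */
static int example_try_reader(struct rw_semaphore *sem, const int *shared)
{
	int v;

	if (!down_read_trylock(sem))	/* reaches __down_read_trylock() */
		return -EBUSY;		/* writer held it, or waiters queued */
	v = *shared;
	up_read(sem);
	return v;
}
#endif
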
/*
 * get a write lock on the semaphore
 * - we increment the waiting count anyway to indicate an exclusive lock
 */
void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	spin_lock_irq(&sem->wait_lock);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		spin_unlock_irq(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
	;
}

void __sched __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}

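/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): down_write() lands in __down_write() above, and
 * down_write_nested() passes a lockdep subclass through to
 * __down_write_nested() for the rare case where two rwsems of the same
 * lock class must be held at once.
 */
#if 0	/* example only */
static void example_writer(struct rw_semaphore *sem, int *shared)
{
	down_write(sem);	/* exclusive: sem->activity becomes -1 */
	(*shared)++;		/* no readers or other writers run here */
	up_write(sem);
}
#endif
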
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int __down_write_trylock(struct rw_semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		ret = 1;
	}

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	return ret;
}

/*
 * release a read lock on the semaphore
 */
void __up_read(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
		sem = __rwsem_wake_one_writer(sem);

	spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * release a write lock on the semaphore
 */
void __up_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	sem->activity = 0;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 1);

	spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void __downgrade_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	sem->activity = 1;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 0);

	spin_unlock_irqrestore(&sem->wait_lock, flags);
}
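
/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): downgrading lets a writer publish an update and keep reading
 * without a window in which the lock is dropped entirely.
 */
#if 0	/* example only */
static int example_publish_then_read(struct rw_semaphore *sem, int *shared)
{
	int v;

	down_write(sem);
	*shared = 42;		/* exclusive update */
	downgrade_write(sem);	/* wakes queued readers, keeps us as reader */
	v = *shared;		/* now sharing the lock with other readers */
	up_read(sem);
	return v;
}
#endif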