#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. There are two types
 * of readers:
 * 1. Sequence readers which never block a writer but may have to retry
 *    if a writer is in progress, detected by a change in the sequence number.
 *    Writers do not wait for a sequence reader.
 * 2. Locking readers which will wait if a writer or another locking reader
 *    is in progress. A locking reader in progress will also block a writer
 *    from going forward. Unlike the regular rwlock, the read lock here is
 *    exclusive so that only one locking reader can get it.
 *
 * This is not as cache friendly as brlock. Also, this may not work well
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected non-blocking reader usage:
 *      do {
 *          seq = read_seqbegin(&foo);
 *      ...
 *      } while (read_seqretry(&foo, seq));
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */

#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <asm/processor.h>

/*
 * Version using a sequence counter only.
 * This can be used when the code has its own mutex protecting the
 * update, starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */
typedef struct seqcount {
        unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
} seqcount_t;

static inline void __seqcount_init(seqcount_t *s, const char *name,
                                          struct lock_class_key *key)
{
        /*
         * Make sure we are not reinitializing a held lock:
         */
        lockdep_init_map(&s->dep_map, name, key, 0);
        s->sequence = 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define SEQCOUNT_DEP_MAP_INIT(lockname) \
                .dep_map = { .name = #lockname }

# define seqcount_init(s)                               \
        do {                                            \
                static struct lock_class_key __key;     \
                __seqcount_init((s), #s, &__key);       \
        } while (0)

static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
        seqcount_t *l = (seqcount_t *)s;
        unsigned long flags;

        local_irq_save(flags);
        seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
        seqcount_release(&l->dep_map, 1, _RET_IP_);
        local_irq_restore(flags);
}

#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif

#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname) }
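
/*
 * Illustrative initialization (all names below are placeholders, not part
 * of this API). A seqcount can be initialized statically:
 *
 *      static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 *
 * or at runtime:
 *
 *      seqcount_init(&foo->seq);
 */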

/**
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
        unsigned ret;

repeat:
        ret = ACCESS_ONCE(s->sequence);
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
        }
        return ret;
}

/**
 * raw_read_seqcount - Read the raw seqcount
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount without any lockdep checking and without checking or
 * masking the LSB. Calling code is responsible for handling that.
 */
static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
        unsigned ret = ACCESS_ONCE(s->sequence);
        smp_rmb();
        return ret;
}

/**
 * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount_begin opens a read critical section of the given
 * seqcount, but without any lockdep checking. Validity of the critical
 * section is tested by calling read_seqcount_retry().
 */
static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
{
        unsigned ret = __read_seqcount_begin(s);
        smp_rmb();
        return ret;
}

/**
 * read_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling read_seqcount_retry().
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
        seqcount_lockdep_reader_access(s);
        return raw_read_seqcount_begin(s);
}

/**
 * raw_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling read_seqcount_retry().
 *
 * Unlike read_seqcount_begin(), this function will not wait for the count
 * to stabilize. If a writer is active when we begin, the subsequent
 * read_seqcount_retry() will fail instead of the count stabilizing at the
 * beginning of the critical section.
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
        unsigned ret = ACCESS_ONCE(s->sequence);
        smp_rmb();
        return ret & ~1;
}
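
/*
 * Illustrative use of raw_seqcount_begin() (a sketch; "obj" and its fields
 * are placeholders): the reader proceeds speculatively and bails out to a
 * slow path if a writer was, or became, active:
 *
 *      seq = raw_seqcount_begin(&obj->seq);
 *      val = obj->field;
 *      if (read_seqcount_retry(&obj->seq, seq))
 *              goto slow_path;
 */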

/**
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided after loading the variables protected by this critical section
 * and before calling this function.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
        return unlikely(s->sequence != start);
}
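
/*
 * Sketch of the barrier-less variants paired with explicit smp_rmb()
 * (illustrative only; "s", "val" and "protected_data" are placeholders):
 *
 *      do {
 *              seq = __read_seqcount_begin(&s);
 *              smp_rmb();      (caller-provided ordering before the loads)
 *              val = protected_data;
 *              smp_rmb();      (orders the loads before the re-check)
 *      } while (__read_seqcount_retry(&s, seq));
 */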

/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
        smp_rmb();
        return __read_seqcount_retry(s, start);
}
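
/*
 * Typical lockless reader, mirroring the usage sketch at the top of this
 * file ("data" and "snapshot" are placeholders):
 *
 *      unsigned seq;
 *      do {
 *              seq = read_seqcount_begin(&data->seq);
 *              snapshot = data->value;
 *      } while (read_seqcount_retry(&data->seq, seq));
 */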

static inline void raw_write_seqcount_begin(seqcount_t *s)
{
        s->sequence++;
        smp_wmb();
}

static inline void raw_write_seqcount_end(seqcount_t *s)
{
        smp_wmb();
        s->sequence++;
}

/*
 * raw_write_seqcount_latch - redirect readers to even/odd copy
 * @s: pointer to seqcount_t
 */
static inline void raw_write_seqcount_latch(seqcount_t *s)
{
        smp_wmb();      /* prior stores before incrementing "sequence" */
        s->sequence++;
        smp_wmb();      /* increment "sequence" before following stores */
}
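
/*
 * Latch usage sketch (illustrative only; "latch", holding two copies of
 * the data, and "modify"/"data_query" are placeholders, not defined here).
 * The writer flips the sequence before updating each copy, so readers
 * always have one stable copy to look at:
 *
 *      raw_write_seqcount_latch(&latch->seq);
 *      modify(latch->data[0], ...);
 *      raw_write_seqcount_latch(&latch->seq);
 *      modify(latch->data[1], ...);
 *
 * The reader uses the sequence LSB to pick the copy not being written:
 *
 *      do {
 *              seq = raw_read_seqcount(&latch->seq);
 *              idx = seq & 0x01;
 *              entry = data_query(latch->data[idx], ...);
 *              smp_rmb();
 *      } while (seq != latch->seq.sequence);
 */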

/*
 * The sequence counter only version assumes that callers are using their
 * own mutual exclusion.
 */
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
        raw_write_seqcount_begin(s);
        seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}

static inline void write_seqcount_begin(seqcount_t *s)
{
        write_seqcount_begin_nested(s, 0);
}

static inline void write_seqcount_end(seqcount_t *s)
{
        seqcount_release(&s->dep_map, 1, _RET_IP_);
        raw_write_seqcount_end(s);
}
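
/*
 * Illustrative writer (a sketch; "foo_lock", "foo_seq" and "foo_data" are
 * placeholders). The caller's own lock serializes writers; the seqcount
 * only tells lockless readers that an update is in progress:
 *
 *      spin_lock(&foo_lock);
 *      write_seqcount_begin(&foo_seq);
 *      foo_data = new_value;
 *      write_seqcount_end(&foo_seq);
 *      spin_unlock(&foo_lock);
 */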

/**
 * write_seqcount_barrier - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After write_seqcount_barrier, no read-side seq operations will complete
 * successfully and see data older than this.
 */
static inline void write_seqcount_barrier(seqcount_t *s)
{
        smp_wmb();
        s->sequence += 2;
}

typedef struct {
        struct seqcount seqcount;
        spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems.  We think these are
 * OK now.  Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname)                    \
        {                                               \
                .seqcount = SEQCNT_ZERO(lockname),      \
                .lock = __SPIN_LOCK_UNLOCKED(lockname)  \
        }

#define seqlock_init(x)                                 \
        do {                                            \
                seqcount_init(&(x)->seqcount);          \
                spin_lock_init(&(x)->lock);             \
        } while (0)

#define DEFINE_SEQLOCK(x) \
                seqlock_t x = __SEQLOCK_UNLOCKED(x)
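
/*
 * Illustrative definitions (names are placeholders):
 *
 *      static DEFINE_SEQLOCK(foo_seqlock);
 *
 * or, for a dynamically initialized lock:
 *
 *      seqlock_init(&foo->lock);
 */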

/*
 * Read side functions for starting and finalizing a read side section.
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
        return read_seqcount_begin(&sl->seqcount);
}

static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
        return read_seqcount_retry(&sl->seqcount, start);
}
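
/*
 * This is the pattern from the usage comment at the top of this file
 * ("foo" is a placeholder seqlock_t, "foo_x"/"foo_y" placeholder data):
 *
 *      unsigned seq;
 *      do {
 *              seq = read_seqbegin(&foo);
 *              x = foo_x;
 *              y = foo_y;
 *      } while (read_seqretry(&foo, seq));
 */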

/*
 * Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * There is no need for preempt_disable() because spin_lock already does that.
 */
static inline void write_seqlock(seqlock_t *sl)
{
        spin_lock(&sl->lock);
        write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock(seqlock_t *sl)
{
        write_seqcount_end(&sl->seqcount);
        spin_unlock(&sl->lock);
}
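
/*
 * Illustrative writer side, matching the reader sketch above ("foo" and
 * the data fields are placeholders):
 *
 *      write_seqlock(&foo);
 *      foo_x = new_x;
 *      foo_y = new_y;
 *      write_sequnlock(&foo);
 */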

static inline void write_seqlock_bh(seqlock_t *sl)
{
        spin_lock_bh(&sl->lock);
        write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_bh(seqlock_t *sl)
{
        write_seqcount_end(&sl->seqcount);
        spin_unlock_bh(&sl->lock);
}

static inline void write_seqlock_irq(seqlock_t *sl)
{
        spin_lock_irq(&sl->lock);
        write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_irq(seqlock_t *sl)
{
        write_seqcount_end(&sl->seqcount);
        spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
        unsigned long flags;

        spin_lock_irqsave(&sl->lock, flags);
        write_seqcount_begin(&sl->seqcount);
        return flags;
}

#define write_seqlock_irqsave(lock, flags)                              \
        do { flags = __write_seqlock_irqsave(lock); } while (0)

static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
        write_seqcount_end(&sl->seqcount);
        spin_unlock_irqrestore(&sl->lock, flags);
}
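
/*
 * Illustrative irqsave usage, for writers that may race with an interrupt
 * handler taking the same lock ("foo" is a placeholder):
 *
 *      unsigned long flags;
 *
 *      write_seqlock_irqsave(&foo, flags);
 *      ...
 *      write_sequnlock_irqrestore(&foo, flags);
 */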

/*
 * A locking reader exclusively locks out other writers and locking readers,
 * but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
 * There is no need for preempt_disable() because spin_lock already does that.
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
        spin_lock(&sl->lock);
}

static inline void read_sequnlock_excl(seqlock_t *sl)
{
        spin_unlock(&sl->lock);
}

/**
 * read_seqbegin_or_lock - begin a sequence number check or locking block
 * @lock: sequence lock
 * @seq : sequence number to be checked
 *
 * First try it once optimistically without taking the lock. If that fails,
 * take the lock. The sequence number is also used as a marker for deciding
 * whether to be a lockless (even) or locking (odd) reader.
 * N.B. seq must be initialized to an even number to begin with.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
        if (!(*seq & 1))        /* Even */
                *seq = read_seqbegin(lock);
        else                    /* Odd */
                read_seqlock_excl(lock);
}

static inline int need_seqretry(seqlock_t *lock, int seq)
{
        return !(seq & 1) && read_seqretry(lock, seq);
}

static inline void done_seqretry(seqlock_t *lock, int seq)
{
        if (seq & 1)
                read_sequnlock_excl(lock);
}
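
/*
 * One way to structure a retry loop with the three helpers above (a
 * sketch; "foo" is a placeholder seqlock_t). The first pass is lockless;
 * if it fails, the next pass passes in an odd seq and so takes the lock:
 *
 *      int seq, nextseq = 0;
 *
 *      do {
 *              seq = nextseq;
 *              read_seqbegin_or_lock(&foo, &seq);
 *              ...
 *              nextseq = 1;
 *      } while (need_seqretry(&foo, seq));
 *      done_seqretry(&foo, seq);
 */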

static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
        spin_lock_bh(&sl->lock);
}

static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
        spin_unlock_bh(&sl->lock);
}

static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
        spin_lock_irq(&sl->lock);
}

static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
        spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
        unsigned long flags;

        spin_lock_irqsave(&sl->lock, flags);
        return flags;
}

#define read_seqlock_excl_irqsave(lock, flags)                          \
        do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
        spin_unlock_irqrestore(&sl->lock, flags);
}

#endif /* __LINUX_SEQLOCK_H */