Revert "ARCv2: spinlock/rwlock: Reset retry delay when starting a new spin-wait cycle"
[cascardo/linux.git] arch/arc/include/asm/spinlock.h
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)  ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags)       arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
        do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)

#ifdef CONFIG_ARC_HAS_LLSC

/*
 * A normal LLOCK/SCOND based system, w/o need for livelock workaround
 */
#ifndef CONFIG_ARC_STAR_9000923308

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned int val;

        smp_mb();

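        /*
         * Roughly, in pseudocode (illustrative only; the LLOCK/SCOND pair
         * below makes the check-and-set atomic):
         *
         *      while (lock->slock == __ARCH_SPIN_LOCK_LOCKED__)
         *              ;
         *      lock->slock = __ARCH_SPIN_LOCK_LOCKED__;
         */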
        __asm__ __volatile__(
        "1:     llock   %[val], [%[slock]]      \n"
        "       breq    %[val], %[LOCKED], 1b   \n"     /* spin while LOCKED */
        "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [slock]       "r"     (&(lock->slock)),
          [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
        : "memory", "cc");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned int val, got_it = 0;

        smp_mb();

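        /*
         * Roughly, in pseudocode (illustrative only; atomic via LLOCK/SCOND).
         * If the lock is observed held we bail out immediately, whereas a
         * failed SCOND is simply retried:
         *
         *      if (lock->slock != __ARCH_SPIN_LOCK_LOCKED__) {
         *              lock->slock = __ARCH_SPIN_LOCK_LOCKED__;
         *              got_it = 1;
         *      }
         */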
        __asm__ __volatile__(
        "1:     llock   %[val], [%[slock]]      \n"
        "       breq    %[val], %[LOCKED], 4f   \n"     /* already LOCKED, just bail */
        "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
        "       bnz     1b                      \n"
        "       mov     %[got_it], 1            \n"
        "4:                                     \n"
        "                                       \n"
        : [val]         "=&r"   (val),
          [got_it]      "+&r"   (got_it)
        : [slock]       "r"     (&(lock->slock)),
          [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        smp_mb();

        lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;

        smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned int val;

        smp_mb();

        /*
         * zero means writer holds the lock exclusively, deny Reader.
         * Otherwise grant lock to first/subseq reader
         *
         *      if (rw->counter > 0) {
         *              rw->counter--;
         *              ret = 1;
         *      }
         */

        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brls    %[val], %[WR_LOCKED], 1b\n"     /* <= 0: spin while write locked */
        "       sub     %[val], %[val], 1       \n"     /* reader lock */
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter)),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned int val, got_it = 0;

        smp_mb();

        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brls    %[val], %[WR_LOCKED], 4f\n"     /* <= 0: already write locked, bail */
        "       sub     %[val], %[val], 1       \n"     /* counter-- */
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"     /* retry if collided with someone */
        "       mov     %[got_it], 1            \n"
        "                                       \n"
        "4: ; --- done ---                      \n"

        : [val]         "=&r"   (val),
          [got_it]      "+&r"   (got_it)
        : [rwlock]      "r"     (&(rw->counter)),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned int val;

        smp_mb();

        /*
         * If reader(s) hold the lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
         * deny the writer. Otherwise, if unlocked, grant it to the writer.
         * Hence the claim that Linux rwlocks are unfair to writers:
         * they can be starved for an indefinite time by readers.
         *
         *      if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
         *              rw->counter = 0;
         *              ret = 1;
         *      }
         */

        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brne    %[val], %[UNLOCKED], 1b \n"     /* while !UNLOCKED spin */
        "       mov     %[val], %[WR_LOCKED]    \n"
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter)),
          [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned int val, got_it = 0;

        smp_mb();

        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brne    %[val], %[UNLOCKED], 4f \n"     /* !UNLOCKED, bail */
        "       mov     %[val], %[WR_LOCKED]    \n"
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"     /* retry if collided with someone */
        "       mov     %[got_it], 1            \n"
        "                                       \n"
        "4: ; --- done ---                      \n"

        : [val]         "=&r"   (val),
          [got_it]      "+&r"   (got_it)
        : [rwlock]      "r"     (&(rw->counter)),
          [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned int val;

        smp_mb();

        /*
         * rw->counter++;
         */
        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       add     %[val], %[val], 1       \n"
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter))
        : "memory", "cc");

        smp_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        smp_mb();

        rw->counter = __ARCH_RW_LOCK_UNLOCKED__;

        smp_mb();
}

#else   /* CONFIG_ARC_STAR_9000923308 */

/*
 * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
 * coherency transactions in the SCU. The exclusive line state keeps rotating
 * among contending cores, leading to a never ending cycle. So break the cycle
 * by deferring the retry of the failed exclusive access (SCOND). The actual
 * delay needed is a function of the number of contending cores as well as the
 * unrelated coherency traffic from other cores. To keep the code simple, start
 * off with a small delay of 1, which suffices in most cases, and double the
 * delay in case of contention. Eventually the delay is large enough for the
 * coherency pipeline to drain, so a subsequent exclusive access will succeed.
 */

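/*
 * Illustrative C sketch of the backoff policy wired in by the
 * SCOND_FAIL_RETRY_* helpers below (pseudocode only; the hypothetical
 * llock_scond_attempt() stands in for one LLOCK/SCOND round trip):
 *
 *      delay = 1;
 *      while (!llock_scond_attempt(lock)) {
 *              for (tmp = delay; tmp; tmp--)
 *                      ;                       // sit out the backoff window
 *              delay *= 2;                     // exponential backoff
 *              if (delay == 0)                 // asl.f overflowed
 *                      delay = 1;
 *      }
 */
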
#define SCOND_FAIL_RETRY_VAR_DEF                                                \
        unsigned int delay, tmp;                                                \

#define SCOND_FAIL_RETRY_ASM                                                    \
        "   ; --- scond fail delay ---          \n"                             \
        "       mov     %[tmp], %[delay]        \n"     /* tmp = delay */       \
        "2:     brne.d  %[tmp], 0, 2b           \n"     /* while (tmp != 0) */  \
        "       sub     %[tmp], %[tmp], 1       \n"     /* tmp-- */             \
        "       asl.f   %[delay], %[delay], 1   \n"     /* delay *= 2 */        \
        "       mov.z   %[delay], 1             \n"     /* handle overflow */   \
        "       b       1b                      \n"     /* start over */        \
        "                                       \n"                             \
        "4: ; --- done ---                      \n"                             \

#define SCOND_FAIL_RETRY_VARS                                                   \
          ,[delay] "=&r" (delay), [tmp] "=&r"   (tmp)                           \

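/*
 * The three helpers above are spliced into each locking routine below:
 * _VAR_DEF declares the backoff state, _VARS appends it to the asm output
 * operands, and _ASM supplies the "4:" done label plus the slow path taken
 * when SCOND fails: busy-wait for @delay cycles, double @delay (resetting
 * it to 1 on overflow), then branch back to the "1:" retry label.
 */
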
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned int val;
        SCOND_FAIL_RETRY_VAR_DEF;

        smp_mb();

        __asm__ __volatile__(
        "0:     mov     %[delay], 1             \n"
        "1:     llock   %[val], [%[slock]]      \n"
        "       breq    %[val], %[LOCKED], 1b   \n"     /* spin while LOCKED */
        "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
        "       bz      4f                      \n"     /* done */
        "                                       \n"
        SCOND_FAIL_RETRY_ASM

        : [val]         "=&r"   (val)
          SCOND_FAIL_RETRY_VARS
        : [slock]       "r"     (&(lock->slock)),
          [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
        : "memory", "cc");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned int val, got_it = 0;
        SCOND_FAIL_RETRY_VAR_DEF;

        smp_mb();

        __asm__ __volatile__(
        "0:     mov     %[delay], 1             \n"
        "1:     llock   %[val], [%[slock]]      \n"
        "       breq    %[val], %[LOCKED], 4f   \n"     /* already LOCKED, just bail */
        "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
        "       bz.d    4f                      \n"
        "       mov.z   %[got_it], 1            \n"     /* got it */
312         "                                       \n"
313         SCOND_FAIL_RETRY_ASM
314
315         : [val]         "=&r"   (val),
316           [got_it]      "+&r"   (got_it)
317           SCOND_FAIL_RETRY_VARS
318         : [slock]       "r"     (&(lock->slock)),
319           [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
320         : "memory", "cc");
321
322         smp_mb();
323
324         return got_it;
325 }
326
327 static inline void arch_spin_unlock(arch_spinlock_t *lock)
328 {
329         smp_mb();
330
331         lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
332
333         smp_mb();
334 }
335
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned int val;
        SCOND_FAIL_RETRY_VAR_DEF;

        smp_mb();

        /*
         * zero means writer holds the lock exclusively, deny Reader.
         * Otherwise grant lock to first/subseq reader
         *
         *      if (rw->counter > 0) {
         *              rw->counter--;
         *              ret = 1;
         *      }
         */

        __asm__ __volatile__(
        "0:     mov     %[delay], 1             \n"
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brls    %[val], %[WR_LOCKED], 1b\n"     /* <= 0: spin while write locked */
        "       sub     %[val], %[val], 1       \n"     /* reader lock */
        "       scond   %[val], [%[rwlock]]     \n"
        "       bz      4f                      \n"     /* done */
        "                                       \n"
        SCOND_FAIL_RETRY_ASM

        : [val]         "=&r"   (val)
          SCOND_FAIL_RETRY_VARS
        : [rwlock]      "r"     (&(rw->counter)),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned int val, got_it = 0;
        SCOND_FAIL_RETRY_VAR_DEF;

        smp_mb();

        __asm__ __volatile__(
        "0:     mov     %[delay], 1             \n"
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brls    %[val], %[WR_LOCKED], 4f\n"     /* <= 0: already write locked, bail */
        "       sub     %[val], %[val], 1       \n"     /* counter-- */
        "       scond   %[val], [%[rwlock]]     \n"
        "       bz.d    4f                      \n"
        "       mov.z   %[got_it], 1            \n"     /* got it */
        "                                       \n"
        SCOND_FAIL_RETRY_ASM

        : [val]         "=&r"   (val),
          [got_it]      "+&r"   (got_it)
          SCOND_FAIL_RETRY_VARS
        : [rwlock]      "r"     (&(rw->counter)),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned int val;
        SCOND_FAIL_RETRY_VAR_DEF;

        smp_mb();

        /*
         * If reader(s) hold the lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
         * deny the writer. Otherwise, if unlocked, grant it to the writer.
         * Hence the claim that Linux rwlocks are unfair to writers:
         * they can be starved for an indefinite time by readers.
         *
         *      if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
         *              rw->counter = 0;
         *              ret = 1;
         *      }
         */

        __asm__ __volatile__(
        "0:     mov     %[delay], 1             \n"
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brne    %[val], %[UNLOCKED], 1b \n"     /* while !UNLOCKED spin */
        "       mov     %[val], %[WR_LOCKED]    \n"
        "       scond   %[val], [%[rwlock]]     \n"
        "       bz      4f                      \n"
        "                                       \n"
        SCOND_FAIL_RETRY_ASM

        : [val]         "=&r"   (val)
          SCOND_FAIL_RETRY_VARS
        : [rwlock]      "r"     (&(rw->counter)),
          [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned int val, got_it = 0;
        SCOND_FAIL_RETRY_VAR_DEF;

        smp_mb();

        __asm__ __volatile__(
        "0:     mov     %[delay], 1             \n"
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brne    %[val], %[UNLOCKED], 4f \n"     /* !UNLOCKED, bail */
        "       mov     %[val], %[WR_LOCKED]    \n"
        "       scond   %[val], [%[rwlock]]     \n"
        "       bz.d    4f                      \n"
        "       mov.z   %[got_it], 1            \n"     /* got it */
        "                                       \n"
        SCOND_FAIL_RETRY_ASM

        : [val]         "=&r"   (val),
          [got_it]      "+&r"   (got_it)
          SCOND_FAIL_RETRY_VARS
        : [rwlock]      "r"     (&(rw->counter)),
          [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned int val;

        smp_mb();

        /*
         * rw->counter++;
         */
        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       add     %[val], %[val], 1       \n"
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter))
        : "memory", "cc");

        smp_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        unsigned int val;

        smp_mb();

        /*
         * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
         */
        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       scond   %[UNLOCKED], [%[rwlock]]\n"
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter)),
          [UNLOCKED]    "r"     (__ARCH_RW_LOCK_UNLOCKED__)
        : "memory", "cc");

        smp_mb();
}

#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS

#endif  /* CONFIG_ARC_STAR_9000923308 */

#else   /* !CONFIG_ARC_HAS_LLSC */

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

        /*
         * This smp_mb() is technically superfluous; we only need the one
         * after the lock to provide the ACQUIRE semantics.
         * However, doing the "right" thing was regressing hackbench,
         * so keep this pending further investigation.
         */
        smp_mb();

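        /*
         * EX atomically exchanges a register with the lock word. Roughly,
         * in pseudocode (illustrative only):
         *
         *      do {
         *              val = xchg(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__);
         *      } while (val == __ARCH_SPIN_LOCK_LOCKED__);
         */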
        __asm__ __volatile__(
        "1:     ex  %0, [%1]            \n"
        "       breq  %0, %2, 1b        \n"
        : "+&r" (val)
        : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
        : "memory");

        /*
         * ACQUIRE barrier to ensure loads/stores after taking the lock
         * don't "bleed-up" out of the critical section (leak-in is allowed).
         * http://www.spinics.net/lists/kernel/msg2010409.html
         *
         * ARCv2 only has load-load, store-store and all-all barriers,
         * thus the full all-all barrier is needed.
         */
        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

        smp_mb();

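        /*
         * A single EX: LOCKED is written unconditionally, and the lock was
         * free iff the old value read back is UNLOCKED. Roughly, in
         * pseudocode (illustrative only):
         *
         *      val = xchg(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__);
         *      return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
         */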
        __asm__ __volatile__(
        "1:     ex  %0, [%1]            \n"
        : "+r" (val)
        : "r"(&(lock->slock))
        : "memory");

        smp_mb();

        return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

        /*
         * RELEASE barrier: given the instructions available on ARCv2, a full
         * barrier is the only option
         */
        smp_mb();

        __asm__ __volatile__(
        "       ex  %0, [%1]            \n"
        : "+r" (val)
        : "r"(&(lock->slock))
        : "memory");

        /*
         * superfluous, but keep for now - see the pairing version in
         * arch_spin_lock() above
         */
        smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        int ret = 0;
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));

        /*
         * zero means writer holds the lock exclusively, deny Reader.
         * Otherwise grant lock to first/subseq reader
         */
        if (rw->counter > 0) {
                rw->counter--;
                ret = 1;
        }

        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);

        smp_mb();
        return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        int ret = 0;
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));

        /*
         * If reader(s) hold the lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
         * deny the writer. Otherwise, if unlocked, grant it to the writer.
         * Hence the claim that Linux rwlocks are unfair to writers:
         * they can be starved for an indefinite time by readers.
         */
        if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
                rw->counter = 0;
                ret = 1;
        }
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);

        return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        while (!arch_read_trylock(rw))
                cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        while (!arch_write_trylock(rw))
                cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter++;
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);
}

#endif

#define arch_read_can_lock(x)   ((x)->counter > 0)
#define arch_write_can_lock(x)  ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)

#define arch_read_lock_flags(lock, flags)       arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)      arch_write_lock(lock)

#define arch_spin_relax(lock)   cpu_relax()
#define arch_read_relax(lock)   cpu_relax()
#define arch_write_relax(lock)  cpu_relax()

#endif /* __ASM_SPINLOCK_H */